X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fvirtio%2Fvirtio_ethdev.c;h=ee5a98b7c073bba636c9fa96fdf28144b7410742;hb=239810607333f7e68979781b0c7be2a14c242f6a;hp=e6ba1282be926773805984046ffd1f1d865f7ac9;hpb=f803734b0f2e6c556d9bf7fe8f11638429e3a00f;p=dpdk.git

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index e6ba1282be..ee5a98b7c0 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -141,6 +141,97 @@ static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
 
 struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
 
+static struct virtio_pmd_ctrl *
+virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
+		       int *dlen, int pkt_num)
+{
+	struct virtqueue *vq = cvq->vq;
+	int head;
+	struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
+	struct virtio_pmd_ctrl *result;
+	bool avail_wrap_counter, used_wrap_counter;
+	uint16_t flags;
+	int sum = 0;
+	int k;
+
+	/*
+	 * Format is enforced in qemu code:
+	 * One TX packet for header;
+	 * At least one TX packet per argument;
+	 * One RX packet for ACK.
+	 */
+	head = vq->vq_avail_idx;
+	avail_wrap_counter = vq->avail_wrap_counter;
+	used_wrap_counter = vq->used_wrap_counter;
+	desc[head].flags = VRING_DESC_F_NEXT;
+	desc[head].addr = cvq->virtio_net_hdr_mem;
+	desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
+	vq->vq_free_cnt--;
+	if (++vq->vq_avail_idx >= vq->vq_nentries) {
+		vq->vq_avail_idx -= vq->vq_nentries;
+		vq->avail_wrap_counter ^= 1;
+	}
+
+	for (k = 0; k < pkt_num; k++) {
+		desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+			+ sizeof(struct virtio_net_ctrl_hdr)
+			+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
+		desc[vq->vq_avail_idx].len = dlen[k];
+		flags = VRING_DESC_F_NEXT;
+		sum += dlen[k];
+		vq->vq_free_cnt--;
+		flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+			VRING_DESC_F_USED(!vq->avail_wrap_counter);
+		desc[vq->vq_avail_idx].flags = flags;
+		rte_smp_wmb();
+		vq->vq_free_cnt--;
+		if (++vq->vq_avail_idx >= vq->vq_nentries) {
+			vq->vq_avail_idx -= vq->vq_nentries;
+			vq->avail_wrap_counter ^= 1;
+		}
+	}
+
+
+	desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+		+ sizeof(struct virtio_net_ctrl_hdr);
+	desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
+	flags = VRING_DESC_F_WRITE;
+	flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+		VRING_DESC_F_USED(!vq->avail_wrap_counter);
+	desc[vq->vq_avail_idx].flags = flags;
+	flags = VRING_DESC_F_NEXT;
+	flags |= VRING_DESC_F_AVAIL(avail_wrap_counter) |
+		VRING_DESC_F_USED(!avail_wrap_counter);
+	desc[head].flags = flags;
+	rte_smp_wmb();
+
+	vq->vq_free_cnt--;
+	if (++vq->vq_avail_idx >= vq->vq_nentries) {
+		vq->vq_avail_idx -= vq->vq_nentries;
+		vq->avail_wrap_counter ^= 1;
+	}
+
+	virtqueue_notify(vq);
+
+	/* wait for used descriptors in virtqueue */
+	do {
+		rte_rmb();
+		usleep(100);
+	} while (!__desc_is_used(&desc[head], used_wrap_counter));
+
+	/* now get used descriptors */
+	while (desc_is_used(&desc[vq->vq_used_cons_idx], vq)) {
+		vq->vq_free_cnt++;
+		if (++vq->vq_used_cons_idx >= vq->vq_nentries) {
+			vq->vq_used_cons_idx -= vq->vq_nentries;
+			vq->used_wrap_counter ^= 1;
+		}
+	}
+
+	result = cvq->virtio_net_hdr_mz->addr;
+	return result;
+}
+
 static int
 virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
 		int *dlen, int pkt_num)
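(Annotation, not part of the patch.) The new virtio_pq_send_command() leans entirely on the virtio 1.1 packed-ring flag protocol: the driver makes a descriptor available by setting its AVAIL bit equal to the current wrap counter and USED to the inverse, and the device marks it used by flipping USED to match AVAIL. The head descriptor's availability flags are written only after the rest of the chain is set up, with write barriers keeping the stores ordered, so the device cannot see a half-built chain. One accounting detail worth flagging in this revision: the per-packet loop decrements vq->vq_free_cnt twice per data descriptor, while the reclaim loop at the bottom increments it only once per used descriptor. Below is a minimal standalone sketch of the flag protocol; the shift values mirror the virtio 1.1 spec, but the sketch_*/SKETCH_* names and types are illustrative, not the in-tree DPDK ones.

/*
 * Illustrative sketch only -- not the driver's code. Bit 7 of the
 * descriptor flags carries the avail wrap counter, bit 15 the used
 * wrap counter, as in the virtio 1.1 packed-ring layout.
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_DESC_F_AVAIL(b)	((uint16_t)(b) << 7)
#define SKETCH_DESC_F_USED(b)	((uint16_t)(b) << 15)

struct sketch_packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

/* Driver side: expose a filled descriptor under wrap counter 'wc'. */
static inline void
sketch_make_avail(struct sketch_packed_desc *d, uint16_t flags, bool wc)
{
	/* addr/len must be written (and fenced) before flags. */
	d->flags = flags | SKETCH_DESC_F_AVAIL(wc) | SKETCH_DESC_F_USED(!wc);
}

/* Driver side: 'd' is consumed once AVAIL == USED == 'wc'. */
static inline bool
sketch_desc_is_used(const struct sketch_packed_desc *d, bool wc)
{
	bool avail = !!(d->flags & SKETCH_DESC_F_AVAIL(1));
	bool used = !!(d->flags & SKETCH_DESC_F_USED(1));

	return avail == used && used == wc;
}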
@@ -174,6 +265,11 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
 	memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
 		sizeof(struct virtio_pmd_ctrl));
 
+	if (vtpci_packed_queue(vq->hw)) {
+		result = virtio_pq_send_command(cvq, ctrl, dlen, pkt_num);
+		goto out_unlock;
+	}
+
 	/*
 	 * Format is enforced in qemu code:
 	 * One TX packet for header;
@@ -245,6 +341,7 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
 
 	result = cvq->virtio_net_hdr_mz->addr;
 
+out_unlock:
 	rte_spinlock_unlock(&cvq->lock);
 	return result->status;
 }
@@ -390,6 +487,9 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 	if (vtpci_packed_queue(hw)) {
 		vq->avail_wrap_counter = 1;
 		vq->used_wrap_counter = 1;
+		vq->avail_used_flags =
+			VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+			VRING_DESC_F_USED(!vq->avail_wrap_counter);
 	}
 
 	/*
@@ -497,17 +597,26 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 		memset(txr, 0, vq_size * sizeof(*txr));
 		for (i = 0; i < vq_size; i++) {
 			struct vring_desc *start_dp = txr[i].tx_indir;
-
-			vring_desc_init_split(start_dp,
-					RTE_DIM(txr[i].tx_indir));
+			struct vring_packed_desc *start_dp_packed =
+				txr[i].tx_indir_pq;
 
 			/* first indirect descriptor is always the tx header */
-			start_dp->addr = txvq->virtio_net_hdr_mem
-				+ i * sizeof(*txr)
-				+ offsetof(struct virtio_tx_region, tx_hdr);
-
-			start_dp->len = hw->vtnet_hdr_size;
-			start_dp->flags = VRING_DESC_F_NEXT;
+			if (vtpci_packed_queue(hw)) {
+				start_dp_packed->addr = txvq->virtio_net_hdr_mem
+					+ i * sizeof(*txr)
+					+ offsetof(struct virtio_tx_region,
+						   tx_hdr);
+				start_dp_packed->len = hw->vtnet_hdr_size;
+			} else {
+				vring_desc_init_split(start_dp,
+					RTE_DIM(txr[i].tx_indir));
+				start_dp->addr = txvq->virtio_net_hdr_mem
+					+ i * sizeof(*txr)
+					+ offsetof(struct virtio_tx_region,
+						   tx_hdr);
+				start_dp->len = hw->vtnet_hdr_size;
+				start_dp->flags = VRING_DESC_F_NEXT;
+			}
 		}
 	}
 
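(Annotation, not part of the patch.) The address arithmetic in the hunk above is easier to follow against the memzone layout it indexes: the Tx header memzone holds one struct virtio_tx_region per slot, each region starting with the virtio-net header and followed by that slot's indirect descriptor table (split or, after this patch, packed). Slot i's header IOVA is therefore base + i * region_size + offsetof(region, tx_hdr). A sketch under assumed names follows; the 12-byte header size and the sketch_* layout are illustrative, the real definition being struct virtio_tx_region.

/*
 * Illustrative layout sketch -- field sizes and names are assumptions,
 * not the in-tree struct virtio_tx_region.
 */
#include <stddef.h>
#include <stdint.h>

struct sketch_net_hdr {
	uint8_t bytes[12];		/* virtio-net header, assumed size */
};

struct sketch_tx_region {
	struct sketch_net_hdr tx_hdr;	/* always the first member */
	uint64_t indir[32];		/* stand-in for the indirect table */
};

/* IOVA of slot i's Tx header -- the same arithmetic as in the hunk. */
static inline uint64_t
sketch_tx_hdr_iova(uint64_t base_iova, unsigned int i)
{
	return base_iova + i * sizeof(struct sketch_tx_region)
		+ offsetof(struct sketch_tx_region, tx_hdr);
}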
@@ -1336,35 +1445,58 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 {
 	struct virtio_hw *hw = eth_dev->data->dev_private;
 
-	if (hw->use_simple_rx) {
-		PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
-			eth_dev->data->port_id);
-		eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
-	} else if (hw->use_inorder_rx) {
-		PMD_INIT_LOG(INFO,
-			"virtio: using inorder mergeable buffer Rx path on port %u",
-			eth_dev->data->port_id);
-		eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts_inorder;
-	} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+	if (vtpci_packed_queue(hw)) {
 		PMD_INIT_LOG(INFO,
-			"virtio: using mergeable buffer Rx path on port %u",
+			"virtio: using packed ring standard Tx path on port %u",
 			eth_dev->data->port_id);
-		eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
+		eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
 	} else {
-		PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
-			eth_dev->data->port_id);
-		eth_dev->rx_pkt_burst = &virtio_recv_pkts;
+		if (hw->use_inorder_tx) {
+			PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
+				eth_dev->data->port_id);
+			eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
+		} else {
+			PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
+				eth_dev->data->port_id);
+			eth_dev->tx_pkt_burst = virtio_xmit_pkts;
+		}
 	}
 
-	if (hw->use_inorder_tx) {
-		PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
-			eth_dev->data->port_id);
-		eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
+	if (vtpci_packed_queue(hw)) {
+		if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+			PMD_INIT_LOG(INFO,
+				"virtio: using packed ring mergeable buffer Rx path on port %u",
+				eth_dev->data->port_id);
+			eth_dev->rx_pkt_burst =
+				&virtio_recv_mergeable_pkts_packed;
+		} else {
+			PMD_INIT_LOG(INFO,
+				"virtio: using packed ring standard Rx path on port %u",
+				eth_dev->data->port_id);
+			eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
+		}
 	} else {
-		PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
-			eth_dev->data->port_id);
-		eth_dev->tx_pkt_burst = virtio_xmit_pkts;
+		if (hw->use_simple_rx) {
+			PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
+				eth_dev->data->port_id);
+			eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
+		} else if (hw->use_inorder_rx) {
+			PMD_INIT_LOG(INFO,
+				"virtio: using inorder Rx path on port %u",
+				eth_dev->data->port_id);
+			eth_dev->rx_pkt_burst = &virtio_recv_pkts_inorder;
+		} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+			PMD_INIT_LOG(INFO,
+				"virtio: using mergeable buffer Rx path on port %u",
+				eth_dev->data->port_id);
+			eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
+		} else {
+			PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
+				eth_dev->data->port_id);
+			eth_dev->rx_pkt_burst = &virtio_recv_pkts;
+		}
 	}
+
 }
 
 /* Only support 1:1 queue/interrupt mapping so far.
@@ -1482,6 +1614,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
 	if (virtio_negotiate_features(hw, req_features) < 0)
 		return -1;
 
+	hw->weak_barriers = !vtpci_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);
+
 	if (!hw->virtio_user_dev) {
 		pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 		rte_eth_copy_pci_info(eth_dev, pci_dev);
@@ -1917,13 +2051,14 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 
 	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
 		hw->use_inorder_tx = 1;
-		if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) &&
-		    !vtpci_packed_queue(hw)) {
-			hw->use_inorder_rx = 1;
-			hw->use_simple_rx = 0;
-		} else {
-			hw->use_inorder_rx = 0;
-		}
+		hw->use_inorder_rx = 1;
+		hw->use_simple_rx = 0;
+	}
+
+	if (vtpci_packed_queue(hw)) {
+		hw->use_simple_rx = 0;
+		hw->use_inorder_rx = 0;
+		hw->use_inorder_tx = 0;
 	}
 
 #if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
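(Annotation, not part of the patch.) The last two hunks are policy rather than ring mechanics. hw->weak_barriers records that VIRTIO_F_ORDER_PLATFORM was not negotiated, in which case the device is presumed to share the driver's memory model (e.g. a software device on the same host) and the cheaper SMP barriers suffice; a device that does offer the feature requires full platform barriers. And virtio_dev_configure() pins packed queues to the plain Rx/Tx paths by clearing the simple and in-order selectors, since those variants are split-ring only at this point. A hedged sketch of the barrier choice the flag enables is below; the helper name is illustrative, though rte_smp_mb() and rte_mb() are DPDK's real primitives.

/*
 * Illustrative helper -- not the driver's actual code -- showing what
 * hw->weak_barriers selects between.
 */
#include <stdbool.h>
#include <rte_atomic.h>

static inline void
sketch_virtio_mb(bool weak_barriers)
{
	if (weak_barriers)
		rte_smp_mb();	/* CPU-to-CPU ordering only; cheap on x86 */
	else
		rte_mb();	/* full barrier, safe for real devices */
}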