net/virtio: add parameter to enable vectorized path
author    Marvin Liu <yong.liu@intel.com>
          Wed, 29 Apr 2020 07:28:16 +0000 (15:28 +0800)
committer Ferruh Yigit <ferruh.yigit@intel.com>
          Tue, 5 May 2020 13:54:26 +0000 (15:54 +0200)
Previously, the virtio split ring vectorized path was enabled by
default. This is not suitable for everyone, because that path does not
follow the virtio spec. Add a new devarg for virtio vectorized path
selection. By default, the vectorized path is disabled.

Signed-off-by: Marvin Liu <yong.liu@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
doc/guides/nics/virtio.rst
drivers/net/virtio/virtio_ethdev.c
drivers/net/virtio/virtio_pci.h
drivers/net/virtio/virtio_rxtx.c
drivers/net/virtio/virtio_user_ethdev.c
drivers/net/virtio/virtqueue.c

diff --git a/doc/guides/nics/virtio.rst b/doc/guides/nics/virtio.rst
index 6286286dbfb8de6f711e550d4a8bfb4bb49e55aa..aaa8176511ec853515ad6dd47ce0a3c630ee189e 100644
--- a/doc/guides/nics/virtio.rst
+++ b/doc/guides/nics/virtio.rst
@@ -363,6 +363,13 @@ Below devargs are supported by the PCI virtio driver:
     rte_eth_link_get_nowait function.
     (Default: 10000 (10G))
 
+#.  ``vectorized``:
+
+    It is used to specify whether the virtio device prefers to use the
+    vectorized path. The dependencies of the vectorized path are then
+    checked during path selection.
+    (Default: 0 (disabled))
+
 Below devargs are supported by the virtio-user vdev:
 
 #.  ``path``:
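As a usage sketch (the PCI address, core list, and application are placeholders, and the EAL device-argument option flag may differ across DPDK releases), the devarg would be passed on the command line like this:

    testpmd -l 0-1 -w 0000:00:04.0,vectorized=1 -- -i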
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 37766cbb6483351cfbea29cb069eba6f0e17212d..0a69a4db11c41f520bbddf6d21946e77e5e14e96 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -48,7 +48,8 @@ static int virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
 static uint32_t virtio_dev_speed_capa_get(uint32_t speed);
 static int virtio_dev_devargs_parse(struct rte_devargs *devargs,
        int *vdpa,
-       uint32_t *speed);
+       uint32_t *speed,
+       int *vectorized);
 static int virtio_dev_info_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
 static int virtio_dev_link_update(struct rte_eth_dev *dev,
@@ -1551,8 +1552,8 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
                        eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
                }
        } else {
-               if (hw->use_simple_rx) {
-                       PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
+               if (hw->use_vec_rx) {
+                       PMD_INIT_LOG(INFO, "virtio: using vectorized Rx path on port %u",
                                eth_dev->data->port_id);
                        eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
                } else if (hw->use_inorder_rx) {
@@ -1886,6 +1887,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 {
        struct virtio_hw *hw = eth_dev->data->dev_private;
        uint32_t speed = SPEED_UNKNOWN;
+       int vectorized = 0;
        int ret;
 
        if (sizeof(struct virtio_net_hdr_mrg_rxbuf) > RTE_PKTMBUF_HEADROOM) {
@@ -1912,7 +1914,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
                return 0;
        }
        ret = virtio_dev_devargs_parse(eth_dev->device->devargs,
-                NULL, &speed);
+                NULL, &speed, &vectorized);
        if (ret < 0)
                return ret;
        hw->speed = speed;
@@ -1949,6 +1951,11 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
        if (ret < 0)
                goto err_virtio_init;
 
+       if (vectorized) {
+               if (!vtpci_packed_queue(hw))
+                       hw->use_vec_rx = 1;
+       }
+
        hw->opened = true;
 
        return 0;
@@ -2021,9 +2028,20 @@ virtio_dev_speed_capa_get(uint32_t speed)
        }
 }
 
+static int vectorized_check_handler(__rte_unused const char *key,
+               const char *value, void *ret_val)
+{
+       if (strcmp(value, "1") == 0)
+               *(int *)ret_val = 1;
+       else
+               *(int *)ret_val = 0;
+
+       return 0;
+}
 
 #define VIRTIO_ARG_SPEED      "speed"
 #define VIRTIO_ARG_VDPA       "vdpa"
+#define VIRTIO_ARG_VECTORIZED "vectorized"
 
 
 static int
@@ -2045,7 +2063,7 @@ link_speed_handler(const char *key __rte_unused,
 
 static int
 virtio_dev_devargs_parse(struct rte_devargs *devargs, int *vdpa,
-       uint32_t *speed)
+       uint32_t *speed, int *vectorized)
 {
        struct rte_kvargs *kvlist;
        int ret = 0;
@@ -2081,6 +2099,18 @@ virtio_dev_devargs_parse(struct rte_devargs *devargs, int *vdpa,
                }
        }
 
+       if (vectorized &&
+               rte_kvargs_count(kvlist, VIRTIO_ARG_VECTORIZED) == 1) {
+               ret = rte_kvargs_process(kvlist,
+                               VIRTIO_ARG_VECTORIZED,
+                               vectorized_check_handler, vectorized);
+               if (ret < 0) {
+                       PMD_INIT_LOG(ERR, "Failed to parse %s",
+                                       VIRTIO_ARG_VECTORIZED);
+                       goto exit;
+               }
+       }
+
 exit:
        rte_kvargs_free(kvlist);
        return ret;
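For readers unfamiliar with the rte_kvargs API used in these hunks, below is a minimal, self-contained sketch of the same parse/count/process pattern. The standalone main() and the literal devargs string are assumptions for illustration; in the driver, the kvlist comes from rte_devargs.

    #include <stdio.h>
    #include <string.h>
    #include <rte_common.h>
    #include <rte_kvargs.h>

    /* Same shape as vectorized_check_handler above: rte_kvargs_process()
     * calls it once per occurrence of the key, passing the value. */
    static int
    vec_handler(const char *key __rte_unused, const char *value, void *ret_val)
    {
            *(int *)ret_val = (strcmp(value, "1") == 0);
            return 0;
    }

    int
    main(void)
    {
            /* Hypothetical devargs string; the driver receives it from EAL. */
            struct rte_kvargs *kvlist = rte_kvargs_parse("vectorized=1", NULL);
            int vectorized = 0;

            if (kvlist == NULL)
                    return 1;
            if (rte_kvargs_count(kvlist, "vectorized") == 1)
                    rte_kvargs_process(kvlist, "vectorized",
                                    vec_handler, &vectorized);
            rte_kvargs_free(kvlist);
            printf("vectorized = %d\n", vectorized);
            return 0;
    }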
@@ -2092,7 +2122,8 @@ static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        int vdpa = 0;
        int ret = 0;
 
-       ret = virtio_dev_devargs_parse(pci_dev->device.devargs, &vdpa, NULL);
+       ret = virtio_dev_devargs_parse(pci_dev->device.devargs, &vdpa, NULL,
+               NULL);
        if (ret < 0) {
                PMD_INIT_LOG(ERR, "devargs parsing is failed");
                return ret;
@@ -2257,33 +2288,31 @@ virtio_dev_configure(struct rte_eth_dev *dev)
                        return -EBUSY;
                }
 
-       hw->use_simple_rx = 1;
-
        if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
                hw->use_inorder_tx = 1;
                hw->use_inorder_rx = 1;
-               hw->use_simple_rx = 0;
+               hw->use_vec_rx = 0;
        }
 
        if (vtpci_packed_queue(hw)) {
-               hw->use_simple_rx = 0;
+               hw->use_vec_rx = 0;
                hw->use_inorder_rx = 0;
        }
 
 #if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
        if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
-               hw->use_simple_rx = 0;
+               hw->use_vec_rx = 0;
        }
 #endif
        if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
-                hw->use_simple_rx = 0;
+               hw->use_vec_rx = 0;
        }
 
        if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
                           DEV_RX_OFFLOAD_TCP_CKSUM |
                           DEV_RX_OFFLOAD_TCP_LRO |
                           DEV_RX_OFFLOAD_VLAN_STRIP))
-               hw->use_simple_rx = 0;
+               hw->use_vec_rx = 0;
 
        return 0;
 }
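Summarizing the hunk above: the devarg only nominates the vectorized path, and virtio_dev_configure() clears use_vec_rx whenever a dependency is unmet. A condensed, hypothetical predicate capturing the same checks (not driver code; the Arm NEON check is omitted for brevity):

    /* Hypothetical summary of when use_vec_rx survives configuration. */
    static inline int
    virtio_split_vec_rx_allowed(struct virtio_hw *hw, uint64_t rx_offloads)
    {
            if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER))
                    return 0; /* in-order paths take precedence */
            if (vtpci_packed_queue(hw))
                    return 0; /* only the split ring is vectorized here */
            if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
                    return 0; /* mergeable Rx buffers are not supported */
            if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
                               DEV_RX_OFFLOAD_TCP_CKSUM |
                               DEV_RX_OFFLOAD_TCP_LRO |
                               DEV_RX_OFFLOAD_VLAN_STRIP))
                    return 0; /* these offloads need the scalar path */
            return 1;
    }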
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index 2ac665bea1852db6d00e19596a687fe59c80f880..74ed77e3314c98c200c2d3679f14513d77a35726 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -253,7 +253,8 @@ struct virtio_hw {
        uint8_t     vlan_strip;
        uint8_t     use_msix;
        uint8_t     modern;
-       uint8_t     use_simple_rx;
+       uint8_t     use_vec_rx;
+       uint8_t     use_vec_tx;
        uint8_t     use_inorder_rx;
        uint8_t     use_inorder_tx;
        uint8_t     weak_barriers;
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index e450477e8c460aa73c03b938b4876e4a04e2430b..84f4cf946f833582600105db05e727aa5a74cb0e 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -996,7 +996,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
        /* Allocate blank mbufs for the each rx descriptor */
        nbufs = 0;
 
-       if (hw->use_simple_rx) {
+       if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
                for (desc_idx = 0; desc_idx < vq->vq_nentries;
                     desc_idx++) {
                        vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
@@ -1014,7 +1014,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
                        &rxvq->fake_mbuf;
        }
 
-       if (hw->use_simple_rx) {
+       if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
                while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
                        virtio_rxq_rearm_vec(rxvq);
                        nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
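The RTE_VIRTIO_VPMD_RX_REARM_THRESH loop amortizes refill cost by rearming descriptors in batches rather than one at a time. Below is a simplified sketch of what a batched rearm helper like virtio_rxq_rearm_vec does (an assumption for illustration, not the actual implementation; per-descriptor address setup and error paths are elided):

    /* Simplified batched rearm sketch: one bulk mempool dequeue fills the
     * software ring, then the whole batch is published to the avail ring. */
    static void
    rearm_batch_sketch(struct virtnet_rx *rxvq)
    {
            struct virtqueue *vq = rxvq->vq;
            uint16_t idx = vq->vq_avail_idx & (vq->vq_nentries - 1);

            if (rte_mempool_get_bulk(rxvq->mpool, (void **)&vq->sw_ring[idx],
                            RTE_VIRTIO_VPMD_RX_REARM_THRESH) < 0)
                    return; /* mempool exhausted; retry on the next burst */

            /* ... per-descriptor mbuf/address setup would go here ... */

            vq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
            vq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH;
            vq_update_avail_idx(vq);
    }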
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 953f00d72d1047ed94ecf8583bc274444afed2b4..150a8d9877f0bf28d0db32b317d60da3571af537 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -525,7 +525,7 @@ virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
         */
        hw->use_msix = 1;
        hw->modern   = 0;
-       hw->use_simple_rx = 0;
+       hw->use_vec_rx = 0;
        hw->use_inorder_rx = 0;
        hw->use_inorder_tx = 0;
        hw->virtio_user_dev = dev;
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 0b4e3bf3e229e0ec26df8844386a6f1af0246760..ca23180de9e821bbf71a1b830828a20ed99696c7 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -32,7 +32,8 @@ virtqueue_detach_unused(struct virtqueue *vq)
        end = (vq->vq_avail_idx + vq->vq_free_cnt) & (vq->vq_nentries - 1);
 
        for (idx = 0; idx < vq->vq_nentries; idx++) {
-               if (hw->use_simple_rx && type == VTNET_RQ) {
+               if (hw->use_vec_rx && !vtpci_packed_queue(hw) &&
+                   type == VTNET_RQ) {
                        if (start <= end && idx >= start && idx < end)
                                continue;
                        if (start > end && (idx >= start || idx < end))
@@ -97,7 +98,7 @@ virtqueue_rxvq_flush_split(struct virtqueue *vq)
        for (i = 0; i < nb_used; i++) {
                used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
                uep = &vq->vq_split.ring.used->ring[used_idx];
-               if (hw->use_simple_rx) {
+               if (hw->use_vec_rx) {
                        desc_idx = used_idx;
                        rte_pktmbuf_free(vq->sw_ring[desc_idx]);
                        vq->vq_free_cnt++;
@@ -121,7 +122,7 @@ virtqueue_rxvq_flush_split(struct virtqueue *vq)
                vq->vq_used_cons_idx++;
        }
 
-       if (hw->use_simple_rx) {
+       if (hw->use_vec_rx) {
                while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
                        virtio_rxq_rearm_vec(rxq);
                        if (virtqueue_kick_prepare(vq))