log: introduce logtype register macro
[dpdk.git] / drivers / net / virtio / virtio_ethdev.c
index f9d0ea7..dc0093b 100644 (file)
@@ -45,6 +45,11 @@ static int virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
 static int virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
 static int virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
 static int virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static uint32_t virtio_dev_speed_capa_get(uint32_t speed);
+static int virtio_dev_devargs_parse(struct rte_devargs *devargs,
+       int *vdpa,
+       uint32_t *speed,
+       int *vectorized);
 static int virtio_dev_info_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
 static int virtio_dev_link_update(struct rte_eth_dev *dev,
@@ -80,9 +85,6 @@ static int virtio_dev_queue_stats_mapping_set(
        uint8_t stat_idx,
        uint8_t is_rx);
 
-int virtio_logtype_init;
-int virtio_logtype_driver;
-
 static void virtio_notify_peers(struct rte_eth_dev *dev);
 static void virtio_ack_link_announce(struct rte_eth_dev *dev);
 
@@ -285,13 +287,10 @@ virtio_send_command_split(struct virtnet_ctl *cvq,
 
        virtqueue_notify(vq);
 
-       rte_rmb();
-       while (VIRTQUEUE_NUSED(vq) == 0) {
-               rte_rmb();
+       while (virtqueue_nused(vq) == 0)
                usleep(100);
-       }
 
-       while (VIRTQUEUE_NUSED(vq)) {
+       while (virtqueue_nused(vq)) {
                uint32_t idx, desc_idx, used_idx;
                struct vring_used_elem *uep;
 
@@ -466,7 +465,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
        }
 
        if (!vtpci_packed_queue(hw) && !rte_is_power_of_2(vq_size)) {
-               PMD_INIT_LOG(ERR, "split virtqueue size is not powerof 2");
+               PMD_INIT_LOG(ERR, "split virtqueue size is not power of 2");
                return -EINVAL;
        }
 
@@ -588,8 +587,8 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
                hw->cvq = cvq;
        }
 
-       /* For virtio_user case (that is when hw->dev is NULL), we use
-        * virtual address. And we need properly set _offset_, please see
+       /* For virtio_user case (that is when hw->virtio_user_dev is not NULL),
+        * we use virtual address. And we need to properly set _offset_, please see
         * VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
         */
        if (!hw->virtio_user_dev)
@@ -1518,9 +1517,12 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
        if (vtpci_packed_queue(hw)) {
                PMD_INIT_LOG(INFO,
                        "virtio: using packed ring %s Tx path on port %u",
-                       hw->use_inorder_tx ? "inorder" : "standard",
+                       hw->use_vec_tx ? "vectorized" : "standard",
                        eth_dev->data->port_id);
-               eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
+               if (hw->use_vec_tx)
+                       eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed_vec;
+               else
+                       eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
        } else {
                if (hw->use_inorder_tx) {
                        PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
@@ -1534,7 +1536,13 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
        }
 
        if (vtpci_packed_queue(hw)) {
-               if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+               if (hw->use_vec_rx) {
+                       PMD_INIT_LOG(INFO,
+                               "virtio: using packed ring vectorized Rx path on port %u",
+                               eth_dev->data->port_id);
+                       eth_dev->rx_pkt_burst =
+                               &virtio_recv_pkts_packed_vec;
+               } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
                        PMD_INIT_LOG(INFO,
                                "virtio: using packed ring mergeable buffer Rx path on port %u",
                                eth_dev->data->port_id);
@@ -1547,8 +1555,8 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
                        eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
                }
        } else {
-               if (hw->use_simple_rx) {
-                       PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
+               if (hw->use_vec_rx) {
+                       PMD_INIT_LOG(INFO, "virtio: using vectorized Rx path on port %u",
                                eth_dev->data->port_id);
                        eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
                } else if (hw->use_inorder_rx) {
@@ -1658,7 +1666,8 @@ virtio_configure_intr(struct rte_eth_dev *dev)
 
        return 0;
 }
-
+#define SPEED_UNKNOWN    0xffffffff
+#define DUPLEX_UNKNOWN   0xff
 /* reset device and renegotiate features if needed */
 static int
 virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
@@ -1714,6 +1723,25 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
                     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
                     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
 
+       if (hw->speed == SPEED_UNKNOWN) {
+               if (vtpci_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX)) {
+                       config = &local_config;
+                       vtpci_read_dev_config(hw,
+                               offsetof(struct virtio_net_config, speed),
+                               &config->speed, sizeof(config->speed));
+                       vtpci_read_dev_config(hw,
+                               offsetof(struct virtio_net_config, duplex),
+                               &config->duplex, sizeof(config->duplex));
+                       hw->speed = config->speed;
+                       hw->duplex = config->duplex;
+               }
+       }
+       if (hw->speed == SPEED_UNKNOWN)
+               hw->speed = ETH_SPEED_NUM_10G;
+       if (hw->duplex == DUPLEX_UNKNOWN)
+               hw->duplex = ETH_LINK_FULL_DUPLEX;
+       PMD_INIT_LOG(DEBUG, "link speed = %u, duplex = %d",
+               hw->speed, hw->duplex);
        if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
                config = &local_config;
 
@@ -1861,6 +1889,8 @@ int
 eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 {
        struct virtio_hw *hw = eth_dev->data->dev_private;
+       uint32_t speed = SPEED_UNKNOWN;
+       int vectorized = 0;
        int ret;
 
        if (sizeof(struct virtio_net_hdr_mrg_rxbuf) > RTE_PKTMBUF_HEADROOM) {
@@ -1886,7 +1916,11 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 
                return 0;
        }
-
+       ret = virtio_dev_devargs_parse(eth_dev->device->devargs,
+                NULL, &speed, &vectorized);
+       if (ret < 0)
+               return ret;
+       hw->speed = speed;
        /*
         * Pass the information to the rte_eth_dev_close() that it should also
         * release the private port resources.
@@ -1920,6 +1954,20 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
        if (ret < 0)
                goto err_virtio_init;
 
+       if (vectorized) {
+               if (!vtpci_packed_queue(hw)) {
+                       hw->use_vec_rx = 1;
+               } else {
+#if !defined(CC_AVX512_SUPPORT)
+                       PMD_DRV_LOG(INFO,
+                               "building environment does not support packed ring vectorized");
+#else
+                       hw->use_vec_rx = 1;
+                       hw->use_vec_tx = 1;
+#endif
+               }
+       }
+
        hw->opened = true;
 
        return 0;
@@ -1956,38 +2004,124 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
        return 0;
 }
 
+
 static int vdpa_check_handler(__rte_unused const char *key,
-               const char *value, __rte_unused void *opaque)
+               const char *value, void *ret_val)
 {
-       if (strcmp(value, "1"))
-               return -1;
+       if (strcmp(value, "1") == 0)
+               *(int *)ret_val = 1;
+       else
+               *(int *)ret_val = 0;
 
        return 0;
 }
 
+
+static uint32_t
+virtio_dev_speed_capa_get(uint32_t speed)
+{
+       switch (speed) {
+       case ETH_SPEED_NUM_10G:
+               return ETH_LINK_SPEED_10G;
+       case ETH_SPEED_NUM_20G:
+               return ETH_LINK_SPEED_20G;
+       case ETH_SPEED_NUM_25G:
+               return ETH_LINK_SPEED_25G;
+       case ETH_SPEED_NUM_40G:
+               return ETH_LINK_SPEED_40G;
+       case ETH_SPEED_NUM_50G:
+               return ETH_LINK_SPEED_50G;
+       case ETH_SPEED_NUM_56G:
+               return ETH_LINK_SPEED_56G;
+       case ETH_SPEED_NUM_100G:
+               return ETH_LINK_SPEED_100G;
+       default:
+               return 0;
+       }
+}
+
+static int vectorized_check_handler(__rte_unused const char *key,
+               const char *value, void *ret_val)
+{
+       if (strcmp(value, "1") == 0)
+               *(int *)ret_val = 1;
+       else
+               *(int *)ret_val = 0;
+
+       return 0;
+}
+
+#define VIRTIO_ARG_SPEED      "speed"
+#define VIRTIO_ARG_VDPA       "vdpa"
+#define VIRTIO_ARG_VECTORIZED "vectorized"
+
+
 static int
-vdpa_mode_selected(struct rte_devargs *devargs)
+link_speed_handler(const char *key __rte_unused,
+               const char *value, void *ret_val)
+{
+       uint32_t val;
+       if (!value || !ret_val)
+               return -EINVAL;
+       val = strtoul(value, NULL, 0);
+       /* validate input */
+       if (virtio_dev_speed_capa_get(val) == 0)
+               return -EINVAL;
+       *(uint32_t *)ret_val = val;
+
+       return 0;
+}
+
+
+static int
+virtio_dev_devargs_parse(struct rte_devargs *devargs, int *vdpa,
+       uint32_t *speed, int *vectorized)
 {
        struct rte_kvargs *kvlist;
-       const char *key = "vdpa";
        int ret = 0;
 
        if (devargs == NULL)
                return 0;
 
        kvlist = rte_kvargs_parse(devargs->args, NULL);
-       if (kvlist == NULL)
+       if (kvlist == NULL) {
+               PMD_INIT_LOG(ERR, "error when parsing param");
                return 0;
+       }
+       if (vdpa && rte_kvargs_count(kvlist, VIRTIO_ARG_VDPA) == 1) {
+               /* vdpa mode selected when there's a key-value pair:
+                * vdpa=1
+                */
+               ret = rte_kvargs_process(kvlist, VIRTIO_ARG_VDPA,
+                               vdpa_check_handler, vdpa);
+               if (ret < 0) {
+                       PMD_INIT_LOG(ERR, "Failed to parse %s",
+                               VIRTIO_ARG_VDPA);
+                       goto exit;
+               }
+       }
+       if (speed && rte_kvargs_count(kvlist, VIRTIO_ARG_SPEED) == 1) {
+               ret = rte_kvargs_process(kvlist,
+                                       VIRTIO_ARG_SPEED,
+                                       link_speed_handler, speed);
+               if (ret < 0) {
+                       PMD_INIT_LOG(ERR, "Failed to parse %s",
+                                       VIRTIO_ARG_SPEED);
+                       goto exit;
+               }
+       }
 
-       if (!rte_kvargs_count(kvlist, key))
-               goto exit;
-
-       /* vdpa mode selected when there's a key-value pair: vdpa=1 */
-       if (rte_kvargs_process(kvlist, key,
-                               vdpa_check_handler, NULL) < 0) {
-               goto exit;
+       if (vectorized &&
+               rte_kvargs_count(kvlist, VIRTIO_ARG_VECTORIZED) == 1) {
+               ret = rte_kvargs_process(kvlist,
+                               VIRTIO_ARG_VECTORIZED,
+                               vectorized_check_handler, vectorized);
+               if (ret < 0) {
+                       PMD_INIT_LOG(ERR, "Failed to parse %s",
+                                       VIRTIO_ARG_VECTORIZED);
+                       goto exit;
+               }
        }
-       ret = 1;
 
 exit:
        rte_kvargs_free(kvlist);
@@ -1997,8 +2131,17 @@ exit:
 static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev)
 {
+       int vdpa = 0;
+       int ret = 0;
+
+       ret = virtio_dev_devargs_parse(pci_dev->device.devargs, &vdpa, NULL,
+               NULL);
+       if (ret < 0) {
+               PMD_INIT_LOG(ERR, "devargs parsing failed");
+               return ret;
+       }
        /* virtio pmd skips probe if device needs to work in vdpa mode */
-       if (vdpa_mode_selected(pci_dev->device.devargs))
+       if (vdpa == 1)
                return 1;
 
        return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct virtio_hw),
@@ -2157,33 +2300,66 @@ virtio_dev_configure(struct rte_eth_dev *dev)
                        return -EBUSY;
                }
 
-       hw->use_simple_rx = 1;
+       if (vtpci_packed_queue(hw)) {
+#if defined(RTE_ARCH_X86_64) && defined(CC_AVX512_SUPPORT)
+               if ((hw->use_vec_rx || hw->use_vec_tx) &&
+                   (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) ||
+                    !vtpci_with_feature(hw, VIRTIO_F_IN_ORDER) ||
+                    !vtpci_with_feature(hw, VIRTIO_F_VERSION_1))) {
+                       PMD_DRV_LOG(INFO,
+                               "disabled packed ring vectorized path for requirements not met");
+                       hw->use_vec_rx = 0;
+                       hw->use_vec_tx = 0;
+               }
+#else
+               hw->use_vec_rx = 0;
+               hw->use_vec_tx = 0;
+#endif
 
-       if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
-               hw->use_inorder_tx = 1;
-               hw->use_inorder_rx = 1;
-               hw->use_simple_rx = 0;
-       }
+               if (hw->use_vec_rx) {
+                       if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+                               PMD_DRV_LOG(INFO,
+                                       "disabled packed ring vectorized rx for mrg_rxbuf enabled");
+                               hw->use_vec_rx = 0;
+                       }
 
-       if (vtpci_packed_queue(hw)) {
-               hw->use_simple_rx = 0;
-               hw->use_inorder_rx = 0;
-       }
+                       if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+                               PMD_DRV_LOG(INFO,
+                                       "disabled packed ring vectorized rx for TCP_LRO enabled");
+                               hw->use_vec_rx = 0;
+                       }
+               }
+       } else {
+               if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
+                       hw->use_inorder_tx = 1;
+                       hw->use_inorder_rx = 1;
+                       hw->use_vec_rx = 0;
+               }
 
+               if (hw->use_vec_rx) {
 #if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
-       if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
-               hw->use_simple_rx = 0;
-       }
+                       if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
+                               PMD_DRV_LOG(INFO,
+                                       "disabled split ring vectorized path for requirements not met");
+                               hw->use_vec_rx = 0;
+                       }
 #endif
-       if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
-                hw->use_simple_rx = 0;
-       }
+                       if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+                               PMD_DRV_LOG(INFO,
+                                       "disabled split ring vectorized rx for mrg_rxbuf enabled");
+                               hw->use_vec_rx = 0;
+                       }
 
-       if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
-                          DEV_RX_OFFLOAD_TCP_CKSUM |
-                          DEV_RX_OFFLOAD_TCP_LRO |
-                          DEV_RX_OFFLOAD_VLAN_STRIP))
-               hw->use_simple_rx = 0;
+                       if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
+                                          DEV_RX_OFFLOAD_TCP_CKSUM |
+                                          DEV_RX_OFFLOAD_TCP_LRO |
+                                          DEV_RX_OFFLOAD_VLAN_STRIP)) {
+                               PMD_DRV_LOG(INFO,
+                                       "disabled split ring vectorized rx for offloading enabled");
+                               hw->use_vec_rx = 0;
+                       }
+               }
+       }
 
        return 0;
 }
@@ -2370,9 +2546,9 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
        struct virtio_hw *hw = dev->data->dev_private;
 
        memset(&link, 0, sizeof(link));
-       link.link_duplex = ETH_LINK_FULL_DUPLEX;
-       link.link_speed  = ETH_SPEED_NUM_10G;
-       link.link_autoneg = ETH_LINK_FIXED;
+       link.link_duplex = hw->duplex;
+       link.link_speed  = hw->speed;
+       link.link_autoneg = ETH_LINK_AUTONEG;
 
        if (!hw->started) {
                link.link_status = ETH_LINK_DOWN;
@@ -2426,8 +2602,7 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
        uint64_t tso_mask, host_features;
        struct virtio_hw *hw = dev->data->dev_private;
-
-       dev_info->speed_capa = ETH_LINK_SPEED_10G; /* fake value */
+       dev_info->speed_capa = virtio_dev_speed_capa_get(hw->speed);
 
        dev_info->max_rx_queues =
                RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
@@ -2481,13 +2656,5 @@ __rte_unused uint8_t is_rx)
 RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
 RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio-pci");
-
-RTE_INIT(virtio_init_log)
-{
-       virtio_logtype_init = rte_log_register("pmd.net.virtio.init");
-       if (virtio_logtype_init >= 0)
-               rte_log_set_level(virtio_logtype_init, RTE_LOG_NOTICE);
-       virtio_logtype_driver = rte_log_register("pmd.net.virtio.driver");
-       if (virtio_logtype_driver >= 0)
-               rte_log_set_level(virtio_logtype_driver, RTE_LOG_NOTICE);
-}
+RTE_LOG_REGISTER(virtio_logtype_init, pmd.net.virtio.init, NOTICE);
+RTE_LOG_REGISTER(virtio_logtype_driver, pmd.net.virtio.driver, NOTICE);