ethdev: add namespace
[dpdk.git] / drivers / net / enic / enic_ethdev.c
index d91c2cd..c8bdaf1 100644 (file)
@@ -11,6 +11,7 @@
 #include <rte_bus_pci.h>
 #include <ethdev_driver.h>
 #include <ethdev_pci.h>
+#include <rte_geneve.h>
 #include <rte_kvargs.h>
 #include <rte_string_fns.h>
 
@@ -37,41 +38,40 @@ static const struct vic_speed_capa {
        uint16_t sub_devid;
        uint32_t capa;
 } vic_speed_capa_map[] = {
-       { 0x0043, ETH_LINK_SPEED_10G }, /* VIC */
-       { 0x0047, ETH_LINK_SPEED_10G }, /* P81E PCIe */
-       { 0x0048, ETH_LINK_SPEED_10G }, /* M81KR Mezz */
-       { 0x004f, ETH_LINK_SPEED_10G }, /* 1280 Mezz */
-       { 0x0084, ETH_LINK_SPEED_10G }, /* 1240 MLOM */
-       { 0x0085, ETH_LINK_SPEED_10G }, /* 1225 PCIe */
-       { 0x00cd, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1285 PCIe */
-       { 0x00ce, ETH_LINK_SPEED_10G }, /* 1225T PCIe */
-       { 0x012a, ETH_LINK_SPEED_40G }, /* M4308 */
-       { 0x012c, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1340 MLOM */
-       { 0x012e, ETH_LINK_SPEED_10G }, /* 1227 PCIe */
-       { 0x0137, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1380 Mezz */
-       { 0x014d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1385 PCIe */
-       { 0x015d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1387 MLOM */
-       { 0x0215, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
-                 ETH_LINK_SPEED_40G }, /* 1440 Mezz */
-       { 0x0216, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
-                 ETH_LINK_SPEED_40G }, /* 1480 MLOM */
-       { 0x0217, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1455 PCIe */
-       { 0x0218, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1457 MLOM */
-       { 0x0219, ETH_LINK_SPEED_40G }, /* 1485 PCIe */
-       { 0x021a, ETH_LINK_SPEED_40G }, /* 1487 MLOM */
-       { 0x024a, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1495 PCIe */
-       { 0x024b, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1497 MLOM */
+       { 0x0043, RTE_ETH_LINK_SPEED_10G }, /* VIC */
+       { 0x0047, RTE_ETH_LINK_SPEED_10G }, /* P81E PCIe */
+       { 0x0048, RTE_ETH_LINK_SPEED_10G }, /* M81KR Mezz */
+       { 0x004f, RTE_ETH_LINK_SPEED_10G }, /* 1280 Mezz */
+       { 0x0084, RTE_ETH_LINK_SPEED_10G }, /* 1240 MLOM */
+       { 0x0085, RTE_ETH_LINK_SPEED_10G }, /* 1225 PCIe */
+       { 0x00cd, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1285 PCIe */
+       { 0x00ce, RTE_ETH_LINK_SPEED_10G }, /* 1225T PCIe */
+       { 0x012a, RTE_ETH_LINK_SPEED_40G }, /* M4308 */
+       { 0x012c, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1340 MLOM */
+       { 0x012e, RTE_ETH_LINK_SPEED_10G }, /* 1227 PCIe */
+       { 0x0137, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1380 Mezz */
+       { 0x014d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1385 PCIe */
+       { 0x015d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1387 MLOM */
+       { 0x0215, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+                 RTE_ETH_LINK_SPEED_40G }, /* 1440 Mezz */
+       { 0x0216, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+                 RTE_ETH_LINK_SPEED_40G }, /* 1480 MLOM */
+       { 0x0217, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1455 PCIe */
+       { 0x0218, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1457 MLOM */
+       { 0x0219, RTE_ETH_LINK_SPEED_40G }, /* 1485 PCIe */
+       { 0x021a, RTE_ETH_LINK_SPEED_40G }, /* 1487 MLOM */
+       { 0x024a, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1495 PCIe */
+       { 0x024b, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1497 MLOM */
        { 0, 0 }, /* End marker */
 };
 
 #define ENIC_DEVARG_CQ64 "cq64"
 #define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay"
 #define ENIC_DEVARG_ENABLE_AVX2_RX "enable-avx2-rx"
-#define ENIC_DEVARG_GENEVE_OPT "geneve-opt"
 #define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"
 #define ENIC_DEVARG_REPRESENTOR "representor"
 
-RTE_LOG_REGISTER(enic_pmd_logtype, pmd.net.enic, INFO);
+RTE_LOG_REGISTER_DEFAULT(enic_pmd_logtype, INFO);
 
 static int
 enicpmd_dev_flow_ops_get(struct rte_eth_dev *dev,
@@ -81,13 +81,6 @@ enicpmd_dev_flow_ops_get(struct rte_eth_dev *dev,
 
        ENICPMD_FUNC_TRACE();
 
-       /*
-        * Currently, when Geneve with options offload is enabled, host
-        * cannot insert match-action rules.
-        */
-       if (enic->geneve_opt_enabled)
-               return -ENOTSUP;
-
        if (enic->flow_filter_mode == FILTER_FLOWMAN)
                *ops = &enic_fm_flow_ops;
        else
@@ -95,8 +88,10 @@ enicpmd_dev_flow_ops_get(struct rte_eth_dev *dev,
        return 0;
 }
 
-static void enicpmd_dev_tx_queue_release(void *txq)
+static void enicpmd_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
+       void *txq = dev->data->tx_queues[qid];
+
        ENICPMD_FUNC_TRACE();
 
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -230,8 +225,10 @@ static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
        return ret;
 }
 
-static void enicpmd_dev_rx_queue_release(void *rxq)
+static void enicpmd_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
+       void *rxq = dev->data->rx_queues[qid];
+
        ENICPMD_FUNC_TRACE();
 
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -240,18 +237,18 @@ static void enicpmd_dev_rx_queue_release(void *rxq)
        enic_free_rq(rxq);
 }
 
-static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
-                                          uint16_t rx_queue_id)
+static uint32_t enicpmd_dev_rx_queue_count(void *rx_queue)
 {
-       struct enic *enic = pmd_priv(dev);
+       struct enic *enic;
+       struct vnic_rq *sop_rq;
        uint32_t queue_count = 0;
        struct vnic_cq *cq;
        uint32_t cq_tail;
        uint16_t cq_idx;
-       int rq_num;
 
-       rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
-       cq = &enic->cq[enic_cq_rq(enic, rq_num)];
+       sop_rq = rx_queue;
+       enic = vnic_dev_priv(sop_rq->vdev);
+       cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
        cq_idx = cq->to_clean;
 
        cq_tail = ioread32(&cq->ctrl->cq_tail);
@@ -300,8 +297,8 @@ static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
        ENICPMD_FUNC_TRACE();
 
        offloads = eth_dev->data->dev_conf.rxmode.offloads;
-       if (mask & ETH_VLAN_STRIP_MASK) {
-               if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+       if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+               if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        enic->ig_vlan_strip_en = 1;
                else
                        enic->ig_vlan_strip_en = 0;
@@ -326,17 +323,17 @@ static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
                return ret;
        }
 
-       if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+       if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
                eth_dev->data->dev_conf.rxmode.offloads |=
-                       DEV_RX_OFFLOAD_RSS_HASH;
+                       RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
        enic->mc_count = 0;
        enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
-                                 DEV_RX_OFFLOAD_CHECKSUM);
+                                 RTE_ETH_RX_OFFLOAD_CHECKSUM);
        /* All vlan offload masks to apply the current settings */
-       mask = ETH_VLAN_STRIP_MASK |
-               ETH_VLAN_FILTER_MASK |
-               ETH_VLAN_EXTEND_MASK;
+       mask = RTE_ETH_VLAN_STRIP_MASK |
+               RTE_ETH_VLAN_FILTER_MASK |
+               RTE_ETH_VLAN_EXTEND_MASK;
        ret = enicpmd_vlan_offload_set(eth_dev, mask);
        if (ret) {
                dev_err(enic, "Failed to configure VLAN offloads\n");
@@ -438,14 +435,14 @@ static uint32_t speed_capa_from_pci_id(struct rte_eth_dev *eth_dev)
        }
        /* 1300 and later models are at least 40G */
        if (id >= 0x0100)
-               return ETH_LINK_SPEED_40G;
+               return RTE_ETH_LINK_SPEED_40G;
        /* VFs have subsystem id 0, check device id */
        if (id == 0) {
                /* Newer VF implies at least 40G model */
                if (pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_SN)
-                       return ETH_LINK_SPEED_40G;
+                       return RTE_ETH_LINK_SPEED_40G;
        }
-       return ETH_LINK_SPEED_10G;
+       return RTE_ETH_LINK_SPEED_10G;
 }
 
 static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
@@ -462,7 +459,7 @@ static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
         * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
         * a hint to the driver to size receive buffers accordingly so that
         * larger-than-vnic-mtu packets get truncated.. For DPDK, we let
-        * the user decide the buffer size via rxmode.max_rx_pkt_len, basically
+        * the user decide the buffer size via rxmode.mtu, basically
         * ignoring vNIC mtu.
         */
        device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
@@ -777,8 +774,8 @@ static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
        }
 
        for (i = 0; i < reta_size; i++) {
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
                                enic->rss_cpu.cpu[i / 4].b[i % 4]);
@@ -809,8 +806,8 @@ static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
         */
        rss_cpu = enic->rss_cpu;
        for (i = 0; i < reta_size; i++) {
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        rss_cpu.cpu[i / 4].b[i % 4] =
                                enic_rte_rq_idx_to_sop_idx(
@@ -886,7 +883,7 @@ static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
         */
        conf->offloads = enic->rx_offload_capa;
        if (!enic->ig_vlan_strip_en)
-               conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+               conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        /* rx_thresh and other fields are not applicable for enic */
 }
 
@@ -972,26 +969,32 @@ static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
 static int udp_tunnel_common_check(struct enic *enic,
                                   struct rte_eth_udp_tunnel *tnl)
 {
-       if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN)
+       if (tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN &&
+           tnl->prot_type != RTE_ETH_TUNNEL_TYPE_GENEVE)
                return -ENOTSUP;
        if (!enic->overlay_offload) {
-               ENICPMD_LOG(DEBUG, " vxlan (overlay offload) is not "
-                            "supported\n");
+               ENICPMD_LOG(DEBUG, " overlay offload is not supported\n");
                return -ENOTSUP;
        }
        return 0;
 }
 
-static int update_vxlan_port(struct enic *enic, uint16_t port)
+static int update_tunnel_port(struct enic *enic, uint16_t port, bool vxlan)
 {
-       if (vnic_dev_overlay_offload_cfg(enic->vdev,
-                                        OVERLAY_CFG_VXLAN_PORT_UPDATE,
-                                        port)) {
-               ENICPMD_LOG(DEBUG, " failed to update vxlan port\n");
+       uint8_t cfg;
+
+       cfg = vxlan ? OVERLAY_CFG_VXLAN_PORT_UPDATE :
+               OVERLAY_CFG_GENEVE_PORT_UPDATE;
+       if (vnic_dev_overlay_offload_cfg(enic->vdev, cfg, port)) {
+               ENICPMD_LOG(DEBUG, " failed to update tunnel port\n");
                return -EINVAL;
        }
-       ENICPMD_LOG(DEBUG, " updated vxlan port to %u\n", port);
-       enic->vxlan_port = port;
+       ENICPMD_LOG(DEBUG, " updated %s port to %u\n",
+                   vxlan ? "vxlan" : "geneve", port);
+       if (vxlan)
+               enic->vxlan_port = port;
+       else
+               enic->geneve_port = port;
        return 0;
 }
 
@@ -999,34 +1002,48 @@ static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
                                           struct rte_eth_udp_tunnel *tnl)
 {
        struct enic *enic = pmd_priv(eth_dev);
+       uint16_t port;
+       bool vxlan;
        int ret;
 
        ENICPMD_FUNC_TRACE();
        ret = udp_tunnel_common_check(enic, tnl);
        if (ret)
                return ret;
+       vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
+       if (vxlan)
+               port = enic->vxlan_port;
+       else
+               port = enic->geneve_port;
        /*
-        * The NIC has 1 configurable VXLAN port number. "Adding" a new port
-        * number replaces it.
+        * The NIC has 1 configurable port number per tunnel type.
+        * "Adding" a new port number replaces it.
         */
-       if (tnl->udp_port == enic->vxlan_port || tnl->udp_port == 0) {
+       if (tnl->udp_port == port || tnl->udp_port == 0) {
                ENICPMD_LOG(DEBUG, " %u is already configured or invalid\n",
                             tnl->udp_port);
                return -EINVAL;
        }
-       return update_vxlan_port(enic, tnl->udp_port);
+       return update_tunnel_port(enic, tnl->udp_port, vxlan);
 }
 
 static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
                                           struct rte_eth_udp_tunnel *tnl)
 {
        struct enic *enic = pmd_priv(eth_dev);
+       uint16_t port;
+       bool vxlan;
        int ret;
 
        ENICPMD_FUNC_TRACE();
        ret = udp_tunnel_common_check(enic, tnl);
        if (ret)
                return ret;
+       vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
+       if (vxlan)
+               port = enic->vxlan_port;
+       else
+               port = enic->geneve_port;
        /*
         * Clear the previously set port number and restore the
         * hardware default port number. Some drivers disable VXLAN
@@ -1034,12 +1051,13 @@ static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
         * enic does not do that as VXLAN is part of overlay offload,
         * which is tied to inner RSS and TSO.
         */
-       if (tnl->udp_port != enic->vxlan_port) {
-               ENICPMD_LOG(DEBUG, " %u is not a configured vxlan port\n",
+       if (tnl->udp_port != port) {
+               ENICPMD_LOG(DEBUG, " %u is not a configured tunnel port\n",
                             tnl->udp_port);
                return -EINVAL;
        }
-       return update_vxlan_port(enic, RTE_VXLAN_DEFAULT_PORT);
+       port = vxlan ? RTE_VXLAN_DEFAULT_PORT : RTE_GENEVE_DEFAULT_PORT;
+       return update_tunnel_port(enic, port, vxlan);
 }
 
 static int enicpmd_dev_fw_version_get(struct rte_eth_dev *eth_dev,
@@ -1050,16 +1068,21 @@ static int enicpmd_dev_fw_version_get(struct rte_eth_dev *eth_dev,
        int ret;
 
        ENICPMD_FUNC_TRACE();
-       if (fw_version == NULL || fw_size <= 0)
-               return -EINVAL;
+
        enic = pmd_priv(eth_dev);
        ret = vnic_dev_fw_info(enic->vdev, &info);
        if (ret)
                return ret;
-       snprintf(fw_version, fw_size, "%s %s",
+       ret = snprintf(fw_version, fw_size, "%s %s",
                 info->fw_version, info->fw_build);
-       fw_version[fw_size - 1] = '\0';
-       return 0;
+       if (ret < 0)
+               return -EINVAL;
+
+       ret += 1; /* account for the terminating '\0' */
+       if (fw_size < (size_t)ret)
+               return ret;
+       else
+               return 0;
 }
 
 static const struct eth_dev_ops enicpmd_eth_dev_ops = {
@@ -1140,8 +1163,6 @@ static int enic_parse_zero_one(const char *key,
                enic->disable_overlay = b;
        if (strcmp(key, ENIC_DEVARG_ENABLE_AVX2_RX) == 0)
                enic->enable_avx2_rx = b;
-       if (strcmp(key, ENIC_DEVARG_GENEVE_OPT) == 0)
-               enic->geneve_opt_request = b;
        return 0;
 }
 
@@ -1184,7 +1205,6 @@ static int enic_check_devargs(struct rte_eth_dev *dev)
                ENIC_DEVARG_CQ64,
                ENIC_DEVARG_DISABLE_OVERLAY,
                ENIC_DEVARG_ENABLE_AVX2_RX,
-               ENIC_DEVARG_GENEVE_OPT,
                ENIC_DEVARG_IG_VLAN_REWRITE,
                ENIC_DEVARG_REPRESENTOR,
                NULL};
@@ -1196,7 +1216,6 @@ static int enic_check_devargs(struct rte_eth_dev *dev)
        enic->cq64_request = true; /* Use 64B entry if available */
        enic->disable_overlay = false;
        enic->enable_avx2_rx = false;
-       enic->geneve_opt_request = false;
        enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
        if (!dev->device->devargs)
                return 0;
@@ -1209,8 +1228,6 @@ static int enic_check_devargs(struct rte_eth_dev *dev)
                               enic_parse_zero_one, enic) < 0 ||
            rte_kvargs_process(kvlist, ENIC_DEVARG_ENABLE_AVX2_RX,
                               enic_parse_zero_one, enic) < 0 ||
-           rte_kvargs_process(kvlist, ENIC_DEVARG_GENEVE_OPT,
-                              enic_parse_zero_one, enic) < 0 ||
            rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
                               enic_parse_ig_vlan_rewrite, enic) < 0) {
                rte_kvargs_free(kvlist);
@@ -1247,7 +1264,6 @@ static int eth_enic_dev_init(struct rte_eth_dev *eth_dev,
 
        pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
        rte_eth_copy_pci_info(eth_dev, pdev);
-       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
        enic->pdev = pdev;
        addr = &pdev->addr;
 
@@ -1386,5 +1402,4 @@ RTE_PMD_REGISTER_PARAM_STRING(net_enic,
        ENIC_DEVARG_CQ64 "=0|1"
        ENIC_DEVARG_DISABLE_OVERLAY "=0|1 "
        ENIC_DEVARG_ENABLE_AVX2_RX "=0|1 "
-       ENIC_DEVARG_GENEVE_OPT "=0|1 "
        ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");