ethdev: add namespace
[dpdk.git] / drivers/net/enic/enic_ethdev.c
index 27f60b4..c8bdaf1 100644
@@ -9,8 +9,9 @@
 #include <rte_dev.h>
 #include <rte_pci.h>
 #include <rte_bus_pci.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
+#include <rte_geneve.h>
 #include <rte_kvargs.h>
 #include <rte_string_fns.h>
 
@@ -37,127 +38,60 @@ static const struct vic_speed_capa {
        uint16_t sub_devid;
        uint32_t capa;
 } vic_speed_capa_map[] = {
-       { 0x0043, ETH_LINK_SPEED_10G }, /* VIC */
-       { 0x0047, ETH_LINK_SPEED_10G }, /* P81E PCIe */
-       { 0x0048, ETH_LINK_SPEED_10G }, /* M81KR Mezz */
-       { 0x004f, ETH_LINK_SPEED_10G }, /* 1280 Mezz */
-       { 0x0084, ETH_LINK_SPEED_10G }, /* 1240 MLOM */
-       { 0x0085, ETH_LINK_SPEED_10G }, /* 1225 PCIe */
-       { 0x00cd, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1285 PCIe */
-       { 0x00ce, ETH_LINK_SPEED_10G }, /* 1225T PCIe */
-       { 0x012a, ETH_LINK_SPEED_40G }, /* M4308 */
-       { 0x012c, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1340 MLOM */
-       { 0x012e, ETH_LINK_SPEED_10G }, /* 1227 PCIe */
-       { 0x0137, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1380 Mezz */
-       { 0x014d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1385 PCIe */
-       { 0x015d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1387 MLOM */
-       { 0x0215, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
-                 ETH_LINK_SPEED_40G }, /* 1440 Mezz */
-       { 0x0216, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
-                 ETH_LINK_SPEED_40G }, /* 1480 MLOM */
-       { 0x0217, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1455 PCIe */
-       { 0x0218, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1457 MLOM */
-       { 0x0219, ETH_LINK_SPEED_40G }, /* 1485 PCIe */
-       { 0x021a, ETH_LINK_SPEED_40G }, /* 1487 MLOM */
-       { 0x024a, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1495 PCIe */
-       { 0x024b, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1497 MLOM */
+       { 0x0043, RTE_ETH_LINK_SPEED_10G }, /* VIC */
+       { 0x0047, RTE_ETH_LINK_SPEED_10G }, /* P81E PCIe */
+       { 0x0048, RTE_ETH_LINK_SPEED_10G }, /* M81KR Mezz */
+       { 0x004f, RTE_ETH_LINK_SPEED_10G }, /* 1280 Mezz */
+       { 0x0084, RTE_ETH_LINK_SPEED_10G }, /* 1240 MLOM */
+       { 0x0085, RTE_ETH_LINK_SPEED_10G }, /* 1225 PCIe */
+       { 0x00cd, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1285 PCIe */
+       { 0x00ce, RTE_ETH_LINK_SPEED_10G }, /* 1225T PCIe */
+       { 0x012a, RTE_ETH_LINK_SPEED_40G }, /* M4308 */
+       { 0x012c, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1340 MLOM */
+       { 0x012e, RTE_ETH_LINK_SPEED_10G }, /* 1227 PCIe */
+       { 0x0137, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1380 Mezz */
+       { 0x014d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1385 PCIe */
+       { 0x015d, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_40G }, /* 1387 MLOM */
+       { 0x0215, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+                 RTE_ETH_LINK_SPEED_40G }, /* 1440 Mezz */
+       { 0x0216, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G |
+                 RTE_ETH_LINK_SPEED_40G }, /* 1480 MLOM */
+       { 0x0217, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1455 PCIe */
+       { 0x0218, RTE_ETH_LINK_SPEED_10G | RTE_ETH_LINK_SPEED_25G }, /* 1457 MLOM */
+       { 0x0219, RTE_ETH_LINK_SPEED_40G }, /* 1485 PCIe */
+       { 0x021a, RTE_ETH_LINK_SPEED_40G }, /* 1487 MLOM */
+       { 0x024a, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1495 PCIe */
+       { 0x024b, RTE_ETH_LINK_SPEED_40G | RTE_ETH_LINK_SPEED_100G }, /* 1497 MLOM */
        { 0, 0 }, /* End marker */
 };
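
The speed_capa bits collected in this table surface to applications through
rte_eth_dev_info_get(). A minimal caller-side sketch, assuming the post-21.11
RTE_ETH_* names and a hypothetical port_id:

    #include <rte_ethdev.h>

    /* Return nonzero if the VIC model advertises 25G capability. */
    static int
    port_supports_25g(uint16_t port_id)
    {
        struct rte_eth_dev_info dev_info;

        if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
            return 0;
        return (dev_info.speed_capa & RTE_ETH_LINK_SPEED_25G) != 0;
    }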
 
+#define ENIC_DEVARG_CQ64 "cq64"
 #define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay"
 #define ENIC_DEVARG_ENABLE_AVX2_RX "enable-avx2-rx"
-#define ENIC_DEVARG_GENEVE_OPT "geneve-opt"
 #define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"
 #define ENIC_DEVARG_REPRESENTOR "representor"
 
-RTE_LOG_REGISTER(enic_pmd_logtype, pmd.net.enic, INFO);
+RTE_LOG_REGISTER_DEFAULT(enic_pmd_logtype, INFO);
 
 static int
-enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
-                       enum rte_filter_op filter_op, void *arg)
-{
-       struct enic *enic = pmd_priv(eth_dev);
-       int ret = 0;
-
-       ENICPMD_FUNC_TRACE();
-       if (filter_op == RTE_ETH_FILTER_NOP)
-               return 0;
-
-       if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
-               return -EINVAL;
-
-       switch (filter_op) {
-       case RTE_ETH_FILTER_ADD:
-       case RTE_ETH_FILTER_UPDATE:
-               ret = enic_fdir_add_fltr(enic,
-                       (struct rte_eth_fdir_filter *)arg);
-               break;
-
-       case RTE_ETH_FILTER_DELETE:
-               ret = enic_fdir_del_fltr(enic,
-                       (struct rte_eth_fdir_filter *)arg);
-               break;
-
-       case RTE_ETH_FILTER_STATS:
-               enic_fdir_stats_get(enic, (struct rte_eth_fdir_stats *)arg);
-               break;
-
-       case RTE_ETH_FILTER_FLUSH:
-               dev_warning(enic, "unsupported operation %u", filter_op);
-               ret = -ENOTSUP;
-               break;
-       case RTE_ETH_FILTER_INFO:
-               enic_fdir_info_get(enic, (struct rte_eth_fdir_info *)arg);
-               break;
-       default:
-               dev_err(enic, "unknown operation %u", filter_op);
-               ret = -EINVAL;
-               break;
-       }
-       return ret;
-}
-
-static int
-enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
-                    enum rte_filter_type filter_type,
-                    enum rte_filter_op filter_op,
-                    void *arg)
+enicpmd_dev_flow_ops_get(struct rte_eth_dev *dev,
+                        const struct rte_flow_ops **ops)
 {
        struct enic *enic = pmd_priv(dev);
-       int ret = 0;
 
        ENICPMD_FUNC_TRACE();
 
-       /*
-        * Currently, when Geneve with options offload is enabled, host
-        * cannot insert match-action rules.
-        */
-       if (enic->geneve_opt_enabled)
-               return -ENOTSUP;
-       switch (filter_type) {
-       case RTE_ETH_FILTER_GENERIC:
-               if (filter_op != RTE_ETH_FILTER_GET)
-                       return -EINVAL;
-               if (enic->flow_filter_mode == FILTER_FLOWMAN)
-                       *(const void **)arg = &enic_fm_flow_ops;
-               else
-                       *(const void **)arg = &enic_flow_ops;
-               break;
-       case RTE_ETH_FILTER_FDIR:
-               ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
-               break;
-       default:
-               dev_warning(enic, "Filter type (%d) not supported",
-                       filter_type);
-               ret = -EINVAL;
-               break;
-       }
-
-       return ret;
+       if (enic->flow_filter_mode == FILTER_FLOWMAN)
+               *ops = &enic_fm_flow_ops;
+       else
+               *ops = &enic_flow_ops;
+       return 0;
 }
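
Applications do not call flow_ops_get() directly; since the filter_ctrl
removal (DPDK 21.05) the generic rte_flow layer fetches the ops table through
this callback. A minimal sketch of the application-facing path (the rule and
queue index 1 are illustrative only):

    #include <rte_flow.h>

    /* Steer ingress IPv4/UDP packets to Rx queue 1; enic services this
     * via enic_fm_flow_ops or enic_flow_ops, as selected above.
     */
    static struct rte_flow *
    udp_to_queue1(uint16_t port_id)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            { .type = RTE_FLOW_ITEM_TYPE_UDP },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0)
            return NULL;
        return rte_flow_create(port_id, &attr, pattern, actions, &err);
    }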
 
-static void enicpmd_dev_tx_queue_release(void *txq)
+static void enicpmd_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
+       void *txq = dev->data->tx_queues[qid];
+
        ENICPMD_FUNC_TRACE();
 
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -291,8 +225,10 @@ static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
        return ret;
 }
 
-static void enicpmd_dev_rx_queue_release(void *rxq)
+static void enicpmd_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
+       void *rxq = dev->data->rx_queues[qid];
+
        ENICPMD_FUNC_TRACE();
 
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -301,18 +237,18 @@ static void enicpmd_dev_rx_queue_release(void *rxq)
        enic_free_rq(rxq);
 }
 
-static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
-                                          uint16_t rx_queue_id)
+static uint32_t enicpmd_dev_rx_queue_count(void *rx_queue)
 {
-       struct enic *enic = pmd_priv(dev);
+       struct enic *enic;
+       struct vnic_rq *sop_rq;
        uint32_t queue_count = 0;
        struct vnic_cq *cq;
        uint32_t cq_tail;
        uint16_t cq_idx;
-       int rq_num;
 
-       rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
-       cq = &enic->cq[enic_cq_rq(enic, rq_num)];
+       sop_rq = rx_queue;
+       enic = vnic_dev_priv(sop_rq->vdev);
+       cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
        cq_idx = cq->to_clean;
 
        cq_tail = ioread32(&cq->ctrl->cq_tail);
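
The callback now receives the Rx queue pointer instead of a (dev, queue_id)
pair; the public API is unchanged. A caller-side sketch with hypothetical
port and queue ids:

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* ethdev resolves dev->data->rx_queues[qid] before invoking the
     * PMD callback above; applications still pass port and queue ids.
     */
    static void
    report_rxq_fill(uint16_t port_id, uint16_t qid)
    {
        int used = rte_eth_rx_queue_count(port_id, qid);

        if (used >= 0)
            printf("rxq %u: %d completed descriptors\n", qid, used);
    }
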
@@ -361,8 +297,8 @@ static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
        ENICPMD_FUNC_TRACE();
 
        offloads = eth_dev->data->dev_conf.rxmode.offloads;
-       if (mask & ETH_VLAN_STRIP_MASK) {
-               if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+       if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+               if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        enic->ig_vlan_strip_en = 1;
                else
                        enic->ig_vlan_strip_en = 0;
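
With the renamed masks, an application toggles stripping at runtime through
the generic VLAN offload API; a minimal sketch:

    #include <rte_ethdev.h>

    /* Enable Rx VLAN stripping; enic applies it via the handler above. */
    static int
    enable_vlan_strip(uint16_t port_id)
    {
        int mask = rte_eth_dev_get_vlan_offload(port_id);

        if (mask < 0)
            return mask;
        return rte_eth_dev_set_vlan_offload(port_id,
                                            mask | RTE_ETH_VLAN_STRIP_OFFLOAD);
    }
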
@@ -387,17 +323,17 @@ static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
                return ret;
        }
 
-       if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+       if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
                eth_dev->data->dev_conf.rxmode.offloads |=
-                       DEV_RX_OFFLOAD_RSS_HASH;
+                       RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
        enic->mc_count = 0;
        enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
-                                 DEV_RX_OFFLOAD_CHECKSUM);
+                                 RTE_ETH_RX_OFFLOAD_CHECKSUM);
        /* All vlan offload masks to apply the current settings */
-       mask = ETH_VLAN_STRIP_MASK |
-               ETH_VLAN_FILTER_MASK |
-               ETH_VLAN_EXTEND_MASK;
+       mask = RTE_ETH_VLAN_STRIP_MASK |
+               RTE_ETH_VLAN_FILTER_MASK |
+               RTE_ETH_VLAN_EXTEND_MASK;
        ret = enicpmd_vlan_offload_set(eth_dev, mask);
        if (ret) {
                dev_err(enic, "Failed to configure VLAN offloads\n");
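
A port configuration exercising these paths could look as follows; queue
counts and the RSS hash types are chosen arbitrarily:

    #include <rte_ethdev.h>

    static int
    configure_rss_port(uint16_t port_id)
    {
        struct rte_eth_conf conf = {
            .rxmode = {
                .mq_mode = RTE_ETH_MQ_RX_RSS,
                .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
            },
            .rx_adv_conf.rss_conf = { .rss_hf = RTE_ETH_RSS_IP },
        };

        /* enicpmd_dev_configure() ORs in RTE_ETH_RX_OFFLOAD_RSS_HASH */
        return rte_eth_dev_configure(port_id, 4, 4, &conf);
    }
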
@@ -428,19 +364,21 @@ static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
 /*
  * Stop device: disable rx and tx functions to allow for reconfiguring.
  */
-static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
+static int enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
 {
        struct rte_eth_link link;
        struct enic *enic = pmd_priv(eth_dev);
 
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-               return;
+               return 0;
 
        ENICPMD_FUNC_TRACE();
        enic_disable(enic);
 
        memset(&link, 0, sizeof(link));
        rte_eth_linkstatus_set(eth_dev, &link);
+
+       return 0;
 }
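
The int return propagates to rte_eth_dev_stop(), which callers should now
check; a teardown sketch (both calls return int since 20.11):

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    shutdown_port(uint16_t port_id)
    {
        int ret = rte_eth_dev_stop(port_id);

        if (ret != 0)
            printf("stop failed on port %u: %d\n", port_id, ret);
        if (rte_eth_dev_close(port_id) != 0)
            printf("close failed on port %u\n", port_id);
    }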
 
 /*
@@ -451,6 +389,9 @@ static int enicpmd_dev_close(struct rte_eth_dev *eth_dev)
        struct enic *enic = pmd_priv(eth_dev);
 
        ENICPMD_FUNC_TRACE();
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+               return 0;
+
        enic_remove(enic);
 
        return 0;
@@ -494,14 +435,14 @@ static uint32_t speed_capa_from_pci_id(struct rte_eth_dev *eth_dev)
        }
        /* 1300 and later models are at least 40G */
        if (id >= 0x0100)
-               return ETH_LINK_SPEED_40G;
+               return RTE_ETH_LINK_SPEED_40G;
        /* VFs have subsystem id 0, check device id */
        if (id == 0) {
                /* Newer VF implies at least 40G model */
                if (pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_SN)
-                       return ETH_LINK_SPEED_40G;
+                       return RTE_ETH_LINK_SPEED_40G;
        }
-       return ETH_LINK_SPEED_10G;
+       return RTE_ETH_LINK_SPEED_10G;
 }
 
 static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
@@ -518,7 +459,7 @@ static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
         * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
         * a hint to the driver to size receive buffers accordingly so that
         * larger-than-vnic-mtu packets get truncated. For DPDK, we let
-        * the user decide the buffer size via rxmode.max_rx_pkt_len, basically
+        * the user decide the buffer size via rxmode.mtu, basically
         * ignoring vNIC mtu.
         */
        device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
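
Since 21.11 the buffer-size hint comes from rxmode.mtu rather than the
removed max_rx_pkt_len field; a sketch with an arbitrary jumbo value:

    #include <rte_ethdev.h>

    static int
    configure_jumbo(uint16_t port_id)
    {
        struct rte_eth_conf conf = {
            .rxmode = { .mtu = 9000 },  /* replaces max_rx_pkt_len */
        };

        return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }
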
@@ -833,8 +774,8 @@ static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
        }
 
        for (i = 0; i < reta_size; i++) {
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
                                enic->rss_cpu.cpu[i / 4].b[i % 4]);
@@ -865,8 +806,8 @@ static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
         */
        rss_cpu = enic->rss_cpu;
        for (i = 0; i < reta_size; i++) {
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        rss_cpu.cpu[i / 4].b[i % 4] =
                                enic_rte_rq_idx_to_sop_idx(
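
Caller-side construction of reta_conf mirrors the idx/shift arithmetic above;
an illustrative sketch that points every bucket at queue 0, assuming
reta_size <= 512:

    #include <string.h>
    #include <rte_ethdev.h>

    static int
    reta_all_to_queue0(uint16_t port_id, uint16_t reta_size)
    {
        struct rte_eth_rss_reta_entry64
            conf[RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE];
        uint16_t i;

        memset(conf, 0, sizeof(conf));
        for (i = 0; i < reta_size; i++) {
            uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
            uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

            conf[idx].mask |= 1ULL << shift;
            conf[idx].reta[shift] = 0;  /* all traffic to queue 0 */
        }
        return rte_eth_dev_rss_reta_update(port_id, conf, reta_size);
    }
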
@@ -942,7 +883,7 @@ static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
         */
        conf->offloads = enic->rx_offload_capa;
        if (!enic->ig_vlan_strip_en)
-               conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+               conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        /* rx_thresh and other fields are not applicable for enic */
 }
 
@@ -976,6 +917,8 @@ static int enicpmd_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
                info_str = "Scalar No Scatter";
        else if (pkt_burst == enic_recv_pkts)
                info_str = "Scalar";
+       else if (pkt_burst == enic_recv_pkts_64)
+               info_str = "Scalar 64B Completion";
        if (info_str) {
                strlcpy(mode->info, info_str, sizeof(mode->info));
                ret = 0;
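
The strings above can be read back with the burst-mode query API; a small
sketch:

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Report which Rx path ("Scalar", "Scalar 64B Completion", ...)
     * a queue ended up on.
     */
    static void
    show_rx_mode(uint16_t port_id, uint16_t qid)
    {
        struct rte_eth_burst_mode mode;

        if (rte_eth_rx_burst_mode_get(port_id, qid, &mode) == 0)
            printf("port %u rxq %u: %s\n", port_id, qid, mode.info);
    }
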
@@ -1026,26 +969,32 @@ static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
 static int udp_tunnel_common_check(struct enic *enic,
                                   struct rte_eth_udp_tunnel *tnl)
 {
-       if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN)
+       if (tnl->prot_type != RTE_ETH_TUNNEL_TYPE_VXLAN &&
+           tnl->prot_type != RTE_ETH_TUNNEL_TYPE_GENEVE)
                return -ENOTSUP;
        if (!enic->overlay_offload) {
-               ENICPMD_LOG(DEBUG, " vxlan (overlay offload) is not "
-                            "supported\n");
+               ENICPMD_LOG(DEBUG, " overlay offload is not supported\n");
                return -ENOTSUP;
        }
        return 0;
 }
 
-static int update_vxlan_port(struct enic *enic, uint16_t port)
+static int update_tunnel_port(struct enic *enic, uint16_t port, bool vxlan)
 {
-       if (vnic_dev_overlay_offload_cfg(enic->vdev,
-                                        OVERLAY_CFG_VXLAN_PORT_UPDATE,
-                                        port)) {
-               ENICPMD_LOG(DEBUG, " failed to update vxlan port\n");
+       uint8_t cfg;
+
+       cfg = vxlan ? OVERLAY_CFG_VXLAN_PORT_UPDATE :
+               OVERLAY_CFG_GENEVE_PORT_UPDATE;
+       if (vnic_dev_overlay_offload_cfg(enic->vdev, cfg, port)) {
+               ENICPMD_LOG(DEBUG, " failed to update tunnel port\n");
                return -EINVAL;
        }
-       ENICPMD_LOG(DEBUG, " updated vxlan port to %u\n", port);
-       enic->vxlan_port = port;
+       ENICPMD_LOG(DEBUG, " updated %s port to %u\n",
+                   vxlan ? "vxlan" : "geneve", port);
+       if (vxlan)
+               enic->vxlan_port = port;
+       else
+               enic->geneve_port = port;
        return 0;
 }
 
@@ -1053,34 +1002,48 @@ static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
                                           struct rte_eth_udp_tunnel *tnl)
 {
        struct enic *enic = pmd_priv(eth_dev);
+       uint16_t port;
+       bool vxlan;
        int ret;
 
        ENICPMD_FUNC_TRACE();
        ret = udp_tunnel_common_check(enic, tnl);
        if (ret)
                return ret;
+       vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
+       if (vxlan)
+               port = enic->vxlan_port;
+       else
+               port = enic->geneve_port;
        /*
-        * The NIC has 1 configurable VXLAN port number. "Adding" a new port
-        * number replaces it.
+        * The NIC has 1 configurable port number per tunnel type.
+        * "Adding" a new port number replaces it.
         */
-       if (tnl->udp_port == enic->vxlan_port || tnl->udp_port == 0) {
+       if (tnl->udp_port == port || tnl->udp_port == 0) {
                ENICPMD_LOG(DEBUG, " %u is already configured or invalid\n",
                             tnl->udp_port);
                return -EINVAL;
        }
-       return update_vxlan_port(enic, tnl->udp_port);
+       return update_tunnel_port(enic, tnl->udp_port, vxlan);
 }
 
 static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
                                           struct rte_eth_udp_tunnel *tnl)
 {
        struct enic *enic = pmd_priv(eth_dev);
+       uint16_t port;
+       bool vxlan;
        int ret;
 
        ENICPMD_FUNC_TRACE();
        ret = udp_tunnel_common_check(enic, tnl);
        if (ret)
                return ret;
+       vxlan = (tnl->prot_type == RTE_ETH_TUNNEL_TYPE_VXLAN);
+       if (vxlan)
+               port = enic->vxlan_port;
+       else
+               port = enic->geneve_port;
        /*
         * Clear the previously set port number and restore the
         * hardware default port number. Some drivers disable VXLAN
@@ -1088,12 +1051,13 @@ static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
         * enic does not do that as VXLAN is part of overlay offload,
         * which is tied to inner RSS and TSO.
         */
-       if (tnl->udp_port != enic->vxlan_port) {
-               ENICPMD_LOG(DEBUG, " %u is not a configured vxlan port\n",
+       if (tnl->udp_port != port) {
+               ENICPMD_LOG(DEBUG, " %u is not a configured tunnel port\n",
                             tnl->udp_port);
                return -EINVAL;
        }
-       return update_vxlan_port(enic, RTE_VXLAN_DEFAULT_PORT);
+       port = vxlan ? RTE_VXLAN_DEFAULT_PORT : RTE_GENEVE_DEFAULT_PORT;
+       return update_tunnel_port(enic, port, vxlan);
 }
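
An application replaces the NIC's single Geneve port number as follows;
a sketch (6081 is the IANA default the hardware starts with):

    #include <rte_ethdev.h>

    static int
    set_geneve_port(uint16_t port_id, uint16_t udp_port)
    {
        struct rte_eth_udp_tunnel tnl = {
            .udp_port = udp_port,
            .prot_type = RTE_ETH_TUNNEL_TYPE_GENEVE,
        };

        return rte_eth_dev_udp_tunnel_port_add(port_id, &tnl);
    }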
 
 static int enicpmd_dev_fw_version_get(struct rte_eth_dev *eth_dev,
@@ -1104,16 +1068,21 @@ static int enicpmd_dev_fw_version_get(struct rte_eth_dev *eth_dev,
        int ret;
 
        ENICPMD_FUNC_TRACE();
-       if (fw_version == NULL || fw_size <= 0)
-               return -EINVAL;
+
        enic = pmd_priv(eth_dev);
        ret = vnic_dev_fw_info(enic->vdev, &info);
        if (ret)
                return ret;
-       snprintf(fw_version, fw_size, "%s %s",
+       ret = snprintf(fw_version, fw_size, "%s %s",
                 info->fw_version, info->fw_build);
-       fw_version[fw_size - 1] = '\0';
-       return 0;
+       if (ret < 0)
+               return -EINVAL;
+
+       ret += 1; /* add the size of '\0' */
+       if (fw_size < (size_t)ret)
+               return ret;
+       else
+               return 0;
 }
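
Callers can use the new return convention (0 on success, required size
including the terminating '\0' when the buffer is too small) to size the
buffer; a sketch:

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    print_fw_version(uint16_t port_id)
    {
        char buf[64];
        int ret = rte_eth_dev_fw_version_get(port_id, buf, sizeof(buf));

        if (ret == 0)
            printf("fw: %s\n", buf);
        else if (ret > 0)
            printf("need a %d-byte buffer\n", ret);
    }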
 
 static const struct eth_dev_ops enicpmd_eth_dev_ops = {
@@ -1161,7 +1130,7 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
        .mac_addr_remove      = enicpmd_remove_mac_addr,
        .mac_addr_set         = enicpmd_set_mac_addr,
        .set_mc_addr_list     = enicpmd_set_mc_addr_list,
-       .filter_ctrl          = enicpmd_dev_filter_ctrl,
+       .flow_ops_get         = enicpmd_dev_flow_ops_get,
        .reta_query           = enicpmd_dev_rss_reta_query,
        .reta_update          = enicpmd_dev_rss_reta_update,
        .rss_hash_conf_get    = enicpmd_dev_rss_hash_conf_get,
@@ -1188,12 +1157,12 @@ static int enic_parse_zero_one(const char *key,
                        ": expected=0|1 given=%s\n", key, value);
                return -EINVAL;
        }
+       if (strcmp(key, ENIC_DEVARG_CQ64) == 0)
+               enic->cq64_request = b;
        if (strcmp(key, ENIC_DEVARG_DISABLE_OVERLAY) == 0)
                enic->disable_overlay = b;
        if (strcmp(key, ENIC_DEVARG_ENABLE_AVX2_RX) == 0)
                enic->enable_avx2_rx = b;
-       if (strcmp(key, ENIC_DEVARG_GENEVE_OPT) == 0)
-               enic->geneve_opt_request = b;
        return 0;
 }
 
@@ -1233,9 +1202,9 @@ static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key,
 static int enic_check_devargs(struct rte_eth_dev *dev)
 {
        static const char *const valid_keys[] = {
+               ENIC_DEVARG_CQ64,
                ENIC_DEVARG_DISABLE_OVERLAY,
                ENIC_DEVARG_ENABLE_AVX2_RX,
-               ENIC_DEVARG_GENEVE_OPT,
                ENIC_DEVARG_IG_VLAN_REWRITE,
                ENIC_DEVARG_REPRESENTOR,
                NULL};
@@ -1244,20 +1213,20 @@ static int enic_check_devargs(struct rte_eth_dev *dev)
 
        ENICPMD_FUNC_TRACE();
 
+       enic->cq64_request = true; /* Use 64B entry if available */
        enic->disable_overlay = false;
        enic->enable_avx2_rx = false;
-       enic->geneve_opt_request = false;
        enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
        if (!dev->device->devargs)
                return 0;
        kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
        if (!kvlist)
                return -EINVAL;
-       if (rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY,
+       if (rte_kvargs_process(kvlist, ENIC_DEVARG_CQ64,
                               enic_parse_zero_one, enic) < 0 ||
-           rte_kvargs_process(kvlist, ENIC_DEVARG_ENABLE_AVX2_RX,
+           rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY,
                               enic_parse_zero_one, enic) < 0 ||
-           rte_kvargs_process(kvlist, ENIC_DEVARG_GENEVE_OPT,
+           rte_kvargs_process(kvlist, ENIC_DEVARG_ENABLE_AVX2_RX,
                               enic_parse_zero_one, enic) < 0 ||
            rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
                               enic_parse_ig_vlan_rewrite, enic) < 0) {
@@ -1345,6 +1314,12 @@ static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                if (retval)
                        return retval;
        }
+       if (eth_da.nb_representor_ports > 0 &&
+           eth_da.type != RTE_ETH_REPRESENTOR_VF) {
+               ENICPMD_LOG(ERR, "unsupported representor type: %s\n",
+                           pci_dev->device.devargs->args);
+               return -ENOTSUP;
+       }
        retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                sizeof(struct enic),
                eth_dev_pci_specific_init, pci_dev,
@@ -1424,7 +1399,7 @@ RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(net_enic,
+       ENIC_DEVARG_CQ64 "=0|1 "
        ENIC_DEVARG_DISABLE_OVERLAY "=0|1 "
        ENIC_DEVARG_ENABLE_AVX2_RX "=0|1 "
-       ENIC_DEVARG_GENEVE_OPT "=0|1 "
        ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");
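
For reference, these devargs are given per device on the EAL command line;
the PCI address below is hypothetical:

    dpdk-testpmd -a 0000:af:00.0,cq64=0,disable-overlay=1,ig-vlan-rewrite=untag -- -i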