diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index b2fd2fd..c907d7f 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -7,13 +7,8 @@
 #include "qede_ethdev.h"
 #include <rte_string_fns.h>
 #include <rte_alarm.h>
-#include <rte_version.h>
 #include <rte_kvargs.h>
 
-/* Globals */
-int qede_logtype_init;
-int qede_logtype_driver;
-
 static const struct qed_eth_ops *qed_ops;
 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev);
 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev);
@@ -125,6 +120,8 @@ static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
                offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
        {"rx_mac_filter_discards",
                offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
+       {"rx_gft_filter_drop",
+               offsetof(struct ecore_eth_stats_common, gft_filter_drop)},
        {"rx_hw_buffer_truncates",
                offsetof(struct ecore_eth_stats_common, brb_truncates)},
        {"rx_hw_buffer_discards",
@@ -230,9 +227,60 @@ static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
                offsetof(struct qede_rx_queue, rx_alloc_errors)}
 };
 
+/* Get FW version string based on fw_size */
+static int
+qede_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
+{
+       struct qede_dev *qdev = dev->data->dev_private;
+       struct ecore_dev *edev = &qdev->edev;
+       struct qed_dev_info *info = &qdev->dev_info.common;
+       static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
+       size_t size;
+
+       if (IS_PF(edev))
+               snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
+                        QEDE_PMD_FW_VERSION);
+       else
+               snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
+                        info->fw_major, info->fw_minor,
+                        info->fw_rev, info->fw_eng);
+       size = strlen(ver_str);
+       if (size + 1 <= fw_size) /* Add 1 byte for "\0" */
+               strlcpy(fw_ver, ver_str, fw_size);
+       else
+               return (size + 1);
+
+       snprintf(ver_str + size, (QEDE_PMD_DRV_VER_STR_SIZE - size),
+                " MFW: %d.%d.%d.%d",
+                GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_3),
+                GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_2),
+                GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_1),
+                GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_0));
+       size = strlen(ver_str);
+       if (size + 1 <= fw_size)
+               strlcpy(fw_ver, ver_str, fw_size);
+
+       if (fw_size <= 32)
+               goto out;
+
+       snprintf(ver_str + size, (QEDE_PMD_DRV_VER_STR_SIZE - size),
+                " MBI: %d.%d.%d",
+                GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_2),
+                GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_1),
+                GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_0));
+       size = strlen(ver_str);
+       if (size + 1 <= fw_size)
+               strlcpy(fw_ver, ver_str, fw_size);
+
+out:
+       return 0;
+}
+
 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
 {
+       OSAL_SPIN_LOCK(&p_hwfn->spq_lock);
        ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
+       OSAL_SPIN_UNLOCK(&p_hwfn->spq_lock);
 }
 
 static void
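The new qede_fw_version_get() follows the standard ethdev fw_version_get contract: copy into the caller's buffer when it fits, otherwise return the size required (including the terminating NUL). A minimal caller-side sketch, assuming the callback is wired into the driver's dev_ops; the helper name print_fw_version is illustrative, not part of the patch:

        #include <stdio.h>
        #include <rte_ethdev.h>

        static void print_fw_version(uint16_t port_id)
        {
                char buf[64];
                int ret = rte_eth_dev_fw_version_get(port_id, buf, sizeof(buf));

                if (ret == 0)
                        printf("port %u FW: %s\n", port_id, buf);
                else if (ret > 0)
                        printf("port %u: buffer too small, need %d bytes\n",
                               port_id, ret);
                else
                        printf("port %u: no FW version available (%d)\n",
                               port_id, ret);
        }
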
@@ -248,8 +296,8 @@ qede_interrupt_handler_intx(void *param)
        if (status & 0x1) {
                qede_interrupt_action(ECORE_LEADING_HWFN(edev));
 
-               if (rte_intr_enable(eth_dev->intr_handle))
-                       DP_ERR(edev, "rte_intr_enable failed\n");
+               if (rte_intr_ack(eth_dev->intr_handle))
+                       DP_ERR(edev, "rte_intr_ack failed\n");
        }
 }
 
@@ -261,8 +309,50 @@ qede_interrupt_handler(void *param)
        struct ecore_dev *edev = &qdev->edev;
 
        qede_interrupt_action(ECORE_LEADING_HWFN(edev));
-       if (rte_intr_enable(eth_dev->intr_handle))
-               DP_ERR(edev, "rte_intr_enable failed\n");
+       if (rte_intr_ack(eth_dev->intr_handle))
+               DP_ERR(edev, "rte_intr_ack failed\n");
+}
+
+static void
+qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy)
+{
+       uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+       struct qede_dev *qdev = dev->data->dev_private;
+       struct ecore_dev *edev = &qdev->edev;
+       bool use_tx_offload = false;
+
+       if (is_dummy) {
+               dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
+               dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
+               return;
+       }
+
+       if (ECORE_IS_CMT(edev)) {
+               dev->rx_pkt_burst = qede_recv_pkts_cmt;
+               dev->tx_pkt_burst = qede_xmit_pkts_cmt;
+               return;
+       }
+
+       if (dev->data->lro || dev->data->scattered_rx) {
+               DP_INFO(edev, "Assigning qede_recv_pkts\n");
+               dev->rx_pkt_burst = qede_recv_pkts;
+       } else {
+               DP_INFO(edev, "Assigning qede_recv_pkts_regular\n");
+               dev->rx_pkt_burst = qede_recv_pkts_regular;
+       }
+
+       use_tx_offload = !!(tx_offloads &
+                           (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
+                            RTE_ETH_TX_OFFLOAD_TCP_TSO | /* tso */
+                            RTE_ETH_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */
+
+       if (use_tx_offload) {
+               DP_INFO(edev, "Assigning qede_xmit_pkts\n");
+               dev->tx_pkt_burst = qede_xmit_pkts;
+       } else {
+               DP_INFO(edev, "Assigning qede_xmit_pkts_regular\n");
+               dev->tx_pkt_burst = qede_xmit_pkts_regular;
+       }
 }
 
 static void
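For context, the burst-handler choice above is driven entirely by what the application requests at configure time. A hedged sketch using the standard ethdev API (the function name configure_with_tso and the queue counts are illustrative): requesting TSO in txmode.offloads steers the driver onto the full-featured qede_xmit_pkts() path, since TCP_TSO is one of the three offloads tested above.

        #include <rte_ethdev.h>

        /* Illustrative configuration; any of OUTER_IPV4_CKSUM, TCP_TSO or
         * VLAN_INSERT selects qede_xmit_pkts() per the logic above.
         */
        static int configure_with_tso(uint16_t port_id, uint16_t nb_rxq,
                                      uint16_t nb_txq)
        {
                struct rte_eth_conf conf = {
                        .txmode.offloads = RTE_ETH_TX_OFFLOAD_TCP_TSO |
                                           RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
                };

                return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
        }
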
@@ -272,38 +362,33 @@ qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
        qdev->ops = qed_ops;
 }
 
-static void qede_print_adapter_info(struct qede_dev *qdev)
+static void qede_print_adapter_info(struct rte_eth_dev *dev)
 {
+       struct qede_dev *qdev = dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
-       struct qed_dev_info *info = &qdev->dev_info.common;
-       static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
        static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
 
-       DP_INFO(edev, "*********************************\n");
-       DP_INFO(edev, " DPDK version:%s\n", rte_version());
-       DP_INFO(edev, " Chip details : %s %c%d\n",
+       DP_INFO(edev, "**************************************************\n");
+       DP_INFO(edev, " %-20s: %s\n", "DPDK version", rte_version());
+       DP_INFO(edev, " %-20s: %s %c%d\n", "Chip details",
                  ECORE_IS_BB(edev) ? "BB" : "AH",
                  'A' + edev->chip_rev,
                  (int)edev->chip_metal);
-       snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
-                info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
-       snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
-                ver_str, QEDE_PMD_VERSION);
-       DP_INFO(edev, " Driver version : %s\n", drv_ver);
-       DP_INFO(edev, " Firmware version : %s\n", ver_str);
-
-       snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
-                "%d.%d.%d.%d",
-               (info->mfw_rev >> 24) & 0xff,
-               (info->mfw_rev >> 16) & 0xff,
-               (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
-       DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
-       DP_INFO(edev, " Firmware file : %s\n", qede_fw_file);
-       DP_INFO(edev, "*********************************\n");
+       snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
+                QEDE_PMD_DRV_VERSION);
+       DP_INFO(edev, " %-20s: %s\n", "Driver version", ver_str);
+       snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
+                QEDE_PMD_BASE_VERSION);
+       DP_INFO(edev, " %-20s: %s\n", "Base version", ver_str);
+       qede_fw_version_get(dev, ver_str, sizeof(ver_str));
+       DP_INFO(edev, " %-20s: %s\n", "Firmware version", ver_str);
+       DP_INFO(edev, " %-20s: %s\n", "Firmware file", qede_fw_file);
+       DP_INFO(edev, "**************************************************\n");
 }
 
 static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
 {
+       struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        unsigned int i = 0, j = 0, qid;
        unsigned int rxq_stat_cntrs, txq_stat_cntrs;
@@ -311,12 +396,12 @@ static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
 
        DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");
 
-       rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+       rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(dev),
                               RTE_ETHDEV_QUEUE_STAT_CNTRS);
-       txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+       txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(dev),
                               RTE_ETHDEV_QUEUE_STAT_CNTRS);
 
-       for_each_rss(qid) {
+       for (qid = 0; qid < qdev->num_rx_queues; qid++) {
                OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
                             offsetof(struct qede_rx_queue, rcv_pkts), 0,
                            sizeof(uint64_t));
@@ -342,7 +427,7 @@ static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
 
        i = 0;
 
-       for_each_tss(qid) {
+       for (qid = 0; qid < qdev->num_tx_queues; qid++) {
                txq = qdev->fp_array[qid].txq;
 
                OSAL_MEMSET((uint64_t *)(uintptr_t)
@@ -433,7 +518,7 @@ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
        params.update_vport_active_tx_flg = 1;
        params.vport_active_rx_flg = flg;
        params.vport_active_tx_flg = flg;
-       if (~qdev->enable_tx_switching & flg) {
+       if ((qdev->enable_tx_switching == false) && (flg == true)) {
                params.update_tx_switching_flg = 1;
                params.tx_switching_flg = !flg;
        }
@@ -534,17 +619,16 @@ qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
                ECORE_ACCEPT_BCAST;
 
        if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
-               flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+               flags.rx_accept_filter |= (ECORE_ACCEPT_UCAST_UNMATCHED |
+                                          ECORE_ACCEPT_MCAST_UNMATCHED);
                if (IS_VF(edev)) {
-                       flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
-                       DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
+                       flags.tx_accept_filter |=
+                                               (ECORE_ACCEPT_UCAST_UNMATCHED |
+                                                ECORE_ACCEPT_MCAST_UNMATCHED);
+                       DP_INFO(edev, "Enabling Tx unmatched flags for VF\n");
                }
        } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
                flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
-       } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
-                               QED_FILTER_RX_MODE_TYPE_PROMISC)) {
-               flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
-                       ECORE_ACCEPT_MCAST_UNMATCHED;
        }
 
        return ecore_filter_accept_cmd(edev, 0, flags, false, false,
@@ -559,13 +643,13 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qede_ucast_entry *tmp = NULL;
        struct qede_ucast_entry *u;
-       struct ether_addr *mac_addr;
+       struct rte_ether_addr *mac_addr;
 
-       mac_addr  = (struct ether_addr *)ucast->mac;
+       mac_addr  = (struct rte_ether_addr *)ucast->mac;
        if (add) {
                SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
                        if ((memcmp(mac_addr, &tmp->mac,
-                                   ETHER_ADDR_LEN) == 0) &&
+                                   RTE_ETHER_ADDR_LEN) == 0) &&
                             ucast->vni == tmp->vni &&
                             ucast->vlan == tmp->vlan) {
                                DP_INFO(edev, "Unicast MAC is already added"
@@ -580,7 +664,7 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
                        DP_ERR(edev, "Did not allocate memory for ucast\n");
                        return -ENOMEM;
                }
-               ether_addr_copy(mac_addr, &u->mac);
+               rte_ether_addr_copy(mac_addr, &u->mac);
                u->vlan = ucast->vlan;
                u->vni = ucast->vni;
                SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
@@ -588,7 +672,7 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
        } else {
                SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
                        if ((memcmp(mac_addr, &tmp->mac,
-                                   ETHER_ADDR_LEN) == 0) &&
+                                   RTE_ETHER_ADDR_LEN) == 0) &&
                            ucast->vlan == tmp->vlan      &&
                            ucast->vni == tmp->vni)
                        break;
@@ -605,8 +689,9 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
 }
 
 static int
-qede_add_mcast_filters(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
-                      uint32_t mc_addrs_num)
+qede_add_mcast_filters(struct rte_eth_dev *eth_dev,
+               struct rte_ether_addr *mc_addrs,
+               uint32_t mc_addrs_num)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
@@ -622,14 +707,14 @@ qede_add_mcast_filters(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
                        DP_ERR(edev, "Did not allocate memory for mcast\n");
                        return -ENOMEM;
                }
-               ether_addr_copy(&mc_addrs[i], &m->mac);
+               rte_ether_addr_copy(&mc_addrs[i], &m->mac);
                SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
        }
        memset(&mcast, 0, sizeof(mcast));
        mcast.num_mc_addrs = mc_addrs_num;
        mcast.opcode = ECORE_FILTER_ADD;
        for (i = 0; i < mc_addrs_num; i++)
-               ether_addr_copy(&mc_addrs[i], (struct ether_addr *)
+               rte_ether_addr_copy(&mc_addrs[i], (struct rte_ether_addr *)
                                                        &mcast.mac[i]);
        rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
        if (rc != ECORE_SUCCESS) {
@@ -654,7 +739,8 @@ static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
        mcast.opcode = ECORE_FILTER_REMOVE;
        j = 0;
        SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
-               ether_addr_copy(&tmp->mac, (struct ether_addr *)&mcast.mac[j]);
+               rte_ether_addr_copy(&tmp->mac,
+                               (struct rte_ether_addr *)&mcast.mac[j]);
                j++;
        }
        rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
@@ -701,19 +787,19 @@ qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
 }
 
 static int
-qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
+qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr,
                  __rte_unused uint32_t index, __rte_unused uint32_t pool)
 {
        struct ecore_filter_ucast ucast;
        int re;
 
-       if (!is_valid_assigned_ether_addr(mac_addr))
+       if (!rte_is_valid_assigned_ether_addr(mac_addr))
                return -EINVAL;
 
        qede_set_ucast_cmn_params(&ucast);
        ucast.opcode = ECORE_FILTER_ADD;
        ucast.type = ECORE_FILTER_MAC;
-       ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
+       rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)&ucast.mac);
        re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
        return re;
 }
@@ -733,7 +819,7 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
                return;
        }
 
-       if (!is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
+       if (!rte_is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
                return;
 
        qede_set_ucast_cmn_params(&ucast);
@@ -741,14 +827,14 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
        ucast.type = ECORE_FILTER_MAC;
 
        /* Use the index maintained by rte */
-       ether_addr_copy(&eth_dev->data->mac_addrs[index],
-                       (struct ether_addr *)&ucast.mac);
+       rte_ether_addr_copy(&eth_dev->data->mac_addrs[index],
+                       (struct rte_ether_addr *)&ucast.mac);
 
        qede_mac_int_ops(eth_dev, &ucast, false);
 }
 
 static int
-qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
+qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
@@ -814,6 +900,8 @@ static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
                }
        }
 
+       qdev->vlan_strip_flg = flg;
+
        DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
        return 0;
 }
@@ -914,16 +1002,16 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 
-       if (mask & ETH_VLAN_STRIP_MASK) {
-               if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+       if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+               if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        (void)qede_vlan_stripping(eth_dev, 1);
                else
                        (void)qede_vlan_stripping(eth_dev, 0);
        }
 
-       if (mask & ETH_VLAN_FILTER_MASK) {
+       if (mask & RTE_ETH_VLAN_FILTER_MASK) {
                /* VLAN filtering kicks in when a VLAN is added */
-               if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+               if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
                        qede_vlan_filter_set(eth_dev, 0, 1);
                } else {
                        if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
@@ -934,16 +1022,13 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
                                 * enabled
                                 */
                                eth_dev->data->dev_conf.rxmode.offloads |=
-                                               DEV_RX_OFFLOAD_VLAN_FILTER;
+                                               RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
                        } else {
                                qede_vlan_filter_set(eth_dev, 0, 0);
                        }
                }
        }
 
-       if (mask & ETH_VLAN_EXTEND_MASK)
-               DP_ERR(edev, "Extend VLAN not supported\n");
-
        qdev->vlan_offload_mask = mask;
 
        DP_INFO(edev, "VLAN offload mask %d\n", mask);
@@ -984,12 +1069,12 @@ int qede_config_rss(struct rte_eth_dev *eth_dev)
        /* Configure default RETA */
        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
-               reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+               reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
 
        for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
-               id = i / RTE_RETA_GROUP_SIZE;
-               pos = i % RTE_RETA_GROUP_SIZE;
-               q = i % QEDE_RSS_COUNT(qdev);
+               id = i / RTE_ETH_RETA_GROUP_SIZE;
+               pos = i % RTE_ETH_RETA_GROUP_SIZE;
+               q = i % QEDE_RSS_COUNT(eth_dev);
                reta_conf[id].reta[pos] = q;
        }
        if (qede_rss_reta_update(eth_dev, &reta_conf[0],
@@ -1019,18 +1104,20 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
        PMD_INIT_FUNC_TRACE(edev);
 
        /* Update MTU only if it has changed */
-       if (eth_dev->data->mtu != qdev->mtu) {
-               if (qede_update_mtu(eth_dev, qdev->mtu))
+       if (qdev->new_mtu && qdev->new_mtu != qdev->mtu) {
+               if (qede_update_mtu(eth_dev, qdev->new_mtu))
                        goto err;
+               qdev->mtu = qdev->new_mtu;
+               qdev->new_mtu = 0;
        }
 
        /* Configure TPA parameters */
-       if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+       if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
                if (qede_enable_tpa(eth_dev, true))
                        return -EINVAL;
                /* Enable scatter mode for LRO */
                if (!eth_dev->data->scattered_rx)
-                       rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
+                       rxmode->offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
        }
 
        /* Start queues */
@@ -1041,11 +1128,11 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
                qede_reset_queue_stats(qdev, true);
 
        /* Newer SR-IOV PF driver expects RX/TX queues to be started before
-        * enabling RSS. Hence RSS configuration is deferred upto this point.
+        * enabling RSS. Hence RSS configuration is deferred up to this point.
         * Also, we would like to retain similar behavior in PF case, so we
         * don't do PF/VF specific check here.
         */
-       if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+       if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
                if (qede_config_rss(eth_dev))
                        goto err;
 
@@ -1053,12 +1140,18 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
        if (qede_activate_vport(eth_dev, true))
                goto err;
 
+       /* Bring-up the link */
+       qede_dev_set_link_state(eth_dev, true);
+
        /* Update link status */
        qede_link_update(eth_dev, 0);
 
        /* Start/resume traffic */
        qede_fastpath_start(edev);
 
+       /* Assign I/O handlers */
+       qede_assign_rxtx_handlers(eth_dev, false);
+
        DP_INFO(edev, "Device started\n");
 
        return 0;
@@ -1067,16 +1160,28 @@ err:
        return -1; /* common error code is < 0 */
 }
 
-static void qede_dev_stop(struct rte_eth_dev *eth_dev)
+static int qede_dev_stop(struct rte_eth_dev *eth_dev)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 
        PMD_INIT_FUNC_TRACE(edev);
+       eth_dev->data->dev_started = 0;
+
+       /* Bring the link down */
+       qede_dev_set_link_state(eth_dev, false);
+
+       /* Update link status */
+       qede_link_update(eth_dev, 0);
+
+       /* Replace I/O functions with dummy ones. It cannot
+        * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
+        */
+       qede_assign_rxtx_handlers(eth_dev, true);
 
        /* Disable vport */
        if (qede_activate_vport(eth_dev, false))
-               return;
+               return 0;
 
        if (qdev->enable_lro)
                qede_enable_tpa(eth_dev, false);
@@ -1088,6 +1193,8 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev)
        ecore_hw_stop_fastpath(edev); /* TBD - loop */
 
        DP_INFO(edev, "Device is stopped\n");
+
+       return 0;
 }
 
 static const char * const valid_args[] = {
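qede_rxtx_pkts_dummy() itself lives in qede_rxtx.c and is not part of this hunk; a minimal equivalent is sketched below (dummy_pkts is an illustrative name). Reporting zero packets keeps a stopped port safe even if the application keeps calling rte_eth_rx_burst()/rte_eth_tx_burst().

        #include <stdint.h>
        #include <rte_common.h>
        #include <rte_mbuf.h>

        /* Sketch only: a no-op burst handler in the spirit of
         * qede_rxtx_pkts_dummy(); it accepts no packets and receives none.
         */
        static uint16_t
        dummy_pkts(__rte_unused void *queue,
                   __rte_unused struct rte_mbuf **pkts,
                   __rte_unused uint16_t nb_pkts)
        {
                return 0;
        }
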
@@ -1159,25 +1266,14 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+       uint8_t num_rxqs;
+       uint8_t num_txqs;
        int ret;
 
        PMD_INIT_FUNC_TRACE(edev);
 
-       /* Check requirements for 100G mode */
-       if (ECORE_IS_CMT(edev)) {
-               if (eth_dev->data->nb_rx_queues < 2 ||
-                   eth_dev->data->nb_tx_queues < 2) {
-                       DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
-                       return -EINVAL;
-               }
-
-               if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
-                   (eth_dev->data->nb_tx_queues % 2 != 0)) {
-                       DP_ERR(edev,
-                              "100G mode needs even no. of RX/TX queues\n");
-                       return -EINVAL;
-               }
-       }
+       if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+               rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
        /* We need to have min 1 RX queue.There is no min check in
         * rte_eth_dev_configure(), so we are checking it here.
@@ -1195,8 +1291,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
                DP_NOTICE(edev, false,
                          "Invalid devargs supplied, requested change will not take effect\n");
 
-       if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
-             rxmode->mq_mode == ETH_MQ_RX_RSS)) {
+       if (!(rxmode->mq_mode == RTE_ETH_MQ_RX_NONE ||
+             rxmode->mq_mode == RTE_ETH_MQ_RX_RSS)) {
                DP_ERR(edev, "Unsupported multi-queue mode\n");
                return -ENOTSUP;
        }
@@ -1204,19 +1300,19 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
        if (qede_check_fdir_support(eth_dev))
                return -ENOTSUP;
 
-       qede_dealloc_fp_resc(eth_dev);
-       qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
-       qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
-       if (qede_alloc_fp_resc(qdev))
-               return -ENOMEM;
-
-       /* If jumbo enabled adjust MTU */
-       if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
-               eth_dev->data->mtu =
-                       eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
-                       ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
+       /* Allocate/reallocate fastpath resources only for new queue config */
+       num_txqs = eth_dev->data->nb_tx_queues * edev->num_hwfns;
+       num_rxqs = eth_dev->data->nb_rx_queues * edev->num_hwfns;
+       if (qdev->num_tx_queues != num_txqs ||
+           qdev->num_rx_queues != num_rxqs) {
+               qede_dealloc_fp_resc(eth_dev);
+               qdev->num_tx_queues = num_txqs;
+               qdev->num_rx_queues = num_rxqs;
+               if (qede_alloc_fp_resc(qdev))
+                       return -ENOMEM;
+       }
 
-       if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
+       if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
                eth_dev->data->scattered_rx = 1;
 
        if (qede_start_vport(qdev, eth_dev->data->mtu))
@@ -1225,13 +1321,18 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
        qdev->mtu = eth_dev->data->mtu;
 
        /* Enable VLAN offloads by default */
-       ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
-                                            ETH_VLAN_FILTER_MASK);
+       ret = qede_vlan_offload_set(eth_dev, RTE_ETH_VLAN_STRIP_MASK  |
+                                            RTE_ETH_VLAN_FILTER_MASK);
        if (ret)
                return ret;
 
        DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
-                       QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
+                       QEDE_RSS_COUNT(eth_dev), QEDE_TSS_COUNT(eth_dev));
+
+       if (ECORE_IS_CMT(edev))
+               DP_INFO(edev, "Actual HW queues for CMT mode - RX = %d TX = %d\n",
+                       qdev->num_rx_queues, qdev->num_tx_queues);
+
 
        return 0;
 }
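The per-hwfn flattening introduced here (and reused by the stats code further down) interleaves engines round-robin: queue qid on engine hw_fn lives at fp_array[qid * num_hwfns + hw_fn]. A small self-contained demo, assuming the usual CMT case of two HW functions:

        #include <stdio.h>

        int main(void)
        {
                const int num_hwfns = 2;     /* CMT: two engines per port */
                const int nb_rx_queues = 4;  /* what the application asked for */

                /* 4 app queues become 8 HW queues, engine-interleaved */
                for (int qid = 0; qid < nb_rx_queues; qid++)
                        for (int hw_fn = 0; hw_fn < num_hwfns; hw_fn++)
                                printf("app queue %d, engine %d -> fp_array[%d]\n",
                                       qid, hw_fn, qid * num_hwfns + hw_fn);
                return 0;
        }
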
@@ -1251,7 +1352,7 @@ static const struct rte_eth_desc_lim qede_tx_desc_lim = {
        .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
 };
 
-static void
+static int
 qede_dev_info_get(struct rte_eth_dev *eth_dev,
                  struct rte_eth_dev_info *dev_info)
 {
@@ -1273,6 +1374,10 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
        else
                dev_info->max_rx_queues = (uint16_t)RTE_MIN(
                        QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
+       /* Since CMT mode internally doubles the number of queues */
+       if (ECORE_IS_CMT(edev))
+               dev_info->max_rx_queues  = dev_info->max_rx_queues / 2;
+
        dev_info->max_tx_queues = dev_info->max_rx_queues;
 
        dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
@@ -1280,34 +1385,34 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
        dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
        dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
        dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
-       dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM  |
-                                    DEV_RX_OFFLOAD_UDP_CKSUM   |
-                                    DEV_RX_OFFLOAD_TCP_CKSUM   |
-                                    DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-                                    DEV_RX_OFFLOAD_TCP_LRO     |
-                                    DEV_RX_OFFLOAD_KEEP_CRC    |
-                                    DEV_RX_OFFLOAD_SCATTER     |
-                                    DEV_RX_OFFLOAD_JUMBO_FRAME |
-                                    DEV_RX_OFFLOAD_VLAN_FILTER |
-                                    DEV_RX_OFFLOAD_VLAN_STRIP);
+       dev_info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM      |
+                                    RTE_ETH_RX_OFFLOAD_UDP_CKSUM       |
+                                    RTE_ETH_RX_OFFLOAD_TCP_CKSUM       |
+                                    RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+                                    RTE_ETH_RX_OFFLOAD_TCP_LRO |
+                                    RTE_ETH_RX_OFFLOAD_KEEP_CRC    |
+                                    RTE_ETH_RX_OFFLOAD_SCATTER |
+                                    RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+                                    RTE_ETH_RX_OFFLOAD_VLAN_STRIP  |
+                                    RTE_ETH_RX_OFFLOAD_RSS_HASH);
        dev_info->rx_queue_offload_capa = 0;
 
        /* TX offloads are on a per-packet basis, so it is applicable
         * to both at port and queue levels.
         */
-       dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
-                                    DEV_TX_OFFLOAD_IPV4_CKSUM  |
-                                    DEV_TX_OFFLOAD_UDP_CKSUM   |
-                                    DEV_TX_OFFLOAD_TCP_CKSUM   |
-                                    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-                                    DEV_TX_OFFLOAD_MULTI_SEGS  |
-                                    DEV_TX_OFFLOAD_TCP_TSO     |
-                                    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-                                    DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+       dev_info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_VLAN_INSERT     |
+                                    RTE_ETH_TX_OFFLOAD_IPV4_CKSUM      |
+                                    RTE_ETH_TX_OFFLOAD_UDP_CKSUM       |
+                                    RTE_ETH_TX_OFFLOAD_TCP_CKSUM       |
+                                    RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+                                    RTE_ETH_TX_OFFLOAD_MULTI_SEGS  |
+                                    RTE_ETH_TX_OFFLOAD_TCP_TSO |
+                                    RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
+                                    RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO);
        dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;
 
        dev_info->default_txconf = (struct rte_eth_txconf) {
-               .offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
+               .offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS,
        };
 
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -1319,18 +1424,20 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
        memset(&link, 0, sizeof(struct qed_link_output));
        qdev->ops->common->get_link(edev, &link);
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
-               speed_cap |= ETH_LINK_SPEED_1G;
+               speed_cap |= RTE_ETH_LINK_SPEED_1G;
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
-               speed_cap |= ETH_LINK_SPEED_10G;
+               speed_cap |= RTE_ETH_LINK_SPEED_10G;
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
-               speed_cap |= ETH_LINK_SPEED_25G;
+               speed_cap |= RTE_ETH_LINK_SPEED_25G;
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
-               speed_cap |= ETH_LINK_SPEED_40G;
+               speed_cap |= RTE_ETH_LINK_SPEED_40G;
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
-               speed_cap |= ETH_LINK_SPEED_50G;
+               speed_cap |= RTE_ETH_LINK_SPEED_50G;
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
-               speed_cap |= ETH_LINK_SPEED_100G;
+               speed_cap |= RTE_ETH_LINK_SPEED_100G;
        dev_info->speed_capa = speed_cap;
+
+       return 0;
 }
 
 /* return 0 means link status changed, -1 means not changed */
@@ -1354,10 +1461,10 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
        /* Link Mode */
        switch (q_link.duplex) {
        case QEDE_DUPLEX_HALF:
-               link_duplex = ETH_LINK_HALF_DUPLEX;
+               link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
                break;
        case QEDE_DUPLEX_FULL:
-               link_duplex = ETH_LINK_FULL_DUPLEX;
+               link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
                break;
        case QEDE_DUPLEX_UNKNOWN:
        default:
@@ -1366,11 +1473,11 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
        link.link_duplex = link_duplex;
 
        /* Link Status */
-       link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
+       link.link_status = q_link.link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
 
        /* AN */
        link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
-                            ETH_LINK_AUTONEG : ETH_LINK_FIXED;
+                            RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
 
        DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
                link.link_speed, link.link_duplex,
@@ -1379,33 +1486,36 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
        return rte_eth_linkstatus_set(eth_dev, &link);
 }
 
-static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
+static int qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
 {
-       struct qede_dev *qdev = eth_dev->data->dev_private;
-       struct ecore_dev *edev = &qdev->edev;
+       enum _ecore_status_t ecore_status;
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
 
        PMD_INIT_FUNC_TRACE(edev);
 
-       if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
-               type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+       ecore_status = qed_configure_filter_rx_mode(eth_dev, type);
 
-       qed_configure_filter_rx_mode(eth_dev, type);
+       return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
 }
 
-static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
+static int qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
 {
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
+       enum _ecore_status_t ecore_status;
 
        PMD_INIT_FUNC_TRACE(edev);
 
        if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
-               qed_configure_filter_rx_mode(eth_dev,
+               ecore_status = qed_configure_filter_rx_mode(eth_dev,
                                QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
        else
-               qed_configure_filter_rx_mode(eth_dev,
+               ecore_status = qed_configure_filter_rx_mode(eth_dev,
                                QED_FILTER_RX_MODE_TYPE_REGULAR);
+
+       return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
 }
 
 static void qede_poll_sp_sb_cb(void *param)
@@ -1424,27 +1534,32 @@ static void qede_poll_sp_sb_cb(void *param)
        if (rc != 0) {
                DP_ERR(edev, "Unable to start periodic"
                             " timer rc %d\n", rc);
-               assert(false && "Unable to start periodic timer");
        }
 }
 
-static void qede_dev_close(struct rte_eth_dev *eth_dev)
+static int qede_dev_close(struct rte_eth_dev *eth_dev)
 {
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       int ret = 0;
 
        PMD_INIT_FUNC_TRACE(edev);
 
+       /* only close in case of the primary process */
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+               return 0;
+
        /* dev_stop() shall cleanup fp resources in hw but without releasing
         * dma memories and sw structures so that dev_start() can be called
         * by the app without reconfiguration. However, in dev_close() we
         * can release all the resources and device can be brought up newly
         */
        if (eth_dev->data->dev_started)
-               qede_dev_stop(eth_dev);
+               ret = qede_dev_stop(eth_dev);
 
-       qede_stop_vport(edev);
+       if (qdev->vport_started)
+               qede_stop_vport(edev);
        qdev->vport_started = false;
        qede_fdir_dealloc_resc(eth_dev);
        qede_dealloc_fp_resc(eth_dev);
@@ -1452,8 +1567,6 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
        eth_dev->data->nb_rx_queues = 0;
        eth_dev->data->nb_tx_queues = 0;
 
-       /* Bring the link down */
-       qede_dev_set_link_state(eth_dev, false);
        qdev->ops->common->slowpath_stop(edev);
        qdev->ops->common->remove(edev);
        rte_intr_disable(&pci_dev->intr_handle);
@@ -1473,6 +1586,8 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
 
        if (ECORE_IS_CMT(edev))
                rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
+
+       return ret;
 }
 
 static int
@@ -1481,7 +1596,7 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct ecore_eth_stats stats;
-       unsigned int i = 0, j = 0, qid;
+       unsigned int i = 0, j = 0, qid, idx, hw_fn;
        unsigned int rxq_stat_cntrs, txq_stat_cntrs;
        struct qede_tx_queue *txq;
 
@@ -1517,44 +1632,59 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
        eth_stats->oerrors = stats.common.tx_err_drop_pkts;
 
        /* Queue stats */
-       rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+       rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(eth_dev),
                               RTE_ETHDEV_QUEUE_STAT_CNTRS);
-       txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+       txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(eth_dev),
                               RTE_ETHDEV_QUEUE_STAT_CNTRS);
-       if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
-           (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
+       if (rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(eth_dev) ||
+           txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(eth_dev))
                DP_VERBOSE(edev, ECORE_MSG_DEBUG,
                       "Not all the queue stats will be displayed. Set"
                       " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
                       " appropriately and retry.\n");
 
-       for_each_rss(qid) {
-               eth_stats->q_ipackets[i] =
-                       *(uint64_t *)(
-                               ((char *)(qdev->fp_array[qid].rxq)) +
-                               offsetof(struct qede_rx_queue,
-                               rcv_pkts));
-               eth_stats->q_errors[i] =
-                       *(uint64_t *)(
-                               ((char *)(qdev->fp_array[qid].rxq)) +
-                               offsetof(struct qede_rx_queue,
-                               rx_hw_errors)) +
-                       *(uint64_t *)(
-                               ((char *)(qdev->fp_array[qid].rxq)) +
-                               offsetof(struct qede_rx_queue,
-                               rx_alloc_errors));
+       for (qid = 0; qid < eth_dev->data->nb_rx_queues; qid++) {
+               eth_stats->q_ipackets[i] = 0;
+               eth_stats->q_errors[i] = 0;
+
+               for_each_hwfn(edev, hw_fn) {
+                       idx = qid * edev->num_hwfns + hw_fn;
+
+                       eth_stats->q_ipackets[i] +=
+                               *(uint64_t *)
+                                       (((char *)(qdev->fp_array[idx].rxq)) +
+                                        offsetof(struct qede_rx_queue,
+                                        rcv_pkts));
+                       eth_stats->q_errors[i] +=
+                               *(uint64_t *)
+                                       (((char *)(qdev->fp_array[idx].rxq)) +
+                                        offsetof(struct qede_rx_queue,
+                                        rx_hw_errors)) +
+                               *(uint64_t *)
+                                       (((char *)(qdev->fp_array[idx].rxq)) +
+                                        offsetof(struct qede_rx_queue,
+                                        rx_alloc_errors));
+               }
+
                i++;
                if (i == rxq_stat_cntrs)
                        break;
        }
 
-       for_each_tss(qid) {
-               txq = qdev->fp_array[qid].txq;
-               eth_stats->q_opackets[j] =
-                       *((uint64_t *)(uintptr_t)
-                               (((uint64_t)(uintptr_t)(txq)) +
-                                offsetof(struct qede_tx_queue,
-                                         xmit_pkts)));
+       for (qid = 0; qid < eth_dev->data->nb_tx_queues; qid++) {
+               eth_stats->q_opackets[j] = 0;
+
+               for_each_hwfn(edev, hw_fn) {
+                       idx = qid * edev->num_hwfns + hw_fn;
+
+                       txq = qdev->fp_array[idx].txq;
+                       eth_stats->q_opackets[j] +=
+                               *((uint64_t *)(uintptr_t)
+                                       (((uint64_t)(uintptr_t)(txq)) +
+                                        offsetof(struct qede_tx_queue,
+                                                 xmit_pkts)));
+               }
+
                j++;
                if (j == txq_stat_cntrs)
                        break;
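Note the aggregation pattern here: the per-queue counters visible to the application are the sum over both engines' sub-queues, using the same idx = qid * num_hwfns + hw_fn layout shown earlier. With two hwfns, for example, app Rx queue 1 sums fp_array[2] and fp_array[3].
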
@@ -1565,18 +1695,18 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
 
 static unsigned
 qede_get_xstats_count(struct qede_dev *qdev) {
+       struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
+
        if (ECORE_IS_BB(&qdev->edev))
                return RTE_DIM(qede_xstats_strings) +
                       RTE_DIM(qede_bb_xstats_strings) +
                       (RTE_DIM(qede_rxq_xstats_strings) *
-                       RTE_MIN(QEDE_RSS_COUNT(qdev),
-                               RTE_ETHDEV_QUEUE_STAT_CNTRS));
+                       QEDE_RSS_COUNT(dev) * qdev->edev.num_hwfns);
        else
                return RTE_DIM(qede_xstats_strings) +
                       RTE_DIM(qede_ah_xstats_strings) +
                       (RTE_DIM(qede_rxq_xstats_strings) *
-                       RTE_MIN(QEDE_RSS_COUNT(qdev),
-                               RTE_ETHDEV_QUEUE_STAT_CNTRS));
+                       QEDE_RSS_COUNT(dev));
 }
 
 static int
@@ -1587,42 +1717,43 @@ qede_get_xstats_names(struct rte_eth_dev *dev,
        struct qede_dev *qdev = dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        const unsigned int stat_cnt = qede_get_xstats_count(qdev);
-       unsigned int i, qid, stat_idx = 0;
-       unsigned int rxq_stat_cntrs;
+       unsigned int i, qid, hw_fn, stat_idx = 0;
+
+       if (xstats_names == NULL)
+               return stat_cnt;
 
-       if (xstats_names != NULL) {
-               for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
+       for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
+               strlcpy(xstats_names[stat_idx].name,
+                       qede_xstats_strings[i].name,
+                       sizeof(xstats_names[stat_idx].name));
+               stat_idx++;
+       }
+
+       if (ECORE_IS_BB(edev)) {
+               for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
                        strlcpy(xstats_names[stat_idx].name,
-                               qede_xstats_strings[i].name,
+                               qede_bb_xstats_strings[i].name,
                                sizeof(xstats_names[stat_idx].name));
                        stat_idx++;
                }
-
-               if (ECORE_IS_BB(edev)) {
-                       for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
-                               strlcpy(xstats_names[stat_idx].name,
-                                       qede_bb_xstats_strings[i].name,
-                                       sizeof(xstats_names[stat_idx].name));
-                               stat_idx++;
-                       }
-               } else {
-                       for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
-                               strlcpy(xstats_names[stat_idx].name,
-                                       qede_ah_xstats_strings[i].name,
-                                       sizeof(xstats_names[stat_idx].name));
-                               stat_idx++;
-                       }
+       } else {
+               for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
+                       strlcpy(xstats_names[stat_idx].name,
+                               qede_ah_xstats_strings[i].name,
+                               sizeof(xstats_names[stat_idx].name));
+                       stat_idx++;
                }
+       }
 
-               rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
-                                        RTE_ETHDEV_QUEUE_STAT_CNTRS);
-               for (qid = 0; qid < rxq_stat_cntrs; qid++) {
+       for (qid = 0; qid < QEDE_RSS_COUNT(dev); qid++) {
+               for_each_hwfn(edev, hw_fn) {
                        for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
                                snprintf(xstats_names[stat_idx].name,
-                                       sizeof(xstats_names[stat_idx].name),
-                                       "%.4s%d%s",
-                                       qede_rxq_xstats_strings[i].name, qid,
-                                       qede_rxq_xstats_strings[i].name + 4);
+                                        RTE_ETH_XSTATS_NAME_SIZE,
+                                        "%.4s%d.%d%s",
+                                        qede_rxq_xstats_strings[i].name,
+                                        hw_fn, qid,
+                                        qede_rxq_xstats_strings[i].name + 4);
                                stat_idx++;
                        }
                }
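For reference, the "%.4s%d.%d%s" format splices the engine and queue numbers in after the four-character "rx_q" prefix: a hypothetical entry named rx_q_rx_hw_errors on hw_fn 0, queue 2 would be reported as "rx_q0.2_rx_hw_errors".
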
@@ -1639,8 +1770,7 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
        struct ecore_dev *edev = &qdev->edev;
        struct ecore_eth_stats stats;
        const unsigned int num = qede_get_xstats_count(qdev);
-       unsigned int i, qid, stat_idx = 0;
-       unsigned int rxq_stat_cntrs;
+       unsigned int i, qid, hw_fn, fpidx, stat_idx = 0;
 
        if (n < num)
                return num;
@@ -1672,24 +1802,24 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                }
        }
 
-       rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
-                                RTE_ETHDEV_QUEUE_STAT_CNTRS);
-       for (qid = 0; qid < rxq_stat_cntrs; qid++) {
-               for_each_rss(qid) {
+       for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
+               for_each_hwfn(edev, hw_fn) {
                        for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
-                               xstats[stat_idx].value = *(uint64_t *)(
-                                       ((char *)(qdev->fp_array[qid].rxq)) +
+                               fpidx = qid * edev->num_hwfns + hw_fn;
+                               xstats[stat_idx].value = *(uint64_t *)
+                                       (((char *)(qdev->fp_array[fpidx].rxq)) +
                                         qede_rxq_xstats_strings[i].offset);
                                xstats[stat_idx].id = stat_idx;
                                stat_idx++;
                        }
+
                }
        }
 
        return stat_idx;
 }
 
-static void
+static int
 qede_reset_xstats(struct rte_eth_dev *dev)
 {
        struct qede_dev *qdev = dev->data->dev_private;
@@ -1697,6 +1827,8 @@ qede_reset_xstats(struct rte_eth_dev *dev)
 
        ecore_reset_vport_stats(edev);
        qede_reset_queue_stats(qdev, true);
+
+       return 0;
 }
 
 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
@@ -1726,39 +1858,48 @@ static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
        return qede_dev_set_link_state(eth_dev, false);
 }
 
-static void qede_reset_stats(struct rte_eth_dev *eth_dev)
+static int qede_reset_stats(struct rte_eth_dev *eth_dev)
 {
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
 
        ecore_reset_vport_stats(edev);
        qede_reset_queue_stats(qdev, false);
+
+       return 0;
 }
 
-static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
+static int qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
 {
        enum qed_filter_rx_mode_type type =
            QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+       enum _ecore_status_t ecore_status;
 
        if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
-               type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
+               type = QED_FILTER_RX_MODE_TYPE_PROMISC;
+       ecore_status = qed_configure_filter_rx_mode(eth_dev, type);
 
-       qed_configure_filter_rx_mode(eth_dev, type);
+       return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
 }
 
-static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
+static int qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
 {
+       enum _ecore_status_t ecore_status;
+
        if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
-               qed_configure_filter_rx_mode(eth_dev,
+               ecore_status = qed_configure_filter_rx_mode(eth_dev,
                                QED_FILTER_RX_MODE_TYPE_PROMISC);
        else
-               qed_configure_filter_rx_mode(eth_dev,
+               ecore_status = qed_configure_filter_rx_mode(eth_dev,
                                QED_FILTER_RX_MODE_TYPE_REGULAR);
+
+       return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
 }
 
 static int
-qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
-                     uint32_t mc_addrs_num)
+qede_set_mc_addr_list(struct rte_eth_dev *eth_dev,
+               struct rte_ether_addr *mc_addrs,
+               uint32_t mc_addrs_num)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
@@ -1771,7 +1912,7 @@ qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
        }
 
        for (i = 0; i < mc_addrs_num; i++) {
-               if (!is_multicast_ether_addr(&mc_addrs[i])) {
+               if (!rte_is_multicast_ether_addr(&mc_addrs[i])) {
                        DP_ERR(edev, "Not a valid multicast MAC\n");
                        return -EINVAL;
                }
@@ -1871,12 +2012,12 @@ static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
        }
 
        /* Pause is assumed to be supported (SUPPORTED_Pause) */
-       if (fc_conf->mode == RTE_FC_FULL)
+       if (fc_conf->mode == RTE_ETH_FC_FULL)
                params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
                                        QED_LINK_PAUSE_RX_ENABLE);
-       if (fc_conf->mode == RTE_FC_TX_PAUSE)
+       if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE)
                params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
-       if (fc_conf->mode == RTE_FC_RX_PAUSE)
+       if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE)
                params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
 
        params.link_up = true;
@@ -1900,13 +2041,13 @@ static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 
        if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
                                         QED_LINK_PAUSE_TX_ENABLE))
-               fc_conf->mode = RTE_FC_FULL;
+               fc_conf->mode = RTE_ETH_FC_FULL;
        else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
-               fc_conf->mode = RTE_FC_RX_PAUSE;
+               fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
        else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
-               fc_conf->mode = RTE_FC_TX_PAUSE;
+               fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
        else
-               fc_conf->mode = RTE_FC_NONE;
+               fc_conf->mode = RTE_ETH_FC_NONE;
 
        return 0;
 }
@@ -1936,7 +2077,9 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
                RTE_PTYPE_UNKNOWN
        };
 
-       if (eth_dev->rx_pkt_burst == qede_recv_pkts)
+       if (eth_dev->rx_pkt_burst == qede_recv_pkts ||
+           eth_dev->rx_pkt_burst == qede_recv_pkts_regular ||
+           eth_dev->rx_pkt_burst == qede_recv_pkts_cmt)
                return ptypes;
 
        return NULL;
@@ -1945,14 +2088,14 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
 {
        *rss_caps = 0;
-       *rss_caps |= (hf & ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
-       *rss_caps |= (hf & ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
-       *rss_caps |= (hf & ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
-       *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
-       *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
-       *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
-       *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? ECORE_RSS_IPV4_UDP : 0;
-       *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? ECORE_RSS_IPV6_UDP : 0;
+       *rss_caps |= (hf & RTE_ETH_RSS_IPV4)              ? ECORE_RSS_IPV4 : 0;
+       *rss_caps |= (hf & RTE_ETH_RSS_IPV6)              ? ECORE_RSS_IPV6 : 0;
+       *rss_caps |= (hf & RTE_ETH_RSS_IPV6_EX)           ? ECORE_RSS_IPV6 : 0;
+       *rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)  ? ECORE_RSS_IPV4_TCP : 0;
+       *rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)  ? ECORE_RSS_IPV6_TCP : 0;
+       *rss_caps |= (hf & RTE_ETH_RSS_IPV6_TCP_EX)       ? ECORE_RSS_IPV6_TCP : 0;
+       *rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)  ? ECORE_RSS_IPV4_UDP : 0;
+       *rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)  ? ECORE_RSS_IPV6_UDP : 0;
 }
 
 int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
@@ -1966,8 +2109,7 @@ int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
        uint32_t *key = (uint32_t *)rss_conf->rss_key;
        uint64_t hf = rss_conf->rss_hf;
        uint8_t len = rss_conf->rss_key_len;
-       uint8_t idx;
-       uint8_t i;
+       uint8_t idx, i, j, fpidx;
        int rc;
 
        memset(&vport_update_params, 0, sizeof(vport_update_params));
@@ -1987,8 +2129,10 @@ int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
                /* RSS hash key */
                if (key) {
                        if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
-                               DP_ERR(edev, "RSS key length exceeds limit\n");
-                               return -EINVAL;
+                               len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
+                               DP_NOTICE(edev, false,
+                                         "RSS key length too big, trimmed to %d\n",
+                                         len);
                        }
                        DP_INFO(edev, "Applying user supplied hash key\n");
                        rss_params.update_rss_key = 1;
@@ -2001,14 +2145,18 @@ int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
        /* tbl_size has to be set with capabilities */
        rss_params.rss_table_size_log = 7;
        vport_update_params.vport_id = 0;
-       /* pass the L2 handles instead of qids */
-       for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
-               idx = i % QEDE_RSS_COUNT(qdev);
-               rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
-       }
-       vport_update_params.rss_params = &rss_params;
 
        for_each_hwfn(edev, i) {
+               /* pass the L2 handles instead of qids */
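+               /* fp_array interleaves the queues of all engines, so the
+                * rxq backing RSS index 'idx' on hwfn 'i' sits at
+                * idx * num_hwfns + i.
+                */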
+               for (j = 0 ; j < ECORE_RSS_IND_TABLE_SIZE ; j++) {
+                       idx = j % QEDE_RSS_COUNT(eth_dev);
+                       fpidx = idx * edev->num_hwfns + i;
+                       rss_params.rss_ind_table[j] =
+                               qdev->fp_array[fpidx].rxq->handle;
+               }
+
+               vport_update_params.rss_params = &rss_params;
+
                p_hwfn = &edev->hwfns[i];
                vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
@@ -2060,61 +2208,6 @@ static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
        return 0;
 }
 
-static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
-                                   struct ecore_rss_params *rss)
-{
-       int i, fn;
-       bool rss_mode = 1; /* enable */
-       struct ecore_queue_cid *cid;
-       struct ecore_rss_params *t_rss;
-
-       /* In regular scenario, we'd simply need to take input handlers.
-        * But in CMT, we'd have to split the handlers according to the
-        * engine they were configured on. We'd then have to understand
-        * whether RSS is really required, since 2-queues on CMT doesn't
-        * require RSS.
-        */
-
-       /* CMT should be round-robin */
-       for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
-               cid = rss->rss_ind_table[i];
-
-               if (cid->p_owner == ECORE_LEADING_HWFN(edev))
-                       t_rss = &rss[0];
-               else
-                       t_rss = &rss[1];
-
-               t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
-       }
-
-       t_rss = &rss[1];
-       t_rss->update_rss_ind_table = 1;
-       t_rss->rss_table_size_log = 7;
-       t_rss->update_rss_config = 1;
-
-       /* Make sure RSS is actually required */
-       for_each_hwfn(edev, fn) {
-               for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
-                    i++) {
-                       if (rss[fn].rss_ind_table[i] !=
-                           rss[fn].rss_ind_table[0])
-                               break;
-               }
-
-               if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
-                       DP_INFO(edev,
-                               "CMT - 1 queue per-hwfn; Disabling RSS\n");
-                       rss_mode = 0;
-                       goto out;
-               }
-       }
-
-out:
-       t_rss->rss_enable = rss_mode;
-
-       return rss_mode;
-}
-
 int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
                         struct rte_eth_rss_reta_entry64 *reta_conf,
                         uint16_t reta_size)
@@ -2123,52 +2216,48 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_update_params vport_update_params;
        struct ecore_rss_params *params;
+       uint16_t i, j, idx, fid, shift;
        struct ecore_hwfn *p_hwfn;
-       uint16_t i, idx, shift;
        uint8_t entry;
        int rc = 0;
 
-       if (reta_size > ETH_RSS_RETA_SIZE_128) {
+       if (reta_size > RTE_ETH_RSS_RETA_SIZE_128) {
                DP_ERR(edev, "reta_size %d is not supported by hardware\n",
                       reta_size);
                return -EINVAL;
        }
 
        memset(&vport_update_params, 0, sizeof(vport_update_params));
-       params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
-                            RTE_CACHE_LINE_SIZE);
+       params = rte_zmalloc("qede_rss", sizeof(*params), RTE_CACHE_LINE_SIZE);
        if (params == NULL) {
                DP_ERR(edev, "failed to allocate memory\n");
                return -ENOMEM;
        }
 
-       for (i = 0; i < reta_size; i++) {
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
-               if (reta_conf[idx].mask & (1ULL << shift)) {
-                       entry = reta_conf[idx].reta[shift];
-                       /* Pass rxq handles to ecore */
-                       params->rss_ind_table[i] =
-                                       qdev->fp_array[entry].rxq->handle;
-                       /* Update the local copy for RETA query command */
-                       qdev->rss_ind_table[i] = entry;
-               }
-       }
-
        params->update_rss_ind_table = 1;
        params->rss_table_size_log = 7;
        params->update_rss_config = 1;
 
-       /* Fix up RETA for CMT mode device */
-       if (ECORE_IS_CMT(edev))
-               qdev->rss_enable = qede_update_rss_parm_cmt(edev,
-                                                           params);
        vport_update_params.vport_id = 0;
        /* Use the current value of rss_enable */
        params->rss_enable = qdev->rss_enable;
        vport_update_params.rss_params = params;
 
        for_each_hwfn(edev, i) {
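+               /* Build an engine-local indirection table: each RETA entry
+                * is translated to the rxq handle owned by this hwfn
+                * (queues are interleaved across engines in fp_array).
+                */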
+               for (j = 0; j < reta_size; j++) {
+                       idx = j / RTE_ETH_RETA_GROUP_SIZE;
+                       shift = j % RTE_ETH_RETA_GROUP_SIZE;
+                       if (reta_conf[idx].mask & (1ULL << shift)) {
+                               entry = reta_conf[idx].reta[shift];
+                               fid = entry * edev->num_hwfns + i;
+                               /* Pass rxq handles to ecore */
+                               params->rss_ind_table[j] =
+                                               qdev->fp_array[fid].rxq->handle;
+                               /* Update the local copy for RETA query cmd */
+                               qdev->rss_ind_table[j] = entry;
+                       }
+               }
+
                p_hwfn = &edev->hwfns[i];
                vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
@@ -2193,15 +2282,15 @@ static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
        uint16_t i, idx, shift;
        uint8_t entry;
 
-       if (reta_size > ETH_RSS_RETA_SIZE_128) {
+       if (reta_size > RTE_ETH_RSS_RETA_SIZE_128) {
                DP_ERR(edev, "reta_size %d is not supported\n",
                       reta_size);
                return -EINVAL;
        }
 
        for (i = 0; i < reta_size; i++) {
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift)) {
                        entry = qdev->rss_ind_table[i];
                        reta_conf[idx].reta[shift] = entry;
@@ -2217,45 +2306,33 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       struct rte_eth_dev_info dev_info = {0};
        struct qede_fastpath *fp;
-       uint32_t max_rx_pkt_len;
        uint32_t frame_size;
        uint16_t bufsz;
        bool restart = false;
        int i, rc;
 
        PMD_INIT_FUNC_TRACE(edev);
-       qede_dev_info_get(dev, &dev_info);
-       max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
-       frame_size = max_rx_pkt_len;
-       if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
-               DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
-                      mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
-                      QEDE_ETH_OVERHEAD);
-               return -EINVAL;
-       }
+
+       frame_size = mtu + QEDE_MAX_ETHER_HDR_LEN;
        if (!dev->data->scattered_rx &&
            frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
                DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
                        dev->data->min_rx_buf_size);
                return -EINVAL;
        }
-       /* Temporarily replace I/O functions with dummy ones. It cannot
-        * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
-        */
-       dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
-       dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
        if (dev->data->dev_started) {
                dev->data->dev_started = 0;
-               qede_dev_stop(dev);
+               rc = qede_dev_stop(dev);
+               if (rc != 0)
+                       return rc;
                restart = true;
        }
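+       /* Give any in-flight traffic a moment to drain before the Rx
+        * buffers are resized.
+        */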
        rte_delay_ms(1000);
-       qdev->mtu = mtu;
+       qdev->new_mtu = mtu;
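+       /* Record the requested MTU so the (re)start below can apply it */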
 
        /* Fix up RX buf size for all queues of the port */
-       for_each_rss(i) {
+       for (i = 0; i < qdev->num_rx_queues; i++) {
                fp = &qdev->fp_array[i];
                if (fp->rxq != NULL) {
                        bufsz = (uint16_t)rte_pktmbuf_data_room_size(
@@ -2271,22 +2348,12 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
                        fp->rxq->rx_buf_size = rc;
                }
        }
-       if (max_rx_pkt_len > ETHER_MAX_LEN)
-               dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-       else
-               dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
        if (!dev->data->dev_started && restart) {
                qede_dev_start(dev);
                dev->data->dev_started = 1;
        }
 
-       /* update max frame size */
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
-       /* Reassign back */
-       dev->rx_pkt_burst = qede_recv_pkts;
-       dev->tx_pkt_burst = qede_xmit_pkts;
-
        return 0;
 }
 
@@ -2302,14 +2369,25 @@ qede_dev_reset(struct rte_eth_dev *dev)
        return qede_eth_dev_init(dev);
 }
 
+static void
+qede_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
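+       /* ethdev now passes the queue id instead of the queue pointer */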
+       qede_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+static void
+qede_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       qede_tx_queue_release(dev->data->tx_queues[qid]);
+}
+
 static const struct eth_dev_ops qede_eth_dev_ops = {
        .dev_configure = qede_dev_configure,
        .dev_infos_get = qede_dev_info_get,
        .rx_queue_setup = qede_rx_queue_setup,
-       .rx_queue_release = qede_rx_queue_release,
-       .rx_descriptor_status = qede_rx_descriptor_status,
+       .rx_queue_release = qede_dev_rx_queue_release,
        .tx_queue_setup = qede_tx_queue_setup,
-       .tx_queue_release = qede_tx_queue_release,
+       .tx_queue_release = qede_dev_tx_queue_release,
        .dev_start = qede_dev_start,
        .dev_reset = qede_dev_reset,
        .dev_set_link_up = qede_dev_set_link_up,
@@ -2340,19 +2418,20 @@ static const struct eth_dev_ops qede_eth_dev_ops = {
        .reta_update  = qede_rss_reta_update,
        .reta_query  = qede_rss_reta_query,
        .mtu_set = qede_set_mtu,
-       .filter_ctrl = qede_dev_filter_ctrl,
+       .flow_ops_get = qede_dev_flow_ops_get,
        .udp_tunnel_port_add = qede_udp_dst_port_add,
        .udp_tunnel_port_del = qede_udp_dst_port_del,
+       .fw_version_get = qede_fw_version_get,
+       .get_reg = qede_get_regs,
 };
 
 static const struct eth_dev_ops qede_eth_vf_dev_ops = {
        .dev_configure = qede_dev_configure,
        .dev_infos_get = qede_dev_info_get,
        .rx_queue_setup = qede_rx_queue_setup,
-       .rx_queue_release = qede_rx_queue_release,
-       .rx_descriptor_status = qede_rx_descriptor_status,
+       .rx_queue_release = qede_dev_rx_queue_release,
        .tx_queue_setup = qede_tx_queue_setup,
-       .tx_queue_release = qede_tx_queue_release,
+       .tx_queue_release = qede_dev_tx_queue_release,
        .dev_start = qede_dev_start,
        .dev_reset = qede_dev_reset,
        .dev_set_link_up = qede_dev_set_link_up,
@@ -2383,6 +2462,7 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
        .mac_addr_add = qede_mac_addr_add,
        .mac_addr_remove = qede_mac_addr_remove,
        .mac_addr_set = qede_mac_addr_set,
+       .fw_version_get = qede_fw_version_get,
 };
 
 static void qede_update_pf_params(struct ecore_dev *edev)
@@ -2395,6 +2475,24 @@ static void qede_update_pf_params(struct ecore_dev *edev)
        qed_ops->common->update_pf_params(edev, &pf_params);
 }
 
+static void qede_generate_random_mac_addr(struct rte_ether_addr *mac_addr)
+{
+       uint64_t random;
+
+       /* Set Organizationally Unique Identifier (OUI) prefix. */
+       mac_addr->addr_bytes[0] = 0x00;
+       mac_addr->addr_bytes[1] = 0x09;
+       mac_addr->addr_bytes[2] = 0xC0;
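+       /* 00:09:C0 OUI; the locally-administered bit below makes it 02:09:C0 */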
+
+       /* Force indication of locally assigned MAC address. */
+       mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR;
+
+       /* Generate the last 3 bytes of the MAC address with a random number. */
+       random = rte_rand();
+
+       memcpy(&mac_addr->addr_bytes[3], &random, 3);
+}
+
 static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 {
        struct rte_pci_device *pci_dev;
@@ -2405,9 +2503,9 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
        struct qed_slowpath_params params;
        static bool do_once = true;
        uint8_t bulletin_change;
-       uint8_t vf_mac[ETHER_ADDR_LEN];
+       uint8_t vf_mac[RTE_ETHER_ADDR_LEN];
        uint8_t is_mac_forced;
-       bool is_mac_exist;
+       bool is_mac_exist = false;
        /* Fix up ecore debug level */
        uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
        uint8_t dp_level = ECORE_LEVEL_VERBOSE;
@@ -2427,16 +2525,13 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
                 pci_addr.bus, pci_addr.devid, pci_addr.function,
                 eth_dev->data->port_id);
 
-       eth_dev->rx_pkt_burst = qede_recv_pkts;
-       eth_dev->tx_pkt_burst = qede_xmit_pkts;
-       eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
-
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                DP_ERR(edev, "Skipping device init from secondary process\n");
                return 0;
        }
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
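+       /* Let the ethdev layer auto-fill per-queue basic stats into xstats */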
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        /* @DPDK */
        edev->vendor_id = pci_dev->id.vendor_id;
@@ -2445,7 +2540,8 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
        qed_ops = qed_get_eth_ops();
        if (!qed_ops) {
                DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
-               return -EINVAL;
+               rc = -EINVAL;
+               goto err;
        }
 
        DP_INFO(edev, "Starting qede probe\n");
@@ -2453,7 +2549,8 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
                                    dp_level, is_vf);
        if (rc != 0) {
                DP_ERR(edev, "qede probe failed rc %d\n", rc);
-               return -ENODEV;
+               rc = -ENODEV;
+               goto err;
        }
        qede_update_pf_params(edev);
 
@@ -2474,7 +2571,8 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 
        if (rte_intr_enable(&pci_dev->intr_handle)) {
                DP_ERR(edev, "rte_intr_enable() failed\n");
-               return -ENODEV;
+               rc = -ENODEV;
+               goto err;
        }
 
        /* Start the Slowpath-process */
@@ -2488,6 +2586,9 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
        strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
                QEDE_PMD_DRV_VER_STR_SIZE);
 
+       qede_assign_rxtx_handlers(eth_dev, true);
+       eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
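+       /* 'true' installs the dummy burst handlers; the real ones are
+        * assigned once the port is started.
+        */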
+
        /* For CMT mode devices, do periodic polling for slowpath events.
         * This is required since the uio device uses only one MSI-X
         * interrupt vector but we need one for each engine.
@@ -2499,7 +2600,8 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
                if (rc != 0) {
                        DP_ERR(edev, "Unable to start periodic"
                                     " timer rc %d\n", rc);
-                       return -EINVAL;
+                       rc = -EINVAL;
+                       goto err;
                }
        }
 
@@ -2508,7 +2610,8 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
                DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
                rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
                                     (void *)eth_dev);
-               return -ENODEV;
+               rc = -ENODEV;
+               goto err;
        }
 
        rc = qed_ops->fill_dev_info(edev, &dev_info);
@@ -2518,11 +2621,17 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
                qed_ops->common->remove(edev);
                rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
                                     (void *)eth_dev);
-               return -ENODEV;
+               rc = -ENODEV;
+               goto err;
        }
 
        qede_alloc_etherdev(adapter, &dev_info);
 
+       if (do_once) {
+               qede_print_adapter_info(eth_dev);
+               do_once = false;
+       }
+
        adapter->ops->common->set_name(edev, edev->name);
 
        if (!is_vf)
@@ -2535,7 +2644,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 
        /* Allocate memory for storing MAC addr */
        eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
-                                       (ETHER_ADDR_LEN *
+                                       (RTE_ETHER_ADDR_LEN *
                                        adapter->dev_info.num_mac_filters),
                                        RTE_CACHE_LINE_SIZE);
 
@@ -2549,10 +2658,10 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
        }
 
        if (!is_vf) {
-               ether_addr_copy((struct ether_addr *)edev->hwfns[0].
+               rte_ether_addr_copy((struct rte_ether_addr *)edev->hwfns[0].
                                hw_info.hw_mac_addr,
                                &eth_dev->data->mac_addrs[0]);
-               ether_addr_copy(&eth_dev->data->mac_addrs[0],
+               rte_ether_addr_copy(&eth_dev->data->mac_addrs[0],
                                &adapter->primary_mac);
        } else {
                ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
@@ -2565,25 +2674,34 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
                                                &is_mac_forced);
                        if (is_mac_exist) {
                                DP_INFO(edev, "VF macaddr received from PF\n");
-                               ether_addr_copy((struct ether_addr *)&vf_mac,
-                                               &eth_dev->data->mac_addrs[0]);
-                               ether_addr_copy(&eth_dev->data->mac_addrs[0],
-                                               &adapter->primary_mac);
+                               rte_ether_addr_copy(
+                                       (struct rte_ether_addr *)&vf_mac,
+                                       &eth_dev->data->mac_addrs[0]);
+                               rte_ether_addr_copy(
+                                       &eth_dev->data->mac_addrs[0],
+                                       &adapter->primary_mac);
                        } else {
                                DP_ERR(edev, "No VF macaddr assigned\n");
                        }
                }
-       }
 
-       eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
+               /* If the PF did not provide a MAC address, generate a random one */
+               if (!is_mac_exist) {
+                       struct rte_ether_addr *mac_addr;
 
-       if (do_once) {
-               qede_print_adapter_info(adapter);
-               do_once = false;
+                       mac_addr = (struct rte_ether_addr *)&vf_mac;
+                       qede_generate_random_mac_addr(mac_addr);
+
+                       rte_ether_addr_copy(mac_addr,
+                                           &eth_dev->data->mac_addrs[0]);
+
+                       rte_ether_addr_copy(&eth_dev->data->mac_addrs[0],
+                                           &adapter->primary_mac);
+               }
        }
 
-       /* Bring-up the link */
-       qede_dev_set_link_state(eth_dev, true);
+       eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
+       eth_dev->rx_descriptor_status = qede_rx_descriptor_status;
 
        adapter->num_tx_queues = 0;
        adapter->num_rx_queues = 0;
@@ -2591,7 +2709,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
        SLIST_INIT(&adapter->vlan_list_head);
        SLIST_INIT(&adapter->uc_list_head);
        SLIST_INIT(&adapter->mc_list_head);
-       adapter->mtu = ETHER_MTU;
+       adapter->mtu = RTE_ETHER_MTU;
        adapter->vport_started = false;
 
        /* VF tunnel offloads is enabled by default in PF driver */
@@ -2600,33 +2718,36 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
        adapter->ipgre.num_filters = 0;
        if (is_vf) {
                adapter->vxlan.enable = true;
-               adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
-                                            ETH_TUNNEL_FILTER_IVLAN;
+               adapter->vxlan.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+                                            RTE_ETH_TUNNEL_FILTER_IVLAN;
                adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
                adapter->geneve.enable = true;
-               adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
-                                             ETH_TUNNEL_FILTER_IVLAN;
+               adapter->geneve.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+                                             RTE_ETH_TUNNEL_FILTER_IVLAN;
                adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
                adapter->ipgre.enable = true;
-               adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
-                                            ETH_TUNNEL_FILTER_IVLAN;
+               adapter->ipgre.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC |
+                                            RTE_ETH_TUNNEL_FILTER_IVLAN;
        } else {
                adapter->vxlan.enable = false;
                adapter->geneve.enable = false;
                adapter->ipgre.enable = false;
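+               /* PF only: enable SR-IOV for the VFs requested on the bus, if any */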
+               qed_ops->sriov_configure(edev, pci_dev->max_vfs);
        }
 
-       DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
-               adapter->primary_mac.addr_bytes[0],
-               adapter->primary_mac.addr_bytes[1],
-               adapter->primary_mac.addr_bytes[2],
-               adapter->primary_mac.addr_bytes[3],
-               adapter->primary_mac.addr_bytes[4],
-               adapter->primary_mac.addr_bytes[5]);
+       DP_INFO(edev, "MAC address : " RTE_ETHER_ADDR_PRT_FMT "\n",
+               RTE_ETHER_ADDR_BYTES(&adapter->primary_mac));
 
        DP_INFO(edev, "Device initialized\n");
 
        return 0;
+
+err:
+       if (do_once) {
+               qede_print_adapter_info(eth_dev);
+               do_once = false;
+       }
+       return rc;
 }
 
 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
@@ -2643,20 +2764,8 @@ static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
 {
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
-
        PMD_INIT_FUNC_TRACE(edev);
-
-       /* only uninitialize in the primary process */
-       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-               return 0;
-
-       /* safe to close dev here */
        qede_dev_close(eth_dev);
-
-       eth_dev->dev_ops = NULL;
-       eth_dev->rx_pkt_burst = NULL;
-       eth_dev->tx_pkt_burst = NULL;
-
        return 0;
 }
 
@@ -2733,8 +2842,7 @@ static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_qedevf_pmd = {
        .id_table = pci_id_qedevf_map,
-       .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
-                    RTE_PCI_DRV_IOVA_AS_VA,
+       .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = qedevf_eth_dev_pci_probe,
        .remove = qedevf_eth_dev_pci_remove,
 };
@@ -2753,8 +2861,7 @@ static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_qede_pmd = {
        .id_table = pci_id_qede_map,
-       .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
-                    RTE_PCI_DRV_IOVA_AS_VA,
+       .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = qede_eth_dev_pci_probe,
        .remove = qede_eth_dev_pci_remove,
 };
@@ -2765,13 +2872,5 @@ RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
-
-RTE_INIT(qede_init_log)
-{
-       qede_logtype_init = rte_log_register("pmd.net.qede.init");
-       if (qede_logtype_init >= 0)
-               rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE);
-       qede_logtype_driver = rte_log_register("pmd.net.qede.driver");
-       if (qede_logtype_driver >= 0)
-               rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE);
-}
+RTE_LOG_REGISTER_SUFFIX(qede_logtype_init, init, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(qede_logtype_driver, driver, NOTICE);