fix various typos found by Lintian
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 1af0427..1542073 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
  */
 
 #include "qede_ethdev.h"
+#include <rte_string_fns.h>
 #include <rte_alarm.h>
 #include <rte_version.h>
+#include <rte_kvargs.h>
 
 /* Globals */
+int qede_logtype_init;
+int qede_logtype_driver;
+
 static const struct qed_eth_ops *qed_ops;
-static int64_t timer_period = 1;
-
-/* VXLAN tunnel classification mapping */
-const struct _qede_vxlan_tunn_types {
-       uint16_t rte_filter_type;
-       enum ecore_filter_ucast_type qede_type;
-       enum ecore_tunn_clss qede_tunn_clss;
-       const char *string;
-} qede_tunn_types[] = {
-       {
-               ETH_TUNNEL_FILTER_OMAC,
-               ECORE_FILTER_MAC,
-               ECORE_TUNN_CLSS_MAC_VLAN,
-               "outer-mac"
-       },
-       {
-               ETH_TUNNEL_FILTER_TENID,
-               ECORE_FILTER_VNI,
-               ECORE_TUNN_CLSS_MAC_VNI,
-               "vni"
-       },
-       {
-               ETH_TUNNEL_FILTER_IMAC,
-               ECORE_FILTER_INNER_MAC,
-               ECORE_TUNN_CLSS_INNER_MAC_VLAN,
-               "inner-mac"
-       },
-       {
-               ETH_TUNNEL_FILTER_IVLAN,
-               ECORE_FILTER_INNER_VLAN,
-               ECORE_TUNN_CLSS_INNER_MAC_VLAN,
-               "inner-vlan"
-       },
-       {
-               ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
-               ECORE_FILTER_MAC_VNI_PAIR,
-               ECORE_TUNN_CLSS_MAC_VNI,
-               "outer-mac and vni"
-       },
-       {
-               ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
-               ECORE_FILTER_UNUSED,
-               MAX_ECORE_TUNN_CLSS,
-               "outer-mac and inner-mac"
-       },
-       {
-               ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
-               ECORE_FILTER_UNUSED,
-               MAX_ECORE_TUNN_CLSS,
-               "outer-mac and inner-vlan"
-       },
-       {
-               ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
-               ECORE_FILTER_INNER_MAC_VNI_PAIR,
-               ECORE_TUNN_CLSS_INNER_MAC_VNI,
-               "vni and inner-mac",
-       },
-       {
-               ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
-               ECORE_FILTER_UNUSED,
-               MAX_ECORE_TUNN_CLSS,
-               "vni and inner-vlan",
-       },
-       {
-               ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
-               ECORE_FILTER_INNER_PAIR,
-               ECORE_TUNN_CLSS_INNER_MAC_VLAN,
-               "inner-mac and inner-vlan",
-       },
-       {
-               ETH_TUNNEL_FILTER_OIP,
-               ECORE_FILTER_UNUSED,
-               MAX_ECORE_TUNN_CLSS,
-               "outer-IP"
-       },
-       {
-               ETH_TUNNEL_FILTER_IIP,
-               ECORE_FILTER_UNUSED,
-               MAX_ECORE_TUNN_CLSS,
-               "inner-IP"
-       },
-       {
-               RTE_TUNNEL_FILTER_IMAC_IVLAN,
-               ECORE_FILTER_UNUSED,
-               MAX_ECORE_TUNN_CLSS,
-               "IMAC_IVLAN"
-       },
-       {
-               RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
-               ECORE_FILTER_UNUSED,
-               MAX_ECORE_TUNN_CLSS,
-               "IMAC_IVLAN_TENID"
-       },
-       {
-               RTE_TUNNEL_FILTER_IMAC_TENID,
-               ECORE_FILTER_UNUSED,
-               MAX_ECORE_TUNN_CLSS,
-               "IMAC_TENID"
-       },
-       {
-               RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
-               ECORE_FILTER_UNUSED,
-               MAX_ECORE_TUNN_CLSS,
-               "OMAC_TENID_IMAC"
-       },
-};
+static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev);
+static int qede_eth_dev_init(struct rte_eth_dev *eth_dev);
+
+#define QEDE_SP_TIMER_PERIOD   10000 /* 10ms, in us */
 
 struct rte_qede_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
@@ -224,6 +125,8 @@ static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
                offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
        {"rx_mac_filter_discards",
                offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
+       {"rx_gft_filter_drop",
+               offsetof(struct ecore_eth_stats_common, gft_filter_drop)},
        {"rx_hw_buffer_truncates",
                offsetof(struct ecore_eth_stats_common, brb_truncates)},
        {"rx_hw_buffer_discards",
@@ -334,6 +237,24 @@ static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
        ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
 }
 
+static void
+qede_interrupt_handler_intx(void *param)
+{
+       struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+       struct qede_dev *qdev = eth_dev->data->dev_private;
+       struct ecore_dev *edev = &qdev->edev;
+       u64 status;
+
+       /* Check if our device actually raised an interrupt */
+       status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev));
+       if (status & 0x1) {
+               qede_interrupt_action(ECORE_LEADING_HWFN(edev));
+
+               if (rte_intr_ack(eth_dev->intr_handle))
+                       DP_ERR(edev, "rte_intr_ack failed\n");
+       }
+}
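
  [Editor's note: illustrative only, not part of the patch. Legacy INTx is
  level-triggered and potentially shared, hence the SISR status read above
  before dispatching and acking. A minimal sketch of the matching handler
  registration (assumption: mirrors qede_common_dev_init(), outside this
  hunk, and the unregister switch shown further down in qede_dev_close()):

	switch (pci_dev->intr_handle.type) {
	case RTE_INTR_HANDLE_UIO_INTX:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler_intx,
					   (void *)eth_dev);
		break;
	default:
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler,
					   (void *)eth_dev);
	}
  ]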
+
 static void
 qede_interrupt_handler(void *param)
 {
@@ -342,8 +263,44 @@ qede_interrupt_handler(void *param)
        struct ecore_dev *edev = &qdev->edev;
 
        qede_interrupt_action(ECORE_LEADING_HWFN(edev));
-       if (rte_intr_enable(eth_dev->intr_handle))
-               DP_ERR(edev, "rte_intr_enable failed\n");
+       if (rte_intr_ack(eth_dev->intr_handle))
+               DP_ERR(edev, "rte_intr_ack failed\n");
+}
+
+static void
+qede_assign_rxtx_handlers(struct rte_eth_dev *dev)
+{
+       uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+       struct qede_dev *qdev = dev->data->dev_private;
+       struct ecore_dev *edev = &qdev->edev;
+       bool use_tx_offload = false;
+
+       if (ECORE_IS_CMT(edev)) {
+               dev->rx_pkt_burst = qede_recv_pkts_cmt;
+               dev->tx_pkt_burst = qede_xmit_pkts_cmt;
+               return;
+       }
+
+       if (dev->data->lro || dev->data->scattered_rx) {
+               DP_INFO(edev, "Assigning qede_recv_pkts\n");
+               dev->rx_pkt_burst = qede_recv_pkts;
+       } else {
+               DP_INFO(edev, "Assigning qede_recv_pkts_regular\n");
+               dev->rx_pkt_burst = qede_recv_pkts_regular;
+       }
+
+       use_tx_offload = !!(tx_offloads &
+                           (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
+                            DEV_TX_OFFLOAD_TCP_TSO | /* tso */
+                            DEV_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */
+
+       if (use_tx_offload) {
+               DP_INFO(edev, "Assigning qede_xmit_pkts\n");
+               dev->tx_pkt_burst = qede_xmit_pkts;
+       } else {
+               DP_INFO(edev, "Assigning qede_xmit_pkts_regular\n");
+               dev->tx_pkt_burst = qede_xmit_pkts_regular;
+       }
 }
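
  [Editor's note: illustrative only. The burst handlers picked above follow
  from the offloads requested at configure time, so an application steers
  the selection. A minimal sketch, assuming port 0 with one queue pair and
  default devargs:

	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	/* No TSO / VLAN insert / outer cksum requested: the lighter
	 * qede_xmit_pkts_regular handler is chosen.
	 */
	conf.txmode.offloads = 0;
	/* Requesting e.g. DEV_TX_OFFLOAD_TCP_TSO here would select the
	 * full-featured qede_xmit_pkts instead.
	 */
	rte_eth_dev_configure(0, 1, 1, &conf);
  ]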
 
 static void
@@ -353,90 +310,166 @@ qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
        qdev->ops = qed_ops;
 }
 
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
 static void qede_print_adapter_info(struct qede_dev *qdev)
 {
        struct ecore_dev *edev = &qdev->edev;
        struct qed_dev_info *info = &qdev->dev_info.common;
-       static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
        static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
 
-       DP_INFO(edev, "*********************************\n");
-       DP_INFO(edev, " DPDK version:%s\n", rte_version());
-       DP_INFO(edev, " Chip details : %s %c%d\n",
+       DP_INFO(edev, "**************************************************\n");
+       DP_INFO(edev, " DPDK version\t\t\t: %s\n", rte_version());
+       DP_INFO(edev, " Chip details\t\t\t: %s %c%d\n",
                  ECORE_IS_BB(edev) ? "BB" : "AH",
                  'A' + edev->chip_rev,
                  (int)edev->chip_metal);
-       snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
-                info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
-       snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
-                ver_str, QEDE_PMD_VERSION);
-       DP_INFO(edev, " Driver version : %s\n", drv_ver);
-       DP_INFO(edev, " Firmware version : %s\n", ver_str);
+       snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
+                QEDE_PMD_DRV_VERSION);
+       DP_INFO(edev, " Driver version\t\t\t: %s\n", ver_str);
+
+       snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
+                QEDE_PMD_BASE_VERSION);
+       DP_INFO(edev, " Base version\t\t\t: %s\n", ver_str);
+
+       if (!IS_VF(edev))
+               snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
+                        QEDE_PMD_FW_VERSION);
+       else
+               snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
+                        info->fw_major, info->fw_minor,
+                        info->fw_rev, info->fw_eng);
+       DP_INFO(edev, " Firmware version\t\t\t: %s\n", ver_str);
 
        snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
                 "%d.%d.%d.%d",
-               (info->mfw_rev >> 24) & 0xff,
-               (info->mfw_rev >> 16) & 0xff,
-               (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
-       DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
-       DP_INFO(edev, " Firmware file : %s\n", fw_file);
-       DP_INFO(edev, "*********************************\n");
+                (info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
+                QED_MFW_VERSION_3_OFFSET,
+                (info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
+                QED_MFW_VERSION_2_OFFSET,
+                (info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
+                QED_MFW_VERSION_1_OFFSET,
+                (info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
+                QED_MFW_VERSION_0_OFFSET);
+       DP_INFO(edev, " Management Firmware version\t: %s\n", ver_str);
+       DP_INFO(edev, " Firmware file\t\t\t: %s\n", qede_fw_file);
+       DP_INFO(edev, "**************************************************\n");
+}
+
+static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
+{
+       struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       unsigned int i = 0, j = 0, qid;
+       unsigned int rxq_stat_cntrs, txq_stat_cntrs;
+       struct qede_tx_queue *txq;
+
+       DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");
+
+       rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(dev),
+                              RTE_ETHDEV_QUEUE_STAT_CNTRS);
+       txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(dev),
+                              RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
+       for (qid = 0; qid < qdev->num_rx_queues; qid++) {
+               OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+                            offsetof(struct qede_rx_queue, rcv_pkts), 0,
+                           sizeof(uint64_t));
+               OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+                            offsetof(struct qede_rx_queue, rx_hw_errors), 0,
+                           sizeof(uint64_t));
+               OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+                            offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
+                           sizeof(uint64_t));
+
+               if (xstats)
+                       for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++)
+                               OSAL_MEMSET((((char *)
+                                             (qdev->fp_array[qid].rxq)) +
+                                            qede_rxq_xstats_strings[j].offset),
+                                           0,
+                                           sizeof(uint64_t));
+
+               i++;
+               if (i == rxq_stat_cntrs)
+                       break;
+       }
+
+       i = 0;
+
+       for (qid = 0; qid < qdev->num_tx_queues; qid++) {
+               txq = qdev->fp_array[qid].txq;
+
+               OSAL_MEMSET((uint64_t *)(uintptr_t)
+                               (((uint64_t)(uintptr_t)(txq)) +
+                                offsetof(struct qede_tx_queue, xmit_pkts)), 0,
+                           sizeof(uint64_t));
+
+               i++;
+               if (i == txq_stat_cntrs)
+                       break;
+       }
 }
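
  [Editor's note: for readability, each OSAL_MEMSET() above zeroes a single
  uint64_t counter addressed via offsetof(); e.g. the first call is
  equivalent to the plain assignment below (illustrative):

	qdev->fp_array[qid].rxq->rcv_pkts = 0;
  ]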
-#endif
 
 static int
-qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
+qede_stop_vport(struct ecore_dev *edev)
 {
-       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       struct ecore_sp_vport_start_params params;
        struct ecore_hwfn *p_hwfn;
+       uint8_t vport_id;
        int rc;
        int i;
 
-       memset(&params, 0, sizeof(params));
-       params.vport_id = 0;
-       params.mtu = mtu;
-       /* @DPDK - Disable FW placement */
-       params.zero_placement_offset = 1;
+       vport_id = 0;
        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
-               params.concrete_fid = p_hwfn->hw_info.concrete_fid;
-               params.opaque_fid = p_hwfn->hw_info.opaque_fid;
-               rc = ecore_sp_vport_start(p_hwfn, &params);
+               rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
+                                        vport_id);
                if (rc != ECORE_SUCCESS) {
-                       DP_ERR(edev, "Start V-PORT failed %d\n", rc);
+                       DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
                        return rc;
                }
        }
-       ecore_reset_vport_stats(edev);
-       DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
+
+       DP_INFO(edev, "vport stopped\n");
 
        return 0;
 }
 
 static int
-qede_stop_vport(struct ecore_dev *edev)
+qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
 {
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       struct ecore_sp_vport_start_params params;
        struct ecore_hwfn *p_hwfn;
-       uint8_t vport_id;
        int rc;
        int i;
 
-       vport_id = 0;
+       if (qdev->vport_started)
+               qede_stop_vport(edev);
+
+       memset(&params, 0, sizeof(params));
+       params.vport_id = 0;
+       params.mtu = mtu;
+       /* @DPDK - Disable FW placement */
+       params.zero_placement_offset = 1;
        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
-               rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
-                                        vport_id);
+               params.concrete_fid = p_hwfn->hw_info.concrete_fid;
+               params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+               rc = ecore_sp_vport_start(p_hwfn, &params);
                if (rc != ECORE_SUCCESS) {
-                       DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
+                       DP_ERR(edev, "Start V-PORT failed %d\n", rc);
                        return rc;
                }
        }
+       ecore_reset_vport_stats(edev);
+       qdev->vport_started = true;
+       DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
 
        return 0;
 }
 
+#define QEDE_NPAR_TX_SWITCHING         "npar_tx_switching"
+#define QEDE_VF_TX_SWITCHING           "vf_tx_switching"
+
 /* Activate or deactivate vport via vport-update */
 int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
 {
@@ -453,6 +486,10 @@ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
        params.update_vport_active_tx_flg = 1;
        params.vport_active_rx_flg = flg;
        params.vport_active_tx_flg = flg;
+       if (!qdev->enable_tx_switching && flg) {
+               params.update_tx_switching_flg = 1;
+               params.tx_switching_flg = !flg;
+       }
        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                params.opaque_fid = p_hwfn->hw_info.opaque_fid;
@@ -463,7 +500,8 @@ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
                        break;
                }
        }
-       DP_INFO(edev, "vport %s\n", flg ? "activated" : "deactivated");
+       DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");
+
        return rc;
 }
 
@@ -474,8 +512,8 @@ qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
        /* Enable LRO in split mode */
        sge_tpa_params->tpa_ipv4_en_flg = enable;
        sge_tpa_params->tpa_ipv6_en_flg = enable;
-       sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
-       sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
+       sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
+       sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
        /* set if tpa enable changes */
        sge_tpa_params->update_tpa_en_flg = 1;
        /* set if tpa parameters should be handled */
@@ -520,51 +558,14 @@ int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
                        return -1;
                }
        }
+       qdev->enable_lro = flg;
+       eth_dev->data->lro = flg;
 
        DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
 
        return 0;
 }
 
-/* Update MTU via vport-update without doing port restart.
- * The vport must be deactivated before calling this API.
- */
-int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
-{
-       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
-       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       struct ecore_sp_vport_update_params params;
-       struct ecore_hwfn *p_hwfn;
-       int rc;
-       int i;
-
-       memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
-       params.vport_id = 0;
-       params.mtu = mtu;
-       params.vport_id = 0;
-       for_each_hwfn(edev, i) {
-               p_hwfn = &edev->hwfns[i];
-               params.opaque_fid = p_hwfn->hw_info.opaque_fid;
-               rc = ecore_sp_vport_update(p_hwfn, &params,
-                               ECORE_SPQ_MODE_EBLOCK, NULL);
-               if (rc != ECORE_SUCCESS) {
-                       DP_ERR(edev, "Failed to update MTU\n");
-                       return -1;
-               }
-       }
-       DP_INFO(edev, "MTU updated to %u\n", mtu);
-
-       return 0;
-}
-
-static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
-{
-       memset(ucast, 0, sizeof(struct ecore_filter_ucast));
-       ucast->is_rx_filter = true;
-       ucast->is_tx_filter = true;
-       /* ucast->assert_on_error = true; - For debug */
-}
-
 static int
 qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
                             enum qed_filter_rx_mode_type type)
@@ -602,18 +603,8 @@ qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
        return ecore_filter_accept_cmd(edev, 0, flags, false, false,
                        ECORE_SPQ_MODE_CB, NULL);
 }
-static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn,
-                                   uint8_t clss, bool mode, bool mask)
-{
-       memset(p_tunn, 0, sizeof(struct ecore_tunnel_info));
-       p_tunn->vxlan.b_update_mode = mode;
-       p_tunn->vxlan.b_mode_enabled = mask;
-       p_tunn->b_update_rx_cls = true;
-       p_tunn->b_update_tx_cls = true;
-       p_tunn->vxlan.tun_cls = clss;
-}
 
-static int
+int
 qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
                  bool add)
 {
@@ -621,19 +612,19 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qede_ucast_entry *tmp = NULL;
        struct qede_ucast_entry *u;
-       struct ether_addr *mac_addr;
+       struct rte_ether_addr *mac_addr;
 
-       mac_addr  = (struct ether_addr *)ucast->mac;
+       mac_addr  = (struct rte_ether_addr *)ucast->mac;
        if (add) {
                SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
                        if ((memcmp(mac_addr, &tmp->mac,
-                                   ETHER_ADDR_LEN) == 0) &&
+                                   RTE_ETHER_ADDR_LEN) == 0) &&
                             ucast->vni == tmp->vni &&
                             ucast->vlan == tmp->vlan) {
-                               DP_ERR(edev, "Unicast MAC is already added"
-                                      " with vlan = %u, vni = %u\n",
-                                      ucast->vlan,  ucast->vni);
-                                       return -EEXIST;
+                               DP_INFO(edev, "Unicast MAC is already added"
+                                       " with vlan = %u, vni = %u\n",
+                                       ucast->vlan, ucast->vni);
+                               return 0;
                        }
                }
                u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
@@ -642,7 +633,7 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
                        DP_ERR(edev, "Did not allocate memory for ucast\n");
                        return -ENOMEM;
                }
-               ether_addr_copy(mac_addr, &u->mac);
+               rte_ether_addr_copy(mac_addr, &u->mac);
                u->vlan = ucast->vlan;
                u->vni = ucast->vni;
                SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
@@ -650,7 +641,7 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
        } else {
                SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
                        if ((memcmp(mac_addr, &tmp->mac,
-                                   ETHER_ADDR_LEN) == 0) &&
+                                   RTE_ETHER_ADDR_LEN) == 0) &&
                            ucast->vlan == tmp->vlan      &&
                            ucast->vni == tmp->vni)
                        break;
@@ -667,123 +658,117 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
 }
 
 static int
-qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
-                 bool add)
+qede_add_mcast_filters(struct rte_eth_dev *eth_dev,
+               struct rte_ether_addr *mc_addrs,
+               uint32_t mc_addrs_num)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       struct ether_addr *mac_addr;
-       struct qede_mcast_entry *tmp = NULL;
-       struct qede_mcast_entry *m;
+       struct ecore_filter_mcast mcast;
+       struct qede_mcast_entry *m = NULL;
+       uint8_t i;
+       int rc;
 
-       mac_addr  = (struct ether_addr *)mcast->mac;
-       if (add) {
-               SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
-                       if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) {
-                               DP_ERR(edev,
-                                       "Multicast MAC is already added\n");
-                               return -EEXIST;
-                       }
-               }
+       for (i = 0; i < mc_addrs_num; i++) {
                m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
-                       RTE_CACHE_LINE_SIZE);
+                              RTE_CACHE_LINE_SIZE);
                if (!m) {
-                       DP_ERR(edev,
-                               "Did not allocate memory for mcast\n");
+                       DP_ERR(edev, "Did not allocate memory for mcast\n");
                        return -ENOMEM;
                }
-               ether_addr_copy(mac_addr, &m->mac);
+               rte_ether_addr_copy(&mc_addrs[i], &m->mac);
                SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
-               qdev->num_mc_addr++;
-       } else {
-               SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
-                       if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0)
-                               break;
-               }
-               if (tmp == NULL) {
-                       DP_INFO(edev, "Multicast mac is not found\n");
-                       return -EINVAL;
-               }
-               SLIST_REMOVE(&qdev->mc_list_head, tmp,
-                            qede_mcast_entry, list);
-               qdev->num_mc_addr--;
        }
+       memset(&mcast, 0, sizeof(mcast));
+       mcast.num_mc_addrs = mc_addrs_num;
+       mcast.opcode = ECORE_FILTER_ADD;
+       for (i = 0; i < mc_addrs_num; i++)
+               rte_ether_addr_copy(&mc_addrs[i], (struct rte_ether_addr *)
+                                                       &mcast.mac[i]);
+       rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
+       if (rc != ECORE_SUCCESS) {
+               DP_ERR(edev, "Failed to add multicast filter (rc = %d\n)", rc);
+               return -1;
+       }
+
+       return 0;
+}
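
  [Editor's note: the per-entry ECORE_MAX_MC_ADDRS check from the removed
  qede_mcast_filter() does not reappear here, so the bound is presumably
  enforced by the .set_mc_addr_list caller outside this hunk. A sketch of
  such a guard (assumption, placed before calling qede_add_mcast_filters()):

	if (mc_addrs_num > ECORE_MAX_MC_ADDRS) {
		DP_ERR(edev, "Reached max multicast filters limit\n");
		return -ENOSPC;
	}
  ]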
+
+static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
+{
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       struct qede_mcast_entry *tmp = NULL;
+       struct ecore_filter_mcast mcast;
+       int j;
+       int rc;
+
+       memset(&mcast, 0, sizeof(mcast));
+       mcast.num_mc_addrs = qdev->num_mc_addr;
+       mcast.opcode = ECORE_FILTER_REMOVE;
+       j = 0;
+       SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
+               rte_ether_addr_copy(&tmp->mac,
+                               (struct rte_ether_addr *)&mcast.mac[j]);
+               j++;
+       }
+       rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
+       if (rc != ECORE_SUCCESS) {
+               DP_ERR(edev, "Failed to delete multicast filter\n");
+               return -1;
+       }
+       /* Init the list */
+       while (!SLIST_EMPTY(&qdev->mc_list_head)) {
+               tmp = SLIST_FIRST(&qdev->mc_list_head);
+               SLIST_REMOVE_HEAD(&qdev->mc_list_head, list);
+       }
+       SLIST_INIT(&qdev->mc_list_head);
 
        return 0;
 }
 
-static enum _ecore_status_t
+enum _ecore_status_t
 qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
                 bool add)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       enum _ecore_status_t rc;
-       struct ecore_filter_mcast mcast;
-       struct qede_mcast_entry *tmp;
-       uint16_t j = 0;
+       enum _ecore_status_t rc = ECORE_INVAL;
 
-       /* Multicast */
-       if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) {
-               if (add) {
-                       if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) {
-                               DP_ERR(edev,
-                                      "Mcast filter table limit exceeded, "
-                                      "Please enable mcast promisc mode\n");
-                               return -ECORE_INVAL;
-                       }
-               }
-               rc = qede_mcast_filter(eth_dev, ucast, add);
-               if (rc == 0) {
-                       DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr);
-                       memset(&mcast, 0, sizeof(mcast));
-                       mcast.num_mc_addrs = qdev->num_mc_addr;
-                       mcast.opcode = ECORE_FILTER_ADD;
-                       SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
-                               ether_addr_copy(&tmp->mac,
-                                       (struct ether_addr *)&mcast.mac[j]);
-                               j++;
-                       }
-                       rc = ecore_filter_mcast_cmd(edev, &mcast,
-                                                   ECORE_SPQ_MODE_CB, NULL);
-               }
-               if (rc != ECORE_SUCCESS) {
-                       DP_ERR(edev, "Failed to add multicast filter"
-                              " rc = %d, op = %d\n", rc, add);
-               }
-       } else { /* Unicast */
-               if (add) {
-                       if (qdev->num_uc_addr >=
-                           qdev->dev_info.num_mac_filters) {
-                               DP_ERR(edev,
-                                      "Ucast filter table limit exceeded,"
-                                      " Please enable promisc mode\n");
-                               return -ECORE_INVAL;
-                       }
-               }
-               rc = qede_ucast_filter(eth_dev, ucast, add);
-               if (rc == 0)
-                       rc = ecore_filter_ucast_cmd(edev, ucast,
-                                                   ECORE_SPQ_MODE_CB, NULL);
-               if (rc != ECORE_SUCCESS) {
-                       DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
-                              rc, add);
-               }
+       if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) {
+               DP_ERR(edev, "Ucast filter table limit exceeded,"
+                      " please enable promisc mode\n");
+               return ECORE_INVAL;
        }
 
+       rc = qede_ucast_filter(eth_dev, ucast, add);
+       if (rc == 0)
+               rc = ecore_filter_ucast_cmd(edev, ucast,
+                                           ECORE_SPQ_MODE_CB, NULL);
+       /* Indicate error only for add filter operation.
+        * Delete filter operations are not severe.
+        */
+       if ((rc != ECORE_SUCCESS) && add)
+               DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
+                      rc, add);
+
        return rc;
 }
 
 static int
-qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
+qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr,
                  __rte_unused uint32_t index, __rte_unused uint32_t pool)
 {
        struct ecore_filter_ucast ucast;
        int re;
 
+       if (!rte_is_valid_assigned_ether_addr(mac_addr))
+               return -EINVAL;
+
        qede_set_ucast_cmn_params(&ucast);
+       ucast.opcode = ECORE_FILTER_ADD;
        ucast.type = ECORE_FILTER_MAC;
-       ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
+       rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)&ucast.mac);
        re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
        return re;
 }
@@ -803,19 +788,22 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
                return;
        }
 
+       if (!rte_is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
+               return;
+
        qede_set_ucast_cmn_params(&ucast);
        ucast.opcode = ECORE_FILTER_REMOVE;
        ucast.type = ECORE_FILTER_MAC;
 
        /* Use the index maintained by rte */
-       ether_addr_copy(&eth_dev->data->mac_addrs[index],
-                       (struct ether_addr *)&ucast.mac);
+       rte_ether_addr_copy(&eth_dev->data->mac_addrs[index],
+                       (struct rte_ether_addr *)&ucast.mac);
 
-       ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
+       qede_mac_int_ops(eth_dev, &ucast, false);
 }
 
-static void
-qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
+static int
+qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
@@ -823,15 +811,15 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
        if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
                                               mac_addr->addr_bytes)) {
                DP_ERR(edev, "Setting MAC address is not allowed\n");
-               ether_addr_copy(&qdev->primary_mac,
-                               &eth_dev->data->mac_addrs[0]);
-               return;
+               return -EPERM;
        }
 
-       qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
+       qede_mac_addr_remove(eth_dev, 0);
+
+       return qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
 }
 
-static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
+void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
 {
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_update_params params;
@@ -881,6 +869,8 @@ static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
                }
        }
 
+       qdev->vlan_strip_flg = flg;
+
        DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
        return 0;
 }
@@ -906,9 +896,9 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
 
                SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
                        if (tmp->vid == vlan_id) {
-                               DP_ERR(edev, "VLAN %u already configured\n",
-                                      vlan_id);
-                               return -EEXIST;
+                               DP_INFO(edev, "VLAN %u already configured\n",
+                                       vlan_id);
+                               return 0;
                        }
                }
 
@@ -975,14 +965,14 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
        return rc;
 }
 
-static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+       uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
 
        if (mask & ETH_VLAN_STRIP_MASK) {
-               if (rxmode->hw_vlan_strip)
+               if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                        (void)qede_vlan_stripping(eth_dev, 1);
                else
                        (void)qede_vlan_stripping(eth_dev, 0);
@@ -990,7 +980,7 @@ static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 
        if (mask & ETH_VLAN_FILTER_MASK) {
                /* VLAN filtering kicks in when a VLAN is added */
-               if (rxmode->hw_vlan_filter) {
+               if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
                        qede_vlan_filter_set(eth_dev, 0, 1);
                } else {
                        if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
@@ -1000,7 +990,8 @@ static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
                                /* Signal app that VLAN filtering is still
                                 * enabled
                                 */
-                               rxmode->hw_vlan_filter = true;
+                               eth_dev->data->dev_conf.rxmode.offloads |=
+                                               DEV_RX_OFFLOAD_VLAN_FILTER;
                        } else {
                                qede_vlan_filter_set(eth_dev, 0, 0);
                        }
@@ -1008,11 +999,13 @@ static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
        }
 
        if (mask & ETH_VLAN_EXTEND_MASK)
-               DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
-                       " and classification is based on outer tag only\n");
+               DP_ERR(edev, "Extend VLAN not supported\n");
 
-       DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
-               mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
+       qdev->vlan_offload_mask = mask;
+
+       DP_INFO(edev, "VLAN offload mask %d\n", mask);
+
+       return 0;
 }
 
 static void qede_prandom_bytes(uint32_t *buff)
@@ -1027,9 +1020,7 @@ static void qede_prandom_bytes(uint32_t *buff)
 int qede_config_rss(struct rte_eth_dev *eth_dev)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-#endif
        uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
        struct rte_eth_rss_reta_entry64 reta_conf[2];
        struct rte_eth_rss_conf rss_conf;
@@ -1055,7 +1046,7 @@ int qede_config_rss(struct rte_eth_dev *eth_dev)
        for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
                id = i / RTE_RETA_GROUP_SIZE;
                pos = i % RTE_RETA_GROUP_SIZE;
-               q = i % QEDE_RSS_COUNT(qdev);
+               q = i % QEDE_RSS_COUNT(eth_dev);
                reta_conf[id].reta[pos] = q;
        }
        if (qede_rss_reta_update(eth_dev, &reta_conf[0],
@@ -1080,26 +1071,36 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
 
        PMD_INIT_FUNC_TRACE(edev);
 
        /* Update MTU only if it has changed */
-       if (qdev->mtu != qdev->new_mtu) {
+       if (qdev->new_mtu && qdev->new_mtu != qdev->mtu) {
                if (qede_update_mtu(eth_dev, qdev->new_mtu))
                        goto err;
                qdev->mtu = qdev->new_mtu;
-               /* If MTU has changed then update TPA too */
-               if (qdev->enable_lro)
-                       if (qede_enable_tpa(eth_dev, true))
-                               goto err;
+               qdev->new_mtu = 0;
+       }
+
+       /* Configure TPA parameters */
+       if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+               if (qede_enable_tpa(eth_dev, true))
+                       return -EINVAL;
+               /* Enable scatter mode for LRO */
+               if (!eth_dev->data->scattered_rx)
+                       rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
        }
 
        /* Start queues */
        if (qede_start_queues(eth_dev))
                goto err;
 
+       if (IS_PF(edev))
+               qede_reset_queue_stats(qdev, true);
+
        /* Newer SR-IOV PF driver expects RX/TX queues to be started before
-        * enabling RSS. Hence RSS configuration is deferred upto this point.
+        * enabling RSS. Hence RSS configuration is deferred up to this point.
         * Also, we would like to retain similar behavior in PF case, so we
         * don't do PF/VF specific check here.
         */
@@ -1111,12 +1112,13 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
        if (qede_activate_vport(eth_dev, true))
                goto err;
 
-       /* Bring-up the link */
-       qede_dev_set_link_state(eth_dev, true);
+       /* Update link status */
+       qede_link_update(eth_dev, 0);
 
        /* Start/resume traffic */
        qede_fastpath_start(edev);
 
+       qede_assign_rxtx_handlers(eth_dev);
        DP_INFO(edev, "Device started\n");
 
        return 0;
@@ -1139,57 +1141,109 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev)
        if (qdev->enable_lro)
                qede_enable_tpa(eth_dev, false);
 
-       /* TODO: Do we need disable LRO or RSS */
        /* Stop queues */
        qede_stop_queues(eth_dev);
 
        /* Disable traffic */
        ecore_hw_stop_fastpath(edev); /* TBD - loop */
 
-       /* Bring the link down */
-       qede_dev_set_link_state(eth_dev, false);
-
        DP_INFO(edev, "Device is stopped\n");
 }
 
-static int qede_dev_configure(struct rte_eth_dev *eth_dev)
+static const char * const valid_args[] = {
+       QEDE_NPAR_TX_SWITCHING,
+       QEDE_VF_TX_SWITCHING,
+       NULL,
+};
+
+static int qede_args_check(const char *key, const char *val, void *opaque)
 {
+       unsigned long tmp;
+       int ret = 0;
+       struct rte_eth_dev *eth_dev = opaque;
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
 
-       PMD_INIT_FUNC_TRACE(edev);
+       errno = 0;
+       tmp = strtoul(val, NULL, 0);
+       if (errno) {
+               DP_INFO(edev, "%s: \"%s\" is not a valid integer", key, val);
+               return errno;
+       }
 
-       /* Check requirements for 100G mode */
-       if (edev->num_hwfns > 1) {
-               if (eth_dev->data->nb_rx_queues < 2 ||
-                               eth_dev->data->nb_tx_queues < 2) {
-                       DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
-                       return -EINVAL;
-               }
+       if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) ||
+           ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) {
+               qdev->enable_tx_switching = !!tmp;
+               DP_INFO(edev, "%s tx-switching %s\n",
+                       strcmp(QEDE_NPAR_TX_SWITCHING, key) ?
+                       "VF" : "NPAR",
+                       tmp ? "enabled" : "disabled");
+       }
 
-               if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
-                               (eth_dev->data->nb_tx_queues % 2 != 0)) {
-                       DP_ERR(edev,
-                                       "100G mode needs even no. of RX/TX queues\n");
-                       return -EINVAL;
+       return ret;
+}
+
+static int qede_args(struct rte_eth_dev *eth_dev)
+{
+       struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+       struct rte_kvargs *kvlist;
+       struct rte_devargs *devargs;
+       int ret;
+       int i;
+
+       devargs = pci_dev->device.devargs;
+       if (!devargs)
+               return 0; /* return success */
+
+       kvlist = rte_kvargs_parse(devargs->args, valid_args);
+       if (kvlist == NULL)
+               return -EINVAL;
+
+       /* Process parameters. */
+       for (i = 0; (valid_args[i] != NULL); ++i) {
+               if (rte_kvargs_count(kvlist, valid_args[i])) {
+                       ret = rte_kvargs_process(kvlist, valid_args[i],
+                                                qede_args_check, eth_dev);
+                       if (ret != ECORE_SUCCESS) {
+                               rte_kvargs_free(kvlist);
+                               return ret;
+                       }
                }
        }
+       rte_kvargs_free(kvlist);
 
-       /* Sanity checks and throw warnings */
-       if (rxmode->enable_scatter)
-               eth_dev->data->scattered_rx = 1;
+       return 0;
+}
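
  [Editor's note: illustrative usage of the devargs parsed above, with a
  placeholder PCI address. On the command line this would be e.g.
  "-w 05:00.0,npar_tx_switching=0"; the equivalent through EAL init:

	char *eal_args[] = { "app", "-w", "05:00.0,npar_tx_switching=0" };

	rte_eal_init(RTE_DIM(eal_args), eal_args);
  ]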
 
-       if (!rxmode->hw_strip_crc)
-               DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
+static int qede_dev_configure(struct rte_eth_dev *eth_dev)
+{
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+       int ret;
 
-       if (!rxmode->hw_ip_checksum)
-               DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
-                               "in hw\n");
-       if (rxmode->header_split)
-               DP_INFO(edev, "Header split enable is not supported\n");
-       if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode ==
-                               ETH_MQ_RX_RSS)) {
+       PMD_INIT_FUNC_TRACE(edev);
+
+       if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
+               rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
+       /* We need at least one RX queue. There is no minimum check in
+        * rte_eth_dev_configure(), so we check it here.
+        */
+       if (eth_dev->data->nb_rx_queues == 0) {
+               DP_ERR(edev, "Minimum one RX queue is required\n");
+               return -EINVAL;
+       }
+
+       /* Enable Tx switching by default */
+       qdev->enable_tx_switching = 1;
+
+       /* Parse devargs and fix up rxmode */
+       if (qede_args(eth_dev))
+               DP_NOTICE(edev, false,
+                         "Invalid devargs supplied, requested change will not take effect\n");
+
+       if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
+             rxmode->mq_mode == ETH_MQ_RX_RSS)) {
                DP_ERR(edev, "Unsupported multi-queue mode\n");
                return -ENOTSUP;
        }
@@ -1197,52 +1251,40 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
        if (qede_check_fdir_support(eth_dev))
                return -ENOTSUP;
 
-       /* Deallocate resources if held previously. It is needed only if the
-        * queue count has been changed from previous configuration. If its
-        * going to change then it means RX/TX queue setup will be called
-        * again and the fastpath pointers will be reinitialized there.
-        */
-       if (qdev->num_tx_queues != eth_dev->data->nb_tx_queues ||
-           qdev->num_rx_queues != eth_dev->data->nb_rx_queues) {
-               qede_dealloc_fp_resc(eth_dev);
-               /* Proceed with updated queue count */
-               qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
-               qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
-               if (qede_alloc_fp_resc(qdev))
-                       return -ENOMEM;
-       }
+       qede_dealloc_fp_resc(eth_dev);
+       qdev->num_tx_queues = eth_dev->data->nb_tx_queues * edev->num_hwfns;
+       qdev->num_rx_queues = eth_dev->data->nb_rx_queues * edev->num_hwfns;
 
-       /* VF's MTU has to be set using vport-start where as
-        * PF's MTU can be updated via vport-update.
-        */
-       if (IS_VF(edev)) {
-               if (qede_start_vport(qdev, rxmode->max_rx_pkt_len))
-                       return -1;
-       } else {
-               if (qede_update_mtu(eth_dev, rxmode->max_rx_pkt_len))
-                       return -1;
-       }
+       if (qede_alloc_fp_resc(qdev))
+               return -ENOMEM;
 
-       qdev->mtu = rxmode->max_rx_pkt_len;
-       qdev->new_mtu = qdev->mtu;
+       /* If jumbo enabled adjust MTU */
+       if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+               eth_dev->data->mtu =
+                       eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+                       RTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
 
-       /* Configure TPA parameters */
-       if (rxmode->enable_lro) {
-               if (qede_enable_tpa(eth_dev, true))
-                       return -EINVAL;
-               /* Enable scatter mode for LRO */
-               if (!rxmode->enable_scatter)
-                       eth_dev->data->scattered_rx = 1;
-       }
-       qdev->enable_lro = rxmode->enable_lro;
+       if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
+               eth_dev->data->scattered_rx = 1;
+
+       if (qede_start_vport(qdev, eth_dev->data->mtu))
+               return -1;
+
+       qdev->mtu = eth_dev->data->mtu;
 
        /* Enable VLAN offloads by default */
-       qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
-                       ETH_VLAN_FILTER_MASK |
-                       ETH_VLAN_EXTEND_MASK);
+       ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
+                                            ETH_VLAN_FILTER_MASK);
+       if (ret)
+               return ret;
 
        DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
-                       QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
+                       QEDE_RSS_COUNT(eth_dev), QEDE_TSS_COUNT(eth_dev));
+
+       if (ECORE_IS_CMT(edev))
+               DP_INFO(edev, "Actual HW queues for CMT mode - RX = %d TX = %d\n",
+                       qdev->num_rx_queues, qdev->num_tx_queues);
 
        return 0;
 }
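
  [Editor's note: on CMT (two-hwfn, 100G) adapters the driver now doubles
  the queue counts internally instead of demanding even application queue
  numbers, as the old removed check did; e.g. (illustrative) nb_rx_queues = 3
  at configure time yields qdev->num_rx_queues = 6 HW queues, paired across
  the two engines.]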
@@ -1262,7 +1304,7 @@ static const struct rte_eth_desc_lim qede_tx_desc_lim = {
        .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
 };
 
-static void
+static int
 qede_dev_info_get(struct rte_eth_dev *eth_dev,
                  struct rte_eth_dev_info *dev_info)
 {
@@ -1273,7 +1315,6 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 
        PMD_INIT_FUNC_TRACE(edev);
 
-       dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
        dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
        dev_info->rx_desc_lim = qede_rx_desc_lim;
@@ -1285,6 +1326,10 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
        else
                dev_info->max_rx_queues = (uint16_t)RTE_MIN(
                        QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
+       /* Since CMT mode internally doubles the number of queues */
+       if (ECORE_IS_CMT(edev))
+               dev_info->max_rx_queues = dev_info->max_rx_queues / 2;
+
        dev_info->max_tx_queues = dev_info->max_rx_queues;
 
        dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
@@ -1292,25 +1337,42 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
        dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
        dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
        dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
-
-       dev_info->default_txconf = (struct rte_eth_txconf) {
-               .txq_flags = QEDE_TXQ_FLAGS,
-       };
-
-       dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP  |
-                                    DEV_RX_OFFLOAD_IPV4_CKSUM  |
+       dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM  |
                                     DEV_RX_OFFLOAD_UDP_CKSUM   |
                                     DEV_RX_OFFLOAD_TCP_CKSUM   |
                                     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-                                    DEV_RX_OFFLOAD_TCP_LRO);
-
+                                    DEV_RX_OFFLOAD_TCP_LRO     |
+                                    DEV_RX_OFFLOAD_KEEP_CRC    |
+                                    DEV_RX_OFFLOAD_SCATTER     |
+                                    DEV_RX_OFFLOAD_JUMBO_FRAME |
+                                    DEV_RX_OFFLOAD_VLAN_FILTER |
+                                    DEV_RX_OFFLOAD_VLAN_STRIP  |
+                                    DEV_RX_OFFLOAD_RSS_HASH);
+       dev_info->rx_queue_offload_capa = 0;
+
+       /* TX offloads are on a per-packet basis, so it is applicable
+        * to both at port and queue levels.
+        */
        dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
                                     DEV_TX_OFFLOAD_IPV4_CKSUM  |
                                     DEV_TX_OFFLOAD_UDP_CKSUM   |
                                     DEV_TX_OFFLOAD_TCP_CKSUM   |
                                     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-                                    DEV_TX_OFFLOAD_TCP_TSO |
-                                    DEV_TX_OFFLOAD_VXLAN_TNL_TSO);
+                                    DEV_TX_OFFLOAD_MULTI_SEGS  |
+                                    DEV_TX_OFFLOAD_TCP_TSO     |
+                                    DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+                                    DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+       dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;
+
+       dev_info->default_txconf = (struct rte_eth_txconf) {
+               .offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
+       };
+
+       dev_info->default_rxconf = (struct rte_eth_rxconf) {
+               /* Packets are always dropped if no descriptors are available */
+               .rx_drop_en = 1,
+               .offloads = 0,
+       };
 
        memset(&link, 0, sizeof(struct qed_link_output));
        qdev->ops->common->get_link(edev, &link);
@@ -1327,26 +1389,30 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
        if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
                speed_cap |= ETH_LINK_SPEED_100G;
        dev_info->speed_capa = speed_cap;
+
+       return 0;
 }
 
 /* return 0 means link status changed, -1 means not changed */
-static int
+int
 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
 {
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
+       struct qed_link_output q_link;
+       struct rte_eth_link link;
        uint16_t link_duplex;
-       struct qed_link_output link;
-       struct rte_eth_link *curr = &eth_dev->data->dev_link;
 
-       memset(&link, 0, sizeof(struct qed_link_output));
-       qdev->ops->common->get_link(edev, &link);
+       memset(&q_link, 0, sizeof(q_link));
+       memset(&link, 0, sizeof(link));
+
+       qdev->ops->common->get_link(edev, &q_link);
 
        /* Link Speed */
-       curr->link_speed = link.speed;
+       link.link_speed = q_link.speed;
 
        /* Link Mode */
-       switch (link.duplex) {
+       switch (q_link.duplex) {
        case QEDE_DUPLEX_HALF:
                link_duplex = ETH_LINK_HALF_DUPLEX;
                break;
@@ -1357,55 +1423,55 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
        default:
                link_duplex = -1;
        }
-       curr->link_duplex = link_duplex;
+       link.link_duplex = link_duplex;
 
        /* Link Status */
-       curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
+       link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
 
        /* AN */
-       curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
+       link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
                             ETH_LINK_AUTONEG : ETH_LINK_FIXED;
 
        DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
-               curr->link_speed, curr->link_duplex,
-               curr->link_autoneg, curr->link_status);
+               link.link_speed, link.link_duplex,
+               link.link_autoneg, link.link_status);
 
-       /* return 0 means link status changed, -1 means not changed */
-       return ((curr->link_status == link.link_up) ? -1 : 0);
+       return rte_eth_linkstatus_set(eth_dev, &link);
 }
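
  [Editor's note: rte_eth_linkstatus_set() keeps the contract documented
  above this function — it returns 0 when the stored link status changed and
  -1 when it was already current.]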
 
-static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
+static int qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
 {
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
+       enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
+       enum _ecore_status_t ecore_status;
 
        PMD_INIT_FUNC_TRACE(edev);
-#endif
-
-       enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
 
        if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
                type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
 
-       qed_configure_filter_rx_mode(eth_dev, type);
+       ecore_status = qed_configure_filter_rx_mode(eth_dev, type);
+
+       return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
 }
 
-static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
+static int qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
 {
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
+       enum _ecore_status_t ecore_status;
 
        PMD_INIT_FUNC_TRACE(edev);
-#endif
 
        if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
-               qed_configure_filter_rx_mode(eth_dev,
+               ecore_status = qed_configure_filter_rx_mode(eth_dev,
                                QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
        else
-               qed_configure_filter_rx_mode(eth_dev,
+               ecore_status = qed_configure_filter_rx_mode(eth_dev,
                                QED_FILTER_RX_MODE_TYPE_REGULAR);
+
+       return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
 }
 
 static void qede_poll_sp_sb_cb(void *param)
@@ -1418,13 +1484,12 @@ static void qede_poll_sp_sb_cb(void *param)
        qede_interrupt_action(ECORE_LEADING_HWFN(edev));
        qede_interrupt_action(&edev->hwfns[1]);
 
-       rc = rte_eal_alarm_set(timer_period * US_PER_S,
+       rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
                               qede_poll_sp_sb_cb,
                               (void *)eth_dev);
        if (rc != 0) {
                DP_ERR(edev, "Unable to start periodic"
                             " timer rc %d\n", rc);
-               assert(false && "Unable to start periodic timer");
        }
 }
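
  [Editor's note: rte_eal_alarm_set() takes microseconds, so with
  QEDE_SP_TIMER_PERIOD = 10000 this callback self-rearms every 10 ms. A
  sketch of the initial arming for CMT (two-hwfn) devices (assumption: done
  in qede_common_dev_init(), outside this hunk, matching the
  rte_eal_alarm_cancel() in qede_dev_close() below):

	if (ECORE_IS_CMT(edev))
		rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
				  qede_poll_sp_sb_cb, (void *)eth_dev);
  ]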
 
@@ -1444,29 +1509,45 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
        if (eth_dev->data->dev_started)
                qede_dev_stop(eth_dev);
 
-       qede_stop_vport(edev);
+       if (qdev->vport_started)
+               qede_stop_vport(edev);
+       qdev->vport_started = false;
        qede_fdir_dealloc_resc(eth_dev);
        qede_dealloc_fp_resc(eth_dev);
 
        eth_dev->data->nb_rx_queues = 0;
        eth_dev->data->nb_tx_queues = 0;
 
+       /* Bring the link down */
+       qede_dev_set_link_state(eth_dev, false);
        qdev->ops->common->slowpath_stop(edev);
        qdev->ops->common->remove(edev);
        rte_intr_disable(&pci_dev->intr_handle);
-       rte_intr_callback_unregister(&pci_dev->intr_handle,
-                                    qede_interrupt_handler, (void *)eth_dev);
-       if (edev->num_hwfns > 1)
+
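+       /* Unregister whichever ISR was registered at probe time: legacy
+        * INTx handle types use the INTx handler, everything else uses the
+        * default (MSI-X) handler.
+        */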
+       switch (pci_dev->intr_handle.type) {
+       case RTE_INTR_HANDLE_UIO_INTX:
+       case RTE_INTR_HANDLE_VFIO_LEGACY:
+               rte_intr_callback_unregister(&pci_dev->intr_handle,
+                                            qede_interrupt_handler_intx,
+                                            (void *)eth_dev);
+               break;
+       default:
+               rte_intr_callback_unregister(&pci_dev->intr_handle,
+                                          qede_interrupt_handler,
+                                          (void *)eth_dev);
+       }
+
+       if (ECORE_IS_CMT(edev))
                rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
 }
 
-static void
+static int
 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
 {
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct ecore_eth_stats stats;
-       unsigned int i = 0, j = 0, qid;
+       unsigned int i = 0, j = 0, qid, idx, hw_fn;
        unsigned int rxq_stat_cntrs, txq_stat_cntrs;
        struct qede_tx_queue *txq;
 
@@ -1502,64 +1583,81 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
        eth_stats->oerrors = stats.common.tx_err_drop_pkts;
 
        /* Queue stats */
-       rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+       rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(eth_dev),
                               RTE_ETHDEV_QUEUE_STAT_CNTRS);
-       txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+       txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(eth_dev),
                               RTE_ETHDEV_QUEUE_STAT_CNTRS);
-       if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
-           (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
+       if (rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(eth_dev) ||
+           txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(eth_dev))
                DP_VERBOSE(edev, ECORE_MSG_DEBUG,
                       "Not all the queue stats will be displayed. Set"
                       " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
                       " appropriately and retry.\n");
 
-       for_each_rss(qid) {
-               eth_stats->q_ipackets[i] =
-                       *(uint64_t *)(
-                               ((char *)(qdev->fp_array[qid].rxq)) +
-                               offsetof(struct qede_rx_queue,
-                               rcv_pkts));
-               eth_stats->q_errors[i] =
-                       *(uint64_t *)(
-                               ((char *)(qdev->fp_array[qid].rxq)) +
-                               offsetof(struct qede_rx_queue,
-                               rx_hw_errors)) +
-                       *(uint64_t *)(
-                               ((char *)(qdev->fp_array[qid].rxq)) +
-                               offsetof(struct qede_rx_queue,
-                               rx_alloc_errors));
+       for (qid = 0; qid < eth_dev->data->nb_rx_queues; qid++) {
+               eth_stats->q_ipackets[i] = 0;
+               eth_stats->q_errors[i] = 0;
+
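+               /* On CMT devices the fastpath array interleaves queues
+                * across engines, so per-port queue qid lives at
+                * fp_array[qid * num_hwfns + hw_fn].
+                */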
+               for_each_hwfn(edev, hw_fn) {
+                       idx = qid * edev->num_hwfns + hw_fn;
+
+                       eth_stats->q_ipackets[i] +=
+                               *(uint64_t *)
+                                       (((char *)(qdev->fp_array[idx].rxq)) +
+                                        offsetof(struct qede_rx_queue,
+                                        rcv_pkts));
+                       eth_stats->q_errors[i] +=
+                               *(uint64_t *)
+                                       (((char *)(qdev->fp_array[idx].rxq)) +
+                                        offsetof(struct qede_rx_queue,
+                                        rx_hw_errors)) +
+                               *(uint64_t *)
+                                       (((char *)(qdev->fp_array[idx].rxq)) +
+                                        offsetof(struct qede_rx_queue,
+                                        rx_alloc_errors));
+               }
+
                i++;
                if (i == rxq_stat_cntrs)
                        break;
        }
 
-       for_each_tss(qid) {
-               txq = qdev->fp_array[qid].txq;
-               eth_stats->q_opackets[j] =
-                       *((uint64_t *)(uintptr_t)
-                               (((uint64_t)(uintptr_t)(txq)) +
-                                offsetof(struct qede_tx_queue,
-                                         xmit_pkts)));
+       for (qid = 0; qid < eth_dev->data->nb_tx_queues; qid++) {
+               eth_stats->q_opackets[j] = 0;
+
+               for_each_hwfn(edev, hw_fn) {
+                       idx = qid * edev->num_hwfns + hw_fn;
+
+                       txq = qdev->fp_array[idx].txq;
+                       eth_stats->q_opackets[j] +=
+                               *((uint64_t *)(uintptr_t)
+                                       (((uint64_t)(uintptr_t)(txq)) +
+                                        offsetof(struct qede_tx_queue,
+                                                 xmit_pkts)));
+               }
+
                j++;
                if (j == txq_stat_cntrs)
                        break;
        }
+
+       return 0;
 }
 
 static unsigned
 qede_get_xstats_count(struct qede_dev *qdev) {
+       struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
+
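+       /* BB devices may run in CMT mode, so their per-queue Rx xstats are
+        * replicated per hwfn; AH devices report a single set per queue.
+        */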
        if (ECORE_IS_BB(&qdev->edev))
                return RTE_DIM(qede_xstats_strings) +
                       RTE_DIM(qede_bb_xstats_strings) +
                       (RTE_DIM(qede_rxq_xstats_strings) *
-                       RTE_MIN(QEDE_RSS_COUNT(qdev),
-                               RTE_ETHDEV_QUEUE_STAT_CNTRS));
+                       QEDE_RSS_COUNT(dev) * qdev->edev.num_hwfns);
        else
                return RTE_DIM(qede_xstats_strings) +
                       RTE_DIM(qede_ah_xstats_strings) +
                       (RTE_DIM(qede_rxq_xstats_strings) *
-                       RTE_MIN(QEDE_RSS_COUNT(qdev),
-                               RTE_ETHDEV_QUEUE_STAT_CNTRS));
+                       QEDE_RSS_COUNT(dev));
 }
 
 static int
@@ -1570,45 +1668,43 @@ qede_get_xstats_names(struct rte_eth_dev *dev,
        struct qede_dev *qdev = dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        const unsigned int stat_cnt = qede_get_xstats_count(qdev);
-       unsigned int i, qid, stat_idx = 0;
-       unsigned int rxq_stat_cntrs;
-
-       if (xstats_names != NULL) {
-               for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
-                       snprintf(xstats_names[stat_idx].name,
-                               sizeof(xstats_names[stat_idx].name),
-                               "%s",
-                               qede_xstats_strings[i].name);
+       unsigned int i, qid, hw_fn, stat_idx = 0;
+
+       if (xstats_names == NULL)
+               return stat_cnt;
+
+       for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
+               strlcpy(xstats_names[stat_idx].name,
+                       qede_xstats_strings[i].name,
+                       sizeof(xstats_names[stat_idx].name));
+               stat_idx++;
+       }
+
+       if (ECORE_IS_BB(edev)) {
+               for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
+                       strlcpy(xstats_names[stat_idx].name,
+                               qede_bb_xstats_strings[i].name,
+                               sizeof(xstats_names[stat_idx].name));
                        stat_idx++;
                }
-
-               if (ECORE_IS_BB(edev)) {
-                       for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
-                               snprintf(xstats_names[stat_idx].name,
-                                       sizeof(xstats_names[stat_idx].name),
-                                       "%s",
-                                       qede_bb_xstats_strings[i].name);
-                               stat_idx++;
-                       }
-               } else {
-                       for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
-                               snprintf(xstats_names[stat_idx].name,
-                                       sizeof(xstats_names[stat_idx].name),
-                                       "%s",
-                                       qede_ah_xstats_strings[i].name);
-                               stat_idx++;
-                       }
+       } else {
+               for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
+                       strlcpy(xstats_names[stat_idx].name,
+                               qede_ah_xstats_strings[i].name,
+                               sizeof(xstats_names[stat_idx].name));
+                       stat_idx++;
                }
+       }
 
-               rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
-                                        RTE_ETHDEV_QUEUE_STAT_CNTRS);
-               for (qid = 0; qid < rxq_stat_cntrs; qid++) {
+       for (qid = 0; qid < QEDE_RSS_COUNT(dev); qid++) {
+               for_each_hwfn(edev, hw_fn) {
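+                       /* Per-queue Rx xstat names embed "<hwfn>.<queue>"
+                        * after the four-character queue prefix of the
+                        * template stat name.
+                        */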
                        for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
                                snprintf(xstats_names[stat_idx].name,
-                                       sizeof(xstats_names[stat_idx].name),
-                                       "%.4s%d%s",
-                                       qede_rxq_xstats_strings[i].name, qid,
-                                       qede_rxq_xstats_strings[i].name + 4);
+                                        RTE_ETH_XSTATS_NAME_SIZE,
+                                        "%.4s%d.%d%s",
+                                        qede_rxq_xstats_strings[i].name,
+                                        hw_fn, qid,
+                                        qede_rxq_xstats_strings[i].name + 4);
                                stat_idx++;
                        }
                }
@@ -1625,8 +1721,7 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
        struct ecore_dev *edev = &qdev->edev;
        struct ecore_eth_stats stats;
        const unsigned int num = qede_get_xstats_count(qdev);
-       unsigned int i, qid, stat_idx = 0;
-       unsigned int rxq_stat_cntrs;
+       unsigned int i, qid, hw_fn, fpidx, stat_idx = 0;
 
        if (n < num)
                return num;
@@ -1658,30 +1753,33 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                }
        }
 
-       rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
-                                RTE_ETHDEV_QUEUE_STAT_CNTRS);
-       for (qid = 0; qid < rxq_stat_cntrs; qid++) {
-               for_each_rss(qid) {
+       for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
+               for_each_hwfn(edev, hw_fn) {
                        for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
-                               xstats[stat_idx].value = *(uint64_t *)(
-                                       ((char *)(qdev->fp_array[qid].rxq)) +
+                               fpidx = qid * edev->num_hwfns + hw_fn;
+                               xstats[stat_idx].value = *(uint64_t *)
+                                       (((char *)(qdev->fp_array[fpidx].rxq)) +
                                         qede_rxq_xstats_strings[i].offset);
                                xstats[stat_idx].id = stat_idx;
                                stat_idx++;
                        }
                }
        }
 
        return stat_idx;
 }
 
-static void
+static int
 qede_reset_xstats(struct rte_eth_dev *dev)
 {
        struct qede_dev *qdev = dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
 
        ecore_reset_vport_stats(edev);
+       qede_reset_queue_stats(qdev, true);
+
+       return 0;
 }
 
 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
@@ -1711,33 +1809,137 @@ static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
        return qede_dev_set_link_state(eth_dev, false);
 }
 
-static void qede_reset_stats(struct rte_eth_dev *eth_dev)
+static int qede_reset_stats(struct rte_eth_dev *eth_dev)
 {
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
 
        ecore_reset_vport_stats(edev);
+       qede_reset_queue_stats(qdev, false);
+
+       return 0;
 }
 
-static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
+static int qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
 {
        enum qed_filter_rx_mode_type type =
            QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+       enum _ecore_status_t ecore_status;
 
        if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
                type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
 
-       qed_configure_filter_rx_mode(eth_dev, type);
+       ecore_status = qed_configure_filter_rx_mode(eth_dev, type);
+
+       return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
 }
 
-static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
+static int qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
 {
+       enum _ecore_status_t ecore_status;
+
        if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
-               qed_configure_filter_rx_mode(eth_dev,
+               ecore_status = qed_configure_filter_rx_mode(eth_dev,
                                QED_FILTER_RX_MODE_TYPE_PROMISC);
        else
-               qed_configure_filter_rx_mode(eth_dev,
+               ecore_status = qed_configure_filter_rx_mode(eth_dev,
                                QED_FILTER_RX_MODE_TYPE_REGULAR);
+
+       return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
+}
+
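+/* Replace the device multicast list: validate the addresses, flush the
+ * existing filters, then program the new set.
+ */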
+static int
+qede_set_mc_addr_list(struct rte_eth_dev *eth_dev,
+               struct rte_ether_addr *mc_addrs,
+               uint32_t mc_addrs_num)
+{
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       uint8_t i;
+
+       if (mc_addrs_num > ECORE_MAX_MC_ADDRS) {
+               DP_ERR(edev, "Reached max multicast filters limit,"
+                            "Please enable multicast promisc mode\n");
+               return -ENOSPC;
+       }
+
+       for (i = 0; i < mc_addrs_num; i++) {
+               if (!rte_is_multicast_ether_addr(&mc_addrs[i])) {
+                       DP_ERR(edev, "Not a valid multicast MAC\n");
+                       return -EINVAL;
+               }
+       }
+
+       /* Flush all existing entries */
+       if (qede_del_mcast_filters(eth_dev))
+               return -1;
+
+       /* Set new mcast list */
+       return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num);
+}
+
+/* Update MTU via vport-update without doing port restart.
+ * The vport must be deactivated before calling this API.
+ */
+int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       struct ecore_hwfn *p_hwfn;
+       int rc;
+       int i;
+
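+       /* PF: push the new MTU via a vport-update ramrod on each hwfn.
+        * VF: request it from the PF via the MTU-update TLV, recreating the
+        * vport (and restoring Rx-mode and VLAN config) when the PF does
+        * not support that TLV.
+        */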
+       if (IS_PF(edev)) {
+               struct ecore_sp_vport_update_params params;
+
+               memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+               params.vport_id = 0;
+               params.mtu = mtu;
+               for_each_hwfn(edev, i) {
+                       p_hwfn = &edev->hwfns[i];
+                       params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+                       rc = ecore_sp_vport_update(p_hwfn, &params,
+                                       ECORE_SPQ_MODE_EBLOCK, NULL);
+                       if (rc != ECORE_SUCCESS)
+                               goto err;
+               }
+       } else {
+               for_each_hwfn(edev, i) {
+                       p_hwfn = &edev->hwfns[i];
+                       rc = ecore_vf_pf_update_mtu(p_hwfn, mtu);
+                       if (rc == ECORE_INVAL) {
+                               DP_INFO(edev, "VF MTU Update TLV not supported\n");
+                               /* Recreate vport */
+                               rc = qede_start_vport(qdev, mtu);
+                               if (rc != ECORE_SUCCESS)
+                                       goto err;
+
+                               /* Restore config lost due to vport stop */
+                               if (eth_dev->data->promiscuous)
+                                       qede_promiscuous_enable(eth_dev);
+                               else
+                                       qede_promiscuous_disable(eth_dev);
+
+                               if (eth_dev->data->all_multicast)
+                                       qede_allmulticast_enable(eth_dev);
+                               else
+                                       qede_allmulticast_disable(eth_dev);
+
+                               qede_vlan_offload_set(eth_dev,
+                                                     qdev->vlan_offload_mask);
+                       } else if (rc != ECORE_SUCCESS) {
+                               goto err;
+                       }
+               }
+       }
+       DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? "PF" : "VF", mtu);
+
+       return 0;
+
+err:
+       DP_ERR(edev, "Failed to update MTU\n");
+       return -1;
 }
 
 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
@@ -1806,12 +2008,30 @@ static const uint32_t *
 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
 {
        static const uint32_t ptypes[] = {
+               RTE_PTYPE_L2_ETHER,
+               RTE_PTYPE_L2_ETHER_VLAN,
                RTE_PTYPE_L3_IPV4,
                RTE_PTYPE_L3_IPV6,
+               RTE_PTYPE_L4_TCP,
+               RTE_PTYPE_L4_UDP,
+               RTE_PTYPE_TUNNEL_VXLAN,
+               RTE_PTYPE_L4_FRAG,
+               RTE_PTYPE_TUNNEL_GENEVE,
+               RTE_PTYPE_TUNNEL_GRE,
+               /* Inner */
+               RTE_PTYPE_INNER_L2_ETHER,
+               RTE_PTYPE_INNER_L2_ETHER_VLAN,
+               RTE_PTYPE_INNER_L3_IPV4,
+               RTE_PTYPE_INNER_L3_IPV6,
+               RTE_PTYPE_INNER_L4_TCP,
+               RTE_PTYPE_INNER_L4_UDP,
+               RTE_PTYPE_INNER_L4_FRAG,
                RTE_PTYPE_UNKNOWN
        };
 
-       if (eth_dev->rx_pkt_burst == qede_recv_pkts)
+       if (eth_dev->rx_pkt_burst == qede_recv_pkts ||
+           eth_dev->rx_pkt_burst == qede_recv_pkts_regular ||
+           eth_dev->rx_pkt_burst == qede_recv_pkts_cmt)
                return ptypes;
 
        return NULL;
@@ -1841,8 +2061,7 @@ int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
        uint32_t *key = (uint32_t *)rss_conf->rss_key;
        uint64_t hf = rss_conf->rss_hf;
        uint8_t len = rss_conf->rss_key_len;
-       uint8_t idx;
-       uint8_t i;
+       uint8_t idx, i, j, fpidx;
        int rc;
 
        memset(&vport_update_params, 0, sizeof(vport_update_params));
@@ -1876,14 +2095,18 @@ int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
        /* tbl_size has to be set with capabilities */
        rss_params.rss_table_size_log = 7;
        vport_update_params.vport_id = 0;
-       /* pass the L2 handles instead of qids */
-       for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
-               idx = qdev->rss_ind_table[i];
-               rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
-       }
-       vport_update_params.rss_params = &rss_params;
 
        for_each_hwfn(edev, i) {
+               /* pass the L2 handles instead of qids */
+               for (j = 0 ; j < ECORE_RSS_IND_TABLE_SIZE ; j++) {
+                       idx = j % QEDE_RSS_COUNT(eth_dev);
+                       fpidx = idx * edev->num_hwfns + i;
+                       rss_params.rss_ind_table[j] =
+                               qdev->fp_array[fpidx].rxq->handle;
+               }
+
+               vport_update_params.rss_params = &rss_params;
+
                p_hwfn = &edev->hwfns[i];
                vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
@@ -1935,61 +2158,6 @@ static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
        return 0;
 }
 
-static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
-                                   struct ecore_rss_params *rss)
-{
-       int i, fn;
-       bool rss_mode = 1; /* enable */
-       struct ecore_queue_cid *cid;
-       struct ecore_rss_params *t_rss;
-
-       /* In regular scenario, we'd simply need to take input handlers.
-        * But in CMT, we'd have to split the handlers according to the
-        * engine they were configured on. We'd then have to understand
-        * whether RSS is really required, since 2-queues on CMT doesn't
-        * require RSS.
-        */
-
-       /* CMT should be round-robin */
-       for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
-               cid = rss->rss_ind_table[i];
-
-               if (cid->p_owner == ECORE_LEADING_HWFN(edev))
-                       t_rss = &rss[0];
-               else
-                       t_rss = &rss[1];
-
-               t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
-       }
-
-       t_rss = &rss[1];
-       t_rss->update_rss_ind_table = 1;
-       t_rss->rss_table_size_log = 7;
-       t_rss->update_rss_config = 1;
-
-       /* Make sure RSS is actually required */
-       for_each_hwfn(edev, fn) {
-               for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
-                    i++) {
-                       if (rss[fn].rss_ind_table[i] !=
-                           rss[fn].rss_ind_table[0])
-                               break;
-               }
-
-               if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
-                       DP_INFO(edev,
-                               "CMT - 1 queue per-hwfn; Disabling RSS\n");
-                       rss_mode = 0;
-                       goto out;
-               }
-       }
-
-out:
-       t_rss->rss_enable = rss_mode;
-
-       return rss_mode;
-}
-
 int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
                         struct rte_eth_rss_reta_entry64 *reta_conf,
                         uint16_t reta_size)
@@ -1998,8 +2166,8 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_sp_vport_update_params vport_update_params;
        struct ecore_rss_params *params;
+       uint16_t i, j, idx, fid, shift;
        struct ecore_hwfn *p_hwfn;
-       uint16_t i, idx, shift;
        uint8_t entry;
        int rc = 0;
 
@@ -2010,40 +2178,36 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
        }
 
        memset(&vport_update_params, 0, sizeof(vport_update_params));
-       params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
-                            RTE_CACHE_LINE_SIZE);
+       params = rte_zmalloc("qede_rss", sizeof(*params), RTE_CACHE_LINE_SIZE);
        if (params == NULL) {
                DP_ERR(edev, "failed to allocate memory\n");
                return -ENOMEM;
        }
 
-       for (i = 0; i < reta_size; i++) {
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
-               if (reta_conf[idx].mask & (1ULL << shift)) {
-                       entry = reta_conf[idx].reta[shift];
-                       /* Pass rxq handles to ecore */
-                       params->rss_ind_table[i] =
-                                       qdev->fp_array[entry].rxq->handle;
-                       /* Update the local copy for RETA query command */
-                       qdev->rss_ind_table[i] = entry;
-               }
-       }
-
        params->update_rss_ind_table = 1;
        params->rss_table_size_log = 7;
        params->update_rss_config = 1;
 
-       /* Fix up RETA for CMT mode device */
-       if (edev->num_hwfns > 1)
-               qdev->rss_enable = qede_update_rss_parm_cmt(edev,
-                                                           params);
        vport_update_params.vport_id = 0;
        /* Use the current value of rss_enable */
        params->rss_enable = qdev->rss_enable;
        vport_update_params.rss_params = params;
 
        for_each_hwfn(edev, i) {
+               for (j = 0; j < reta_size; j++) {
+                       idx = j / RTE_RETA_GROUP_SIZE;
+                       shift = j % RTE_RETA_GROUP_SIZE;
+                       if (reta_conf[idx].mask & (1ULL << shift)) {
+                               entry = reta_conf[idx].reta[shift];
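+                               /* Each engine owns every num_hwfns-th
+                                * queue, so remap the RETA entry onto
+                                * this hwfn's fastpath slot.
+                                */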
+                               fid = entry * edev->num_hwfns + i;
+                               /* Pass rxq handles to ecore */
+                               params->rss_ind_table[j] =
+                                               qdev->fp_array[fid].rxq->handle;
+                               /* Update the local copy for RETA query cmd */
+                               qdev->rss_ind_table[j] = entry;
+                       }
+               }
+
                p_hwfn = &edev->hwfns[i];
                vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
@@ -2094,16 +2258,24 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_dev_info dev_info = {0};
        struct qede_fastpath *fp;
+       uint32_t max_rx_pkt_len;
        uint32_t frame_size;
-       uint16_t rx_buf_size;
        uint16_t bufsz;
-       int i;
+       bool restart = false;
+       int i, rc;
 
        PMD_INIT_FUNC_TRACE(edev);
-       qede_dev_info_get(dev, &dev_info);
-       frame_size = mtu + QEDE_ETH_OVERHEAD;
-       if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
-               DP_ERR(edev, "MTU %u out of range\n", mtu);
+       rc = qede_dev_info_get(dev, &dev_info);
+       if (rc != 0) {
+               DP_ERR(edev, "Error during getting ethernet device info\n");
+               return rc;
+       }
+       max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
+       frame_size = max_rx_pkt_len;
+       if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) {
+               DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
+                      mtu, dev_info.max_rx_pktlen - RTE_ETHER_HDR_LEN -
+                      QEDE_ETH_OVERHEAD);
                return -EINVAL;
        }
        if (!dev->data->scattered_rx &&
@@ -2117,325 +2289,66 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
         */
        dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
        dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
-       qede_dev_stop(dev);
+       if (dev->data->dev_started) {
+               dev->data->dev_started = 0;
+               qede_dev_stop(dev);
+               restart = true;
+       }
        rte_delay_ms(1000);
-       qdev->mtu = mtu;
+       qdev->new_mtu = mtu;
+
        /* Fix up RX buf size for all queues of the port */
-       for_each_rss(i) {
+       for (i = 0; i < qdev->num_rx_queues; i++) {
                fp = &qdev->fp_array[i];
-               bufsz = (uint16_t)rte_pktmbuf_data_room_size(
-                       fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
-               if (dev->data->scattered_rx)
-                       rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
-               else
-                       rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
-               rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
-               fp->rxq->rx_buf_size = rx_buf_size;
-               DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
-       }
-       qede_dev_start(dev);
-       if (frame_size > ETHER_MAX_LEN)
-               dev->data->dev_conf.rxmode.jumbo_frame = 1;
-       else
-               dev->data->dev_conf.rxmode.jumbo_frame = 0;
-       /* update max frame size */
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-       /* Reassign back */
-       dev->rx_pkt_burst = qede_recv_pkts;
-       dev->tx_pkt_burst = qede_xmit_pkts;
-
-       return 0;
-}
-
-static int
-qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
-                      struct rte_eth_udp_tunnel *tunnel_udp,
-                      bool add)
-{
-       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
-       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       struct ecore_tunnel_info tunn; /* @DPDK */
-       struct ecore_hwfn *p_hwfn;
-       int rc, i;
-
-       PMD_INIT_FUNC_TRACE(edev);
-
-       memset(&tunn, 0, sizeof(tunn));
-       if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
-               tunn.vxlan_port.b_update_port = true;
-               tunn.vxlan_port.port = (add) ? tunnel_udp->udp_port :
-                                                 QEDE_VXLAN_DEF_PORT;
-               for_each_hwfn(edev, i) {
-                       p_hwfn = &edev->hwfns[i];
-                       struct ecore_ptt *p_ptt = IS_PF(edev) ?
-                              ecore_ptt_acquire(p_hwfn) : NULL;
-                       rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
-                                               ECORE_SPQ_MODE_CB, NULL);
-                       if (rc != ECORE_SUCCESS) {
-                               DP_ERR(edev, "Unable to config UDP port %u\n",
-                                      tunn.vxlan_port.port);
-                               if (IS_PF(edev))
-                                       ecore_ptt_release(p_hwfn, p_ptt);
+               if (fp->rxq != NULL) {
+                       bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+                               fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+                       /* Cache-align the mbuf size to simplify the
+                        * rx_buf_size calculation.
+                        */
+                       bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
+                       rc = qede_calc_rx_buf_size(dev, bufsz, frame_size);
+                       if (rc < 0)
                                return rc;
-                       }
-               }
-       }
-
-       return 0;
-}
-
-static int
-qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
-                     struct rte_eth_udp_tunnel *tunnel_udp)
-{
-       return qede_conf_udp_dst_port(eth_dev, tunnel_udp, false);
-}
-
-static int
-qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
-                     struct rte_eth_udp_tunnel *tunnel_udp)
-{
-       return qede_conf_udp_dst_port(eth_dev, tunnel_udp, true);
-}
-
-static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
-                                      uint32_t *clss, char *str)
-{
-       uint16_t j;
-       *clss = MAX_ECORE_TUNN_CLSS;
 
-       for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
-               if (filter == qede_tunn_types[j].rte_filter_type) {
-                       *type = qede_tunn_types[j].qede_type;
-                       *clss = qede_tunn_types[j].qede_tunn_clss;
-                       strcpy(str, qede_tunn_types[j].string);
-                       return;
+                       fp->rxq->rx_buf_size = rc;
                }
        }
-}
+       if (max_rx_pkt_len > RTE_ETHER_MAX_LEN)
+               dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+       else
+               dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
-static int
-qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
-                             const struct rte_eth_tunnel_filter_conf *conf,
-                             uint32_t type)
-{
-       /* Init commmon ucast params first */
-       qede_set_ucast_cmn_params(ucast);
-
-       /* Copy out the required fields based on classification type */
-       ucast->type = type;
-
-       switch (type) {
-       case ECORE_FILTER_VNI:
-               ucast->vni = conf->tenant_id;
-       break;
-       case ECORE_FILTER_INNER_VLAN:
-               ucast->vlan = conf->inner_vlan;
-       break;
-       case ECORE_FILTER_MAC:
-               memcpy(ucast->mac, conf->outer_mac.addr_bytes,
-                      ETHER_ADDR_LEN);
-       break;
-       case ECORE_FILTER_INNER_MAC:
-               memcpy(ucast->mac, conf->inner_mac.addr_bytes,
-                      ETHER_ADDR_LEN);
-       break;
-       case ECORE_FILTER_MAC_VNI_PAIR:
-               memcpy(ucast->mac, conf->outer_mac.addr_bytes,
-                       ETHER_ADDR_LEN);
-               ucast->vni = conf->tenant_id;
-       break;
-       case ECORE_FILTER_INNER_MAC_VNI_PAIR:
-               memcpy(ucast->mac, conf->inner_mac.addr_bytes,
-                       ETHER_ADDR_LEN);
-               ucast->vni = conf->tenant_id;
-       break;
-       case ECORE_FILTER_INNER_PAIR:
-               memcpy(ucast->mac, conf->inner_mac.addr_bytes,
-                       ETHER_ADDR_LEN);
-               ucast->vlan = conf->inner_vlan;
-       break;
-       default:
-               return -EINVAL;
+       if (!dev->data->dev_started && restart) {
+               qede_dev_start(dev);
+               dev->data->dev_started = 1;
        }
 
-       return ECORE_SUCCESS;
-}
-
-static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
-                                 enum rte_filter_op filter_op,
-                                 const struct rte_eth_tunnel_filter_conf *conf)
-{
-       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
-       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       struct ecore_tunnel_info tunn;
-       struct ecore_hwfn *p_hwfn;
-       enum ecore_filter_ucast_type type;
-       enum ecore_tunn_clss clss;
-       struct ecore_filter_ucast ucast;
-       char str[80];
-       uint16_t filter_type;
-       int rc, i;
-
-       PMD_INIT_FUNC_TRACE(edev);
-
-       filter_type = conf->filter_type | qdev->vxlan_filter_type;
-       /* First determine if the given filter classification is supported */
-       qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
-       if (clss == MAX_ECORE_TUNN_CLSS) {
-               DP_ERR(edev, "Wrong filter type\n");
-               return -EINVAL;
-       }
-       /* Init tunnel ucast params */
-       rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
-       if (rc != ECORE_SUCCESS) {
-               DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
-                               conf->filter_type);
-               return rc;
-       }
-       DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
-               str, filter_op, ucast.type);
-       switch (filter_op) {
-       case RTE_ETH_FILTER_ADD:
-               ucast.opcode = ECORE_FILTER_ADD;
-
-               /* Skip MAC/VLAN if filter is based on VNI */
-               if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
-                       rc = qede_mac_int_ops(eth_dev, &ucast, 1);
-                       if (rc == 0) {
-                               /* Enable accept anyvlan */
-                               qede_config_accept_any_vlan(qdev, true);
-                       }
-               } else {
-                       rc = qede_ucast_filter(eth_dev, &ucast, 1);
-                       if (rc == 0)
-                               rc = ecore_filter_ucast_cmd(edev, &ucast,
-                                                   ECORE_SPQ_MODE_CB, NULL);
-               }
-
-               if (rc != ECORE_SUCCESS)
-                       return rc;
-
-               qdev->vxlan_filter_type = filter_type;
-
-               DP_INFO(edev, "Enabling VXLAN tunneling\n");
-               qede_set_cmn_tunn_param(&tunn, clss, true, true);
-               for_each_hwfn(edev, i) {
-                       p_hwfn = &edev->hwfns[i];
-                       struct ecore_ptt *p_ptt = IS_PF(edev) ?
-                              ecore_ptt_acquire(p_hwfn) : NULL;
-                       rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
-                               &tunn, ECORE_SPQ_MODE_CB, NULL);
-                       if (rc != ECORE_SUCCESS) {
-                               DP_ERR(edev, "Failed to update tunn_clss %u\n",
-                                      tunn.vxlan.tun_cls);
-                               if (IS_PF(edev))
-                                       ecore_ptt_release(p_hwfn, p_ptt);
-                       }
-               }
-               qdev->num_tunn_filters++; /* Filter added successfully */
-       break;
-       case RTE_ETH_FILTER_DELETE:
-               ucast.opcode = ECORE_FILTER_REMOVE;
-
-               if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
-                       rc = qede_mac_int_ops(eth_dev, &ucast, 0);
-               } else {
-                       rc = qede_ucast_filter(eth_dev, &ucast, 0);
-                       if (rc == 0)
-                               rc = ecore_filter_ucast_cmd(edev, &ucast,
-                                                   ECORE_SPQ_MODE_CB, NULL);
-               }
-               if (rc != ECORE_SUCCESS)
-                       return rc;
+       /* update max frame size */
+       dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
 
-               qdev->vxlan_filter_type = filter_type;
-               qdev->num_tunn_filters--;
-
-               /* Disable VXLAN if VXLAN filters become 0 */
-               if (qdev->num_tunn_filters == 0) {
-                       DP_INFO(edev, "Disabling VXLAN tunneling\n");
-
-                       /* Use 0 as tunnel mode */
-                       qede_set_cmn_tunn_param(&tunn, clss, false, true);
-                       for_each_hwfn(edev, i) {
-                               p_hwfn = &edev->hwfns[i];
-                               struct ecore_ptt *p_ptt = IS_PF(edev) ?
-                                      ecore_ptt_acquire(p_hwfn) : NULL;
-                               rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
-                                       &tunn, ECORE_SPQ_MODE_CB, NULL);
-                               if (rc != ECORE_SUCCESS) {
-                                       DP_ERR(edev,
-                                               "Failed to update tunn_clss %u\n",
-                                               tunn.vxlan.tun_cls);
-                                       if (IS_PF(edev))
-                                               ecore_ptt_release(p_hwfn,
-                                                                 p_ptt);
-                                       break;
-                               }
-                       }
-               }
-       break;
-       default:
-               DP_ERR(edev, "Unsupported operation %d\n", filter_op);
-               return -EINVAL;
+       /* Reassign back */
+       qede_assign_rxtx_handlers(dev);
+       if (ECORE_IS_CMT(edev)) {
+               dev->rx_pkt_burst = qede_recv_pkts_cmt;
+               dev->tx_pkt_burst = qede_xmit_pkts_cmt;
+       } else {
+               dev->rx_pkt_burst = qede_recv_pkts;
+               dev->tx_pkt_burst = qede_xmit_pkts;
        }
-       DP_INFO(edev, "Current VXLAN filters %d\n", qdev->num_tunn_filters);
-
        return 0;
 }
 
-int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
-                        enum rte_filter_type filter_type,
-                        enum rte_filter_op filter_op,
-                        void *arg)
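+/* Recover the port by fully uninitializing and reinitializing the ethdev,
+ * releasing and reacquiring all associated resources.
+ */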
+static int
+qede_dev_reset(struct rte_eth_dev *dev)
 {
-       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
-       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       struct rte_eth_tunnel_filter_conf *filter_conf =
-                       (struct rte_eth_tunnel_filter_conf *)arg;
-
-       switch (filter_type) {
-       case RTE_ETH_FILTER_TUNNEL:
-               switch (filter_conf->tunnel_type) {
-               case RTE_TUNNEL_TYPE_VXLAN:
-                       DP_INFO(edev,
-                               "Packet steering to the specified Rx queue"
-                               " is not supported with VXLAN tunneling");
-                       return(qede_vxlan_tunn_config(eth_dev, filter_op,
-                                                     filter_conf));
-               /* Place holders for future tunneling support */
-               case RTE_TUNNEL_TYPE_GENEVE:
-               case RTE_TUNNEL_TYPE_TEREDO:
-               case RTE_TUNNEL_TYPE_NVGRE:
-               case RTE_TUNNEL_TYPE_IP_IN_GRE:
-               case RTE_L2_TUNNEL_TYPE_E_TAG:
-                       DP_ERR(edev, "Unsupported tunnel type %d\n",
-                               filter_conf->tunnel_type);
-                       return -EINVAL;
-               case RTE_TUNNEL_TYPE_NONE:
-               default:
-                       return 0;
-               }
-               break;
-       case RTE_ETH_FILTER_FDIR:
-               return qede_fdir_filter_conf(eth_dev, filter_op, arg);
-       case RTE_ETH_FILTER_NTUPLE:
-               return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
-       case RTE_ETH_FILTER_MACVLAN:
-       case RTE_ETH_FILTER_ETHERTYPE:
-       case RTE_ETH_FILTER_FLEXIBLE:
-       case RTE_ETH_FILTER_SYN:
-       case RTE_ETH_FILTER_HASH:
-       case RTE_ETH_FILTER_L2_TUNNEL:
-       case RTE_ETH_FILTER_MAX:
-       default:
-               DP_ERR(edev, "Unsupported filter type %d\n",
-                       filter_type);
-               return -EINVAL;
-       }
+       int ret;
 
-       return 0;
+       ret = qede_eth_dev_uninit(dev);
+       if (ret)
+               return ret;
+
+       return qede_eth_dev_init(dev);
 }
 
 static const struct eth_dev_ops qede_eth_dev_ops = {
@@ -2443,9 +2356,11 @@ static const struct eth_dev_ops qede_eth_dev_ops = {
        .dev_infos_get = qede_dev_info_get,
        .rx_queue_setup = qede_rx_queue_setup,
        .rx_queue_release = qede_rx_queue_release,
+       .rx_descriptor_status = qede_rx_descriptor_status,
        .tx_queue_setup = qede_tx_queue_setup,
        .tx_queue_release = qede_tx_queue_release,
        .dev_start = qede_dev_start,
+       .dev_reset = qede_dev_reset,
        .dev_set_link_up = qede_dev_set_link_up,
        .dev_set_link_down = qede_dev_set_link_down,
        .link_update = qede_link_update,
@@ -2453,6 +2368,7 @@ static const struct eth_dev_ops qede_eth_dev_ops = {
        .promiscuous_disable = qede_promiscuous_disable,
        .allmulticast_enable = qede_allmulticast_enable,
        .allmulticast_disable = qede_allmulticast_disable,
+       .set_mc_addr_list = qede_set_mc_addr_list,
        .dev_stop = qede_dev_stop,
        .dev_close = qede_dev_close,
        .stats_get = qede_get_stats,
@@ -2483,9 +2399,11 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
        .dev_infos_get = qede_dev_info_get,
        .rx_queue_setup = qede_rx_queue_setup,
        .rx_queue_release = qede_rx_queue_release,
+       .rx_descriptor_status = qede_rx_descriptor_status,
        .tx_queue_setup = qede_tx_queue_setup,
        .tx_queue_release = qede_tx_queue_release,
        .dev_start = qede_dev_start,
+       .dev_reset = qede_dev_reset,
        .dev_set_link_up = qede_dev_set_link_up,
        .dev_set_link_down = qede_dev_set_link_down,
        .link_update = qede_link_update,
@@ -2493,6 +2411,7 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
        .promiscuous_disable = qede_promiscuous_disable,
        .allmulticast_enable = qede_allmulticast_enable,
        .allmulticast_disable = qede_allmulticast_disable,
+       .set_mc_addr_list = qede_set_mc_addr_list,
        .dev_stop = qede_dev_stop,
        .dev_close = qede_dev_close,
        .stats_get = qede_get_stats,
@@ -2508,6 +2427,11 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
        .reta_update  = qede_rss_reta_update,
        .reta_query  = qede_rss_reta_query,
        .mtu_set = qede_set_mtu,
+       .udp_tunnel_port_add = qede_udp_dst_port_add,
+       .udp_tunnel_port_del = qede_udp_dst_port_del,
+       .mac_addr_add = qede_mac_addr_add,
+       .mac_addr_remove = qede_mac_addr_remove,
+       .mac_addr_set = qede_mac_addr_set,
 };
 
 static void qede_update_pf_params(struct ecore_dev *edev)
@@ -2530,16 +2454,18 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
        struct qed_slowpath_params params;
        static bool do_once = true;
        uint8_t bulletin_change;
-       uint8_t vf_mac[ETHER_ADDR_LEN];
+       uint8_t vf_mac[RTE_ETHER_ADDR_LEN];
        uint8_t is_mac_forced;
        bool is_mac_exist;
        /* Fix up ecore debug level */
        uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
        uint8_t dp_level = ECORE_LEVEL_VERBOSE;
+       uint32_t int_mode;
        int rc;
 
        /* Extract key data structures */
        adapter = eth_dev->data->dev_private;
+       adapter->ethdev = eth_dev;
        edev = &adapter->edev;
        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        pci_addr = pci_dev->addr;
@@ -2550,10 +2476,6 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
                 pci_addr.bus, pci_addr.devid, pci_addr.function,
                 eth_dev->data->port_id);
 
-       eth_dev->rx_pkt_burst = qede_recv_pkts;
-       eth_dev->tx_pkt_burst = qede_xmit_pkts;
-       eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
-
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                DP_ERR(edev, "Skipping device init from secondary process\n");
                return 0;
@@ -2568,7 +2490,8 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
        qed_ops = qed_get_eth_ops();
        if (!qed_ops) {
                DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
-               return -EINVAL;
+               rc = -EINVAL;
+               goto err;
        }
 
        DP_INFO(edev, "Starting qede probe\n");
@@ -2576,19 +2499,36 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
                                    dp_level, is_vf);
        if (rc != 0) {
                DP_ERR(edev, "qede probe failed rc %d\n", rc);
-               return -ENODEV;
+               rc = -ENODEV;
+               goto err;
        }
        qede_update_pf_params(edev);
-       rte_intr_callback_register(&pci_dev->intr_handle,
-                                  qede_interrupt_handler, (void *)eth_dev);
+
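+       /* Derive the interrupt mode from the bound kernel driver: UIO and
+        * VFIO legacy handles deliver INTx (INTA), everything else gets
+        * MSI-X.
+        */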
+       switch (pci_dev->intr_handle.type) {
+       case RTE_INTR_HANDLE_UIO_INTX:
+       case RTE_INTR_HANDLE_VFIO_LEGACY:
+               int_mode = ECORE_INT_MODE_INTA;
+               rte_intr_callback_register(&pci_dev->intr_handle,
+                                          qede_interrupt_handler_intx,
+                                          (void *)eth_dev);
+               break;
+       default:
+               int_mode = ECORE_INT_MODE_MSIX;
+               rte_intr_callback_register(&pci_dev->intr_handle,
+                                          qede_interrupt_handler,
+                                          (void *)eth_dev);
+       }
+
        if (rte_intr_enable(&pci_dev->intr_handle)) {
                DP_ERR(edev, "rte_intr_enable() failed\n");
-               return -ENODEV;
+               rc = -ENODEV;
+               goto err;
        }
 
        /* Start the Slowpath-process */
        memset(&params, 0, sizeof(struct qed_slowpath_params));
-       params.int_mode = ECORE_INT_MODE_MSIX;
+
+       params.int_mode = int_mode;
        params.drv_major = QEDE_PMD_VERSION_MAJOR;
        params.drv_minor = QEDE_PMD_VERSION_MINOR;
        params.drv_rev = QEDE_PMD_VERSION_REVISION;
@@ -2596,18 +2536,22 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
        strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
                QEDE_PMD_DRV_VER_STR_SIZE);
 
+       qede_assign_rxtx_handlers(eth_dev);
+       eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
+
        /* For CMT mode device do periodic polling for slowpath events.
         * This is required since uio device uses only one MSI-x
         * interrupt vector but we need one for each engine.
         */
-       if (edev->num_hwfns > 1 && IS_PF(edev)) {
-               rc = rte_eal_alarm_set(timer_period * US_PER_S,
+       if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
+               rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
                                       qede_poll_sp_sb_cb,
                                       (void *)eth_dev);
                if (rc != 0) {
                        DP_ERR(edev, "Unable to start periodic"
                                     " timer rc %d\n", rc);
-                       return -EINVAL;
+                       rc = -EINVAL;
+                       goto err;
                }
        }
 
@@ -2616,7 +2560,8 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
                DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
                rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
                                     (void *)eth_dev);
-               return -ENODEV;
+               rc = -ENODEV;
+               goto err;
        }
 
        rc = qed_ops->fill_dev_info(edev, &dev_info);
@@ -2626,11 +2571,17 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
                qed_ops->common->remove(edev);
                rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
                                     (void *)eth_dev);
-               return -ENODEV;
+               rc = -ENODEV;
+               goto err;
        }
 
        qede_alloc_etherdev(adapter, &dev_info);
 
+       if (do_once) {
+               qede_print_adapter_info(adapter);
+               do_once = false;
+       }
+
        adapter->ops->common->set_name(edev, edev->name);
 
        if (!is_vf)
@@ -2643,7 +2594,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 
        /* Allocate memory for storing MAC addr */
        eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
-                                       (ETHER_ADDR_LEN *
+                                       (RTE_ETHER_ADDR_LEN *
                                        adapter->dev_info.num_mac_filters),
                                        RTE_CACHE_LINE_SIZE);
 
@@ -2657,10 +2608,10 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
        }
 
        if (!is_vf) {
-               ether_addr_copy((struct ether_addr *)edev->hwfns[0].
+               rte_ether_addr_copy((struct rte_ether_addr *)edev->hwfns[0].
                                hw_info.hw_mac_addr,
                                &eth_dev->data->mac_addrs[0]);
-               ether_addr_copy(&eth_dev->data->mac_addrs[0],
+               rte_ether_addr_copy(&eth_dev->data->mac_addrs[0],
                                &adapter->primary_mac);
        } else {
                ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
@@ -2671,12 +2622,14 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
                                                ECORE_LEADING_HWFN(edev),
                                                vf_mac,
                                                &is_mac_forced);
-                       if (is_mac_exist && is_mac_forced) {
+                       if (is_mac_exist) {
                                DP_INFO(edev, "VF macaddr received from PF\n");
-                               ether_addr_copy((struct ether_addr *)&vf_mac,
-                                               &eth_dev->data->mac_addrs[0]);
-                               ether_addr_copy(&eth_dev->data->mac_addrs[0],
-                                               &adapter->primary_mac);
+                               rte_ether_addr_copy(
+                                       (struct rte_ether_addr *)&vf_mac,
+                                       &eth_dev->data->mac_addrs[0]);
+                               rte_ether_addr_copy(
+                                       &eth_dev->data->mac_addrs[0],
+                                       &adapter->primary_mac);
                        } else {
                                DP_ERR(edev, "No VF macaddr assigned\n");
                        }
@@ -2685,23 +2638,39 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 
        eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
 
-       if (do_once) {
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
-               qede_print_adapter_info(adapter);
-#endif
-               do_once = false;
-       }
+       /* Bring-up the link */
+       qede_dev_set_link_state(eth_dev, true);
 
        adapter->num_tx_queues = 0;
        adapter->num_rx_queues = 0;
-       SLIST_INIT(&adapter->fdir_info.fdir_list_head);
+       SLIST_INIT(&adapter->arfs_info.arfs_list_head);
        SLIST_INIT(&adapter->vlan_list_head);
        SLIST_INIT(&adapter->uc_list_head);
-       adapter->mtu = ETHER_MTU;
-       adapter->new_mtu = ETHER_MTU;
-       if (!is_vf)
-               if (qede_start_vport(adapter, adapter->mtu))
-                       return -1;
+       SLIST_INIT(&adapter->mc_list_head);
+       adapter->mtu = RTE_ETHER_MTU;
+       adapter->vport_started = false;
+
+       /* VF tunnel offloads is enabled by default in PF driver */
+       adapter->vxlan.num_filters = 0;
+       adapter->geneve.num_filters = 0;
+       adapter->ipgre.num_filters = 0;
+       if (is_vf) {
+               adapter->vxlan.enable = true;
+               adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
+                                            ETH_TUNNEL_FILTER_IVLAN;
+               adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
+               adapter->geneve.enable = true;
+               adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
+                                             ETH_TUNNEL_FILTER_IVLAN;
+               adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
+               adapter->ipgre.enable = true;
+               adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
+                                            ETH_TUNNEL_FILTER_IVLAN;
+       } else {
+               adapter->vxlan.enable = false;
+               adapter->geneve.enable = false;
+               adapter->ipgre.enable = false;
+       }
 
        DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
                adapter->primary_mac.addr_bytes[0],
@@ -2714,6 +2683,13 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
        DP_INFO(edev, "Device initialized\n");
 
        return 0;
+
+err:
+       if (do_once) {
+               qede_print_adapter_info(adapter);
+               do_once = false;
+       }
+       return rc;
 }
 
 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
@@ -2728,12 +2704,10 @@ static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
 
 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
 {
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
 
        PMD_INIT_FUNC_TRACE(edev);
-#endif
 
        /* only uninitialize in the primary process */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -2746,11 +2720,6 @@ static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;
 
-       if (eth_dev->data->mac_addrs)
-               rte_free(eth_dev->data->mac_addrs);
-
-       eth_dev->data->mac_addrs = NULL;
-
        return 0;
 }
 
@@ -2857,3 +2826,13 @@ RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
+
+RTE_INIT(qede_init_log)
+{
+       qede_logtype_init = rte_log_register("pmd.net.qede.init");
+       if (qede_logtype_init >= 0)
+               rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE);
+       qede_logtype_driver = rte_log_register("pmd.net.qede.driver");
+       if (qede_logtype_driver >= 0)
+               rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE);
+}