net/qede: fix default config option
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index c28a2bc..6f5ba2a 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -125,143 +125,199 @@ struct rte_qede_xstats_name_off {
 };
 
 static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
-       {"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)},
+       {"rx_unicast_bytes",
+               offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
        {"rx_multicast_bytes",
-               offsetof(struct ecore_eth_stats, rx_mcast_bytes)},
+               offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
        {"rx_broadcast_bytes",
-               offsetof(struct ecore_eth_stats, rx_bcast_bytes)},
-       {"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)},
+               offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
+       {"rx_unicast_packets",
+               offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
        {"rx_multicast_packets",
-               offsetof(struct ecore_eth_stats, rx_mcast_pkts)},
+               offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
        {"rx_broadcast_packets",
-               offsetof(struct ecore_eth_stats, rx_bcast_pkts)},
+               offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},
 
-       {"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)},
+       {"tx_unicast_bytes",
+               offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
        {"tx_multicast_bytes",
-               offsetof(struct ecore_eth_stats, tx_mcast_bytes)},
+               offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
        {"tx_broadcast_bytes",
-               offsetof(struct ecore_eth_stats, tx_bcast_bytes)},
-       {"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)},
+               offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
+       {"tx_unicast_packets",
+               offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
        {"tx_multicast_packets",
-               offsetof(struct ecore_eth_stats, tx_mcast_pkts)},
+               offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
        {"tx_broadcast_packets",
-               offsetof(struct ecore_eth_stats, tx_bcast_pkts)},
+               offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},
 
        {"rx_64_byte_packets",
-               offsetof(struct ecore_eth_stats, rx_64_byte_packets)},
+               offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
        {"rx_65_to_127_byte_packets",
-               offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)},
+               offsetof(struct ecore_eth_stats_common,
+                        rx_65_to_127_byte_packets)},
        {"rx_128_to_255_byte_packets",
-               offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)},
+               offsetof(struct ecore_eth_stats_common,
+                        rx_128_to_255_byte_packets)},
        {"rx_256_to_511_byte_packets",
-               offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)},
+               offsetof(struct ecore_eth_stats_common,
+                        rx_256_to_511_byte_packets)},
        {"rx_512_to_1023_byte_packets",
-               offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)},
+               offsetof(struct ecore_eth_stats_common,
+                        rx_512_to_1023_byte_packets)},
        {"rx_1024_to_1518_byte_packets",
-               offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)},
-       {"rx_1519_to_1522_byte_packets",
-               offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)},
-       {"rx_1519_to_2047_byte_packets",
-               offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)},
-       {"rx_2048_to_4095_byte_packets",
-               offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)},
-       {"rx_4096_to_9216_byte_packets",
-               offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)},
-       {"rx_9217_to_16383_byte_packets",
-               offsetof(struct ecore_eth_stats,
-                        rx_9217_to_16383_byte_packets)},
+               offsetof(struct ecore_eth_stats_common,
+                        rx_1024_to_1518_byte_packets)},
        {"tx_64_byte_packets",
-               offsetof(struct ecore_eth_stats, tx_64_byte_packets)},
+               offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
        {"tx_65_to_127_byte_packets",
-               offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)},
+               offsetof(struct ecore_eth_stats_common,
+                        tx_65_to_127_byte_packets)},
        {"tx_128_to_255_byte_packets",
-               offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)},
+               offsetof(struct ecore_eth_stats_common,
+                        tx_128_to_255_byte_packets)},
        {"tx_256_to_511_byte_packets",
-               offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)},
+               offsetof(struct ecore_eth_stats_common,
+                        tx_256_to_511_byte_packets)},
        {"tx_512_to_1023_byte_packets",
-               offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)},
+               offsetof(struct ecore_eth_stats_common,
+                        tx_512_to_1023_byte_packets)},
        {"tx_1024_to_1518_byte_packets",
-               offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)},
-       {"trx_1519_to_1522_byte_packets",
-               offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)},
-       {"tx_2048_to_4095_byte_packets",
-               offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)},
-       {"tx_4096_to_9216_byte_packets",
-               offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)},
-       {"tx_9217_to_16383_byte_packets",
-               offsetof(struct ecore_eth_stats,
-                        tx_9217_to_16383_byte_packets)},
+               offsetof(struct ecore_eth_stats_common,
+                        tx_1024_to_1518_byte_packets)},
 
        {"rx_mac_crtl_frames",
-               offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)},
+               offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
        {"tx_mac_control_frames",
-               offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)},
-       {"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)},
-       {"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)},
+               offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
+       {"rx_pause_frames",
+               offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
+       {"tx_pause_frames",
+               offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
        {"rx_priority_flow_control_frames",
-               offsetof(struct ecore_eth_stats, rx_pfc_frames)},
+               offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
        {"tx_priority_flow_control_frames",
-               offsetof(struct ecore_eth_stats, tx_pfc_frames)},
+               offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},
 
-       {"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)},
-       {"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)},
+       {"rx_crc_errors",
+               offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
+       {"rx_align_errors",
+               offsetof(struct ecore_eth_stats_common, rx_align_errors)},
        {"rx_carrier_errors",
-               offsetof(struct ecore_eth_stats, rx_carrier_errors)},
+               offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
        {"rx_oversize_packet_errors",
-               offsetof(struct ecore_eth_stats, rx_oversize_packets)},
-       {"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)},
+               offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
+       {"rx_jabber_errors",
+               offsetof(struct ecore_eth_stats_common, rx_jabbers)},
        {"rx_undersize_packet_errors",
-               offsetof(struct ecore_eth_stats, rx_undersize_packets)},
-       {"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)},
+               offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
+       {"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
        {"rx_host_buffer_not_available",
-               offsetof(struct ecore_eth_stats, no_buff_discards)},
+               offsetof(struct ecore_eth_stats_common, no_buff_discards)},
        /* Number of packets discarded because they are bigger than MTU */
        {"rx_packet_too_big_discards",
-               offsetof(struct ecore_eth_stats, packet_too_big_discard)},
+               offsetof(struct ecore_eth_stats_common,
+                        packet_too_big_discard)},
        {"rx_ttl_zero_discards",
-               offsetof(struct ecore_eth_stats, ttl0_discard)},
+               offsetof(struct ecore_eth_stats_common, ttl0_discard)},
        {"rx_multi_function_tag_filter_discards",
-               offsetof(struct ecore_eth_stats, mftag_filter_discards)},
+               offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
        {"rx_mac_filter_discards",
-               offsetof(struct ecore_eth_stats, mac_filter_discards)},
+               offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
        {"rx_hw_buffer_truncates",
-               offsetof(struct ecore_eth_stats, brb_truncates)},
+               offsetof(struct ecore_eth_stats_common, brb_truncates)},
        {"rx_hw_buffer_discards",
-               offsetof(struct ecore_eth_stats, brb_discards)},
-       {"tx_lpi_entry_count",
-               offsetof(struct ecore_eth_stats, tx_lpi_entry_count)},
-       {"tx_total_collisions",
-               offsetof(struct ecore_eth_stats, tx_total_collisions)},
+               offsetof(struct ecore_eth_stats_common, brb_discards)},
        {"tx_error_drop_packets",
-               offsetof(struct ecore_eth_stats, tx_err_drop_pkts)},
+               offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},
 
-       {"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)},
+       {"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
        {"rx_mac_unicast_packets",
-               offsetof(struct ecore_eth_stats, rx_mac_uc_packets)},
+               offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
        {"rx_mac_multicast_packets",
-               offsetof(struct ecore_eth_stats, rx_mac_mc_packets)},
+               offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
        {"rx_mac_broadcast_packets",
-               offsetof(struct ecore_eth_stats, rx_mac_bc_packets)},
+               offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
        {"rx_mac_frames_ok",
-               offsetof(struct ecore_eth_stats, rx_mac_frames_ok)},
-       {"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)},
+               offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
+       {"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
        {"tx_mac_unicast_packets",
-               offsetof(struct ecore_eth_stats, tx_mac_uc_packets)},
+               offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
        {"tx_mac_multicast_packets",
-               offsetof(struct ecore_eth_stats, tx_mac_mc_packets)},
+               offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
        {"tx_mac_broadcast_packets",
-               offsetof(struct ecore_eth_stats, tx_mac_bc_packets)},
+               offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},
 
        {"lro_coalesced_packets",
-               offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)},
+               offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
        {"lro_coalesced_events",
-               offsetof(struct ecore_eth_stats, tpa_coalesced_events)},
+               offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
        {"lro_aborts_num",
-               offsetof(struct ecore_eth_stats, tpa_aborts_num)},
+               offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
        {"lro_not_coalesced_packets",
-               offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)},
+               offsetof(struct ecore_eth_stats_common,
+                        tpa_not_coalesced_pkts)},
        {"lro_coalesced_bytes",
-               offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
+               offsetof(struct ecore_eth_stats_common,
+                        tpa_coalesced_bytes)},
+};
+
+static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
+       {"rx_1519_to_1522_byte_packets",
+               offsetof(struct ecore_eth_stats, bb) +
+               offsetof(struct ecore_eth_stats_bb,
+                        rx_1519_to_1522_byte_packets)},
+       {"rx_1519_to_2047_byte_packets",
+               offsetof(struct ecore_eth_stats, bb) +
+               offsetof(struct ecore_eth_stats_bb,
+                        rx_1519_to_2047_byte_packets)},
+       {"rx_2048_to_4095_byte_packets",
+               offsetof(struct ecore_eth_stats, bb) +
+               offsetof(struct ecore_eth_stats_bb,
+                        rx_2048_to_4095_byte_packets)},
+       {"rx_4096_to_9216_byte_packets",
+               offsetof(struct ecore_eth_stats, bb) +
+               offsetof(struct ecore_eth_stats_bb,
+                        rx_4096_to_9216_byte_packets)},
+       {"rx_9217_to_16383_byte_packets",
+               offsetof(struct ecore_eth_stats, bb) +
+               offsetof(struct ecore_eth_stats_bb,
+                        rx_9217_to_16383_byte_packets)},
+
+       {"tx_1519_to_2047_byte_packets",
+               offsetof(struct ecore_eth_stats, bb) +
+               offsetof(struct ecore_eth_stats_bb,
+                        tx_1519_to_2047_byte_packets)},
+       {"tx_2048_to_4095_byte_packets",
+               offsetof(struct ecore_eth_stats, bb) +
+               offsetof(struct ecore_eth_stats_bb,
+                        tx_2048_to_4095_byte_packets)},
+       {"tx_4096_to_9216_byte_packets",
+               offsetof(struct ecore_eth_stats, bb) +
+               offsetof(struct ecore_eth_stats_bb,
+                        tx_4096_to_9216_byte_packets)},
+       {"tx_9217_to_16383_byte_packets",
+               offsetof(struct ecore_eth_stats, bb) +
+               offsetof(struct ecore_eth_stats_bb,
+                        tx_9217_to_16383_byte_packets)},
+
+       {"tx_lpi_entry_count",
+               offsetof(struct ecore_eth_stats, bb) +
+               offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
+       {"tx_total_collisions",
+               offsetof(struct ecore_eth_stats, bb) +
+               offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
+};
+
+static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
+       {"rx_1519_to_max_byte_packets",
+               offsetof(struct ecore_eth_stats, ah) +
+               offsetof(struct ecore_eth_stats_ah,
+                        rx_1519_to_max_byte_packets)},
+       {"tx_1519_to_max_byte_packets",
+               offsetof(struct ecore_eth_stats, ah) +
+               offsetof(struct ecore_eth_stats_ah,
+                        tx_1519_to_max_byte_packets)},
 };
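
Each entry in these tables stores a byte offset into struct ecore_eth_stats; the BB/AH tables compose two offsetof() terms so that one generic lookup works for either chip family. A minimal sketch of resolving an entry (assuming edev and an index i are in scope, mirroring the lookup in qede_get_xstats() further down):

	/* Sketch: fetch one BB xstat by its precomputed byte offset.
	 * The same pointer arithmetic serves the common, BB and AH tables.
	 */
	struct ecore_eth_stats stats;

	ecore_get_vport_stats(edev, &stats);
	uint64_t val = *(uint64_t *)((char *)&stats +
				     qede_bb_xstats_strings[i].offset);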
 
 static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
@@ -294,7 +350,6 @@ static void
 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
 {
        rte_memcpy(&qdev->dev_info, info, sizeof(*info));
-       qdev->num_tc = qdev->dev_info.num_tc;
        qdev->ops = qed_ops;
 }
 
@@ -308,9 +363,10 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
 
        DP_INFO(edev, "*********************************\n");
        DP_INFO(edev, " DPDK version:%s\n", rte_version());
-       DP_INFO(edev, " Chip details : %s%d\n",
+       DP_INFO(edev, " Chip details : %s %c%d\n",
                  ECORE_IS_BB(edev) ? "BB" : "AH",
-                 CHIP_REV_IS_A0(edev) ? 0 : 1);
+                 'A' + edev->chip_rev,
+                 (int)edev->chip_metal);
        snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
                 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
        snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
@@ -329,6 +385,186 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
 }
 #endif
 
+static int
+qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
+{
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       struct ecore_sp_vport_start_params params;
+       struct ecore_hwfn *p_hwfn;
+       int rc;
+       int i;
+
+       memset(&params, 0, sizeof(params));
+       params.vport_id = 0;
+       params.mtu = mtu;
+       /* @DPDK - Disable FW placement */
+       params.zero_placement_offset = 1;
+       for_each_hwfn(edev, i) {
+               p_hwfn = &edev->hwfns[i];
+               params.concrete_fid = p_hwfn->hw_info.concrete_fid;
+               params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+               rc = ecore_sp_vport_start(p_hwfn, &params);
+               if (rc != ECORE_SUCCESS) {
+                       DP_ERR(edev, "Start V-PORT failed %d\n", rc);
+                       return rc;
+               }
+       }
+       ecore_reset_vport_stats(edev);
+       DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
+
+       return 0;
+}
+
+static int
+qede_stop_vport(struct ecore_dev *edev)
+{
+       struct ecore_hwfn *p_hwfn;
+       uint8_t vport_id;
+       int rc;
+       int i;
+
+       vport_id = 0;
+       for_each_hwfn(edev, i) {
+               p_hwfn = &edev->hwfns[i];
+               rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
+                                        vport_id);
+               if (rc != ECORE_SUCCESS) {
+                       DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
+                       return rc;
+               }
+       }
+
+       return 0;
+}
+
+/* Activate or deactivate vport via vport-update */
+int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
+{
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       struct ecore_sp_vport_update_params params;
+       struct ecore_hwfn *p_hwfn;
+       uint8_t i;
+       int rc = -1;
+
+       memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+       params.vport_id = 0;
+       params.update_vport_active_rx_flg = 1;
+       params.update_vport_active_tx_flg = 1;
+       params.vport_active_rx_flg = flg;
+       params.vport_active_tx_flg = flg;
+#ifndef RTE_LIBRTE_QEDE_VF_TX_SWITCH
+       if (IS_VF(edev)) {
+               params.update_tx_switching_flg = 1;
+               params.tx_switching_flg = !flg;
+               DP_INFO(edev, "VF tx-switching is disabled\n");
+       }
+#endif
+       for_each_hwfn(edev, i) {
+               p_hwfn = &edev->hwfns[i];
+               params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+               rc = ecore_sp_vport_update(p_hwfn, &params,
+                               ECORE_SPQ_MODE_EBLOCK, NULL);
+               if (rc != ECORE_SUCCESS) {
+                       DP_ERR(edev, "Failed to update vport\n");
+                       break;
+               }
+       }
+       DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");
+
+       return rc;
+}
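
Together with qede_start_vport() and qede_stop_vport() above, this gives the PMD a per-hwfn vport lifecycle. A hedged sketch of the intended call order (error handling elided):

	/* Sketch: vport lifecycle as used by this patch. The vport must be
	 * deactivated before qede_update_mtu() below may be called.
	 */
	qede_start_vport(qdev, mtu);         /* VPORT-START ramrod per hwfn */
	qede_activate_vport(eth_dev, true);  /* vport-update: RX/TX active  */
	/* ... traffic ... */
	qede_activate_vport(eth_dev, false); /* quiesce                     */
	qede_stop_vport(edev);               /* VPORT-STOP ramrod per hwfn  */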
+
+static void
+qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
+                          uint16_t mtu, bool enable)
+{
+       /* Enable LRO in split mode */
+       sge_tpa_params->tpa_ipv4_en_flg = enable;
+       sge_tpa_params->tpa_ipv6_en_flg = enable;
+       sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
+       sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
+       /* set if tpa enable changes */
+       sge_tpa_params->update_tpa_en_flg = 1;
+       /* set if tpa parameters should be handled */
+       sge_tpa_params->update_tpa_param_flg = enable;
+
+       sge_tpa_params->max_buffers_per_cqe = 20;
+       /* Enable TPA in split mode. In this mode each TPA segment
+        * starts on the new BD, so there is one BD per segment.
+        */
+       sge_tpa_params->tpa_pkt_split_flg = 1;
+       sge_tpa_params->tpa_hdr_data_split_flg = 0;
+       sge_tpa_params->tpa_gro_consistent_flg = 0;
+       sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+       sge_tpa_params->tpa_max_size = 0x7FFF;
+       sge_tpa_params->tpa_min_size_to_start = mtu / 2;
+       sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
+}
+
+/* Enable/disable LRO via vport-update */
+int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
+{
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       struct ecore_sp_vport_update_params params;
+       struct ecore_sge_tpa_params tpa_params;
+       struct ecore_hwfn *p_hwfn;
+       int rc;
+       int i;
+
+       memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+       memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
+       qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
+       params.vport_id = 0;
+       params.sge_tpa_params = &tpa_params;
+       for_each_hwfn(edev, i) {
+               p_hwfn = &edev->hwfns[i];
+               params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+               rc = ecore_sp_vport_update(p_hwfn, &params,
+                               ECORE_SPQ_MODE_EBLOCK, NULL);
+               if (rc != ECORE_SUCCESS) {
+                       DP_ERR(edev, "Failed to update LRO\n");
+                       return -1;
+               }
+       }
+       qdev->enable_lro = flg;
+       DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
+
+       return 0;
+}
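
qede_update_sge_tpa_params() above selects split-mode TPA (one BD per aggregated segment) and only starts or continues aggregation for packets of at least mtu/2 bytes. A short usage sketch, assuming the rxmode flag as used elsewhere in this patch:

	/* Sketch: LRO is toggled around start/stop in this patch. */
	if (eth_dev->data->dev_conf.rxmode.enable_lro)
		qede_enable_tpa(eth_dev, true);  /* on start */
	/* ... */
	if (qdev->enable_lro)
		qede_enable_tpa(eth_dev, false); /* on stop  */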
+
+/* Update MTU via vport-update without doing port restart.
+ * The vport must be deactivated before calling this API.
+ */
+int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       struct ecore_sp_vport_update_params params;
+       struct ecore_hwfn *p_hwfn;
+       int rc;
+       int i;
+
+       memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+       params.vport_id = 0;
+       params.mtu = mtu;
+       for_each_hwfn(edev, i) {
+               p_hwfn = &edev->hwfns[i];
+               params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+               rc = ecore_sp_vport_update(p_hwfn, &params,
+                               ECORE_SPQ_MODE_EBLOCK, NULL);
+               if (rc != ECORE_SUCCESS) {
+                       DP_ERR(edev, "Failed to update MTU\n");
+                       return -1;
+               }
+       }
+       DP_INFO(edev, "MTU updated to %u\n", mtu);
+
+       return 0;
+}
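
Since qede_update_mtu() requires a deactivated vport, a runtime MTU change follows a stop/update/start pattern; a sketch under that assumption (new_mtu is a hypothetical variable; compare qede_set_mtu() later in this file):

	/* Sketch: runtime MTU change honoring the deactivation rule above. */
	qede_dev_stop(eth_dev);                  /* deactivates the vport */
	rc = qede_update_mtu(eth_dev, new_mtu);  /* vport-update with MTU */
	qede_dev_start(eth_dev);                 /* reactivates the vport */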
+
 static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
 {
        memset(ucast, 0, sizeof(struct ecore_filter_ucast));
@@ -337,15 +573,90 @@ static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
        /* ucast->assert_on_error = true; - For debug */
 }
 
-static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn,
-                                   uint8_t clss, bool mode, bool mask)
+static int
+qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
+                            enum qed_filter_rx_mode_type type)
 {
-       memset(p_tunn, 0, sizeof(struct ecore_tunnel_info));
-       p_tunn->vxlan.b_update_mode = mode;
-       p_tunn->vxlan.b_mode_enabled = mask;
-       p_tunn->b_update_rx_cls = true;
-       p_tunn->b_update_tx_cls = true;
-       p_tunn->vxlan.tun_cls = clss;
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       struct ecore_filter_accept_flags flags;
+
+       memset(&flags, 0, sizeof(flags));
+
+       flags.update_rx_mode_config = 1;
+       flags.update_tx_mode_config = 1;
+       flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+               ECORE_ACCEPT_MCAST_MATCHED |
+               ECORE_ACCEPT_BCAST;
+
+       flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+               ECORE_ACCEPT_MCAST_MATCHED |
+               ECORE_ACCEPT_BCAST;
+
+       if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
+               flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+               if (IS_VF(edev)) {
+                       flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+                       DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
+               }
+       } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
+               flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
+       } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
+                               QED_FILTER_RX_MODE_TYPE_PROMISC)) {
+               flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
+                       ECORE_ACCEPT_MCAST_UNMATCHED;
+       }
+
+       return ecore_filter_accept_cmd(edev, 0, flags, false, false,
+                       ECORE_SPQ_MODE_CB, NULL);
+}
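
The helper builds ECORE accept flags from the requested mode; promiscuous mode, for instance, merely adds unmatched-unicast acceptance on top of the matched UCAST/MCAST/BCAST defaults. A usage sketch (QED_FILTER_RX_MODE_TYPE_REGULAR is assumed to be the enum's default member):

	/* Sketch: how the ethdev promiscuous callbacks would drive this. */
	qed_configure_filter_rx_mode(eth_dev, QED_FILTER_RX_MODE_TYPE_PROMISC);
	/* ... */
	qed_configure_filter_rx_mode(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);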
+
+static int
+qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+                 bool enable, bool mask)
+{
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       enum _ecore_status_t rc = ECORE_INVAL;
+       struct ecore_ptt *p_ptt;
+       struct ecore_tunnel_info tunn;
+       struct ecore_hwfn *p_hwfn;
+       int i;
+
+       memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+       tunn.vxlan.b_update_mode = enable;
+       tunn.vxlan.b_mode_enabled = mask;
+       tunn.b_update_rx_cls = true;
+       tunn.b_update_tx_cls = true;
+       tunn.vxlan.tun_cls = clss;
+
+       for_each_hwfn(edev, i) {
+               p_hwfn = &edev->hwfns[i];
+               if (IS_PF(edev)) {
+                       p_ptt = ecore_ptt_acquire(p_hwfn);
+                       if (!p_ptt)
+                               return -EAGAIN;
+               } else {
+                       p_ptt = NULL;
+               }
+               rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
+                               &tunn, ECORE_SPQ_MODE_CB, NULL);
+               if (rc != ECORE_SUCCESS) {
+                       DP_ERR(edev, "Failed to update tunn_clss %u\n",
+                                       tunn.vxlan.tun_cls);
+                       if (IS_PF(edev))
+                               ecore_ptt_release(p_hwfn, p_ptt);
+                       break;
+               }
+       }
+
+       if (rc == ECORE_SUCCESS) {
+               qdev->vxlan.enable = enable;
+               qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
+               DP_INFO(edev, "vxlan is %s\n", enable ? "enabled" : "disabled");
+       }
+
+       return rc;
 }
 
 static int
@@ -363,6 +674,7 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
                SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
                        if ((memcmp(mac_addr, &tmp->mac,
                                    ETHER_ADDR_LEN) == 0) &&
+                            ucast->vni == tmp->vni &&
                             ucast->vlan == tmp->vlan) {
                                DP_ERR(edev, "Unicast MAC is already added"
                                       " with vlan = %u, vni = %u\n",
@@ -565,49 +877,57 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
        qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
 }
 
-static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
+static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
 {
-       struct ecore_dev *edev = &qdev->edev;
-       struct qed_update_vport_params params = {
-               .vport_id = 0,
-               .accept_any_vlan = action,
-               .update_accept_any_vlan_flg = 1,
-       };
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       struct ecore_sp_vport_update_params params;
+       struct ecore_hwfn *p_hwfn;
+       uint8_t i;
        int rc;
 
-       /* Proceed only if action actually needs to be performed */
-       if (qdev->accept_any_vlan == action)
-               return;
-
-       rc = qdev->ops->vport_update(edev, &params);
-       if (rc) {
-               DP_ERR(edev, "Failed to %s accept-any-vlan\n",
-                      action ? "enable" : "disable");
-       } else {
-               DP_INFO(edev, "%s accept-any-vlan\n",
-                       action ? "enabled" : "disabled");
-               qdev->accept_any_vlan = action;
+       memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+       params.vport_id = 0;
+       params.update_accept_any_vlan_flg = 1;
+       params.accept_any_vlan = flg;
+       for_each_hwfn(edev, i) {
+               p_hwfn = &edev->hwfns[i];
+               params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+               rc = ecore_sp_vport_update(p_hwfn, &params,
+                               ECORE_SPQ_MODE_EBLOCK, NULL);
+               if (rc != ECORE_SUCCESS) {
+                       DP_ERR(edev, "Failed to configure accept-any-vlan\n");
+                       return;
+               }
        }
+
+       DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
 }
 
-static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
+static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
 {
-       struct qed_update_vport_params vport_update_params;
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       struct ecore_sp_vport_update_params params;
+       struct ecore_hwfn *p_hwfn;
+       uint8_t i;
        int rc;
 
-       memset(&vport_update_params, 0, sizeof(vport_update_params));
-       vport_update_params.vport_id = 0;
-       vport_update_params.update_inner_vlan_removal_flg = 1;
-       vport_update_params.inner_vlan_removal_flg = set_stripping;
-       rc = qdev->ops->vport_update(edev, &vport_update_params);
-       if (rc) {
-               DP_ERR(edev, "Update V-PORT failed %d\n", rc);
-               return rc;
+       memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+       params.vport_id = 0;
+       params.update_inner_vlan_removal_flg = 1;
+       params.inner_vlan_removal_flg = flg;
+       for_each_hwfn(edev, i) {
+               p_hwfn = &edev->hwfns[i];
+               params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+               rc = ecore_sp_vport_update(p_hwfn, &params,
+                               ECORE_SPQ_MODE_EBLOCK, NULL);
+               if (rc != ECORE_SUCCESS) {
+                       DP_ERR(edev, "Failed to update vport\n");
+                       return -1;
+               }
        }
-       qdev->vlan_strip_flg = set_stripping;
 
+       DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
        return 0;
 }
 
@@ -701,7 +1021,7 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
        return rc;
 }
 
-static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
@@ -739,31 +1059,6 @@ static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 
        DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
                mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
-}
-
-static int qede_init_vport(struct qede_dev *qdev)
-{
-       struct ecore_dev *edev = &qdev->edev;
-       struct qed_start_vport_params start = {0};
-       int rc;
-
-       start.remove_inner_vlan = 1;
-       start.enable_lro = qdev->enable_lro;
-       start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
-       start.vport_id = 0;
-       start.drop_ttl0 = false;
-       start.clear_stats = 1;
-       start.handle_ptp_pkts = 0;
-
-       rc = qdev->ops->vport_start(edev, &start);
-       if (rc) {
-               DP_ERR(edev, "Start V-PORT failed %d\n", rc);
-               return rc;
-       }
-
-       DP_INFO(edev,
-               "Start vport ramrod passed, vport_id = %d, MTU = %u\n",
-               start.vport_id, ETHER_MTU);
 
        return 0;
 }
@@ -818,33 +1113,128 @@ int qede_config_rss(struct rte_eth_dev *eth_dev)
        return 0;
 }
 
+static void qede_fastpath_start(struct ecore_dev *edev)
+{
+       struct ecore_hwfn *p_hwfn;
+       int i;
+
+       for_each_hwfn(edev, i) {
+               p_hwfn = &edev->hwfns[i];
+               ecore_hw_start_fastpath(p_hwfn);
+       }
+}
+
+static int qede_dev_start(struct rte_eth_dev *eth_dev)
+{
+       struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+       PMD_INIT_FUNC_TRACE(edev);
+
+       /* Update MTU only if it has changed */
+       if (qdev->mtu != qdev->new_mtu) {
+               if (qede_update_mtu(eth_dev, qdev->new_mtu))
+                       goto err;
+               qdev->mtu = qdev->new_mtu;
+       }
+
+       /* Configure TPA parameters */
+       if (rxmode->enable_lro) {
+               if (qede_enable_tpa(eth_dev, true))
+                       return -EINVAL;
+               /* Enable scatter mode for LRO */
+               if (!rxmode->enable_scatter)
+                       eth_dev->data->scattered_rx = 1;
+       }
+
+       /* Start queues */
+       if (qede_start_queues(eth_dev))
+               goto err;
+
+       /* The newer SR-IOV PF driver expects RX/TX queues to be started
+        * before enabling RSS, so RSS configuration is deferred up to this
+        * point. We also want to retain similar behavior in the PF case,
+        * so no PF/VF-specific check is done here.
+        */
+       if (rxmode->mq_mode == ETH_MQ_RX_RSS)
+               if (qede_config_rss(eth_dev))
+                       goto err;
+
+       /* Enable vport */
+       if (qede_activate_vport(eth_dev, true))
+               goto err;
+
+       /* Bring-up the link */
+       qede_dev_set_link_state(eth_dev, true);
+
+       /* Update link status */
+       qede_link_update(eth_dev, 0);
+
+       /* Start/resume traffic */
+       qede_fastpath_start(edev);
+
+       DP_INFO(edev, "Device started\n");
+
+       return 0;
+err:
+       DP_ERR(edev, "Device start fails\n");
+       return -1; /* common error code is < 0 */
+}
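
For context, qede_dev_start() is reached through the generic ethdev start path; a minimal application-side sketch using the public DPDK API (port_id, q, the queue counts, port_conf and mbuf_pool are application-defined):

	/* Sketch: standard call order that lands in qede_dev_start(). */
	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
	for (q = 0; q < nb_rxq; q++)
		rte_eth_rx_queue_setup(port_id, q, 512, rte_socket_id(),
				       NULL, mbuf_pool);
	for (q = 0; q < nb_txq; q++)
		rte_eth_tx_queue_setup(port_id, q, 512, rte_socket_id(), NULL);
	rte_eth_dev_start(port_id); /* -> eth_dev->dev_ops->dev_start */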
+
+static void qede_dev_stop(struct rte_eth_dev *eth_dev)
+{
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+       PMD_INIT_FUNC_TRACE(edev);
+
+       /* Disable vport */
+       if (qede_activate_vport(eth_dev, false))
+               return;
+
+       if (qdev->enable_lro)
+               qede_enable_tpa(eth_dev, false);
+
+       /* Stop queues */
+       qede_stop_queues(eth_dev);
+
+       /* Disable traffic */
+       ecore_hw_stop_fastpath(edev); /* TBD - loop */
+
+       /* Bring the link down */
+       qede_dev_set_link_state(eth_dev, false);
+
+       DP_INFO(edev, "Device is stopped\n");
+}
+
 static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 {
-       struct qede_dev *qdev = eth_dev->data->dev_private;
-       struct ecore_dev *edev = &qdev->edev;
+       struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
-       int rc;
+       int ret;
 
        PMD_INIT_FUNC_TRACE(edev);
 
        /* Check requirements for 100G mode */
-       if (edev->num_hwfns > 1) {
+       if (ECORE_IS_CMT(edev)) {
                if (eth_dev->data->nb_rx_queues < 2 ||
-                   eth_dev->data->nb_tx_queues < 2) {
+                               eth_dev->data->nb_tx_queues < 2) {
                        DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
                        return -EINVAL;
                }
 
                if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
-                   (eth_dev->data->nb_tx_queues % 2 != 0)) {
+                               (eth_dev->data->nb_tx_queues % 2 != 0)) {
                        DP_ERR(edev,
-                                 "100G mode needs even no. of RX/TX queues\n");
+                                       "100G mode needs even no. of RX/TX queues\n");
                        return -EINVAL;
                }
        }
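
The even-queue rule exists because a CMT (100G) device exposes two hwfns behind one port and queues are spread across both engines; an odd count would leave the engines unbalanced. A comment-level sketch of the assumed mapping:

	/* Assumed CMT distribution (not spelled out in this patch):
	 * qid 0,2,4,... -> hwfns[0], qid 1,3,5,... -> hwfns[1],
	 * hence both RX and TX queue counts must be even and >= 2.
	 */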
 
        /* Sanity checks and throw warnings */
-       if (rxmode->enable_scatter == 1)
+       if (rxmode->enable_scatter)
                eth_dev->data->scattered_rx = 1;
 
        if (!rxmode->hw_strip_crc)
@@ -852,83 +1242,69 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 
        if (!rxmode->hw_ip_checksum)
                DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
-                             "in hw\n");
-
-       if (rxmode->enable_lro) {
-               qdev->enable_lro = true;
-               /* Enable scatter mode for LRO */
-               if (!rxmode->enable_scatter)
-                       eth_dev->data->scattered_rx = 1;
-       }
-
-       /* Check for the port restart case */
-       if (qdev->state != QEDE_DEV_INIT) {
-               rc = qdev->ops->vport_stop(edev, 0);
-               if (rc != 0)
-                       return rc;
-               qede_dealloc_fp_resc(eth_dev);
+                               "in hw\n");
+       if (rxmode->header_split)
+               DP_INFO(edev, "Header split enable is not supported\n");
+       if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode ==
+                               ETH_MQ_RX_RSS)) {
+               DP_ERR(edev, "Unsupported multi-queue mode\n");
+               return -ENOTSUP;
        }
+       /* Flow director mode check */
+       if (qede_check_fdir_support(eth_dev))
+               return -ENOTSUP;
 
-       qdev->fp_num_tx = eth_dev->data->nb_tx_queues;
-       qdev->fp_num_rx = eth_dev->data->nb_rx_queues;
-       qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx;
-
-       /* Fastpath status block should be initialized before sending
-        * VPORT-START in the case of VF. Anyway, do it for both VF/PF.
-        */
-       rc = qede_alloc_fp_resc(qdev);
-       if (rc != 0)
-               return rc;
-
-       /* Issue VPORT-START with default config values to allow
-        * other port configurations early on.
+       /* Deallocate resources if held previously. This is needed only if
+        * the queue count has changed from the previous configuration. If it
+        * is going to change, RX/TX queue setup will be called again and the
+        * fastpath pointers will be reinitialized there.
         */
-       rc = qede_init_vport(qdev);
-       if (rc != 0)
-               return rc;
-
-       if (!(rxmode->mq_mode == ETH_MQ_RX_RSS ||
-           rxmode->mq_mode == ETH_MQ_RX_NONE)) {
-               DP_ERR(edev, "Unsupported RSS mode\n");
-               qdev->ops->vport_stop(edev, 0);
+       if (qdev->num_tx_queues != eth_dev->data->nb_tx_queues ||
+           qdev->num_rx_queues != eth_dev->data->nb_rx_queues) {
                qede_dealloc_fp_resc(eth_dev);
-               return -EINVAL;
+               /* Proceed with updated queue count */
+               qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
+               qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
+               if (qede_alloc_fp_resc(qdev))
+                       return -ENOMEM;
        }
 
-       /* Flow director mode check */
-       rc = qede_check_fdir_support(eth_dev);
-       if (rc) {
-               qdev->ops->vport_stop(edev, 0);
-               qede_dealloc_fp_resc(eth_dev);
-               return -EINVAL;
+       /* A VF's MTU has to be set via vport-start, whereas
+        * a PF's MTU can be updated via vport-update.
+        */
+       if (IS_VF(edev)) {
+               if (qede_start_vport(qdev, rxmode->max_rx_pkt_len))
+                       return -1;
+       } else {
+               if (qede_update_mtu(eth_dev, rxmode->max_rx_pkt_len))
+                       return -1;
        }
-       SLIST_INIT(&qdev->fdir_info.fdir_list_head);
 
-       SLIST_INIT(&qdev->vlan_list_head);
+       qdev->mtu = rxmode->max_rx_pkt_len;
+       qdev->new_mtu = qdev->mtu;
 
        /* Enable VLAN offloads by default */
-       qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
-                                      ETH_VLAN_FILTER_MASK |
-                                      ETH_VLAN_EXTEND_MASK);
-
-       qdev->state = QEDE_DEV_CONFIG;
+       ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK  |
+                       ETH_VLAN_FILTER_MASK |
+                       ETH_VLAN_EXTEND_MASK);
+       if (ret)
+               return ret;
 
-       DP_INFO(edev, "Allocated RSS=%d TSS=%d (with CoS=%d)\n",
-               (int)QEDE_RSS_COUNT(qdev), (int)QEDE_TSS_COUNT(qdev),
-               qdev->num_tc);
+       DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
+                       QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
 
        return 0;
 }
 
 /* Info about HW descriptor ring limitations */
 static const struct rte_eth_desc_lim qede_rx_desc_lim = {
-       .nb_max = NUM_RX_BDS_MAX,
+       .nb_max = 0x8000, /* 32K */
        .nb_min = 128,
        .nb_align = 128 /* lowest common multiple */
 };
 
 static const struct rte_eth_desc_lim qede_tx_desc_lim = {
-       .nb_max = NUM_TX_BDS_MAX,
+       .nb_max = 0x8000, /* 32K */
        .nb_min = 256,
        .nb_align = 256,
        .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
@@ -1003,7 +1379,7 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 }
 
 /* return 0 means link status changed, -1 means not changed */
-static int
+int
 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
 {
        struct qede_dev *qdev = eth_dev->data->dev_private;
@@ -1106,44 +1482,34 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       int rc;
 
        PMD_INIT_FUNC_TRACE(edev);
 
-       qede_fdir_dealloc_resc(eth_dev);
-
        /* dev_stop() shall cleanup fp resources in hw but without releasing
         * dma memories and sw structures so that dev_start() can be called
         * by the app without reconfiguration. However, in dev_close() we
         * can release all the resources and device can be brought up newly
         */
-       if (qdev->state != QEDE_DEV_STOP)
+       if (eth_dev->data->dev_started)
                qede_dev_stop(eth_dev);
-       else
-               DP_INFO(edev, "Device is already stopped\n");
-
-       rc = qdev->ops->vport_stop(edev, 0);
-       if (rc != 0)
-               DP_ERR(edev, "Failed to stop VPORT\n");
 
+       qede_stop_vport(edev);
+       qede_fdir_dealloc_resc(eth_dev);
        qede_dealloc_fp_resc(eth_dev);
 
-       qdev->ops->common->slowpath_stop(edev);
+       eth_dev->data->nb_rx_queues = 0;
+       eth_dev->data->nb_tx_queues = 0;
 
+       qdev->ops->common->slowpath_stop(edev);
        qdev->ops->common->remove(edev);
-
        rte_intr_disable(&pci_dev->intr_handle);
-
        rte_intr_callback_unregister(&pci_dev->intr_handle,
                                     qede_interrupt_handler, (void *)eth_dev);
-
-       if (edev->num_hwfns > 1)
+       if (ECORE_IS_CMT(edev))
                rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
-
-       qdev->state = QEDE_DEV_INIT; /* Go back to init state */
 }
 
-static void
+static int
 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
 {
        struct qede_dev *qdev = eth_dev->data->dev_private;
@@ -1153,35 +1519,36 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
        unsigned int rxq_stat_cntrs, txq_stat_cntrs;
        struct qede_tx_queue *txq;
 
-       qdev->ops->get_vport_stats(edev, &stats);
+       ecore_get_vport_stats(edev, &stats);
 
        /* RX Stats */
-       eth_stats->ipackets = stats.rx_ucast_pkts +
-           stats.rx_mcast_pkts + stats.rx_bcast_pkts;
+       eth_stats->ipackets = stats.common.rx_ucast_pkts +
+           stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;
 
-       eth_stats->ibytes = stats.rx_ucast_bytes +
-           stats.rx_mcast_bytes + stats.rx_bcast_bytes;
+       eth_stats->ibytes = stats.common.rx_ucast_bytes +
+           stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;
 
-       eth_stats->ierrors = stats.rx_crc_errors +
-           stats.rx_align_errors +
-           stats.rx_carrier_errors +
-           stats.rx_oversize_packets +
-           stats.rx_jabbers + stats.rx_undersize_packets;
+       eth_stats->ierrors = stats.common.rx_crc_errors +
+           stats.common.rx_align_errors +
+           stats.common.rx_carrier_errors +
+           stats.common.rx_oversize_packets +
+           stats.common.rx_jabbers + stats.common.rx_undersize_packets;
 
-       eth_stats->rx_nombuf = stats.no_buff_discards;
+       eth_stats->rx_nombuf = stats.common.no_buff_discards;
 
-       eth_stats->imissed = stats.mftag_filter_discards +
-           stats.mac_filter_discards +
-           stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;
+       eth_stats->imissed = stats.common.mftag_filter_discards +
+           stats.common.mac_filter_discards +
+           stats.common.no_buff_discards +
+           stats.common.brb_truncates + stats.common.brb_discards;
 
        /* TX stats */
-       eth_stats->opackets = stats.tx_ucast_pkts +
-           stats.tx_mcast_pkts + stats.tx_bcast_pkts;
+       eth_stats->opackets = stats.common.tx_ucast_pkts +
+           stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;
 
-       eth_stats->obytes = stats.tx_ucast_bytes +
-           stats.tx_mcast_bytes + stats.tx_bcast_bytes;
+       eth_stats->obytes = stats.common.tx_ucast_bytes +
+           stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;
 
-       eth_stats->oerrors = stats.tx_err_drop_pkts;
+       eth_stats->oerrors = stats.common.tx_err_drop_pkts;
 
        /* Queue stats */
        rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
@@ -1195,49 +1562,55 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
                       " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
                       " appropriately and retry.\n");
 
-       for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
-               if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
-                       eth_stats->q_ipackets[i] =
-                               *(uint64_t *)(
-                                       ((char *)(qdev->fp_array[(qid)].rxq)) +
-                                       offsetof(struct qede_rx_queue,
-                                       rcv_pkts));
-                       eth_stats->q_errors[i] =
-                               *(uint64_t *)(
-                                       ((char *)(qdev->fp_array[(qid)].rxq)) +
-                                       offsetof(struct qede_rx_queue,
-                                       rx_hw_errors)) +
-                               *(uint64_t *)(
-                                       ((char *)(qdev->fp_array[(qid)].rxq)) +
-                                       offsetof(struct qede_rx_queue,
-                                       rx_alloc_errors));
-                       i++;
-               }
+       for_each_rss(qid) {
+               eth_stats->q_ipackets[i] =
+                       *(uint64_t *)(
+                               ((char *)(qdev->fp_array[qid].rxq)) +
+                               offsetof(struct qede_rx_queue,
+                               rcv_pkts));
+               eth_stats->q_errors[i] =
+                       *(uint64_t *)(
+                               ((char *)(qdev->fp_array[qid].rxq)) +
+                               offsetof(struct qede_rx_queue,
+                               rx_hw_errors)) +
+                       *(uint64_t *)(
+                               ((char *)(qdev->fp_array[qid].rxq)) +
+                               offsetof(struct qede_rx_queue,
+                               rx_alloc_errors));
+               i++;
                if (i == rxq_stat_cntrs)
                        break;
        }
 
-       for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
-               if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
-                       txq = qdev->fp_array[(qid)].txqs[0];
-                       eth_stats->q_opackets[j] =
-                               *((uint64_t *)(uintptr_t)
-                                       (((uint64_t)(uintptr_t)(txq)) +
-                                        offsetof(struct qede_tx_queue,
-                                                 xmit_pkts)));
-                       j++;
-               }
+       for_each_tss(qid) {
+               txq = qdev->fp_array[qid].txq;
+               eth_stats->q_opackets[j] =
+                       *((uint64_t *)(uintptr_t)
+                               (((uint64_t)(uintptr_t)(txq)) +
+                                offsetof(struct qede_tx_queue,
+                                         xmit_pkts)));
+               j++;
                if (j == txq_stat_cntrs)
                        break;
        }
+
+       return 0;
 }
 
 static unsigned
 qede_get_xstats_count(struct qede_dev *qdev) {
-       return RTE_DIM(qede_xstats_strings) +
-               (RTE_DIM(qede_rxq_xstats_strings) *
-                RTE_MIN(QEDE_RSS_COUNT(qdev),
-                        RTE_ETHDEV_QUEUE_STAT_CNTRS));
+       if (ECORE_IS_BB(&qdev->edev))
+               return RTE_DIM(qede_xstats_strings) +
+                      RTE_DIM(qede_bb_xstats_strings) +
+                      (RTE_DIM(qede_rxq_xstats_strings) *
+                       RTE_MIN(QEDE_RSS_COUNT(qdev),
+                               RTE_ETHDEV_QUEUE_STAT_CNTRS));
+       else
+               return RTE_DIM(qede_xstats_strings) +
+                      RTE_DIM(qede_ah_xstats_strings) +
+                      (RTE_DIM(qede_rxq_xstats_strings) *
+                       RTE_MIN(QEDE_RSS_COUNT(qdev),
+                               RTE_ETHDEV_QUEUE_STAT_CNTRS));
 }
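
A worked example of the count: on a BB adapter with 4 RSS queues and RTE_ETHDEV_QUEUE_STAT_CNTRS of at least 4, the total is the common table plus the BB table plus four copies of the per-rxq table:

	/* Worked example: BB device, 4 RSS queues, QUEUE_STAT_CNTRS >= 4. */
	unsigned int count = RTE_DIM(qede_xstats_strings)          /* common  */
			   + RTE_DIM(qede_bb_xstats_strings)       /* BB-only */
			   + 4 * RTE_DIM(qede_rxq_xstats_strings); /* per-rxq */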
 
 static int
@@ -1246,6 +1619,7 @@ qede_get_xstats_names(struct rte_eth_dev *dev,
                      __rte_unused unsigned int limit)
 {
        struct qede_dev *qdev = dev->data->dev_private;
+       struct ecore_dev *edev = &qdev->edev;
        const unsigned int stat_cnt = qede_get_xstats_count(qdev);
        unsigned int i, qid, stat_idx = 0;
        unsigned int rxq_stat_cntrs;
@@ -1259,6 +1633,24 @@ qede_get_xstats_names(struct rte_eth_dev *dev,
                        stat_idx++;
                }
 
+               if (ECORE_IS_BB(edev)) {
+                       for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
+                               snprintf(xstats_names[stat_idx].name,
+                                       sizeof(xstats_names[stat_idx].name),
+                                       "%s",
+                                       qede_bb_xstats_strings[i].name);
+                               stat_idx++;
+                       }
+               } else {
+                       for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
+                               snprintf(xstats_names[stat_idx].name,
+                                       sizeof(xstats_names[stat_idx].name),
+                                       "%s",
+                                       qede_ah_xstats_strings[i].name);
+                               stat_idx++;
+                       }
+               }
+
                rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
                                         RTE_ETHDEV_QUEUE_STAT_CNTRS);
                for (qid = 0; qid < rxq_stat_cntrs; qid++) {
@@ -1290,7 +1682,7 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
        if (n < num)
                return num;
 
-       qdev->ops->get_vport_stats(edev, &stats);
+       ecore_get_vport_stats(edev, &stats);
 
        for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
                xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
@@ -1299,13 +1691,31 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                stat_idx++;
        }
 
+       if (ECORE_IS_BB(edev)) {
+               for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
+                       xstats[stat_idx].value =
+                                       *(uint64_t *)(((char *)&stats) +
+                                       qede_bb_xstats_strings[i].offset);
+                       xstats[stat_idx].id = stat_idx;
+                       stat_idx++;
+               }
+       } else {
+               for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
+                       xstats[stat_idx].value =
+                                       *(uint64_t *)(((char *)&stats) +
+                                       qede_ah_xstats_strings[i].offset);
+                       xstats[stat_idx].id = stat_idx;
+                       stat_idx++;
+               }
+       }
+
        rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
                                 RTE_ETHDEV_QUEUE_STAT_CNTRS);
        for (qid = 0; qid < rxq_stat_cntrs; qid++) {
-               if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+               for_each_rss(qid) {
                        for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
                                xstats[stat_idx].value = *(uint64_t *)(
-                                       ((char *)(qdev->fp_array[(qid)].rxq)) +
+                                       ((char *)(qdev->fp_array[qid].rxq)) +
                                         qede_rxq_xstats_strings[i].offset);
                                xstats[stat_idx].id = stat_idx;
                                stat_idx++;
@@ -1447,8 +1857,22 @@ static const uint32_t *
 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
 {
        static const uint32_t ptypes[] = {
+               RTE_PTYPE_L2_ETHER,
+               RTE_PTYPE_L2_ETHER_VLAN,
                RTE_PTYPE_L3_IPV4,
                RTE_PTYPE_L3_IPV6,
+               RTE_PTYPE_L4_TCP,
+               RTE_PTYPE_L4_UDP,
+               RTE_PTYPE_TUNNEL_VXLAN,
+               RTE_PTYPE_L4_FRAG,
+               /* Inner */
+               RTE_PTYPE_INNER_L2_ETHER,
+               RTE_PTYPE_INNER_L2_ETHER_VLAN,
+               RTE_PTYPE_INNER_L3_IPV4,
+               RTE_PTYPE_INNER_L3_IPV6,
+               RTE_PTYPE_INNER_L4_TCP,
+               RTE_PTYPE_INNER_L4_UDP,
+               RTE_PTYPE_INNER_L4_FRAG,
                RTE_PTYPE_UNKNOWN
        };
 
@@ -1653,6 +2077,10 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
        memset(&vport_update_params, 0, sizeof(vport_update_params));
        params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
                             RTE_CACHE_LINE_SIZE);
+       if (params == NULL) {
+               DP_ERR(edev, "failed to allocate memory\n");
+               return -ENOMEM;
+       }
 
        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
@@ -1672,7 +2100,7 @@ int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
        params->update_rss_config = 1;
 
        /* Fix up RETA for CMT mode device */
-       if (edev->num_hwfns > 1)
+       if (ECORE_IS_CMT(edev))
                qdev->rss_enable = qede_update_rss_parm_cmt(edev,
                                                            params);
        vport_update_params.vport_id = 0;
@@ -1723,6 +2151,8 @@ static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
        return 0;
 }
 
 static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
@@ -1756,19 +2186,17 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
        rte_delay_ms(1000);
        qdev->mtu = mtu;
        /* Fix up RX buf size for all queues of the port */
-       for_each_queue(i) {
+       for_each_rss(i) {
                fp = &qdev->fp_array[i];
-               if (fp->type & QEDE_FASTPATH_RX) {
-                       bufsz = (uint16_t)rte_pktmbuf_data_room_size(
-                               fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
-                       if (dev->data->scattered_rx)
-                               rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
-                       else
-                               rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
-                       rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
-                       fp->rxq->rx_buf_size = rx_buf_size;
-                       DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
-               }
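+               /* The mbuf data room bounds the buffer when scattered Rx is
+                * on; otherwise size for the full MTU, then round up to a
+                * cache line.
+                */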
+               bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+                       fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+               if (dev->data->scattered_rx)
+                       rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
+               else
+                       rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
+               rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+               fp->rxq->rx_buf_size = rx_buf_size;
+               DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
        }
        qede_dev_start(dev);
        if (frame_size > ETHER_MAX_LEN)
@@ -1793,25 +2221,76 @@ qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct ecore_tunnel_info tunn; /* @DPDK */
        struct ecore_hwfn *p_hwfn;
+       struct ecore_ptt *p_ptt;
+       uint16_t udp_port;
        int rc, i;
 
        PMD_INIT_FUNC_TRACE(edev);
 
        memset(&tunn, 0, sizeof(tunn));
        if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
+               /* If needed, enable the VxLAN tunnel with the default
+                * MAC/VLAN classification before updating the UDP port.
+                */
+               if (add) {
+                       if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
+                               DP_INFO(edev,
+                                       "UDP port %u was already configured\n",
+                                       tunnel_udp->udp_port);
+                               return ECORE_SUCCESS;
+                       }
+                       /* Enable VXLAN if it was not enabled while adding
+                        * VXLAN filter.
+                        */
+                       if (!qdev->vxlan.enable) {
+                               rc = qede_vxlan_enable(eth_dev,
+                                       ECORE_TUNN_CLSS_MAC_VLAN, true, true);
+                               if (rc != ECORE_SUCCESS) {
+                                       DP_ERR(edev, "Failed to enable VXLAN "
+                                               "prior to updating UDP port\n");
+                                       return rc;
+                               }
+                       }
+                       udp_port = tunnel_udp->udp_port;
+               } else {
+                       if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
+                               DP_ERR(edev, "UDP port %u doesn't exist\n",
+                                       tunnel_udp->udp_port);
+                               return ECORE_INVAL;
+                       }
+                       udp_port = 0;
+               }
+
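+               /* Push the new (or cleared) VxLAN UDP port to the firmware
+                * on every hw-function.
+                */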
                tunn.vxlan_port.b_update_port = true;
-               tunn.vxlan_port.port = (add) ? tunnel_udp->udp_port :
-                                                 QEDE_VXLAN_DEF_PORT;
+               tunn.vxlan_port.port = udp_port;
                for_each_hwfn(edev, i) {
                        p_hwfn = &edev->hwfns[i];
-                       rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
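+                       /* PFs acquire a PTT register window for the update;
+                        * VFs have none and pass NULL.
+                        */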
+                       if (IS_PF(edev)) {
+                               p_ptt = ecore_ptt_acquire(p_hwfn);
+                               if (!p_ptt)
+                                       return -EAGAIN;
+                       } else {
+                               p_ptt = NULL;
+                       }
+                       rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
                                                ECORE_SPQ_MODE_CB, NULL);
                        if (rc != ECORE_SUCCESS) {
                                DP_ERR(edev, "Unable to config UDP port %u\n",
                                       tunn.vxlan_port.port);
+                               if (IS_PF(edev))
+                                       ecore_ptt_release(p_hwfn, p_ptt);
                                return rc;
                        }
                }
+
+               qdev->vxlan.udp_port = udp_port;
+               /* If the request is to delete the UDP port and the number of
+                * VxLAN filters has reached 0, VxLAN offload can be disabled.
+                */
+               if (!add && qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
+                       return qede_vxlan_enable(eth_dev,
+                                       ECORE_TUNN_CLSS_MAC_VLAN, false, true);
        }
 
        return 0;
@@ -1901,33 +2380,38 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       struct ecore_tunnel_info tunn;
-       struct ecore_hwfn *p_hwfn;
        enum ecore_filter_ucast_type type;
-       enum ecore_tunn_clss clss;
-       struct ecore_filter_ucast ucast;
+       enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
+       struct ecore_filter_ucast ucast = {0};
        char str[80];
-       uint16_t filter_type;
-       int rc, i;
+       uint16_t filter_type = 0;
+       int rc;
+
+       PMD_INIT_FUNC_TRACE(edev);
 
-       filter_type = conf->filter_type | qdev->vxlan_filter_type;
-       /* First determine if the given filter classification is supported */
-       qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
-       if (clss == MAX_ECORE_TUNN_CLSS) {
-               DP_ERR(edev, "Wrong filter type\n");
-               return -EINVAL;
-       }
-       /* Init tunnel ucast params */
-       rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
-       if (rc != ECORE_SUCCESS) {
-               DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
-                               conf->filter_type);
-               return rc;
-       }
-       DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
-               str, filter_op, ucast.type);
        switch (filter_op) {
        case RTE_ETH_FILTER_ADD:
+               if (IS_VF(edev))
+                       return qede_vxlan_enable(eth_dev,
+                                       ECORE_TUNN_CLSS_MAC_VLAN, true, true);
+
+               filter_type = conf->filter_type;
+               /* Determine if the given filter classification is supported */
+               qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
+               if (clss == MAX_ECORE_TUNN_CLSS) {
+                       DP_ERR(edev, "Unsupported filter type\n");
+                       return -EINVAL;
+               }
+               /* Init tunnel ucast params */
+               rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
+               if (rc != ECORE_SUCCESS) {
+                       DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
+                              conf->filter_type);
+                       return rc;
+               }
+               DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
+                       str, filter_op, ucast.type);
+
                ucast.opcode = ECORE_FILTER_ADD;
 
                /* Skip MAC/VLAN if filter is based on VNI */
@@ -1947,22 +2431,34 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
                if (rc != ECORE_SUCCESS)
                        return rc;
 
-               qdev->vxlan_filter_type = filter_type;
+               qdev->vxlan.num_filters++;
+               qdev->vxlan.filter_type = filter_type;
+               if (!qdev->vxlan.enable)
+                       return qede_vxlan_enable(eth_dev, clss, true, true);
 
-               DP_INFO(edev, "Enabling VXLAN tunneling\n");
-               qede_set_cmn_tunn_param(&tunn, clss, true, true);
-               for_each_hwfn(edev, i) {
-                       p_hwfn = &edev->hwfns[i];
-                       rc = ecore_sp_pf_update_tunn_cfg(p_hwfn,
-                               &tunn, ECORE_SPQ_MODE_CB, NULL);
-                       if (rc != ECORE_SUCCESS) {
-                               DP_ERR(edev, "Failed to update tunn_clss %u\n",
-                                      tunn.vxlan.tun_cls);
-                       }
-               }
-               qdev->num_tunn_filters++; /* Filter added successfully */
        break;
        case RTE_ETH_FILTER_DELETE:
+               if (IS_VF(edev))
+                       return qede_vxlan_enable(eth_dev,
+                               ECORE_TUNN_CLSS_MAC_VLAN, false, true);
+
+               filter_type = conf->filter_type;
+               /* Determine if the given filter classification is supported */
+               qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
+               if (clss == MAX_ECORE_TUNN_CLSS) {
+                       DP_ERR(edev, "Unsupported filter type\n");
+                       return -EINVAL;
+               }
+               /* Init tunnel ucast params */
+               rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
+               if (rc != ECORE_SUCCESS) {
+                       DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
+                              conf->filter_type);
+                       return rc;
+               }
+               DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
+                       str, filter_op, ucast.type);
+
                ucast.opcode = ECORE_FILTER_REMOVE;
 
                if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
@@ -1976,33 +2472,16 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
                if (rc != ECORE_SUCCESS)
                        return rc;
 
-               qdev->vxlan_filter_type = filter_type;
-               qdev->num_tunn_filters--;
+               qdev->vxlan.num_filters--;
 
                /* Disable VXLAN if VXLAN filters become 0 */
-               if (qdev->num_tunn_filters == 0) {
-                       DP_INFO(edev, "Disabling VXLAN tunneling\n");
-
-                       /* Use 0 as tunnel mode */
-                       qede_set_cmn_tunn_param(&tunn, clss, false, true);
-                       for_each_hwfn(edev, i) {
-                               p_hwfn = &edev->hwfns[i];
-                               rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
-                                       ECORE_SPQ_MODE_CB, NULL);
-                               if (rc != ECORE_SUCCESS) {
-                                       DP_ERR(edev,
-                                               "Failed to update tunn_clss %u\n",
-                                               tunn.vxlan.tun_cls);
-                                       break;
-                               }
-                       }
-               }
+               if (qdev->vxlan.num_filters == 0)
+                       return qede_vxlan_enable(eth_dev, clss, false, true);
        break;
        default:
                DP_ERR(edev, "Unsupported operation %d\n", filter_op);
                return -EINVAL;
        }
-       DP_INFO(edev, "Current VXLAN filters %d\n", qdev->num_tunn_filters);
 
        return 0;
 }
@@ -2130,6 +2609,8 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
        .reta_update  = qede_rss_reta_update,
        .reta_query  = qede_rss_reta_query,
        .mtu_set = qede_set_mtu,
+       .udp_tunnel_port_add = qede_udp_dst_port_add,
+       .udp_tunnel_port_del = qede_udp_dst_port_del,
 };
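+
+/*
+ * These ops are reached through the generic ethdev API. A minimal usage
+ * sketch (assumes an already-configured port_id):
+ *
+ *     struct rte_eth_udp_tunnel t = {
+ *             .udp_port = 4789,                    (IANA VXLAN port)
+ *             .prot_type = RTE_TUNNEL_TYPE_VXLAN,
+ *     };
+ *     rte_eth_dev_udp_tunnel_port_add(port_id, &t);
+ */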
 
 static void qede_update_pf_params(struct ecore_dev *edev)
@@ -2162,6 +2643,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 
        /* Extract key data structures */
        adapter = eth_dev->data->dev_private;
+       adapter->ethdev = eth_dev;
        edev = &adapter->edev;
        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        pci_addr = pci_dev->addr;
@@ -2177,8 +2659,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
        eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
 
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
-               DP_NOTICE(edev, false,
-                         "Skipping device init from secondary process\n");
+               DP_ERR(edev, "Skipping device init from secondary process\n");
                return 0;
        }
 
@@ -2195,20 +2676,15 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
        }
 
        DP_INFO(edev, "Starting qede probe\n");
-
-       rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
-                                   dp_module, dp_level, is_vf);
-
+       rc = qed_ops->common->probe(edev, pci_dev, dp_module,
+                                   dp_level, is_vf);
        if (rc != 0) {
                DP_ERR(edev, "qede probe failed rc %d\n", rc);
                return -ENODEV;
        }
-
        qede_update_pf_params(edev);
-
        rte_intr_callback_register(&pci_dev->intr_handle,
                                   qede_interrupt_handler, (void *)eth_dev);
-
        if (rte_intr_enable(&pci_dev->intr_handle)) {
                DP_ERR(edev, "rte_intr_enable() failed\n");
                return -ENODEV;
@@ -2228,7 +2704,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
         * This is required since uio device uses only one MSI-x
         * interrupt vector but we need one for each engine.
         */
-       if (edev->num_hwfns > 1 && IS_PF(edev)) {
+       if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
                rc = rte_eal_alarm_set(timer_period * US_PER_S,
                                       qede_poll_sp_sb_cb,
                                       (void *)eth_dev);
@@ -2306,8 +2782,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
                                ether_addr_copy(&eth_dev->data->mac_addrs[0],
                                                &adapter->primary_mac);
                        } else {
-                               DP_NOTICE(edev, false,
-                                         "No VF macaddr assigned\n");
+                               DP_ERR(edev, "No VF macaddr assigned\n");
                        }
                }
        }
@@ -2321,17 +2796,28 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
                do_once = false;
        }
 
-       adapter->state = QEDE_DEV_INIT;
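+       /* Fresh per-port bookkeeping: no queues configured yet and empty
+        * flow-director, VLAN and unicast-MAC lists.
+        */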
+       adapter->num_tx_queues = 0;
+       adapter->num_rx_queues = 0;
+       SLIST_INIT(&adapter->fdir_info.fdir_list_head);
+       SLIST_INIT(&adapter->vlan_list_head);
+       SLIST_INIT(&adapter->uc_list_head);
+       adapter->mtu = ETHER_MTU;
+       adapter->new_mtu = ETHER_MTU;
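+       /* PF only: bring the vport up now with the default MTU. */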
+       if (!is_vf)
+               if (qede_start_vport(adapter, adapter->mtu))
+                       return -1;
 
-       DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
-                 adapter->primary_mac.addr_bytes[0],
-                 adapter->primary_mac.addr_bytes[1],
-                 adapter->primary_mac.addr_bytes[2],
-                 adapter->primary_mac.addr_bytes[3],
-                 adapter->primary_mac.addr_bytes[4],
-                 adapter->primary_mac.addr_bytes[5]);
+       DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
+               adapter->primary_mac.addr_bytes[0],
+               adapter->primary_mac.addr_bytes[1],
+               adapter->primary_mac.addr_bytes[2],
+               adapter->primary_mac.addr_bytes[3],
+               adapter->primary_mac.addr_bytes[4],
+               adapter->primary_mac.addr_bytes[5]);
 
-       return rc;
+       DP_INFO(edev, "Device initialized\n");
+
+       return 0;
 }
 
 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
@@ -2346,6 +2832,13 @@ static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
 
 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
 {
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
+       struct qede_dev *qdev = eth_dev->data->dev_private;
+       struct ecore_dev *edev = &qdev->edev;
+
+       PMD_INIT_FUNC_TRACE(edev);
+#endif
+
        /* only uninitialize in the primary process */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;