ethdev: fix extended statistics name index
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index c99143a..0e1b25c 100644
@@ -72,6 +72,8 @@
 #include "base/ixgbe_phy.h"
 #include "ixgbe_regs.h"
 
+#include "rte_pmd_ixgbe.h"
+
 /*
  * High threshold controlling when to start sending XOFF frames. Must be at
  * least 8 bytes less than receive packet buffer size. This value is in units
 #define IXGBE_VMVIR_TAGA_ETAG_INSERT           0x08000000
 #define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
 #define IXGBE_QDE_STRIP_TAG                    0x00000004
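+/* mask of the three VTEICR interrupt cause bits available to a VF */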
+#define IXGBE_VTEICR_MASK                      0x07
 
 enum ixgbevf_xcast_modes {
        IXGBEVF_XCAST_MODE_NONE = 0,
@@ -157,6 +160,9 @@ enum ixgbevf_xcast_modes {
        IXGBEVF_XCAST_MODE_ALLMULTI,
 };
 
+#define IXGBE_EXVET_VET_EXT_SHIFT              16
+#define IXGBE_DMATXCTL_VT_MASK                 0xFFFF0000
+
 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
 static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
 static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
@@ -174,11 +180,15 @@ static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
 static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *stats);
 static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
-                               struct rte_eth_xstats *xstats, unsigned n);
+                               struct rte_eth_xstat *xstats, unsigned n);
 static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
-                                 struct rte_eth_xstats *xstats, unsigned n);
+                                 struct rte_eth_xstat *xstats, unsigned n);
 static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
 static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
+static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+       struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
+static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+       struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                             uint16_t queue_id,
                                             uint8_t stat_idx,
@@ -223,7 +233,8 @@ static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
-static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
+static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
+                                     struct rte_intr_handle *handle);
 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
                void *param);
 static void ixgbe_dev_interrupt_delayed_handler(void *param);
@@ -361,6 +372,8 @@ static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
                                   struct timespec *timestamp);
 static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
                                   const struct timespec *timestamp);
+static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
+                                         void *param);
 
 static int ixgbe_dev_l2_tunnel_eth_type_conf
        (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
@@ -419,23 +432,80 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
  * The set of PCI devices this driver supports
  */
 static const struct rte_pci_id pci_id_ixgbe_map[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-
-{ .vendor_id = 0, /* sentinel */ },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_SFP) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_RNDC) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_560FLR) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_ECNA_DP) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
+#ifdef RTE_NIC_BYPASS
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
+#endif
+       { .vendor_id = 0, /* sentinel */ },
 };
 
-
 /*
  * The set of PCI devices this driver supports (for 82599 VF)
  */
 static const struct rte_pci_id pci_id_ixgbevf_map[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-{ .vendor_id = 0, /* sentinel */ },
-
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
+       { .vendor_id = 0, /* sentinel */ },
 };
 
 static const struct rte_eth_desc_lim rx_desc_lim = {
@@ -466,6 +536,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .xstats_get           = ixgbe_dev_xstats_get,
        .stats_reset          = ixgbe_dev_stats_reset,
        .xstats_reset         = ixgbe_dev_xstats_reset,
+       .xstats_get_names     = ixgbe_dev_xstats_get_names,
        .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
        .dev_infos_get        = ixgbe_dev_info_get,
        .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
@@ -527,7 +598,6 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .timesync_disable     = ixgbe_timesync_disable,
        .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
-       .get_reg_length       = ixgbe_get_reg_length,
        .get_reg              = ixgbe_get_regs,
        .get_eeprom_length    = ixgbe_get_eeprom_length,
        .get_eeprom           = ixgbe_get_eeprom,
@@ -555,6 +625,7 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .xstats_get           = ixgbevf_dev_xstats_get,
        .stats_reset          = ixgbevf_dev_stats_reset,
        .xstats_reset         = ixgbevf_dev_stats_reset,
+       .xstats_get_names     = ixgbevf_dev_xstats_get_names,
        .dev_close            = ixgbevf_dev_close,
        .allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
        .allmulticast_disable = ixgbevf_dev_allmulticast_disable,
@@ -577,7 +648,6 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .rxq_info_get         = ixgbe_rxq_info_get,
        .txq_info_get         = ixgbe_txq_info_get,
        .mac_addr_set         = ixgbevf_set_default_mac_addr,
-       .get_reg_length       = ixgbevf_get_reg_length,
        .get_reg              = ixgbevf_get_regs,
        .reta_update          = ixgbe_dev_rss_reta_update,
        .reta_query           = ixgbe_dev_rss_reta_query,
@@ -685,6 +755,7 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
 
 #define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
                           sizeof(rte_ixgbe_rxq_strings[0]))
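+/* each per-priority counter exists for all 8 priorities (rx_priority0..7) */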
+#define IXGBE_NB_RXQ_PRIO_VALUES 8
 
 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
        {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
@@ -695,6 +766,7 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
 
 #define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
                           sizeof(rte_ixgbe_txq_strings[0]))
+#define IXGBE_NB_TXQ_PRIO_VALUES 8
 
 static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
        {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
@@ -1012,7 +1084,8 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
 static int
 eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 {
-       struct rte_pci_device *pci_dev;
+       struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_vfta *shadow_vfta =
@@ -1056,7 +1129,6 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 
                return 0;
        }
-       pci_dev = eth_dev->pci_dev;
 
        rte_eth_copy_pci_info(eth_dev, pci_dev);
 
@@ -1201,12 +1273,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id);
 
-       rte_intr_callback_register(&pci_dev->intr_handle,
-                                  ixgbe_dev_interrupt_handler,
-                                  (void *)eth_dev);
+       rte_intr_callback_register(intr_handle,
+                                  ixgbe_dev_interrupt_handler, eth_dev);
 
        /* enable uio/vfio intr/eventfd mapping */
-       rte_intr_enable(&pci_dev->intr_handle);
+       rte_intr_enable(intr_handle);
 
        /* enable support intr */
        ixgbe_enable_intr(eth_dev);
@@ -1222,7 +1293,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 static int
 eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 {
-       struct rte_pci_device *pci_dev;
+       struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct ixgbe_hw *hw;
 
        PMD_INIT_FUNC_TRACE();
@@ -1231,7 +1303,6 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
                return -EPERM;
 
        hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
-       pci_dev = eth_dev->pci_dev;
 
        if (hw->adapter_stopped == 0)
                ixgbe_dev_close(eth_dev);
@@ -1244,9 +1315,9 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
        ixgbe_swfw_lock_reset(hw);
 
        /* disable uio intr before callback unregister */
-       rte_intr_disable(&(pci_dev->intr_handle));
-       rte_intr_callback_unregister(&(pci_dev->intr_handle),
-               ixgbe_dev_interrupt_handler, (void *)eth_dev);
+       rte_intr_disable(intr_handle);
+       rte_intr_callback_unregister(intr_handle,
+                                    ixgbe_dev_interrupt_handler, eth_dev);
 
        /* uninitialize PF if max_vfs not zero */
        ixgbe_pf_host_uninit(eth_dev);
@@ -1310,7 +1381,8 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 {
        int diag;
        uint32_t tc, tcs;
-       struct rte_pci_device *pci_dev;
+       struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_vfta *shadow_vfta =
@@ -1348,8 +1420,6 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
                return 0;
        }
 
-       pci_dev = eth_dev->pci_dev;
-
        rte_eth_copy_pci_info(eth_dev, pci_dev);
 
        hw->device_id = pci_dev->id.device_id;
@@ -1442,6 +1512,11 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
                return -EIO;
        }
 
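+       /* register the VF mailbox/misc interrupt and unmask it as part of init */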
+       rte_intr_callback_register(intr_handle,
+                                  ixgbevf_dev_interrupt_handler, eth_dev);
+       rte_intr_enable(intr_handle);
+       ixgbevf_intr_enable(hw);
+
        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id, "ixgbe_mac_82599_vf");
@@ -1454,6 +1529,8 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 static int
 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
 {
+       struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct ixgbe_hw *hw;
 
        PMD_INIT_FUNC_TRACE();
@@ -1476,15 +1553,20 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
        rte_free(eth_dev->data->mac_addrs);
        eth_dev->data->mac_addrs = NULL;
 
+       rte_intr_disable(intr_handle);
+       rte_intr_callback_unregister(intr_handle,
+                                    ixgbevf_dev_interrupt_handler, eth_dev);
+
        return 0;
 }
 
 static struct eth_driver rte_ixgbe_pmd = {
        .pci_drv = {
-               .name = "rte_ixgbe_pmd",
                .id_table = pci_id_ixgbe_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
                        RTE_PCI_DRV_DETACHABLE,
+               .probe = rte_eth_dev_pci_probe,
+               .remove = rte_eth_dev_pci_remove,
        },
        .eth_dev_init = eth_ixgbe_dev_init,
        .eth_dev_uninit = eth_ixgbe_dev_uninit,
@@ -1496,43 +1578,16 @@ static struct eth_driver rte_ixgbe_pmd = {
  */
 static struct eth_driver rte_ixgbevf_pmd = {
        .pci_drv = {
-               .name = "rte_ixgbevf_pmd",
                .id_table = pci_id_ixgbevf_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
+               .probe = rte_eth_dev_pci_probe,
+               .remove = rte_eth_dev_pci_remove,
        },
        .eth_dev_init = eth_ixgbevf_dev_init,
        .eth_dev_uninit = eth_ixgbevf_dev_uninit,
        .dev_private_size = sizeof(struct ixgbe_adapter),
 };
 
-/*
- * Driver initialization routine.
- * Invoked once at EAL init time.
- * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
- */
-static int
-rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
-{
-       PMD_INIT_FUNC_TRACE();
-
-       rte_eth_driver_register(&rte_ixgbe_pmd);
-       return 0;
-}
-
-/*
- * VF Driver initialization routine.
- * Invoked one at EAL init time.
- * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
- */
-static int
-rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
-{
-       PMD_INIT_FUNC_TRACE();
-
-       rte_eth_driver_register(&rte_ixgbevf_pmd);
-       return 0;
-}
-
 static int
 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
@@ -1576,15 +1631,47 @@ ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret = 0;
+       uint32_t reg;
+       uint32_t qinq;
+
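+       /* GDV set in DMATXCTL means the port is in double VLAN (QinQ) mode */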
+       qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+       qinq &= IXGBE_DMATXCTL_GDV;
 
        switch (vlan_type) {
        case ETH_VLAN_TYPE_INNER:
-               /* Only the high 16-bits is valid */
-               IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
+               if (qinq) {
+                       reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+                       reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
+                       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
+                       reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+                       reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
+                               | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
+                       IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
+               } else {
+                       ret = -ENOTSUP;
+                       PMD_DRV_LOG(ERR, "Inner VLAN type is only"
+                                   " supported in QinQ mode");
+               }
+               break;
+       case ETH_VLAN_TYPE_OUTER:
+               if (qinq) {
+                       /* Only the high 16-bits is valid */
+                       IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
+                                       IXGBE_EXVET_VET_EXT_SHIFT);
+               } else {
+                       reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+                       reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
+                       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
+                       reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+                       reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
+                               | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
+                       IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
+               }
+
                break;
        default:
                ret = -EINVAL;
-               PMD_DRV_LOG(ERR, "Unsupported vlan type %d\n", vlan_type);
+               PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
                break;
        }
 
@@ -1636,6 +1723,7 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 {
        struct ixgbe_hwstrip *hwstrip =
                IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
+       struct ixgbe_rx_queue *rxq;
 
        if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
                return;
@@ -1644,6 +1732,16 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
                IXGBE_SET_HWSTRIP(hwstrip, queue);
        else
                IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
+
+       if (queue >= dev->data->nb_rx_queues)
+               return;
+
+       rxq = dev->data->rx_queues[queue];
+
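+       /* cache the ol_flags the RX path will set on mbufs from this queue */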
+       if (on)
+               rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+       else
+               rxq->vlan_flags = PKT_RX_VLAN_PKT;
 }
 
 static void
@@ -1701,6 +1799,7 @@ ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t ctrl;
        uint16_t i;
+       struct ixgbe_rx_queue *rxq;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -1711,9 +1810,10 @@ ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
        } else {
                /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
-                       ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+                       rxq = dev->data->rx_queues[i];
+                       ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
                        ctrl &= ~IXGBE_RXDCTL_VME;
-                       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
+                       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
 
                        /* record those setting for HW strip per queue */
                        ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
@@ -1728,6 +1828,7 @@ ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t ctrl;
        uint16_t i;
+       struct ixgbe_rx_queue *rxq;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -1738,9 +1839,10 @@ ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
        } else {
                /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
-                       ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+                       rxq = dev->data->rx_queues[i];
+                       ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
                        ctrl |= IXGBE_RXDCTL_VME;
-                       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
+                       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
 
                        /* record those setting for HW strip per queue */
                        ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
@@ -1843,6 +1945,8 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
 static int
 ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
 {
+       struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+
        switch (nb_rx_q) {
        case 1:
        case 2:
@@ -1856,7 +1960,7 @@ ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
        }
 
        RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
-       RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q;
+       RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = pci_dev->max_vfs * nb_rx_q;
 
        return 0;
 }
@@ -1873,6 +1977,8 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
                /* check multi-queue mode */
                switch (dev_conf->rxmode.mq_mode) {
                case ETH_MQ_RX_VMDQ_DCB:
+                       PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+                       break;
                case ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* DCB/RSS VMDQ in SRIOV mode, not implement yet */
                        PMD_INIT_LOG(ERR, "SRIOV active,"
@@ -1908,11 +2014,9 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 
                switch (dev_conf->txmode.mq_mode) {
                case ETH_MQ_TX_VMDQ_DCB:
-                       /* DCB VMDQ in SRIOV mode, not implement yet */
-                       PMD_INIT_LOG(ERR, "SRIOV is active,"
-                                       " unsupported VMDQ mq_mode tx %d.",
-                                       dev_conf->txmode.mq_mode);
-                       return -EINVAL;
+                       PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+                       dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+                       break;
                default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
                        dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
                        break;
@@ -2087,7 +2191,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_vf_info *vfinfo =
                *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
-       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        uint32_t intr_vector = 0;
        int err, link_up = 0, negotiate = 0;
        uint32_t speed = 0;
@@ -2167,6 +2272,36 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
                goto error;
        }
 
+       mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+               ETH_VLAN_EXTEND_MASK;
+       ixgbe_vlan_offload_set(dev, mask);
+
+       if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+               /* Enable vlan filtering for VMDq */
+               ixgbe_vmdq_vlan_hw_filter_enable(dev);
+       }
+
+       /* Configure DCB hw */
+       ixgbe_configure_dcb(dev);
+
+       if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+               err = ixgbe_fdir_configure(dev);
+               if (err)
+                       goto error;
+       }
+
+       /* Restore vf rate limit */
+       if (vfinfo != NULL) {
+               for (vf = 0; vf < pci_dev->max_vfs; vf++)
+                       for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
+                               if (vfinfo[vf].tx_rate[idx] != 0)
+                                       ixgbe_set_vf_rate_limit(dev, vf,
+                                               vfinfo[vf].tx_rate[idx],
+                                               1 << idx);
+       }
+
+       ixgbe_restore_statistics_mapping(dev);
+
        err = ixgbe_dev_rxtx_start(dev);
        if (err < 0) {
                PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
@@ -2234,8 +2369,7 @@ skip_link_setup:
                        ixgbe_dev_lsc_interrupt_setup(dev);
        } else {
                rte_intr_callback_unregister(intr_handle,
-                                            ixgbe_dev_interrupt_handler,
-                                            (void *)dev);
+                                            ixgbe_dev_interrupt_handler, dev);
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        PMD_INIT_LOG(INFO, "lsc won't enable because of"
                                     " no intr multiplex\n");
@@ -2252,36 +2386,6 @@ skip_link_setup:
        /* resume enabled intr since hw reset */
        ixgbe_enable_intr(dev);
 
-       mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-               ETH_VLAN_EXTEND_MASK;
-       ixgbe_vlan_offload_set(dev, mask);
-
-       if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
-               /* Enable vlan filtering for VMDq */
-               ixgbe_vmdq_vlan_hw_filter_enable(dev);
-       }
-
-       /* Configure DCB hw */
-       ixgbe_configure_dcb(dev);
-
-       if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
-               err = ixgbe_fdir_configure(dev);
-               if (err)
-                       goto error;
-       }
-
-       /* Restore vf rate limit */
-       if (vfinfo != NULL) {
-               for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
-                       for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
-                               if (vfinfo[vf].tx_rate[idx] != 0)
-                                       ixgbe_set_vf_rate_limit(dev, vf,
-                                               vfinfo[vf].tx_rate[idx],
-                                               1 << idx);
-       }
-
-       ixgbe_restore_statistics_mapping(dev);
-
        return 0;
 
 error:
@@ -2304,7 +2408,8 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next;
-       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        int vf;
 
        PMD_INIT_FUNC_TRACE();
@@ -2319,8 +2424,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
        /* stop adapter */
        ixgbe_stop_adapter(hw);
 
-       for (vf = 0; vfinfo != NULL &&
-                    vf < dev->pci_dev->max_vfs; vf++)
+       for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
                vfinfo[vf].clear_to_send = false;
 
        if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
@@ -2695,12 +2799,76 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
 /* This function calculates the number of xstats based on the current config */
 static unsigned
 ixgbe_xstats_calc_num(void) {
-       return IXGBE_NB_HW_STATS + (IXGBE_NB_RXQ_PRIO_STATS * 8) +
-               (IXGBE_NB_TXQ_PRIO_STATS * 8);
+       return IXGBE_NB_HW_STATS +
+               (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
+               (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
+}
+
+static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+       struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
+{
+       const unsigned cnt_stats = ixgbe_xstats_calc_num();
+       unsigned stat, i, count;
+
+       if (xstats_names != NULL) {
+               count = 0;
+
+               /* Note: limit >= cnt_stats checked upstream
+                * in rte_eth_xstats_names()
+                */
+
+               /* Extended stats from ixgbe_hw_stats */
+               for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
+                       snprintf(xstats_names[count].name,
+                               sizeof(xstats_names[count].name),
+                               "%s",
+                               rte_ixgbe_stats_strings[i].name);
+                       count++;
+               }
+
+               /* RX Priority Stats */
+               for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
+                       for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
+                               snprintf(xstats_names[count].name,
+                                       sizeof(xstats_names[count].name),
+                                       "rx_priority%u_%s", i,
+                                       rte_ixgbe_rxq_strings[stat].name);
+                               count++;
+                       }
+               }
+
+               /* TX Priority Stats */
+               for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
+                       for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
+                               snprintf(xstats_names[count].name,
+                                       sizeof(xstats_names[count].name),
+                                       "tx_priority%u_%s", i,
+                                       rte_ixgbe_txq_strings[stat].name);
+                               count++;
+                       }
+               }
+       }
+       return cnt_stats;
+}
+
+static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+       struct rte_eth_xstat_name *xstats_names, unsigned limit)
+{
+       unsigned i;
+
+       if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL)
+               return -ENOMEM;
+
+       if (xstats_names != NULL)
+               for (i = 0; i < IXGBEVF_NB_XSTATS; i++)
+                       snprintf(xstats_names[i].name,
+                               sizeof(xstats_names[i].name),
+                               "%s", rte_ixgbevf_stats_strings[i].name);
+       return IXGBEVF_NB_XSTATS;
 }
 
 static int
-ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                                         unsigned n)
 {
        struct ixgbe_hw *hw =
@@ -2732,39 +2900,33 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
        /* Extended stats from ixgbe_hw_stats */
        count = 0;
        for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
-               snprintf(xstats[count].name, sizeof(xstats[count].name), "%s",
-                        rte_ixgbe_stats_strings[i].name);
                xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
                                rte_ixgbe_stats_strings[i].offset);
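+               /* ids are dense and follow the order of xstats_get_names() */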
+               xstats[count].id = count;
                count++;
        }
 
        /* RX Priority Stats */
        for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
-               for (i = 0; i < 8; i++) {
-                       snprintf(xstats[count].name, sizeof(xstats[count].name),
-                                "rx_priority%u_%s", i,
-                                rte_ixgbe_rxq_strings[stat].name);
+               for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
                        xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
                                        rte_ixgbe_rxq_strings[stat].offset +
                                        (sizeof(uint64_t) * i));
+                       xstats[count].id = count;
                        count++;
                }
        }
 
        /* TX Priority Stats */
        for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
-               for (i = 0; i < 8; i++) {
-                       snprintf(xstats[count].name, sizeof(xstats[count].name),
-                                "tx_priority%u_%s", i,
-                                rte_ixgbe_txq_strings[stat].name);
+               for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
                        xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
                                        rte_ixgbe_txq_strings[stat].offset +
                                        (sizeof(uint64_t) * i));
+                       xstats[count].id = count;
                        count++;
                }
        }
-
        return count;
 }
 
@@ -2812,7 +2974,7 @@ ixgbevf_update_stats(struct rte_eth_dev *dev)
 }
 
 static int
-ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                       unsigned n)
 {
        struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
@@ -2829,8 +2991,6 @@ ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
 
        /* Extended stats */
        for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
-               snprintf(xstats[i].name, sizeof(xstats[i].name),
-                        "%s", rte_ixgbevf_stats_strings[i].name);
                xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
                        rte_ixgbevf_stats_strings[i].offset);
        }
@@ -2874,9 +3034,11 @@ ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
 static void
 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
+       struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
 
+       dev_info->pci_dev = pci_dev;
        dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
        if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
@@ -2892,7 +3054,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
        dev_info->max_mac_addrs = hw->mac.num_rar_entries;
        dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
-       dev_info->max_vfs = dev->pci_dev->max_vfs;
+       dev_info->max_vfs = pci_dev->max_vfs;
        if (hw->mac.type == ixgbe_mac_82598EB)
                dev_info->max_vmdq_pools = ETH_16_POOLS;
        else
@@ -3006,15 +3168,17 @@ static void
 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                     struct rte_eth_dev_info *dev_info)
 {
+       struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+       dev_info->pci_dev = pci_dev;
        dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
        dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
        dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */
        dev_info->max_mac_addrs = hw->mac.num_rar_entries;
        dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
-       dev_info->max_vfs = dev->pci_dev->max_vfs;
+       dev_info->max_vfs = pci_dev->max_vfs;
        if (hw->mac.type == ixgbe_mac_82598EB)
                dev_info->max_vmdq_pools = ETH_16_POOLS;
        else
@@ -3276,6 +3440,7 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
 static void
 ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
 {
+       struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
        struct rte_eth_link link;
 
        memset(&link, 0, sizeof(link));
@@ -3290,11 +3455,11 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
                PMD_INIT_LOG(INFO, " Port %d: Link Down",
                                (int)(dev->data->port_id));
        }
-       PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
-                               dev->pci_dev->addr.domain,
-                               dev->pci_dev->addr.bus,
-                               dev->pci_dev->addr.devid,
-                               dev->pci_dev->addr.function);
+       PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
+                               pci_dev->addr.domain,
+                               pci_dev->addr.bus,
+                               pci_dev->addr.devid,
+                               pci_dev->addr.function);
 }
 
 /*
@@ -3308,7 +3473,8 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
  *  - On failure, a negative value.
  */
 static int
-ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
+ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
+                          struct rte_intr_handle *intr_handle)
 {
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
@@ -3358,7 +3524,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
        } else {
                PMD_DRV_LOG(DEBUG, "enable intr immediately");
                ixgbe_enable_intr(dev);
-               rte_intr_enable(&(dev->pci_dev->intr_handle));
+               rte_intr_enable(intr_handle);
        }
 
 
@@ -3383,6 +3549,8 @@ static void
 ixgbe_dev_interrupt_delayed_handler(void *param)
 {
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+       struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        struct ixgbe_hw *hw =
@@ -3402,12 +3570,12 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
                ixgbe_dev_link_update(dev, 0);
                intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
                ixgbe_dev_link_status_print(dev);
-               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
        }
 
        PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
        ixgbe_enable_intr(dev);
-       rte_intr_enable(&(dev->pci_dev->intr_handle));
+       rte_intr_enable(intr_handle);
 }
 
 /**
@@ -3423,13 +3591,13 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
  *  void
  */
 static void
-ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
                            void *param)
 {
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 
        ixgbe_dev_interrupt_get_status(dev);
-       ixgbe_dev_interrupt_action(dev);
+       ixgbe_dev_interrupt_action(dev, handle);
 }
 
 static int
@@ -3891,6 +4059,38 @@ ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
        ixgbe_add_rar(dev, addr, 0, 0);
 }
 
+int
+rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
+               struct ether_addr *mac_addr)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_vf_info *vfinfo;
+       int rar_entry;
+       uint8_t *new_mac = (uint8_t *)(mac_addr);
+       struct rte_eth_dev *dev;
+       struct rte_eth_dev_info dev_info;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+       rte_eth_dev_info_get(port, &dev_info);
+
+       if (vf >= dev_info.max_vfs)
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
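+       /* VF MAC addresses occupy RAR entries from the top of the table down */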
+       rar_entry = hw->mac.num_rar_entries - (vf + 1);
+
+       if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
+               rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
+                               ETHER_ADDR_LEN);
+               return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
+                               IXGBE_RAH_AV);
+       }
+       return -EINVAL;
+}
+
 static int
 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
@@ -4007,7 +4207,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t intr_vector = 0;
-       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
        int err, mask = 0;
 
@@ -4070,10 +4271,13 @@ static void
 ixgbevf_dev_stop(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 
        PMD_INIT_FUNC_TRACE();
 
+       ixgbevf_intr_disable(hw);
+
        hw->adapter_stopped = 1;
        ixgbe_stop_adapter(hw);
 
@@ -4130,7 +4334,8 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
                        mask = 1;
                        for (j = 0; j < 32; j++) {
                                if (vfta & mask)
-                                       ixgbe_set_vfta(hw, (i<<5)+j, 0, on);
+                                       ixgbe_set_vfta(hw, (i<<5)+j, 0,
+                                                      on, false);
                                mask <<= 1;
                        }
                }
@@ -4152,7 +4357,7 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
        PMD_INIT_FUNC_TRACE();
 
        /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
-       ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on);
+       ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false);
        if (ret) {
                PMD_INIT_LOG(ERR, "Unable to set VF vlan");
                return ret;
@@ -4397,9 +4602,19 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
        if (ixgbe_vmdq_mode_check(hw) < 0)
                return -ENOTSUP;
 
-       addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
+       if (pool >= ETH_64_POOLS)
+               return -EINVAL;
+
+       /* for pool >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
+       if (pool >= 32) {
+               addr = IXGBE_VFRE(1);
+               val = bit1 << (pool - 32);
+       } else {
+               addr = IXGBE_VFRE(0);
+               val = bit1 << pool;
+       }
+
        reg = IXGBE_READ_REG(hw, addr);
-       val = bit1 << pool;
 
        if (on)
                reg |= val;
@@ -4424,9 +4639,19 @@ ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
        if (ixgbe_vmdq_mode_check(hw) < 0)
                return -ENOTSUP;
 
-       addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
+       if (pool >= ETH_64_POOLS)
+               return -EINVAL;
+
+       /* for pool >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
+       if (pool >= 32) {
+               addr = IXGBE_VFTE(1);
+               val = bit1 << (pool - 32);
+       } else {
+               addr = IXGBE_VFTE(0);
+               val = bit1 << pool;
+       }
+
        reg = IXGBE_READ_REG(hw, addr);
-       val = bit1 << pool;
 
        if (on)
                reg |= val;
@@ -4451,7 +4676,8 @@ ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
                return -ENOTSUP;
        for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
                if (pool_mask & ((uint64_t)(1ULL << pool_idx))) {
-                       ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx, vlan_on);
+                       ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx,
+                                                  vlan_on, false);
                        if (ret < 0)
                                return ret;
                }
@@ -4460,6 +4686,216 @@ ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
        return ret;
 }
 
+int
+rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_mac_info *mac;
+       struct rte_eth_dev *dev;
+       struct rte_eth_dev_info dev_info;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+       rte_eth_dev_info_get(port, &dev_info);
+
+       if (vf >= dev_info.max_vfs)
+               return -EINVAL;
+
+       if (on > 1)
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       mac = &hw->mac;
+
+       mac->ops.set_vlan_anti_spoofing(hw, on, vf);
+
+       return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_mac_info *mac;
+       struct rte_eth_dev *dev;
+       struct rte_eth_dev_info dev_info;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+       rte_eth_dev_info_get(port, &dev_info);
+
+       if (vf >= dev_info.max_vfs)
+               return -EINVAL;
+
+       if (on > 1)
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       mac = &hw->mac;
+       mac->ops.set_mac_anti_spoofing(hw, on, vf);
+
+       return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id)
+{
+       struct ixgbe_hw *hw;
+       uint32_t ctrl;
+       struct rte_eth_dev *dev;
+       struct rte_eth_dev_info dev_info;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+       rte_eth_dev_info_get(port, &dev_info);
+
+       if (vf >= dev_info.max_vfs)
+               return -EINVAL;
+
+       if (vlan_id > 4095)
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
+       if (vlan_id) {
+               ctrl = vlan_id;
+               ctrl |= IXGBE_VMVIR_VLANA_DEFAULT;
+       } else {
+               ctrl = 0;
+       }
+
+       IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl);
+
+       return 0;
+}
+
+int
+rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on)
+{
+       struct ixgbe_hw *hw;
+       uint32_t ctrl;
+       struct rte_eth_dev *dev;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+
+       if (on > 1)
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+       /* enable or disable VMDQ loopback */
+       if (on)
+               ctrl |= IXGBE_PFDTXGSWC_VT_LBEN;
+       else
+               ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+
+       IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl);
+
+       return 0;
+}
+
+int
+rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on)
+{
+       struct ixgbe_hw *hw;
+       uint32_t reg_value;
+       int i;
+       int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
+       struct rte_eth_dev *dev;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+
+       if (on > 1)
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
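+       /* the QDE index field addresses queues 0-127, so every RX queue is set */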
+       for (i = 0; i <= num_queues; i++) {
+               reg_value = IXGBE_QDE_WRITE |
+                               (i << IXGBE_QDE_IDX_SHIFT) |
+                               (on & IXGBE_QDE_ENABLE);
+               IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
+       }
+
+       return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on)
+{
+       struct ixgbe_hw *hw;
+       uint32_t reg_value;
+       struct rte_eth_dev *dev;
+       struct rte_eth_dev_info dev_info;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+       rte_eth_dev_info_get(port, &dev_info);
+
+       /* only support VFs 0 to 63 */
+       if ((vf >= dev_info.max_vfs) || (vf > 63))
+               return -EINVAL;
+
+       if (on > 1)
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf));
+       if (on)
+               reg_value |= IXGBE_SRRCTL_DROP_EN;
+       else
+               reg_value &= ~IXGBE_SRRCTL_DROP_EN;
+
+       IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value);
+
+       return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
+{
+       struct rte_eth_dev *dev;
+       struct rte_eth_dev_info dev_info;
+       uint16_t queues_per_pool;
+       uint32_t q;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+       rte_eth_dev_info_get(port, &dev_info);
+
+       if (vf >= dev_info.max_vfs)
+               return -EINVAL;
+
+       if (on > 1)
+               return -EINVAL;
+
+       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
+
+       /* The PF has 128 queue pairs and in SRIOV configuration
+        * those queues will be assigned to VFs, so RXDCTL
+        * registers will be dealing with queues which will be
+        * assigned to VFs.
+        * Let's say we have SRIOV configured with 31 VFs then the
+        * first 124 queues 0-123 will be allocated to VFs and only
+        * the last 4 queues 124-127 will be assigned to the PF.
+        */
+
+       queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
+
+       for (q = 0; q < queues_per_pool; q++)
+               (*dev->dev_ops->vlan_strip_queue_set)(dev,
+                               q + vf * queues_per_pool, on);
+       return 0;
+}
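+
+/*
+ * Usage sketch for the rte_pmd_ixgbe_* VF helpers above (a hypothetical
+ * snippet: assumes port 0 is an ixgbe PF with SR-IOV VFs configured and
+ * ignores return codes):
+ *
+ *     rte_pmd_ixgbe_set_vf_vlan_anti_spoof(0, 1, 1); // VF 1: drop spoofed VLANs
+ *     rte_pmd_ixgbe_set_vf_vlan_insert(0, 1, 100);   // VF 1: insert VLAN 100 on TX
+ *     rte_pmd_ixgbe_set_vf_vlan_stripq(0, 1, 1);     // VF 1: strip VLAN on RX
+ */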
+
 #define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
 #define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
 #define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
@@ -4513,7 +4949,8 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
                        if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
                                /* search vlan id related pool vlan filter index */
                                reg_index = ixgbe_find_vlvf_slot(hw,
-                                               mirror_conf->vlan.vlan_id[i]);
+                                                mirror_conf->vlan.vlan_id[i],
+                                                false);
                                if (reg_index < 0)
                                        return -EINVAL;
                                vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
@@ -4637,6 +5074,8 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
 static int
 ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
+       struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        uint32_t mask;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -4646,7 +5085,7 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
        RTE_SET_USED(queue_id);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
 
-       rte_intr_enable(&dev->pci_dev->intr_handle);
+       rte_intr_enable(intr_handle);
 
        return 0;
 }
@@ -4669,6 +5108,8 @@ ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 static int
 ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
+       struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        uint32_t mask;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -4688,7 +5129,7 @@ ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
                mask &= (1 << (queue_id - 32));
                IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
        }
-       rte_intr_enable(&dev->pci_dev->intr_handle);
+       rte_intr_enable(intr_handle);
 
        return 0;
 }
@@ -4792,12 +5233,16 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
 static void
 ixgbevf_configure_msix(struct rte_eth_dev *dev)
 {
-       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t q_idx;
        uint32_t vector_idx = IXGBE_MISC_VEC_ID;
 
+       /* Configure VF other cause ivar */
+       ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
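+       /* done first so the mailbox vector is mapped even without RX interrupts */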
+
        /* won't configure msix register if no mapping is done
         * between intr vector and event fd.
         */
@@ -4812,9 +5257,6 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
                ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
                intr_handle->intr_vec[q_idx] = vector_idx;
        }
-
-       /* Configure VF other cause ivar */
-       ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
 }
 
 /**
@@ -4825,7 +5267,8 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
 static void
 ixgbe_configure_msix(struct rte_eth_dev *dev)
 {
-       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
@@ -4943,6 +5386,7 @@ static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
        uint16_t tx_rate, uint64_t q_msk)
 {
+       struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_vf_info *vfinfo =
                *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
@@ -4957,7 +5401,7 @@ static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
                return -EINVAL;
 
        if (vfinfo != NULL) {
-               for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) {
+               for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
                        if (vf_idx == vf)
                                continue;
                        for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
@@ -6169,6 +6613,12 @@ ixgbe_get_regs(struct rte_eth_dev *dev,
        const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
                                    ixgbe_regs_mac_82598EB : ixgbe_regs_others;
 
+       if (data == NULL) {
+               regs->length = ixgbe_get_reg_length(dev);
+               regs->width = sizeof(uint32_t);
+               return 0;
+       }
+
        /* Support only full register dump */
        if ((regs->length == 0) ||
            (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
@@ -6193,6 +6643,12 @@ ixgbevf_get_regs(struct rte_eth_dev *dev,
        int count = 0;
        const struct reg_info *reg_group;
 
+       if (data == NULL) {
+               regs->length = ixgbevf_get_reg_length(dev);
+               regs->width = sizeof(uint32_t);
+               return 0;
+       }
+
        /* Support only full register dump */
        if ((regs->length == 0) ||
            (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
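/*
 * Both hunks above adopt the ethdev get_regs convention: a NULL data
 * pointer means "report length and width only". A hedged caller sketch;
 * port_id and dump_registers() are illustrative, not part of this patch:
 */
	struct rte_dev_reg_info info;

	memset(&info, 0, sizeof(info));
	info.data = NULL;			/* 1st call: sizes only */
	if (rte_eth_dev_get_reg_info(port_id, &info) != 0)
		return;
	info.data = calloc(info.length, info.width);
	if (info.data != NULL &&
	    rte_eth_dev_get_reg_info(port_id, &info) == 0)
		dump_registers(info.data, info.length);	/* hypothetical */
	free(info.data);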
@@ -6758,15 +7214,16 @@ ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
                             struct rte_eth_l2_tunnel_conf *l2_tunnel,
                             bool en)
 {
+       struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
        int ret = 0;
        uint32_t vmtir, vmvir;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       if (l2_tunnel->vf_id >= dev->pci_dev->max_vfs) {
+       if (l2_tunnel->vf_id >= pci_dev->max_vfs) {
                PMD_DRV_LOG(ERR,
                            "VF id %u should be less than %u",
                            l2_tunnel->vf_id,
-                           dev->pci_dev->max_vfs);
+                           pci_dev->max_vfs);
                return -EINVAL;
        }
 
@@ -7079,70 +7536,86 @@ ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
        return ret;
 }
 
-/* ixgbevf_update_xcast_mode - Update Multicast mode
- * @hw: pointer to the HW structure
- * @netdev: pointer to net device structure
- * @xcast_mode: new multicast mode
- *
- * Updates the Multicast Mode of VF.
- */
-static int ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
-                                    int xcast_mode)
+static void
+ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
 {
-       struct ixgbe_mbx_info *mbx = &hw->mbx;
-       u32 msgbuf[2];
-       s32 err;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       switch (hw->api_version) {
-       case ixgbe_mbox_api_12:
-               break;
-       default:
-               return -EOPNOTSUPP;
-       }
+       hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);
+}
 
-       msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
-       msgbuf[1] = xcast_mode;
+static void
+ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       err = mbx->ops.write_posted(hw, msgbuf, 2, 0);
-       if (err)
-               return err;
+       hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE);
+}
 
-       err = mbx->ops.read_posted(hw, msgbuf, 2, 0);
-       if (err)
-               return err;
+static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       u32 in_msg = 0;
 
-       msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
-       if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
-               return -EPERM;
+       if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
+               return;
 
-       return 0;
+       /* PF reset VF event */
+       if (in_msg == IXGBE_PF_CONTROL_MSG)
+               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
 }
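/*
 * Two things happen in the interleaved hunks above: the open-coded
 * mailbox exchange is dropped in favour of the shared base-code op
 * (hw->mac.ops.update_xcast_mode is assumed to send the same
 * IXGBE_VF_UPDATE_XCAST_MODE message and to reject mailbox API versions
 * older than 1.2, as the removed helper did), and ixgbevf_mbx_process()
 * is added to turn a PF control message into an application-visible
 * RTE_ETH_EVENT_INTR_RESET event.
 */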
 
-static void
-ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
+static int
+ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
 {
+       uint32_t eicr;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+       ixgbevf_intr_disable(hw);
+
+       /* read the read-on-clear NIC registers here */
+       eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
+       intr->flags = 0;
 
-       ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);
+       /* only one misc vector supported - mailbox */
+       eicr &= IXGBE_VTEICR_MASK;
+       if (eicr == IXGBE_MISC_VEC_ID)
+               intr->flags |= IXGBE_FLAG_MAILBOX;
+
+       return 0;
 }
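/*
 * VTEICR is read-on-clear, so the read above both fetches and
 * acknowledges the pending causes. The new IXGBE_VTEICR_MASK (0x07)
 * keeps the low three cause bits a VF owns, presumably two queue vectors
 * plus the misc/mailbox vector; only the misc vector is decoded here,
 * and interrupts are re-armed in ixgbevf_dev_interrupt_action() below.
 */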
 
-static void
-ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
+static int
+ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 
-       ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE);
+       if (intr->flags & IXGBE_FLAG_MAILBOX) {
+               ixgbevf_mbx_process(dev);
+               intr->flags &= ~IXGBE_FLAG_MAILBOX;
+       }
+
+       ixgbevf_intr_enable(hw);
+
+       return 0;
 }
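/*
 * A hedged application-side sketch of consuming the reset event raised
 * from ixgbevf_mbx_process() above; vf_reset_cb is illustrative, and the
 * callback signature is assumed to be the three-argument form used by
 * this ethdev version:
 */
static void
vf_reset_cb(uint8_t port_id, enum rte_eth_event_type event, void *cb_arg)
{
	RTE_SET_USED(cb_arg);
	if (event == RTE_ETH_EVENT_INTR_RESET)
		printf("port %u: PF requested a reset, restart the port\n",
		       port_id);
}

	/* at init time: */
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
				      vf_reset_cb, NULL);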
 
-static struct rte_driver rte_ixgbe_driver = {
-       .type = PMD_PDEV,
-       .init = rte_ixgbe_pmd_init,
-};
+static void
+ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+                             void *param)
+{
+       struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 
-static struct rte_driver rte_ixgbevf_driver = {
-       .type = PMD_PDEV,
-       .init = rte_ixgbevf_pmd_init,
-};
+       ixgbevf_dev_interrupt_get_status(dev);
+       ixgbevf_dev_interrupt_action(dev);
+}
 
-PMD_REGISTER_DRIVER(rte_ixgbe_driver);
-PMD_REGISTER_DRIVER(rte_ixgbevf_driver);
+RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio");
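/*
 * A hedged note on the new registration macros: RTE_PMD_REGISTER_PCI
 * replaces the removed rte_driver tables and registers each PMD under
 * its "net_*" name, RTE_PMD_REGISTER_PCI_TABLE exports the supported
 * PCI id table, and RTE_PMD_REGISTER_KMOD_DEP embeds the kernel-module
 * dependency string (e.g. igb_uio or vfio for the VF) so that tools
 * such as dpdk-pmdinfo can report which modules the device may be
 * bound to.
 */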