drivers: use PCI registration macro

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 910b7db..4b9e3d4 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
 #define IXGBE_VMVIR_TAGA_ETAG_INSERT           0x08000000
 #define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
 #define IXGBE_QDE_STRIP_TAG                    0x00000004
+#define IXGBE_VTEICR_MASK                      0x07
 
 enum ixgbevf_xcast_modes {
        IXGBEVF_XCAST_MODE_NONE = 0,
@@ -157,6 +158,9 @@ enum ixgbevf_xcast_modes {
        IXGBEVF_XCAST_MODE_ALLMULTI,
 };
 
+#define IXGBE_EXVET_VET_EXT_SHIFT              16
+#define IXGBE_DMATXCTL_VT_MASK                 0xFFFF0000
+
 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
 static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
 static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
@@ -174,17 +178,22 @@ static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
 static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *stats);
 static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
-                               struct rte_eth_xstats *xstats, unsigned n);
+                               struct rte_eth_xstat *xstats, unsigned n);
 static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
-                                 struct rte_eth_xstats *xstats, unsigned n);
+                                 struct rte_eth_xstat *xstats, unsigned n);
 static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
 static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
+static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+       struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
+static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+       struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                             uint16_t queue_id,
                                             uint8_t stat_idx,
                                             uint8_t is_rx);
 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
                               struct rte_eth_dev_info *dev_info);
+static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                                 struct rte_eth_dev_info *dev_info);
 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
@@ -231,7 +240,7 @@ static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
 static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
                                           struct ether_addr *mac_addr);
-static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config);
+static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
 
 /* For Virtual Function support */
 static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
@@ -263,14 +272,14 @@ static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
 
 /* For Eth VMDQ APIs support */
 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
-               ether_addr* mac_addr,uint8_t on);
-static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on);
+               ether_addr * mac_addr, uint8_t on);
+static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
 static int  ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev,  uint16_t pool,
                uint16_t rx_mask, uint8_t on);
-static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
-static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
+static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
+static int ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
 static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
-               uint64_t pool_mask,uint8_t vlan_on);
+               uint64_t pool_mask, uint8_t vlan_on);
 static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
                struct rte_eth_mirror_conf *mirror_conf,
                uint8_t rule_id, uint8_t on);
@@ -360,6 +369,8 @@ static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
                                   struct timespec *timestamp);
 static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
                                   const struct timespec *timestamp);
+static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
+                                         void *param);
 
 static int ixgbe_dev_l2_tunnel_eth_type_conf
        (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
@@ -396,21 +407,21 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
        last = latest;                                           \
 }
 
-#define IXGBE_SET_HWSTRIP(h, q) do{\
-               uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
-               uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_SET_HWSTRIP(h, q) do {\
+               uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+               uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] |= 1 << bit;\
        } while (0)
 
-#define IXGBE_CLEAR_HWSTRIP(h, q) do{\
-               uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
-               uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
+               uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+               uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] &= ~(1 << bit);\
        } while (0)
 
-#define IXGBE_GET_HWSTRIP(h, q, r) do{\
-               uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
-               uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_GET_HWSTRIP(h, q, r) do {\
+               uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+               uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
                (r) = (h)->bitmap[idx] >> bit & 1;\
        } while (0)
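
These bitmap helpers track the per-queue hardware VLAN-strip setting, one bit per RX queue. A standalone sketch of the same index arithmetic (assuming 32-bit bitmap words and NBBY == 8, as the macros do):

#include <stdint.h>
#include <stdio.h>

#define NBBY 8  /* bits per byte, matching the assumption in the macros above */

struct hwstrip_demo {
        uint32_t bitmap[4];     /* one bit per queue, queues 0..127 */
};

int main(void)
{
        struct hwstrip_demo h = { { 0 } };
        uint32_t q = 35;
        uint32_t idx = q / (sizeof(h.bitmap[0]) * NBBY);        /* 35 / 32 = 1 */
        uint32_t bit = q % (sizeof(h.bitmap[0]) * NBBY);        /* 35 % 32 = 3 */

        h.bitmap[idx] |= 1 << bit;      /* same effect as IXGBE_SET_HWSTRIP(&h, 35) */
        printf("queue %u -> word %u, bit %u\n", q, idx, bit);
        return 0;
}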
 
@@ -418,23 +429,80 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
  * The set of PCI devices this driver supports
  */
 static const struct rte_pci_id pci_id_ixgbe_map[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-
-{ .vendor_id = 0, /* sentinel */ },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_SFP) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_RNDC) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_560FLR) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_ECNA_DP) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
+#ifdef RTE_NIC_BYPASS
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
+#endif
+       { .vendor_id = 0, /* sentinel */ },
 };
 
-
 /*
  * The set of PCI devices this driver supports (for 82599 VF)
  */
 static const struct rte_pci_id pci_id_ixgbevf_map[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-{ .vendor_id = 0, /* sentinel */ },
-
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
+       { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
+       { .vendor_id = 0, /* sentinel */ },
 };
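
The ID tables are now written out explicitly with RTE_PCI_DEVICE and terminated by a zero-vendor sentinel, instead of being generated by including rte_pci_dev_ids.h. A minimal sketch of the same pattern for a hypothetical PMD (MYDRV_VENDOR_ID and MYDRV_DEVICE_ID are placeholder macros, not real definitions):

#include <rte_pci.h>

#define MYDRV_VENDOR_ID  0x8086 /* placeholder */
#define MYDRV_DEVICE_ID  0x10fb /* placeholder */

static const struct rte_pci_id pci_id_mydrv_map[] = {
        { RTE_PCI_DEVICE(MYDRV_VENDOR_ID, MYDRV_DEVICE_ID) },
        { .vendor_id = 0, /* sentinel */ },
};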
 
 static const struct rte_eth_desc_lim rx_desc_lim = {
@@ -465,8 +533,10 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .xstats_get           = ixgbe_dev_xstats_get,
        .stats_reset          = ixgbe_dev_stats_reset,
        .xstats_reset         = ixgbe_dev_xstats_reset,
+       .xstats_get_names     = ixgbe_dev_xstats_get_names,
        .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
        .dev_infos_get        = ixgbe_dev_info_get,
+       .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
        .mtu_set              = ixgbe_dev_mtu_set,
        .vlan_filter_set      = ixgbe_vlan_filter_set,
        .vlan_tpid_set        = ixgbe_vlan_tpid_set,
@@ -525,7 +595,6 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .timesync_disable     = ixgbe_timesync_disable,
        .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
-       .get_reg_length       = ixgbe_get_reg_length,
        .get_reg              = ixgbe_get_regs,
        .get_eeprom_length    = ixgbe_get_eeprom_length,
        .get_eeprom           = ixgbe_get_eeprom,
@@ -553,10 +622,12 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .xstats_get           = ixgbevf_dev_xstats_get,
        .stats_reset          = ixgbevf_dev_stats_reset,
        .xstats_reset         = ixgbevf_dev_stats_reset,
+       .xstats_get_names     = ixgbevf_dev_xstats_get_names,
        .dev_close            = ixgbevf_dev_close,
        .allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
        .allmulticast_disable = ixgbevf_dev_allmulticast_disable,
        .dev_infos_get        = ixgbevf_dev_info_get,
+       .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
        .mtu_set              = ixgbevf_dev_set_mtu,
        .vlan_filter_set      = ixgbevf_vlan_filter_set,
        .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
@@ -574,7 +645,6 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .rxq_info_get         = ixgbe_rxq_info_get,
        .txq_info_get         = ixgbe_txq_info_get,
        .mac_addr_set         = ixgbevf_set_default_mac_addr,
-       .get_reg_length       = ixgbevf_get_reg_length,
        .get_reg              = ixgbevf_get_regs,
        .reta_update          = ixgbe_dev_rss_reta_update,
        .reta_query           = ixgbe_dev_rss_reta_query,
@@ -682,6 +752,7 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
 
 #define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
                           sizeof(rte_ixgbe_rxq_strings[0]))
+#define IXGBE_NB_RXQ_PRIO_VALUES 8
 
 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
        {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
@@ -692,6 +763,7 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
 
 #define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
                           sizeof(rte_ixgbe_txq_strings[0]))
+#define IXGBE_NB_TXQ_PRIO_VALUES 8
 
 static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
        {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
@@ -898,8 +970,7 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
                             stat_mappings->rqsmr[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
-       }
-       else {
+       } else {
                PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
                             stat_mappings->tqsm[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
@@ -908,7 +979,7 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
 }
 
 static void
-ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
+ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
 {
        struct ixgbe_stat_mapping_registers *stat_mappings =
                IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
@@ -926,7 +997,7 @@ ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
 }
 
 static void
-ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
+ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
 {
        uint8_t i;
        struct ixgbe_dcb_tc_config *tc;
@@ -949,7 +1020,7 @@ ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
        tc = &dcb_config->tc_config[0];
        tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
        tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
-       for (i = 0; i< IXGBE_DCB_MAX_BW_GROUP; i++) {
+       for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
                dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
                dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
        }
@@ -1013,7 +1084,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
        struct rte_pci_device *pci_dev;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
-       struct ixgbe_vfta * shadow_vfta =
+       struct ixgbe_vfta *shadow_vfta =
                IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
        struct ixgbe_hwstrip *hwstrip =
                IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
@@ -1036,17 +1107,18 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
         * has already done this work. Only check we don't need a different
         * RX and TX function.
         */
-       if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                struct ixgbe_tx_queue *txq;
                /* TX queue function in primary, set by last queue initialized
-                * Tx queue may not initialized by primary process */
+                * Tx queue may not initialized by primary process
+                */
                if (eth_dev->data->tx_queues) {
                        txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
                        ixgbe_set_tx_function(eth_dev, txq);
                } else {
                        /* Use default TX function if we get here */
                        PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
-                                            "Using default TX function.");
+                                    "Using default TX function.");
                }
 
                ixgbe_set_rx_function(eth_dev);
@@ -1083,7 +1155,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 
        /* Initialize DCB configuration*/
        memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
-       ixgbe_dcb_init(hw,dcb_config);
+       ixgbe_dcb_init(hw, dcb_config);
        /* Get Hardware Flow Control setting */
        hw->fc.requested_mode = ixgbe_fc_full;
        hw->fc.current_mode = ixgbe_fc_full;
@@ -1124,11 +1196,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 
        if (diag == IXGBE_ERR_EEPROM_VERSION) {
                PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
-                   "LOM.  Please be aware there may be issues associated "
-                   "with your hardware.");
+                            "LOM.  Please be aware there may be issues associated "
+                            "with your hardware.");
                PMD_INIT_LOG(ERR, "If you are experiencing problems "
-                   "please contact your Intel or hardware representative "
-                   "who provided you with this hardware.");
+                            "please contact your Intel or hardware representative "
+                            "who provided you with this hardware.");
        } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
                PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
        if (diag) {
@@ -1147,12 +1219,12 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 
        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
-                       hw->mac.num_rar_entries, 0);
+                                              hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
-                       "Failed to allocate %u bytes needed to store "
-                       "MAC addresses",
-                       ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+                            "Failed to allocate %u bytes needed to store "
+                            "MAC addresses",
+                            ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }
        /* Copy the permanent MAC address */
@@ -1161,11 +1233,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 
        /* Allocate memory for storing hash filter MAC addresses */
        eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
-                       IXGBE_VMDQ_NUM_UC_MAC, 0);
+                                                   IXGBE_VMDQ_NUM_UC_MAC, 0);
        if (eth_dev->data->hash_mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
-                       "Failed to allocate %d bytes needed to store MAC addresses",
-                       ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
+                            "Failed to allocate %d bytes needed to store MAC addresses",
+                            ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
                return -ENOMEM;
        }
 
@@ -1195,8 +1267,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
                             (int) hw->mac.type, (int) hw->phy.type);
 
        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
-                       eth_dev->data->port_id, pci_dev->id.vendor_id,
-                       pci_dev->id.device_id);
+                    eth_dev->data->port_id, pci_dev->id.vendor_id,
+                    pci_dev->id.device_id);
 
        rte_intr_callback_register(&pci_dev->intr_handle,
                                   ixgbe_dev_interrupt_handler,
@@ -1211,7 +1283,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
        /* initialize 5tuple filter list */
        TAILQ_INIT(&filter_info->fivetuple_list);
        memset(filter_info->fivetuple_mask, 0,
-               sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+              sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
 
        return 0;
 }
@@ -1310,7 +1382,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
        struct rte_pci_device *pci_dev;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
-       struct ixgbe_vfta * shadow_vfta =
+       struct ixgbe_vfta *shadow_vfta =
                IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
        struct ixgbe_hwstrip *hwstrip =
                IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
@@ -1324,10 +1396,24 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 
        /* for secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
-        * RX function */
-       if (rte_eal_process_type() != RTE_PROC_PRIMARY){
-               if (eth_dev->data->scattered_rx)
-                       eth_dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
+        * RX function
+        */
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+               struct ixgbe_tx_queue *txq;
+               /* TX queue function in primary, set by last queue initialized
+                * Tx queue may not initialized by primary process
+                */
+               if (eth_dev->data->tx_queues) {
+                       txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
+                       ixgbe_set_tx_function(eth_dev, txq);
+               } else {
+                       /* Use default TX function if we get here */
+                       PMD_INIT_LOG(NOTICE,
+                                    "No TX queues configured yet. Using default TX function.");
+               }
+
+               ixgbe_set_rx_function(eth_dev);
+
                return 0;
        }
 
@@ -1382,12 +1468,12 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 
        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
-                       hw->mac.num_rar_entries, 0);
+                                              hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
-                       "Failed to allocate %u bytes needed to store "
-                       "MAC addresses",
-                       ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+                            "Failed to allocate %u bytes needed to store "
+                            "MAC addresses",
+                            ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }
 
@@ -1417,14 +1503,20 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
        /* reset the hardware with the new settings */
        diag = hw->mac.ops.start_hw(hw);
        switch (diag) {
-               case  0:
-                       break;
+       case  0:
+               break;
 
-               default:
-                       PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
-                       return -EIO;
+       default:
+               PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
+               return -EIO;
        }
 
+       rte_intr_callback_register(&pci_dev->intr_handle,
+                                  ixgbevf_dev_interrupt_handler,
+                                  (void *)eth_dev);
+       rte_intr_enable(&pci_dev->intr_handle);
+       ixgbevf_intr_enable(hw);
+
        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id, "ixgbe_mac_82599_vf");
@@ -1438,7 +1530,7 @@ static int
 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
 {
        struct ixgbe_hw *hw;
-       unsigned i;
+       struct rte_pci_device *pci_dev = eth_dev->pci_dev;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -1457,21 +1549,14 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
        /* Disable the interrupts for VF */
        ixgbevf_intr_disable(hw);
 
-       for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
-               ixgbe_dev_rx_queue_release(eth_dev->data->rx_queues[i]);
-               eth_dev->data->rx_queues[i] = NULL;
-       }
-       eth_dev->data->nb_rx_queues = 0;
-
-       for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
-               ixgbe_dev_tx_queue_release(eth_dev->data->tx_queues[i]);
-               eth_dev->data->tx_queues[i] = NULL;
-       }
-       eth_dev->data->nb_tx_queues = 0;
-
        rte_free(eth_dev->data->mac_addrs);
        eth_dev->data->mac_addrs = NULL;
 
+       rte_intr_disable(&pci_dev->intr_handle);
+       rte_intr_callback_unregister(&pci_dev->intr_handle,
+                                    ixgbevf_dev_interrupt_handler,
+                                    (void *)eth_dev);
+
        return 0;
 }
 
@@ -1481,6 +1566,8 @@ static struct eth_driver rte_ixgbe_pmd = {
                .id_table = pci_id_ixgbe_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
                        RTE_PCI_DRV_DETACHABLE,
+               .probe = rte_eth_dev_pci_probe,
+               .remove = rte_eth_dev_pci_remove,
        },
        .eth_dev_init = eth_ixgbe_dev_init,
        .eth_dev_uninit = eth_ixgbe_dev_uninit,
@@ -1495,46 +1582,20 @@ static struct eth_driver rte_ixgbevf_pmd = {
                .name = "rte_ixgbevf_pmd",
                .id_table = pci_id_ixgbevf_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
+               .probe = rte_eth_dev_pci_probe,
+               .remove = rte_eth_dev_pci_remove,
        },
        .eth_dev_init = eth_ixgbevf_dev_init,
        .eth_dev_uninit = eth_ixgbevf_dev_uninit,
        .dev_private_size = sizeof(struct ixgbe_adapter),
 };
 
-/*
- * Driver initialization routine.
- * Invoked once at EAL init time.
- * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
- */
-static int
-rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
-{
-       PMD_INIT_FUNC_TRACE();
-
-       rte_eth_driver_register(&rte_ixgbe_pmd);
-       return 0;
-}
-
-/*
- * VF Driver initialization routine.
- * Invoked one at EAL init time.
- * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
- */
-static int
-rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
-{
-       PMD_INIT_FUNC_TRACE();
-
-       rte_eth_driver_register(&rte_ixgbevf_pmd);
-       return 0;
-}
-
 static int
 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct ixgbe_vfta * shadow_vfta =
+       struct ixgbe_vfta *shadow_vfta =
                IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
        uint32_t vfta;
        uint32_t vid_idx;
@@ -1572,15 +1633,47 @@ ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret = 0;
+       uint32_t reg;
+       uint32_t qinq;
+
+       qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+       qinq &= IXGBE_DMATXCTL_GDV;
 
        switch (vlan_type) {
        case ETH_VLAN_TYPE_INNER:
-               /* Only the high 16-bits is valid */
-               IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
+               if (qinq) {
+                       reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+                       reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
+                       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
+                       reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+                       reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
+                               | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
+                       IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
+               } else {
+                       ret = -ENOTSUP;
+                       PMD_DRV_LOG(ERR, "Inner type is not supported"
+                                   " by single VLAN");
+               }
+               break;
+       case ETH_VLAN_TYPE_OUTER:
+               if (qinq) {
+                       /* Only the high 16-bits is valid */
+                       IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
+                                       IXGBE_EXVET_VET_EXT_SHIFT);
+               } else {
+                       reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+                       reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
+                       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
+                       reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+                       reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
+                               | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
+                       IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
+               }
+
                break;
        default:
                ret = -EINVAL;
-               PMD_DRV_LOG(ERR, "Unsupported vlan type %d\n", vlan_type);
+               PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
                break;
        }
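
Outer and inner TPIDs are now handled separately: with QinQ enabled in DMATXCTL the outer type is written to EXVET (shifted into the high 16 bits), otherwise VLNCTRL/DMATXCTL are reprogrammed, and requesting the inner type without QinQ returns -ENOTSUP. A sketch of the application-side call this serves, assuming the two-argument VLAN-type variant of the ethdev API:

#include <stdio.h>
#include <errno.h>
#include <rte_ethdev.h>

static void set_qinq_tpids(uint8_t port_id)
{
        /* Outer (service) tag type 0x88A8. */
        int ret = rte_eth_dev_set_vlan_ether_type(port_id,
                                                  ETH_VLAN_TYPE_OUTER, 0x88A8);
        if (ret != 0)
                printf("outer TPID rejected: %d\n", ret);

        /* Inner tag type; the code above rejects this unless QinQ is on. */
        ret = rte_eth_dev_set_vlan_ether_type(port_id,
                                              ETH_VLAN_TYPE_INNER, 0x8100);
        if (ret == -ENOTSUP)
                printf("inner TPID needs extended VLAN enabled\n");
}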
 
@@ -1608,7 +1701,7 @@ ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct ixgbe_vfta * shadow_vfta =
+       struct ixgbe_vfta *shadow_vfta =
                IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
        uint32_t vlnctrl;
        uint16_t i;
@@ -1632,6 +1725,7 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 {
        struct ixgbe_hwstrip *hwstrip =
                IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
+       struct ixgbe_rx_queue *rxq;
 
        if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
                return;
@@ -1640,6 +1734,16 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
                IXGBE_SET_HWSTRIP(hwstrip, queue);
        else
                IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
+
+       if (queue >= dev->data->nb_rx_queues)
+               return;
+
+       rxq = dev->data->rx_queues[queue];
+
+       if (on)
+               rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+       else
+               rxq->vlan_flags = PKT_RX_VLAN_PKT;
 }
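
The strip setting is now also cached per RX queue as vlan_flags, so the RX path can mark mbufs with the matching offload flags. On the application side the distinction looks roughly like this (a sketch; flag semantics as understood from this change):

#include <stdio.h>
#include <rte_mbuf.h>

static void report_vlan(const struct rte_mbuf *m)
{
        if (m->ol_flags & PKT_RX_VLAN_STRIPPED)
                printf("VLAN %u stripped by hardware\n", m->vlan_tci);
        else if (m->ol_flags & PKT_RX_VLAN_PKT)
                printf("VLAN %u reported, tag still in the frame\n", m->vlan_tci);
}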
 
 static void
@@ -1656,12 +1760,12 @@ ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
                PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
                return;
        }
-       else {
-               /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
-               ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
-               ctrl &= ~IXGBE_RXDCTL_VME;
-               IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
-       }
+
+       /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
+       ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+       ctrl &= ~IXGBE_RXDCTL_VME;
+       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
+
        /* record those setting for HW strip per queue */
        ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
 }
@@ -1680,12 +1784,12 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
                PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
                return;
        }
-       else {
-               /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
-               ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
-               ctrl |= IXGBE_RXDCTL_VME;
-               IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
-       }
+
+       /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
+       ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+       ctrl |= IXGBE_RXDCTL_VME;
+       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
+
        /* record those setting for HW strip per queue */
        ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
 }
@@ -1704,8 +1808,7 @@ ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
                ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
                ctrl &= ~IXGBE_VLNCTRL_VME;
                IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
-       }
-       else {
+       } else {
                /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
@@ -1732,8 +1835,7 @@ ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
                ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
                ctrl |= IXGBE_VLNCTRL_VME;
                IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
-       }
-       else {
+       } else {
                /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
@@ -1788,7 +1890,8 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 
        /* Clear pooling mode of PFVTCTL. It's required by X550. */
        if (hw->mac.type == ixgbe_mac_X550 ||
-           hw->mac.type == ixgbe_mac_X550EM_x) {
+           hw->mac.type == ixgbe_mac_X550EM_x ||
+           hw->mac.type == ixgbe_mac_X550EM_a) {
                ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
                ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
                IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
@@ -1832,6 +1935,7 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
        uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+
        vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
 }
@@ -1861,8 +1965,9 @@ static int
 ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 {
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t nb_rx_q = dev->data->nb_rx_queues;
-       uint16_t nb_tx_q = dev->data->nb_rx_queues;
+       uint16_t nb_tx_q = dev->data->nb_tx_queues;
 
        if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
                /* check multi-queue mode */
@@ -1917,11 +2022,18 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
                if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
                    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
                        PMD_INIT_LOG(ERR, "SRIOV is active,"
-                                       " queue number must less equal to %d.",
+                                       " nb_rx_q=%d nb_tx_q=%d queue number"
+                                       " must be less than or equal to %d.",
+                                       nb_rx_q, nb_tx_q,
                                        RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
                        return -EINVAL;
                }
        } else {
+               if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+                       PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
+                                         " not supported.");
+                       return -EINVAL;
+               }
                /* check configuration for vmdb+dcb mode */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_conf *conf;
@@ -1995,6 +2107,21 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
                                return -EINVAL;
                        }
                }
+
+               /*
+                * When DCB/VT is off, maximum number of queues changes,
+                * except for 82598EB, which remains constant.
+                */
+               if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
+                               hw->mac.type != ixgbe_mac_82598EB) {
+                       if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
+                               PMD_INIT_LOG(ERR,
+                                            "Neither VT nor DCB are enabled, "
+                                            "nb_tx_q > %d.",
+                                            IXGBE_NONE_MODE_TX_NB_QUEUES);
+                               return -EINVAL;
+                       }
+               }
        }
        return 0;
 }
@@ -2030,6 +2157,25 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
        return 0;
 }
 
+static void
+ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+       uint32_t gpie;
+
+       /* only set up it on X550EM_X */
+       if (hw->mac.type == ixgbe_mac_X550EM_x) {
+               gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+               gpie |= IXGBE_SDP0_GPIEN_X550EM_x;
+               IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+               if (hw->phy.type == ixgbe_phy_x550em_ext_t)
+                       intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x;
+       }
+}
+
 /*
  * Configure device link speed and setup link.
  * It returns 0 on success.
@@ -2048,14 +2194,16 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        int mask = 0;
        int status;
        uint16_t vf, idx;
+       uint32_t *link_speeds;
 
        PMD_INIT_FUNC_TRACE();
 
-       /* IXGBE devices don't support half duplex */
-       if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
-                       (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
-               PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
-                            dev->data->dev_conf.link_duplex,
+       /* IXGBE devices don't support:
+       *    - half duplex (checked afterwards for valid speeds)
+       *    - fixed speed: TODO implement
+       */
+       if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
+               PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; fix speed not supported",
                             dev->data->port_id);
                return -EINVAL;
        }
@@ -2068,7 +2216,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        ixgbe_stop_adapter(hw);
 
        /* reinitialize adapter
-        * this calls reset and start */
+        * this calls reset and start
+        */
        status = ixgbe_pf_reset_hw(hw);
        if (status != 0)
                return -1;
@@ -2078,11 +2227,18 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        /* configure PF module if SRIOV enabled */
        ixgbe_pf_host_configure(dev);
 
+       ixgbe_dev_phy_intr_setup(dev);
+
        /* check and configure queue intr-vector mapping */
        if ((rte_intr_cap_multiple(intr_handle) ||
             !RTE_ETH_DEV_SRIOV(dev).active) &&
            dev->data->dev_conf.intr_conf.rxq != 0) {
                intr_vector = dev->data->nb_rx_queues;
+               if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) {
+                       PMD_INIT_LOG(ERR, "At most %d intr queues supported",
+                                       IXGBE_MAX_INTR_QUEUE_NUM);
+                       return -ENOTSUP;
+               }
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;
        }
@@ -2145,30 +2301,25 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        if (err)
                goto error;
 
-       switch(dev->data->dev_conf.link_speed) {
-       case ETH_LINK_SPEED_AUTONEG:
+       link_speeds = &dev->data->dev_conf.link_speeds;
+       if (*link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
+                       ETH_LINK_SPEED_10G)) {
+               PMD_INIT_LOG(ERR, "Invalid link setting");
+               goto error;
+       }
+
+       speed = 0x0;
+       if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
                speed = (hw->mac.type != ixgbe_mac_82598EB) ?
                                IXGBE_LINK_SPEED_82599_AUTONEG :
                                IXGBE_LINK_SPEED_82598_AUTONEG;
-               break;
-       case ETH_LINK_SPEED_100:
-               /*
-                * Invalid for 82598 but error will be detected by
-                * ixgbe_setup_link()
-                */
-               speed = IXGBE_LINK_SPEED_100_FULL;
-               break;
-       case ETH_LINK_SPEED_1000:
-               speed = IXGBE_LINK_SPEED_1GB_FULL;
-               break;
-       case ETH_LINK_SPEED_10000:
-               speed = IXGBE_LINK_SPEED_10GB_FULL;
-               break;
-       default:
-               PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu",
-                            dev->data->dev_conf.link_speed,
-                            dev->data->port_id);
-               goto error;
+       } else {
+               if (*link_speeds & ETH_LINK_SPEED_10G)
+                       speed |= IXGBE_LINK_SPEED_10GB_FULL;
+               if (*link_speeds & ETH_LINK_SPEED_1G)
+                       speed |= IXGBE_LINK_SPEED_1GB_FULL;
+               if (*link_speeds & ETH_LINK_SPEED_100M)
+                       speed |= IXGBE_LINK_SPEED_100_FULL;
        }
 
        err = ixgbe_setup_link(hw, speed, link_up);
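
The fixed link_speed enum plus link_duplex check is replaced by the link_speeds bitmap: several speeds can be advertised at once, 0 means full autonegotiation, and ETH_LINK_SPEED_FIXED is rejected as unsupported. A sketch of the corresponding application-side configuration:

#include <rte_ethdev.h>

static const struct rte_eth_conf port_conf = {
        /* Advertise 1G and 10G; .link_speeds = 0 (AUTONEG) lets the PHY pick. */
        .link_speeds = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G,
};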
@@ -2201,7 +2352,7 @@ skip_link_setup:
        /* resume enabled intr since hw reset */
        ixgbe_enable_intr(dev);
 
-       mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
+       mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
                ETH_VLAN_EXTEND_MASK;
        ixgbe_vlan_offload_set(dev, mask);
 
@@ -2421,8 +2572,8 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
        hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
 
        for (i = 0; i < 8; i++) {
-               uint32_t mp;
-               mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+               uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+
                /* global total per queue */
                hw_stats->mpc[i] += mp;
                /* Running comprehensive total for stats display */
@@ -2614,16 +2765,15 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        /* Rx Errors */
        stats->imissed  = total_missed_rx;
        stats->ierrors  = hw_stats->crcerrs +
-                         hw_stats->mspdc +
-                         hw_stats->rlec +
-                         hw_stats->ruc +
-                         hw_stats->roc +
-                         total_missed_rx +
-                         hw_stats->illerrc +
-                         hw_stats->errbc +
-                         hw_stats->rfc +
-                         hw_stats->fccrc +
-                         hw_stats->fclast;
+                         hw_stats->mspdc +
+                         hw_stats->rlec +
+                         hw_stats->ruc +
+                         hw_stats->roc +
+                         hw_stats->illerrc +
+                         hw_stats->errbc +
+                         hw_stats->rfc +
+                         hw_stats->fccrc +
+                         hw_stats->fclast;
 
        /* Tx Errors */
        stats->oerrors  = 0;
@@ -2645,12 +2795,76 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
 /* This function calculates the number of xstats based on the current config */
 static unsigned
 ixgbe_xstats_calc_num(void) {
-       return IXGBE_NB_HW_STATS + (IXGBE_NB_RXQ_PRIO_STATS * 8) +
-               (IXGBE_NB_TXQ_PRIO_STATS * 8);
+       return IXGBE_NB_HW_STATS +
+               (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
+               (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
+}
+
+static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+       struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
+{
+       const unsigned cnt_stats = ixgbe_xstats_calc_num();
+       unsigned stat, i, count;
+
+       if (xstats_names != NULL) {
+               count = 0;
+
+               /* Note: limit >= cnt_stats checked upstream
+                * in rte_eth_xstats_names()
+                */
+
+               /* Extended stats from ixgbe_hw_stats */
+               for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
+                       snprintf(xstats_names[count].name,
+                               sizeof(xstats_names[count].name),
+                               "%s",
+                               rte_ixgbe_stats_strings[i].name);
+                       count++;
+               }
+
+               /* RX Priority Stats */
+               for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
+                       for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
+                               snprintf(xstats_names[count].name,
+                                       sizeof(xstats_names[count].name),
+                                       "rx_priority%u_%s", i,
+                                       rte_ixgbe_rxq_strings[stat].name);
+                               count++;
+                       }
+               }
+
+               /* TX Priority Stats */
+               for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
+                       for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
+                               snprintf(xstats_names[count].name,
+                                       sizeof(xstats_names[count].name),
+                                       "tx_priority%u_%s", i,
+                                       rte_ixgbe_txq_strings[stat].name);
+                               count++;
+                       }
+               }
+       }
+       return cnt_stats;
+}
+
+static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+       struct rte_eth_xstat_name *xstats_names, unsigned limit)
+{
+       unsigned i;
+
+       if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL)
+               return -ENOMEM;
+
+       if (xstats_names != NULL)
+               for (i = 0; i < IXGBEVF_NB_XSTATS; i++)
+                       snprintf(xstats_names[i].name,
+                               sizeof(xstats_names[i].name),
+                               "%s", rte_ixgbevf_stats_strings[i].name);
+       return IXGBEVF_NB_XSTATS;
 }
 
 static int
-ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                                         unsigned n)
 {
        struct ixgbe_hw *hw =
@@ -2682,8 +2896,6 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
        /* Extended stats from ixgbe_hw_stats */
        count = 0;
        for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
-               snprintf(xstats[count].name, sizeof(xstats[count].name), "%s",
-                        rte_ixgbe_stats_strings[i].name);
                xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
                                rte_ixgbe_stats_strings[i].offset);
                count++;
@@ -2691,10 +2903,7 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
 
        /* RX Priority Stats */
        for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
-               for (i = 0; i < 8; i++) {
-                       snprintf(xstats[count].name, sizeof(xstats[count].name),
-                                "rx_priority%u_%s", i,
-                                rte_ixgbe_rxq_strings[stat].name);
+               for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
                        xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
                                        rte_ixgbe_rxq_strings[stat].offset +
                                        (sizeof(uint64_t) * i));
@@ -2704,17 +2913,13 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
 
        /* TX Priority Stats */
        for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
-               for (i = 0; i < 8; i++) {
-                       snprintf(xstats[count].name, sizeof(xstats[count].name),
-                                "tx_priority%u_%s", i,
-                                rte_ixgbe_txq_strings[stat].name);
+               for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
                        xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
                                        rte_ixgbe_txq_strings[stat].offset +
                                        (sizeof(uint64_t) * i));
                        count++;
                }
        }
-
        return count;
 }
 
@@ -2737,7 +2942,7 @@ static void
 ixgbevf_update_stats(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
+       struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
                          IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
 
        /* Good Rx packet, include VF loopback */
@@ -2762,7 +2967,7 @@ ixgbevf_update_stats(struct rte_eth_dev *dev)
 }
 
 static int
-ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                       unsigned n)
 {
        struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
@@ -2779,8 +2984,6 @@ ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
 
        /* Extended stats */
        for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
-               snprintf(xstats[i].name, sizeof(xstats[i].name),
-                        "%s", rte_ixgbevf_stats_strings[i].name);
                xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
                        rte_ixgbevf_stats_strings[i].offset);
        }
@@ -2803,14 +3006,12 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        stats->ibytes = hw_stats->vfgorc;
        stats->opackets = hw_stats->vfgptc;
        stats->obytes = hw_stats->vfgotc;
-       stats->imcasts = hw_stats->vfmprc;
-       /* stats->imcasts should be removed as imcasts is deprecated */
 }
 
 static void
 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
 {
-       struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
+       struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
                        IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
 
        /* Sync HW register to the last stats */
@@ -2821,17 +3022,25 @@ ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
        hw_stats->vfgorc = 0;
        hw_stats->vfgptc = 0;
        hw_stats->vfgotc = 0;
-       hw_stats->vfmprc = 0;
-
 }
 
 static void
 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
 
        dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
+       if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+               /*
+                * When DCB/VT is off, maximum number of queues changes,
+                * except for 82598EB, which remains constant.
+                */
+               if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
+                               hw->mac.type != ixgbe_mac_82598EB)
+                       dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
+       }
        dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
        dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
        dev_info->max_mac_addrs = hw->mac.num_rar_entries;
@@ -2858,7 +3067,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
 
        if (hw->mac.type == ixgbe_mac_X550 ||
-           hw->mac.type == ixgbe_mac_X550EM_x)
+           hw->mac.type == ixgbe_mac_X550EM_x ||
+           hw->mac.type == ixgbe_mac_X550EM_a)
                dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
        dev_info->tx_offload_capa =
@@ -2870,7 +3080,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                DEV_TX_OFFLOAD_TCP_TSO;
 
        if (hw->mac.type == ixgbe_mac_X550 ||
-           hw->mac.type == ixgbe_mac_X550EM_x)
+           hw->mac.type == ixgbe_mac_X550EM_x ||
+           hw->mac.type == ixgbe_mac_X550EM_a)
                dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -2901,6 +3112,47 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
        dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
        dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
+
+       dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+       if (hw->mac.type == ixgbe_mac_X540 ||
+           hw->mac.type == ixgbe_mac_X540_vf ||
+           hw->mac.type == ixgbe_mac_X550 ||
+           hw->mac.type == ixgbe_mac_X550_vf) {
+               dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+       }
+}
+
+static const uint32_t *
+ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+       static const uint32_t ptypes[] = {
+               /* For non-vec Rx functions, see ixgbe_rxd_pkt_info_to_pkt_type();
+                * for vec Rx functions, see _recv_raw_pkts_vec().
+                */
+               RTE_PTYPE_L2_ETHER,
+               RTE_PTYPE_L3_IPV4,
+               RTE_PTYPE_L3_IPV4_EXT,
+               RTE_PTYPE_L3_IPV6,
+               RTE_PTYPE_L3_IPV6_EXT,
+               RTE_PTYPE_L4_SCTP,
+               RTE_PTYPE_L4_TCP,
+               RTE_PTYPE_L4_UDP,
+               RTE_PTYPE_TUNNEL_IP,
+               RTE_PTYPE_INNER_L3_IPV6,
+               RTE_PTYPE_INNER_L3_IPV6_EXT,
+               RTE_PTYPE_INNER_L4_TCP,
+               RTE_PTYPE_INNER_L4_UDP,
+               RTE_PTYPE_UNKNOWN
+       };
+
+       if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
+           dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
+           dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
+           dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
+               return ptypes;
+       return NULL;
 }
 
 static void
@@ -2967,9 +3219,9 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
        int link_up;
        int diag;
 
-       link.link_status = 0;
+       link.link_status = ETH_LINK_DOWN;
        link.link_speed = 0;
-       link.link_duplex = 0;
+       link.link_duplex = ETH_LINK_HALF_DUPLEX;
        memset(&old, 0, sizeof(old));
        rte_ixgbe_dev_atomic_read_link_status(dev, &old);
 
@@ -2982,8 +3234,8 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
                diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
 
        if (diag != 0) {
-               link.link_speed = ETH_LINK_SPEED_100;
-               link.link_duplex = ETH_LINK_HALF_DUPLEX;
+               link.link_speed = ETH_SPEED_NUM_100M;
+               link.link_duplex = ETH_LINK_FULL_DUPLEX;
                rte_ixgbe_dev_atomic_write_link_status(dev, &link);
                if (link.link_status == old.link_status)
                        return -1;
@@ -2996,26 +3248,26 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
                        return -1;
                return 0;
        }
-       link.link_status = 1;
+       link.link_status = ETH_LINK_UP;
        link.link_duplex = ETH_LINK_FULL_DUPLEX;
 
        switch (link_speed) {
        default:
        case IXGBE_LINK_SPEED_UNKNOWN:
-               link.link_duplex = ETH_LINK_HALF_DUPLEX;
-               link.link_speed = ETH_LINK_SPEED_100;
+               link.link_duplex = ETH_LINK_FULL_DUPLEX;
+               link.link_speed = ETH_SPEED_NUM_100M;
                break;
 
        case IXGBE_LINK_SPEED_100_FULL:
-               link.link_speed = ETH_LINK_SPEED_100;
+               link.link_speed = ETH_SPEED_NUM_100M;
                break;
 
        case IXGBE_LINK_SPEED_1GB_FULL:
-               link.link_speed = ETH_LINK_SPEED_1000;
+               link.link_speed = ETH_SPEED_NUM_1G;
                break;
 
        case IXGBE_LINK_SPEED_10GB_FULL:
-               link.link_speed = ETH_LINK_SPEED_10000;
+               link.link_speed = ETH_SPEED_NUM_10G;
                break;
        }
        rte_ixgbe_dev_atomic_write_link_status(dev, &link);
@@ -3156,6 +3408,11 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
        if (eicr & IXGBE_EICR_MAILBOX)
                intr->flags |= IXGBE_FLAG_MAILBOX;
 
+       if (hw->mac.type == ixgbe_mac_X550EM_x &&
+           hw->phy.type == ixgbe_phy_x550em_ext_t &&
+           (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
+               intr->flags |= IXGBE_FLAG_PHY_INTERRUPT;
+
        return 0;
 }
 
@@ -3211,6 +3468,8 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
        int64_t timeout;
        struct rte_eth_link link;
        int intr_enable_delay = false;
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
 
@@ -3219,6 +3478,11 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
                intr->flags &= ~IXGBE_FLAG_MAILBOX;
        }
 
+       if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
+               ixgbe_handle_lasi(hw);
+               intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
+       }
+
        if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
                /* get the link status before link update, for predicting later */
                memset(&link, 0, sizeof(link));
@@ -3242,7 +3506,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
 
        if (intr_enable_delay) {
                if (rte_eal_alarm_set(timeout * 1000,
-                                     ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0)
+                                     ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
                        PMD_DRV_LOG(ERR, "Error setting alarm");
        } else {
                PMD_DRV_LOG(DEBUG, "enable intr immediately");
@@ -3282,6 +3546,11 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
        if (eicr & IXGBE_EICR_MAILBOX)
                ixgbe_pf_mbx_process(dev);
 
+       if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
+               ixgbe_handle_lasi(hw);
+               intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
+       }
+
        if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
                ixgbe_dev_link_update(dev, 0);
                intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -3456,7 +3725,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
  *  Enable flow control according to the current settings.
  */
 static int
-ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
+ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
 {
        int ret_val = 0;
        uint32_t mflcn_reg, fccfg_reg;
@@ -3503,13 +3772,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
                 * and the TX pause can not be disabled
                 */
                nb_rx_en = 0;
-               for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+               for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                        reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
                        if (reg & IXGBE_FCRTH_FCEN)
                                nb_rx_en++;
                }
                if (nb_rx_en > 1)
-                       fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+                       fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
                break;
        case ixgbe_fc_rx_pause:
                /*
@@ -3526,20 +3795,20 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
                 * and the TX pause can not be disabled
                 */
                nb_rx_en = 0;
-               for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+               for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                        reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
                        if (reg & IXGBE_FCRTH_FCEN)
                                nb_rx_en++;
                }
                if (nb_rx_en > 1)
-                       fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+                       fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
                break;
        case ixgbe_fc_tx_pause:
                /*
                 * Tx Flow control is enabled, and Rx Flow control is
                 * disabled by software override.
                 */
-               fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+               fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
                break;
        case ixgbe_fc_full:
                /* Flow control (both Rx and Tx) is enabled by SW override. */
@@ -3550,7 +3819,6 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
                PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
                ret_val = IXGBE_ERR_CONFIG;
                goto out;
-               break;
        }
 
        /* Set 802.3x based flow control settings. */
@@ -3589,13 +3857,13 @@ out:
 }
 
 static int
-ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num)
+ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
 
        if (hw->mac.type != ixgbe_mac_82598EB) {
-               ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num);
+               ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
        }
        return ret_val;
 }
@@ -3609,9 +3877,9 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
        uint8_t tc_num;
        uint8_t  map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
        struct ixgbe_hw *hw =
-                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_dcb_config *dcb_config =
-                IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
 
        enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
                ixgbe_fc_none,
@@ -3644,7 +3912,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
        hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
        hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
 
-       err = ixgbe_dcb_pfc_enable(dev,tc_num);
+       err = ixgbe_dcb_pfc_enable(dev, tc_num);
 
        /* Not negotiated is not an error case */
        if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
@@ -3659,11 +3927,11 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
                          struct rte_eth_rss_reta_entry64 *reta_conf,
                          uint16_t reta_size)
 {
-       uint8_t i, j, mask;
+       uint16_t i, sp_reta_size;
+       uint8_t j, mask;
        uint32_t reta, r;
        uint16_t idx, shift;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint16_t sp_reta_size;
        uint32_t reta_reg;
 
        PMD_INIT_FUNC_TRACE();
@@ -3713,11 +3981,11 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
                         struct rte_eth_rss_reta_entry64 *reta_conf,
                         uint16_t reta_size)
 {
-       uint8_t i, j, mask;
+       uint16_t i, sp_reta_size;
+       uint8_t j, mask;
        uint32_t reta;
        uint16_t idx, shift;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint16_t sp_reta_size;
        uint32_t reta_reg;
 
        PMD_INIT_FUNC_TRACE();
@@ -3792,7 +4060,8 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
                return -EINVAL;
 
        /* refuse mtu that requires the support of scattered packets when this
-        * feature has not been enabled before. */
+        * feature has not been enabled before.
+        */
        if (!dev->data->scattered_rx &&
            (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
             dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
@@ -3852,7 +4121,7 @@ ixgbevf_intr_enable(struct ixgbe_hw *hw)
 static int
 ixgbevf_dev_configure(struct rte_eth_dev *dev)
 {
-       struct rte_eth_conf* conf = &dev->data->dev_conf;
+       struct rte_eth_conf *conf = &dev->data->dev_conf;
        struct ixgbe_adapter *adapter =
                        (struct ixgbe_adapter *)dev->data->dev_private;
 
@@ -3914,10 +4183,10 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
        }
 
        /* Set vfta */
-       ixgbevf_set_vfta_all(dev,1);
+       ixgbevf_set_vfta_all(dev, 1);
 
        /* Set HW strip */
-       mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
+       mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
                ETH_VLAN_EXTEND_MASK;
        ixgbevf_vlan_offload_set(dev, mask);
 
@@ -3958,6 +4227,8 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
 
+       ixgbevf_intr_disable(hw);
+
        hw->adapter_stopped = 1;
        ixgbe_stop_adapter(hw);
 
@@ -3965,7 +4236,7 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
          * Clear what we set, but we still keep shadow_vfta to
          * restore after device starts
          */
-       ixgbevf_set_vfta_all(dev,0);
+       ixgbevf_set_vfta_all(dev, 0);
 
        /* Clear stored conf */
        dev->data->scattered_rx = 0;
@@ -3993,25 +4264,30 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)
 
        ixgbe_dev_free_queues(dev);
 
-       /* reprogram the RAR[0] in case user changed it. */
-       ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+       /*
+        * Remove the VF MAC address to ensure
+        * that the VF traffic goes to the PF
+        * after stop, close and detach of the VF.
+        */
+       ixgbevf_remove_mac_addr(dev, 0);
 }
 
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct ixgbe_vfta * shadow_vfta =
+       struct ixgbe_vfta *shadow_vfta =
                IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
        int i = 0, j = 0, vfta = 0, mask = 1;
 
-       for (i = 0; i < IXGBE_VFTA_SIZE; i++){
+       for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
                vfta = shadow_vfta->vfta[i];
                if (vfta) {
                        mask = 1;
-                       for (j = 0; j < 32; j++){
+                       for (j = 0; j < 32; j++) {
                                if (vfta & mask)
-                                       ixgbe_set_vfta(hw, (i<<5)+j, 0, on);
-                               mask<<=1;
+                                       ixgbe_set_vfta(hw, (i<<5)+j, 0,
+                                                      on, false);
+                               mask <<= 1;
                        }
                }
        }
@@ -4023,7 +4299,7 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct ixgbe_vfta * shadow_vfta =
+       struct ixgbe_vfta *shadow_vfta =
                IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
        uint32_t vid_idx = 0;
        uint32_t vid_bit = 0;
@@ -4032,7 +4308,7 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
        PMD_INIT_FUNC_TRACE();
 
        /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
-       ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on);
+       ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false);
        if (ret) {
                PMD_INIT_LOG(ERR, "Unable to set VF vlan");
                return ret;
@@ -4068,7 +4344,7 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
                ctrl &= ~IXGBE_RXDCTL_VME;
        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
 
-       ixgbe_vlan_hw_strip_bitmap_set( dev, queue, on);
+       ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
 }
 
 static void
@@ -4084,7 +4360,7 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
                on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
 
                for (i = 0; i < hw->mac.max_rx_queues; i++)
-                       ixgbevf_vlan_strip_queue_set(dev,i,on);
+                       ixgbevf_vlan_strip_queue_set(dev, i, on);
        }
 }
 
@@ -4104,9 +4380,10 @@ ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
 }
 
 static uint32_t
-ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
+ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
 {
        uint32_t vector = 0;
+
        switch (hw->mac.mc_filter_type) {
        case 0:   /* use bits [47:36] of the address */
                vector = ((uc_addr->addr_bytes[4] >> 4) |
@@ -4134,8 +4411,8 @@ ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
 }
 
 static int
-ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
-                              uint8_t on)
+ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+                       uint8_t on)
 {
        uint32_t vector;
        uint32_t uta_idx;
@@ -4156,7 +4433,7 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
        if (hw->mac.type < ixgbe_mac_82599EB)
                return -ENOTSUP;
 
-       vector = ixgbe_uta_vector(hw,mac_addr);
+       vector = ixgbe_uta_vector(hw, mac_addr);
        uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
        uta_shift = vector & ixgbe_uta_bit_mask;
 
@@ -4181,7 +4458,7 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
                                IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
        else
-               IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type);
+               IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
 
        return 0;
 }
@@ -4266,7 +4543,7 @@ ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
 static int
 ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
 {
-       uint32_t reg,addr;
+       uint32_t reg, addr;
        uint32_t val;
        const uint8_t bit1 = 0x1;
 
@@ -4276,16 +4553,26 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
        if (ixgbe_vmdq_mode_check(hw) < 0)
                return -ENOTSUP;
 
-       addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
+       if (pool >= ETH_64_POOLS)
+               return -EINVAL;
+
+       /* for pool >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
+       if (pool >= 32) {
+               addr = IXGBE_VFRE(1);
+               val = bit1 << (pool - 32);
+       } else {
+               addr = IXGBE_VFRE(0);
+               val = bit1 << pool;
+       }
+
        reg = IXGBE_READ_REG(hw, addr);
-       val = bit1 << pool;
 
        if (on)
                reg |= val;
        else
                reg &= ~val;
 
-       IXGBE_WRITE_REG(hw, addr,reg);
+       IXGBE_WRITE_REG(hw, addr, reg);
 
        return 0;
 }
@@ -4293,7 +4580,7 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
 static int
 ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
 {
-       uint32_t reg,addr;
+       uint32_t reg, addr;
        uint32_t val;
        const uint8_t bit1 = 0x1;
 
@@ -4303,16 +4590,26 @@ ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
        if (ixgbe_vmdq_mode_check(hw) < 0)
                return -ENOTSUP;
 
-       addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
+       if (pool >= ETH_64_POOLS)
+               return -EINVAL;
+
+       /* for pool >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
+       if (pool >= 32) {
+               addr = IXGBE_VFTE(1);
+               val = bit1 << (pool - 32);
+       } else {
+               addr = IXGBE_VFTE(0);
+               val = bit1 << pool;
+       }
+
        reg = IXGBE_READ_REG(hw, addr);
-       val = bit1 << pool;
 
        if (on)
                reg |= val;
        else
                reg &= ~val;
 
-       IXGBE_WRITE_REG(hw, addr,reg);
+       IXGBE_WRITE_REG(hw, addr, reg);
 
        return 0;
 }
@@ -4329,10 +4626,12 @@ ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
        if (ixgbe_vmdq_mode_check(hw) < 0)
                return -ENOTSUP;
        for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
-               if (pool_mask & ((uint64_t)(1ULL << pool_idx)))
-                       ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on);
+               if (pool_mask & ((uint64_t)(1ULL << pool_idx))) {
+                       ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx,
+                                                  vlan_on, false);
                        if (ret < 0)
                                return ret;
+               }
        }
 
        return ret;
@@ -4351,7 +4650,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
                        struct rte_eth_mirror_conf *mirror_conf,
                        uint8_t rule_id, uint8_t on)
 {
-       uint32_t mr_ctl,vlvf;
+       uint32_t mr_ctl, vlvf;
        uint32_t mp_lsb = 0;
        uint32_t mv_msb = 0;
        uint32_t mv_lsb = 0;
@@ -4364,7 +4663,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
        const uint8_t vlan_mask_offset = 32;
        const uint8_t dst_pool_offset = 8;
        const uint8_t rule_mr_offset  = 4;
-       const uint8_t mirror_rule_mask= 0x0F;
+       const uint8_t mirror_rule_mask = 0x0F;
 
        struct ixgbe_mirror_info *mr_info =
                        (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
@@ -4387,11 +4686,12 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
        if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
                mirror_type |= IXGBE_MRCTL_VLME;
                /* Check if vlan id is valid and find corresponding VLAN ID index in VLVF */
-               for (i = 0;i < IXGBE_VLVF_ENTRIES; i++) {
+               for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
                        if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
                                /* search vlan id related pool vlan filter index */
                                reg_index = ixgbe_find_vlvf_slot(hw,
-                                               mirror_conf->vlan.vlan_id[i]);
+                                                mirror_conf->vlan.vlan_id[i],
+                                                false);
                                if (reg_index < 0)
                                        return -EINVAL;
                                vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
@@ -4676,6 +4976,9 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
        uint32_t q_idx;
        uint32_t vector_idx = IXGBE_MISC_VEC_ID;
 
+       /* Configure VF other cause ivar */
+       ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
+
        /* won't configure msix register if no mapping is done
         * between intr vector and event fd.
         */
@@ -4690,9 +4993,6 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
                ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
                intr_handle->intr_vec[q_idx] = vector_idx;
        }
-
-       /* Configure VF other cause ivar */
-       ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
 }
 
 /**
@@ -4952,7 +5252,8 @@ ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
 
 #define MAC_TYPE_FILTER_SUP(type)    do {\
        if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
-               (type) != ixgbe_mac_X550)\
+               (type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\
+               (type) != ixgbe_mac_X550EM_a)\
                return -ENOTSUP;\
 } while (0)
 
@@ -5181,7 +5482,8 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
                return -EINVAL;
 
        /* refuse mtu that requires the support of scattered packets when this
-        * feature has not been enabled before. */
+        * feature has not been enabled before.
+        */
        if (!dev->data->scattered_rx &&
            (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
             dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
@@ -5713,6 +6015,8 @@ ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
 
        switch (hw->mac.type) {
        case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_X550EM_a:
                /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */
                systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
                systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
@@ -5735,6 +6039,8 @@ ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
 
        switch (hw->mac.type) {
        case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_X550EM_a:
                /* RXSTMPL stores ns and RXSTMPH stores seconds. */
                rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
                rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
@@ -5758,6 +6064,8 @@ ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
 
        switch (hw->mac.type) {
        case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_X550EM_a:
                /* TXSTMPL stores ns and TXSTMPH stores seconds. */
                tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
                tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
@@ -5789,15 +6097,15 @@ ixgbe_start_timecounters(struct rte_eth_dev *dev)
        rte_ixgbe_dev_atomic_read_link_status(dev, &link);
 
        switch (link.link_speed) {
-       case ETH_LINK_SPEED_100:
+       case ETH_SPEED_NUM_100M:
                incval = IXGBE_INCVAL_100;
                shift = IXGBE_INCVAL_SHIFT_100;
                break;
-       case ETH_LINK_SPEED_1000:
+       case ETH_SPEED_NUM_1G:
                incval = IXGBE_INCVAL_1GB;
                shift = IXGBE_INCVAL_SHIFT_1GB;
                break;
-       case ETH_LINK_SPEED_10000:
+       case ETH_SPEED_NUM_10G:
        default:
                incval = IXGBE_INCVAL_10GB;
                shift = IXGBE_INCVAL_SHIFT_10GB;
@@ -5806,6 +6114,8 @@ ixgbe_start_timecounters(struct rte_eth_dev *dev)
 
        switch (hw->mac.type) {
        case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_X550EM_a:
                /* Independent of link speed. */
                incval = 1;
                /* Cycles read will be interpreted as ns. */
@@ -6037,6 +6347,12 @@ ixgbe_get_regs(struct rte_eth_dev *dev,
        const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
                                    ixgbe_regs_mac_82598EB : ixgbe_regs_others;
 
+       if (data == NULL) {
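+               /* No data buffer: report only dump length and register width. */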
+               regs->length = ixgbe_get_reg_length(dev);
+               regs->width = sizeof(uint32_t);
+               return 0;
+       }
+
        /* Support only full register dump */
        if ((regs->length == 0) ||
            (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
@@ -6061,6 +6377,12 @@ ixgbevf_get_regs(struct rte_eth_dev *dev,
        int count = 0;
        const struct reg_info *reg_group;
 
+       if (data == NULL) {
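+               /* No data buffer: report only dump length and register width. */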
+               regs->length = ixgbevf_get_reg_length(dev);
+               regs->width = sizeof(uint32_t);
+               return 0;
+       }
+
        /* Support only full register dump */
        if ((regs->length == 0) ||
            (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
@@ -6279,7 +6601,8 @@ ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw,
        uint32_t etag_etype;
 
        if (hw->mac.type != ixgbe_mac_X550 &&
-           hw->mac.type != ixgbe_mac_X550EM_x) {
+           hw->mac.type != ixgbe_mac_X550EM_x &&
+           hw->mac.type != ixgbe_mac_X550EM_a) {
                return -ENOTSUP;
        }
 
@@ -6323,7 +6646,8 @@ ixgbe_e_tag_enable(struct ixgbe_hw *hw)
        uint32_t etag_etype;
 
        if (hw->mac.type != ixgbe_mac_X550 &&
-           hw->mac.type != ixgbe_mac_X550EM_x) {
+           hw->mac.type != ixgbe_mac_X550EM_x &&
+           hw->mac.type != ixgbe_mac_X550EM_a) {
                return -ENOTSUP;
        }
 
@@ -6363,7 +6687,8 @@ ixgbe_e_tag_disable(struct ixgbe_hw *hw)
        uint32_t etag_etype;
 
        if (hw->mac.type != ixgbe_mac_X550 &&
-           hw->mac.type != ixgbe_mac_X550EM_x) {
+           hw->mac.type != ixgbe_mac_X550EM_x &&
+           hw->mac.type != ixgbe_mac_X550EM_a) {
                return -ENOTSUP;
        }
 
@@ -6406,7 +6731,8 @@ ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
        uint32_t rar_low, rar_high;
 
        if (hw->mac.type != ixgbe_mac_X550 &&
-           hw->mac.type != ixgbe_mac_X550EM_x) {
+           hw->mac.type != ixgbe_mac_X550EM_x &&
+           hw->mac.type != ixgbe_mac_X550EM_a) {
                return -ENOTSUP;
        }
 
@@ -6441,7 +6767,8 @@ ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
        uint32_t rar_low, rar_high;
 
        if (hw->mac.type != ixgbe_mac_X550 &&
-           hw->mac.type != ixgbe_mac_X550EM_x) {
+           hw->mac.type != ixgbe_mac_X550EM_x &&
+           hw->mac.type != ixgbe_mac_X550EM_a) {
                return -ENOTSUP;
        }
 
@@ -6560,7 +6887,8 @@ ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        if (hw->mac.type != ixgbe_mac_X550 &&
-           hw->mac.type != ixgbe_mac_X550EM_x) {
+           hw->mac.type != ixgbe_mac_X550EM_x &&
+           hw->mac.type != ixgbe_mac_X550EM_a) {
                return -ENOTSUP;
        }
 
@@ -6633,7 +6961,8 @@ ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
        }
 
        if (hw->mac.type != ixgbe_mac_X550 &&
-           hw->mac.type != ixgbe_mac_X550EM_x) {
+           hw->mac.type != ixgbe_mac_X550EM_x &&
+           hw->mac.type != ixgbe_mac_X550EM_a) {
                return -ENOTSUP;
        }
 
@@ -6703,7 +7032,8 @@ ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        if (hw->mac.type != ixgbe_mac_X550 &&
-           hw->mac.type != ixgbe_mac_X550EM_x) {
+           hw->mac.type != ixgbe_mac_X550EM_x &&
+           hw->mac.type != ixgbe_mac_X550EM_a) {
                return -ENOTSUP;
        }
 
@@ -6876,7 +7206,8 @@ ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        if (hw->mac.type != ixgbe_mac_X550 &&
-           hw->mac.type != ixgbe_mac_X550EM_x) {
+           hw->mac.type != ixgbe_mac_X550EM_x &&
+           hw->mac.type != ixgbe_mac_X550EM_a) {
                return -ENOTSUP;
        }
 
@@ -6912,7 +7243,8 @@ ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        if (hw->mac.type != ixgbe_mac_X550 &&
-           hw->mac.type != ixgbe_mac_X550EM_x) {
+           hw->mac.type != ixgbe_mac_X550EM_x &&
+           hw->mac.type != ixgbe_mac_X550EM_a) {
                return -ENOTSUP;
        }
 
@@ -6992,15 +7324,68 @@ ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
        ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE);
 }
 
-static struct rte_driver rte_ixgbe_driver = {
-       .type = PMD_PDEV,
-       .init = rte_ixgbe_pmd_init,
-};
+static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       u32 in_msg = 0;
 
-static struct rte_driver rte_ixgbevf_driver = {
-       .type = PMD_PDEV,
-       .init = rte_ixgbevf_pmd_init,
-};
+       if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
+               return;
+
+       /* PF reset VF event */
+       if (in_msg == IXGBE_PF_CONTROL_MSG)
+               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET);
+}
+
+static int
+ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
+{
+       uint32_t eicr;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
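+
+       /* Mask VF interrupts while the interrupt cause is read. */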
+       ixgbevf_intr_disable(hw);
+
+       /* read-on-clear nic registers here */
+       eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
+       intr->flags = 0;
+
+       /* only one misc vector supported - mailbox */
+       eicr &= IXGBE_VTEICR_MASK;
+       if (eicr == IXGBE_MISC_VEC_ID)
+               intr->flags |= IXGBE_FLAG_MAILBOX;
+
+       return 0;
+}
+
+static int
+ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+       if (intr->flags & IXGBE_FLAG_MAILBOX) {
+               ixgbevf_mbx_process(dev);
+               intr->flags &= ~IXGBE_FLAG_MAILBOX;
+       }
+
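+       /* Re-enable VF interrupts once the pending cause has been handled. */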
+       ixgbevf_intr_enable(hw);
+
+       return 0;
+}
+
+static void
+ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+                             void *param)
+{
+       struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+       ixgbevf_dev_interrupt_get_status(dev);
+       ixgbevf_dev_interrupt_action(dev);
+}
 
-PMD_REGISTER_DRIVER(rte_ixgbe_driver);
-PMD_REGISTER_DRIVER(rte_ixgbevf_driver);
+DRIVER_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
+DRIVER_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
+DRIVER_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd.pci_drv);
+DRIVER_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);