ethdev: use constants for link state
[dpdk.git] / drivers / net / ixgbe / ixgbe_ethdev.c
index 278c925..8bcd0d8 100644 (file)
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
 #define IXGBE_HKEY_MAX_INDEX 10
 
 /* Additional timesync values. */
-#define IXGBE_TIMINCA_16NS_SHIFT 24
-#define IXGBE_TIMINCA_INCVALUE   16000000
-#define IXGBE_TIMINCA_INIT       ((0x02 << IXGBE_TIMINCA_16NS_SHIFT) \
-                                 | IXGBE_TIMINCA_INCVALUE)
+#define NSEC_PER_SEC             1000000000L
+#define IXGBE_INCVAL_10GB        0x66666666
+#define IXGBE_INCVAL_1GB         0x40000000
+#define IXGBE_INCVAL_100         0x50000000
+#define IXGBE_INCVAL_SHIFT_10GB  28
+#define IXGBE_INCVAL_SHIFT_1GB   24
+#define IXGBE_INCVAL_SHIFT_100   21
+#define IXGBE_INCVAL_SHIFT_82599 7
+#define IXGBE_INCPER_SHIFT_82599 24
+
+#define IXGBE_CYCLECOUNTER_MASK   0xffffffffffffffffULL
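+
+/* Sanity check on the values above (plain arithmetic, not taken from the
+ * datasheet): SYSTIME advances by INCVAL >> INCVAL_SHIFT nanoseconds per
+ * tick, so 0x66666666 >> 28 ~= 6.4 ns/tick at 10GbE (a 156.25 MHz clock),
+ * 0x40000000 >> 24 = 64 ns/tick at 1GbE, and 0x50000000 >> 21 = 640 ns/tick
+ * at 100Mb, i.e. the tick period scales by 10 per speed step.
+ */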
+
+#define IXGBE_VT_CTL_POOLING_MODE_MASK         0x00030000
+#define IXGBE_VT_CTL_POOLING_MODE_ETAG         0x00010000
+#define DEFAULT_ETAG_ETYPE                     0x893f
+#define IXGBE_ETAG_ETYPE                       0x00005084
+#define IXGBE_ETAG_ETYPE_MASK                  0x0000ffff
+#define IXGBE_ETAG_ETYPE_VALID                 0x80000000
+#define IXGBE_RAH_ADTYPE                       0x40000000
+#define IXGBE_RAL_ETAG_FILTER_MASK             0x00003fff
+#define IXGBE_VMVIR_TAGA_MASK                  0x18000000
+#define IXGBE_VMVIR_TAGA_ETAG_INSERT           0x08000000
+#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
+#define IXGBE_QDE_STRIP_TAG                    0x00000004
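+
+/* Note: 0x893f is the EtherType assigned to the IEEE 802.1BR E-Tag; the
+ * mask/valid bits above select the E-Tag type and filter fields within the
+ * ETAG, RAH and RAL registers.
+ */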
+
+enum ixgbevf_xcast_modes {
+       IXGBEVF_XCAST_MODE_NONE = 0,
+       IXGBEVF_XCAST_MODE_MULTI,
+       IXGBEVF_XCAST_MODE_ALLMULTI,
+};
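+
+/* Modes requested from the PF over the VF mailbox; used by the VF
+ * allmulticast_enable/_disable ops added in this patch.
+ */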
 
 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
 static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
@@ -149,6 +175,8 @@ static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *stats);
 static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
                                struct rte_eth_xstats *xstats, unsigned n);
+static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
+                                 struct rte_eth_xstats *xstats, unsigned n);
 static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
 static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
@@ -157,13 +185,16 @@ static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                             uint8_t is_rx);
 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
                               struct rte_eth_dev_info *dev_info);
+static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                                 struct rte_eth_dev_info *dev_info);
 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 
 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
-static void ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
+static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
+                              enum rte_vlan_type vlan_type,
+                              uint16_t tpid_id);
 static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
                uint16_t queue, bool on);
 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
@@ -206,8 +237,6 @@ static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_conf
 /* For Virtual Function support */
 static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
 static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
-static int ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev);
-static int ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev);
 static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
 static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
@@ -223,8 +252,6 @@ static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
                uint16_t queue, int on);
 static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
-static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
-                                         void *param);
 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
                                            uint16_t queue_id);
 static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
@@ -232,6 +259,8 @@ static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
 static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
                                 uint8_t queue, uint8_t msix_vector);
 static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
+static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
 
 /* For Eth VMDQ APIs support */
 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
@@ -327,6 +356,27 @@ static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                            uint32_t flags);
 static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                            struct timespec *timestamp);
+static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
+                                  struct timespec *timestamp);
+static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
+                                  const struct timespec *timestamp);
+
+static int ixgbe_dev_l2_tunnel_eth_type_conf
+       (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
+static int ixgbe_dev_l2_tunnel_offload_set
+       (struct rte_eth_dev *dev,
+        struct rte_eth_l2_tunnel_conf *l2_tunnel,
+        uint32_t mask,
+        uint8_t en);
+static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
+                                            enum rte_filter_op filter_op,
+                                            void *arg);
+
+static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+                                        struct rte_eth_udp_tunnel *udp_tunnel);
+static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+                                        struct rte_eth_udp_tunnel *udp_tunnel);
 
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
@@ -351,19 +401,19 @@ static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] |= 1 << bit;\
-       }while(0)
+       } while (0)
 
 #define IXGBE_CLEAR_HWSTRIP(h, q) do{\
                uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] &= ~(1 << bit);\
-       }while(0)
+       } while (0)
 
 #define IXGBE_GET_HWSTRIP(h, q, r) do{\
                uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
                (r) = (h)->bitmap[idx] >> bit & 1;\
-       }while(0)
+       } while (0)
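+
+/* Worked example for the bitmap macros above, assuming 32-bit bitmap words
+ * (sizeof((h)->bitmap[0]) * NBBY == 32): queue 35 gives idx = 35 / 32 = 1
+ * and bit = 35 % 32 = 3, so SET ors 1 << 3 into bitmap[1] and GET shifts it
+ * back out.
+ */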
 
 /*
  * The set of PCI devices this driver supports
@@ -418,6 +468,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .xstats_reset         = ixgbe_dev_xstats_reset,
        .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
        .dev_infos_get        = ixgbe_dev_info_get,
+       .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
        .mtu_set              = ixgbe_dev_mtu_set,
        .vlan_filter_set      = ixgbe_vlan_filter_set,
        .vlan_tpid_set        = ixgbe_vlan_tpid_set,
@@ -482,6 +533,13 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .get_eeprom           = ixgbe_get_eeprom,
        .set_eeprom           = ixgbe_set_eeprom,
        .get_dcb_info         = ixgbe_dev_get_dcb_info,
+       .timesync_adjust_time = ixgbe_timesync_adjust_time,
+       .timesync_read_time   = ixgbe_timesync_read_time,
+       .timesync_write_time  = ixgbe_timesync_write_time,
+       .l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
+       .l2_tunnel_offload_set   = ixgbe_dev_l2_tunnel_offload_set,
+       .udp_tunnel_port_add  = ixgbe_dev_udp_tunnel_port_add,
+       .udp_tunnel_port_del  = ixgbe_dev_udp_tunnel_port_del,
 };
 
 /*
@@ -494,9 +552,14 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .dev_stop             = ixgbevf_dev_stop,
        .link_update          = ixgbe_dev_link_update,
        .stats_get            = ixgbevf_dev_stats_get,
+       .xstats_get           = ixgbevf_dev_xstats_get,
        .stats_reset          = ixgbevf_dev_stats_reset,
+       .xstats_reset         = ixgbevf_dev_stats_reset,
        .dev_close            = ixgbevf_dev_close,
+       .allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
+       .allmulticast_disable = ixgbevf_dev_allmulticast_disable,
        .dev_infos_get        = ixgbevf_dev_info_get,
+       .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
        .mtu_set              = ixgbevf_dev_set_mtu,
        .vlan_filter_set      = ixgbevf_vlan_filter_set,
        .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
@@ -613,11 +676,32 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
                           sizeof(rte_ixgbe_stats_strings[0]))
 
 /* Per-queue statistics */
-#define IXBGE_NB_8_PER_Q_STATS (8 * 7)
-#define IXBGE_NB_16_PER_Q_STATS (16 * 5)
-#define IXGBE_NB_Q_STATS (IXBGE_NB_8_PER_Q_STATS + IXBGE_NB_16_PER_Q_STATS)
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
+       {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
+       {"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
+       {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
+       {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
+};
+
+#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
+                          sizeof(rte_ixgbe_rxq_strings[0]))
+
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
+       {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
+       {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
+       {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
+               pxon2offc)},
+};
+
+#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
+                          sizeof(rte_ixgbe_txq_strings[0]))
+
+static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
+       {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
+};
 
-#define IXGBE_NB_XSTATS (IXGBE_NB_HW_STATS + IXGBE_NB_Q_STATS)
+#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \
+               sizeof(rte_ixgbevf_stats_strings[0]))
 
 /**
  * Atomically reads the link status information from global
@@ -748,7 +832,7 @@ ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
 {
        uint32_t i;
 
-       for(i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
+       for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
        }
@@ -776,7 +860,8 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
        if ((hw->mac.type != ixgbe_mac_82599EB) &&
                (hw->mac.type != ixgbe_mac_X540) &&
                (hw->mac.type != ixgbe_mac_X550) &&
-               (hw->mac.type != ixgbe_mac_X550EM_x))
+               (hw->mac.type != ixgbe_mac_X550EM_x) &&
+               (hw->mac.type != ixgbe_mac_X550EM_a))
                return -ENOSYS;
 
        PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
@@ -881,7 +966,8 @@ ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
        /* we only support 4 TCs for X540, X550 */
        if (hw->mac.type == ixgbe_mac_X540 ||
                hw->mac.type == ixgbe_mac_X550 ||
-               hw->mac.type == ixgbe_mac_X550EM_x) {
+               hw->mac.type == ixgbe_mac_X550EM_x ||
+               hw->mac.type == ixgbe_mac_X550EM_a) {
                dcb_config->num_tcs.pg_tcs = 4;
                dcb_config->num_tcs.pfc_tcs = 4;
        }
@@ -972,6 +1058,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
        }
        pci_dev = eth_dev->pci_dev;
 
+       rte_eth_copy_pci_info(eth_dev, pci_dev);
+
        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
@@ -1113,6 +1201,13 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
                        eth_dev->data->port_id, pci_dev->id.vendor_id,
                        pci_dev->id.device_id);
 
+       rte_intr_callback_register(&pci_dev->intr_handle,
+                                  ixgbe_dev_interrupt_handler,
+                                  (void *)eth_dev);
+
+       /* enable uio/vfio intr/eventfd mapping */
+       rte_intr_enable(&pci_dev->intr_handle);
+
        /* enable support intr */
        ixgbe_enable_intr(eth_dev);
 
@@ -1179,6 +1274,7 @@ ixgbevf_negotiate_api(struct ixgbe_hw *hw)
 
        /* start with highest supported, proceed down */
        static const enum ixgbe_pfvf_api_rev sup_ver[] = {
+               ixgbe_mbox_api_12,
                ixgbe_mbox_api_11,
                ixgbe_mbox_api_10,
        };
@@ -1233,13 +1329,28 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
         * has already done this work. Only check we don't need a different
         * RX function */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY){
-               if (eth_dev->data->scattered_rx)
-                       eth_dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
+               struct ixgbe_tx_queue *txq;
+               /* The TX queue function in the primary process was set by the
+                * last queue initialized; the TX queue may not have been
+                * initialized by the primary process yet.
+                */
+               if (eth_dev->data->tx_queues) {
+                       txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
+                       ixgbe_set_tx_function(eth_dev, txq);
+               } else {
+                       /* Use default TX function if we get here */
+                       PMD_INIT_LOG(NOTICE,
+                               "No TX queues configured yet. Using default TX function.");
+               }
+
+               ixgbe_set_rx_function(eth_dev);
+
                return 0;
        }
 
        pci_dev = eth_dev->pci_dev;
 
+       rte_eth_copy_pci_info(eth_dev, pci_dev);
+
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
@@ -1276,7 +1387,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
         */
        if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
                PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
-               return (diag);
+               return diag;
        }
 
        /* negotiate mailbox API version to use with the PF. */
@@ -1327,7 +1438,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 
                default:
                        PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
-                       return (-EIO);
+                       return -EIO;
        }
 
        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
@@ -1343,7 +1454,6 @@ static int
 eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
 {
        struct ixgbe_hw *hw;
-       unsigned i;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -1362,18 +1472,6 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
        /* Disable the interrupts for VF */
        ixgbevf_intr_disable(hw);
 
-       for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
-               ixgbe_dev_rx_queue_release(eth_dev->data->rx_queues[i]);
-               eth_dev->data->rx_queues[i] = NULL;
-       }
-       eth_dev->data->nb_rx_queues = 0;
-
-       for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
-               ixgbe_dev_tx_queue_release(eth_dev->data->tx_queues[i]);
-               eth_dev->data->tx_queues[i] = NULL;
-       }
-       eth_dev->data->nb_tx_queues = 0;
-
        rte_free(eth_dev->data->mac_addrs);
        eth_dev->data->mac_addrs = NULL;
 
@@ -1431,7 +1529,7 @@ rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unus
        PMD_INIT_FUNC_TRACE();
 
        rte_eth_driver_register(&rte_ixgbevf_pmd);
-       return (0);
+       return 0;
 }
 
 static int
@@ -1469,14 +1567,27 @@ ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
                ixgbe_vlan_hw_strip_disable(dev, queue);
 }
 
-static void
-ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
+static int
+ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
+                   enum rte_vlan_type vlan_type,
+                   uint16_t tpid)
 {
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int ret = 0;
+
+       switch (vlan_type) {
+       case ETH_VLAN_TYPE_INNER:
+               /* Only the high 16 bits are valid */
+               IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
+               break;
+       default:
+               ret = -EINVAL;
+               PMD_DRV_LOG(ERR, "Unsupported vlan type %d\n", vlan_type);
+               break;
+       }
 
-       /* Only the high 16-bits is valid */
-       IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
+       return ret;
 }
 
 void
@@ -1525,7 +1636,7 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
        struct ixgbe_hwstrip *hwstrip =
                IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
 
-       if(queue >= IXGBE_MAX_RX_QUEUE_NUM)
+       if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
                return;
 
        if (on)
@@ -1678,6 +1789,15 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
        ctrl |= IXGBE_EXTENDED_VLAN;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
 
+       /* Clear pooling mode of PFVTCTL. It's required by X550. */
+       if (hw->mac.type == ixgbe_mac_X550 ||
+           hw->mac.type == ixgbe_mac_X550EM_x ||
+           hw->mac.type == ixgbe_mac_X550EM_a) {
+               ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+               ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
+               IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
+       }
+
        /*
         * VET EXT field in the EXVET register = 0x8100 by default
         * So no need to change. Same to VT field of DMATXCTL register
@@ -1687,21 +1807,21 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 static void
 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
-       if(mask & ETH_VLAN_STRIP_MASK){
+       if (mask & ETH_VLAN_STRIP_MASK) {
                if (dev->data->dev_conf.rxmode.hw_vlan_strip)
                        ixgbe_vlan_hw_strip_enable_all(dev);
                else
                        ixgbe_vlan_hw_strip_disable_all(dev);
        }
 
-       if(mask & ETH_VLAN_FILTER_MASK){
+       if (mask & ETH_VLAN_FILTER_MASK) {
                if (dev->data->dev_conf.rxmode.hw_vlan_filter)
                        ixgbe_vlan_hw_filter_enable(dev);
                else
                        ixgbe_vlan_hw_filter_disable(dev);
        }
 
-       if(mask & ETH_VLAN_EXTEND_MASK){
+       if (mask & ETH_VLAN_EXTEND_MASK) {
                if (dev->data->dev_conf.rxmode.hw_vlan_extend)
                        ixgbe_vlan_hw_extend_enable(dev);
                else
@@ -1716,7 +1836,7 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
        uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-       vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
+       vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
 }
 
@@ -1745,8 +1865,9 @@ static int
 ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 {
        struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint16_t nb_rx_q = dev->data->nb_rx_queues;
-       uint16_t nb_tx_q = dev->data->nb_rx_queues;
+       uint16_t nb_tx_q = dev->data->nb_tx_queues;
 
        if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
                /* check multi-queue mode */
@@ -1801,11 +1922,18 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
                if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
                    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
                        PMD_INIT_LOG(ERR, "SRIOV is active,"
-                                       " queue number must less equal to %d.",
+                                       " nb_rx_q=%d nb_tx_q=%d queue number"
+                                       " must be less than or equal to %d.",
+                                       nb_rx_q, nb_tx_q,
                                        RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
                        return -EINVAL;
                }
        } else {
+               if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+                       PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
+                                         " not supported.");
+                       return -EINVAL;
+               }
                /* check configuration for vmdq+dcb mode */
                if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_conf *conf;
@@ -1879,6 +2007,21 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
                                return -EINVAL;
                        }
                }
+
+               /*
+                * When DCB/VT is off, the maximum number of queues changes,
+                * except on 82598EB, where it remains constant.
+                */
+               if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
+                               hw->mac.type != ixgbe_mac_82598EB) {
+                       if (nb_tx_q > IXGBE_NONE_MODE_TX_NB_QUEUES) {
+                               PMD_INIT_LOG(ERR,
+                                            "Neither VT nor DCB are enabled, "
+                                            "nb_tx_q > %d.",
+                                            IXGBE_NONE_MODE_TX_NB_QUEUES);
+                               return -EINVAL;
+                       }
+               }
        }
        return 0;
 }
@@ -1914,6 +2057,25 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
        return 0;
 }
 
+static void
+ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+       uint32_t gpie;
+
+       /* only set it up on X550EM_X */
+       if (hw->mac.type == ixgbe_mac_X550EM_x) {
+               gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+               gpie |= IXGBE_SDP0_GPIEN_X550EM_x;
+               IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+               if (hw->phy.type == ixgbe_phy_x550em_ext_t)
+                       intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x;
+       }
+}
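+
+/* On X550EM_x with the external (ext_t) PHY, link events are signalled as a
+ * general-purpose interrupt on SDP0 rather than a plain LSC interrupt,
+ * hence the GPIE/EICR plumbing above; see the ixgbe_handle_lasi() calls in
+ * the interrupt handlers below.
+ */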
+
 /*
  * Configure device link speed and setup link.
  * It returns 0 on success.
@@ -1944,6 +2106,9 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
                return -EINVAL;
        }
 
+       /* disable uio/vfio intr/eventfd mapping */
+       rte_intr_disable(intr_handle);
+
        /* stop adapter */
        hw->adapter_stopped = 0;
        ixgbe_stop_adapter(hw);
@@ -1959,18 +2124,26 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        /* configure PF module if SRIOV enabled */
        ixgbe_pf_host_configure(dev);
 
+       ixgbe_dev_phy_intr_setup(dev);
+
        /* check and configure queue intr-vector mapping */
-       if (dev->data->dev_conf.intr_conf.rxq != 0)
+       if ((rte_intr_cap_multiple(intr_handle) ||
+            !RTE_ETH_DEV_SRIOV(dev).active) &&
+           dev->data->dev_conf.intr_conf.rxq != 0) {
                intr_vector = dev->data->nb_rx_queues;
-
-       if (rte_intr_efd_enable(intr_handle, intr_vector))
-               return -1;
+               if (intr_vector > IXGBE_MAX_INTR_QUEUE_NUM) {
+                       PMD_INIT_LOG(ERR, "At most %d intr queues supported",
+                                       IXGBE_MAX_INTR_QUEUE_NUM);
+                       return -ENOTSUP;
+               }
+               if (rte_intr_efd_enable(intr_handle, intr_vector))
+                       return -1;
+       }
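+       /* Note: when SRIOV is active and the intr handle cannot expose
+        * multiple vectors, the Rx queue mapping above is skipped so the
+        * single vector stays with the link/mailbox interrupt handler.
+        */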
 
        if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
                intr_handle->intr_vec =
                        rte_zmalloc("intr_vec",
-                                   dev->data->nb_rx_queues * sizeof(int),
-                                   0);
+                                   dev->data->nb_rx_queues * sizeof(int), 0);
                if (intr_handle->intr_vec == NULL) {
                        PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
                                     " intr_vec\n", dev->data->nb_rx_queues);
@@ -2057,20 +2230,22 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 
 skip_link_setup:
 
-       /* check if lsc interrupt is enabled */
-       if (dev->data->dev_conf.intr_conf.lsc != 0) {
-               if (rte_intr_allow_others(intr_handle)) {
-                       rte_intr_callback_register(intr_handle,
-                                                  ixgbe_dev_interrupt_handler,
-                                                  (void *)dev);
+       if (rte_intr_allow_others(intr_handle)) {
+               /* check if lsc interrupt is enabled */
+               if (dev->data->dev_conf.intr_conf.lsc != 0)
                        ixgbe_dev_lsc_interrupt_setup(dev);
-               } else
+       } else {
+               rte_intr_callback_unregister(intr_handle,
+                                            ixgbe_dev_interrupt_handler,
+                                            (void *)dev);
+               if (dev->data->dev_conf.intr_conf.lsc != 0)
                        PMD_INIT_LOG(INFO, "lsc won't enable because of"
                                     " no intr multiplex\n");
        }
 
        /* check if rxq interrupt is enabled */
-       if (dev->data->dev_conf.intr_conf.rxq != 0)
+       if (dev->data->dev_conf.intr_conf.rxq != 0 &&
+           rte_intr_dp_is_en(intr_handle))
                ixgbe_dev_rxq_interrupt_setup(dev);
 
        /* enable uio/vfio intr/eventfd mapping */
@@ -2109,7 +2284,7 @@ skip_link_setup:
 
        ixgbe_restore_statistics_mapping(dev);
 
-       return (0);
+       return 0;
 
 error:
        PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
@@ -2139,9 +2314,6 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
        /* disable interrupts */
        ixgbe_disable_intr(hw);
 
-       /* disable intr eventfd mapping */
-       rte_intr_disable(intr_handle);
-
        /* reset the NIC */
        ixgbe_pf_reset_hw(hw);
        hw->adapter_stopped = 0;
@@ -2182,6 +2354,12 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
        memset(filter_info->fivetuple_mask, 0,
                sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
 
+       if (!rte_intr_allow_others(intr_handle))
+               /* resume to the default handler */
+               rte_intr_callback_register(intr_handle,
+                                          ixgbe_dev_interrupt_handler,
+                                          (void *)dev);
+
        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
        if (intr_handle->intr_vec != NULL) {
@@ -2275,13 +2453,20 @@ ixgbe_dev_close(struct rte_eth_dev *dev)
 }
 
 static void
-ixgbe_read_stats_registers(struct ixgbe_hw *hw, struct ixgbe_hw_stats
-                                                  *hw_stats, uint64_t *total_missed_rx,
-                                                  uint64_t *total_qbrc, uint64_t *total_qprc,
-                                                  uint64_t *total_qprdc)
+ixgbe_read_stats_registers(struct ixgbe_hw *hw,
+                          struct ixgbe_hw_stats *hw_stats,
+                          uint64_t *total_missed_rx, uint64_t *total_qbrc,
+                          uint64_t *total_qprc, uint64_t *total_qprdc)
 {
        uint32_t bprc, lxon, lxoff, total;
+       uint32_t delta_gprc = 0;
        unsigned i;
+       /* Workaround for RX byte count not including CRC bytes when CRC
+        * strip is enabled. CRC bytes are removed from counters when crc_strip
+        * is disabled.
+        */
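+       /* Example (illustrative): with CRC strip off, a 64-byte wire frame
+        * is accounted as 60 bytes once ETHER_CRC_LEN (4) is subtracted.
+        */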
+       int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
+                       IXGBE_HLREG0_RXCRCSTRP);
 
        hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
        hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
@@ -2316,16 +2501,27 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw, struct ixgbe_hw_stats
                    IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
        }
        for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
-               hw_stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
-               hw_stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+               uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+               uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+               uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+
+               delta_gprc += delta_qprc;
+
+               hw_stats->qprc[i] += delta_qprc;
+               hw_stats->qptc[i] += delta_qptc;
+
                hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
                hw_stats->qbrc[i] +=
                    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
+               if (crc_strip == 0)
+                       hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN;
+
                hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
                hw_stats->qbtc[i] +=
                    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
-               *total_qprdc += hw_stats->qprdc[i] +=
-                               IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+
+               hw_stats->qprdc[i] += delta_qprdc;
+               *total_qprdc += hw_stats->qprdc[i];
 
                *total_qprc += hw_stats->qprc[i];
                *total_qbrc += hw_stats->qbrc[i];
@@ -2334,8 +2530,11 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw, struct ixgbe_hw_stats
        hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
        hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
 
-       /* Note that gprc counts missed packets */
-       hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
+       /*
+        * An erratum states that gprc actually counts good + missed packets;
+        * as a workaround, set gprc to the sum of the per-queue receive counts.
+        */
+       hw_stats->gprc = *total_qprc;
 
        if (hw->mac.type != ixgbe_mac_82598EB) {
                hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
@@ -2354,6 +2553,18 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw, struct ixgbe_hw_stats
                hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
                hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
        }
+       uint64_t old_tpr = hw_stats->tpr;
+
+       hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
+       hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
+
+       if (crc_strip == 0)
+               hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN;
+
+       uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
+       hw_stats->gptc += delta_gptc;
+       hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN;
+       hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN;
 
        /*
         * Workaround: mprc hardware is incorrectly counting
@@ -2378,7 +2589,6 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw, struct ixgbe_hw_stats
        hw_stats->lxofftxc += lxoff;
        total = lxon + lxoff;
 
-       hw_stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
        hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
        hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
        hw_stats->gptc -= total;
@@ -2393,8 +2603,6 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw, struct ixgbe_hw_stats
        hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
        hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
        hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
-       hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
-       hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
        hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
        hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
        hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
@@ -2457,17 +2665,14 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        }
 
        /* Rx Errors */
+       stats->imissed  = total_missed_rx;
        stats->ierrors  = hw_stats->crcerrs +
                          hw_stats->mspdc +
                          hw_stats->rlec +
                          hw_stats->ruc +
                          hw_stats->roc +
-                         total_missed_rx +
                          hw_stats->illerrc +
                          hw_stats->errbc +
-                         hw_stats->xec +
-                         hw_stats->mlfc +
-                         hw_stats->mrfc +
                          hw_stats->rfc +
                          hw_stats->fccrc +
                          hw_stats->fclast;
@@ -2489,6 +2694,13 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
        memset(stats, 0, sizeof(*stats));
 }
 
+/* This function calculates the number of xstats based on the current config */
+static unsigned
+ixgbe_xstats_calc_num(void) {
+       return IXGBE_NB_HW_STATS + (IXGBE_NB_RXQ_PRIO_STATS * 8) +
+               (IXGBE_NB_TXQ_PRIO_STATS * 8);
+}
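+
+/* With the tables above (IXGBE_NB_RXQ_PRIO_STATS == 4,
+ * IXGBE_NB_TXQ_PRIO_STATS == 3) this is IXGBE_NB_HW_STATS plus
+ * 4 * 8 + 3 * 8 = 56 per-priority entries.
+ */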
+
 static int
 ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
                                         unsigned n)
@@ -2498,7 +2710,9 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
        struct ixgbe_hw_stats *hw_stats =
                        IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
        uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
-       unsigned i, count = IXGBE_NB_XSTATS;
+       unsigned i, stat, count = 0;
+
+       count = ixgbe_xstats_calc_num();
 
        if (n < count)
                return count;
@@ -2527,81 +2741,30 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
                count++;
        }
 
-       /* Per-Q stats, with 8 queues available */
-       for (i = 0; i < 8; i++) {
-               snprintf(xstats[count].name, sizeof(xstats[count].name),
-                        "rx_q%u_mbuf_allocation_errors", i);
-               xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
-                               offsetof(struct ixgbe_hw_stats, rnbc[i]));
-               count++;
-
-               snprintf(xstats[count].name, sizeof(xstats[count].name),
-                        "rx_q%u_missed_packets", i);
-               xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
-                               offsetof(struct ixgbe_hw_stats, mpc[i]));
-               count++;
-
-               snprintf(xstats[count].name, sizeof(xstats[count].name),
-                        "rx_q%u_xon_priority_packets", i);
-               xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
-                               offsetof(struct ixgbe_hw_stats, pxonrxc[i]));
-               count++;
-
-               snprintf(xstats[count].name, sizeof(xstats[count].name),
-                        "tx_q%u_xon_priority_packets", i);
-               xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
-                               offsetof(struct ixgbe_hw_stats, pxontxc[i]));
-               count++;
-
-               snprintf(xstats[count].name, sizeof(xstats[count].name),
-                        "rx_q%u_xoff_priority_packets", i);
-               xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
-                               offsetof(struct ixgbe_hw_stats, pxoffrxc[i]));
-               count++;
-
-               snprintf(xstats[count].name, sizeof(xstats[count].name),
-                        "tx_q%u_xoff_priority_packets", i);
-               xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
-                               offsetof(struct ixgbe_hw_stats, pxofftxc[i]));
-               count++;
-
-               snprintf(xstats[count].name, sizeof(xstats[count].name),
-                        "xx_q%u_xon_to_xoff_priority_packets", i);
-               xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
-                               offsetof(struct ixgbe_hw_stats, pxon2offc[i]));
-               count++;
+       /* RX Priority Stats */
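+       /* The offset arithmetic below assumes the per-priority counters
+        * (e.g. pxonrxc) are contiguous uint64_t arrays inside
+        * struct ixgbe_hw_stats, so entry i sits at offset + 8 * i.
+        */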
+       for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
+               for (i = 0; i < 8; i++) {
+                       snprintf(xstats[count].name, sizeof(xstats[count].name),
+                                "rx_priority%u_%s", i,
+                                rte_ixgbe_rxq_strings[stat].name);
+                       xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+                                       rte_ixgbe_rxq_strings[stat].offset +
+                                       (sizeof(uint64_t) * i));
+                       count++;
+               }
        }
 
-       for (i = 0; i < 16; i++) {
-               snprintf(xstats[count].name, sizeof(xstats[count].name),
-                        "rx_q%u_packets", i);
-               xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
-                               offsetof(struct ixgbe_hw_stats, qprc[i]));
-               count++;
-
-               snprintf(xstats[count].name, sizeof(xstats[count].name),
-                        "rx_q%u_bytes", i);
-               xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
-                               offsetof(struct ixgbe_hw_stats, qbrc[i]));
-               count++;
-
-               snprintf(xstats[count].name, sizeof(xstats[count].name),
-                        "tx_q%u_packets", i);
-               xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
-                               offsetof(struct ixgbe_hw_stats, qptc[i]));
-               count++;
-
-               snprintf(xstats[count].name, sizeof(xstats[count].name),
-                        "tx_q%u_bytes", i);
-               xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
-                               offsetof(struct ixgbe_hw_stats, qbtc[i]));
-               count++;
-
-               snprintf(xstats[count].name, sizeof(xstats[count].name),
-                        "rx_q%u_dropped", i);
-               xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
-                               offsetof(struct ixgbe_hw_stats, qprdc[i]));
-               count++;
+       /* TX Priority Stats */
+       for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
+               for (i = 0; i < 8; i++) {
+                       snprintf(xstats[count].name, sizeof(xstats[count].name),
+                                "tx_priority%u_%s", i,
+                                rte_ixgbe_txq_strings[stat].name);
+                       xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+                                       rte_ixgbe_txq_strings[stat].offset +
+                                       (sizeof(uint64_t) * i));
+                       count++;
+               }
        }
 
        return count;
@@ -2613,15 +2776,17 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
        struct ixgbe_hw_stats *stats =
                        IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
 
+       unsigned count = ixgbe_xstats_calc_num();
+
        /* HW registers are cleared on read */
-       ixgbe_dev_xstats_get(dev, NULL, IXGBE_NB_XSTATS);
+       ixgbe_dev_xstats_get(dev, NULL, count);
 
        /* Reset software totals */
        memset(stats, 0, sizeof(*stats));
 }
 
 static void
-ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+ixgbevf_update_stats(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
@@ -2646,6 +2811,42 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        /* Rx Multicst Packet */
        UPDATE_VF_STAT(IXGBE_VFMPRC,
            hw_stats->last_vfmprc, hw_stats->vfmprc);
+}
+
+static int
+ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+                      unsigned n)
+{
+       struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
+                       IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+       unsigned i;
+
+       if (n < IXGBEVF_NB_XSTATS)
+               return IXGBEVF_NB_XSTATS;
+
+       ixgbevf_update_stats(dev);
+
+       if (!xstats)
+               return 0;
+
+       /* Extended stats */
+       for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
+               snprintf(xstats[i].name, sizeof(xstats[i].name),
+                        "%s", rte_ixgbevf_stats_strings[i].name);
+               xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
+                       rte_ixgbevf_stats_strings[i].offset);
+       }
+
+       return IXGBEVF_NB_XSTATS;
+}
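+
+/* Caller-side usage sketch (not part of this file): probe the size first,
+ * e.g.
+ *     int n = rte_eth_xstats_get(port_id, NULL, 0);
+ *     struct rte_eth_xstats *xs = malloc(n * sizeof(*xs));
+ *     rte_eth_xstats_get(port_id, xs, n);
+ * the function above returns the required count whenever n is too small.
+ */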
+
+static void
+ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+       struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
+                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+       ixgbevf_update_stats(dev);
 
        if (stats == NULL)
                return;
@@ -2680,9 +2881,19 @@ static void
 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
 
        dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
+       if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+               /*
+                * When DCB/VT is off, the maximum number of queues changes,
+                * except on 82598EB, where it remains constant.
+                */
+               if (dev_conf->txmode.mq_mode == ETH_MQ_TX_NONE &&
+                               hw->mac.type != ixgbe_mac_82598EB)
+                       dev_info->max_tx_queues = IXGBE_NONE_MODE_TX_NB_QUEUES;
+       }
        dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */
        dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
        dev_info->max_mac_addrs = hw->mac.num_rar_entries;
@@ -2708,6 +2919,11 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
            !RTE_ETH_DEV_SRIOV(dev).active)
                dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
 
+       if (hw->mac.type == ixgbe_mac_X550 ||
+           hw->mac.type == ixgbe_mac_X550EM_x ||
+           hw->mac.type == ixgbe_mac_X550EM_a)
+               dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_IPV4_CKSUM  |
@@ -2716,6 +2932,11 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                DEV_TX_OFFLOAD_SCTP_CKSUM  |
                DEV_TX_OFFLOAD_TCP_TSO;
 
+       if (hw->mac.type == ixgbe_mac_X550 ||
+           hw->mac.type == ixgbe_mac_X550EM_x ||
+           hw->mac.type == ixgbe_mac_X550EM_a)
+               dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
@@ -2746,6 +2967,39 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
 }
 
+static const uint32_t *
+ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+       static const uint32_t ptypes[] = {
+               /* For the non-vec RX functions, see
+                * ixgbe_rxd_pkt_info_to_pkt_type(); for the vec functions,
+                * see _recv_raw_pkts_vec().
+                */
+               RTE_PTYPE_L2_ETHER,
+               RTE_PTYPE_L3_IPV4,
+               RTE_PTYPE_L3_IPV4_EXT,
+               RTE_PTYPE_L3_IPV6,
+               RTE_PTYPE_L3_IPV6_EXT,
+               RTE_PTYPE_L4_SCTP,
+               RTE_PTYPE_L4_TCP,
+               RTE_PTYPE_L4_UDP,
+               RTE_PTYPE_TUNNEL_IP,
+               RTE_PTYPE_INNER_L3_IPV6,
+               RTE_PTYPE_INNER_L3_IPV6_EXT,
+               RTE_PTYPE_INNER_L4_TCP,
+               RTE_PTYPE_INNER_L4_UDP,
+               RTE_PTYPE_UNKNOWN
+       };
+
+       if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
+           dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
+           dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
+           dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
+               return ptypes;
+       return NULL;
+}
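+
+/* The vec RX burst functions are not in the list above, so no packet-type
+ * information is advertised for them here.
+ */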
+
 static void
 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                     struct rte_eth_dev_info *dev_info)
@@ -2810,7 +3064,7 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
        int link_up;
        int diag;
 
-       link.link_status = 0;
+       link.link_status = ETH_LINK_DOWN;
        link.link_speed = 0;
        link.link_duplex = 0;
        memset(&old, 0, sizeof(old));
@@ -2839,7 +3093,7 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
                        return -1;
                return 0;
        }
-       link.link_status = 1;
+       link.link_status = ETH_LINK_UP;
        link.link_duplex = ETH_LINK_FULL_DUPLEX;
 
        switch (link_speed) {
@@ -2999,29 +3253,10 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
        if (eicr & IXGBE_EICR_MAILBOX)
                intr->flags |= IXGBE_FLAG_MAILBOX;
 
-       return 0;
-}
-
-static int
-ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
-{
-       uint32_t eicr;
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct ixgbe_interrupt *intr =
-               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-
-       /* clear all cause mask */
-       ixgbevf_intr_disable(hw);
-
-       /* read-on-clear nic registers here */
-       eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
-       PMD_DRV_LOG(INFO, "eicr %x", eicr);
-
-       intr->flags = 0;
-
-       /* set flag for async link update */
-       if (eicr & IXGBE_EICR_LSC)
-               intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+       if (hw->mac.type == ixgbe_mac_X550EM_x &&
+           hw->phy.type == ixgbe_phy_x550em_ext_t &&
+           (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
+               intr->flags |= IXGBE_FLAG_PHY_INTERRUPT;
 
        return 0;
 }
@@ -3078,6 +3313,8 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
        int64_t timeout;
        struct rte_eth_link link;
        int intr_enable_delay = false;
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
 
@@ -3086,6 +3323,11 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
                intr->flags &= ~IXGBE_FLAG_MAILBOX;
        }
 
+       if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
+               ixgbe_handle_lasi(hw);
+               intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
+       }
+
        if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
                /* get the link status before link update, for predicting later */
                memset(&link, 0, sizeof(link));
@@ -3121,18 +3363,6 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
        return 0;
 }
 
-static int
-ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
-{
-       struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       PMD_DRV_LOG(DEBUG, "enable intr immediately");
-       ixgbevf_intr_enable(hw);
-       rte_intr_enable(&dev->pci_dev->intr_handle);
-       return 0;
-}
-
 /**
  * Interrupt handler which shall be registered for alarm callback for delayed
  * handling specific interrupt to wait for the stable nic state. As the
@@ -3161,6 +3391,11 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
        if (eicr & IXGBE_EICR_MAILBOX)
                ixgbe_pf_mbx_process(dev);
 
+       if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
+               ixgbe_handle_lasi(hw);
+               intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
+       }
+
        if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
                ixgbe_dev_link_update(dev, 0);
                intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -3195,23 +3430,13 @@ ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
        ixgbe_dev_interrupt_action(dev);
 }
 
-static void
-ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
-                             void *param)
-{
-       struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
-
-       ixgbevf_dev_interrupt_get_status(dev);
-       ixgbevf_dev_interrupt_action(dev);
-}
-
 static int
 ixgbe_dev_led_on(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw;
 
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       return (ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
+       return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
 }
 
 static int
@@ -3220,7 +3445,7 @@ ixgbe_dev_led_off(struct rte_eth_dev *dev)
        struct ixgbe_hw *hw;
 
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
+       return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
 }
 
 static int
@@ -3302,7 +3527,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
                (fc_conf->high_water < fc_conf->low_water)) {
                PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
                PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
@@ -3363,13 +3588,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
        /* Low water mark of zero causes XOFF floods */
        if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
                 /* High/Low water can not be 0 */
-               if( (!hw->fc.high_water[tc_num])|| (!hw->fc.low_water[tc_num])) {
+               if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
                        PMD_INIT_LOG(ERR, "Invalid water mark configuration");
                        ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                        goto out;
                }
 
-               if(hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
+               if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
                        PMD_INIT_LOG(ERR, "Invalid water mark configuration");
                        ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                        goto out;
@@ -3483,7 +3708,7 @@ ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num)
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
 
-       if(hw->mac.type != ixgbe_mac_82598EB) {
+       if (hw->mac.type != ixgbe_mac_82598EB) {
                ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num);
        }
        return ret_val;
@@ -3524,7 +3749,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
            (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
                PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
                PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
@@ -3548,11 +3773,11 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
                          struct rte_eth_rss_reta_entry64 *reta_conf,
                          uint16_t reta_size)
 {
-       uint8_t i, j, mask;
+       uint16_t i, sp_reta_size;
+       uint8_t j, mask;
        uint32_t reta, r;
        uint16_t idx, shift;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint16_t sp_reta_size;
        uint32_t reta_reg;
 
        PMD_INIT_FUNC_TRACE();
@@ -3602,11 +3827,11 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
                         struct rte_eth_rss_reta_entry64 *reta_conf,
                         uint16_t reta_size)
 {
-       uint8_t i, j, mask;
+       uint16_t i, sp_reta_size;
+       uint8_t j, mask;
        uint32_t reta;
        uint16_t idx, shift;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint16_t sp_reta_size;
        uint32_t reta_reg;
 
        PMD_INIT_FUNC_TRACE();
@@ -3813,11 +4038,11 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
        ixgbevf_dev_rxtx_start(dev);
 
        /* check and configure queue intr-vector mapping */
-       if (dev->data->dev_conf.intr_conf.rxq != 0)
+       if (dev->data->dev_conf.intr_conf.rxq != 0) {
                intr_vector = dev->data->nb_rx_queues;
-
-       if (rte_intr_efd_enable(intr_handle, intr_vector))
-               return -1;
+               if (rte_intr_efd_enable(intr_handle, intr_vector))
+                       return -1;
+       }
 
        if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
                intr_handle->intr_vec =
@@ -3831,16 +4056,6 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
        }
        ixgbevf_configure_msix(dev);
 
-       if (dev->data->dev_conf.intr_conf.lsc != 0) {
-               if (rte_intr_allow_others(intr_handle))
-                       rte_intr_callback_register(intr_handle,
-                                       ixgbevf_dev_interrupt_handler,
-                                       (void *)dev);
-               else
-                       PMD_INIT_LOG(INFO, "lsc won't enable because of"
-                                    " no intr multiplex\n");
-       }
-
        rte_intr_enable(intr_handle);
 
        /* Re-enable interrupt for VF */
@@ -3871,9 +4086,6 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
 
        ixgbe_dev_clear_queues(dev);
 
-       /* disable intr eventfd mapping */
-       rte_intr_disable(intr_handle);
-
        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
        if (intr_handle->intr_vec != NULL) {
@@ -3886,7 +4098,6 @@ static void
 ixgbevf_dev_close(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct rte_pci_device *pci_dev;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -3896,14 +4107,12 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)
 
        ixgbe_dev_free_queues(dev);
 
-       /* reprogram the RAR[0] in case user changed it. */
-       ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
-
-       pci_dev = dev->pci_dev;
-       if (pci_dev->intr_handle.intr_vec) {
-               rte_free(pci_dev->intr_handle.intr_vec);
-               pci_dev->intr_handle.intr_vec = NULL;
-       }
+       /**
+        * Remove the VF MAC address to ensure
+        * that the VF traffic goes to the PF
+        * after stop, close and detach of the VF
+        **/
+       ixgbevf_remove_mac_addr(dev, 0);
 }
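
A usage-level sketch of the effect, assuming a hypothetical VF port number port_id bound to this PMD: once close completes, frames addressed to the VF's former MAC are delivered to the PF again instead of being steered into the detached VF's pool.

    uint8_t port_id = 0;                 /* hypothetical VF port */

    rte_eth_dev_stop(port_id);
    rte_eth_dev_close(port_id);          /* reaches ixgbevf_dev_close(),
                                          * which now clears the VF MAC
                                          * via ixgbevf_remove_mac_addr() */
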
 
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
@@ -3915,10 +4124,10 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
 
        for (i = 0; i < IXGBE_VFTA_SIZE; i++){
                vfta = shadow_vfta->vfta[i];
-               if(vfta){
+               if (vfta) {
                        mask = 1;
                        for (j = 0; j < 32; j++){
-                               if(vfta & mask)
+                               if (vfta & mask)
                                        ixgbe_set_vfta(hw, (i<<5)+j, 0, on);
                                mask<<=1;
                        }
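
For context on the loop being reformatted here: the shadow VFTA caches IXGBE_VFTA_SIZE (128) 32-bit words with one bit per VLAN ID, 4096 in total, and `(i<<5)+j` rebuilds the VLAN ID from word index and bit position. A small helper expressing the same mapping:

    #include <stdint.h>

    /* word i covers VLAN IDs [i*32, i*32 + 31]; bit j picks one of them */
    static inline uint16_t
    vfta_to_vlan(uint32_t word_idx, uint32_t bit_idx)
    {
            return (uint16_t)((word_idx << 5) + bit_idx);
    }
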
@@ -3942,7 +4151,7 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 
        /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
        ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on);
-       if(ret){
+       if (ret) {
                PMD_INIT_LOG(ERR, "Unable to set VF vlan");
                return ret;
        }
@@ -3967,11 +4176,11 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
 
        PMD_INIT_FUNC_TRACE();
 
-       if(queue >= hw->mac.max_rx_queues)
+       if (queue >= hw->mac.max_rx_queues)
                return;
 
        ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
-       if(on)
+       if (on)
                ctrl |= IXGBE_RXDCTL_VME;
        else
                ctrl &= ~IXGBE_RXDCTL_VME;
@@ -3989,10 +4198,10 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
        int on = 0;
 
        /* VF function only supports hw strip feature, others are not supported */
-       if(mask & ETH_VLAN_STRIP_MASK){
+       if (mask & ETH_VLAN_STRIP_MASK) {
                on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
 
-               for(i=0; i < hw->mac.max_rx_queues; i++)
+               for (i = 0; i < hw->mac.max_rx_queues; i++)
                        ixgbevf_vlan_strip_queue_set(dev,i,on);
        }
 }
@@ -4006,7 +4215,7 @@ ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
        reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
        if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
                PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
-               return (-1);
+               return -1;
        }
 
        return 0;
@@ -4063,14 +4272,14 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
 
        /* The UTA table only exists on 82599 hardware and newer */
        if (hw->mac.type < ixgbe_mac_82599EB)
-               return (-ENOTSUP);
+               return -ENOTSUP;
 
        vector = ixgbe_uta_vector(hw,mac_addr);
        uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
        uta_shift = vector & ixgbe_uta_bit_mask;
 
        rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
-       if(rc == on)
+       if (rc == on)
                return 0;
 
        reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
@@ -4106,9 +4315,9 @@ ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
 
        /* The UTA table only exists on 82599 hardware and newer */
        if (hw->mac.type < ixgbe_mac_82599EB)
-               return (-ENOTSUP);
+               return -ENOTSUP;
 
-       if(on) {
+       if (on) {
                for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
                        uta_info->uta_shadow[i] = ~0;
                        IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
@@ -4155,10 +4364,10 @@ ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
        if (hw->mac.type == ixgbe_mac_82598EB) {
                PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
                             " on 82599 hardware and newer");
-               return (-ENOTSUP);
+               return -ENOTSUP;
        }
        if (ixgbe_vmdq_mode_check(hw) < 0)
-               return (-ENOTSUP);
+               return -ENOTSUP;
 
        val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
 
@@ -4183,7 +4392,7 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        if (ixgbe_vmdq_mode_check(hw) < 0)
-               return (-ENOTSUP);
+               return -ENOTSUP;
 
        addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
        reg = IXGBE_READ_REG(hw, addr);
@@ -4210,7 +4419,7 @@ ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        if (ixgbe_vmdq_mode_check(hw) < 0)
-               return (-ENOTSUP);
+               return -ENOTSUP;
 
        addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
        reg = IXGBE_READ_REG(hw, addr);
@@ -4236,12 +4445,13 @@ ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        if (ixgbe_vmdq_mode_check(hw) < 0)
-               return (-ENOTSUP);
+               return -ENOTSUP;
        for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
-               if (pool_mask & ((uint64_t)(1ULL << pool_idx)))
+               if (pool_mask & ((uint64_t)(1ULL << pool_idx))) {
                        ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on);
                        if (ret < 0)
                                return ret;
+               }
        }
 
        return ret;
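
The added braces fix a real dangling-statement bug, not formatting: without them only the set_vfta() call was governed by the pool-mask test, while `if (ret < 0) return ret;` executed on every iteration, so a pool the mask skipped could return the status of an earlier one. A stripped-down illustration of the buggy shape (set_filter() is a stand-in for the set_vfta op):

    /* shape prior to this patch -- the indentation lies: */
    for (pool_idx = 0; pool_idx < 64; pool_idx++) {
            if (pool_mask & (1ULL << pool_idx))
                    ret = set_filter(pool_idx);     /* conditional */
                    if (ret < 0)                    /* NOT conditional: */
                            return ret;             /* ran every pass   */
    }
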
@@ -4301,7 +4511,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
                                /* search vlan id related pool vlan filter index */
                                reg_index = ixgbe_find_vlvf_slot(hw,
                                                mirror_conf->vlan.vlan_id[i]);
-                               if(reg_index < 0)
+                               if (reg_index < 0)
                                        return -EINVAL;
                                vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
                                if ((vlvf & IXGBE_VLVF_VIEN) &&
@@ -4319,8 +4529,8 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 
                        mr_info->mr_conf[rule_id].vlan.vlan_mask =
                                                mirror_conf->vlan.vlan_mask;
-                       for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
-                               if(mirror_conf->vlan.vlan_mask & (1ULL << i))
+                       for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+                               if (mirror_conf->vlan.vlan_mask & (1ULL << i))
                                        mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
                                                mirror_conf->vlan.vlan_id[i];
                        }
@@ -4328,7 +4538,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
                        mv_lsb = 0;
                        mv_msb = 0;
                        mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
-                       for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
+                       for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
                                mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
                }
        }
@@ -4402,7 +4612,7 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
                (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
 
        if (ixgbe_vmdq_mode_check(hw) < 0)
-               return (-ENOTSUP);
+               return -ENOTSUP;
 
        memset(&mr_info->mr_conf[rule_id], 0,
                sizeof(struct rte_eth_mirror_conf));
@@ -4429,7 +4639,8 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
-       mask |= (1 << queue_id);
+       mask |= (1 << IXGBE_MISC_VEC_ID);
+       RTE_SET_USED(queue_id);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
 
        rte_intr_enable(&dev->pci_dev->intr_handle);
@@ -4445,7 +4656,8 @@ ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
-       mask &= ~(1 << queue_id);
+       mask &= ~(1 << IXGBE_MISC_VEC_ID);
+       RTE_SET_USED(queue_id);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
 
        return 0;
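
Both interrupt-control callbacks above now target the single vector a VF owns rather than a per-queue bit: all VF causes funnel through IXGBE_MISC_VEC_ID, so queue_id no longer participates and is marked consumed. RTE_SET_USED is DPDK's usual cast-to-void for that; its definition in rte_common.h is effectively:

    #define RTE_SET_USED(x) (void)(x)    /* silences -Wunused-parameter */
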
@@ -4581,7 +4793,7 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t q_idx;
-       uint32_t vector_idx = 0;
+       uint32_t vector_idx = IXGBE_MISC_VEC_ID;
 
        /* won't configure msix register if no mapping is done
         * between intr vector and event fd.
@@ -4598,7 +4810,7 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
                intr_handle->intr_vec[q_idx] = vector_idx;
        }
 
-       /* Configure VF Rx queue ivar */
+       /* Configure VF other cause ivar */
        ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
 }
 
@@ -4613,7 +4825,8 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
        struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t queue_id, vec = 0;
+       uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
+       uint32_t vec = IXGBE_MISC_VEC_ID;
        uint32_t mask;
        uint32_t gpie;
 
@@ -4623,6 +4836,9 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
        if (!rte_intr_dp_is_en(intr_handle))
                return;
 
+       if (rte_intr_allow_others(intr_handle))
+               vec = base = IXGBE_RX_VEC_START;
+
        /* setup GPIE for MSI-x mode */
        gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
        gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
@@ -4646,23 +4862,23 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
                /* by default, 1:1 mapping */
                ixgbe_set_ivar_map(hw, 0, queue_id, vec);
                intr_handle->intr_vec[queue_id] = vec;
-               if (vec < intr_handle->nb_efd - 1)
+               if (vec < base + intr_handle->nb_efd - 1)
                        vec++;
        }
 
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
-                                  intr_handle->max_intr - 1);
+                                  IXGBE_MISC_VEC_ID);
                break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
-               ixgbe_set_ivar_map(hw, -1, 1, intr_handle->max_intr - 1);
+               ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
                break;
        default:
                break;
        }
-       IXGBE_WRITE_REG(hw, IXGBE_EITR(queue_id),
+       IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
                        IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
 
        /* set up to autoclear timer, and the vectors */
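
The base/vec changes above give the PF a split vector layout. Assuming IXGBE_MISC_VEC_ID == 0 and IXGBE_RX_VEC_START == 1 (their values in ixgbe_ethdev.h), the mapping works out to:

    /*
     * rte_intr_allow_others() true:  vec 0         -> link/misc causes
     *                                vec 1..nb_efd -> Rx queues
     * rte_intr_allow_others() false: vec 0         -> misc + all queues
     *
     * hence the corrected loop bound `vec < base + intr_handle->nb_efd - 1`,
     * which keeps queue vectors inside the efd range relative to base.
     */
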
@@ -4855,7 +5071,8 @@ ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
 
 #define MAC_TYPE_FILTER_SUP(type)    do {\
        if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
-               (type) != ixgbe_mac_X550)\
+               (type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\
+               (type) != ixgbe_mac_X550EM_a)\
                return -ENOTSUP;\
 } while (0)
 
@@ -5570,6 +5787,9 @@ ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
        case RTE_ETH_FILTER_FDIR:
                ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
                break;
+       case RTE_ETH_FILTER_L2_TUNNEL:
+               ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
+               break;
        default:
                PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
                                                        filter_type);
@@ -5605,20 +5825,213 @@ ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
                                         ixgbe_dev_addr_list_itr, TRUE);
 }
 
-static int
-ixgbe_timesync_enable(struct rte_eth_dev *dev)
+static uint64_t
+ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t tsync_ctl;
-       uint32_t tsauxc;
+       uint64_t systime_cycles;
 
-       /* Enable system time for platforms where it isn't on by default. */
-       tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
-       tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
-       IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
+       switch (hw->mac.type) {
+       case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_X550EM_a:
+               /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */
+               systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+               systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
+                               * NSEC_PER_SEC;
+               break;
+       default:
+               systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+               systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
+                               << 32;
+       }
 
-       /* Start incrementing the register used to timestamp PTP packets. */
-       IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, IXGBE_TIMINCA_INIT);
+       return systime_cycles;
+}
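
The switch above exists because the X550 family changed the SYSTIM semantics: SYSTIML counts nanoseconds and SYSTIMH whole seconds, while older parts expose a plain 64-bit cycle counter across the register pair. A sketch of both conversions with the 32-bit register halves as inputs:

    #include <stdint.h>
    #define NSEC_PER_SEC 1000000000ULL

    static uint64_t x550_systime(uint32_t lo_ns, uint32_t hi_sec)
    {
            /* already nanoseconds: seconds * 1e9 + ns remainder */
            return (uint64_t)hi_sec * NSEC_PER_SEC + lo_ns;
    }

    static uint64_t legacy_systime(uint32_t lo, uint32_t hi)
    {
            /* raw free-running cycles, scaled later by the timecounter */
            return ((uint64_t)hi << 32) | lo;
    }
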
+
+static uint64_t
+ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint64_t rx_tstamp_cycles;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_X550EM_a:
+               /* RXSTMPL stores ns and RXSTMPH stores seconds. */
+               rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
+               rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
+                               * NSEC_PER_SEC;
+               break;
+       default:
+               /* RXSTMPL stores ns and RXSTMPH stores seconds. */
+               rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
+               rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
+                               << 32;
+       }
+
+       return rx_tstamp_cycles;
+}
+
+static uint64_t
+ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint64_t tx_tstamp_cycles;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_X550EM_a:
+               /* TXSTMPL stores ns and TXSTMPH stores seconds. */
+               tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
+               tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
+                               * NSEC_PER_SEC;
+               break;
+       default:
+               /* TXSTMPL stores ns and TXSTMPH stores seconds. */
+               tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
+               tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
+                               << 32;
+       }
+
+       return tx_tstamp_cycles;
+}
+
+static void
+ixgbe_start_timecounters(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_adapter *adapter =
+               (struct ixgbe_adapter *)dev->data->dev_private;
+       struct rte_eth_link link;
+       uint32_t incval = 0;
+       uint32_t shift = 0;
+
+       /* Get current link speed. */
+       memset(&link, 0, sizeof(link));
+       ixgbe_dev_link_update(dev, 1);
+       rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+
+       switch (link.link_speed) {
+       case ETH_LINK_SPEED_100:
+               incval = IXGBE_INCVAL_100;
+               shift = IXGBE_INCVAL_SHIFT_100;
+               break;
+       case ETH_LINK_SPEED_1000:
+               incval = IXGBE_INCVAL_1GB;
+               shift = IXGBE_INCVAL_SHIFT_1GB;
+               break;
+       case ETH_LINK_SPEED_10000:
+       default:
+               incval = IXGBE_INCVAL_10GB;
+               shift = IXGBE_INCVAL_SHIFT_10GB;
+               break;
+       }
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_X550EM_a:
+               /* Independent of link speed. */
+               incval = 1;
+               /* Cycles read will be interpreted as ns. */
+               shift = 0;
+               /* Fall-through */
+       case ixgbe_mac_X540:
+               IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
+               break;
+       case ixgbe_mac_82599EB:
+               incval >>= IXGBE_INCVAL_SHIFT_82599;
+               shift -= IXGBE_INCVAL_SHIFT_82599;
+               IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
+                               (1 << IXGBE_INCPER_SHIFT_82599) | incval);
+               break;
+       default:
+               /* Not supported. */
+               return;
+       }
+
+       memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
+       memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+       memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+       adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
+       adapter->systime_tc.cc_shift = shift;
+       adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
+
+       adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
+       adapter->rx_tstamp_tc.cc_shift = shift;
+       adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+       adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
+       adapter->tx_tstamp_tc.cc_shift = shift;
+       adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+}
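
How the incval/shift pairs fit together (my arithmetic, not stated in the patch): SYSTIM advances by incval per clock tick, so it effectively counts in units of 2^-shift ns, and rte_timecounter_update(), as I read rte_time.h, recovers nanoseconds by right-shifting the cycle delta by cc_shift while accumulating the low nsec_mask bits as a fraction. For 10GbE:

    /*
     *   IXGBE_INCVAL_10GB / 2^IXGBE_INCVAL_SHIFT_10GB
     *     = 0x66666666 / 2^28
     *     = 1717986918 / 268435456
     *    ~= 6.4 ns per tick            (i.e. a 156.25 MHz SYSTIM clock)
     *
     * X550 takes the degenerate pair incval = 1, shift = 0, so SYSTIM
     * reads are already nanoseconds; 82599 pre-shifts incval by 7 and
     * programs the period field (1 << IXGBE_INCPER_SHIFT_82599).
     */
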
+
+static int
+ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+       struct ixgbe_adapter *adapter =
+                       (struct ixgbe_adapter *)dev->data->dev_private;
+
+       adapter->systime_tc.nsec += delta;
+       adapter->rx_tstamp_tc.nsec += delta;
+       adapter->tx_tstamp_tc.nsec += delta;
+
+       return 0;
+}
+
+static int
+ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+       uint64_t ns;
+       struct ixgbe_adapter *adapter =
+                       (struct ixgbe_adapter *)dev->data->dev_private;
+
+       ns = rte_timespec_to_ns(ts);
+       /* Set the timecounters to a new value. */
+       adapter->systime_tc.nsec = ns;
+       adapter->rx_tstamp_tc.nsec = ns;
+       adapter->tx_tstamp_tc.nsec = ns;
+
+       return 0;
+}
+
+static int
+ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+       uint64_t ns, systime_cycles;
+       struct ixgbe_adapter *adapter =
+                       (struct ixgbe_adapter *)dev->data->dev_private;
+
+       systime_cycles = ixgbe_read_systime_cyclecounter(dev);
+       ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
+       *ts = rte_ns_to_timespec(ns);
+
+       return 0;
+}
+
+static int
+ixgbe_timesync_enable(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t tsync_ctl;
+       uint32_t tsauxc;
+
+       /* Stop the timesync system time. */
+       IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
+       /* Reset the timesync system time value. */
+       IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
+       IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);
+
+       /* Enable system time for platforms where it isn't on by default. */
+       tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
+       tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
+       IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
+
+       ixgbe_start_timecounters(dev);
 
        /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
        IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
@@ -5636,6 +6049,8 @@ ixgbe_timesync_enable(struct rte_eth_dev *dev)
        tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
        IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
 
+       IXGBE_WRITE_FLUSH(hw);
+
        return 0;
 }
 
@@ -5670,19 +6085,19 @@ ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
                                 uint32_t flags __rte_unused)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_adapter *adapter =
+               (struct ixgbe_adapter *)dev->data->dev_private;
        uint32_t tsync_rxctl;
-       uint32_t rx_stmpl;
-       uint32_t rx_stmph;
+       uint64_t rx_tstamp_cycles;
+       uint64_t ns;
 
        tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
        if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
                return -EINVAL;
 
-       rx_stmpl = IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
-       rx_stmph = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
-
-       timestamp->tv_sec = (uint64_t)(((uint64_t)rx_stmph << 32) | rx_stmpl);
-       timestamp->tv_nsec = 0;
+       rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
+       ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
+       *timestamp = rte_ns_to_timespec(ns);
 
        return 0;
 }
@@ -5692,21 +6107,21 @@ ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
                                 struct timespec *timestamp)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_adapter *adapter =
+               (struct ixgbe_adapter *)dev->data->dev_private;
        uint32_t tsync_txctl;
-       uint32_t tx_stmpl;
-       uint32_t tx_stmph;
+       uint64_t tx_tstamp_cycles;
+       uint64_t ns;
 
        tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
        if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
                return -EINVAL;
 
-       tx_stmpl = IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
-       tx_stmph = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
+       tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
+       ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
+       *timestamp = rte_ns_to_timespec(ns);
 
-       timestamp->tv_sec = (uint64_t)(((uint64_t)tx_stmph << 32) | tx_stmpl);
-       timestamp->tv_nsec = 0;
-
-       return  0;
+       return 0;
 }
 
 static int
@@ -5842,9 +6257,11 @@ ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
        switch (mac_type) {
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_X550EM_a:
                return ETH_RSS_RETA_SIZE_512;
        case ixgbe_mac_X550_vf:
        case ixgbe_mac_X550EM_x_vf:
+       case ixgbe_mac_X550EM_a_vf:
                return ETH_RSS_RETA_SIZE_64;
        default:
                return ETH_RSS_RETA_SIZE_128;
@@ -5856,12 +6273,14 @@ ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
        switch (mac_type) {
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_X550EM_a:
                if (reta_idx < ETH_RSS_RETA_SIZE_128)
                        return IXGBE_RETA(reta_idx >> 2);
                else
                        return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
        case ixgbe_mac_X550_vf:
        case ixgbe_mac_X550EM_x_vf:
+       case ixgbe_mac_X550EM_a_vf:
                return IXGBE_VFRETA(reta_idx >> 2);
        default:
                return IXGBE_RETA(reta_idx >> 2);
@@ -5873,6 +6292,7 @@ ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) {
        switch (mac_type) {
        case ixgbe_mac_X550_vf:
        case ixgbe_mac_X550EM_x_vf:
+       case ixgbe_mac_X550EM_a_vf:
                return IXGBE_VFMRQC;
        default:
                return IXGBE_MRQC;
@@ -5884,6 +6304,7 @@ ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) {
        switch (mac_type) {
        case ixgbe_mac_X550_vf:
        case ixgbe_mac_X550EM_x_vf:
+       case ixgbe_mac_X550EM_a_vf:
                return IXGBE_VFRSSRK(i);
        default:
                return IXGBE_RSSRK(i);
@@ -5978,6 +6399,737 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
        return 0;
 }
 
+/* Update e-tag ether type */
+static int
+ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw,
+                           uint16_t ether_type)
+{
+       uint32_t etag_etype;
+
+       if (hw->mac.type != ixgbe_mac_X550 &&
+           hw->mac.type != ixgbe_mac_X550EM_x &&
+           hw->mac.type != ixgbe_mac_X550EM_a) {
+               return -ENOTSUP;
+       }
+
+       etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
+       etag_etype &= ~IXGBE_ETAG_ETYPE_MASK;
+       etag_etype |= ether_type;
+       IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return 0;
+}
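
The read-modify-write above deliberately touches only the type field; the register layout, per the defines added at the top of this patch, is:

    /*
     * IXGBE_ETAG_ETYPE register
     *   bits 15:0 - E-tag ethertype (IXGBE_ETAG_ETYPE_MASK); the default
     *               DEFAULT_ETAG_ETYPE 0x893f is the IEEE 802.1BR value
     *   bit  31   - IXGBE_ETAG_ETYPE_VALID, the enable bit flipped by
     *               ixgbe_e_tag_enable()/_disable() below, left intact here
     */
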
+
+/* Config l2 tunnel ether type */
+static int
+ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev,
+                                 struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+       int ret = 0;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (l2_tunnel == NULL)
+               return -EINVAL;
+
+       switch (l2_tunnel->l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* Enable e-tag tunnel */
+static int
+ixgbe_e_tag_enable(struct ixgbe_hw *hw)
+{
+       uint32_t etag_etype;
+
+       if (hw->mac.type != ixgbe_mac_X550 &&
+           hw->mac.type != ixgbe_mac_X550EM_x &&
+           hw->mac.type != ixgbe_mac_X550EM_a) {
+               return -ENOTSUP;
+       }
+
+       etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
+       etag_etype |= IXGBE_ETAG_ETYPE_VALID;
+       IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return 0;
+}
+
+/* Enable l2 tunnel */
+static int
+ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev,
+                          enum rte_eth_tunnel_type l2_tunnel_type)
+{
+       int ret = 0;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       switch (l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = ixgbe_e_tag_enable(hw);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* Disable e-tag tunnel */
+static int
+ixgbe_e_tag_disable(struct ixgbe_hw *hw)
+{
+       uint32_t etag_etype;
+
+       if (hw->mac.type != ixgbe_mac_X550 &&
+           hw->mac.type != ixgbe_mac_X550EM_x &&
+           hw->mac.type != ixgbe_mac_X550EM_a) {
+               return -ENOTSUP;
+       }
+
+       etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
+       etag_etype &= ~IXGBE_ETAG_ETYPE_VALID;
+       IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return 0;
+}
+
+/* Disable l2 tunnel */
+static int
+ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
+                           enum rte_eth_tunnel_type l2_tunnel_type)
+{
+       int ret = 0;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       switch (l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = ixgbe_e_tag_disable(hw);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+static int
+ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
+                      struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+       int ret = 0;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t i, rar_entries;
+       uint32_t rar_low, rar_high;
+
+       if (hw->mac.type != ixgbe_mac_X550 &&
+           hw->mac.type != ixgbe_mac_X550EM_x &&
+           hw->mac.type != ixgbe_mac_X550EM_a) {
+               return -ENOTSUP;
+       }
+
+       rar_entries = ixgbe_get_num_rx_addrs(hw);
+
+       for (i = 1; i < rar_entries; i++) {
+               rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
+               rar_low  = IXGBE_READ_REG(hw, IXGBE_RAL(i));
+               if ((rar_high & IXGBE_RAH_AV) &&
+                   (rar_high & IXGBE_RAH_ADTYPE) &&
+                   ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
+                    l2_tunnel->tunnel_id)) {
+                       IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
+                       IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+
+                       ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);
+
+                       return ret;
+               }
+       }
+
+       return ret;
+}
+
+static int
+ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
+                      struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+       int ret = 0;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t i, rar_entries;
+       uint32_t rar_low, rar_high;
+
+       if (hw->mac.type != ixgbe_mac_X550 &&
+           hw->mac.type != ixgbe_mac_X550EM_x &&
+           hw->mac.type != ixgbe_mac_X550EM_a) {
+               return -ENOTSUP;
+       }
+
+       /* One entry per tunnel. Remove any existing entry first. */
+       ixgbe_e_tag_filter_del(dev, l2_tunnel);
+
+       rar_entries = ixgbe_get_num_rx_addrs(hw);
+
+       for (i = 1; i < rar_entries; i++) {
+               rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
+               if (rar_high & IXGBE_RAH_AV) {
+                       continue;
+               } else {
+                       ixgbe_set_vmdq(hw, i, l2_tunnel->pool);
+                       rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE;
+                       rar_low = l2_tunnel->tunnel_id;
+
+                       IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low);
+                       IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high);
+
+                       return ret;
+               }
+       }
+
+       PMD_INIT_LOG(NOTICE, "The E-tag forwarding rule table is full."
+                    " Please remove a rule before adding a new one.");
+       return -EINVAL;
+}
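
These two helpers repurpose the receive-address registers as E-tag filters: RAH.AV marks an entry in use, the new IXGBE_RAH_ADTYPE bit switches it from MAC matching to E-tag matching, and the low 14 bits of RAL carry the tag; entry 0 is skipped since it holds the port MAC. A self-contained sketch of the match test used by the delete path (local defines mirror the ones in this patch, plus the standard RAH.AV bit):

    #include <stdbool.h>
    #include <stdint.h>

    #define RAH_AV          0x80000000  /* entry valid */
    #define RAH_ADTYPE      0x40000000  /* entry matches E-tags, not MACs */
    #define RAL_ETAG_MASK   0x00003fff  /* 14-bit E-tag id */

    static bool
    rar_matches_etag(uint32_t rah, uint32_t ral, uint32_t tunnel_id)
    {
            return (rah & RAH_AV) && (rah & RAH_ADTYPE) &&
                   ((ral & RAL_ETAG_MASK) == tunnel_id);
    }
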
+
+/* Add l2 tunnel filter */
+static int
+ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
+                              struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+       int ret = 0;
+
+       switch (l2_tunnel->l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* Delete l2 tunnel filter */
+static int
+ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
+                              struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+       int ret = 0;
+
+       switch (l2_tunnel->l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/**
+ * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op: operation to be taken.
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ */
+static int
+ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
+                                 enum rte_filter_op filter_op,
+                                 void *arg)
+{
+       int ret = 0;
+
+       if (filter_op == RTE_ETH_FILTER_NOP)
+               return 0;
+
+       if (arg == NULL) {
+               PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+                           filter_op);
+               return -EINVAL;
+       }
+
+       switch (filter_op) {
+       case RTE_ETH_FILTER_ADD:
+               ret = ixgbe_dev_l2_tunnel_filter_add
+                       (dev,
+                        (struct rte_eth_l2_tunnel_conf *)arg);
+               break;
+       case RTE_ETH_FILTER_DELETE:
+               ret = ixgbe_dev_l2_tunnel_filter_del
+                       (dev,
+                        (struct rte_eth_l2_tunnel_conf *)arg);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+               ret = -EINVAL;
+               break;
+       }
+       return ret;
+}
+
+static int
+ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
+{
+       int ret = 0;
+       uint32_t ctrl;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (hw->mac.type != ixgbe_mac_X550 &&
+           hw->mac.type != ixgbe_mac_X550EM_x &&
+           hw->mac.type != ixgbe_mac_X550EM_a) {
+               return -ENOTSUP;
+       }
+
+       ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+       ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
+       if (en)
+               ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
+       IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
+
+       return ret;
+}
+
+/* Enable l2 tunnel forwarding */
+static int
+ixgbe_dev_l2_tunnel_forwarding_enable
+       (struct rte_eth_dev *dev,
+        enum rte_eth_tunnel_type l2_tunnel_type)
+{
+       int ret = 0;
+
+       switch (l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* Disable l2 tunnel forwarding */
+static int
+ixgbe_dev_l2_tunnel_forwarding_disable
+       (struct rte_eth_dev *dev,
+        enum rte_eth_tunnel_type l2_tunnel_type)
+{
+       int ret = 0;
+
+       switch (l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+static int
+ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
+                            struct rte_eth_l2_tunnel_conf *l2_tunnel,
+                            bool en)
+{
+       int ret = 0;
+       uint32_t vmtir, vmvir;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (l2_tunnel->vf_id >= dev->pci_dev->max_vfs) {
+               PMD_DRV_LOG(ERR,
+                           "VF id %u should be less than %u",
+                           l2_tunnel->vf_id,
+                           dev->pci_dev->max_vfs);
+               return -EINVAL;
+       }
+
+       if (hw->mac.type != ixgbe_mac_X550 &&
+           hw->mac.type != ixgbe_mac_X550EM_x &&
+           hw->mac.type != ixgbe_mac_X550EM_a) {
+               return -ENOTSUP;
+       }
+
+       if (en)
+               vmtir = l2_tunnel->tunnel_id;
+       else
+               vmtir = 0;
+
+       IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir);
+
+       vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id));
+       vmvir &= ~IXGBE_VMVIR_TAGA_MASK;
+       if (en)
+               vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT;
+       IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir);
+
+       return ret;
+}
+
+/* Enable l2 tunnel tag insertion */
+static int
+ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
+                                    struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+       int ret = 0;
+
+       switch (l2_tunnel->l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* Disable l2 tunnel tag insertion */
+static int
+ixgbe_dev_l2_tunnel_insertion_disable
+       (struct rte_eth_dev *dev,
+        struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+       int ret = 0;
+
+       switch (l2_tunnel->l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+static int
+ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
+                            bool en)
+{
+       int ret = 0;
+       uint32_t qde;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (hw->mac.type != ixgbe_mac_X550 &&
+           hw->mac.type != ixgbe_mac_X550EM_x &&
+           hw->mac.type != ixgbe_mac_X550EM_a) {
+               return -ENOTSUP;
+       }
+
+       qde = IXGBE_READ_REG(hw, IXGBE_QDE);
+       if (en)
+               qde |= IXGBE_QDE_STRIP_TAG;
+       else
+               qde &= ~IXGBE_QDE_STRIP_TAG;
+       qde &= ~IXGBE_QDE_READ;
+       qde |= IXGBE_QDE_WRITE;
+       IXGBE_WRITE_REG(hw, IXGBE_QDE, qde);
+
+       return ret;
+}
+
+/* Enable l2 tunnel tag stripping */
+static int
+ixgbe_dev_l2_tunnel_stripping_enable
+       (struct rte_eth_dev *dev,
+        enum rte_eth_tunnel_type l2_tunnel_type)
+{
+       int ret = 0;
+
+       switch (l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = ixgbe_e_tag_stripping_en_dis(dev, 1);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* Disable l2 tunnel tag stripping */
+static int
+ixgbe_dev_l2_tunnel_stripping_disable
+       (struct rte_eth_dev *dev,
+        enum rte_eth_tunnel_type l2_tunnel_type)
+{
+       int ret = 0;
+
+       switch (l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = ixgbe_e_tag_stripping_en_dis(dev, 0);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* Enable/disable l2 tunnel offload functions */
+static int
+ixgbe_dev_l2_tunnel_offload_set
+       (struct rte_eth_dev *dev,
+        struct rte_eth_l2_tunnel_conf *l2_tunnel,
+        uint32_t mask,
+        uint8_t en)
+{
+       int ret = 0;
+
+       if (l2_tunnel == NULL)
+               return -EINVAL;
+
+       ret = -EINVAL;
+       if (mask & ETH_L2_TUNNEL_ENABLE_MASK) {
+               if (en)
+                       ret = ixgbe_dev_l2_tunnel_enable(
+                               dev,
+                               l2_tunnel->l2_tunnel_type);
+               else
+                       ret = ixgbe_dev_l2_tunnel_disable(
+                               dev,
+                               l2_tunnel->l2_tunnel_type);
+       }
+
+       if (mask & ETH_L2_TUNNEL_INSERTION_MASK) {
+               if (en)
+                       ret = ixgbe_dev_l2_tunnel_insertion_enable(
+                               dev,
+                               l2_tunnel);
+               else
+                       ret = ixgbe_dev_l2_tunnel_insertion_disable(
+                               dev,
+                               l2_tunnel);
+       }
+
+       if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) {
+               if (en)
+                       ret = ixgbe_dev_l2_tunnel_stripping_enable(
+                               dev,
+                               l2_tunnel->l2_tunnel_type);
+               else
+                       ret = ixgbe_dev_l2_tunnel_stripping_disable(
+                               dev,
+                               l2_tunnel->l2_tunnel_type);
+       }
+
+       if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) {
+               if (en)
+                       ret = ixgbe_dev_l2_tunnel_forwarding_enable(
+                               dev,
+                               l2_tunnel->l2_tunnel_type);
+               else
+                       ret = ixgbe_dev_l2_tunnel_forwarding_disable(
+                               dev,
+                               l2_tunnel->l2_tunnel_type);
+       }
+
+       return ret;
+}
+
+static int
+ixgbe_update_vxlan_port(struct ixgbe_hw *hw,
+                       uint16_t port)
+{
+       IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return 0;
+}
+
+/* There's only one register for the VxLAN UDP port, so we cannot add
+ * several ports; adding a new port simply overwrites the current one.
+ */
+static int
+ixgbe_add_vxlan_port(struct ixgbe_hw *hw,
+                    uint16_t port)
+{
+       if (port == 0) {
+               PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
+               return -EINVAL;
+       }
+
+       return ixgbe_update_vxlan_port(hw, port);
+}
+
+/* We cannot really delete the VxLAN port: the register must always hold
+ * some value, so deleting a port resets the register to its original
+ * value, 0.
+ */
+static int
+ixgbe_del_vxlan_port(struct ixgbe_hw *hw,
+                    uint16_t port)
+{
+       uint16_t cur_port;
+
+       cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL);
+
+       if (cur_port != port) {
+               PMD_DRV_LOG(ERR, "Port %u does not exist.", port);
+               return -EINVAL;
+       }
+
+       return ixgbe_update_vxlan_port(hw, 0);
+}
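
At the ethdev level the add/delete pair therefore behaves as a single slot. A usage sketch, assuming a hypothetical port_id and this release's rte_eth_dev_udp_tunnel_port_add()/_delete() wrappers:

    struct rte_eth_udp_tunnel tunnel = {
            .udp_port  = 4789,                    /* IANA VxLAN port */
            .prot_type = RTE_TUNNEL_TYPE_VXLAN,
    };

    rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
    /* a second add with another port just overwrites VXLANCTRL */
    rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel); /* back to 0 */
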
+
+/* Add UDP tunneling port */
+static int
+ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+                             struct rte_eth_udp_tunnel *udp_tunnel)
+{
+       int ret = 0;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (hw->mac.type != ixgbe_mac_X550 &&
+           hw->mac.type != ixgbe_mac_X550EM_x &&
+           hw->mac.type != ixgbe_mac_X550EM_a) {
+               return -ENOTSUP;
+       }
+
+       if (udp_tunnel == NULL)
+               return -EINVAL;
+
+       switch (udp_tunnel->prot_type) {
+       case RTE_TUNNEL_TYPE_VXLAN:
+               ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
+               break;
+
+       case RTE_TUNNEL_TYPE_GENEVE:
+       case RTE_TUNNEL_TYPE_TEREDO:
+               PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
+               ret = -EINVAL;
+               break;
+
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* Remove UDP tunneling port */
+static int
+ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+                             struct rte_eth_udp_tunnel *udp_tunnel)
+{
+       int ret = 0;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (hw->mac.type != ixgbe_mac_X550 &&
+           hw->mac.type != ixgbe_mac_X550EM_x &&
+           hw->mac.type != ixgbe_mac_X550EM_a) {
+               return -ENOTSUP;
+       }
+
+       if (udp_tunnel == NULL)
+               return -EINVAL;
+
+       switch (udp_tunnel->prot_type) {
+       case RTE_TUNNEL_TYPE_VXLAN:
+               ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
+               break;
+       case RTE_TUNNEL_TYPE_GENEVE:
+       case RTE_TUNNEL_TYPE_TEREDO:
+               PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
+               ret = -EINVAL;
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* ixgbevf_update_xcast_mode - Update Multicast mode
+ * @hw: pointer to the HW structure
+ * @xcast_mode: new multicast mode
+ *
+ * Updates the Multicast Mode of the VF.
+ */
+static int ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
+                                    int xcast_mode)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       u32 msgbuf[2];
+       s32 err;
+
+       switch (hw->api_version) {
+       case ixgbe_mbox_api_12:
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
+       msgbuf[1] = xcast_mode;
+
+       err = mbx->ops.write_posted(hw, msgbuf, 2, 0);
+       if (err)
+               return err;
+
+       err = mbx->ops.read_posted(hw, msgbuf, 2, 0);
+       if (err)
+               return err;
+
+       msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+       if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
+               return -EPERM;
+
+       return 0;
+}
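
The helper is a classic VF-to-PF mailbox round trip; a summary of the wire exchange (opcode and flag names from ixgbe_mbx.h):

    /*
     * VF -> PF : msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE
     *            msgbuf[1] = xcast_mode (NONE / MULTI / ALLMULTI)
     * PF -> VF : msgbuf[0] = opcode | IXGBE_VT_MSGTYPE_ACK   on success
     *            msgbuf[0] = opcode | IXGBE_VT_MSGTYPE_NACK  -> -EPERM
     *
     * IXGBE_VT_MSGTYPE_CTS is masked off first because the PF sets it on
     * every reply once the mailbox channel is up; the mode update itself
     * needs mailbox API 1.2, hence the api_version gate above.
     */
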
+
+static void
+ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);
+}
+
+static void
+ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE);
+}
+
 static struct rte_driver rte_ixgbe_driver = {
        .type = PMD_PDEV,
        .init = rte_ixgbe_pmd_init,