ixgbe: fix packet type from vector Rx
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index ca65103..6ce4111 100644
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -68,6 +68,9 @@
 #include "ixgbe_ethdev.h"
 #include "ixgbe_bypass.h"
 #include "ixgbe_rxtx.h"
+#include "base/ixgbe_type.h"
+#include "base/ixgbe_phy.h"
+#include "ixgbe_regs.h"
 
 /*
  * High threshold controlling when to start sending XOFF frames. Must be at
@@ -82,6 +85,9 @@
  */
 #define IXGBE_FC_LO    0x40
 
+/* Default minimum inter-interrupt interval for EITR configuration */
+#define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT    0x79E
+
 /* Timer value included in XOFF frames. */
 #define IXGBE_FC_PAUSE 0x680
 
@@ -91,6 +97,7 @@
 
 #define IXGBE_MMW_SIZE_DEFAULT        0x4
 #define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
+#define IXGBE_MAX_RING_DESC           4096 /* replicate define from rxtx */
 
 /*
  *  Default values for RX/TX configuration
 
 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
 
+#define IXGBE_HKEY_MAX_INDEX 10
+
+/* Additional timesync values. */
+#define NSEC_PER_SEC             1000000000L
+#define IXGBE_INCVAL_10GB        0x66666666
+#define IXGBE_INCVAL_1GB         0x40000000
+#define IXGBE_INCVAL_100         0x50000000
+#define IXGBE_INCVAL_SHIFT_10GB  28
+#define IXGBE_INCVAL_SHIFT_1GB   24
+#define IXGBE_INCVAL_SHIFT_100   21
+#define IXGBE_INCVAL_SHIFT_82599 7
+#define IXGBE_INCPER_SHIFT_82599 24
+
+#define IXGBE_CYCLECOUNTER_MASK   0xffffffffffffffffULL
+
+#define IXGBE_VT_CTL_POOLING_MODE_MASK         0x00030000
+#define IXGBE_VT_CTL_POOLING_MODE_ETAG         0x00010000
+#define DEFAULT_ETAG_ETYPE                     0x893f
+#define IXGBE_ETAG_ETYPE                       0x00005084
+#define IXGBE_ETAG_ETYPE_MASK                  0x0000ffff
+#define IXGBE_ETAG_ETYPE_VALID                 0x80000000
+#define IXGBE_RAH_ADTYPE                       0x40000000
+#define IXGBE_RAL_ETAG_FILTER_MASK             0x00003fff
+#define IXGBE_VMVIR_TAGA_MASK                  0x18000000
+#define IXGBE_VMVIR_TAGA_ETAG_INSERT           0x08000000
+#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
+#define IXGBE_QDE_STRIP_TAG                    0x00000004
+
+enum ixgbevf_xcast_modes {
+       IXGBEVF_XCAST_MODE_NONE = 0,
+       IXGBEVF_XCAST_MODE_MULTI,
+       IXGBEVF_XCAST_MODE_ALLMULTI,
+};
+
 static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
 static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
 static int  ixgbe_dev_start(struct rte_eth_dev *dev);
 static void ixgbe_dev_stop(struct rte_eth_dev *dev);
@@ -131,20 +173,28 @@ static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
                                int wait_to_complete);
 static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *stats);
+static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
+                               struct rte_eth_xstats *xstats, unsigned n);
+static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
+                                 struct rte_eth_xstats *xstats, unsigned n);
 static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
+static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
 static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                             uint16_t queue_id,
                                             uint8_t stat_idx,
                                             uint8_t is_rx);
 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
                               struct rte_eth_dev_info *dev_info);
+static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                                 struct rte_eth_dev_info *dev_info);
 static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 
 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
-static void ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
+static int ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
+                              enum rte_vlan_type vlan_type,
+                              uint16_t tpid_id);
 static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
                uint16_t queue, bool on);
 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
@@ -171,6 +221,7 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
                        uint16_t reta_size);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
 static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
@@ -179,15 +230,19 @@ static void ixgbe_dev_interrupt_delayed_handler(void *param);
 static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                uint32_t index, uint32_t pool);
 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
+static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
+                                          struct ether_addr *mac_addr);
 static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config);
 
 /* For Virtual Function support */
 static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
 static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
 static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
 static void ixgbevf_dev_close(struct rte_eth_dev *dev);
 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
+static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
 static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *stats);
 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
@@ -197,6 +252,15 @@ static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
                uint16_t queue, int on);
 static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
+static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
+                                           uint16_t queue_id);
+static int ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
+                                            uint16_t queue_id);
+static void ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+                                uint8_t queue, uint8_t msix_vector);
+static void ixgbevf_configure_msix(struct rte_eth_dev *dev);
+static void ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
 
 /* For Eth VMDQ APIs support */
 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
@@ -209,10 +273,17 @@ static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
 static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
                uint64_t pool_mask,uint8_t vlan_on);
 static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
-               struct rte_eth_vmdq_mirror_conf *mirror_conf,
+               struct rte_eth_mirror_conf *mirror_conf,
                uint8_t rule_id, uint8_t on);
 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
                uint8_t rule_id);
+static int ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
+                                         uint16_t queue_id);
+static int ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev,
+                                          uint16_t queue_id);
+static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+                              uint8_t queue, uint8_t msix_vector);
+static void ixgbe_configure_msix(struct rte_eth_dev *dev);
 
 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
                uint16_t queue_idx, uint16_t tx_rate);
@@ -223,6 +294,8 @@ static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
                                 struct ether_addr *mac_addr,
                                 uint32_t index, uint32_t pool);
 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
+static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
+                                            struct ether_addr *mac_addr);
 static int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
                        struct rte_eth_syn_filter *filter,
                        bool add);
@@ -257,13 +330,61 @@ static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
                     void *arg);
 static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
 
+static int ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
+                                     struct ether_addr *mc_addr_set,
+                                     uint32_t nb_mc_addr);
+static int ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
+                                  struct rte_eth_dcb_info *dcb_info);
+
+static int ixgbe_get_reg_length(struct rte_eth_dev *dev);
+static int ixgbe_get_regs(struct rte_eth_dev *dev,
+                           struct rte_dev_reg_info *regs);
+static int ixgbe_get_eeprom_length(struct rte_eth_dev *dev);
+static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
+                               struct rte_dev_eeprom_info *eeprom);
+static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
+                               struct rte_dev_eeprom_info *eeprom);
+
+static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
+static int ixgbevf_get_regs(struct rte_eth_dev *dev,
+                               struct rte_dev_reg_info *regs);
+
+static int ixgbe_timesync_enable(struct rte_eth_dev *dev);
+static int ixgbe_timesync_disable(struct rte_eth_dev *dev);
+static int ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+                                           struct timespec *timestamp,
+                                           uint32_t flags);
+static int ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+                                           struct timespec *timestamp);
+static int ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
+                                  struct timespec *timestamp);
+static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
+                                  const struct timespec *timestamp);
+
+static int ixgbe_dev_l2_tunnel_eth_type_conf
+       (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
+static int ixgbe_dev_l2_tunnel_offload_set
+       (struct rte_eth_dev *dev,
+        struct rte_eth_l2_tunnel_conf *l2_tunnel,
+        uint32_t mask,
+        uint8_t en);
+static int ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
+                                            enum rte_filter_op filter_op,
+                                            void *arg);
+
+static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+                                        struct rte_eth_udp_tunnel *udp_tunnel);
+static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+                                        struct rte_eth_udp_tunnel *udp_tunnel);
+
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
  */
-#define UPDATE_VF_STAT(reg, last, cur)                         \
+#define UPDATE_VF_STAT(reg, last, cur)                          \
 {                                                               \
-       u32 latest = IXGBE_READ_REG(hw, reg);                   \
-       cur += latest - last;                                   \
+       uint32_t latest = IXGBE_READ_REG(hw, reg);              \
+       cur += (latest - last) & UINT_MAX;                      \
        last = latest;                                          \
 }
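
The VF counters behind UPDATE_VF_STAT are 32-bit registers that are not cleared on read, so the macro accumulates only the delta since the previous read; doing the subtraction in 32-bit unsigned arithmetic keeps the delta correct across counter wraparound. A minimal standalone sketch of the same arithmetic (hypothetical names, not part of the driver):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Accumulate a free-running 32-bit hardware counter into a 64-bit total.
 * Because the subtraction is performed in uint32_t, a wrap from 0xFFFFFFF0
 * to 0x00000010 still yields the correct delta of 0x20.
 */
static void
accumulate_counter(uint32_t latest, uint32_t *last, uint64_t *total)
{
	*total += (uint32_t)(latest - *last);
	*last = latest;
}

int
main(void)
{
	uint64_t total = 0;
	uint32_t last = 0xFFFFFFF0u;

	accumulate_counter(0x00000010u, &last, &total); /* counter wrapped */
	printf("delta accumulated: %" PRIu64 "\n", total); /* prints 32 */
	return 0;
}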
 
@@ -280,19 +401,19 @@ static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
                uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] |= 1 << bit;\
-       }while(0)
+       } while (0)
 
 #define IXGBE_CLEAR_HWSTRIP(h, q) do{\
                uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] &= ~(1 << bit);\
-       }while(0)
+       } while (0)
 
 #define IXGBE_GET_HWSTRIP(h, q, r) do{\
                uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
                (r) = (h)->bitmap[idx] >> bit & 1;\
-       }while(0)
+       } while (0)
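
The HWSTRIP macros keep one bit per Rx queue in a word array: idx selects the word and bit selects the position within it. A standalone sketch of the same bitmap bookkeeping, using plain functions instead of macros and assuming NBBY is 8 (names are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_NBBY       8    /* bits per byte, as NBBY in the driver */
#define EXAMPLE_MAX_QUEUES 128

struct example_hwstrip {
	uint32_t bitmap[EXAMPLE_MAX_QUEUES / (sizeof(uint32_t) * EXAMPLE_NBBY)];
};

static void
hwstrip_set(struct example_hwstrip *h, uint16_t q, bool on)
{
	uint32_t idx = q / (sizeof(h->bitmap[0]) * EXAMPLE_NBBY);
	uint32_t bit = q % (sizeof(h->bitmap[0]) * EXAMPLE_NBBY);

	if (on)
		h->bitmap[idx] |= 1u << bit;
	else
		h->bitmap[idx] &= ~(1u << bit);
}

static bool
hwstrip_get(const struct example_hwstrip *h, uint16_t q)
{
	uint32_t idx = q / (sizeof(h->bitmap[0]) * EXAMPLE_NBBY);
	uint32_t bit = q % (sizeof(h->bitmap[0]) * EXAMPLE_NBBY);

	return (h->bitmap[idx] >> bit) & 1;
}

int
main(void)
{
	struct example_hwstrip h = { {0} };

	hwstrip_set(&h, 37, true);
	return hwstrip_get(&h, 37) ? 0 : 1;
}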
 
 /*
  * The set of PCI devices this driver supports
@@ -317,6 +438,18 @@ static const struct rte_pci_id pci_id_ixgbevf_map[] = {
 
 };
 
+static const struct rte_eth_desc_lim rx_desc_lim = {
+       .nb_max = IXGBE_MAX_RING_DESC,
+       .nb_min = IXGBE_MIN_RING_DESC,
+       .nb_align = IXGBE_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+       .nb_max = IXGBE_MAX_RING_DESC,
+       .nb_min = IXGBE_MIN_RING_DESC,
+       .nb_align = IXGBE_TXD_ALIGN,
+};
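
rx_desc_lim and tx_desc_lim advertise the ring-size constraints that callers of the queue-setup functions must respect: a minimum, a maximum (the IXGBE_MAX_RING_DESC replicated above), and an alignment. A hedged sketch of the kind of validation an application could perform; the constants are illustrative stand-ins for the values reported through rte_eth_dev_info_get():

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the advertised descriptor limits. */
#define EXAMPLE_NB_MIN   32
#define EXAMPLE_NB_MAX   4096
#define EXAMPLE_NB_ALIGN 8

/* Return true if nb_desc is an acceptable ring size for these limits. */
static bool
ring_size_is_valid(uint16_t nb_desc)
{
	if (nb_desc < EXAMPLE_NB_MIN || nb_desc > EXAMPLE_NB_MAX)
		return false;
	return (nb_desc % EXAMPLE_NB_ALIGN) == 0;
}

int
main(void)
{
	/* 1024 is accepted; 100 is rejected because it is not 8-aligned. */
	return (ring_size_is_valid(1024) && !ring_size_is_valid(100)) ? 0 : 1;
}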
+
 static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .dev_configure        = ixgbe_dev_configure,
        .dev_start            = ixgbe_dev_start,
@@ -330,9 +463,12 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .allmulticast_disable = ixgbe_dev_allmulticast_disable,
        .link_update          = ixgbe_dev_link_update,
        .stats_get            = ixgbe_dev_stats_get,
+       .xstats_get           = ixgbe_dev_xstats_get,
        .stats_reset          = ixgbe_dev_stats_reset,
+       .xstats_reset         = ixgbe_dev_xstats_reset,
        .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
        .dev_infos_get        = ixgbe_dev_info_get,
+       .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
        .mtu_set              = ixgbe_dev_mtu_set,
        .vlan_filter_set      = ixgbe_vlan_filter_set,
        .vlan_tpid_set        = ixgbe_vlan_tpid_set,
@@ -343,6 +479,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .tx_queue_start       = ixgbe_dev_tx_queue_start,
        .tx_queue_stop        = ixgbe_dev_tx_queue_stop,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
+       .rx_queue_intr_enable = ixgbe_dev_rx_queue_intr_enable,
+       .rx_queue_intr_disable = ixgbe_dev_rx_queue_intr_disable,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .rx_queue_count       = ixgbe_dev_rx_queue_count,
        .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
@@ -355,6 +493,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
        .mac_addr_add         = ixgbe_add_rar,
        .mac_addr_remove      = ixgbe_remove_rar,
+       .mac_addr_set         = ixgbe_set_default_mac_addr,
        .uc_hash_table_set    = ixgbe_uc_hash_table_set,
        .uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
        .mirror_rule_set      = ixgbe_mirror_rule_set,
@@ -381,6 +520,26 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
        .rss_hash_update      = ixgbe_dev_rss_hash_update,
        .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
        .filter_ctrl          = ixgbe_dev_filter_ctrl,
+       .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
+       .rxq_info_get         = ixgbe_rxq_info_get,
+       .txq_info_get         = ixgbe_txq_info_get,
+       .timesync_enable      = ixgbe_timesync_enable,
+       .timesync_disable     = ixgbe_timesync_disable,
+       .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
+       .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
+       .get_reg_length       = ixgbe_get_reg_length,
+       .get_reg              = ixgbe_get_regs,
+       .get_eeprom_length    = ixgbe_get_eeprom_length,
+       .get_eeprom           = ixgbe_get_eeprom,
+       .set_eeprom           = ixgbe_set_eeprom,
+       .get_dcb_info         = ixgbe_dev_get_dcb_info,
+       .timesync_adjust_time = ixgbe_timesync_adjust_time,
+       .timesync_read_time   = ixgbe_timesync_read_time,
+       .timesync_write_time  = ixgbe_timesync_write_time,
+       .l2_tunnel_eth_type_conf = ixgbe_dev_l2_tunnel_eth_type_conf,
+       .l2_tunnel_offload_set   = ixgbe_dev_l2_tunnel_offload_set,
+       .udp_tunnel_port_add  = ixgbe_dev_udp_tunnel_port_add,
+       .udp_tunnel_port_del  = ixgbe_dev_udp_tunnel_port_del,
 };
 
 /*
@@ -393,21 +552,157 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .dev_stop             = ixgbevf_dev_stop,
        .link_update          = ixgbe_dev_link_update,
        .stats_get            = ixgbevf_dev_stats_get,
+       .xstats_get           = ixgbevf_dev_xstats_get,
        .stats_reset          = ixgbevf_dev_stats_reset,
+       .xstats_reset         = ixgbevf_dev_stats_reset,
        .dev_close            = ixgbevf_dev_close,
+       .allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
+       .allmulticast_disable = ixgbevf_dev_allmulticast_disable,
        .dev_infos_get        = ixgbevf_dev_info_get,
+       .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
        .mtu_set              = ixgbevf_dev_set_mtu,
        .vlan_filter_set      = ixgbevf_vlan_filter_set,
        .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
        .vlan_offload_set     = ixgbevf_vlan_offload_set,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
+       .rx_descriptor_done   = ixgbe_dev_rx_descriptor_done,
        .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
+       .rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
+       .rx_queue_intr_disable = ixgbevf_dev_rx_queue_intr_disable,
        .mac_addr_add         = ixgbevf_add_mac_addr,
        .mac_addr_remove      = ixgbevf_remove_mac_addr,
+       .set_mc_addr_list     = ixgbe_dev_set_mc_addr_list,
+       .rxq_info_get         = ixgbe_rxq_info_get,
+       .txq_info_get         = ixgbe_txq_info_get,
+       .mac_addr_set         = ixgbevf_set_default_mac_addr,
+       .get_reg_length       = ixgbevf_get_reg_length,
+       .get_reg              = ixgbevf_get_regs,
+       .reta_update          = ixgbe_dev_rss_reta_update,
+       .reta_query           = ixgbe_dev_rss_reta_query,
+       .rss_hash_update      = ixgbe_dev_rss_hash_update,
+       .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
+};
+
+/* store statistics names and their offsets in the stats structure */
+struct rte_ixgbe_xstats_name_off {
+       char name[RTE_ETH_XSTATS_NAME_SIZE];
+       unsigned offset;
+};
+
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
+       {"rx_crc_errors", offsetof(struct ixgbe_hw_stats, crcerrs)},
+       {"rx_illegal_byte_errors", offsetof(struct ixgbe_hw_stats, illerrc)},
+       {"rx_error_bytes", offsetof(struct ixgbe_hw_stats, errbc)},
+       {"mac_local_errors", offsetof(struct ixgbe_hw_stats, mlfc)},
+       {"mac_remote_errors", offsetof(struct ixgbe_hw_stats, mrfc)},
+       {"rx_length_errors", offsetof(struct ixgbe_hw_stats, rlec)},
+       {"tx_xon_packets", offsetof(struct ixgbe_hw_stats, lxontxc)},
+       {"rx_xon_packets", offsetof(struct ixgbe_hw_stats, lxonrxc)},
+       {"tx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxofftxc)},
+       {"rx_xoff_packets", offsetof(struct ixgbe_hw_stats, lxoffrxc)},
+       {"rx_size_64_packets", offsetof(struct ixgbe_hw_stats, prc64)},
+       {"rx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, prc127)},
+       {"rx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, prc255)},
+       {"rx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, prc511)},
+       {"rx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
+               prc1023)},
+       {"rx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
+               prc1522)},
+       {"rx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bprc)},
+       {"rx_multicast_packets", offsetof(struct ixgbe_hw_stats, mprc)},
+       {"rx_fragment_errors", offsetof(struct ixgbe_hw_stats, rfc)},
+       {"rx_undersize_errors", offsetof(struct ixgbe_hw_stats, ruc)},
+       {"rx_oversize_errors", offsetof(struct ixgbe_hw_stats, roc)},
+       {"rx_jabber_errors", offsetof(struct ixgbe_hw_stats, rjc)},
+       {"rx_management_packets", offsetof(struct ixgbe_hw_stats, mngprc)},
+       {"rx_management_dropped", offsetof(struct ixgbe_hw_stats, mngpdc)},
+       {"tx_management_packets", offsetof(struct ixgbe_hw_stats, mngptc)},
+       {"rx_total_packets", offsetof(struct ixgbe_hw_stats, tpr)},
+       {"rx_total_bytes", offsetof(struct ixgbe_hw_stats, tor)},
+       {"tx_total_packets", offsetof(struct ixgbe_hw_stats, tpt)},
+       {"tx_size_64_packets", offsetof(struct ixgbe_hw_stats, ptc64)},
+       {"tx_size_65_to_127_packets", offsetof(struct ixgbe_hw_stats, ptc127)},
+       {"tx_size_128_to_255_packets", offsetof(struct ixgbe_hw_stats, ptc255)},
+       {"tx_size_256_to_511_packets", offsetof(struct ixgbe_hw_stats, ptc511)},
+       {"tx_size_512_to_1023_packets", offsetof(struct ixgbe_hw_stats,
+               ptc1023)},
+       {"tx_size_1024_to_max_packets", offsetof(struct ixgbe_hw_stats,
+               ptc1522)},
+       {"tx_multicast_packets", offsetof(struct ixgbe_hw_stats, mptc)},
+       {"tx_broadcast_packets", offsetof(struct ixgbe_hw_stats, bptc)},
+       {"rx_mac_short_packet_dropped", offsetof(struct ixgbe_hw_stats, mspdc)},
+       {"rx_l3_l4_xsum_error", offsetof(struct ixgbe_hw_stats, xec)},
+
+       {"flow_director_added_filters", offsetof(struct ixgbe_hw_stats,
+               fdirustat_add)},
+       {"flow_director_removed_filters", offsetof(struct ixgbe_hw_stats,
+               fdirustat_remove)},
+       {"flow_director_filter_add_errors", offsetof(struct ixgbe_hw_stats,
+               fdirfstat_fadd)},
+       {"flow_director_filter_remove_errors", offsetof(struct ixgbe_hw_stats,
+               fdirfstat_fremove)},
+       {"flow_director_matched_filters", offsetof(struct ixgbe_hw_stats,
+               fdirmatch)},
+       {"flow_director_missed_filters", offsetof(struct ixgbe_hw_stats,
+               fdirmiss)},
+
+       {"rx_fcoe_crc_errors", offsetof(struct ixgbe_hw_stats, fccrc)},
+       {"rx_fcoe_dropped", offsetof(struct ixgbe_hw_stats, fcoerpdc)},
+       {"rx_fcoe_mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats,
+               fclast)},
+       {"rx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeprc)},
+       {"tx_fcoe_packets", offsetof(struct ixgbe_hw_stats, fcoeptc)},
+       {"rx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwrc)},
+       {"tx_fcoe_bytes", offsetof(struct ixgbe_hw_stats, fcoedwtc)},
+       {"rx_fcoe_no_direct_data_placement", offsetof(struct ixgbe_hw_stats,
+               fcoe_noddp)},
+       {"rx_fcoe_no_direct_data_placement_ext_buff",
+               offsetof(struct ixgbe_hw_stats, fcoe_noddp_ext_buff)},
+
+       {"tx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
+               lxontxc)},
+       {"rx_flow_control_xon_packets", offsetof(struct ixgbe_hw_stats,
+               lxonrxc)},
+       {"tx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
+               lxofftxc)},
+       {"rx_flow_control_xoff_packets", offsetof(struct ixgbe_hw_stats,
+               lxoffrxc)},
+       {"rx_total_missed_packets", offsetof(struct ixgbe_hw_stats, mpctotal)},
+};
+
+#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
+                          sizeof(rte_ixgbe_stats_strings[0]))
+
+/* Per-queue statistics */
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
+       {"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
+       {"dropped", offsetof(struct ixgbe_hw_stats, mpc)},
+       {"xon_packets", offsetof(struct ixgbe_hw_stats, pxonrxc)},
+       {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxoffrxc)},
+};
+
+#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
+                          sizeof(rte_ixgbe_rxq_strings[0]))
+
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
+       {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
+       {"xoff_packets", offsetof(struct ixgbe_hw_stats, pxofftxc)},
+       {"xon_to_xoff_packets", offsetof(struct ixgbe_hw_stats,
+               pxon2offc)},
+};
+
+#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
+                          sizeof(rte_ixgbe_txq_strings[0]))
+
+static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
+       {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
 };
 
+#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \
+               sizeof(rte_ixgbevf_stats_strings[0]))
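
Each xstats table entry pairs a display name with the byte offset of the corresponding field inside struct ixgbe_hw_stats, so one loop can export every counter without per-field code. A minimal self-contained sketch of that offsetof pattern (simplified struct and names, not the driver's own):

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct example_hw_stats {
	uint64_t crcerrs;
	uint64_t illerrc;
	uint64_t rlec;
};

struct example_xstat_name_off {
	const char *name;
	size_t offset;
};

static const struct example_xstat_name_off example_strings[] = {
	{"rx_crc_errors",    offsetof(struct example_hw_stats, crcerrs)},
	{"rx_illegal_bytes", offsetof(struct example_hw_stats, illerrc)},
	{"rx_length_errors", offsetof(struct example_hw_stats, rlec)},
};

#define EXAMPLE_NB_XSTATS \
	(sizeof(example_strings) / sizeof(example_strings[0]))

int
main(void)
{
	struct example_hw_stats stats = { .crcerrs = 3, .illerrc = 0, .rlec = 7 };
	size_t i;

	for (i = 0; i < EXAMPLE_NB_XSTATS; i++) {
		uint64_t value = *(const uint64_t *)
			((const char *)&stats + example_strings[i].offset);

		printf("%s: %" PRIu64 "\n", example_strings[i].name, value);
	}
	return 0;
}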
+
 /**
  * Atomically reads the link status information from global
  * structure rte_eth_dev.
@@ -537,7 +832,7 @@ ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
 {
        uint32_t i;
 
-       for(i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
+       for (i = 0; i != IXGBE_NB_STAT_MAPPING_REGS; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
        }
@@ -565,10 +860,11 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
        if ((hw->mac.type != ixgbe_mac_82599EB) &&
                (hw->mac.type != ixgbe_mac_X540) &&
                (hw->mac.type != ixgbe_mac_X550) &&
-               (hw->mac.type != ixgbe_mac_X550EM_x))
+               (hw->mac.type != ixgbe_mac_X550EM_x) &&
+               (hw->mac.type != ixgbe_mac_X550EM_a))
                return -ENOSYS;
 
-       PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d",
+       PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
                     queue_id, stat_idx);
 
@@ -594,20 +890,20 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
        else
                stat_mappings->rqsmr[n] |= qsmr_mask;
 
-       PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d",
+       PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
                     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
                     queue_id, stat_idx);
-       PMD_INIT_LOG(INFO, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
+       PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
                     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
 
        /* Now write the mapping in the appropriate register */
        if (is_rx) {
-               PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d",
+               PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
                             stat_mappings->rqsmr[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
        }
        else {
-               PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d",
+               PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
                             stat_mappings->tqsm[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
        }
@@ -670,7 +966,8 @@ ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
        /*we only support 4 Tcs for X540, X550 */
        if (hw->mac.type == ixgbe_mac_X540 ||
                hw->mac.type == ixgbe_mac_X550 ||
-               hw->mac.type == ixgbe_mac_X550EM_x) {
+               hw->mac.type == ixgbe_mac_X550EM_x ||
+               hw->mac.type == ixgbe_mac_X550EM_a) {
                dcb_config->num_tcs.pg_tcs = 4;
                dcb_config->num_tcs.pfc_tcs = 4;
        }
@@ -751,8 +1048,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
                        ixgbe_set_tx_function(eth_dev, txq);
                } else {
                        /* Use default TX function if we get here */
-                       PMD_INIT_LOG(INFO, "No TX queues configured yet. "
-                                          "Using default TX function.");
+                       PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
+                                            "Using default TX function.");
                }
 
                ixgbe_set_rx_function(eth_dev);
@@ -761,6 +1058,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
        }
        pci_dev = eth_dev->pci_dev;
 
+       rte_eth_copy_pci_info(eth_dev, pci_dev);
+
        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
@@ -840,6 +1139,9 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
                return -EIO;
        }
 
+       /* Reset the hw statistics */
+       ixgbe_dev_stats_reset(eth_dev);
+
        /* disable interrupt */
        ixgbe_disable_intr(hw);
 
@@ -899,11 +1201,12 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
                        eth_dev->data->port_id, pci_dev->id.vendor_id,
                        pci_dev->id.device_id);
 
-       rte_intr_callback_register(&(pci_dev->intr_handle),
-               ixgbe_dev_interrupt_handler, (void *)eth_dev);
+       rte_intr_callback_register(&pci_dev->intr_handle,
+                                  ixgbe_dev_interrupt_handler,
+                                  (void *)eth_dev);
 
-       /* enable uio intr after callback register */
-       rte_intr_enable(&(pci_dev->intr_handle));
+       /* enable uio/vfio intr/eventfd mapping */
+       rte_intr_enable(&pci_dev->intr_handle);
 
        /* enable support intr */
        ixgbe_enable_intr(eth_dev);
@@ -916,6 +1219,46 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
        return 0;
 }
 
+static int
+eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+       struct rte_pci_device *pci_dev;
+       struct ixgbe_hw *hw;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+               return -EPERM;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+       pci_dev = eth_dev->pci_dev;
+
+       if (hw->adapter_stopped == 0)
+               ixgbe_dev_close(eth_dev);
+
+       eth_dev->dev_ops = NULL;
+       eth_dev->rx_pkt_burst = NULL;
+       eth_dev->tx_pkt_burst = NULL;
+
+       /* Unlock any pending hardware semaphore */
+       ixgbe_swfw_lock_reset(hw);
+
+       /* disable uio intr before callback unregister */
+       rte_intr_disable(&(pci_dev->intr_handle));
+       rte_intr_callback_unregister(&(pci_dev->intr_handle),
+               ixgbe_dev_interrupt_handler, (void *)eth_dev);
+
+       /* uninitialize PF if max_vfs not zero */
+       ixgbe_pf_host_uninit(eth_dev);
+
+       rte_free(eth_dev->data->mac_addrs);
+       eth_dev->data->mac_addrs = NULL;
+
+       rte_free(eth_dev->data->hash_mac_addrs);
+       eth_dev->data->hash_mac_addrs = NULL;
+
+       return 0;
+}
 
 /*
  * Negotiate mailbox API version with the PF.
@@ -931,6 +1274,7 @@ ixgbevf_negotiate_api(struct ixgbe_hw *hw)
 
        /* start with highest supported, proceed down */
        static const enum ixgbe_pfvf_api_rev sup_ver[] = {
+               ixgbe_mbox_api_12,
                ixgbe_mbox_api_11,
                ixgbe_mbox_api_10,
        };
@@ -985,13 +1329,28 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
         * has already done this work. Only check we don't need a different
         * RX function */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY){
-               if (eth_dev->data->scattered_rx)
-                       eth_dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
+               struct ixgbe_tx_queue *txq;
+               /* The TX function in the primary process is set according to
+                * the last initialized Tx queue; Tx queues may not have been
+                * initialized by the primary process yet.
+                */
+               if (eth_dev->data->tx_queues) {
+                       txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues - 1];
+                       ixgbe_set_tx_function(eth_dev, txq);
+               } else {
+                       /* Use default TX function if we get here */
+                       PMD_INIT_LOG(NOTICE,
+                               "No TX queues configured yet. Using default TX function.");
+               }
+
+               ixgbe_set_rx_function(eth_dev);
+
                return 0;
        }
 
        pci_dev = eth_dev->pci_dev;
 
+       rte_eth_copy_pci_info(eth_dev, pci_dev);
+
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
@@ -1012,6 +1371,9 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
        /* init_mailbox_params */
        hw->mbx.ops.init_params(hw);
 
+       /* Reset the hw statistics */
+       ixgbevf_dev_stats_reset(eth_dev);
+
        /* Disable the interrupts for VF */
        ixgbevf_intr_disable(hw);
 
@@ -1025,7 +1387,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
         */
        if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
                PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
-               return (diag);
+               return diag;
        }
 
        /* negotiate mailbox API version to use with the PF. */
@@ -1076,7 +1438,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 
                default:
                        PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
-                       return (-EIO);
+                       return -EIO;
        }
 
        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
@@ -1086,13 +1448,45 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
        return 0;
 }
 
+/* Virtual Function device uninit */
+
+static int
+eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+       struct ixgbe_hw *hw;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+               return -EPERM;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+       if (hw->adapter_stopped == 0)
+               ixgbevf_dev_close(eth_dev);
+
+       eth_dev->dev_ops = NULL;
+       eth_dev->rx_pkt_burst = NULL;
+       eth_dev->tx_pkt_burst = NULL;
+
+       /* Disable the interrupts for VF */
+       ixgbevf_intr_disable(hw);
+
+       rte_free(eth_dev->data->mac_addrs);
+       eth_dev->data->mac_addrs = NULL;
+
+       return 0;
+}
+
 static struct eth_driver rte_ixgbe_pmd = {
        .pci_drv = {
                .name = "rte_ixgbe_pmd",
                .id_table = pci_id_ixgbe_map,
-               .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+               .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+                       RTE_PCI_DRV_DETACHABLE,
        },
        .eth_dev_init = eth_ixgbe_dev_init,
+       .eth_dev_uninit = eth_ixgbe_dev_uninit,
        .dev_private_size = sizeof(struct ixgbe_adapter),
 };
 
@@ -1103,9 +1497,10 @@ static struct eth_driver rte_ixgbevf_pmd = {
        .pci_drv = {
                .name = "rte_ixgbevf_pmd",
                .id_table = pci_id_ixgbevf_map,
-               .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+               .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
        },
        .eth_dev_init = eth_ixgbevf_dev_init,
+       .eth_dev_uninit = eth_ixgbevf_dev_uninit,
        .dev_private_size = sizeof(struct ixgbe_adapter),
 };
 
@@ -1134,7 +1529,7 @@ rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unus
        PMD_INIT_FUNC_TRACE();
 
        rte_eth_driver_register(&rte_ixgbevf_pmd);
-       return (0);
+       return 0;
 }
 
 static int
@@ -1172,14 +1567,27 @@ ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
                ixgbe_vlan_hw_strip_disable(dev, queue);
 }
 
-static void
-ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
+static int
+ixgbe_vlan_tpid_set(struct rte_eth_dev *dev,
+                   enum rte_vlan_type vlan_type,
+                   uint16_t tpid)
 {
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int ret = 0;
+
+       switch (vlan_type) {
+       case ETH_VLAN_TYPE_INNER:
+               /* Only the high 16 bits are valid */
+               IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
+               break;
+       default:
+               ret = -EINVAL;
+               PMD_DRV_LOG(ERR, "Unsupported vlan type %d\n", vlan_type);
+               break;
+       }
 
-       /* Only the high 16-bits is valid */
-       IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
+       return ret;
 }
 
 void
@@ -1228,7 +1636,7 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
        struct ixgbe_hwstrip *hwstrip =
                IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
 
-       if(queue >= IXGBE_MAX_RX_QUEUE_NUM)
+       if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
                return;
 
        if (on)
@@ -1248,7 +1656,7 @@ ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
 
        if (hw->mac.type == ixgbe_mac_82598EB) {
                /* No queue level support */
-               PMD_INIT_LOG(INFO, "82598EB not support queue level hw strip");
+               PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
                return;
        }
        else {
@@ -1272,7 +1680,7 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
 
        if (hw->mac.type == ixgbe_mac_82598EB) {
                /* No queue level supported */
-               PMD_INIT_LOG(INFO, "82598EB not support queue level hw strip");
+               PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
                return;
        }
        else {
@@ -1381,6 +1789,14 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
        ctrl |= IXGBE_EXTENDED_VLAN;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl);
 
+       /* Clear pooling mode of PFVTCTL. It's required by X550. */
+       if (hw->mac.type == ixgbe_mac_X550 ||
+           hw->mac.type == ixgbe_mac_X550EM_x) {
+               ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+               ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
+               IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
+       }
+
        /*
         * VET EXT field in the EXVET register = 0x8100 by default
         * So no need to change. Same to VT field of DMATXCTL register
@@ -1390,21 +1806,21 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 static void
 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
-       if(mask & ETH_VLAN_STRIP_MASK){
+       if (mask & ETH_VLAN_STRIP_MASK) {
                if (dev->data->dev_conf.rxmode.hw_vlan_strip)
                        ixgbe_vlan_hw_strip_enable_all(dev);
                else
                        ixgbe_vlan_hw_strip_disable_all(dev);
        }
 
-       if(mask & ETH_VLAN_FILTER_MASK){
+       if (mask & ETH_VLAN_FILTER_MASK) {
                if (dev->data->dev_conf.rxmode.hw_vlan_filter)
                        ixgbe_vlan_hw_filter_enable(dev);
                else
                        ixgbe_vlan_hw_filter_disable(dev);
        }
 
-       if(mask & ETH_VLAN_EXTEND_MASK){
+       if (mask & ETH_VLAN_EXTEND_MASK) {
                if (dev->data->dev_conf.rxmode.hw_vlan_extend)
                        ixgbe_vlan_hw_extend_enable(dev);
                else
@@ -1419,10 +1835,180 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
        uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-       vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
+       vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
 }
 
+static int
+ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
+{
+       switch (nb_rx_q) {
+       case 1:
+       case 2:
+               RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+               break;
+       case 4:
+               RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
+       RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q;
+
+       return 0;
+}
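
ixgbe_check_vf_rss_rxq_num() ties the per-VF Rx queue count to the VMDq pool layout: 1 or 2 queues per pool keeps 64 pools, 4 queues per pool drops to 32, and the PF's default pool queue index starts right after the VF queues (max_vfs * nb_rx_q). A small arithmetic sketch of that layout; the helper and values are chosen purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the pool-sizing rule: number of pools for a given per-VF Rx
 * queue count, or 0 for an unsupported count.
 */
static unsigned int
pools_for_queue_count(uint16_t nb_rx_q)
{
	switch (nb_rx_q) {
	case 1:
	case 2:
		return 64;
	case 4:
		return 32;
	default:
		return 0;
	}
}

int
main(void)
{
	uint16_t max_vfs = 31, nb_rx_q = 2;

	/* With 31 VFs and 2 queues each, the default pool's first queue
	 * index is 62, matching max_vfs * nb_rx_q in the driver.
	 */
	printf("pools=%u def_pool_q_idx=%u\n",
	       pools_for_queue_count(nb_rx_q),
	       (unsigned int)(max_vfs * nb_rx_q));
	return 0;
}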
+
+static int
+ixgbe_check_mq_mode(struct rte_eth_dev *dev)
+{
+       struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+       uint16_t nb_rx_q = dev->data->nb_rx_queues;
+       uint16_t nb_tx_q = dev->data->nb_tx_queues;
+
+       if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
+               /* check multi-queue mode */
+               switch (dev_conf->rxmode.mq_mode) {
+               case ETH_MQ_RX_VMDQ_DCB:
+               case ETH_MQ_RX_VMDQ_DCB_RSS:
+                       /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
+                       PMD_INIT_LOG(ERR, "SRIOV active,"
+                                       " unsupported mq_mode rx %d.",
+                                       dev_conf->rxmode.mq_mode);
+                       return -EINVAL;
+               case ETH_MQ_RX_RSS:
+               case ETH_MQ_RX_VMDQ_RSS:
+                       dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+                       if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
+                               if (ixgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
+                                       PMD_INIT_LOG(ERR, "SRIOV is active,"
+                                               " invalid queue number"
+                                               " for VMDQ RSS, allowed"
+                                               " value are 1, 2 or 4.");
+                                       return -EINVAL;
+                               }
+                       break;
+               case ETH_MQ_RX_VMDQ_ONLY:
+               case ETH_MQ_RX_NONE:
+                       /* if no mq mode is configured, use the default scheme */
+                       dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+                       if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
+                               RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
+                       break;
+               default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+                       /* SRIOV only works in VMDq enable mode */
+                       PMD_INIT_LOG(ERR, "SRIOV is active,"
+                                       " wrong mq_mode rx %d.",
+                                       dev_conf->rxmode.mq_mode);
+                       return -EINVAL;
+               }
+
+               switch (dev_conf->txmode.mq_mode) {
+               case ETH_MQ_TX_VMDQ_DCB:
+                       /* DCB VMDQ in SRIOV mode, not implemented yet */
+                       PMD_INIT_LOG(ERR, "SRIOV is active,"
+                                       " unsupported VMDQ mq_mode tx %d.",
+                                       dev_conf->txmode.mq_mode);
+                       return -EINVAL;
+               default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+                       dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
+                       break;
+               }
+
+               /* check valid queue number */
+               if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
+                   (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
+                       PMD_INIT_LOG(ERR, "SRIOV is active,"
+                                       " nb_rx_q=%d nb_tx_q=%d queue number"
+                                       " must be less than or equal to %d.",
+                                       nb_rx_q, nb_tx_q,
+                                       RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
+                       return -EINVAL;
+               }
+       } else {
+               if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+                       PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
+                                         " not supported.");
+                       return -EINVAL;
+               }
+               /* check configuration for vmdq+dcb mode */
+               if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+                       const struct rte_eth_vmdq_dcb_conf *conf;
+
+                       if (nb_rx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
+                               PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
+                                               IXGBE_VMDQ_DCB_NB_QUEUES);
+                               return -EINVAL;
+                       }
+                       conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
+                       if (!(conf->nb_queue_pools == ETH_16_POOLS ||
+                              conf->nb_queue_pools == ETH_32_POOLS)) {
+                               PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
+                                               " nb_queue_pools must be %d or %d.",
+                                               ETH_16_POOLS, ETH_32_POOLS);
+                               return -EINVAL;
+                       }
+               }
+               if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+                       const struct rte_eth_vmdq_dcb_tx_conf *conf;
+
+                       if (nb_tx_q != IXGBE_VMDQ_DCB_NB_QUEUES) {
+                               PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
+                                                IXGBE_VMDQ_DCB_NB_QUEUES);
+                               return -EINVAL;
+                       }
+                       conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
+                       if (!(conf->nb_queue_pools == ETH_16_POOLS ||
+                              conf->nb_queue_pools == ETH_32_POOLS)) {
+                               PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
+                                               " nb_queue_pools != %d and"
+                                               " nb_queue_pools != %d.",
+                                               ETH_16_POOLS, ETH_32_POOLS);
+                               return -EINVAL;
+                       }
+               }
+
+               /* For DCB mode check our configuration before we go further */
+               if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+                       const struct rte_eth_dcb_rx_conf *conf;
+
+                       if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
+                               PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
+                                                IXGBE_DCB_NB_QUEUES);
+                               return -EINVAL;
+                       }
+                       conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
+                       if (!(conf->nb_tcs == ETH_4_TCS ||
+                              conf->nb_tcs == ETH_8_TCS)) {
+                               PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
+                                               " and nb_tcs != %d.",
+                                               ETH_4_TCS, ETH_8_TCS);
+                               return -EINVAL;
+                       }
+               }
+
+               if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+                       const struct rte_eth_dcb_tx_conf *conf;
+
+                       if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
+                               PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
+                                                IXGBE_DCB_NB_QUEUES);
+                               return -EINVAL;
+                       }
+                       conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
+                       if (!(conf->nb_tcs == ETH_4_TCS ||
+                              conf->nb_tcs == ETH_8_TCS)) {
+                               PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
+                                               " and nb_tcs != %d.",
+                                               ETH_4_TCS, ETH_8_TCS);
+                               return -EINVAL;
+                       }
+               }
+       }
+       return 0;
+}
+
 static int
 ixgbe_dev_configure(struct rte_eth_dev *dev)
 {
@@ -1430,8 +2016,16 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        struct ixgbe_adapter *adapter =
                (struct ixgbe_adapter *)dev->data->dev_private;
+       int ret;
 
        PMD_INIT_FUNC_TRACE();
+       /* multiple queue mode checking */
+       ret = ixgbe_check_mq_mode(dev);
+       if (ret != 0) {
+               PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
+                           ret);
+               return ret;
+       }
 
        /* set flag to update link status after init */
        intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -1446,6 +2040,25 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
        return 0;
 }
 
+static void
+ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+       uint32_t gpie;
+
+       /* only set it up on X550EM_X */
+       if (hw->mac.type == ixgbe_mac_X550EM_x) {
+               gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+               gpie |= IXGBE_SDP0_GPIEN_X550EM_x;
+               IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+               if (hw->phy.type == ixgbe_phy_x550em_ext_t)
+                       intr->mask |= IXGBE_EICR_GPI_SDP0_X550EM_x;
+       }
+}
+
 /*
  * Configure device link speed and setup link.
  * It returns 0 on success.
@@ -1457,6 +2070,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_vf_info *vfinfo =
                *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       uint32_t intr_vector = 0;
        int err, link_up = 0, negotiate = 0;
        uint32_t speed = 0;
        int mask = 0;
@@ -1474,8 +2089,11 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
                return -EINVAL;
        }
 
+       /* disable uio/vfio intr/eventfd mapping */
+       rte_intr_disable(intr_handle);
+
        /* stop adapter */
-       hw->adapter_stopped = FALSE;
+       hw->adapter_stopped = 0;
        ixgbe_stop_adapter(hw);
 
        /* reinitialize adapter
@@ -1489,6 +2107,31 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        /* configure PF module if SRIOV enabled */
        ixgbe_pf_host_configure(dev);
 
+       ixgbe_dev_phy_intr_setup(dev);
+
+       /* check and configure queue intr-vector mapping */
+       if ((rte_intr_cap_multiple(intr_handle) ||
+            !RTE_ETH_DEV_SRIOV(dev).active) &&
+           dev->data->dev_conf.intr_conf.rxq != 0) {
+               intr_vector = dev->data->nb_rx_queues;
+               if (rte_intr_efd_enable(intr_handle, intr_vector))
+                       return -1;
+       }
+
+       if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+               intr_handle->intr_vec =
+                       rte_zmalloc("intr_vec",
+                                   dev->data->nb_rx_queues * sizeof(int), 0);
+               if (intr_handle->intr_vec == NULL) {
+                       PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+                                    " intr_vec\n", dev->data->nb_rx_queues);
+                       return -ENOMEM;
+               }
+       }
+
+       /* configure MSI-X for sleep until Rx interrupt */
+       ixgbe_configure_msix(dev);
+
        /* initialize transmission unit */
        ixgbe_dev_tx_init(dev);
 
@@ -1516,8 +2159,13 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
                        goto error;
        }
 
-       /* Turn on the laser */
-       ixgbe_enable_tx_laser(hw);
+       if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
+               /* Turn on the copper */
+               ixgbe_set_phy_power(hw, true);
+       } else {
+               /* Turn on the laser */
+               ixgbe_enable_tx_laser(hw);
+       }
 
        err = ixgbe_check_link(hw, &speed, &link_up, 0);
        if (err)
@@ -1560,9 +2208,26 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 
 skip_link_setup:
 
-       /* check if lsc interrupt is enabled */
-       if (dev->data->dev_conf.intr_conf.lsc != 0)
-               ixgbe_dev_lsc_interrupt_setup(dev);
+       if (rte_intr_allow_others(intr_handle)) {
+               /* check if lsc interrupt is enabled */
+               if (dev->data->dev_conf.intr_conf.lsc != 0)
+                       ixgbe_dev_lsc_interrupt_setup(dev);
+       } else {
+               rte_intr_callback_unregister(intr_handle,
+                                            ixgbe_dev_interrupt_handler,
+                                            (void *)dev);
+               if (dev->data->dev_conf.intr_conf.lsc != 0)
+                       PMD_INIT_LOG(INFO, "lsc won't enable because of"
+                                    " no intr multiplex\n");
+       }
+
+       /* check if rxq interrupt is enabled */
+       if (dev->data->dev_conf.intr_conf.rxq != 0 &&
+           rte_intr_dp_is_en(intr_handle))
+               ixgbe_dev_rxq_interrupt_setup(dev);
+
+       /* enable uio/vfio intr/eventfd mapping */
+       rte_intr_enable(intr_handle);
 
        /* resume enabled intr since hw reset */
        ixgbe_enable_intr(dev);
@@ -1597,7 +2262,7 @@ skip_link_setup:
 
        ixgbe_restore_statistics_mapping(dev);
 
-       return (0);
+       return 0;
 
 error:
        PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
@@ -1619,6 +2284,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
        struct ixgbe_filter_info *filter_info =
                IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
        struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next;
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
        int vf;
 
        PMD_INIT_FUNC_TRACE();
@@ -1628,7 +2294,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
 
        /* reset the NIC */
        ixgbe_pf_reset_hw(hw);
-       hw->adapter_stopped = FALSE;
+       hw->adapter_stopped = 0;
 
        /* stop adapter */
        ixgbe_stop_adapter(hw);
@@ -1637,8 +2303,13 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
                     vf < dev->pci_dev->max_vfs; vf++)
                vfinfo[vf].clear_to_send = false;
 
-       /* Turn off the laser */
-       ixgbe_disable_tx_laser(hw);
+       if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
+               /* Turn off the copper */
+               ixgbe_set_phy_power(hw, false);
+       } else {
+               /* Turn off the laser */
+               ixgbe_disable_tx_laser(hw);
+       }
 
        ixgbe_dev_clear_queues(dev);
 
@@ -1661,10 +2332,22 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
        memset(filter_info->fivetuple_mask, 0,
                sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
 
+       if (!rte_intr_allow_others(intr_handle))
+               /* resume to the default handler */
+               rte_intr_callback_register(intr_handle,
+                                          ixgbe_dev_interrupt_handler,
+                                          (void *)dev);
+
+       /* Clean datapath event and queue/vec mapping */
+       rte_intr_efd_disable(intr_handle);
+       if (intr_handle->intr_vec != NULL) {
+               rte_free(intr_handle->intr_vec);
+               intr_handle->intr_vec = NULL;
+       }
 }
 
 /*
- * Set device link up: enable tx laser.
+ * Set device link up: enable tx.
  */
 static int
 ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
@@ -1680,18 +2363,21 @@ ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
                        return -ENOTSUP;
                }
 #endif
+       }
+
+       if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
+               /* Turn on the copper */
+               ixgbe_set_phy_power(hw, true);
+       } else {
                /* Turn on the laser */
                ixgbe_enable_tx_laser(hw);
-               return 0;
        }
 
-       PMD_INIT_LOG(ERR, "Set link up is not supported by device id 0x%x",
-                    hw->device_id);
-       return -ENOTSUP;
+       return 0;
 }
 
 /*
- * Set device link down: disable tx laser.
+ * Set device link down: disable tx.
  */
 static int
 ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
@@ -1707,14 +2393,17 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
                        return -ENOTSUP;
                }
 #endif
+       }
+
+       if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
+               /* Turn off the copper */
+               ixgbe_set_phy_power(hw, false);
+       } else {
                /* Turn off the laser */
                ixgbe_disable_tx_laser(hw);
-               return 0;
        }
 
-       PMD_INIT_LOG(ERR, "Set link down is not supported by device id 0x%x",
-                    hw->device_id);
-       return -ENOTSUP;
+       return 0;
 }
 
 /*
@@ -1733,29 +2422,29 @@ ixgbe_dev_close(struct rte_eth_dev *dev)
        ixgbe_dev_stop(dev);
        hw->adapter_stopped = 1;
 
+       ixgbe_dev_free_queues(dev);
+
        ixgbe_disable_pcie_master(hw);
 
        /* reprogram the RAR[0] in case user changed it. */
        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
 }
 
-/*
- * This function is based on ixgbe_update_stats_counters() in base/ixgbe.c
- */
 static void
-ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+ixgbe_read_stats_registers(struct ixgbe_hw *hw,
+                          struct ixgbe_hw_stats *hw_stats,
+                          uint64_t *total_missed_rx, uint64_t *total_qbrc,
+                          uint64_t *total_qprc, uint64_t *total_qprdc)
 {
-       struct ixgbe_hw *hw =
-                       IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct ixgbe_hw_stats *hw_stats =
-                       IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
        uint32_t bprc, lxon, lxoff, total;
-       uint64_t total_missed_rx, total_qbrc, total_qprc;
+       uint32_t delta_gprc = 0;
        unsigned i;
-
-       total_missed_rx = 0;
-       total_qbrc = 0;
-       total_qprc = 0;
+       /* Workaround for RX byte count not including CRC bytes when CRC
+        * strip is enabled. CRC bytes are removed from counters when crc_strip
+        * is disabled.
+        */
+       int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
+                       IXGBE_HLREG0_RXCRCSTRP);
 
        hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
        hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
@@ -1768,41 +2457,62 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
                /* global total per queue */
                hw_stats->mpc[i] += mp;
                /* Running comprehensive total for stats display */
-               total_missed_rx += hw_stats->mpc[i];
-               if (hw->mac.type == ixgbe_mac_82598EB)
+               *total_missed_rx += hw_stats->mpc[i];
+               if (hw->mac.type == ixgbe_mac_82598EB) {
                        hw_stats->rnbc[i] +=
                            IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+                       hw_stats->pxonrxc[i] +=
+                               IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+                       hw_stats->pxoffrxc[i] +=
+                               IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+               } else {
+                       hw_stats->pxonrxc[i] +=
+                               IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+                       hw_stats->pxoffrxc[i] +=
+                               IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+                       hw_stats->pxon2offc[i] +=
+                               IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
+               }
                hw_stats->pxontxc[i] +=
                    IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
-               hw_stats->pxonrxc[i] +=
-                   IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
                hw_stats->pxofftxc[i] +=
                    IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
-               hw_stats->pxoffrxc[i] +=
-                   IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
-               hw_stats->pxon2offc[i] +=
-                   IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
        }
        for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
-               hw_stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
-               hw_stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+               uint32_t delta_qprc = IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+               uint32_t delta_qptc = IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+               uint32_t delta_qprdc = IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+
+               delta_gprc += delta_qprc;
+
+               hw_stats->qprc[i] += delta_qprc;
+               hw_stats->qptc[i] += delta_qptc;
+
                hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
                hw_stats->qbrc[i] +=
                    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
+               if (crc_strip == 0)
+                       hw_stats->qbrc[i] -= delta_qprc * ETHER_CRC_LEN;
+
                hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
                hw_stats->qbtc[i] +=
                    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
-               hw_stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
 
-               total_qprc += hw_stats->qprc[i];
-               total_qbrc += hw_stats->qbrc[i];
+               hw_stats->qprdc[i] += delta_qprdc;
+               *total_qprdc += hw_stats->qprdc[i];
+
+               *total_qprc += hw_stats->qprc[i];
+               *total_qbrc += hw_stats->qbrc[i];
        }
        hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
        hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
        hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
 
-       /* Note that gprc counts missed packets */
-       hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
+       /*
+        * An erratum states that gprc actually counts good + missed packets:
+        * as a workaround, set gprc to the sum of the per-queue receive counts.
+        */
+       hw_stats->gprc = *total_qprc;
 
        if (hw->mac.type != ixgbe_mac_82598EB) {
                hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
@@ -1821,6 +2531,18 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
                hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
                hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
        }
+       uint64_t old_tpr = hw_stats->tpr;
+
+       hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
+       hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
+
+       if (crc_strip == 0)
+               hw_stats->gorc -= delta_gprc * ETHER_CRC_LEN;
+
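+       /* gotc and tor are reduced by the 4-byte CRC of every frame counted
+        * in gptc/tpr, so the Tx byte totals exclude the appended CRC.
+        */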
+       uint64_t delta_gptc = IXGBE_READ_REG(hw, IXGBE_GPTC);
+       hw_stats->gptc += delta_gptc;
+       hw_stats->gotc -= delta_gptc * ETHER_CRC_LEN;
+       hw_stats->tor -= (hw_stats->tpr - old_tpr) * ETHER_CRC_LEN;
 
        /*
         * Workaround: mprc hardware is incorrectly counting
@@ -1845,7 +2567,6 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        hw_stats->lxofftxc += lxoff;
        total = lxon + lxoff;
 
-       hw_stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
        hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
        hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
        hw_stats->gptc -= total;
@@ -1860,8 +2581,6 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
        hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
        hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
-       hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
-       hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
        hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
        hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
        hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
@@ -1880,6 +2599,32 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
                hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
        }
 
+       /* Flow Director Stats registers */
+       hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+       hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+}
+
+/*
+ * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
+ */
+static void
+ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+       struct ixgbe_hw *hw =
+                       IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw_stats *hw_stats =
+                       IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+       uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
+       unsigned i;
+
+       total_missed_rx = 0;
+       total_qbrc = 0;
+       total_qprc = 0;
+       total_qprdc = 0;
+
+       ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
+                       &total_qprc, &total_qprdc);
+
        if (stats == NULL)
                return;
 
@@ -1888,7 +2633,6 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        stats->ibytes = total_qbrc;
        stats->opackets = hw_stats->gptc;
        stats->obytes = hw_stats->gotc;
-       stats->imcasts = hw_stats->mprc;
 
        for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) {
                stats->q_ipackets[i] = hw_stats->qprc[i];
@@ -1899,28 +2643,20 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        }
 
        /* Rx Errors */
-       stats->ibadcrc  = hw_stats->crcerrs;
-       stats->ibadlen  = hw_stats->rlec + hw_stats->ruc + hw_stats->roc;
        stats->imissed  = total_missed_rx;
-       stats->ierrors  = stats->ibadcrc +
-                         stats->ibadlen +
-                         stats->imissed +
-                         hw_stats->illerrc + hw_stats->errbc;
+       stats->ierrors  = hw_stats->crcerrs +
+                         hw_stats->mspdc +
+                         hw_stats->rlec +
+                         hw_stats->ruc +
+                         hw_stats->roc +
+                         hw_stats->illerrc +
+                         hw_stats->errbc +
+                         hw_stats->rfc +
+                         hw_stats->fccrc +
+                         hw_stats->fclast;
 
        /* Tx Errors */
        stats->oerrors  = 0;
-
-       /* XON/XOFF pause frames */
-       stats->tx_pause_xon  = hw_stats->lxontxc;
-       stats->rx_pause_xon  = hw_stats->lxonrxc;
-       stats->tx_pause_xoff = hw_stats->lxofftxc;
-       stats->rx_pause_xoff = hw_stats->lxoffrxc;
-
-       /* Flow Director Stats registers */
-       hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
-       hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
-       stats->fdirmatch = hw_stats->fdirmatch;
-       stats->fdirmiss = hw_stats->fdirmiss;
 }
 
 static void
@@ -1936,10 +2672,101 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
        memset(stats, 0, sizeof(*stats));
 }
 
-static void
-ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+/* This function calculates the number of xstats based on the current config */
+static unsigned
+ixgbe_xstats_calc_num(void) {
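+       /* Each per-priority Rx/Tx counter is reported for 8 priorities,
+        * matching the i < 8 loops in ixgbe_dev_xstats_get() below.
+        */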
+       return IXGBE_NB_HW_STATS + (IXGBE_NB_RXQ_PRIO_STATS * 8) +
+               (IXGBE_NB_TXQ_PRIO_STATS * 8);
+}
+
+static int
+ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+                                        unsigned n)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw =
+                       IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw_stats *hw_stats =
+                       IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+       uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
+       unsigned i, stat, count = 0;
+
+       count = ixgbe_xstats_calc_num();
+
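+       /* If the caller's array is too small, report the required size so the
+        * call can be retried with a large enough buffer.
+        */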
+       if (n < count)
+               return count;
+
+       total_missed_rx = 0;
+       total_qbrc = 0;
+       total_qprc = 0;
+       total_qprdc = 0;
+
+       ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
+                                  &total_qprc, &total_qprdc);
+
+       /* If this is a reset, xstats is NULL and we have already cleared the
+        * registers by reading them.
+        */
+       if (!xstats)
+               return 0;
+
+       /* Extended stats from ixgbe_hw_stats */
+       count = 0;
+       for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
+               snprintf(xstats[count].name, sizeof(xstats[count].name), "%s",
+                        rte_ixgbe_stats_strings[i].name);
+               xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+                               rte_ixgbe_stats_strings[i].offset);
+               count++;
+       }
+
+       /* RX Priority Stats */
+       for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
+               for (i = 0; i < 8; i++) {
+                       snprintf(xstats[count].name, sizeof(xstats[count].name),
+                                "rx_priority%u_%s", i,
+                                rte_ixgbe_rxq_strings[stat].name);
+                       xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+                                       rte_ixgbe_rxq_strings[stat].offset +
+                                       (sizeof(uint64_t) * i));
+                       count++;
+               }
+       }
+
+       /* TX Priority Stats */
+       for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
+               for (i = 0; i < 8; i++) {
+                       snprintf(xstats[count].name, sizeof(xstats[count].name),
+                                "tx_priority%u_%s", i,
+                                rte_ixgbe_txq_strings[stat].name);
+                       xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
+                                       rte_ixgbe_txq_strings[stat].offset +
+                                       (sizeof(uint64_t) * i));
+                       count++;
+               }
+       }
+
+       return count;
+}
+
+static void
+ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw_stats *stats =
+                       IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+       unsigned count = ixgbe_xstats_calc_num();
+
+       /* HW registers are cleared on read */
+       ixgbe_dev_xstats_get(dev, NULL, count);
+
+       /* Reset software totals */
+       memset(stats, 0, sizeof(*stats));
+}
+
+static void
+ixgbevf_update_stats(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
                          IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
 
@@ -1962,6 +2789,42 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        /* Rx Multicst Packet */
        UPDATE_VF_STAT(IXGBE_VFMPRC,
            hw_stats->last_vfmprc, hw_stats->vfmprc);
+}
+
+static int
+ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+                      unsigned n)
+{
+       struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
+                       IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+       unsigned i;
+
+       if (n < IXGBEVF_NB_XSTATS)
+               return IXGBEVF_NB_XSTATS;
+
+       ixgbevf_update_stats(dev);
+
+       if (!xstats)
+               return 0;
+
+       /* Extended stats */
+       for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
+               snprintf(xstats[i].name, sizeof(xstats[i].name),
+                        "%s", rte_ixgbevf_stats_strings[i].name);
+               xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
+                       rte_ixgbevf_stats_strings[i].offset);
+       }
+
+       return IXGBEVF_NB_XSTATS;
+}
+
+static void
+ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+       struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
+                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+       ixgbevf_update_stats(dev);
 
        if (stats == NULL)
                return;
@@ -1971,6 +2834,7 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        stats->opackets = hw_stats->vfgptc;
        stats->obytes = hw_stats->vfgotc;
        stats->imcasts = hw_stats->vfmprc;
+       /* stats->imcasts should be removed as imcasts is deprecated */
 }
 
 static void
@@ -2023,6 +2887,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
            !RTE_ETH_DEV_SRIOV(dev).active)
                dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
 
+       if (hw->mac.type == ixgbe_mac_X550 ||
+           hw->mac.type == ixgbe_mac_X550EM_x)
+               dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_IPV4_CKSUM  |
@@ -2031,6 +2899,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                DEV_TX_OFFLOAD_SCTP_CKSUM  |
                DEV_TX_OFFLOAD_TCP_TSO;
 
+       if (hw->mac.type == ixgbe_mac_X550 ||
+           hw->mac.type == ixgbe_mac_X550EM_x)
+               dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
@@ -2052,10 +2924,48 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
                                ETH_TXQ_FLAGS_NOOFFLOADS,
        };
-       dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+
+       dev_info->rx_desc_lim = rx_desc_lim;
+       dev_info->tx_desc_lim = tx_desc_lim;
+
+       dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
+       dev_info->reta_size = ixgbe_reta_size_get(hw->mac.type);
        dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
 }
 
+static const uint32_t *
+ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+       static const uint32_t ptypes[] = {
+               /* For non-vec functions,
+                * refers to ixgbe_rxd_pkt_info_to_pkt_type();
+                * for vec functions,
+                * refers to _recv_raw_pkts_vec().
+                */
+               RTE_PTYPE_L2_ETHER,
+               RTE_PTYPE_L3_IPV4,
+               RTE_PTYPE_L3_IPV4_EXT,
+               RTE_PTYPE_L3_IPV6,
+               RTE_PTYPE_L3_IPV6_EXT,
+               RTE_PTYPE_L4_SCTP,
+               RTE_PTYPE_L4_TCP,
+               RTE_PTYPE_L4_UDP,
+               RTE_PTYPE_TUNNEL_IP,
+               RTE_PTYPE_INNER_L3_IPV6,
+               RTE_PTYPE_INNER_L3_IPV6_EXT,
+               RTE_PTYPE_INNER_L4_TCP,
+               RTE_PTYPE_INNER_L4_UDP,
+               RTE_PTYPE_UNKNOWN
+       };
+
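+       /* The list is advertised only for the scalar Rx burst functions
+        * checked below; other Rx paths (e.g. vector Rx) get no ptype list.
+        */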
+       if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
+           dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
+           dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
+           dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
+               return ptypes;
+       return NULL;
+}
+
 static void
 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                     struct rte_eth_dev_info *dev_info)
@@ -2081,7 +2991,8 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                                DEV_TX_OFFLOAD_IPV4_CKSUM  |
                                DEV_TX_OFFLOAD_UDP_CKSUM   |
                                DEV_TX_OFFLOAD_TCP_CKSUM   |
-                               DEV_TX_OFFLOAD_SCTP_CKSUM;
+                               DEV_TX_OFFLOAD_SCTP_CKSUM  |
+                               DEV_TX_OFFLOAD_TCP_TSO;
 
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
@@ -2104,6 +3015,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
                                ETH_TXQ_FLAGS_NOOFFLOADS,
        };
+
+       dev_info->rx_desc_lim = rx_desc_lim;
+       dev_info->tx_desc_lim = tx_desc_lim;
 }
 
 /* return 0 means link status changed, -1 means not changed */
@@ -2122,11 +3036,14 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
        memset(&old, 0, sizeof(old));
        rte_ixgbe_dev_atomic_read_link_status(dev, &old);
 
+       hw->mac.get_link_status = true;
+
        /* check if it needs to wait to complete, if lsc interrupt is enabled */
        if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
                diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
        else
                diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
+
        if (diag != 0) {
                link.link_speed = ETH_LINK_SPEED_100;
                link.link_duplex = ETH_LINK_HALF_DUPLEX;
@@ -2136,12 +3053,6 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
                return 0;
        }
 
-       if (link_speed == IXGBE_LINK_SPEED_UNKNOWN &&
-           !hw->mac.get_link_status) {
-               memcpy(&link, &old, sizeof(link));
-               return -1;
-       }
-
        if (link_up == 0) {
                rte_ixgbe_dev_atomic_write_link_status(dev, &link);
                if (link.link_status == old.link_status)
@@ -2252,6 +3163,28 @@ ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
        return 0;
 }
 
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It will be called only once during NIC initialization.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
+{
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+       intr->mask |= IXGBE_EICR_RTX_QUEUE;
+
+       return 0;
+}
+
 /*
  * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
  *
@@ -2275,17 +3208,22 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
 
        /* read-on-clear nic registers here */
        eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
-       PMD_DRV_LOG(INFO, "eicr %x", eicr);
+       PMD_DRV_LOG(DEBUG, "eicr %x", eicr);
 
        intr->flags = 0;
-       if (eicr & IXGBE_EICR_LSC) {
-               /* set flag for async link update */
+
+       /* set flag for async link update */
+       if (eicr & IXGBE_EICR_LSC)
                intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
-       }
 
        if (eicr & IXGBE_EICR_MAILBOX)
                intr->flags |= IXGBE_FLAG_MAILBOX;
 
+       if (hw->mac.type ==  ixgbe_mac_X550EM_x &&
+           hw->phy.type == ixgbe_phy_x550em_ext_t &&
+           (eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
+               intr->flags |= IXGBE_FLAG_PHY_INTERRUPT;
+
        return 0;
 }
 
@@ -2316,7 +3254,7 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
                PMD_INIT_LOG(INFO, " Port %d: Link Down",
                                (int)(dev->data->port_id));
        }
-       PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
+       PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
                                dev->pci_dev->addr.domain,
                                dev->pci_dev->addr.bus,
                                dev->pci_dev->addr.devid,
@@ -2341,6 +3279,8 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
        int64_t timeout;
        struct rte_eth_link link;
        int intr_enable_delay = false;
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
 
@@ -2349,6 +3289,11 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
                intr->flags &= ~IXGBE_FLAG_MAILBOX;
        }
 
+       if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
+               ixgbe_handle_lasi(hw);
+               intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
+       }
+
        if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
                /* get the link status before link update, for predicting later */
                memset(&link, 0, sizeof(link));
@@ -2412,6 +3357,11 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
        if (eicr & IXGBE_EICR_MAILBOX)
                ixgbe_pf_mbx_process(dev);
 
+       if (intr->flags & IXGBE_FLAG_PHY_INTERRUPT) {
+               ixgbe_handle_lasi(hw);
+               intr->flags &= ~IXGBE_FLAG_PHY_INTERRUPT;
+       }
+
        if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
                ixgbe_dev_link_update(dev, 0);
                intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -2438,9 +3388,10 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
  */
 static void
 ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
-                                                       void *param)
+                           void *param)
 {
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
        ixgbe_dev_interrupt_get_status(dev);
        ixgbe_dev_interrupt_action(dev);
 }
@@ -2451,7 +3402,7 @@ ixgbe_dev_led_on(struct rte_eth_dev *dev)
        struct ixgbe_hw *hw;
 
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       return (ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
+       return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
 }
 
 static int
@@ -2460,7 +3411,7 @@ ixgbe_dev_led_off(struct rte_eth_dev *dev)
        struct ixgbe_hw *hw;
 
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
+       return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
 }
 
 static int
@@ -2530,8 +3481,6 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        PMD_INIT_FUNC_TRACE();
 
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       if (fc_conf->autoneg != !hw->fc.disable_fc_autoneg)
-               return -ENOTSUP;
        rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
        PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
 
@@ -2544,7 +3493,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
                (fc_conf->high_water < fc_conf->low_water)) {
                PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
                PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
@@ -2552,6 +3501,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        hw->fc.high_water[0]  = fc_conf->high_water;
        hw->fc.low_water[0]   = fc_conf->low_water;
        hw->fc.send_xon       = fc_conf->send_xon;
+       hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
 
        err = ixgbe_fc_enable(hw);
 
@@ -2604,13 +3554,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
        /* Low water mark of zero causes XOFF floods */
        if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
                 /* High/Low water can not be 0 */
-               if( (!hw->fc.high_water[tc_num])|| (!hw->fc.low_water[tc_num])) {
+               if ((!hw->fc.high_water[tc_num]) || (!hw->fc.low_water[tc_num])) {
                        PMD_INIT_LOG(ERR, "Invalid water mark configuration");
                        ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                        goto out;
                }
 
-               if(hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
+               if (hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
                        PMD_INIT_LOG(ERR, "Invalid water mark configuration");
                        ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                        goto out;
@@ -2724,7 +3674,7 @@ ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num)
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
 
-       if(hw->mac.type != ixgbe_mac_82598EB) {
+       if (hw->mac.type != ixgbe_mac_82598EB) {
                ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num);
        }
        return ret_val;
@@ -2765,7 +3715,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
            (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
                PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
                PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
@@ -2789,16 +3739,26 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
                          struct rte_eth_rss_reta_entry64 *reta_conf,
                          uint16_t reta_size)
 {
-       uint8_t i, j, mask;
+       uint16_t i, sp_reta_size;
+       uint8_t j, mask;
        uint32_t reta, r;
        uint16_t idx, shift;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t reta_reg;
 
        PMD_INIT_FUNC_TRACE();
-       if (reta_size != ETH_RSS_RETA_SIZE_128) {
+
+       if (!ixgbe_rss_update_sp(hw->mac.type)) {
+               PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
+                       "NIC.");
+               return -ENOTSUP;
+       }
+
+       sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+       if (reta_size != sp_reta_size) {
                PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
                        "(%d) doesn't match the number hardware can supported "
-                       "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+                       "(%d)\n", reta_size, sp_reta_size);
                return -EINVAL;
        }
 
@@ -2809,10 +3769,11 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
                                                IXGBE_4_BIT_MASK);
                if (!mask)
                        continue;
+               reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
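+               /* Each 32-bit RETA register packs four 8-bit queue indexes;
+                * mask selects which of the four entries to update.
+                */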
                if (mask == IXGBE_4_BIT_MASK)
                        r = 0;
                else
-                       r = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+                       r = IXGBE_READ_REG(hw, reta_reg);
                for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
                        if (mask & (0x1 << j))
                                reta |= reta_conf[idx].reta[shift + j] <<
@@ -2821,7 +3782,7 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
                                reta |= r & (IXGBE_8_BIT_MASK <<
                                                (CHAR_BIT * j));
                }
-               IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+               IXGBE_WRITE_REG(hw, reta_reg, reta);
        }
 
        return 0;
@@ -2832,20 +3793,23 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
                         struct rte_eth_rss_reta_entry64 *reta_conf,
                         uint16_t reta_size)
 {
-       uint8_t i, j, mask;
+       uint16_t i, sp_reta_size;
+       uint8_t j, mask;
        uint32_t reta;
        uint16_t idx, shift;
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t reta_reg;
 
        PMD_INIT_FUNC_TRACE();
-       if (reta_size != ETH_RSS_RETA_SIZE_128) {
+       sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
+       if (reta_size != sp_reta_size) {
                PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
                        "(%d) doesn't match the number hardware can supported "
-                               "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+                       "(%d)\n", reta_size, sp_reta_size);
                return -EINVAL;
        }
 
-       for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IXGBE_4_BIT_WIDTH) {
+       for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                mask = (uint8_t)((reta_conf[idx].mask >> shift) &
@@ -2853,7 +3817,8 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
                if (!mask)
                        continue;
 
-               reta = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+               reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
+               reta = IXGBE_READ_REG(hw, reta_reg);
                for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
                        if (mask & (0x1 << j))
                                reta_conf[idx].reta[shift + j] =
@@ -2883,6 +3848,14 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
        ixgbe_clear_rar(hw, index);
 }
 
+static void
+ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
+{
+       ixgbe_remove_rar(dev, 0);
+
+       ixgbe_add_rar(dev, addr, 0, 0);
+}
+
 static int
 ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
@@ -2943,10 +3916,25 @@ ixgbevf_intr_disable(struct ixgbe_hw *hw)
        IXGBE_WRITE_FLUSH(hw);
 }
 
+static void
+ixgbevf_intr_enable(struct ixgbe_hw *hw)
+{
+       PMD_INIT_FUNC_TRACE();
+
+       /* VF enable interrupt autoclean */
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_VF_IRQ_ENABLE_MASK);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, IXGBE_VF_IRQ_ENABLE_MASK);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
+
+       IXGBE_WRITE_FLUSH(hw);
+}
+
 static int
 ixgbevf_dev_configure(struct rte_eth_dev *dev)
 {
        struct rte_eth_conf* conf = &dev->data->dev_conf;
+       struct ixgbe_adapter *adapter =
+                       (struct ixgbe_adapter *)dev->data->dev_private;
 
        PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
                     dev->data->port_id);
@@ -2957,16 +3945,23 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
         */
 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
        if (!conf->rxmode.hw_strip_crc) {
-               PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
+               PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
                conf->rxmode.hw_strip_crc = 1;
        }
 #else
        if (conf->rxmode.hw_strip_crc) {
-               PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
+               PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
                conf->rxmode.hw_strip_crc = 0;
        }
 #endif
 
+       /*
+        * Initialize to TRUE. If any Rx queue doesn't meet the bulk
+        * allocation or vector Rx preconditions, it will be reset.
+        */
+       adapter->rx_bulk_alloc_allowed = true;
+       adapter->rx_vec_allowed = true;
+
        return 0;
 }
 
@@ -2975,6 +3970,9 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t intr_vector = 0;
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+
        int err, mask = 0;
 
        PMD_INIT_FUNC_TRACE();
@@ -3005,6 +4003,30 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 
        ixgbevf_dev_rxtx_start(dev);
 
+       /* check and configure queue intr-vector mapping */
+       if (dev->data->dev_conf.intr_conf.rxq != 0) {
+               intr_vector = dev->data->nb_rx_queues;
+               if (rte_intr_efd_enable(intr_handle, intr_vector))
+                       return -1;
+       }
+
+       if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+               intr_handle->intr_vec =
+                       rte_zmalloc("intr_vec",
+                                   dev->data->nb_rx_queues * sizeof(int), 0);
+               if (intr_handle->intr_vec == NULL) {
+                       PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+                                    " intr_vec\n", dev->data->nb_rx_queues);
+                       return -ENOMEM;
+               }
+       }
+       ixgbevf_configure_msix(dev);
+
+       rte_intr_enable(intr_handle);
+
+       /* Re-enable interrupt for VF */
+       ixgbevf_intr_enable(hw);
+
        return 0;
 }
 
@@ -3012,10 +4034,11 @@ static void
 ixgbevf_dev_stop(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
 
        PMD_INIT_FUNC_TRACE();
 
-       hw->adapter_stopped = TRUE;
+       hw->adapter_stopped = 1;
        ixgbe_stop_adapter(hw);
 
        /*
@@ -3028,6 +4051,13 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
        dev->data->scattered_rx = 0;
 
        ixgbe_dev_clear_queues(dev);
+
+       /* Clean datapath event and queue/vec mapping */
+       rte_intr_efd_disable(intr_handle);
+       if (intr_handle->intr_vec != NULL) {
+               rte_free(intr_handle->intr_vec);
+               intr_handle->intr_vec = NULL;
+       }
 }
 
 static void
@@ -3041,8 +4071,14 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)
 
        ixgbevf_dev_stop(dev);
 
-       /* reprogram the RAR[0] in case user changed it. */
-       ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+       ixgbe_dev_free_queues(dev);
+
+       /**
+        * Remove the VF MAC address to ensure
+        * that the VF traffic goes to the PF
+        * after stop, close and detach of the VF
+        **/
+       ixgbevf_remove_mac_addr(dev, 0);
 }
 
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
@@ -3054,10 +4090,10 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
 
        for (i = 0; i < IXGBE_VFTA_SIZE; i++){
                vfta = shadow_vfta->vfta[i];
-               if(vfta){
+               if (vfta) {
                        mask = 1;
                        for (j = 0; j < 32; j++){
-                               if(vfta & mask)
+                               if (vfta & mask)
                                        ixgbe_set_vfta(hw, (i<<5)+j, 0, on);
                                mask<<=1;
                        }
@@ -3081,7 +4117,7 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 
        /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
        ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on);
-       if(ret){
+       if (ret) {
                PMD_INIT_LOG(ERR, "Unable to set VF vlan");
                return ret;
        }
@@ -3106,11 +4142,11 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
 
        PMD_INIT_FUNC_TRACE();
 
-       if(queue >= hw->mac.max_rx_queues)
+       if (queue >= hw->mac.max_rx_queues)
                return;
 
        ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
-       if(on)
+       if (on)
                ctrl |= IXGBE_RXDCTL_VME;
        else
                ctrl &= ~IXGBE_RXDCTL_VME;
@@ -3128,10 +4164,10 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
        int on = 0;
 
        /* VF function only support hw strip feature, others are not support */
-       if(mask & ETH_VLAN_STRIP_MASK){
+       if (mask & ETH_VLAN_STRIP_MASK) {
                on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
 
-               for(i=0; i < hw->mac.max_rx_queues; i++)
+               for (i = 0; i < hw->mac.max_rx_queues; i++)
                        ixgbevf_vlan_strip_queue_set(dev,i,on);
        }
 }
@@ -3145,7 +4181,7 @@ ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
        reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
        if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
                PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
-               return (-1);
+               return -1;
        }
 
        return 0;
@@ -3202,14 +4238,14 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
 
        /* The UTA table only exists on 82599 hardware and newer */
        if (hw->mac.type < ixgbe_mac_82599EB)
-               return (-ENOTSUP);
+               return -ENOTSUP;
 
        vector = ixgbe_uta_vector(hw,mac_addr);
        uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
        uta_shift = vector & ixgbe_uta_bit_mask;
 
        rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
-       if(rc == on)
+       if (rc == on)
                return 0;
 
        reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
@@ -3245,9 +4281,9 @@ ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
 
        /* The UTA table only exists on 82599 hardware and newer */
        if (hw->mac.type < ixgbe_mac_82599EB)
-               return (-ENOTSUP);
+               return -ENOTSUP;
 
-       if(on) {
+       if (on) {
                for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
                        uta_info->uta_shadow[i] = ~0;
                        IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
@@ -3294,10 +4330,10 @@ ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
        if (hw->mac.type == ixgbe_mac_82598EB) {
                PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
                             " on 82599 hardware and newer");
-               return (-ENOTSUP);
+               return -ENOTSUP;
        }
        if (ixgbe_vmdq_mode_check(hw) < 0)
-               return (-ENOTSUP);
+               return -ENOTSUP;
 
        val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
 
@@ -3322,7 +4358,7 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        if (ixgbe_vmdq_mode_check(hw) < 0)
-               return (-ENOTSUP);
+               return -ENOTSUP;
 
        addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
        reg = IXGBE_READ_REG(hw, addr);
@@ -3349,7 +4385,7 @@ ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        if (ixgbe_vmdq_mode_check(hw) < 0)
-               return (-ENOTSUP);
+               return -ENOTSUP;
 
        addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
        reg = IXGBE_READ_REG(hw, addr);
@@ -3375,7 +4411,7 @@ ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        if (ixgbe_vmdq_mode_check(hw) < 0)
-               return (-ENOTSUP);
+               return -ENOTSUP;
        for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
                if (pool_mask & ((uint64_t)(1ULL << pool_idx)))
                        ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on);
@@ -3386,9 +4422,17 @@ ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
        return ret;
 }
 
+#define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
+#define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
+#define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
+#define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
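+/* A mirror rule type is invalid if it sets any bit outside the supported set. */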
+#define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
+       ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
+       ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
+
 static int
 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
-                       struct rte_eth_vmdq_mirror_conf *mirror_conf,
+                       struct rte_eth_mirror_conf *mirror_conf,
                        uint8_t rule_id, uint8_t on)
 {
        uint32_t mr_ctl,vlvf;
@@ -3410,32 +4454,37 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
                        (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint8_t mirror_type = 0;
 
        if (ixgbe_vmdq_mode_check(hw) < 0)
-               return (-ENOTSUP);
+               return -ENOTSUP;
 
-       /* Check if vlan mask is valid */
-       if ((mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) && (on)) {
-               if (mirror_conf->vlan.vlan_mask == 0)
-                       return (-EINVAL);
+       if (rule_id >= IXGBE_MAX_MIRROR_RULES)
+               return -EINVAL;
+
+       if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
+               PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
+                       mirror_conf->rule_type);
+               return -EINVAL;
        }
 
-       /* Check if vlan id is valid and find conresponding VLAN ID index in VLVF */
-       if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) {
+       if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
+               mirror_type |= IXGBE_MRCTL_VLME;
+               /* Check if vlan id is valid and find corresponding VLAN ID index in VLVF */
                for (i = 0;i < IXGBE_VLVF_ENTRIES; i++) {
                        if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
                                /* search vlan id related pool vlan filter index */
                                reg_index = ixgbe_find_vlvf_slot(hw,
                                                mirror_conf->vlan.vlan_id[i]);
-                               if(reg_index < 0)
-                                       return (-EINVAL);
+                               if (reg_index < 0)
+                                       return -EINVAL;
                                vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
                                if ((vlvf & IXGBE_VLVF_VIEN) &&
-                                       ((vlvf & IXGBE_VLVF_VLANID_MASK)
-                                               == mirror_conf->vlan.vlan_id[i]))
+                                   ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
+                                     mirror_conf->vlan.vlan_id[i]))
                                        vlan_mask |= (1ULL << reg_index);
                                else
-                                       return (-EINVAL);
+                                       return -EINVAL;
                        }
                }
 
@@ -3445,8 +4494,8 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 
                        mr_info->mr_conf[rule_id].vlan.vlan_mask =
                                                mirror_conf->vlan.vlan_mask;
-                       for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
-                               if(mirror_conf->vlan.vlan_mask & (1ULL << i))
+                       for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+                               if (mirror_conf->vlan.vlan_mask & (1ULL << i))
                                        mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
                                                mirror_conf->vlan.vlan_id[i];
                        }
@@ -3454,7 +4503,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
                        mv_lsb = 0;
                        mv_msb = 0;
                        mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
-                       for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
+                       for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
                                mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
                }
        }
@@ -3463,7 +4512,8 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
         * if enable pool mirror, write related pool mask register,if disable
         * pool mirror, clear PFMRVM register
         */
-       if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
+       if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
+               mirror_type |= IXGBE_MRCTL_VPME;
                if (on) {
                        mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
                        mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
@@ -3476,31 +4526,35 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
                        mr_info->mr_conf[rule_id].pool_mask = 0;
                }
        }
+       if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
+               mirror_type |= IXGBE_MRCTL_UPME;
+       if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
+               mirror_type |= IXGBE_MRCTL_DPME;
 
        /* read  mirror control register and recalculate it */
-       mr_ctl = IXGBE_READ_REG(hw,IXGBE_MRCTL(rule_id));
+       mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
 
        if (on) {
-               mr_ctl |= mirror_conf->rule_type_mask;
+               mr_ctl |= mirror_type;
                mr_ctl &= mirror_rule_mask;
                mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
        } else
-               mr_ctl &= ~(mirror_conf->rule_type_mask & mirror_rule_mask);
+               mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
 
-       mr_info->mr_conf[rule_id].rule_type_mask = (uint8_t)(mr_ctl & mirror_rule_mask);
+       mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
        mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
 
        /* write mirrror control  register */
        IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
 
-        /* write pool mirrror control  register */
-       if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
+       /* write pool mirror control register */
+       if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) {
                IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
                IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
                                mp_msb);
        }
        /* write VLAN mirrror control  register */
-       if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) {
+       if (mirror_conf->rule_type == ETH_MIRROR_VLAN) {
                IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
                IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
                                mv_msb);
@@ -3523,10 +4577,10 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
                (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
 
        if (ixgbe_vmdq_mode_check(hw) < 0)
-               return (-ENOTSUP);
+               return -ENOTSUP;
 
        memset(&mr_info->mr_conf[rule_id], 0,
-               sizeof(struct rte_eth_vmdq_mirror_conf));
+               sizeof(struct rte_eth_mirror_conf));
 
        /* clear PFVMCTL register */
        IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
@@ -3542,93 +4596,352 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
        return 0;
 }
 
-static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
-       uint16_t queue_idx, uint16_t tx_rate)
+static int
+ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t rf_dec, rf_int;
-       uint32_t bcnrc_val;
-       uint16_t link_speed = dev->data->dev_link.link_speed;
+       uint32_t mask;
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       if (queue_idx >= hw->mac.max_tx_queues)
-               return -EINVAL;
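+       /* Note: only the misc vector bit is (re)armed here; queue_id is not
+        * used to pick a per-queue bit in this implementation.
+        */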
+       mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
+       mask |= (1 << IXGBE_MISC_VEC_ID);
+       RTE_SET_USED(queue_id);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
 
-       if (tx_rate != 0) {
-               /* Calculate the rate factor values to set */
-               rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
-               rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
-               rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
+       rte_intr_enable(&dev->pci_dev->intr_handle);
 
-               bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
-               bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
-                               IXGBE_RTTBCNRC_RF_INT_MASK_M);
-               bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
-       } else {
-               bcnrc_val = 0;
-       }
+       return 0;
+}
 
-       /*
-        * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
-        * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
-        * set as 0x4.
-        */
-       if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
-               (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
-                               IXGBE_MAX_JUMBO_FRAME_SIZE))
-               IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
-                       IXGBE_MMW_SIZE_JUMBO_FRAME);
-       else
-               IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
-                       IXGBE_MMW_SIZE_DEFAULT);
+static int
+ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+       uint32_t mask;
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       /* Set RTTBCNRC of queue X */
-       IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
-       IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
-       IXGBE_WRITE_FLUSH(hw);
+       mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
+       mask &= ~(1 << IXGBE_MISC_VEC_ID);
+       RTE_SET_USED(queue_id);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
 
        return 0;
 }
 
-static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
-       uint16_t tx_rate, uint64_t q_msk)
+static int
+ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct ixgbe_vf_info *vfinfo =
-               *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
-       uint8_t  nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
-       uint32_t queue_stride =
-               IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
-       uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
-       uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
-       uint16_t total_rate = 0;
-
-       if (queue_end >= hw->mac.max_tx_queues)
-               return -EINVAL;
-
-       if (vfinfo != NULL) {
-               for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) {
-                       if (vf_idx == vf)
-                               continue;
-                       for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
-                               idx++)
-                               total_rate += vfinfo[vf_idx].tx_rate[idx];
-               }
-       } else
-               return -EINVAL;
+       uint32_t mask;
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 
-       /* Store tx_rate for this vf. */
-       for (idx = 0; idx < nb_q_per_pool; idx++) {
-               if (((uint64_t)0x1 << idx) & q_msk) {
-                       if (vfinfo[vf].tx_rate[idx] != tx_rate)
-                               vfinfo[vf].tx_rate[idx] = tx_rate;
-                       total_rate += tx_rate;
-               }
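+       /* Vectors 0-15 are carried in the software-maintained intr->mask and
+        * applied by ixgbe_enable_intr(); higher vectors are set directly in
+        * the EIMS_EX registers.
+        */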
+       if (queue_id < 16) {
+               ixgbe_disable_intr(hw);
+               intr->mask |= (1 << queue_id);
+               ixgbe_enable_intr(dev);
+       } else if (queue_id < 32) {
+               mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
+               mask &= (1 << queue_id);
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+       } else if (queue_id < 64) {
+               mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
+               mask &= (1 << (queue_id - 32));
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
        }
+       rte_intr_enable(&dev->pci_dev->intr_handle);
 
-       if (total_rate > dev->data->dev_link.link_speed) {
-               /*
-                * Reset stored TX rate of the VF if it causes exceed
-                * link speed.
+       return 0;
+}
+
+static int
+ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+       uint32_t mask;
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+       if (queue_id < 16) {
+               ixgbe_disable_intr(hw);
+               intr->mask &= ~(1 << queue_id);
+               ixgbe_enable_intr(dev);
+       } else if (queue_id < 32) {
+               mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
+               mask &= ~(1 << queue_id);
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+       } else if (queue_id < 64) {
+               mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
+               mask &= ~(1 << (queue_id - 32));
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+       }
+
+       return 0;
+}
+
+static void
+ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+                    uint8_t queue, uint8_t msix_vector)
+{
+       uint32_t tmp, idx;
+
+       if (direction == -1) {
+               /* other causes */
+               msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+               tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+               tmp &= ~0xFF;
+               tmp |= msix_vector;
+               IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
+       } else {
+               /* rx or tx cause */
+               msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+               idx = ((16 * (queue & 1)) + (8 * direction));
+               tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
+               tmp &= ~(0xFF << idx);
+               tmp |= (msix_vector << idx);
+               IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
+       }
+}
+
+/**
+ * Set the IVAR registers, mapping interrupt causes to vectors
+ * @param hw
+ *  pointer to ixgbe_hw struct
+ * @param direction
+ *  0 for Rx, 1 for Tx, -1 for other causes
+ * @param queue
+ *  queue to map the corresponding interrupt to
+ * @param msix_vector
+ *  the vector to map to the corresponding queue
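+ *
+ * Illustrative example (82599/X540 layout): mapping Rx queue 5 (direction 0)
+ * to MSI-X vector 3 gives idx = (16 * (5 & 1)) + (8 * 0) = 16, so the value
+ * (3 | IXGBE_IVAR_ALLOC_VAL) is written into bits 23:16 (byte lane 2) of
+ * IVAR(5 >> 1) = IVAR(2).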
+ */
+static void
+ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+                  uint8_t queue, uint8_t msix_vector)
+{
+       uint32_t tmp, idx;
+
+       msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+       if (hw->mac.type == ixgbe_mac_82598EB) {
+               if (direction == -1)
+                       direction = 0;
+               idx = (((direction * 64) + queue) >> 2) & 0x1F;
+               tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
+               tmp &= ~(0xFF << (8 * (queue & 0x3)));
+               tmp |= (msix_vector << (8 * (queue & 0x3)));
+               IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
+       } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
+                       (hw->mac.type == ixgbe_mac_X540)) {
+               if (direction == -1) {
+                       /* other causes */
+                       idx = ((queue & 1) * 8);
+                       tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
+                       tmp &= ~(0xFF << idx);
+                       tmp |= (msix_vector << idx);
+                       IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
+               } else {
+                       /* rx or tx causes */
+                       idx = ((16 * (queue & 1)) + (8 * direction));
+                       tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
+                       tmp &= ~(0xFF << idx);
+                       tmp |= (msix_vector << idx);
+                       IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
+               }
+       }
+}
+
+static void
+ixgbevf_configure_msix(struct rte_eth_dev *dev)
+{
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t q_idx;
+       uint32_t vector_idx = IXGBE_MISC_VEC_ID;
+
+       /* Don't configure the MSI-X register if no mapping is done
+        * between interrupt vector and event fd.
+        */
+       if (!rte_intr_dp_is_en(intr_handle))
+               return;
+
+       /* Configure all RX queues of VF */
+       for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
+               /* Force all queues to use vector 0,
+                * as IXGBE_VF_MAXMSIVECOTR = 1
+                */
+               ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
+               intr_handle->intr_vec[q_idx] = vector_idx;
+       }
+
+       /* Configure VF other cause ivar */
+       ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
+}
+
+/**
+ * Sets up the hardware to properly generate MSI-X interrupts
+ * @param dev
+ *  pointer to the rte_eth_dev structure
+ */
+static void
+ixgbe_configure_msix(struct rte_eth_dev *dev)
+{
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
+       uint32_t vec = IXGBE_MISC_VEC_ID;
+       uint32_t mask;
+       uint32_t gpie;
+
+       /* Don't configure the MSI-X register if no mapping is done
+        * between interrupt vector and event fd
+        */
+       if (!rte_intr_dp_is_en(intr_handle))
+               return;
+
+       if (rte_intr_allow_others(intr_handle))
+               vec = base = IXGBE_RX_VEC_START;
+
+       /* setup GPIE for MSI-x mode */
+       gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+       gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
+               IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
+       /* auto clearing and auto setting corresponding bits in EIMS
+        * when MSI-X interrupt is triggered
+        */
+       if (hw->mac.type == ixgbe_mac_82598EB) {
+               IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+       } else {
+               IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
+               IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+       /* Populate the IVAR table and set the ITR values to the
+        * corresponding register.
+        */
+       for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
+            queue_id++) {
+               /* by default, 1:1 mapping */
+               ixgbe_set_ivar_map(hw, 0, queue_id, vec);
+               intr_handle->intr_vec[queue_id] = vec;
+               if (vec < base + intr_handle->nb_efd - 1)
+                       vec++;
+       }
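+
+       /*
+        * Illustrative example: with 4 Rx queues and nb_efd = 3, queues 0
+        * and 1 get their own vectors (base and base + 1) while queues 2
+        * and 3 both share the last vector (base + 2).
+        */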
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
+                                  IXGBE_MISC_VEC_ID);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
+               break;
+       default:
+               break;
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
+                       IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
+
+       /* set up to autoclear timer, and the vectors */
+       mask = IXGBE_EIMS_ENABLE_MASK;
+       mask &= ~(IXGBE_EIMS_OTHER |
+                 IXGBE_EIMS_MAILBOX |
+                 IXGBE_EIMS_LSC);
+
+       IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
+}
+
+static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+       uint16_t queue_idx, uint16_t tx_rate)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t rf_dec, rf_int;
+       uint32_t bcnrc_val;
+       uint16_t link_speed = dev->data->dev_link.link_speed;
+
+       if (queue_idx >= hw->mac.max_tx_queues)
+               return -EINVAL;
+
+       if (tx_rate != 0) {
+               /* Calculate the rate factor values to set */
+               rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
+               rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
+               rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
+
+               bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
+               bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
+                               IXGBE_RTTBCNRC_RF_INT_MASK_M);
+               bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
+       } else {
+               bcnrc_val = 0;
+       }
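+
+       /*
+        * Illustrative example (assuming RF_INT occupies bits 23:14, i.e. a
+        * shift of 14): with link_speed = 10000 Mb/s and tx_rate = 3000 Mb/s,
+        * rf_int = 3 and rf_dec = (1000 << 14) / 3000 = 5461, so bcnrc_val =
+        * IXGBE_RTTBCNRC_RS_ENA | (3 << 14) | 5461 limits the queue to
+        * roughly 3000 Mb/s.
+        */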
+
+       /*
+        * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+        * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
+        * set as 0x4.
+        */
+       if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
+               (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
+                               IXGBE_MAX_JUMBO_FRAME_SIZE))
+               IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+                       IXGBE_MMW_SIZE_JUMBO_FRAME);
+       else
+               IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+                       IXGBE_MMW_SIZE_DEFAULT);
+
+       /* Set RTTBCNRC of queue X */
+       IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
+       IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return 0;
+}
+
+static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+       uint16_t tx_rate, uint64_t q_msk)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_vf_info *vfinfo =
+               *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+       uint8_t  nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+       uint32_t queue_stride =
+               IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+       uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
+       uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
+       uint16_t total_rate = 0;
+
+       if (queue_end >= hw->mac.max_tx_queues)
+               return -EINVAL;
+
+       if (vfinfo != NULL) {
+               for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) {
+                       if (vf_idx == vf)
+                               continue;
+                       for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
+                               idx++)
+                               total_rate += vfinfo[vf_idx].tx_rate[idx];
+               }
+       } else
+               return -EINVAL;
+
+       /* Store tx_rate for this vf. */
+       for (idx = 0; idx < nb_q_per_pool; idx++) {
+               if (((uint64_t)0x1 << idx) & q_msk) {
+                       if (vfinfo[vf].tx_rate[idx] != tx_rate)
+                               vfinfo[vf].tx_rate[idx] = tx_rate;
+                       total_rate += tx_rate;
+               }
+       }
+
+       if (total_rate > dev->data->dev_link.link_speed) {
+               /*
+                * Reset the stored TX rate of the VF if it would cause the
+                * link speed to be exceeded.
                 */
                memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
                return -EINVAL;
@@ -3713,6 +5026,14 @@ ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
        }
 }
 
+static void
+ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
+}
+
 #define MAC_TYPE_FILTER_SUP(type)    do {\
        if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
                (type) != ixgbe_mac_X550)\
@@ -4430,6 +5751,9 @@ ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
        case RTE_ETH_FILTER_FDIR:
                ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
                break;
+       case RTE_ETH_FILTER_L2_TUNNEL:
+               ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
+               break;
        default:
                PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
                                                        filter_type);
@@ -4439,6 +5763,1319 @@ ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
        return ret;
 }
 
+static u8 *
+ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
+                       u8 **mc_addr_ptr, u32 *vmdq)
+{
+       u8 *mc_addr;
+
+       *vmdq = 0;
+       mc_addr = *mc_addr_ptr;
+       *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr));
+       return mc_addr;
+}
+
+static int
+ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
+                         struct ether_addr *mc_addr_set,
+                         uint32_t nb_mc_addr)
+{
+       struct ixgbe_hw *hw;
+       u8 *mc_addr_list;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       mc_addr_list = (u8 *)mc_addr_set;
+       return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
+                                        ixgbe_dev_addr_list_itr, TRUE);
+}
+
+static uint64_t
+ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint64_t systime_cycles;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_X550:
+               /* SYSTIMEL stores ns and SYSTIMEH stores seconds. */
+               systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+               systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
+                               * NSEC_PER_SEC;
+               break;
+       default:
+               systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+               systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
+                               << 32;
+       }
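+
+       /*
+        * Example: on X550, SYSTIMH = 2 and SYSTIML = 500000000 yield
+        * 2 * NSEC_PER_SEC + 500000000 = 2500000000 cycles, which the
+        * timecounter later interprets directly as nanoseconds; on other
+        * MACs the two registers simply form one 64-bit counter value.
+        */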
+
+       return systime_cycles;
+}
+
+static uint64_t
+ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint64_t rx_tstamp_cycles;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_X550:
+               /* RXSTMPL stores ns and RXSTMPH stores seconds. */
+               rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
+               rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
+                               * NSEC_PER_SEC;
+               break;
+       default:
+               /* RXSTMPL stores ns and RXSTMPH stores seconds. */
+               rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
+               rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
+                               << 32;
+       }
+
+       return rx_tstamp_cycles;
+}
+
+static uint64_t
+ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint64_t tx_tstamp_cycles;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_X550:
+               /* TXSTMPL stores ns and TXSTMPH stores seconds. */
+               tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
+               tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
+                               * NSEC_PER_SEC;
+               break;
+       default:
+               /* TXSTMPL stores ns and TXSTMPH stores seconds. */
+               tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
+               tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
+                               << 32;
+       }
+
+       return tx_tstamp_cycles;
+}
+
+static void
+ixgbe_start_timecounters(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_adapter *adapter =
+               (struct ixgbe_adapter *)dev->data->dev_private;
+       struct rte_eth_link link;
+       uint32_t incval = 0;
+       uint32_t shift = 0;
+
+       /* Get current link speed. */
+       memset(&link, 0, sizeof(link));
+       ixgbe_dev_link_update(dev, 1);
+       rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+
+       switch (link.link_speed) {
+       case ETH_LINK_SPEED_100:
+               incval = IXGBE_INCVAL_100;
+               shift = IXGBE_INCVAL_SHIFT_100;
+               break;
+       case ETH_LINK_SPEED_1000:
+               incval = IXGBE_INCVAL_1GB;
+               shift = IXGBE_INCVAL_SHIFT_1GB;
+               break;
+       case ETH_LINK_SPEED_10000:
+       default:
+               incval = IXGBE_INCVAL_10GB;
+               shift = IXGBE_INCVAL_SHIFT_10GB;
+               break;
+       }
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_X550:
+               /* Independent of link speed. */
+               incval = 1;
+               /* Cycles read will be interpreted as ns. */
+               shift = 0;
+               /* Fall-through */
+       case ixgbe_mac_X540:
+               IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
+               break;
+       case ixgbe_mac_82599EB:
+               incval >>= IXGBE_INCVAL_SHIFT_82599;
+               shift -= IXGBE_INCVAL_SHIFT_82599;
+               IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
+                               (1 << IXGBE_INCPER_SHIFT_82599) | incval);
+               break;
+       default:
+               /* Not supported. */
+               return;
+       }
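+
+       /*
+        * Illustrative example: at 10 Gb/s on an 82599, incval becomes
+        * 0x66666666 >> 7 = 0xCCCCCC and shift becomes 28 - 7 = 21, and
+        * TIMINCA is programmed with (1 << 24) | 0xCCCCCC.
+        */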
+
+       memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
+       memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+       memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+       adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
+       adapter->systime_tc.cc_shift = shift;
+       adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
+
+       adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
+       adapter->rx_tstamp_tc.cc_shift = shift;
+       adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+       adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
+       adapter->tx_tstamp_tc.cc_shift = shift;
+       adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+}
+
+static int
+ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+       struct ixgbe_adapter *adapter =
+                       (struct ixgbe_adapter *)dev->data->dev_private;
+
+       adapter->systime_tc.nsec += delta;
+       adapter->rx_tstamp_tc.nsec += delta;
+       adapter->tx_tstamp_tc.nsec += delta;
+
+       return 0;
+}
+
+static int
+ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+       uint64_t ns;
+       struct ixgbe_adapter *adapter =
+                       (struct ixgbe_adapter *)dev->data->dev_private;
+
+       ns = rte_timespec_to_ns(ts);
+       /* Set the timecounters to a new value. */
+       adapter->systime_tc.nsec = ns;
+       adapter->rx_tstamp_tc.nsec = ns;
+       adapter->tx_tstamp_tc.nsec = ns;
+
+       return 0;
+}
+
+static int
+ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+       uint64_t ns, systime_cycles;
+       struct ixgbe_adapter *adapter =
+                       (struct ixgbe_adapter *)dev->data->dev_private;
+
+       systime_cycles = ixgbe_read_systime_cyclecounter(dev);
+       ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
+       *ts = rte_ns_to_timespec(ns);
+
+       return 0;
+}
+
+static int
+ixgbe_timesync_enable(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t tsync_ctl;
+       uint32_t tsauxc;
+
+       /* Stop the timesync system time. */
+       IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
+       /* Reset the timesync system time value. */
+       IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
+       IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);
+
+       /* Enable system time for platforms where it isn't on by default. */
+       tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
+       tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
+       IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
+
+       ixgbe_start_timecounters(dev);
+
+       /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+       IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
+                       (ETHER_TYPE_1588 |
+                        IXGBE_ETQF_FILTER_EN |
+                        IXGBE_ETQF_1588));
+
+       /* Enable timestamping of received PTP packets. */
+       tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+       tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
+       IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
+
+       /* Enable timestamping of transmitted PTP packets. */
+       tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
+       tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
+       IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
+
+       IXGBE_WRITE_FLUSH(hw);
+
+       return 0;
+}
+
+static int
+ixgbe_timesync_disable(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t tsync_ctl;
+
+       /* Disable timestamping of transmitted PTP packets. */
+       tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
+       tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
+       IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
+
+       /* Disable timestamping of received PTP packets. */
+       tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+       tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
+       IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
+
+       /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+       IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
+
+       /* Stop incrementing the System Time registers. */
+       IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
+
+       return 0;
+}
+
+static int
+ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+                                struct timespec *timestamp,
+                                uint32_t flags __rte_unused)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_adapter *adapter =
+               (struct ixgbe_adapter *)dev->data->dev_private;
+       uint32_t tsync_rxctl;
+       uint64_t rx_tstamp_cycles;
+       uint64_t ns;
+
+       tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+       if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
+               return -EINVAL;
+
+       rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
+       ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
+       *timestamp = rte_ns_to_timespec(ns);
+
+       return 0;
+}
+
+static int
+ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+                                struct timespec *timestamp)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_adapter *adapter =
+               (struct ixgbe_adapter *)dev->data->dev_private;
+       uint32_t tsync_txctl;
+       uint64_t tx_tstamp_cycles;
+       uint64_t ns;
+
+       tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
+       if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
+               return -EINVAL;
+
+       tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
+       ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
+       *timestamp = rte_ns_to_timespec(ns);
+
+       return 0;
+}
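+
+/*
+ * Typical application-level usage of the timesync callbacks above goes
+ * through the generic ethdev API; a minimal sketch:
+ *
+ *     struct timespec ts;
+ *
+ *     rte_eth_timesync_enable(port_id);
+ *     ...
+ *     if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
+ *             printf("Rx timestamp: %jd.%09ld\n",
+ *                    (intmax_t)ts.tv_sec, ts.tv_nsec);
+ */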
+
+static int
+ixgbe_get_reg_length(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int count = 0;
+       int g_ind = 0;
+       const struct reg_info *reg_group;
+       const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
+                                   ixgbe_regs_mac_82598EB : ixgbe_regs_others;
+
+       while ((reg_group = reg_set[g_ind++]))
+               count += ixgbe_regs_group_count(reg_group);
+
+       return count;
+}
+
+static int
+ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
+{
+       int count = 0;
+       int g_ind = 0;
+       const struct reg_info *reg_group;
+
+       while ((reg_group = ixgbevf_regs[g_ind++]))
+               count += ixgbe_regs_group_count(reg_group);
+
+       return count;
+}
+
+static int
+ixgbe_get_regs(struct rte_eth_dev *dev,
+             struct rte_dev_reg_info *regs)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t *data = regs->data;
+       int g_ind = 0;
+       int count = 0;
+       const struct reg_info *reg_group;
+       const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
+                                   ixgbe_regs_mac_82598EB : ixgbe_regs_others;
+
+       /* Support only full register dump */
+       if ((regs->length == 0) ||
+           (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
+               regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
+                       hw->device_id;
+               while ((reg_group = reg_set[g_ind++]))
+                       count += ixgbe_read_regs_group(dev, &data[count],
+                               reg_group);
+               return 0;
+       }
+
+       return -ENOTSUP;
+}
+
+static int
+ixgbevf_get_regs(struct rte_eth_dev *dev,
+               struct rte_dev_reg_info *regs)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t *data = regs->data;
+       int g_ind = 0;
+       int count = 0;
+       const struct reg_info *reg_group;
+
+       /* Support only full register dump */
+       if ((regs->length == 0) ||
+           (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
+               regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
+                       hw->device_id;
+               while ((reg_group = ixgbevf_regs[g_ind++]))
+                       count += ixgbe_read_regs_group(dev, &data[count],
+                                                     reg_group);
+               return 0;
+       }
+
+       return -ENOTSUP;
+}
+
+static int
+ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* Return unit is byte count */
+       return hw->eeprom.word_size * 2;
+}
+
+static int
+ixgbe_get_eeprom(struct rte_eth_dev *dev,
+               struct rte_dev_eeprom_info *in_eeprom)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+       uint16_t *data = in_eeprom->data;
+       int first, length;
+
+       first = in_eeprom->offset >> 1;
+       length = in_eeprom->length >> 1;
+       if ((first > hw->eeprom.word_size) ||
+           ((first + length) > hw->eeprom.word_size))
+               return -EINVAL;
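+
+       /*
+        * Example: offset = 4 and length = 8 (in bytes) read EEPROM words
+        * 2 through 5, since the EEPROM is addressed in 16-bit words.
+        */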
+
+       in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+       return eeprom->ops.read_buffer(hw, first, length, data);
+}
+
+static int
+ixgbe_set_eeprom(struct rte_eth_dev *dev,
+               struct rte_dev_eeprom_info *in_eeprom)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+       uint16_t *data = in_eeprom->data;
+       int first, length;
+
+       first = in_eeprom->offset >> 1;
+       length = in_eeprom->length >> 1;
+       if ((first > hw->eeprom.word_size) ||
+           ((first + length) > hw->eeprom.word_size))
+               return -EINVAL;
+
+       in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+       return eeprom->ops.write_buffer(hw, first, length, data);
+}
+
+uint16_t
+ixgbe_reta_size_get(enum ixgbe_mac_type mac_type)
+{
+       switch (mac_type) {
+       case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_X550EM_a:
+               return ETH_RSS_RETA_SIZE_512;
+       case ixgbe_mac_X550_vf:
+       case ixgbe_mac_X550EM_x_vf:
+       case ixgbe_mac_X550EM_a_vf:
+               return ETH_RSS_RETA_SIZE_64;
+       default:
+               return ETH_RSS_RETA_SIZE_128;
+       }
+}
+
+uint32_t
+ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx)
+{
+       switch (mac_type) {
+       case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+       case ixgbe_mac_X550EM_a:
+               if (reta_idx < ETH_RSS_RETA_SIZE_128)
+                       return IXGBE_RETA(reta_idx >> 2);
+               else
+                       return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
+       case ixgbe_mac_X550_vf:
+       case ixgbe_mac_X550EM_x_vf:
+       case ixgbe_mac_X550EM_a_vf:
+               return IXGBE_VFRETA(reta_idx >> 2);
+       default:
+               return IXGBE_RETA(reta_idx >> 2);
+       }
+}
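+
+/*
+ * Example: on X550 the 512-entry redirection table spans two register
+ * ranges, so reta_idx 100 maps to RETA(100 >> 2) = RETA(25) while
+ * reta_idx 200 maps to ERETA((200 - 128) >> 2) = ERETA(18).
+ */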
+
+uint32_t
+ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type)
+{
+       switch (mac_type) {
+       case ixgbe_mac_X550_vf:
+       case ixgbe_mac_X550EM_x_vf:
+       case ixgbe_mac_X550EM_a_vf:
+               return IXGBE_VFMRQC;
+       default:
+               return IXGBE_MRQC;
+       }
+}
+
+uint32_t
+ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i)
+{
+       switch (mac_type) {
+       case ixgbe_mac_X550_vf:
+       case ixgbe_mac_X550EM_x_vf:
+       case ixgbe_mac_X550EM_a_vf:
+               return IXGBE_VFRSSRK(i);
+       default:
+               return IXGBE_RSSRK(i);
+       }
+}
+
+bool
+ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type)
+{
+       switch (mac_type) {
+       case ixgbe_mac_82599_vf:
+       case ixgbe_mac_X540_vf:
+               return 0;
+       default:
+               return 1;
+       }
+}
+
+static int
+ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
+                       struct rte_eth_dcb_info *dcb_info)
+{
+       struct ixgbe_dcb_config *dcb_config =
+                       IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+       struct ixgbe_dcb_tc_config *tc;
+       uint8_t i, j;
+
+       if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+               dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
+       else
+               dcb_info->nb_tcs = 1;
+
+       if (dcb_config->vt_mode) { /* vt is enabled*/
+               struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+                               &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+               for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+                       dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
+               for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+                       for (j = 0; j < dcb_info->nb_tcs; j++) {
+                               dcb_info->tc_queue.tc_rxq[i][j].base =
+                                               i * dcb_info->nb_tcs + j;
+                               dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
+                               dcb_info->tc_queue.tc_txq[i][j].base =
+                                               i * dcb_info->nb_tcs + j;
+                               dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
+                       }
+               }
+       } else { /* vt is disabled*/
+               struct rte_eth_dcb_rx_conf *rx_conf =
+                               &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+               for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+                       dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
+               if (dcb_info->nb_tcs == ETH_4_TCS) {
+                       for (i = 0; i < dcb_info->nb_tcs; i++) {
+                               dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
+                               dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+                       }
+                       dcb_info->tc_queue.tc_txq[0][0].base = 0;
+                       dcb_info->tc_queue.tc_txq[0][1].base = 64;
+                       dcb_info->tc_queue.tc_txq[0][2].base = 96;
+                       dcb_info->tc_queue.tc_txq[0][3].base = 112;
+                       dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
+                       dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+                       dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+                       dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+               } else if (dcb_info->nb_tcs == ETH_8_TCS) {
+                       for (i = 0; i < dcb_info->nb_tcs; i++) {
+                               dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
+                               dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+                       }
+                       dcb_info->tc_queue.tc_txq[0][0].base = 0;
+                       dcb_info->tc_queue.tc_txq[0][1].base = 32;
+                       dcb_info->tc_queue.tc_txq[0][2].base = 64;
+                       dcb_info->tc_queue.tc_txq[0][3].base = 80;
+                       dcb_info->tc_queue.tc_txq[0][4].base = 96;
+                       dcb_info->tc_queue.tc_txq[0][5].base = 104;
+                       dcb_info->tc_queue.tc_txq[0][6].base = 112;
+                       dcb_info->tc_queue.tc_txq[0][7].base = 120;
+                       dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
+                       dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+                       dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+                       dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+                       dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
+                       dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
+                       dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
+                       dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
+               }
+       }
+       for (i = 0; i < dcb_info->nb_tcs; i++) {
+               tc = &dcb_config->tc_config[i];
+               dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
+       }
+       return 0;
+}
+
+/* Update e-tag ether type */
+static int
+ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw,
+                           uint16_t ether_type)
+{
+       uint32_t etag_etype;
+
+       if (hw->mac.type != ixgbe_mac_X550 &&
+           hw->mac.type != ixgbe_mac_X550EM_x) {
+               return -ENOTSUP;
+       }
+
+       etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
+       etag_etype &= ~IXGBE_ETAG_ETYPE_MASK;
+       etag_etype |= ether_type;
+       IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return 0;
+}
+
+/* Config l2 tunnel ether type */
+static int
+ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev,
+                                 struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+       int ret = 0;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (l2_tunnel == NULL)
+               return -EINVAL;
+
+       switch (l2_tunnel->l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* Enable e-tag tunnel */
+static int
+ixgbe_e_tag_enable(struct ixgbe_hw *hw)
+{
+       uint32_t etag_etype;
+
+       if (hw->mac.type != ixgbe_mac_X550 &&
+           hw->mac.type != ixgbe_mac_X550EM_x) {
+               return -ENOTSUP;
+       }
+
+       etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
+       etag_etype |= IXGBE_ETAG_ETYPE_VALID;
+       IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return 0;
+}
+
+/* Enable l2 tunnel */
+static int
+ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev,
+                          enum rte_eth_tunnel_type l2_tunnel_type)
+{
+       int ret = 0;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       switch (l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = ixgbe_e_tag_enable(hw);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* Disable e-tag tunnel */
+static int
+ixgbe_e_tag_disable(struct ixgbe_hw *hw)
+{
+       uint32_t etag_etype;
+
+       if (hw->mac.type != ixgbe_mac_X550 &&
+           hw->mac.type != ixgbe_mac_X550EM_x) {
+               return -ENOTSUP;
+       }
+
+       etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
+       etag_etype &= ~IXGBE_ETAG_ETYPE_VALID;
+       IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return 0;
+}
+
+/* Disable l2 tunnel */
+static int
+ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
+                           enum rte_eth_tunnel_type l2_tunnel_type)
+{
+       int ret = 0;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       switch (l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = ixgbe_e_tag_disable(hw);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+static int
+ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
+                      struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+       int ret = 0;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t i, rar_entries;
+       uint32_t rar_low, rar_high;
+
+       if (hw->mac.type != ixgbe_mac_X550 &&
+           hw->mac.type != ixgbe_mac_X550EM_x) {
+               return -ENOTSUP;
+       }
+
+       rar_entries = ixgbe_get_num_rx_addrs(hw);
+
+       for (i = 1; i < rar_entries; i++) {
+               rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
+               rar_low  = IXGBE_READ_REG(hw, IXGBE_RAL(i));
+               if ((rar_high & IXGBE_RAH_AV) &&
+                   (rar_high & IXGBE_RAH_ADTYPE) &&
+                   ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
+                    l2_tunnel->tunnel_id)) {
+                       IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
+                       IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+
+                       ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);
+
+                       return ret;
+               }
+       }
+
+       return ret;
+}
+
+static int
+ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
+                      struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+       int ret = 0;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t i, rar_entries;
+       uint32_t rar_low, rar_high;
+
+       if (hw->mac.type != ixgbe_mac_X550 &&
+           hw->mac.type != ixgbe_mac_X550EM_x) {
+               return -ENOTSUP;
+       }
+
+       /* One entry for one tunnel. Try to remove potential existing entry. */
+       ixgbe_e_tag_filter_del(dev, l2_tunnel);
+
+       rar_entries = ixgbe_get_num_rx_addrs(hw);
+
+       for (i = 1; i < rar_entries; i++) {
+               rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
+               if (rar_high & IXGBE_RAH_AV) {
+                       continue;
+               } else {
+                       ixgbe_set_vmdq(hw, i, l2_tunnel->pool);
+                       rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE;
+                       rar_low = l2_tunnel->tunnel_id;
+
+                       IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low);
+                       IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high);
+
+                       return ret;
+               }
+       }
+
+       PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rules is full."
+                    " Please remove a rule before adding a new one.");
+       return -EINVAL;
+}
+
+/* Add l2 tunnel filter */
+static int
+ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
+                              struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+       int ret = 0;
+
+       switch (l2_tunnel->l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* Delete l2 tunnel filter */
+static int
+ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
+                              struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+       int ret = 0;
+
+       switch (l2_tunnel->l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/**
+ * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op: operation to be taken
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ */
+static int
+ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
+                                 enum rte_filter_op filter_op,
+                                 void *arg)
+{
+       int ret = 0;
+
+       if (filter_op == RTE_ETH_FILTER_NOP)
+               return 0;
+
+       if (arg == NULL) {
+               PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+                           filter_op);
+               return -EINVAL;
+       }
+
+       switch (filter_op) {
+       case RTE_ETH_FILTER_ADD:
+               ret = ixgbe_dev_l2_tunnel_filter_add
+                       (dev,
+                        (struct rte_eth_l2_tunnel_conf *)arg);
+               break;
+       case RTE_ETH_FILTER_DELETE:
+               ret = ixgbe_dev_l2_tunnel_filter_del
+                       (dev,
+                        (struct rte_eth_l2_tunnel_conf *)arg);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+               ret = -EINVAL;
+               break;
+       }
+       return ret;
+}
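+
+/*
+ * Illustrative usage from an application, through the generic filter API
+ * (the field values below are examples only):
+ *
+ *     struct rte_eth_l2_tunnel_conf conf = {
+ *             .l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
+ *             .tunnel_id = 0x123,
+ *             .pool = 1,
+ *     };
+ *
+ *     rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_L2_TUNNEL,
+ *                             RTE_ETH_FILTER_ADD, &conf);
+ */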
+
+static int
+ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
+{
+       int ret = 0;
+       uint32_t ctrl;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (hw->mac.type != ixgbe_mac_X550 &&
+           hw->mac.type != ixgbe_mac_X550EM_x) {
+               return -ENOTSUP;
+       }
+
+       ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+       ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
+       if (en)
+               ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
+       IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
+
+       return ret;
+}
+
+/* Enable l2 tunnel forwarding */
+static int
+ixgbe_dev_l2_tunnel_forwarding_enable
+       (struct rte_eth_dev *dev,
+        enum rte_eth_tunnel_type l2_tunnel_type)
+{
+       int ret = 0;
+
+       switch (l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* Disable l2 tunnel forwarding */
+static int
+ixgbe_dev_l2_tunnel_forwarding_disable
+       (struct rte_eth_dev *dev,
+        enum rte_eth_tunnel_type l2_tunnel_type)
+{
+       int ret = 0;
+
+       switch (l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+static int
+ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
+                            struct rte_eth_l2_tunnel_conf *l2_tunnel,
+                            bool en)
+{
+       int ret = 0;
+       uint32_t vmtir, vmvir;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (l2_tunnel->vf_id >= dev->pci_dev->max_vfs) {
+               PMD_DRV_LOG(ERR,
+                           "VF id %u should be less than %u",
+                           l2_tunnel->vf_id,
+                           dev->pci_dev->max_vfs);
+               return -EINVAL;
+       }
+
+       if (hw->mac.type != ixgbe_mac_X550 &&
+           hw->mac.type != ixgbe_mac_X550EM_x) {
+               return -ENOTSUP;
+       }
+
+       if (en)
+               vmtir = l2_tunnel->tunnel_id;
+       else
+               vmtir = 0;
+
+       IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir);
+
+       vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id));
+       vmvir &= ~IXGBE_VMVIR_TAGA_MASK;
+       if (en)
+               vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT;
+       IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir);
+
+       return ret;
+}
+
+/* Enable l2 tunnel tag insertion */
+static int
+ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
+                                    struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+       int ret = 0;
+
+       switch (l2_tunnel->l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* Disable l2 tunnel tag insertion */
+static int
+ixgbe_dev_l2_tunnel_insertion_disable
+       (struct rte_eth_dev *dev,
+        struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+       int ret = 0;
+
+       switch (l2_tunnel->l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+static int
+ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
+                            bool en)
+{
+       int ret = 0;
+       uint32_t qde;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (hw->mac.type != ixgbe_mac_X550 &&
+           hw->mac.type != ixgbe_mac_X550EM_x) {
+               return -ENOTSUP;
+       }
+
+       qde = IXGBE_READ_REG(hw, IXGBE_QDE);
+       if (en)
+               qde |= IXGBE_QDE_STRIP_TAG;
+       else
+               qde &= ~IXGBE_QDE_STRIP_TAG;
+       qde &= ~IXGBE_QDE_READ;
+       qde |= IXGBE_QDE_WRITE;
+       IXGBE_WRITE_REG(hw, IXGBE_QDE, qde);
+
+       return ret;
+}
+
+/* Enable l2 tunnel tag stripping */
+static int
+ixgbe_dev_l2_tunnel_stripping_enable
+       (struct rte_eth_dev *dev,
+        enum rte_eth_tunnel_type l2_tunnel_type)
+{
+       int ret = 0;
+
+       switch (l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = ixgbe_e_tag_stripping_en_dis(dev, 1);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* Disable l2 tunnel tag stripping */
+static int
+ixgbe_dev_l2_tunnel_stripping_disable
+       (struct rte_eth_dev *dev,
+        enum rte_eth_tunnel_type l2_tunnel_type)
+{
+       int ret = 0;
+
+       switch (l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = ixgbe_e_tag_stripping_en_dis(dev, 0);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* Enable/disable l2 tunnel offload functions */
+static int
+ixgbe_dev_l2_tunnel_offload_set
+       (struct rte_eth_dev *dev,
+        struct rte_eth_l2_tunnel_conf *l2_tunnel,
+        uint32_t mask,
+        uint8_t en)
+{
+       int ret = 0;
+
+       if (l2_tunnel == NULL)
+               return -EINVAL;
+
+       ret = -EINVAL;
+       if (mask & ETH_L2_TUNNEL_ENABLE_MASK) {
+               if (en)
+                       ret = ixgbe_dev_l2_tunnel_enable(
+                               dev,
+                               l2_tunnel->l2_tunnel_type);
+               else
+                       ret = ixgbe_dev_l2_tunnel_disable(
+                               dev,
+                               l2_tunnel->l2_tunnel_type);
+       }
+
+       if (mask & ETH_L2_TUNNEL_INSERTION_MASK) {
+               if (en)
+                       ret = ixgbe_dev_l2_tunnel_insertion_enable(
+                               dev,
+                               l2_tunnel);
+               else
+                       ret = ixgbe_dev_l2_tunnel_insertion_disable(
+                               dev,
+                               l2_tunnel);
+       }
+
+       if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) {
+               if (en)
+                       ret = ixgbe_dev_l2_tunnel_stripping_enable(
+                               dev,
+                               l2_tunnel->l2_tunnel_type);
+               else
+                       ret = ixgbe_dev_l2_tunnel_stripping_disable(
+                               dev,
+                               l2_tunnel->l2_tunnel_type);
+       }
+
+       if (mask & ETH_L2_TUNNEL_FORWARDING_MASK) {
+               if (en)
+                       ret = ixgbe_dev_l2_tunnel_forwarding_enable(
+                               dev,
+                               l2_tunnel->l2_tunnel_type);
+               else
+                       ret = ixgbe_dev_l2_tunnel_forwarding_disable(
+                               dev,
+                               l2_tunnel->l2_tunnel_type);
+       }
+
+       return ret;
+}
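+
+/*
+ * Illustrative example: to turn on E-tag stripping and forwarding in one
+ * call, pass mask = ETH_L2_TUNNEL_STRIPPING_MASK |
+ * ETH_L2_TUNNEL_FORWARDING_MASK with en = 1; note that only the result of
+ * the last mask bit handled above is returned to the caller.
+ */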
+
+static int
+ixgbe_update_vxlan_port(struct ixgbe_hw *hw,
+                       uint16_t port)
+{
+       IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, port);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return 0;
+}
+
+/* There is only one register for the VxLAN UDP port, so we cannot add
+ * several ports; we just update the value it holds.
+ */
+static int
+ixgbe_add_vxlan_port(struct ixgbe_hw *hw,
+                    uint16_t port)
+{
+       if (port == 0) {
+               PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
+               return -EINVAL;
+       }
+
+       return ixgbe_update_vxlan_port(hw, port);
+}
+
+/* We cannot really delete the VxLAN port: since there is a single register
+ * for the VxLAN UDP port, it must always hold a value.
+ * So reset it to the original value 0.
+ */
+static int
+ixgbe_del_vxlan_port(struct ixgbe_hw *hw,
+                    uint16_t port)
+{
+       uint16_t cur_port;
+
+       cur_port = (uint16_t)IXGBE_READ_REG(hw, IXGBE_VXLANCTRL);
+
+       if (cur_port != port) {
+               PMD_DRV_LOG(ERR, "Port %u does not exist.", port);
+               return -EINVAL;
+       }
+
+       return ixgbe_update_vxlan_port(hw, 0);
+}
+
+/* Add UDP tunneling port */
+static int
+ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+                             struct rte_eth_udp_tunnel *udp_tunnel)
+{
+       int ret = 0;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (hw->mac.type != ixgbe_mac_X550 &&
+           hw->mac.type != ixgbe_mac_X550EM_x) {
+               return -ENOTSUP;
+       }
+
+       if (udp_tunnel == NULL)
+               return -EINVAL;
+
+       switch (udp_tunnel->prot_type) {
+       case RTE_TUNNEL_TYPE_VXLAN:
+               ret = ixgbe_add_vxlan_port(hw, udp_tunnel->udp_port);
+               break;
+
+       case RTE_TUNNEL_TYPE_GENEVE:
+       case RTE_TUNNEL_TYPE_TEREDO:
+               PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
+               ret = -EINVAL;
+               break;
+
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* Remove UDP tunneling port */
+static int
+ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+                             struct rte_eth_udp_tunnel *udp_tunnel)
+{
+       int ret = 0;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (hw->mac.type != ixgbe_mac_X550 &&
+           hw->mac.type != ixgbe_mac_X550EM_x) {
+               return -ENOTSUP;
+       }
+
+       if (udp_tunnel == NULL)
+               return -EINVAL;
+
+       switch (udp_tunnel->prot_type) {
+       case RTE_TUNNEL_TYPE_VXLAN:
+               ret = ixgbe_del_vxlan_port(hw, udp_tunnel->udp_port);
+               break;
+       case RTE_TUNNEL_TYPE_GENEVE:
+       case RTE_TUNNEL_TYPE_TEREDO:
+               PMD_DRV_LOG(ERR, "Tunnel type is not supported now.");
+               ret = -EINVAL;
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* ixgbevf_update_xcast_mode - Update Multicast mode
+ * @hw: pointer to the HW structure
+ * @xcast_mode: new multicast mode
+ *
+ * Updates the multicast mode of the VF.
+ */
+static int ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
+                                    int xcast_mode)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       u32 msgbuf[2];
+       s32 err;
+
+       switch (hw->api_version) {
+       case ixgbe_mbox_api_12:
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
+       msgbuf[1] = xcast_mode;
+
+       err = mbx->ops.write_posted(hw, msgbuf, 2, 0);
+       if (err)
+               return err;
+
+       err = mbx->ops.read_posted(hw, msgbuf, 2, 0);
+       if (err)
+               return err;
+
+       msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+       if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
+               return -EPERM;
+
+       return 0;
+}
+
+static void
+ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);
+}
+
+static void
+ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE);
+}
+
 static struct rte_driver rte_ixgbe_driver = {
        .type = PMD_PDEV,
        .init = rte_ixgbe_pmd_init,