tailq: remove unneeded inclusions
[dpdk.git] lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index 01c69e9..06da231 100644
@@ -1,13 +1,13 @@
 /*-
  *   BSD LICENSE
- * 
+ *
  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
- * 
+ *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
  *   are met:
- * 
+ *
  *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above copyright
@@ -17,7 +17,7 @@
  *     * Neither the name of Intel Corporation nor the names of its
  *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
- * 
+ *
  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -39,6 +39,7 @@
 #include <unistd.h>
 #include <stdarg.h>
 #include <inttypes.h>
+#include <netinet/in.h>
 #include <rte_byteorder.h>
 #include <rte_common.h>
 #include <rte_cycles.h>
 #include <rte_branch_prediction.h>
 #include <rte_memory.h>
 #include <rte_memzone.h>
-#include <rte_tailq.h>
 #include <rte_eal.h>
 #include <rte_alarm.h>
 #include <rte_ether.h>
 #include <rte_ethdev.h>
 #include <rte_atomic.h>
 #include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_dev.h>
 
 #include "ixgbe_logs.h"
 #include "ixgbe/ixgbe_api.h"
@@ -65,6 +67,7 @@
 #include "ixgbe/ixgbe_common.h"
 #include "ixgbe_ethdev.h"
 #include "ixgbe_bypass.h"
+#include "ixgbe_rxtx.h"
 
 /*
  * High threshold controlling when to start sending XOFF frames. Must be at
 #define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
 #define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
 
+#define IXGBE_MMW_SIZE_DEFAULT        0x4
+#define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
+
+/*
+ *  Default values for RX/TX configuration
+ */
+#define IXGBE_DEFAULT_RX_FREE_THRESH  32
+#define IXGBE_DEFAULT_RX_PTHRESH      8
+#define IXGBE_DEFAULT_RX_HTHRESH      8
+#define IXGBE_DEFAULT_RX_WTHRESH      0
+
+#define IXGBE_DEFAULT_TX_FREE_THRESH  32
+#define IXGBE_DEFAULT_TX_PTHRESH      32
+#define IXGBE_DEFAULT_TX_HTHRESH      0
+#define IXGBE_DEFAULT_TX_WTHRESH      0
+#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32
+
+/* Bit shift and mask */
+#define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
+#define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
+#define IXGBE_8_BIT_WIDTH  CHAR_BIT
+#define IXGBE_8_BIT_MASK   UINT8_MAX
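
For reference on the mask math: RTE_LEN2MASK(n, t) in rte_common.h builds an
n-bit all-ones value of type t, so IXGBE_4_BIT_MASK works out to 0x0f. A
minimal standalone sketch of the same arithmetic (LEN2MASK below is an
illustrative copy, not the driver's code):

	#include <limits.h>	/* CHAR_BIT */
	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative n-bit all-ones mask, after rte_common.h's helper. */
	#define LEN2MASK(ln, tp) \
		((tp)((uint64_t)-1 >> (sizeof(uint64_t) * CHAR_BIT - (ln))))

	int main(void)
	{
		printf("4-bit mask: 0x%02x\n",
		       (unsigned)LEN2MASK(CHAR_BIT / 2, uint8_t)); /* 0x0f */
		printf("8-bit mask: 0x%02x\n", (unsigned)UINT8_MAX); /* 0xff */
		return 0;
	}
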
 
 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
 
 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
 
-static int eth_ixgbe_dev_init(struct eth_driver *eth_drv,
-               struct rte_eth_dev *eth_dev);
+static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
 static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
 static int  ixgbe_dev_start(struct rte_eth_dev *dev);
 static void ixgbe_dev_stop(struct rte_eth_dev *dev);
+static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
+static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
 static void ixgbe_dev_close(struct rte_eth_dev *dev);
 static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
 static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
@@ -111,11 +137,15 @@ static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                             uint8_t stat_idx,
                                             uint8_t is_rx);
 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
-                               struct rte_eth_dev_info *dev_info);
+                              struct rte_eth_dev_info *dev_info);
+static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
+                                struct rte_eth_dev_info *dev_info);
+static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
 static void ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
-static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, 
+static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
                uint16_t queue, bool on);
 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
                int on);
@@ -127,14 +157,18 @@ static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
 
 static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
 static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
-static int  ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
-               struct rte_eth_fc_conf *fc_conf);
+static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
+                              struct rte_eth_fc_conf *fc_conf);
+static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
+                              struct rte_eth_fc_conf *fc_conf);
 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                struct rte_eth_pfc_conf *pfc_conf);
 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
-               struct rte_eth_rss_reta *reta_conf);
+                       struct rte_eth_rss_reta_entry64 *reta_conf,
+                       uint16_t reta_size);
 static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
-               struct rte_eth_rss_reta *reta_conf);    
+                       struct rte_eth_rss_reta_entry64 *reta_conf,
+                       uint16_t reta_size);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
@@ -148,8 +182,7 @@ static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
 static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config);
 
 /* For Virtual Function support */
-static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv,
-               struct rte_eth_dev *eth_dev);
+static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
 static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
 static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
@@ -158,7 +191,7 @@ static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
 static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *stats);
 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
-static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, 
+static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
                uint16_t queue, int on);
@@ -169,18 +202,61 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
                ether_addr* mac_addr,uint8_t on);
 static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on);
-static int  ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev,  uint16_t pool, 
+static int  ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev,  uint16_t pool,
                uint16_t rx_mask, uint8_t on);
 static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
 static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
-static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan, 
+static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
                uint64_t pool_mask,uint8_t vlan_on);
-static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev, 
-               struct rte_eth_vmdq_mirror_conf *mirror_conf, 
+static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
+               struct rte_eth_vmdq_mirror_conf *mirror_conf,
                uint8_t rule_id, uint8_t on);
 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
                uint8_t rule_id);
 
+static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+               uint16_t queue_idx, uint16_t tx_rate);
+static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+               uint16_t tx_rate, uint64_t q_msk);
+
+static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
+                                struct ether_addr *mac_addr,
+                                uint32_t index, uint32_t pool);
+static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
+static int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
+                       struct rte_eth_syn_filter *filter,
+                       bool add);
+static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
+                       struct rte_eth_syn_filter *filter);
+static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
+                       enum rte_filter_op filter_op,
+                       void *arg);
+static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
+                       struct ixgbe_5tuple_filter *filter);
+static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+                       struct ixgbe_5tuple_filter *filter);
+static int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
+                       struct rte_eth_ntuple_filter *filter,
+                       bool add);
+static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
+                               enum rte_filter_op filter_op,
+                               void *arg);
+static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
+                       struct rte_eth_ntuple_filter *filter);
+static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+                       struct rte_eth_ethertype_filter *filter,
+                       bool add);
+static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
+                               enum rte_filter_op filter_op,
+                               void *arg);
+static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
+                       struct rte_eth_ethertype_filter *filter);
+static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
+                    enum rte_filter_type filter_type,
+                    enum rte_filter_op filter_op,
+                    void *arg);
+static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
+
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
  */
@@ -205,13 +281,13 @@ static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
                uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] |= 1 << bit;\
        }while(0)
-       
+
 #define IXGBE_CLEAR_HWSTRIP(h, q) do{\
                uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] &= ~(1 << bit);\
        }while(0)
+
 #define IXGBE_GET_HWSTRIP(h, q, r) do{\
                uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
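
The three HWSTRIP macros keep one bit per RX queue in an array of 32-bit
words, with NBBY the classic bits-per-byte constant (8). A small standalone
sketch of the same index/bit arithmetic:

	#include <stdint.h>
	#include <stdio.h>

	#define NBBY 8	/* bits per byte, as in the driver */

	int main(void)
	{
		uint32_t bitmap[4] = {0};
		uint16_t q = 35;	/* example queue number */

		uint32_t idx = q / (sizeof(bitmap[0]) * NBBY);	/* 35 / 32 = 1 */
		uint32_t bit = q % (sizeof(bitmap[0]) * NBBY);	/* 35 % 32 = 3 */

		bitmap[idx] |= 1u << bit;	/* IXGBE_SET_HWSTRIP   */
		printf("queue %u -> word %u, bit %u\n",
		       (unsigned)q, (unsigned)idx, (unsigned)bit);
		bitmap[idx] &= ~(1u << bit);	/* IXGBE_CLEAR_HWSTRIP */
		return 0;
	}
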
@@ -245,6 +321,8 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .dev_configure        = ixgbe_dev_configure,
        .dev_start            = ixgbe_dev_start,
        .dev_stop             = ixgbe_dev_stop,
+       .dev_set_link_up    = ixgbe_dev_set_link_up,
+       .dev_set_link_down  = ixgbe_dev_set_link_down,
        .dev_close            = ixgbe_dev_close,
        .promiscuous_enable   = ixgbe_dev_promiscuous_enable,
        .promiscuous_disable  = ixgbe_dev_promiscuous_disable,
@@ -255,10 +333,15 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .stats_reset          = ixgbe_dev_stats_reset,
        .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
        .dev_infos_get        = ixgbe_dev_info_get,
+       .mtu_set              = ixgbe_dev_mtu_set,
        .vlan_filter_set      = ixgbe_vlan_filter_set,
        .vlan_tpid_set        = ixgbe_vlan_tpid_set,
        .vlan_offload_set     = ixgbe_vlan_offload_set,
        .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
+       .rx_queue_start       = ixgbe_dev_rx_queue_start,
+       .rx_queue_stop        = ixgbe_dev_rx_queue_stop,
+       .tx_queue_start       = ixgbe_dev_tx_queue_start,
+       .tx_queue_stop        = ixgbe_dev_tx_queue_stop,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .rx_queue_count       = ixgbe_dev_rx_queue_count,
@@ -267,26 +350,21 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
        .dev_led_on           = ixgbe_dev_led_on,
        .dev_led_off          = ixgbe_dev_led_off,
+       .flow_ctrl_get        = ixgbe_flow_ctrl_get,
        .flow_ctrl_set        = ixgbe_flow_ctrl_set,
        .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
        .mac_addr_add         = ixgbe_add_rar,
        .mac_addr_remove      = ixgbe_remove_rar,
        .uc_hash_table_set    = ixgbe_uc_hash_table_set,
        .uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
-       .mirror_rule_set        = ixgbe_mirror_rule_set,
-       .mirror_rule_reset      = ixgbe_mirror_rule_reset,
+       .mirror_rule_set      = ixgbe_mirror_rule_set,
+       .mirror_rule_reset    = ixgbe_mirror_rule_reset,
        .set_vf_rx_mode       = ixgbe_set_pool_rx_mode,
        .set_vf_rx            = ixgbe_set_pool_rx,
        .set_vf_tx            = ixgbe_set_pool_tx,
        .set_vf_vlan_filter   = ixgbe_set_pool_vlan_filter,
-       .fdir_add_signature_filter    = ixgbe_fdir_add_signature_filter,
-       .fdir_update_signature_filter = ixgbe_fdir_update_signature_filter,
-       .fdir_remove_signature_filter = ixgbe_fdir_remove_signature_filter,
-       .fdir_infos_get               = ixgbe_fdir_info_get,
-       .fdir_add_perfect_filter      = ixgbe_fdir_add_perfect_filter,
-       .fdir_update_perfect_filter   = ixgbe_fdir_update_perfect_filter,
-       .fdir_remove_perfect_filter   = ixgbe_fdir_remove_perfect_filter,
-       .fdir_set_masks               = ixgbe_fdir_set_masks,
+       .set_queue_rate_limit = ixgbe_set_queue_rate_limit,
+       .set_vf_rate_limit    = ixgbe_set_vf_rate_limit,
        .reta_update          = ixgbe_dev_rss_reta_update,
        .reta_query           = ixgbe_dev_rss_reta_query,
 #ifdef RTE_NIC_BYPASS
@@ -300,6 +378,9 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .bypass_ver_show      = ixgbe_bypass_ver_show,
        .bypass_wd_reset      = ixgbe_bypass_wd_reset,
 #endif /* RTE_NIC_BYPASS */
+       .rss_hash_update      = ixgbe_dev_rss_hash_update,
+       .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
+       .filter_ctrl          = ixgbe_dev_filter_ctrl,
 };
 
 /*
@@ -315,7 +396,8 @@ static struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .stats_get            = ixgbevf_dev_stats_get,
        .stats_reset          = ixgbevf_dev_stats_reset,
        .dev_close            = ixgbevf_dev_close,
-       .dev_infos_get        = ixgbe_dev_info_get,
+       .dev_infos_get        = ixgbevf_dev_info_get,
+       .mtu_set              = ixgbevf_dev_set_mtu,
        .vlan_filter_set      = ixgbevf_vlan_filter_set,
        .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
        .vlan_offload_set     = ixgbevf_vlan_offload_set,
@@ -323,6 +405,8 @@ static struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
+       .mac_addr_add         = ixgbevf_add_mac_addr,
+       .mac_addr_remove      = ixgbevf_remove_mac_addr,
 };
 
 /**
@@ -418,9 +502,9 @@ ixgbe_enable_intr(struct rte_eth_dev *dev)
 {
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-       struct ixgbe_hw *hw = 
+       struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       
+
        IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
        IXGBE_WRITE_FLUSH(hw);
 }
@@ -479,15 +563,19 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
        uint32_t q_map;
        uint8_t n, offset;
 
-       if ((hw->mac.type != ixgbe_mac_82599EB) && (hw->mac.type != ixgbe_mac_X540))
+       if ((hw->mac.type != ixgbe_mac_82599EB) &&
+               (hw->mac.type != ixgbe_mac_X540) &&
+               (hw->mac.type != ixgbe_mac_X550) &&
+               (hw->mac.type != ixgbe_mac_X550EM_x))
                return -ENOSYS;
 
-       PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d\n",
-                    (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx);
+       PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d",
+                    (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+                    queue_id, stat_idx);
 
        n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
        if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
-               PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded\n");
+               PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
                return -EIO;
        }
        offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
@@ -507,19 +595,20 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
        else
                stat_mappings->rqsmr[n] |= qsmr_mask;
 
-       PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d\n"
-                    "%s[%d] = 0x%08x\n",
-                    (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx,
-                    is_rx ? "RQSMR" : "TQSM",n, is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
+       PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d",
+                    (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+                    queue_id, stat_idx);
+       PMD_INIT_LOG(INFO, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
+                    is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
 
        /* Now write the mapping in the appropriate register */
        if (is_rx) {
-               PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d\n",
+               PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d",
                             stat_mappings->rqsmr[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
        }
        else {
-               PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d\n",
+               PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d",
                             stat_mappings->tqsm[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
        }
@@ -559,7 +648,7 @@ ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
                tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
                                 (uint8_t)(100/dcb_max_tc + (i & 1));
                tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
-               tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 
+               tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
                                 (uint8_t)(100/dcb_max_tc + (i & 1));
                tc->pfc = ixgbe_dcb_pfc_disabled;
        }
@@ -579,30 +668,66 @@ ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
        /* support all DCB capabilities in 82599 */
        dcb_config->support.capabilities = 0xFF;
 
-       /*we only support 4 Tcs for X540*/              
-       if (hw->mac.type == ixgbe_mac_X540) {
+       /* we only support 4 TCs for X540, X550 */
+       if (hw->mac.type == ixgbe_mac_X540 ||
+               hw->mac.type == ixgbe_mac_X550 ||
+               hw->mac.type == ixgbe_mac_X550EM_x) {
                dcb_config->num_tcs.pg_tcs = 4;
                dcb_config->num_tcs.pfc_tcs = 4;
        }
-} 
+}
+
+/*
+ * Ensure that all locks are released before first NVM or PHY access
+ */
+static void
+ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
+{
+       uint16_t mask;
+
+       /*
+        * The PHY lock should not fail at this early stage. If it does, it
+        * is due to an improper exit of the application, so force the
+        * release of the faulty lock. Release of the common lock is done
+        * automatically by the swfw_sync function.
+        */
+       mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
+       if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
+               PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
+       }
+       ixgbe_release_swfw_semaphore(hw, mask);
+
+       /*
+        * These ones are trickier since they are common to all ports; but
+        * the swfw_sync retries last long enough (1s) to be almost sure
+        * that, if the lock cannot be taken, it is due to an improper lock
+        * of the semaphore.
+        */
+       mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
+       if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
+               PMD_DRV_LOG(DEBUG, "SWFW common locks released");
+       }
+       ixgbe_release_swfw_semaphore(hw, mask);
+}
 
 /*
  * This function is based on code in ixgbe_attach() in ixgbe/ixgbe.c.
  * It returns 0 on success.
  */
 static int
-eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
-                    struct rte_eth_dev *eth_dev)
+eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 {
        struct rte_pci_device *pci_dev;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_vfta * shadow_vfta =
                IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
-       struct ixgbe_hwstrip *hwstrip = 
+       struct ixgbe_hwstrip *hwstrip =
                IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
        struct ixgbe_dcb_config *dcb_config =
                IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
+       struct ixgbe_filter_info *filter_info =
+               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
        uint32_t ctrl_ext;
        uint16_t csum;
        int diag, i;
@@ -613,10 +738,24 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
        eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
 
-       /* for secondary processes, we don't initialise any further as primary
+       /*
+        * For secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
-        * RX function */
+        * RX and TX function.
+        */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+               struct igb_tx_queue *txq;
+               /* The TX function in the primary process was set by the last
+                * queue initialized; TX queues may not have been initialized
+                * by the primary process yet. */
+               if (eth_dev->data->tx_queues) {
+                       txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
+                       set_tx_function(eth_dev, txq);
+               } else {
+                       /* Use default TX function if we get here */
+                       PMD_INIT_LOG(INFO, "No TX queues configured yet. "
+                                          "Using default TX function.");
+               }
+
                if (eth_dev->data->scattered_rx)
                        eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
                return 0;
@@ -627,11 +766,9 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
-#ifdef RTE_LIBRTE_IXGBE_ALLOW_UNSUPPORTED_SFP
        hw->allow_unsupported_sfp = 1;
-#endif
 
-       /* Initialize the shared code */
+       /* Initialize the shared code (base driver) */
 #ifdef RTE_NIC_BYPASS
        diag = ixgbe_bypass_init_shared_code(hw);
 #else
@@ -643,6 +780,12 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
                return -EIO;
        }
 
+       /* pick up the PCI bus settings for reporting later */
+       ixgbe_get_bus_info(hw);
+
+       /* Unlock any pending hardware semaphore */
+       ixgbe_swfw_lock_reset(hw);
+
        /* Initialize DCB configuration*/
        memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
        ixgbe_dcb_init(hw,dcb_config);
@@ -687,11 +830,12 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        if (diag == IXGBE_ERR_EEPROM_VERSION) {
                PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
                    "LOM.  Please be aware there may be issues associated "
-                   "with your hardware.\n If you are experiencing problems "
+                   "with your hardware.");
+               PMD_INIT_LOG(ERR, "If you are experiencing problems "
                    "please contact your Intel or hardware representative "
-                   "who provided you with this hardware.\n");
+                   "who provided you with this hardware.");
        } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
-               PMD_INIT_LOG(ERR, "Unsupported SFP+ Module\n");
+               PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
        if (diag) {
                PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
                return -EIO;
@@ -700,9 +844,6 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        /* disable interrupt */
        ixgbe_disable_intr(hw);
 
-       /* pick up the PCI bus settings for reporting later */
-       ixgbe_get_bus_info(hw);
-
        /* reset mappings for queue statistics hw counters*/
        ixgbe_reset_qstat_mappings(hw);
 
@@ -711,14 +852,15 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
                        hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
-                       "Failed to allocate %d bytes needed to store MAC addresses",
+                       "Failed to allocate %u bytes needed to store "
+                       "MAC addresses",
                        ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }
        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);
-       
+
        /* Allocate memory for storing hash filter MAC addresses */
        eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
                        IXGBE_VMDQ_NUM_UC_MAC, 0);
@@ -747,12 +889,11 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        IXGBE_WRITE_FLUSH(hw);
 
        if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
-               PMD_INIT_LOG(DEBUG,
-                            "MAC: %d, PHY: %d, SFP+: %d<n",
+               PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
                             (int) hw->mac.type, (int) hw->phy.type,
                             (int) hw->phy.sfp_type);
        else
-               PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d\n",
+               PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
                             (int) hw->mac.type, (int) hw->phy.type);
 
        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
@@ -768,25 +909,74 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        /* enable support intr */
        ixgbe_enable_intr(eth_dev);
 
+       /* initialize 5tuple filter list */
+       TAILQ_INIT(&filter_info->fivetuple_list);
+       memset(filter_info->fivetuple_mask, 0,
+               sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+
        return 0;
 }
 
+
+/*
+ * Negotiate mailbox API version with the PF.
+ * After reset, the API version is always set to the basic one (ixgbe_mbox_api_10).
+ * Then we try to negotiate starting with the most recent one.
+ * If all negotiation attempts fail, then we will proceed with
+ * the default one (ixgbe_mbox_api_10).
+ */
+static void
+ixgbevf_negotiate_api(struct ixgbe_hw *hw)
+{
+       int32_t i;
+
+       /* start with highest supported, proceed down */
+       static const enum ixgbe_pfvf_api_rev sup_ver[] = {
+               ixgbe_mbox_api_11,
+               ixgbe_mbox_api_10,
+       };
+
+       for (i = 0;
+                       i != RTE_DIM(sup_ver) &&
+                       ixgbevf_negotiate_api_version(hw, sup_ver[i]) != 0;
+                       i++)
+               ;
+}
+
+static void
+generate_random_mac_addr(struct ether_addr *mac_addr)
+{
+       uint64_t random;
+
+       /* Set Organizationally Unique Identifier (OUI) prefix. */
+       mac_addr->addr_bytes[0] = 0x00;
+       mac_addr->addr_bytes[1] = 0x09;
+       mac_addr->addr_bytes[2] = 0xC0;
+       /* Force indication of locally assigned MAC address. */
+       mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
+       /* Generate the last 3 bytes of the MAC address with a random number. */
+       random = rte_rand();
+       memcpy(&mac_addr->addr_bytes[3], &random, 3);
+}
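
The generated address keeps the 00:09:C0 OUI prefix but ORs
ETHER_LOCAL_ADMIN_ADDR (0x02 in rte_ether.h) into the first octet, marking it
locally administered so it cannot collide with a vendor-assigned address. A
standalone sketch of the same scheme, with plain rand() standing in for
rte_rand():

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define LOCAL_ADMIN_ADDR 0x02	/* as ETHER_LOCAL_ADMIN_ADDR */

	int main(void)
	{
		uint8_t mac[6] = {0x00, 0x09, 0xC0, 0, 0, 0};	/* OUI prefix */
		uint64_t random = (uint64_t)rand();	/* rte_rand() in the PMD */

		mac[0] |= LOCAL_ADMIN_ADDR;	/* 0x00 -> 0x02: locally assigned */
		memcpy(&mac[3], &random, 3);	/* low 3 random bytes */

		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		return 0;
	}
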
+
 /*
  * Virtual Function device init
  */
 static int
-eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
-                    struct rte_eth_dev *eth_dev)
+eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 {
-       struct rte_pci_device *pci_dev;
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        int diag;
+       uint32_t tc, tcs;
+       struct rte_pci_device *pci_dev;
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_vfta * shadow_vfta =
                IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
-       struct ixgbe_hwstrip *hwstrip = 
+       struct ixgbe_hwstrip *hwstrip =
                IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
+       struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
 
-       PMD_INIT_LOG(DEBUG, "eth_ixgbevf_dev_init");
+       PMD_INIT_FUNC_TRACE();
 
        eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
        eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
@@ -813,7 +1003,7 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        /* initialize the hw strip bitmap*/
        memset(hwstrip, 0, sizeof(*hwstrip));
 
-       /* Initialize the shared code */
+       /* Initialize the shared code (base driver) */
        diag = ixgbe_init_shared_code(hw);
        if (diag != IXGBE_SUCCESS) {
                PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
@@ -826,32 +1016,58 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        /* Disable the interrupts for VF */
        ixgbevf_intr_disable(hw);
 
-       hw->mac.num_rar_entries = hw->mac.max_rx_queues;
+       hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
        diag = hw->mac.ops.reset_hw(hw);
 
-       if (diag != IXGBE_SUCCESS) {
+       /*
+        * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
+        * the underlying PF driver has not assigned a MAC address to the VF.
+        * In this case, assign a random MAC address.
+        */
+       if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
                PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
-                       RTE_LOG(ERR, PMD, "\tThe MAC address is not valid.\n"
-                                       "\tThe most likely cause of this error is that the VM host\n"
-                                       "\thas not assigned a valid MAC address to this VF device.\n"
-                                       "\tPlease consult the DPDK Release Notes (FAQ section) for\n"
-                                       "\ta possible solution to this problem.\n");
                return (diag);
        }
 
+       /* negotiate mailbox API version to use with the PF. */
+       ixgbevf_negotiate_api(hw);
+
+       /* Get Rx/Tx queue count via mailbox, which is ready after reset_hw */
+       ixgbevf_get_queues(hw, &tcs, &tc);
+
        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
                        hw->mac.num_rar_entries, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
-                       "Failed to allocate %d bytes needed to store MAC addresses",
+                       "Failed to allocate %u bytes needed to store "
+                       "MAC addresses",
                        ETHER_ADDR_LEN * hw->mac.num_rar_entries);
                return -ENOMEM;
        }
 
+       /* Generate a random MAC address, if none was assigned by PF. */
+       if (is_zero_ether_addr(perm_addr)) {
+               generate_random_mac_addr(perm_addr);
+               diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
+               if (diag) {
+                       rte_free(eth_dev->data->mac_addrs);
+                       eth_dev->data->mac_addrs = NULL;
+                       return diag;
+               }
+               PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
+               PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
+                            "%02x:%02x:%02x:%02x:%02x:%02x",
+                            perm_addr->addr_bytes[0],
+                            perm_addr->addr_bytes[1],
+                            perm_addr->addr_bytes[2],
+                            perm_addr->addr_bytes[3],
+                            perm_addr->addr_bytes[4],
+                            perm_addr->addr_bytes[5]);
+       }
+
        /* Copy the permanent MAC address */
-       ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
-                       &eth_dev->data->mac_addrs[0]);
+       ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
 
        /* reset the hardware with the new settings */
        diag = hw->mac.ops.start_hw(hw);
@@ -864,9 +1080,9 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
                        return (-EIO);
        }
 
-       PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x mac.type=%s\n",
-                        eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id,
-                        "ixgbe_mac_82599_vf");
+       PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
+                    eth_dev->data->port_id, pci_dev->id.vendor_id,
+                    pci_dev->id.device_id, "ixgbe_mac_82599_vf");
 
        return 0;
 }
@@ -875,9 +1091,7 @@ static struct eth_driver rte_ixgbe_pmd = {
        {
                .name = "rte_ixgbe_pmd",
                .id_table = pci_id_ixgbe_map,
-#ifdef RTE_EAL_UNBIND_PORTS
-               .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
-#endif
+               .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        },
        .eth_dev_init = eth_ixgbe_dev_init,
        .dev_private_size = sizeof(struct ixgbe_adapter),
@@ -890,9 +1104,7 @@ static struct eth_driver rte_ixgbevf_pmd = {
        {
                .name = "rte_ixgbevf_pmd",
                .id_table = pci_id_ixgbevf_map,
-#ifdef RTE_EAL_UNBIND_PORTS
-               .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
-#endif
+               .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        },
        .eth_dev_init = eth_ixgbevf_dev_init,
        .dev_private_size = sizeof(struct ixgbe_adapter),
@@ -903,8 +1115,8 @@ static struct eth_driver rte_ixgbevf_pmd = {
  * Invoked once at EAL init time.
  * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
  */
-int
-rte_ixgbe_pmd_init(void)
+static int
+rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
 {
        PMD_INIT_FUNC_TRACE();
 
@@ -917,10 +1129,10 @@ rte_ixgbe_pmd_init(void)
 * Invoked once at EAL init time.
  * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
  */
-int
-rte_ixgbevf_pmd_init(void)
+static int
+rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
 {
-       DEBUGFUNC("rte_ixgbevf_pmd_init");
+       PMD_INIT_FUNC_TRACE();
 
        rte_eth_driver_register(&rte_ixgbevf_pmd);
        return (0);
@@ -1011,10 +1223,10 @@ ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
                IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
 }
 
-static void 
+static void
 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 {
-       struct ixgbe_hwstrip *hwstrip = 
+       struct ixgbe_hwstrip *hwstrip =
                IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
 
        if(queue >= IXGBE_MAX_RX_QUEUE_NUM)
@@ -1125,7 +1337,7 @@ ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
                        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
 
                        /* record those setting for HW strip per queue */
-                       ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);                      
+                       ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
                }
        }
 }
@@ -1235,19 +1447,22 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_vf_info *vfinfo =
+               *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
        int err, link_up = 0, negotiate = 0;
        uint32_t speed = 0;
        int mask = 0;
        int status;
-       
+       uint16_t vf, idx;
+
        PMD_INIT_FUNC_TRACE();
 
        /* IXGBE devices don't support half duplex */
        if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
                        (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
-               PMD_INIT_LOG(ERR, "Invalid link_duplex (%u) for port %u\n",
-                               dev->data->dev_conf.link_duplex,
-                               dev->data->port_id);
+               PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
+                            dev->data->dev_conf.link_duplex,
+                            dev->data->port_id);
                return -EINVAL;
        }
 
@@ -1261,21 +1476,31 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        if (status != 0)
                return -1;
        hw->mac.ops.start_hw(hw);
+       hw->mac.get_link_status = true;
 
        /* configure PF module if SRIOV enabled */
        ixgbe_pf_host_configure(dev);
 
        /* initialize transmission unit */
        ixgbe_dev_tx_init(dev);
-      
+
        /* This can fail when allocating mbufs for descriptor rings */
        err = ixgbe_dev_rx_init(dev);
        if (err) {
-               PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n");
+               PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
+               goto error;
+       }
+
+       err = ixgbe_dev_rxtx_start(dev);
+       if (err < 0) {
+               PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
                goto error;
        }
 
-       ixgbe_dev_rxtx_start(dev);
+       /* Skip link setup if loopback mode is enabled for 82599. */
+       if (hw->mac.type == ixgbe_mac_82599EB &&
+                       dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+               goto skip_link_setup;
 
        if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
                err = hw->mac.ops.setup_sfp(hw);
@@ -1289,6 +1514,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        err = ixgbe_check_link(hw, &speed, &link_up, 0);
        if (err)
                goto error;
+       dev->data->dev_link.link_status = link_up;
+
        err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
        if (err)
                goto error;
@@ -1313,15 +1540,18 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
                speed = IXGBE_LINK_SPEED_10GB_FULL;
                break;
        default:
-               PMD_INIT_LOG(ERR, "Invalid link_speed (%u) for port %u\n",
-                               dev->data->dev_conf.link_speed, dev->data->port_id);
+               PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu",
+                            dev->data->dev_conf.link_speed,
+                            dev->data->port_id);
                goto error;
        }
 
-       err = ixgbe_setup_link(hw, speed, negotiate, link_up);
+       err = ixgbe_setup_link(hw, speed, link_up);
        if (err)
                goto error;
 
+skip_link_setup:
+
        /* check if lsc interrupt is enabled */
        if (dev->data->dev_conf.intr_conf.lsc != 0)
                ixgbe_dev_lsc_interrupt_setup(dev);
@@ -1336,10 +1566,10 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
                /* Enable vlan filtering for VMDq */
                ixgbe_vmdq_vlan_hw_filter_enable(dev);
-       }       
+       }
 
        /* Configure DCB hw */
-       ixgbe_configure_dcb(dev); 
+       ixgbe_configure_dcb(dev);
 
        if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
                err = ixgbe_fdir_configure(dev);
@@ -1347,6 +1577,16 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
                        goto error;
        }
 
+       /* Restore vf rate limit */
+       if (vfinfo != NULL) {
+               for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
+                       for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
+                               if (vfinfo[vf].tx_rate[idx] != 0)
+                                       ixgbe_set_vf_rate_limit(dev, vf,
+                                               vfinfo[vf].tx_rate[idx],
+                                               1 << idx);
+       }
+
        ixgbe_restore_statistics_mapping(dev);
 
        return (0);
@@ -1366,6 +1606,12 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
        struct rte_eth_link link;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_vf_info *vfinfo =
+               *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+       struct ixgbe_filter_info *filter_info =
+               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+       struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next;
+       int vf;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -1379,14 +1625,87 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
        /* stop adapter */
        ixgbe_stop_adapter(hw);
 
+       for (vf = 0; vfinfo != NULL &&
+                    vf < dev->pci_dev->max_vfs; vf++)
+               vfinfo[vf].clear_to_send = false;
+
        /* Turn off the laser */
        ixgbe_disable_tx_laser(hw);
 
        ixgbe_dev_clear_queues(dev);
 
+       /* Clear stored conf */
+       dev->data->scattered_rx = 0;
+
        /* Clear recorded link status */
        memset(&link, 0, sizeof(link));
        rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+
+       /* Remove all ntuple filters of the device */
+       for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
+            p_5tuple != NULL; p_5tuple = p_5tuple_next) {
+               p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
+               TAILQ_REMOVE(&filter_info->fivetuple_list,
+                            p_5tuple, entries);
+               rte_free(p_5tuple);
+       }
+       memset(filter_info->fivetuple_mask, 0,
+               sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+
+}
+
+/*
+ * Set device link up: enable tx laser.
+ */
+static int
+ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       if (hw->mac.type == ixgbe_mac_82599EB) {
+#ifdef RTE_NIC_BYPASS
+               if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+                       /* Not supported in bypass mode */
+                       PMD_INIT_LOG(ERR, "Set link up is not supported "
+                                    "by device id 0x%x", hw->device_id);
+                       return -ENOTSUP;
+               }
+#endif
+               /* Turn on the laser */
+               ixgbe_enable_tx_laser(hw);
+               return 0;
+       }
+
+       PMD_INIT_LOG(ERR, "Set link up is not supported by device id 0x%x",
+                    hw->device_id);
+       return -ENOTSUP;
+}
+
+/*
+ * Set device link down: disable tx laser.
+ */
+static int
+ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       if (hw->mac.type == ixgbe_mac_82599EB) {
+#ifdef RTE_NIC_BYPASS
+               if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+                       /* Not supported in bypass mode */
+                       PMD_INIT_LOG(ERR, "Set link down is not supported "
+                                    "by device id 0x%x", hw->device_id);
+                       return -ENOTSUP;
+               }
+#endif
+               /* Turn off the laser */
+               ixgbe_disable_tx_laser(hw);
+               return 0;
+       }
+
+       PMD_INIT_LOG(ERR, "Set link down is not supported by device id 0x%x",
+                    hw->device_id);
+       return -ENOTSUP;
 }
 
 /*
@@ -1571,11 +1890,23 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        }
 
        /* Rx Errors */
-       stats->ierrors = total_missed_rx + hw_stats->crcerrs +
-               hw_stats->rlec;
-
+       stats->ibadcrc  = hw_stats->crcerrs;
+       stats->ibadlen  = hw_stats->rlec + hw_stats->ruc + hw_stats->roc;
+       stats->imissed  = total_missed_rx;
+       stats->ierrors  = stats->ibadcrc +
+                         stats->ibadlen +
+                         stats->imissed +
+                         hw_stats->illerrc + hw_stats->errbc;
+
+       /* Tx Errors */
        stats->oerrors  = 0;
 
+       /* XON/XOFF pause frames */
+       stats->tx_pause_xon  = hw_stats->lxontxc;
+       stats->rx_pause_xon  = hw_stats->lxonrxc;
+       stats->tx_pause_xoff = hw_stats->lxofftxc;
+       stats->rx_pause_xoff = hw_stats->lxoffrxc;
+
        /* Flow Director Stats registers */
        hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
        hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
@@ -1626,7 +1957,6 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        if (stats == NULL)
                return;
 
-       memset(stats, 0, sizeof(*stats));
        stats->ipackets = hw_stats->vfgprc;
        stats->ibytes = hw_stats->vfgorc;
        stats->opackets = hw_stats->vfgptc;
@@ -1668,6 +1998,93 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                dev_info->max_vmdq_pools = ETH_16_POOLS;
        else
                dev_info->max_vmdq_pools = ETH_64_POOLS;
+       dev_info->vmdq_queue_num = dev_info->max_rx_queues;
+       dev_info->rx_offload_capa =
+               DEV_RX_OFFLOAD_VLAN_STRIP |
+               DEV_RX_OFFLOAD_IPV4_CKSUM |
+               DEV_RX_OFFLOAD_UDP_CKSUM  |
+               DEV_RX_OFFLOAD_TCP_CKSUM;
+       dev_info->tx_offload_capa =
+               DEV_TX_OFFLOAD_VLAN_INSERT |
+               DEV_TX_OFFLOAD_IPV4_CKSUM  |
+               DEV_TX_OFFLOAD_UDP_CKSUM   |
+               DEV_TX_OFFLOAD_TCP_CKSUM   |
+               DEV_TX_OFFLOAD_SCTP_CKSUM  |
+               DEV_TX_OFFLOAD_TCP_TSO;
+
+       dev_info->default_rxconf = (struct rte_eth_rxconf) {
+               .rx_thresh = {
+                       .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
+                       .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
+                       .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
+               },
+               .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
+               .rx_drop_en = 0,
+       };
+
+       dev_info->default_txconf = (struct rte_eth_txconf) {
+               .tx_thresh = {
+                       .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
+                       .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
+                       .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
+               },
+               .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
+               .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
+               .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+                               ETH_TXQ_FLAGS_NOOFFLOADS,
+       };
+       dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+       dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
+}
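
These defaults are what applications get back from rte_eth_dev_info_get() and
typically feed straight into queue setup. A usage sketch under that
assumption (setup_queues() is a hypothetical helper; port_id and mbuf_pool
are assumed to come from earlier EAL/mempool initialization):

	#include <rte_ethdev.h>

	static int setup_queues(uint8_t port_id, struct rte_mempool *mbuf_pool)
	{
		struct rte_eth_dev_info info;

		/* Picks up the default_rxconf/default_txconf filled in above. */
		rte_eth_dev_info_get(port_id, &info);

		if (rte_eth_rx_queue_setup(port_id, 0, 128,
				rte_eth_dev_socket_id(port_id),
				&info.default_rxconf, mbuf_pool) != 0)
			return -1;

		return rte_eth_tx_queue_setup(port_id, 0, 512,
				rte_eth_dev_socket_id(port_id),
				&info.default_txconf);
	}
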
+
+static void
+ixgbevf_dev_info_get(struct rte_eth_dev *dev,
+                    struct rte_eth_dev_info *dev_info)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
+       dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
+       dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
+       dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */
+       dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+       dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
+       dev_info->max_vfs = dev->pci_dev->max_vfs;
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               dev_info->max_vmdq_pools = ETH_16_POOLS;
+       else
+               dev_info->max_vmdq_pools = ETH_64_POOLS;
+       dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+                               DEV_RX_OFFLOAD_IPV4_CKSUM |
+                               DEV_RX_OFFLOAD_UDP_CKSUM  |
+                               DEV_RX_OFFLOAD_TCP_CKSUM;
+       dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+                               DEV_TX_OFFLOAD_IPV4_CKSUM  |
+                               DEV_TX_OFFLOAD_UDP_CKSUM   |
+                               DEV_TX_OFFLOAD_TCP_CKSUM   |
+                               DEV_TX_OFFLOAD_SCTP_CKSUM;
+
+       dev_info->default_rxconf = (struct rte_eth_rxconf) {
+               .rx_thresh = {
+                       .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
+                       .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
+                       .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
+               },
+               .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
+               .rx_drop_en = 0,
+       };
+
+       dev_info->default_txconf = (struct rte_eth_txconf) {
+               .tx_thresh = {
+                       .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
+                       .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
+                       .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
+               },
+               .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
+               .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
+               .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+                               ETH_TXQ_FLAGS_NOOFFLOADS,
+       };
 }
 
 /* return 0 means link status changed, -1 means not changed */
@@ -1676,7 +2093,7 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_link link, old;
-       ixgbe_link_speed link_speed;
+       ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
        int link_up;
        int diag;
 
@@ -1700,6 +2117,12 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
                return 0;
        }
 
+       if (link_speed == IXGBE_LINK_SPEED_UNKNOWN &&
+           !hw->mac.get_link_status) {
+               memcpy(&link, &old, sizeof(link));
+               return -1;
+       }
+
        if (link_up == 0) {
                rte_ixgbe_dev_atomic_write_link_status(dev, &link);
                if (link.link_status == old.link_status)
@@ -1834,7 +2257,7 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
        /* read-on-clear nic registers here */
        eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
        PMD_DRV_LOG(INFO, "eicr %x", eicr);
-       
+
        intr->flags = 0;
        if (eicr & IXGBE_EICR_LSC) {
                /* set flag for async link update */
@@ -1882,7 +2305,7 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
 }
 
 /*
- * It executes link_update after knowing an interrupt occured.
+ * It executes link_update after knowing an interrupt occurred.
  *
  * @param dev
  *  Pointer to struct rte_eth_dev.
@@ -1898,14 +2321,14 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        int64_t timeout;
        struct rte_eth_link link;
-       int intr_enable_delay = false;  
+       int intr_enable_delay = false;
 
-       PMD_DRV_LOG(DEBUG, "intr action type %d\n", intr->flags);
+       PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
 
        if (intr->flags & IXGBE_FLAG_MAILBOX) {
                ixgbe_pf_mbx_process(dev);
                intr->flags &= ~IXGBE_FLAG_MAILBOX;
-       } 
+       }
 
        if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
                /* get the link status before link update, for predicting later */
@@ -1922,11 +2345,11 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
                else
                        /* handle it 4 sec later, wait it being stable */
                        timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
-               
+
                ixgbe_dev_link_status_print(dev);
 
                intr_enable_delay = true;
-       } 
+       }
 
        if (intr_enable_delay) {
                if (rte_eal_alarm_set(timeout * 1000,
@@ -1937,7 +2360,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
                ixgbe_enable_intr(dev);
                rte_intr_enable(&(dev->pci_dev->intr_handle));
        }
-                       
+
 
        return 0;
 }
@@ -1977,7 +2400,7 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
                _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
        }
 
-       PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]\n", eicr);
+       PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
        ixgbe_enable_intr(dev);
        rte_intr_enable(&(dev->pci_dev->intr_handle));
 }
@@ -2021,6 +2444,55 @@ ixgbe_dev_led_off(struct rte_eth_dev *dev)
        return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
 }
 
+static int
+ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+       struct ixgbe_hw *hw;
+       uint32_t mflcn_reg;
+       uint32_t fccfg_reg;
+       int rx_pause;
+       int tx_pause;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       fc_conf->pause_time = hw->fc.pause_time;
+       fc_conf->high_water = hw->fc.high_water[0];
+       fc_conf->low_water = hw->fc.low_water[0];
+       fc_conf->send_xon = hw->fc.send_xon;
+       fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
+
+       /*
+        * Return rx_pause status according to actual setting of
+        * MFLCN register.
+        */
+       mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+       if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
+               rx_pause = 1;
+       else
+               rx_pause = 0;
+
+       /*
+        * Return tx_pause status according to actual setting of
+        * FCCFG register.
+        */
+       fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+       if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
+               tx_pause = 1;
+       else
+               tx_pause = 0;
+
+       if (rx_pause && tx_pause)
+               fc_conf->mode = RTE_FC_FULL;
+       else if (rx_pause)
+               fc_conf->mode = RTE_FC_RX_PAUSE;
+       else if (tx_pause)
+               fc_conf->mode = RTE_FC_TX_PAUSE;
+       else
+               fc_conf->mode = RTE_FC_NONE;
+
+       return 0;
+}
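
The reported mode is simply the cross product of the two pause directions
read back from MFLCN and FCCFG. A minimal sketch of that mapping, with a
local enum mirroring the RTE_FC_* ordering (NONE, RX_PAUSE, TX_PAUSE, FULL):

	#include <stdio.h>

	enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

	static enum fc_mode fc_mode_from_pause(int rx_pause, int tx_pause)
	{
		if (rx_pause && tx_pause)
			return FC_FULL;
		if (rx_pause)
			return FC_RX_PAUSE;
		if (tx_pause)
			return FC_TX_PAUSE;
		return FC_NONE;
	}

	int main(void)
	{
		/* e.g. MFLCN.RFCE set, FCCFG TFCE bits clear -> RX-only pause */
		printf("%d\n", fc_mode_from_pause(1, 0));	/* 1 == FC_RX_PAUSE */
		return 0;
	}
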
+
 static int
 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 {
@@ -2039,8 +2511,10 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        PMD_INIT_FUNC_TRACE();
 
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       if (fc_conf->autoneg != !hw->fc.disable_fc_autoneg)
+               return -ENOTSUP;
        rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
-       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
+       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
 
        /*
         * At least reserve one Ethernet frame for watermark
@@ -2049,8 +2523,8 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
        if ((fc_conf->high_water > max_high_water) ||
                (fc_conf->high_water < fc_conf->low_water)) {
-               PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
-               PMD_INIT_LOG(ERR, "High_water must <=  0x%x\n", max_high_water);
+               PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+               PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
                return (-EINVAL);
        }
 
@@ -2082,7 +2556,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
                return 0;
        }
 
-       PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x \n", err);
+       PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
        return -EIO;
 }
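For reference, a minimal application-side sketch of the get/set pair above, reached through the generic ethdev wrappers (port id 0 is an assumption for illustration):

        #include <rte_ethdev.h>

        struct rte_eth_fc_conf fc_conf;

        /* Read the current state back from the MFLCN/FCCFG registers. */
        if (rte_eth_dev_flow_ctrl_get(0, &fc_conf) == 0) {
                /* Request full (RX+TX) pause, keeping the reported watermarks. */
                fc_conf.mode = RTE_FC_FULL;
                rte_eth_dev_flow_ctrl_set(0, &fc_conf);
        }

Getting the configuration first matters here: the new autoneg check above rejects any set request whose autoneg flag differs from the current hw->fc.disable_fc_autoneg setting with -ENOTSUP.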
 
@@ -2092,7 +2566,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
  *  @tc_num: traffic class number
  *  Enable flow control according to the current settings.
  */
-static int 
+static int
 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
 {
        int ret_val = 0;
@@ -2101,7 +2575,7 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
        uint32_t fcrtl, fcrth;
        uint8_t i;
        uint8_t nb_rx_en;
-       
+
        /* Validate the water mark configuration */
        if (!hw->fc.pause_time) {
                ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
@@ -2112,13 +2586,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
        if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
                 /* High/Low water can not be 0 */
                if( (!hw->fc.high_water[tc_num])|| (!hw->fc.low_water[tc_num])) {
-                       PMD_INIT_LOG(ERR,"Invalid water mark configuration\n");
+                       PMD_INIT_LOG(ERR, "Invalid water mark configuration");
                        ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                        goto out;
                }
+
                if(hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
-                       PMD_INIT_LOG(ERR,"Invalid water mark configuration\n");
+                       PMD_INIT_LOG(ERR, "Invalid water mark configuration");
                        ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                        goto out;
                }
@@ -2137,7 +2611,7 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
        case ixgbe_fc_none:
                /*
                 * If the count of enabled RX Priority Flow control >1,
-                * and the TX pause can not be disabled 
+                * TX pause cannot be disabled
                 */
                nb_rx_en = 0;
                for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
@@ -2184,7 +2658,7 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
                fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
                break;
        default:
-               DEBUGOUT("Flow control param set incorrectly\n");
+               PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
                ret_val = IXGBE_ERR_CONFIG;
                goto out;
                break;
@@ -2225,7 +2699,7 @@ out:
        return ret_val;
 }
 
-static int 
+static int
 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -2237,7 +2711,7 @@ ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num)
        return ret_val;
 }
 
-static int 
+static int
 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
 {
        int err;
@@ -2249,29 +2723,29 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_dcb_config *dcb_config =
                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
-       
+
        enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
                ixgbe_fc_none,
                ixgbe_fc_rx_pause,
                ixgbe_fc_tx_pause,
                ixgbe_fc_full
        };
-       
+
        PMD_INIT_FUNC_TRACE();
-       
+
        ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
        tc_num = map[pfc_conf->priority];
        rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
-       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
+       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
        /*
         * At least reserve one Ethernet frame for watermark
         * high_water/low_water in kilo bytes for ixgbe
         */
        max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
        if ((pfc_conf->fc.high_water > max_high_water) ||
-               (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
-               PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
-               PMD_INIT_LOG(ERR, "High_water must <=  0x%x\n", max_high_water);
+           (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
+               PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+               PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
                return (-EINVAL);
        }
 
@@ -2280,51 +2754,55 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
        hw->fc.send_xon = pfc_conf->fc.send_xon;
        hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
        hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
-               
+
        err = ixgbe_dcb_pfc_enable(dev,tc_num);
-       
+
        /* Not negotiated is not an error case */
-       if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) 
+       if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
                return 0;
 
-       PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x \n", err);
+       PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
        return -EIO;
-}      
+}
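A sketch of exercising the priority flow control path above (the field values are illustrative assumptions; high/low water are in KB and are validated against RXPBSIZE exactly as in the function):

        #include <rte_ethdev.h>

        struct rte_eth_pfc_conf pfc_conf = {
                .fc = {
                        .mode       = RTE_FC_FULL,
                        .high_water = 96,    /* KB, must be <= max_high_water */
                        .low_water  = 64,    /* KB, must be < high_water */
                        .pause_time = 0x680,
                        .send_xon   = 0,
                },
                .priority = 0,  /* 802.1p priority, mapped to a TC via the DCB map */
        };

        int ret = rte_eth_dev_priority_flow_ctrl_set(0, &pfc_conf);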
 
-static int 
+static int
 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
-                               struct rte_eth_rss_reta *reta_conf)
-{      
-       uint8_t i,j,mask;
-       uint32_t reta;
-       struct ixgbe_hw *hw = 
-                       IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+                         struct rte_eth_rss_reta_entry64 *reta_conf,
+                         uint16_t reta_size)
+{
+       uint8_t i, j, mask;
+       uint32_t reta, r;
+       uint16_t idx, shift;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        PMD_INIT_FUNC_TRACE();
-       /*  
-       * Update Redirection Table RETA[n],n=0...31,The redirection table has 
-       * 128-entries in 32 registers
-        */ 
-       for(i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
-               if (i < ETH_RSS_RETA_NUM_ENTRIES/2) 
-                       mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
+       if (reta_size != ETH_RSS_RETA_SIZE_128) {
+               PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+                       "(%d) doesn't match the number hardware can supported "
+                       "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
+               idx = i / RTE_RETA_GROUP_SIZE;
+               shift = i % RTE_RETA_GROUP_SIZE;
+               mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+                                               IXGBE_4_BIT_MASK);
+               if (!mask)
+                       continue;
+               if (mask == IXGBE_4_BIT_MASK)
+                       r = 0;
                else
-                       mask = (uint8_t)((reta_conf->mask_hi >> 
-                               (i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF);
-               if (mask != 0) {
-                       reta = 0;
-                       if (mask != 0xF)
-                               reta = IXGBE_READ_REG(hw,IXGBE_RETA(i >> 2));
-
-                       for (j = 0; j < 4; j++) {
-                               if (mask & (0x1 << j)) {
-                                       if (mask != 0xF)
-                                               reta &= ~(0xFF << 8 * j);
-                                       reta |= reta_conf->reta[i + j] << 8*j;
-                               }
-                       }
-                       IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2),reta);
+                       r = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+               for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
+                       if (mask & (0x1 << j))
+                               reta |= reta_conf[idx].reta[shift + j] <<
+                                                       (CHAR_BIT * j);
+                       else
+                               reta |= r & (IXGBE_8_BIT_MASK <<
+                                               (CHAR_BIT * j));
                }
+               IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
        }
 
        return 0;
@@ -2332,36 +2810,40 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 
 static int
 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
-                               struct rte_eth_rss_reta *reta_conf)
+                        struct rte_eth_rss_reta_entry64 *reta_conf,
+                        uint16_t reta_size)
 {
-       uint8_t i,j,mask;
+       uint8_t i, j, mask;
        uint32_t reta;
-       struct ixgbe_hw *hw =
-                       IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       
+       uint16_t idx, shift;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
        PMD_INIT_FUNC_TRACE();
-       /* 
-        * Read Redirection Table RETA[n],n=0...31,The redirection table has 
-        * 128-entries in 32 registers
-        */
-       for(i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
-               if (i < ETH_RSS_RETA_NUM_ENTRIES/2)
-                       mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
-               else
-                       mask = (uint8_t)((reta_conf->mask_hi >> 
-                               (i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF);
-
-               if (mask != 0) {
-                       reta = IXGBE_READ_REG(hw,IXGBE_RETA(i >> 2));
-                       for (j = 0; j < 4; j++) {
-                               if (mask & (0x1 << j))
-                                       reta_conf->reta[i + j] = 
-                                               (uint8_t)((reta >> 8 * j) & 0xFF);
-                       } 
+       if (reta_size != ETH_RSS_RETA_SIZE_128) {
+               PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+                       "(%d) doesn't match the number hardware can supported "
+                               "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
+               idx = i / RTE_RETA_GROUP_SIZE;
+               shift = i % RTE_RETA_GROUP_SIZE;
+               mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+                                               IXGBE_4_BIT_MASK);
+               if (!mask)
+                       continue;
+
+               reta = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+               for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
+                       if (mask & (0x1 << j))
+                               reta_conf[idx].reta[shift + j] =
+                                       ((reta >> (CHAR_BIT * j)) &
+                                               IXGBE_8_BIT_MASK);
                }
        }
 
-       return 0;               
+       return 0;
 }
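The reworked API above replaces the old 64+64 bit mask_lo/mask_hi scheme with an array of rte_eth_rss_reta_entry64 groups of RTE_RETA_GROUP_SIZE (64) entries each. A sketch of spreading the 128-entry table over 4 RX queues (port 0 is an assumption):

        #include <string.h>
        #include <rte_ethdev.h>

        struct rte_eth_rss_reta_entry64 reta_conf[2];
        int i;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < ETH_RSS_RETA_SIZE_128; i++) {
                reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
                reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] = i % 4;
        }
        rte_eth_dev_rss_reta_update(0, reta_conf, ETH_RSS_RETA_SIZE_128);

Entries whose mask bit is clear are preserved: the update path reads the current RETA register back into r and merges it byte by byte, which is what the inner loop above implements.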
 
 static void
@@ -2382,13 +2864,59 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
        ixgbe_clear_rar(hw, index);
 }
 
+static int
+ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+       uint32_t hlreg0;
+       uint32_t maxfrs;
+       struct ixgbe_hw *hw;
+       struct rte_eth_dev_info dev_info;
+       uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+       ixgbe_dev_info_get(dev, &dev_info);
+
+       /* check that mtu is within the allowed range */
+       if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+               return -EINVAL;
+
+       /* refuse mtu that requires the support of scattered packets when this
+        * feature has not been enabled before. */
+       if (!dev->data->scattered_rx &&
+           (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
+            dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+
+       /* switch to jumbo mode if needed */
+       if (frame_size > ETHER_MAX_LEN) {
+               dev->data->dev_conf.rxmode.jumbo_frame = 1;
+               hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+       } else {
+               dev->data->dev_conf.rxmode.jumbo_frame = 0;
+               hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+       /* update max frame size */
+       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+       maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+       maxfrs &= 0x0000FFFF;
+       maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
+       IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
+
+       return 0;
+}
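The new mtu_set hook is reached through rte_eth_dev_set_mtu(); a short sketch (port 0 and the 9000-byte MTU are assumptions):

        #include <rte_ethdev.h>

        /* Switches the port into jumbo mode and reprograms MAXFRS. */
        int ret = rte_eth_dev_set_mtu(0, 9000);
        if (ret == -EINVAL) {
                /* Either the MTU is out of range for this NIC, or the frame
                 * would no longer fit one mbuf and scattered RX is not on. */
        }

The scattered-RX guard is worth noting: frame_size plus two VLAN tags is compared against min_rx_buf_size minus RTE_PKTMBUF_HEADROOM, so mempools with small data rooms will reject large MTUs unless scattered_rx was enabled at setup time.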
+
 /*
  * Virtual Function operations
  */
 static void
 ixgbevf_intr_disable(struct ixgbe_hw *hw)
 {
-       PMD_INIT_LOG(DEBUG, "ixgbevf_intr_disable");
+       PMD_INIT_FUNC_TRACE();
 
        /* Clear interrupt mask to stop from interrupts being generated */
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
@@ -2401,8 +2929,8 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 {
        struct rte_eth_conf* conf = &dev->data->dev_conf;
 
-       PMD_INIT_LOG(DEBUG, "\nConfigured Virtual Function port id: %d\n",
-               dev->data->port_id);
+       PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
+                    dev->data->port_id);
 
        /*
         * VF has no ability to enable/disable HW CRC
@@ -2410,12 +2938,12 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
         */
 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
        if (!conf->rxmode.hw_strip_crc) {
-               PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n");
+               PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
                conf->rxmode.hw_strip_crc = 1;
        }
 #else
        if (conf->rxmode.hw_strip_crc) {
-               PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip\n");
+               PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
                conf->rxmode.hw_strip_crc = 0;
        }
 #endif
@@ -2426,24 +2954,28 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 static int
 ixgbevf_dev_start(struct rte_eth_dev *dev)
 {
-       struct ixgbe_hw *hw = 
+       struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int err, mask = 0;
-       
-       PMD_INIT_LOG(DEBUG, "ixgbevf_dev_start");
+
+       PMD_INIT_FUNC_TRACE();
 
        hw->mac.ops.reset_hw(hw);
+       hw->mac.get_link_status = true;
+
+       /* negotiate mailbox API version to use with the PF. */
+       ixgbevf_negotiate_api(hw);
 
        ixgbevf_dev_tx_init(dev);
 
        /* This can fail when allocating mbufs for descriptor rings */
        err = ixgbevf_dev_rx_init(dev);
        if (err) {
-               PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)\n", err);
+               PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
                ixgbe_dev_clear_queues(dev);
                return err;
        }
-       
+
        /* Set vfta */
        ixgbevf_set_vfta_all(dev,1);
 
@@ -2462,17 +2994,20 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       PMD_INIT_LOG(DEBUG, "ixgbevf_dev_stop");
-               
+       PMD_INIT_FUNC_TRACE();
+
        hw->adapter_stopped = TRUE;
        ixgbe_stop_adapter(hw);
 
-       /* 
-         * Clear what we set, but we still keep shadow_vfta to 
+       /*
+         * Clear what we set, but we still keep shadow_vfta to
          * restore after device starts
          */
        ixgbevf_set_vfta_all(dev,0);
 
+       /* Clear stored conf */
+       dev->data->scattered_rx = 0;
+
        ixgbe_dev_clear_queues(dev);
 }
 
@@ -2481,7 +3016,7 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       PMD_INIT_LOG(DEBUG, "ixgbevf_dev_close");
+       PMD_INIT_FUNC_TRACE();
 
        ixgbe_reset_hw(hw);
 
@@ -2522,7 +3057,7 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
        uint32_t vid_idx = 0;
        uint32_t vid_bit = 0;
        int ret = 0;
-       
+
        PMD_INIT_FUNC_TRACE();
 
        /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
@@ -2551,14 +3086,14 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
        uint32_t ctrl;
 
        PMD_INIT_FUNC_TRACE();
-       
+
        if(queue >= hw->mac.max_rx_queues)
                return;
 
        ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
        if(on)
                ctrl |= IXGBE_RXDCTL_VME;
-       else 
+       else
                ctrl &= ~IXGBE_RXDCTL_VME;
        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
 
@@ -2586,36 +3121,36 @@ static int
 ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
 {
        uint32_t reg_val;
-       
+
        /* we only need to do this if VMDq is enabled */
        reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
        if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
-               PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting\n");
+               PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
                return (-1);
        }
-       
+
        return 0;
 }
 
-static uint32_t 
+static uint32_t
 ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
 {
        uint32_t vector = 0;
        switch (hw->mac.mc_filter_type) {
        case 0:   /* use bits [47:36] of the address */
-               vector = ((uc_addr->addr_bytes[4] >> 4) | 
+               vector = ((uc_addr->addr_bytes[4] >> 4) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 4));
                break;
        case 1:   /* use bits [46:35] of the address */
-               vector = ((uc_addr->addr_bytes[4] >> 3) | 
+               vector = ((uc_addr->addr_bytes[4] >> 3) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 5));
                break;
        case 2:   /* use bits [45:34] of the address */
-               vector = ((uc_addr->addr_bytes[4] >> 2) | 
+               vector = ((uc_addr->addr_bytes[4] >> 2) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 6));
                break;
        case 3:   /* use bits [43:32] of the address */
-               vector = ((uc_addr->addr_bytes[4]) | 
+               vector = ((uc_addr->addr_bytes[4]) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 8));
                break;
        default:  /* Invalid mc_filter_type */
@@ -2627,7 +3162,7 @@ ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
        return vector;
 }
 
-static int 
+static int
 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
                               uint8_t on)
 {
@@ -2640,24 +3175,24 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
        const uint32_t ixgbe_uta_bit_shift = 5;
        const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
        const uint32_t bit1 = 0x1;
-       
+
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_uta_info *uta_info =
                IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
-       
+
        /* The UTA table only exists on 82599 hardware and newer */
        if (hw->mac.type < ixgbe_mac_82599EB)
                return (-ENOTSUP);
-       
+
        vector = ixgbe_uta_vector(hw,mac_addr);
        uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
        uta_shift = vector & ixgbe_uta_bit_mask;
-       
+
        rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
        if(rc == on)
                return 0;
-       
+
        reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
        if (on) {
                uta_info->uta_in_use++;
@@ -2668,15 +3203,15 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
                reg_val &= ~(bit1 << uta_shift);
                uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
        }
-       
+
        IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
-       
+
        if (uta_info->uta_in_use > 0)
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
                                IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
        else
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type);
-       
+
        return 0;
 }
 
@@ -2692,7 +3227,7 @@ ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
        /* The UTA table only exists on 82599 hardware and newer */
        if (hw->mac.type < ixgbe_mac_82599EB)
                return (-ENOTSUP);
-       
+
        if(on) {
                for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
                        uta_info->uta_shadow[i] = ~0;
@@ -2705,44 +3240,55 @@ ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
                }
        }
        return 0;
-       
+
 }
+
+uint32_t
+ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
+{
+       uint32_t new_val = orig_val;
+
+       if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+               new_val |= IXGBE_VMOLR_AUPE;
+       if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+               new_val |= IXGBE_VMOLR_ROMPE;
+       if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+               new_val |= IXGBE_VMOLR_ROPE;
+       if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+               new_val |= IXGBE_VMOLR_BAM;
+       if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+               new_val |= IXGBE_VMOLR_MPE;
+
+       return new_val;
+}
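ixgbe_convert_vm_rx_mask_to_val() is a pure mapping from the generic ETH_VMDQ_ACCEPT_* flags to the VMOLR acceptance bits (AUPE/ROMPE/ROPE/BAM/MPE); factoring it out of ixgbe_set_pool_rx_mode() lets other rx-mode paths reuse it. From an application it is driven through the VF rx-mode call of this era (the call name, port 0 and VF 1 are assumptions for illustration):

        #include <rte_ethdev.h>

        /* Let pool/VF 1 accept broadcast and untagged frames. */
        rte_eth_dev_set_vf_rxmode(0, 1,
                ETH_VMDQ_ACCEPT_BROADCAST | ETH_VMDQ_ACCEPT_UNTAG, 1);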
+
 static int
 ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
                               uint16_t rx_mask, uint8_t on)
 {
        int val = 0;
-       
+
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
-       
+
        if (hw->mac.type == ixgbe_mac_82598EB) {
                PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
-                       " on 82599 hardware and newer\n");
+                            " on 82599 hardware and newer");
                return (-ENOTSUP);
        }
        if (ixgbe_vmdq_mode_check(hw) < 0)
                return (-ENOTSUP);
 
-       if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG )
-               val |= IXGBE_VMOLR_AUPE;
-       if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC )
-               val |= IXGBE_VMOLR_ROMPE;
-       if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
-               val |= IXGBE_VMOLR_ROPE;
-       if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
-               val |= IXGBE_VMOLR_BAM;
-       if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
-               val |= IXGBE_VMOLR_MPE;
+       val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
 
        if (on)
                vmolr |= val;
-       else 
+       else
                vmolr &= ~val;
 
        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
-       
+
        return 0;
 }
 
@@ -2752,13 +3298,13 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
        uint32_t reg,addr;
        uint32_t val;
        const uint8_t bit1 = 0x1;
-       
+
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        if (ixgbe_vmdq_mode_check(hw) < 0)
                return (-ENOTSUP);
-       
+
        addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
        reg = IXGBE_READ_REG(hw, addr);
        val = bit1 << pool;
@@ -2767,9 +3313,9 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
                reg |= val;
        else
                reg &= ~val;
-       
+
        IXGBE_WRITE_REG(hw, addr,reg);
-       
+
        return 0;
 }
 
@@ -2779,13 +3325,13 @@ ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
        uint32_t reg,addr;
        uint32_t val;
        const uint8_t bit1 = 0x1;
-       
+
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        if (ixgbe_vmdq_mode_check(hw) < 0)
                return (-ENOTSUP);
-       
+
        addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
        reg = IXGBE_READ_REG(hw, addr);
        val = bit1 << pool;
@@ -2794,13 +3340,13 @@ ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
                reg |= val;
        else
                reg &= ~val;
-       
+
        IXGBE_WRITE_REG(hw, addr,reg);
-       
+
        return 0;
 }
 
-static int 
+static int
 ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
                        uint64_t pool_mask, uint8_t vlan_on)
 {
@@ -2808,14 +3354,14 @@ ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
        uint16_t pool_idx;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       
+
        if (ixgbe_vmdq_mode_check(hw) < 0)
                return (-ENOTSUP);
        for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
-               if (pool_mask & ((uint64_t)(1ULL << pool_idx))) 
-                       ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on);
-                       if (ret < 0) 
-                               return ret;     
+               if (pool_mask & ((uint64_t)(1ULL << pool_idx))) {
+                       ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx, vlan_on);
+                       if (ret < 0)
+                               return ret;
+               }
        }
 
        return ret;
@@ -2823,7 +3369,7 @@ ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
 
 static int
 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
-                       struct rte_eth_vmdq_mirror_conf *mirror_conf, 
+                       struct rte_eth_vmdq_mirror_conf *mirror_conf,
                        uint8_t rule_id, uint8_t on)
 {
        uint32_t mr_ctl,vlvf;
@@ -2834,7 +3380,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
        uint8_t i = 0;
        int reg_index = 0;
        uint64_t vlan_mask = 0;
-       
+
        const uint8_t pool_mask_offset = 32;
        const uint8_t vlan_mask_offset = 32;
        const uint8_t dst_pool_offset = 8;
@@ -2877,7 +3423,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
                if (on) {
                        mv_lsb = vlan_mask & 0xFFFFFFFF;
                        mv_msb = vlan_mask >> vlan_mask_offset;
-                       
+
                        mr_info->mr_conf[rule_id].vlan.vlan_mask =
                                                mirror_conf->vlan.vlan_mask;
                        for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
@@ -2895,23 +3441,23 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
        }
 
        /*
-        * if enable pool mirror, write related pool mask register,if disable 
-        * pool mirror, clear PFMRVM register
+        * if pool mirroring is enabled, write the related pool mask register;
+        * if it is disabled, clear the PFMRVM register
         */
        if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
-               if (on) { 
+               if (on) {
                        mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
                        mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
-                       mr_info->mr_conf[rule_id].pool_mask = 
+                       mr_info->mr_conf[rule_id].pool_mask =
                                        mirror_conf->pool_mask;
-                       
+
                } else {
                        mp_lsb = 0;
                        mp_msb = 0;
                        mr_info->mr_conf[rule_id].pool_mask = 0;
                }
        }
-       
+
        /* read mirror control register and recalculate it */
        mr_ctl = IXGBE_READ_REG(hw,IXGBE_MRCTL(rule_id));
 
@@ -2927,7 +3473,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 
        /* write mirror control register */
        IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
-       
+
         /* write pool mirror control register */
        if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
                IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
@@ -2944,19 +3490,19 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
        return 0;
 }
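A hedged sketch of installing a pool-mirror rule through the API this function backs (the pool numbers and rule id are illustrative assumptions):

        #include <string.h>
        #include <rte_ethdev.h>

        struct rte_eth_vmdq_mirror_conf mirror_conf;

        memset(&mirror_conf, 0, sizeof(mirror_conf));
        mirror_conf.rule_type_mask = ETH_VMDQ_POOL_MIRROR;
        mirror_conf.pool_mask = (1ULL << 2) | (1ULL << 3); /* mirror pools 2, 3 */
        mirror_conf.dst_pool = 0;                          /* into pool 0 */

        rte_eth_mirror_rule_set(0, &mirror_conf, 0 /* rule_id */, 1 /* on */);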
 
-static int 
+static int
 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
 {
        int mr_ctl = 0;
        uint32_t lsb_val = 0;
        uint32_t msb_val = 0;
        const uint8_t rule_mr_offset = 4;
-       
+
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct ixgbe_mirror_info *mr_info = 
+       struct ixgbe_mirror_info *mr_info =
                (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
-       
+
        if (ixgbe_vmdq_mode_check(hw) < 0)
                return (-ENOTSUP);
 
@@ -2976,3 +3522,913 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
 
        return 0;
 }
+
+static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+       uint16_t queue_idx, uint16_t tx_rate)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t rf_dec, rf_int;
+       uint32_t bcnrc_val;
+       uint16_t link_speed = dev->data->dev_link.link_speed;
+
+       if (queue_idx >= hw->mac.max_tx_queues)
+               return -EINVAL;
+
+       if (tx_rate != 0) {
+               /* Calculate the rate factor values to set */
+               rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
+               rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
+               rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
+
+               bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
+               bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
+                               IXGBE_RTTBCNRC_RF_INT_MASK_M);
+               bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
+       } else {
+               bcnrc_val = 0;
+       }
+
+       /*
+        * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+        * register. MMW_SIZE=0x014 if 9728-byte jumbo frames are supported,
+        * otherwise set it to 0x4.
+        */
+       if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
+               (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
+                               IXGBE_MAX_JUMBO_FRAME_SIZE))
+               IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+                       IXGBE_MMW_SIZE_JUMBO_FRAME);
+       else
+               IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+                       IXGBE_MMW_SIZE_DEFAULT);
+
+       /* Set RTTBCNRC of queue X */
+       IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
+       IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return 0;
+}
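For reference, the rate factor programmed into RTTBCNRC above is simply link_speed / tx_rate in fixed point, with IXGBE_RTTBCNRC_RF_INT_SHIFT fractional bits (14 is assumed for the arithmetic below). With a 10000 Mbps link and a 3000 Mbps queue limit:

        rf_int = 10000 / 3000 = 3
        rf_dec = ((10000 % 3000) << 14) / 3000 = (1000 * 16384) / 3000 = 5461

        effective rate ~= 10000 / (3 + 5461 / 16384) ~= 3000 Mbps

A tx_rate of 0 writes 0 to RTTBCNRC, disabling the limiter for that queue.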
+
+static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+       uint16_t tx_rate, uint64_t q_msk)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_vf_info *vfinfo =
+               *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+       uint8_t  nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+       uint32_t queue_stride =
+               IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+       uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
+       uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
+       uint16_t total_rate = 0;
+
+       if (queue_end >= hw->mac.max_tx_queues)
+               return -EINVAL;
+
+       if (vfinfo != NULL) {
+               for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) {
+                       if (vf_idx == vf)
+                               continue;
+                       for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
+                               idx++)
+                               total_rate += vfinfo[vf_idx].tx_rate[idx];
+               }
+       } else
+               return -EINVAL;
+
+       /* Store tx_rate for this vf. */
+       for (idx = 0; idx < nb_q_per_pool; idx++) {
+               if (((uint64_t)0x1 << idx) & q_msk) {
+                       if (vfinfo[vf].tx_rate[idx] != tx_rate)
+                               vfinfo[vf].tx_rate[idx] = tx_rate;
+                       total_rate += tx_rate;
+               }
+       }
+
+       if (total_rate > dev->data->dev_link.link_speed) {
+               /*
+                * Reset the stored TX rate of the VF if the total would
+                * exceed the link speed.
+                */
+               memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
+               return -EINVAL;
+       }
+
+       /* Set RTTBCNRC of each queue/pool for vf X  */
+       for (; queue_idx <= queue_end; queue_idx++) {
+               if (0x1 & q_msk)
+                       ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
+               q_msk = q_msk >> 1;
+       }
+
+       return 0;
+}
+
+static void
+ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+                    __attribute__((unused)) uint32_t index,
+                    __attribute__((unused)) uint32_t pool)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int diag;
+
+       /*
+        * On an 82599 VF, adding the same MAC addr again is not an idempotent
+        * operation. Trap this case to avoid exhausting the [very limited]
+        * set of PF resources used to store VF MAC addresses.
+        */
+       if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
+               return;
+       diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
+       if (diag == 0)
+               return;
+       PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag);
+}
+
+static void
+ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
+       struct ether_addr *mac_addr;
+       uint32_t i;
+       int diag;
+
+       /*
+        * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
+        * not support the deletion of a given MAC address.
+        * Instead, it requires deleting all MAC addresses, then adding back
+        * all MAC addresses except the one to be deleted.
+        */
+       (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
+
+       /*
+        * Add again all MAC addresses, with the exception of the deleted one
+        * and of the permanent MAC address.
+        */
+       for (i = 0, mac_addr = dev->data->mac_addrs;
+            i < hw->mac.num_rar_entries; i++, mac_addr++) {
+               /* Skip the deleted MAC address */
+               if (i == index)
+                       continue;
+               /* Skip NULL MAC addresses */
+               if (is_zero_ether_addr(mac_addr))
+                       continue;
+               /* Skip the permanent MAC address */
+               if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
+                       continue;
+               diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
+               if (diag != 0)
+                       PMD_DRV_LOG(ERR,
+                                   "Adding again MAC address "
+                                   "%02x:%02x:%02x:%02x:%02x:%02x failed "
+                                   "diag=%d",
+                                   mac_addr->addr_bytes[0],
+                                   mac_addr->addr_bytes[1],
+                                   mac_addr->addr_bytes[2],
+                                   mac_addr->addr_bytes[3],
+                                   mac_addr->addr_bytes[4],
+                                   mac_addr->addr_bytes[5],
+                                   diag);
+       }
+}
+
+#define MAC_TYPE_FILTER_SUP(type)    do {\
+       if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
+               (type) != ixgbe_mac_X550)\
+               return -ENOTSUP;\
+} while (0)
+
+static int
+ixgbe_syn_filter_set(struct rte_eth_dev *dev,
+                       struct rte_eth_syn_filter *filter,
+                       bool add)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t synqf;
+
+       if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
+               return -EINVAL;
+
+       synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+
+       if (add) {
+               if (synqf & IXGBE_SYN_FILTER_ENABLE)
+                       return -EINVAL;
+               synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
+                       IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
+
+               if (filter->hig_pri)
+                       synqf |= IXGBE_SYN_FILTER_SYNQFP;
+               else
+                       synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
+       } else {
+               if (!(synqf & IXGBE_SYN_FILTER_ENABLE))
+                       return -ENOENT;
+               synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+       IXGBE_WRITE_FLUSH(hw);
+       return 0;
+}
+
+static int
+ixgbe_syn_filter_get(struct rte_eth_dev *dev,
+                       struct rte_eth_syn_filter *filter)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+
+       if (synqf & IXGBE_SYN_FILTER_ENABLE) {
+               filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
+               filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
+               return 0;
+       }
+       return -ENOENT;
+}
+
+static int
+ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
+                       enum rte_filter_op filter_op,
+                       void *arg)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int ret;
+
+       MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+       if (filter_op == RTE_ETH_FILTER_NOP)
+               return 0;
+
+       if (arg == NULL) {
+               PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
+                           filter_op);
+               return -EINVAL;
+       }
+
+       switch (filter_op) {
+       case RTE_ETH_FILTER_ADD:
+               ret = ixgbe_syn_filter_set(dev,
+                               (struct rte_eth_syn_filter *)arg,
+                               TRUE);
+               break;
+       case RTE_ETH_FILTER_DELETE:
+               ret = ixgbe_syn_filter_set(dev,
+                               (struct rte_eth_syn_filter *)arg,
+                               FALSE);
+               break;
+       case RTE_ETH_FILTER_GET:
+               ret = ixgbe_syn_filter_get(dev,
+                               (struct rte_eth_syn_filter *)arg);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
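These handlers plug into the generic rte_eth_dev_filter_ctrl() entry point; a sketch of steering TCP SYN packets to a dedicated queue (port 0 and queue 3 are assumptions):

        #include <rte_ethdev.h>

        struct rte_eth_syn_filter syn_filter = {
                .hig_pri = 1,   /* SYN match takes priority over other filters */
                .queue   = 3,
        };

        int ret = rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_SYN,
                                          RTE_ETH_FILTER_ADD, &syn_filter);

There is a single SYN filter per port (one SYNQF register), which is why a second ADD while IXGBE_SYN_FILTER_ENABLE is already set fails with -EINVAL.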
+
+
+static inline enum ixgbe_5tuple_protocol
+convert_protocol_type(uint8_t protocol_value)
+{
+       if (protocol_value == IPPROTO_TCP)
+               return IXGBE_FILTER_PROTOCOL_TCP;
+       else if (protocol_value == IPPROTO_UDP)
+               return IXGBE_FILTER_PROTOCOL_UDP;
+       else if (protocol_value == IPPROTO_SCTP)
+               return IXGBE_FILTER_PROTOCOL_SCTP;
+       else
+               return IXGBE_FILTER_PROTOCOL_NONE;
+}
+
+/*
+ * add a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter that will be added; the allocated index
+ * and the destination queue are carried in the filter itself.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
+                       struct ixgbe_5tuple_filter *filter)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_filter_info *filter_info =
+               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+       int i, idx, shift;
+       uint32_t ftqf, sdpqf;
+       uint32_t l34timir = 0;
+       uint8_t mask = 0xff;
+
+       /*
+        * look for an unused 5tuple filter index,
+        * and insert the filter into the list.
+        */
+       for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
+               idx = i / (sizeof(uint32_t) * NBBY);
+               shift = i % (sizeof(uint32_t) * NBBY);
+               if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
+                       filter_info->fivetuple_mask[idx] |= 1 << shift;
+                       filter->index = i;
+                       TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
+                                         filter,
+                                         entries);
+                       break;
+               }
+       }
+       if (i >= IXGBE_MAX_FTQF_FILTERS) {
+               PMD_DRV_LOG(ERR, "5tuple filters are full.");
+               return -ENOSYS;
+       }
+
+       sdpqf = (uint32_t)(filter->filter_info.dst_port <<
+                               IXGBE_SDPQF_DSTPORT_SHIFT);
+       sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
+
+       ftqf = (uint32_t)(filter->filter_info.proto &
+               IXGBE_FTQF_PROTOCOL_MASK);
+       ftqf |= (uint32_t)((filter->filter_info.priority &
+               IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
+       if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
+               mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
+       if (filter->filter_info.dst_ip_mask == 0)
+               mask &= IXGBE_FTQF_DEST_ADDR_MASK;
+       if (filter->filter_info.src_port_mask == 0)
+               mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
+       if (filter->filter_info.dst_port_mask == 0)
+               mask &= IXGBE_FTQF_DEST_PORT_MASK;
+       if (filter->filter_info.proto_mask == 0)
+               mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
+       ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
+       ftqf |= IXGBE_FTQF_POOL_MASK_EN;
+       ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
+
+       IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
+       IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
+       IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
+       IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
+
+       l34timir |= IXGBE_L34T_IMIR_RESERVE;
+       l34timir |= (uint32_t)(filter->queue <<
+                               IXGBE_L34T_IMIR_QUEUE_SHIFT);
+       IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
+       return 0;
+}
+
+/*
+ * remove a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter that will be removed.
+ */
+static void
+ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+                       struct ixgbe_5tuple_filter *filter)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_filter_info *filter_info =
+               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+       uint16_t index = filter->index;
+
+       filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
+                               ~(1 << (index % (sizeof(uint32_t) * NBBY)));
+       TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
+       rte_free(filter);
+
+       IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
+}
+
+static int
+ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+       struct ixgbe_hw *hw;
+       uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+               return -EINVAL;
+
+       /* refuse mtu that requires the support of scattered packets when this
+        * feature has not been enabled before. */
+       if (!dev->data->scattered_rx &&
+           (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
+            dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+               return -EINVAL;
+
+       /*
+        * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
+        * request of the version 2.0 of the mailbox API.
+        * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
+        * of the mailbox API.
+        * The IXGBE_VF_SET_LPE request won't work with ixgbe PF drivers
+        * prior to 3.11.33, which contains the following change:
+        * "ixgbe: Enable jumbo frames support w/ SR-IOV"
+        */
+       ixgbevf_rlpml_set_vf(hw, max_frame);
+
+       /* update max frame size */
+       dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
+       return 0;
+}
+
+#define MAC_TYPE_FILTER_SUP_EXT(type)    do {\
+       if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\
+               return -ENOTSUP;\
+} while (0)
+
+static inline struct ixgbe_5tuple_filter *
+ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
+                       struct ixgbe_5tuple_filter_info *key)
+{
+       struct ixgbe_5tuple_filter *it;
+
+       TAILQ_FOREACH(it, filter_list, entries) {
+               if (memcmp(key, &it->filter_info,
+                       sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
+                       return it;
+               }
+       }
+       return NULL;
+}
+
+/* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info */
+static inline int
+ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
+                       struct ixgbe_5tuple_filter_info *filter_info)
+{
+       if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
+               filter->priority > IXGBE_5TUPLE_MAX_PRI ||
+               filter->priority < IXGBE_5TUPLE_MIN_PRI)
+               return -EINVAL;
+
+       switch (filter->dst_ip_mask) {
+       case UINT32_MAX:
+               filter_info->dst_ip_mask = 0;
+               filter_info->dst_ip = filter->dst_ip;
+               break;
+       case 0:
+               filter_info->dst_ip_mask = 1;
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
+               return -EINVAL;
+       }
+
+       switch (filter->src_ip_mask) {
+       case UINT32_MAX:
+               filter_info->src_ip_mask = 0;
+               filter_info->src_ip = filter->src_ip;
+               break;
+       case 0:
+               filter_info->src_ip_mask = 1;
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "invalid src_ip mask.");
+               return -EINVAL;
+       }
+
+       switch (filter->dst_port_mask) {
+       case UINT16_MAX:
+               filter_info->dst_port_mask = 0;
+               filter_info->dst_port = filter->dst_port;
+               break;
+       case 0:
+               filter_info->dst_port_mask = 1;
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "invalid dst_port mask.");
+               return -EINVAL;
+       }
+
+       switch (filter->src_port_mask) {
+       case UINT16_MAX:
+               filter_info->src_port_mask = 0;
+               filter_info->src_port = filter->src_port;
+               break;
+       case 0:
+               filter_info->src_port_mask = 1;
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "invalid src_port mask.");
+               return -EINVAL;
+       }
+
+       switch (filter->proto_mask) {
+       case UINT8_MAX:
+               filter_info->proto_mask = 0;
+               filter_info->proto =
+                       convert_protocol_type(filter->proto);
+               break;
+       case 0:
+               filter_info->proto_mask = 1;
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "invalid protocol mask.");
+               return -EINVAL;
+       }
+
+       filter_info->priority = (uint8_t)filter->priority;
+       return 0;
+}
+
+/*
+ * add or delete an ntuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ * add: if true, add filter, if false, remove filter
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
+                       struct rte_eth_ntuple_filter *ntuple_filter,
+                       bool add)
+{
+       struct ixgbe_filter_info *filter_info =
+               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+       struct ixgbe_5tuple_filter_info filter_5tuple;
+       struct ixgbe_5tuple_filter *filter;
+       int ret;
+
+       if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
+               PMD_DRV_LOG(ERR, "only 5tuple is supported.");
+               return -EINVAL;
+       }
+
+       memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
+       ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
+       if (ret < 0)
+               return ret;
+
+       filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
+                                        &filter_5tuple);
+       if (filter != NULL && add) {
+               PMD_DRV_LOG(ERR, "filter exists.");
+               return -EEXIST;
+       }
+       if (filter == NULL && !add) {
+               PMD_DRV_LOG(ERR, "filter doesn't exist.");
+               return -ENOENT;
+       }
+
+       if (add) {
+               filter = rte_zmalloc("ixgbe_5tuple_filter",
+                               sizeof(struct ixgbe_5tuple_filter), 0);
+               if (filter == NULL)
+                       return -ENOMEM;
+               (void)rte_memcpy(&filter->filter_info,
+                                &filter_5tuple,
+                                sizeof(struct ixgbe_5tuple_filter_info));
+               filter->queue = ntuple_filter->queue;
+               ret = ixgbe_add_5tuple_filter(dev, filter);
+               if (ret < 0) {
+                       rte_free(filter);
+                       return ret;
+               }
+       } else
+               ixgbe_remove_5tuple_filter(dev, filter);
+
+       return 0;
+}
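A sketch of adding a 5-tuple rule through the same filter_ctrl entry point (address, port and queue values are illustrative assumptions). Per ntuple_filter_to_5tuple() above, a mask of UINT32_MAX/UINT16_MAX/UINT8_MAX means "compare this field" and 0 means "ignore it":

        #include <netinet/in.h>
        #include <rte_byteorder.h>
        #include <rte_ethdev.h>

        struct rte_eth_ntuple_filter ntuple = {
                .flags         = RTE_5TUPLE_FLAGS,
                .dst_ip        = rte_cpu_to_be_32(0xC0A80001), /* 192.168.0.1 */
                .dst_ip_mask   = UINT32_MAX,    /* match dst_ip */
                .src_ip_mask   = 0,             /* ignore src_ip */
                .dst_port      = rte_cpu_to_be_16(80),
                .dst_port_mask = UINT16_MAX,
                .src_port_mask = 0,
                .proto         = IPPROTO_TCP,
                .proto_mask    = UINT8_MAX,
                .priority      = 1,             /* within 5TUPLE_MIN/MAX_PRI */
                .queue         = 2,
        };

        rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_NTUPLE,
                                RTE_ETH_FILTER_ADD, &ntuple);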
+
+/*
+ * get an ntuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
+                       struct rte_eth_ntuple_filter *ntuple_filter)
+{
+       struct ixgbe_filter_info *filter_info =
+               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+       struct ixgbe_5tuple_filter_info filter_5tuple;
+       struct ixgbe_5tuple_filter *filter;
+       int ret;
+
+       if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
+               PMD_DRV_LOG(ERR, "only 5tuple is supported.");
+               return -EINVAL;
+       }
+
+       memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
+       ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
+       if (ret < 0)
+               return ret;
+
+       filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
+                                        &filter_5tuple);
+       if (filter == NULL) {
+               PMD_DRV_LOG(ERR, "filter doesn't exist.");
+               return -ENOENT;
+       }
+       ntuple_filter->queue = filter->queue;
+       return 0;
+}
+
+/*
+ * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op: operation to be taken.
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
+                               enum rte_filter_op filter_op,
+                               void *arg)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int ret;
+
+       MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+       if (filter_op == RTE_ETH_FILTER_NOP)
+               return 0;
+
+       if (arg == NULL) {
+               PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+                           filter_op);
+               return -EINVAL;
+       }
+
+       switch (filter_op) {
+       case RTE_ETH_FILTER_ADD:
+               ret = ixgbe_add_del_ntuple_filter(dev,
+                       (struct rte_eth_ntuple_filter *)arg,
+                       TRUE);
+               break;
+       case RTE_ETH_FILTER_DELETE:
+               ret = ixgbe_add_del_ntuple_filter(dev,
+                       (struct rte_eth_ntuple_filter *)arg,
+                       FALSE);
+               break;
+       case RTE_ETH_FILTER_GET:
+               ret = ixgbe_get_ntuple_filter(dev,
+                       (struct rte_eth_ntuple_filter *)arg);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+               ret = -EINVAL;
+               break;
+       }
+       return ret;
+}
+
+static inline int
+ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
+                       uint16_t ethertype)
+{
+       int i;
+
+       for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+               if (filter_info->ethertype_filters[i] == ethertype &&
+                   (filter_info->ethertype_mask & (1 << i)))
+                       return i;
+       }
+       return -1;
+}
+
+static inline int
+ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
+                       uint16_t ethertype)
+{
+       int i;
+
+       for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+               if (!(filter_info->ethertype_mask & (1 << i))) {
+                       filter_info->ethertype_mask |= 1 << i;
+                       filter_info->ethertype_filters[i] = ethertype;
+                       return i;
+               }
+       }
+       return -1;
+}
+
+static inline int
+ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
+                       uint8_t idx)
+{
+       if (idx >= IXGBE_MAX_ETQF_FILTERS)
+               return -1;
+       filter_info->ethertype_mask &= ~(1 << idx);
+       filter_info->ethertype_filters[idx] = 0;
+       return idx;
+}
+
+static int
+ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+                       struct rte_eth_ethertype_filter *filter,
+                       bool add)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_filter_info *filter_info =
+               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+       uint32_t etqf = 0;
+       uint32_t etqs = 0;
+       int ret;
+
+       if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
+               return -EINVAL;
+
+       if (filter->ether_type == ETHER_TYPE_IPv4 ||
+               filter->ether_type == ETHER_TYPE_IPv6) {
+               PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
+                       " ethertype filter.", filter->ether_type);
+               return -EINVAL;
+       }
+
+       if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+               PMD_DRV_LOG(ERR, "mac compare is unsupported.");
+               return -EINVAL;
+       }
+       if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+               PMD_DRV_LOG(ERR, "drop option is unsupported.");
+               return -EINVAL;
+       }
+
+       ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
+       if (ret >= 0 && add) {
+               PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
+                           filter->ether_type);
+               return -EEXIST;
+       }
+       if (ret < 0 && !add) {
+               PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+                           filter->ether_type);
+               return -ENOENT;
+       }
+
+       if (add) {
+               ret = ixgbe_ethertype_filter_insert(filter_info,
+                       filter->ether_type);
+               if (ret < 0) {
+                       PMD_DRV_LOG(ERR, "ethertype filters are full.");
+                       return -ENOSPC;
+               }
+               etqf = IXGBE_ETQF_FILTER_EN;
+               etqf |= (uint32_t)filter->ether_type;
+               etqs |= (uint32_t)((filter->queue <<
+                                   IXGBE_ETQS_RX_QUEUE_SHIFT) &
+                                   IXGBE_ETQS_RX_QUEUE);
+               etqs |= IXGBE_ETQS_QUEUE_EN;
+       } else {
+               ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
+               if (ret < 0)
+                       return -ENOSYS;
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
+       IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return 0;
+}
+
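Application-side usage sketch (editor's illustration, not part of this patch): adding an EtherType filter through the generic API. The checks above mean the IPv4/IPv6 EtherTypes and the MAC-compare/drop flags are rejected, so only a plain EtherType-to-queue mapping is programmable; 0x88f7 (IEEE 1588/PTP) and the queue value are illustrative.

    #include <stdint.h>
    #include <rte_ethdev.h>

    /* Route PTP frames (EtherType 0x88f7) to a dedicated RX queue. */
    static int
    steer_ptp_frames(uint8_t port_id, uint16_t queue)
    {
    	struct rte_eth_ethertype_filter filter = {
    		.ether_type = 0x88f7,	/* must not be 0x0800 (IPv4) or 0x86dd (IPv6) */
    		.flags = 0,		/* RTE_ETHTYPE_FLAGS_MAC/_DROP are rejected here */
    		.queue = queue,
    	};

    	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
    				       RTE_ETH_FILTER_ADD, &filter);
    }
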
+static int
+ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
+                       struct rte_eth_ethertype_filter *filter)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_filter_info *filter_info =
+               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+       uint32_t etqf, etqs;
+       int ret;
+
+       ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
+       if (ret < 0) {
+               PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+                           filter->ether_type);
+               return -ENOENT;
+       }
+
+       etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
+       if (etqf & IXGBE_ETQF_FILTER_EN) {
+               etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
+               filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
+               filter->flags = 0;
+               filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
+                              IXGBE_ETQS_RX_QUEUE_SHIFT;
+               return 0;
+       }
+       return -ENOENT;
+}
+
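Reading the same filter back (editor's sketch; a fragment that assumes port_id is in scope): RTE_ETH_FILTER_GET fills in the queue from the live ETQF/ETQS registers rather than from cached driver state.

    struct rte_eth_ethertype_filter query = { .ether_type = 0x88f7 };

    if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
    			    RTE_ETH_FILTER_GET, &query) == 0)
    	printf("EtherType 0x%04x -> RX queue %u\n",
    	       query.ether_type, query.queue);
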
+/*
+ * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op: operation to be performed
+ * @arg: a pointer to the specific structure corresponding to filter_op
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
+                               enum rte_filter_op filter_op,
+                               void *arg)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int ret;
+
+       MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+       if (filter_op == RTE_ETH_FILTER_NOP)
+               return 0;
+
+       if (arg == NULL) {
+               PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+                           filter_op);
+               return -EINVAL;
+       }
+
+       switch (filter_op) {
+       case RTE_ETH_FILTER_ADD:
+               ret = ixgbe_add_del_ethertype_filter(dev,
+                       (struct rte_eth_ethertype_filter *)arg,
+                       TRUE);
+               break;
+       case RTE_ETH_FILTER_DELETE:
+               ret = ixgbe_add_del_ethertype_filter(dev,
+                       (struct rte_eth_ethertype_filter *)arg,
+                       FALSE);
+               break;
+       case RTE_ETH_FILTER_GET:
+               ret = ixgbe_get_ethertype_filter(dev,
+                       (struct rte_eth_ethertype_filter *)arg);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+               ret = -EINVAL;
+               break;
+       }
+       return ret;
+}
+
+static int
+ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
+                    enum rte_filter_type filter_type,
+                    enum rte_filter_op filter_op,
+                    void *arg)
+{
+       int ret = -EINVAL;
+
+       switch (filter_type) {
+       case RTE_ETH_FILTER_NTUPLE:
+               ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
+               break;
+       case RTE_ETH_FILTER_ETHERTYPE:
+               ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
+               break;
+       case RTE_ETH_FILTER_SYN:
+               ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
+               break;
+       case RTE_ETH_FILTER_FDIR:
+               ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
+               break;
+       default:
+               PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+                                                       filter_type);
+               break;
+       }
+
+       return ret;
+}
+
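One way an application can exercise this dispatcher before committing to a filter type (editor's sketch): rte_eth_dev_filter_supported() issues RTE_ETH_FILTER_NOP with a NULL arg, which each handler above answers with 0 once its MAC-type check passes.

    #include <stdint.h>
    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    probe_filter_support(uint8_t port_id)
    {
    	static const enum rte_filter_type types[] = {
    		RTE_ETH_FILTER_NTUPLE, RTE_ETH_FILTER_ETHERTYPE,
    		RTE_ETH_FILTER_SYN, RTE_ETH_FILTER_FDIR,
    	};
    	unsigned int i;

    	for (i = 0; i < sizeof(types) / sizeof(types[0]); i++)
    		printf("filter type %d: %ssupported\n", types[i],
    		       rte_eth_dev_filter_supported(port_id, types[i]) == 0 ?
    		       "" : "not ");
    }
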
+static struct rte_driver rte_ixgbe_driver = {
+       .type = PMD_PDEV,
+       .init = rte_ixgbe_pmd_init,
+};
+
+static struct rte_driver rte_ixgbevf_driver = {
+       .type = PMD_PDEV,
+       .init = rte_ixgbevf_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(rte_ixgbe_driver);
+PMD_REGISTER_DRIVER(rte_ixgbevf_driver);
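
For completeness, a sketch of the boot path that consumes these registrations (editor's illustration, not part of this patch): PMD_REGISTER_DRIVER() queues each rte_driver via a constructor, and rte_eal_init() then calls the .init hooks (rte_ixgbe_pmd_init() and rte_ixgbevf_pmd_init()), which register the eth_driver and probe matching PCI devices.

    #include <stdio.h>
    #include <rte_eal.h>
    #include <rte_ethdev.h>

    int
    main(int argc, char **argv)
    {
    	/* Runs the registered .init hooks, so ixgbe ports are probed here. */
    	if (rte_eal_init(argc, argv) < 0)
    		return -1;

    	printf("%u ethernet ports available\n", rte_eth_dev_count());
    	return 0;
    }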