diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index a50bce4..b341dd0 100644
@@ -1,13 +1,13 @@
 /*-
  *   BSD LICENSE
- * 
+ *
  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
- * 
+ *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
  *   are met:
- * 
+ *
  *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above copyright
@@ -17,7 +17,7 @@
  *     * Neither the name of Intel Corporation nor the names of its
  *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
- * 
+ *
  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -39,6 +39,7 @@
 #include <unistd.h>
 #include <stdarg.h>
 #include <inttypes.h>
+#include <netinet/in.h>
 #include <rte_byteorder.h>
 #include <rte_common.h>
 #include <rte_cycles.h>
@@ -58,6 +59,7 @@
 #include <rte_ethdev.h>
 #include <rte_atomic.h>
 #include <rte_malloc.h>
+#include <rte_random.h>
 #include <rte_dev.h>
 
 #include "ixgbe_logs.h"
@@ -66,6 +68,7 @@
 #include "ixgbe/ixgbe_common.h"
 #include "ixgbe_ethdev.h"
 #include "ixgbe_bypass.h"
+#include "ixgbe_rxtx.h"
 
 /*
  * High threshold controlling when to start sending XOFF frames. Must be at
 #define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
 #define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
 
+#define IXGBE_MMW_SIZE_DEFAULT        0x4
+#define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
+
+/*
+ *  Default values for RX/TX configuration
+ */
+#define IXGBE_DEFAULT_RX_FREE_THRESH  32
+#define IXGBE_DEFAULT_RX_PTHRESH      8
+#define IXGBE_DEFAULT_RX_HTHRESH      8
+#define IXGBE_DEFAULT_RX_WTHRESH      0
+
+#define IXGBE_DEFAULT_TX_FREE_THRESH  32
+#define IXGBE_DEFAULT_TX_PTHRESH      32
+#define IXGBE_DEFAULT_TX_HTHRESH      0
+#define IXGBE_DEFAULT_TX_WTHRESH      0
+#define IXGBE_DEFAULT_TX_RSBIT_THRESH 32
+
+/* Bit shift and mask */
+#define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)
+#define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t)
+#define IXGBE_8_BIT_WIDTH  CHAR_BIT
+#define IXGBE_8_BIT_MASK   UINT8_MAX
 
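
The width/mask helpers above feed the 4-bit RETA field handling introduced
later in this patch. A minimal standalone sketch of what they evaluate to
(RTE_LEN2MASK comes from rte_common.h):

    #include <limits.h>     /* CHAR_BIT */
    #include <stdint.h>
    #include <stdio.h>
    #include <rte_common.h> /* RTE_LEN2MASK */

    #define IXGBE_4_BIT_WIDTH  (CHAR_BIT / 2)                           /* 4 */
    #define IXGBE_4_BIT_MASK   RTE_LEN2MASK(IXGBE_4_BIT_WIDTH, uint8_t) /* 0x0f */

    int
    main(void)
    {
            /* Each RETA register packs four 8-bit entries; the 4-bit mask
             * selects which entries of a group of four get written. */
            printf("width=%d mask=0x%02x\n", IXGBE_4_BIT_WIDTH, IXGBE_4_BIT_MASK);
            return 0;
    }
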
 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
 
@@ -97,6 +122,8 @@ static int eth_ixgbe_dev_init(struct eth_driver *eth_drv,
 static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
 static int  ixgbe_dev_start(struct rte_eth_dev *dev);
 static void ixgbe_dev_stop(struct rte_eth_dev *dev);
+static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
+static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
 static void ixgbe_dev_close(struct rte_eth_dev *dev);
 static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
 static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
@@ -112,11 +139,15 @@ static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                             uint8_t stat_idx,
                                             uint8_t is_rx);
 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
-                               struct rte_eth_dev_info *dev_info);
+                              struct rte_eth_dev_info *dev_info);
+static void ixgbevf_dev_info_get(struct rte_eth_dev *dev,
+                                struct rte_eth_dev_info *dev_info);
+static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
 static void ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
-static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, 
+static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
                uint16_t queue, bool on);
 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
                int on);
@@ -128,14 +159,18 @@ static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
 
 static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
 static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
-static int  ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
-               struct rte_eth_fc_conf *fc_conf);
+static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
+                              struct rte_eth_fc_conf *fc_conf);
+static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
+                              struct rte_eth_fc_conf *fc_conf);
 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                struct rte_eth_pfc_conf *pfc_conf);
 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
-               struct rte_eth_rss_reta *reta_conf);
+                       struct rte_eth_rss_reta_entry64 *reta_conf,
+                       uint16_t reta_size);
 static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
-               struct rte_eth_rss_reta *reta_conf);    
+                       struct rte_eth_rss_reta_entry64 *reta_conf,
+                       uint16_t reta_size);
 static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
 static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
@@ -159,7 +194,7 @@ static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
 static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *stats);
 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
-static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, 
+static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
                uint16_t queue, int on);
@@ -170,18 +205,53 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
 static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
                ether_addr* mac_addr,uint8_t on);
 static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on);
-static int  ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev,  uint16_t pool, 
+static int  ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev,  uint16_t pool,
                uint16_t rx_mask, uint8_t on);
 static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
 static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
-static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan, 
+static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
                uint64_t pool_mask,uint8_t vlan_on);
-static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev, 
-               struct rte_eth_vmdq_mirror_conf *mirror_conf, 
+static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
+               struct rte_eth_vmdq_mirror_conf *mirror_conf,
                uint8_t rule_id, uint8_t on);
 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
                uint8_t rule_id);
 
+static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+               uint16_t queue_idx, uint16_t tx_rate);
+static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+               uint16_t tx_rate, uint64_t q_msk);
+
+static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
+                                struct ether_addr *mac_addr,
+                                uint32_t index, uint32_t pool);
+static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
+static int ixgbe_add_syn_filter(struct rte_eth_dev *dev,
+                       struct rte_syn_filter *filter, uint16_t rx_queue);
+static int ixgbe_remove_syn_filter(struct rte_eth_dev *dev);
+static int ixgbe_get_syn_filter(struct rte_eth_dev *dev,
+                       struct rte_syn_filter *filter, uint16_t *rx_queue);
+static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+                       struct rte_5tuple_filter *filter, uint16_t rx_queue);
+static int ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+                       uint16_t index);
+static int ixgbe_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+                       struct rte_5tuple_filter *filter, uint16_t *rx_queue);
+
+static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
+static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+                       struct rte_eth_ethertype_filter *filter,
+                       bool add);
+static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
+                               enum rte_filter_op filter_op,
+                               void *arg);
+static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
+                       struct rte_eth_ethertype_filter *filter);
+static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
+                    enum rte_filter_type filter_type,
+                    enum rte_filter_op filter_op,
+                    void *arg);
+
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
  */
@@ -206,13 +276,13 @@ static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
                uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] |= 1 << bit;\
        }while(0)
-       
+
 #define IXGBE_CLEAR_HWSTRIP(h, q) do{\
                uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
                (h)->bitmap[idx] &= ~(1 << bit);\
        }while(0)
+
 #define IXGBE_GET_HWSTRIP(h, q, r) do{\
                uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
@@ -246,6 +316,8 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .dev_configure        = ixgbe_dev_configure,
        .dev_start            = ixgbe_dev_start,
        .dev_stop             = ixgbe_dev_stop,
+       .dev_set_link_up    = ixgbe_dev_set_link_up,
+       .dev_set_link_down  = ixgbe_dev_set_link_down,
        .dev_close            = ixgbe_dev_close,
        .promiscuous_enable   = ixgbe_dev_promiscuous_enable,
        .promiscuous_disable  = ixgbe_dev_promiscuous_disable,
@@ -256,10 +328,15 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .stats_reset          = ixgbe_dev_stats_reset,
        .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
        .dev_infos_get        = ixgbe_dev_info_get,
+       .mtu_set              = ixgbe_dev_mtu_set,
        .vlan_filter_set      = ixgbe_vlan_filter_set,
        .vlan_tpid_set        = ixgbe_vlan_tpid_set,
        .vlan_offload_set     = ixgbe_vlan_offload_set,
        .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
+       .rx_queue_start       = ixgbe_dev_rx_queue_start,
+       .rx_queue_stop        = ixgbe_dev_rx_queue_stop,
+       .tx_queue_start       = ixgbe_dev_tx_queue_start,
+       .tx_queue_stop        = ixgbe_dev_tx_queue_stop,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .rx_queue_count       = ixgbe_dev_rx_queue_count,
@@ -268,18 +345,21 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
        .dev_led_on           = ixgbe_dev_led_on,
        .dev_led_off          = ixgbe_dev_led_off,
+       .flow_ctrl_get        = ixgbe_flow_ctrl_get,
        .flow_ctrl_set        = ixgbe_flow_ctrl_set,
        .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
        .mac_addr_add         = ixgbe_add_rar,
        .mac_addr_remove      = ixgbe_remove_rar,
        .uc_hash_table_set    = ixgbe_uc_hash_table_set,
        .uc_all_hash_table_set  = ixgbe_uc_all_hash_table_set,
-       .mirror_rule_set        = ixgbe_mirror_rule_set,
-       .mirror_rule_reset      = ixgbe_mirror_rule_reset,
+       .mirror_rule_set      = ixgbe_mirror_rule_set,
+       .mirror_rule_reset    = ixgbe_mirror_rule_reset,
        .set_vf_rx_mode       = ixgbe_set_pool_rx_mode,
        .set_vf_rx            = ixgbe_set_pool_rx,
        .set_vf_tx            = ixgbe_set_pool_tx,
        .set_vf_vlan_filter   = ixgbe_set_pool_vlan_filter,
+       .set_queue_rate_limit = ixgbe_set_queue_rate_limit,
+       .set_vf_rate_limit    = ixgbe_set_vf_rate_limit,
        .fdir_add_signature_filter    = ixgbe_fdir_add_signature_filter,
        .fdir_update_signature_filter = ixgbe_fdir_update_signature_filter,
        .fdir_remove_signature_filter = ixgbe_fdir_remove_signature_filter,
@@ -301,6 +381,15 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .bypass_ver_show      = ixgbe_bypass_ver_show,
        .bypass_wd_reset      = ixgbe_bypass_wd_reset,
 #endif /* RTE_NIC_BYPASS */
+       .rss_hash_update      = ixgbe_dev_rss_hash_update,
+       .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
+       .add_syn_filter          = ixgbe_add_syn_filter,
+       .remove_syn_filter       = ixgbe_remove_syn_filter,
+       .get_syn_filter          = ixgbe_get_syn_filter,
+       .add_5tuple_filter       = ixgbe_add_5tuple_filter,
+       .remove_5tuple_filter    = ixgbe_remove_5tuple_filter,
+       .get_5tuple_filter       = ixgbe_get_5tuple_filter,
+       .filter_ctrl             = ixgbe_dev_filter_ctrl,
 };
 
 /*
@@ -316,7 +405,8 @@ static struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .stats_get            = ixgbevf_dev_stats_get,
        .stats_reset          = ixgbevf_dev_stats_reset,
        .dev_close            = ixgbevf_dev_close,
-       .dev_infos_get        = ixgbe_dev_info_get,
+       .dev_infos_get        = ixgbevf_dev_info_get,
+       .mtu_set              = ixgbevf_dev_set_mtu,
        .vlan_filter_set      = ixgbevf_vlan_filter_set,
        .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
        .vlan_offload_set     = ixgbevf_vlan_offload_set,
@@ -324,6 +414,8 @@ static struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
+       .mac_addr_add         = ixgbevf_add_mac_addr,
+       .mac_addr_remove      = ixgbevf_remove_mac_addr,
 };
 
 /**
@@ -419,9 +511,9 @@ ixgbe_enable_intr(struct rte_eth_dev *dev)
 {
        struct ixgbe_interrupt *intr =
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-       struct ixgbe_hw *hw = 
+       struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       
+
        IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
        IXGBE_WRITE_FLUSH(hw);
 }
@@ -480,15 +572,19 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
        uint32_t q_map;
        uint8_t n, offset;
 
-       if ((hw->mac.type != ixgbe_mac_82599EB) && (hw->mac.type != ixgbe_mac_X540))
+       if ((hw->mac.type != ixgbe_mac_82599EB) &&
+               (hw->mac.type != ixgbe_mac_X540) &&
+               (hw->mac.type != ixgbe_mac_X550) &&
+               (hw->mac.type != ixgbe_mac_X550EM_x))
                return -ENOSYS;
 
-       PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d\n",
-                    (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx);
+       PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d",
+                    (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+                    queue_id, stat_idx);
 
        n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
        if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
-               PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded\n");
+               PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
                return -EIO;
        }
        offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
@@ -508,19 +604,20 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
        else
                stat_mappings->rqsmr[n] |= qsmr_mask;
 
-       PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d\n"
-                    "%s[%d] = 0x%08x\n",
-                    (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx,
-                    is_rx ? "RQSMR" : "TQSM",n, is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
+       PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d",
+                    (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+                    queue_id, stat_idx);
+       PMD_INIT_LOG(INFO, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
+                    is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
 
        /* Now write the mapping in the appropriate register */
        if (is_rx) {
-               PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d\n",
+               PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d",
                             stat_mappings->rqsmr[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
        }
        else {
-               PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d\n",
+               PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d",
                             stat_mappings->tqsm[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
        }
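
The queue-to-counter mapping above is reached through the generic ethdev
API. A minimal sketch, assuming port 0 is an ixgbe port and picking
arbitrary queue/counter numbers:

    #include <rte_ethdev.h>

    /* Map RX queue 3 of port 0 onto stat counter index 5 (all numbers
     * are assumptions). Only 82599/X540/X550 support the mapping
     * registers; other MACs make the driver return -ENOSYS. */
    static int
    map_rxq_stats(void)
    {
            return rte_eth_dev_set_rx_queue_stats_mapping(0, 3, 5);
    }
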
@@ -560,7 +657,7 @@ ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
                tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
                                 (uint8_t)(100/dcb_max_tc + (i & 1));
                tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = i;
-               tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 
+               tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
                                 (uint8_t)(100/dcb_max_tc + (i & 1));
                tc->pfc = ixgbe_dcb_pfc_disabled;
        }
@@ -580,12 +677,14 @@ ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
        /* support all DCB capabilities in 82599 */
        dcb_config->support.capabilities = 0xFF;
 
-       /*we only support 4 Tcs for X540*/              
-       if (hw->mac.type == ixgbe_mac_X540) {
+       /* we only support 4 TCs for X540, X550 */
+       if (hw->mac.type == ixgbe_mac_X540 ||
+               hw->mac.type == ixgbe_mac_X550 ||
+               hw->mac.type == ixgbe_mac_X550EM_x) {
                dcb_config->num_tcs.pg_tcs = 4;
                dcb_config->num_tcs.pfc_tcs = 4;
        }
-} 
+}
 
 /*
  * Ensure that all locks are released before first NVM or PHY access
@@ -603,7 +702,7 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
         */
        mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
        if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
-                  DEBUGOUT1("SWFW phy%d lock released", hw->bus.func);
+               PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
        }
        ixgbe_release_swfw_semaphore(hw, mask);
 
@@ -615,7 +714,7 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
         */
        mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
        if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
-                  DEBUGOUT("SWFW common locks released");
+               PMD_DRV_LOG(DEBUG, "SWFW common locks released");
        }
        ixgbe_release_swfw_semaphore(hw, mask);
 }
@@ -633,7 +732,7 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
                IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_vfta * shadow_vfta =
                IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
-       struct ixgbe_hwstrip *hwstrip = 
+       struct ixgbe_hwstrip *hwstrip =
                IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
        struct ixgbe_dcb_config *dcb_config =
                IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
@@ -647,10 +746,24 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
        eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
 
-       /* for secondary processes, we don't initialise any further as primary
+       /*
+        * For secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
-        * RX function */
+        * RX and TX function.
+        */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+               struct igb_tx_queue *txq;
+               /* TX queue function in primary process was set by the last
+                * queue initialized; Tx queues may not have been initialized
+                * by the primary process yet. */
+               if (eth_dev->data->tx_queues) {
+                       txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
+                       set_tx_function(eth_dev, txq);
+               } else {
+                       /* Use default TX function if we get here */
+                       PMD_INIT_LOG(INFO, "No TX queues configured yet. "
+                                          "Using default TX function.");
+               }
+
                if (eth_dev->data->scattered_rx)
                        eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
                return 0;
@@ -661,11 +774,9 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
-#ifdef RTE_LIBRTE_IXGBE_ALLOW_UNSUPPORTED_SFP
        hw->allow_unsupported_sfp = 1;
-#endif
 
-       /* Initialize the shared code */
+       /* Initialize the shared code (base driver) */
 #ifdef RTE_NIC_BYPASS
        diag = ixgbe_bypass_init_shared_code(hw);
 #else
@@ -727,11 +838,12 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        if (diag == IXGBE_ERR_EEPROM_VERSION) {
                PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
                    "LOM.  Please be aware there may be issues associated "
-                   "with your hardware.\n If you are experiencing problems "
+                   "with your hardware.");
+               PMD_INIT_LOG(ERR, "If you are experiencing problems "
                    "please contact your Intel or hardware representative "
-                   "who provided you with this hardware.\n");
+                   "who provided you with this hardware.");
        } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
-               PMD_INIT_LOG(ERR, "Unsupported SFP+ Module\n");
+               PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
        if (diag) {
                PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
                return -EIO;
@@ -756,7 +868,7 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);
-       
+
        /* Allocate memory for storing hash filter MAC addresses */
        eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
                        IXGBE_VMDQ_NUM_UC_MAC, 0);
@@ -785,12 +897,11 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        IXGBE_WRITE_FLUSH(hw);
 
        if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
-               PMD_INIT_LOG(DEBUG,
-                            "MAC: %d, PHY: %d, SFP+: %d<n",
+               PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
                             (int) hw->mac.type, (int) hw->phy.type,
                             (int) hw->phy.sfp_type);
        else
-               PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d\n",
+               PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
                             (int) hw->mac.type, (int) hw->phy.type);
 
        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
@@ -835,6 +946,22 @@ ixgbevf_negotiate_api(struct ixgbe_hw *hw)
                ;
 }
 
+static void
+generate_random_mac_addr(struct ether_addr *mac_addr)
+{
+       uint64_t random;
+
+       /* Set Organizationally Unique Identifier (OUI) prefix. */
+       mac_addr->addr_bytes[0] = 0x00;
+       mac_addr->addr_bytes[1] = 0x09;
+       mac_addr->addr_bytes[2] = 0xC0;
+       /* Force indication of locally assigned MAC address. */
+       mac_addr->addr_bytes[0] |= ETHER_LOCAL_ADMIN_ADDR;
+       /* Generate the last 3 bytes of the MAC address with a random number. */
+       random = rte_rand();
+       memcpy(&mac_addr->addr_bytes[3], &random, 3);
+}
+
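
The helper keeps the fixed 00:09:C0 OUI but ORs in the locally-administered
bit, so the result cannot collide with a factory-burned address. A minimal
standalone check of the two properties it guarantees (the helper name below
is illustrative, not part of the patch):

    #include <rte_ether.h>

    /* True iff the address is unicast (I/G bit clear) and locally
     * administered (U/L bit set) -- exactly what
     * generate_random_mac_addr() produces. */
    static int
    mac_is_local_unicast(const struct ether_addr *ea)
    {
            return !(ea->addr_bytes[0] & 0x01) &&
                   (ea->addr_bytes[0] & ETHER_LOCAL_ADMIN_ADDR);
    }
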
 /*
  * Virtual Function device init
  */
@@ -849,10 +976,11 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
                IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
        struct ixgbe_vfta * shadow_vfta =
                IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
-       struct ixgbe_hwstrip *hwstrip = 
+       struct ixgbe_hwstrip *hwstrip =
                IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
+       struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
 
-       PMD_INIT_LOG(DEBUG, "eth_ixgbevf_dev_init");
+       PMD_INIT_FUNC_TRACE();
 
        eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
        eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
@@ -879,7 +1007,7 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        /* initialize the hw strip bitmap*/
        memset(hwstrip, 0, sizeof(*hwstrip));
 
-       /* Initialize the shared code */
+       /* Initialize the shared code (base driver) */
        diag = ixgbe_init_shared_code(hw);
        if (diag != IXGBE_SUCCESS) {
                PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
@@ -892,16 +1020,16 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        /* Disable the interrupts for VF */
        ixgbevf_intr_disable(hw);
 
-       hw->mac.num_rar_entries = hw->mac.max_rx_queues;
+       hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
        diag = hw->mac.ops.reset_hw(hw);
 
-       if (diag != IXGBE_SUCCESS) {
+       /*
+        * The VF reset operation returns the IXGBE_ERR_INVALID_MAC_ADDR when
+        * the underlying PF driver has not assigned a MAC address to the VF.
+        * In this case, assign a random MAC address.
+        */
+       if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
                PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
-                       RTE_LOG(ERR, PMD, "\tThe MAC address is not valid.\n"
-                                       "\tThe most likely cause of this error is that the VM host\n"
-                                       "\thas not assigned a valid MAC address to this VF device.\n"
-                                       "\tPlease consult the DPDK Release Notes (FAQ section) for\n"
-                                       "\ta possible solution to this problem.\n");
                return (diag);
        }
 
@@ -922,9 +1050,28 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
                return -ENOMEM;
        }
 
+       /* Generate a random MAC address, if none was assigned by PF. */
+       if (is_zero_ether_addr(perm_addr)) {
+               generate_random_mac_addr(perm_addr);
+               diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
+               if (diag) {
+                       rte_free(eth_dev->data->mac_addrs);
+                       eth_dev->data->mac_addrs = NULL;
+                       return diag;
+               }
+               PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
+               PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
+                            "%02x:%02x:%02x:%02x:%02x:%02x",
+                            perm_addr->addr_bytes[0],
+                            perm_addr->addr_bytes[1],
+                            perm_addr->addr_bytes[2],
+                            perm_addr->addr_bytes[3],
+                            perm_addr->addr_bytes[4],
+                            perm_addr->addr_bytes[5]);
+       }
+
        /* Copy the permanent MAC address */
-       ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
-                       &eth_dev->data->mac_addrs[0]);
+       ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
 
        /* reset the hardware with the new settings */
        diag = hw->mac.ops.start_hw(hw);
@@ -937,9 +1084,9 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
                        return (-EIO);
        }
 
-       PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x mac.type=%s\n",
-                        eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id,
-                        "ixgbe_mac_82599_vf");
+       PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
+                    eth_dev->data->port_id, pci_dev->id.vendor_id,
+                    pci_dev->id.device_id, "ixgbe_mac_82599_vf");
 
        return 0;
 }
@@ -948,7 +1095,7 @@ static struct eth_driver rte_ixgbe_pmd = {
        {
                .name = "rte_ixgbe_pmd",
                .id_table = pci_id_ixgbe_map,
-               .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
+               .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        },
        .eth_dev_init = eth_ixgbe_dev_init,
        .dev_private_size = sizeof(struct ixgbe_adapter),
@@ -961,7 +1108,7 @@ static struct eth_driver rte_ixgbevf_pmd = {
        {
                .name = "rte_ixgbevf_pmd",
                .id_table = pci_id_ixgbevf_map,
-               .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
+               .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        },
        .eth_dev_init = eth_ixgbevf_dev_init,
        .dev_private_size = sizeof(struct ixgbe_adapter),
@@ -989,7 +1136,7 @@ rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unuse
 static int
 rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
 {
-       DEBUGFUNC("rte_ixgbevf_pmd_init");
+       PMD_INIT_FUNC_TRACE();
 
        rte_eth_driver_register(&rte_ixgbevf_pmd);
        return (0);
@@ -1080,10 +1227,10 @@ ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
                IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
 }
 
-static void 
+static void
 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 {
-       struct ixgbe_hwstrip *hwstrip = 
+       struct ixgbe_hwstrip *hwstrip =
                IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
 
        if(queue >= IXGBE_MAX_RX_QUEUE_NUM)
@@ -1194,7 +1341,7 @@ ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
                        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
 
                        /* record those setting for HW strip per queue */
-                       ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);                      
+                       ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
                }
        }
 }
@@ -1304,19 +1451,22 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_vf_info *vfinfo =
+               *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
        int err, link_up = 0, negotiate = 0;
        uint32_t speed = 0;
        int mask = 0;
        int status;
-       
+       uint16_t vf, idx;
+
        PMD_INIT_FUNC_TRACE();
 
        /* IXGBE devices don't support half duplex */
        if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
                        (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
-               PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu\n",
-                               dev->data->dev_conf.link_duplex,
-                               dev->data->port_id);
+               PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
+                            dev->data->dev_conf.link_duplex,
+                            dev->data->port_id);
                return -EINVAL;
        }
 
@@ -1330,21 +1480,26 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        if (status != 0)
                return -1;
        hw->mac.ops.start_hw(hw);
+       hw->mac.get_link_status = true;
 
        /* configure PF module if SRIOV enabled */
        ixgbe_pf_host_configure(dev);
 
        /* initialize transmission unit */
        ixgbe_dev_tx_init(dev);
-      
+
        /* This can fail when allocating mbufs for descriptor rings */
        err = ixgbe_dev_rx_init(dev);
        if (err) {
-               PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n");
+               PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
                goto error;
        }
 
-       ixgbe_dev_rxtx_start(dev);
+       err = ixgbe_dev_rxtx_start(dev);
+       if (err < 0) {
+               PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
+               goto error;
+       }
 
        if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
                err = hw->mac.ops.setup_sfp(hw);
@@ -1363,6 +1518,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        err = ixgbe_check_link(hw, &speed, &link_up, 0);
        if (err)
                goto error;
+       dev->data->dev_link.link_status = link_up;
+
        err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
        if (err)
                goto error;
@@ -1387,13 +1544,13 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
                speed = IXGBE_LINK_SPEED_10GB_FULL;
                break;
        default:
-               PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu\n",
-                               dev->data->dev_conf.link_speed,
-                               dev->data->port_id);
+               PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu",
+                            dev->data->dev_conf.link_speed,
+                            dev->data->port_id);
                goto error;
        }
 
-       err = ixgbe_setup_link(hw, speed, negotiate, link_up);
+       err = ixgbe_setup_link(hw, speed, link_up);
        if (err)
                goto error;
 
@@ -1413,10 +1570,10 @@ skip_link_setup:
        if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
                /* Enable vlan filtering for VMDq */
                ixgbe_vmdq_vlan_hw_filter_enable(dev);
-       }       
+       }
 
        /* Configure DCB hw */
-       ixgbe_configure_dcb(dev); 
+       ixgbe_configure_dcb(dev);
 
        if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
                err = ixgbe_fdir_configure(dev);
@@ -1424,6 +1581,16 @@ skip_link_setup:
                        goto error;
        }
 
+       /* Restore vf rate limit */
+       if (vfinfo != NULL) {
+               for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
+                       for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
+                               if (vfinfo[vf].tx_rate[idx] != 0)
+                                       ixgbe_set_vf_rate_limit(dev, vf,
+                                               vfinfo[vf].tx_rate[idx],
+                                               1 << idx);
+       }
+
        ixgbe_restore_statistics_mapping(dev);
 
        return (0);
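
The restore loop above replays whatever rate limits the application had
configured before the port was stopped. A minimal sketch of the
originating call, with port, VF and rate values chosen purely for
illustration (this assumes the rte_eth_set_vf_rate_limit() wrapper of
this ethdev generation):

    #include <rte_ethdev.h>

    /* Cap queue 0 of VF 1 on port 0 at 100 Mbps (all values assumed).
     * ixgbe_dev_start() re-applies the limit after a stop/start cycle. */
    static int
    limit_vf_rate(void)
    {
            uint64_t q_msk = 1ULL << 0;   /* only the VF's first queue */

            return rte_eth_set_vf_rate_limit(0, 1, 100, q_msk);
    }
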
@@ -1443,7 +1610,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
        struct rte_eth_link link;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct ixgbe_vf_info *vfinfo = 
+       struct ixgbe_vf_info *vfinfo =
                *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
        int vf;
 
@@ -1459,7 +1626,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
        /* stop adapter */
        ixgbe_stop_adapter(hw);
 
-       for (vf = 0; vfinfo != NULL && 
+       for (vf = 0; vfinfo != NULL &&
                     vf < dev->pci_dev->max_vfs; vf++)
                vfinfo[vf].clear_to_send = false;
 
@@ -1468,11 +1635,68 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
 
        ixgbe_dev_clear_queues(dev);
 
+       /* Clear stored conf */
+       dev->data->scattered_rx = 0;
+
        /* Clear recorded link status */
        memset(&link, 0, sizeof(link));
        rte_ixgbe_dev_atomic_write_link_status(dev, &link);
 }
 
+/*
+ * Set device link up: enable tx laser.
+ */
+static int
+ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       if (hw->mac.type == ixgbe_mac_82599EB) {
+#ifdef RTE_NIC_BYPASS
+               if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+                       /* Not supported in bypass mode */
+                       PMD_INIT_LOG(ERR, "Set link up is not supported "
+                                    "by device id 0x%x", hw->device_id);
+                       return -ENOTSUP;
+               }
+#endif
+               /* Turn on the laser */
+               ixgbe_enable_tx_laser(hw);
+               return 0;
+       }
+
+       PMD_INIT_LOG(ERR, "Set link up is not supported by device id 0x%x",
+                    hw->device_id);
+       return -ENOTSUP;
+}
+
+/*
+ * Set device link down: disable tx laser.
+ */
+static int
+ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       if (hw->mac.type == ixgbe_mac_82599EB) {
+#ifdef RTE_NIC_BYPASS
+               if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+                       /* Not supported in bypass mode */
+                       PMD_INIT_LOG(ERR, "Set link down is not supported "
+                                    "by device id 0x%x", hw->device_id);
+                       return -ENOTSUP;
+               }
+#endif
+               /* Turn off the laser */
+               ixgbe_disable_tx_laser(hw);
+               return 0;
+       }
+
+       PMD_INIT_LOG(ERR, "Set link down is not supported by device id 0x%x",
+                    hw->device_id);
+       return -ENOTSUP;
+}
+
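
Applications reach the two handlers above through the matching ethdev
wrappers. A sketch, assuming port 0 is an 82599 fiber port (other MACs,
and the 82599 bypass adapter, return -ENOTSUP):

    #include <rte_ethdev.h>

    /* Force a link flap by toggling the TX laser (port 0 assumed). */
    static int
    flap_link(void)
    {
            int ret = rte_eth_dev_set_link_down(0);

            if (ret < 0)
                    return ret;
            return rte_eth_dev_set_link_up(0);
    }
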
 /*
  * Reset and stop device.
  */
@@ -1655,9 +1879,15 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        }
 
        /* Rx Errors */
-       stats->ierrors = total_missed_rx + hw_stats->crcerrs +
-               hw_stats->rlec;
-
+       stats->ibadcrc  = hw_stats->crcerrs;
+       stats->ibadlen  = hw_stats->rlec + hw_stats->ruc + hw_stats->roc;
+       stats->imissed  = total_missed_rx;
+       stats->ierrors  = stats->ibadcrc +
+                         stats->ibadlen +
+                         stats->imissed +
+                         hw_stats->illerrc + hw_stats->errbc;
+
+       /* Tx Errors */
        stats->oerrors  = 0;
 
        /* XON/XOFF pause frames */
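
After this change ierrors is the sum of the finer-grained counters plus
the illegal-byte (illerrc) and error-byte (errbc) registers, rather than
just missed + CRC + length errors. A sketch reading the breakdown back
(port number and helper name are assumptions):

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Print the RX error decomposition introduced above: ierrors ==
     * ibadcrc + ibadlen + imissed + illerrc + errbc. */
    static void
    dump_rx_errors(uint8_t port)
    {
            struct rte_eth_stats st;

            rte_eth_stats_get(port, &st);
            printf("crc=%" PRIu64 " len=%" PRIu64 " missed=%" PRIu64
                   " total=%" PRIu64 "\n",
                   st.ibadcrc, st.ibadlen, st.imissed, st.ierrors);
    }
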
@@ -1716,7 +1946,6 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        if (stats == NULL)
                return;
 
-       memset(stats, 0, sizeof(*stats));
        stats->ipackets = hw_stats->vfgprc;
        stats->ibytes = hw_stats->vfgorc;
        stats->opackets = hw_stats->vfgptc;
@@ -1758,6 +1987,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                dev_info->max_vmdq_pools = ETH_16_POOLS;
        else
                dev_info->max_vmdq_pools = ETH_64_POOLS;
+       dev_info->vmdq_queue_num = dev_info->max_rx_queues;
        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_IPV4_CKSUM |
@@ -1768,7 +1998,81 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                DEV_TX_OFFLOAD_IPV4_CKSUM  |
                DEV_TX_OFFLOAD_UDP_CKSUM   |
                DEV_TX_OFFLOAD_TCP_CKSUM   |
-               DEV_TX_OFFLOAD_SCTP_CKSUM;
+               DEV_TX_OFFLOAD_SCTP_CKSUM  |
+               DEV_TX_OFFLOAD_TCP_TSO;
+
+       dev_info->default_rxconf = (struct rte_eth_rxconf) {
+               .rx_thresh = {
+                       .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
+                       .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
+                       .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
+               },
+               .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
+               .rx_drop_en = 0,
+       };
+
+       dev_info->default_txconf = (struct rte_eth_txconf) {
+               .tx_thresh = {
+                       .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
+                       .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
+                       .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
+               },
+               .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
+               .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
+               .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+                               ETH_TXQ_FLAGS_NOOFFLOADS,
+       };
+       dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+}
+
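
Publishing default_rxconf/default_txconf lets applications adopt the
PMD's preferred thresholds instead of hard-coding them. A read-back
sketch (port number and helper name are illustrative):

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Fetch the defaults advertised above; they can be passed back to
     * rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup() as a baseline. */
    static void
    show_defaults(uint8_t port)
    {
            struct rte_eth_dev_info info;

            rte_eth_dev_info_get(port, &info);
            printf("rx_free_thresh=%u tx_rs_thresh=%u reta_size=%u\n",
                   info.default_rxconf.rx_free_thresh,
                   info.default_txconf.tx_rs_thresh,
                   info.reta_size);
    }
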
+static void
+ixgbevf_dev_info_get(struct rte_eth_dev *dev,
+                    struct rte_eth_dev_info *dev_info)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
+       dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
+       dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
+       dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */
+       dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+       dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
+       dev_info->max_vfs = dev->pci_dev->max_vfs;
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               dev_info->max_vmdq_pools = ETH_16_POOLS;
+       else
+               dev_info->max_vmdq_pools = ETH_64_POOLS;
+       dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+                               DEV_RX_OFFLOAD_IPV4_CKSUM |
+                               DEV_RX_OFFLOAD_UDP_CKSUM  |
+                               DEV_RX_OFFLOAD_TCP_CKSUM;
+       dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+                               DEV_TX_OFFLOAD_IPV4_CKSUM  |
+                               DEV_TX_OFFLOAD_UDP_CKSUM   |
+                               DEV_TX_OFFLOAD_TCP_CKSUM   |
+                               DEV_TX_OFFLOAD_SCTP_CKSUM;
+
+       dev_info->default_rxconf = (struct rte_eth_rxconf) {
+               .rx_thresh = {
+                       .pthresh = IXGBE_DEFAULT_RX_PTHRESH,
+                       .hthresh = IXGBE_DEFAULT_RX_HTHRESH,
+                       .wthresh = IXGBE_DEFAULT_RX_WTHRESH,
+               },
+               .rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
+               .rx_drop_en = 0,
+       };
+
+       dev_info->default_txconf = (struct rte_eth_txconf) {
+               .tx_thresh = {
+                       .pthresh = IXGBE_DEFAULT_TX_PTHRESH,
+                       .hthresh = IXGBE_DEFAULT_TX_HTHRESH,
+                       .wthresh = IXGBE_DEFAULT_TX_WTHRESH,
+               },
+               .tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
+               .tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
+               .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+                               ETH_TXQ_FLAGS_NOOFFLOADS,
+       };
 }
 
 /* return 0 means link status changed, -1 means not changed */
@@ -1777,7 +2081,7 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_link link, old;
-       ixgbe_link_speed link_speed;
+       ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
        int link_up;
        int diag;
 
@@ -1801,6 +2105,12 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
                return 0;
        }
 
+       if (link_speed == IXGBE_LINK_SPEED_UNKNOWN &&
+           !hw->mac.get_link_status) {
+               memcpy(&link, &old, sizeof(link));
+               return -1;
+       }
+
        if (link_up == 0) {
                rte_ixgbe_dev_atomic_write_link_status(dev, &link);
                if (link.link_status == old.link_status)
@@ -1935,7 +2245,7 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
        /* read-on-clear nic registers here */
        eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
        PMD_DRV_LOG(INFO, "eicr %x", eicr);
-       
+
        intr->flags = 0;
        if (eicr & IXGBE_EICR_LSC) {
                /* set flag for async link update */
@@ -1999,14 +2309,14 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
                IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        int64_t timeout;
        struct rte_eth_link link;
-       int intr_enable_delay = false;  
+       int intr_enable_delay = false;
 
-       PMD_DRV_LOG(DEBUG, "intr action type %d\n", intr->flags);
+       PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
 
        if (intr->flags & IXGBE_FLAG_MAILBOX) {
                ixgbe_pf_mbx_process(dev);
                intr->flags &= ~IXGBE_FLAG_MAILBOX;
-       } 
+       }
 
        if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
                /* get the link status before link update, for predicting later */
@@ -2023,11 +2333,11 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
                else
                        /* handle it 4 sec later, wait it being stable */
                        timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
-               
+
                ixgbe_dev_link_status_print(dev);
 
                intr_enable_delay = true;
-       } 
+       }
 
        if (intr_enable_delay) {
                if (rte_eal_alarm_set(timeout * 1000,
@@ -2038,7 +2348,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
                ixgbe_enable_intr(dev);
                rte_intr_enable(&(dev->pci_dev->intr_handle));
        }
-                       
+
 
        return 0;
 }
@@ -2078,7 +2388,7 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
                _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
        }
 
-       PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]\n", eicr);
+       PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
        ixgbe_enable_intr(dev);
        rte_intr_enable(&(dev->pci_dev->intr_handle));
 }
@@ -2122,6 +2432,55 @@ ixgbe_dev_led_off(struct rte_eth_dev *dev)
        return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
 }
 
+static int
+ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+       struct ixgbe_hw *hw;
+       uint32_t mflcn_reg;
+       uint32_t fccfg_reg;
+       int rx_pause;
+       int tx_pause;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       fc_conf->pause_time = hw->fc.pause_time;
+       fc_conf->high_water = hw->fc.high_water[0];
+       fc_conf->low_water = hw->fc.low_water[0];
+       fc_conf->send_xon = hw->fc.send_xon;
+       fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
+
+       /*
+        * Return rx_pause status according to actual setting of
+        * MFLCN register.
+        */
+       mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+       if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
+               rx_pause = 1;
+       else
+               rx_pause = 0;
+
+       /*
+        * Return tx_pause status according to actual setting of
+        * FCCFG register.
+        */
+       fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+       if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
+               tx_pause = 1;
+       else
+               tx_pause = 0;
+
+       if (rx_pause && tx_pause)
+               fc_conf->mode = RTE_FC_FULL;
+       else if (rx_pause)
+               fc_conf->mode = RTE_FC_RX_PAUSE;
+       else if (tx_pause)
+               fc_conf->mode = RTE_FC_TX_PAUSE;
+       else
+               fc_conf->mode = RTE_FC_NONE;
+
+       return 0;
+}
+
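
The new get callback completes the get/set pair, so flow control can be
adjusted read-modify-write instead of rebuilt from scratch. A sketch
through the ethdev wrappers (port number and helper name assumed):

    #include <rte_ethdev.h>

    /* Switch to RX-only pause while keeping the watermarks and pause
     * time the driver currently reports. */
    static int
    enable_rx_pause_only(uint8_t port)
    {
            struct rte_eth_fc_conf fc;
            int ret;

            ret = rte_eth_dev_flow_ctrl_get(port, &fc);
            if (ret < 0)
                    return ret;
            fc.mode = RTE_FC_RX_PAUSE;
            return rte_eth_dev_flow_ctrl_set(port, &fc);
    }
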
 static int
 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 {
@@ -2140,8 +2499,10 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        PMD_INIT_FUNC_TRACE();
 
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       if (fc_conf->autoneg != !hw->fc.disable_fc_autoneg)
+               return -ENOTSUP;
        rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
-       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
+       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
 
        /*
         * At least reserve one Ethernet frame for watermark
@@ -2150,8 +2511,8 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
        if ((fc_conf->high_water > max_high_water) ||
                (fc_conf->high_water < fc_conf->low_water)) {
-               PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
-               PMD_INIT_LOG(ERR, "High_water must <=  0x%x\n", max_high_water);
+               PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+               PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
                return (-EINVAL);
        }
 
@@ -2183,7 +2544,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
                return 0;
        }
 
-       PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x \n", err);
+       PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
        return -EIO;
 }
 
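
For a from-scratch configuration the watermarks are given in KB and are
validated against the RX packet buffer size; with this patch the autoneg
flag must also match the device's current setting or the driver returns
-ENOTSUP. A sketch with illustrative values only:

    #include <rte_ethdev.h>

    /* Full flow control with explicit watermarks (all values assumed).
     * high_water must stay <= (rx_buf_size - ETHER_MAX_LEN) in KB and
     * above low_water, or the driver returns -EINVAL. */
    static int
    set_fc_watermarks(uint8_t port)
    {
            struct rte_eth_fc_conf fc = {
                    .mode       = RTE_FC_FULL,
                    .high_water = 0x80,    /* KB */
                    .low_water  = 0x40,    /* KB */
                    .pause_time = 0x680,
                    .send_xon   = 1,
                    .autoneg    = 1,       /* must match current device state */
            };

            return rte_eth_dev_flow_ctrl_set(port, &fc);
    }
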
@@ -2193,7 +2554,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
  *  @tc_num: traffic class number
  *  Enable flow control according to the current settings.
  */
-static int 
+static int
 ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
 {
        int ret_val = 0;
@@ -2202,7 +2563,7 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
        uint32_t fcrtl, fcrth;
        uint8_t i;
        uint8_t nb_rx_en;
-       
+
        /* Validate the water mark configuration */
        if (!hw->fc.pause_time) {
                ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
@@ -2213,13 +2574,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
        if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
                 /* High/Low water can not be 0 */
                if( (!hw->fc.high_water[tc_num])|| (!hw->fc.low_water[tc_num])) {
-                       PMD_INIT_LOG(ERR,"Invalid water mark configuration\n");
+                       PMD_INIT_LOG(ERR, "Invalid water mark configuration");
                        ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                        goto out;
                }
+
                if(hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
-                       PMD_INIT_LOG(ERR,"Invalid water mark configuration\n");
+                       PMD_INIT_LOG(ERR, "Invalid water mark configuration");
                        ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                        goto out;
                }
@@ -2238,7 +2599,7 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
        case ixgbe_fc_none:
                /*
                 * If the count of enabled RX Priority Flow control >1,
-                * and the TX pause can not be disabled 
+                * and the TX pause can not be disabled
                 */
                nb_rx_en = 0;
                for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
@@ -2285,7 +2646,7 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
                fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
                break;
        default:
-               DEBUGOUT("Flow control param set incorrectly\n");
+               PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
                ret_val = IXGBE_ERR_CONFIG;
                goto out;
                break;
@@ -2326,7 +2687,7 @@ out:
        return ret_val;
 }
 
-static int 
+static int
 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -2338,7 +2699,7 @@ ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num)
        return ret_val;
 }
 
-static int 
+static int
 ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
 {
        int err;
@@ -2350,29 +2711,29 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
                 IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_dcb_config *dcb_config =
                 IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
-       
+
        enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
                ixgbe_fc_none,
                ixgbe_fc_rx_pause,
                ixgbe_fc_tx_pause,
                ixgbe_fc_full
        };
-       
+
        PMD_INIT_FUNC_TRACE();
-       
+
        ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
        tc_num = map[pfc_conf->priority];
        rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
-       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
+       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
        /*
         * At least reserve one Ethernet frame for watermark
         * high_water/low_water in kilo bytes for ixgbe
         */
        max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
        if ((pfc_conf->fc.high_water > max_high_water) ||
-               (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
-               PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
-               PMD_INIT_LOG(ERR, "High_water must <=  0x%x\n", max_high_water);
+           (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
+               PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+               PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
                return (-EINVAL);
        }
 
@@ -2381,51 +2742,55 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
        hw->fc.send_xon = pfc_conf->fc.send_xon;
        hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
        hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
-               
+
        err = ixgbe_dcb_pfc_enable(dev,tc_num);
-       
+
        /* Not negotiated is not an error case */
-       if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) 
+       if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
                return 0;
 
-       PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x \n", err);
+       PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
        return -EIO;
-}      
+}
 
-static int 
+static int
 ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
-                               struct rte_eth_rss_reta *reta_conf)
-{      
-       uint8_t i,j,mask;
-       uint32_t reta;
-       struct ixgbe_hw *hw = 
-                       IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+                         struct rte_eth_rss_reta_entry64 *reta_conf,
+                         uint16_t reta_size)
+{
+       uint8_t i, j, mask;
+       uint32_t reta, r;
+       uint16_t idx, shift;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        PMD_INIT_FUNC_TRACE();
-       /*  
-       * Update Redirection Table RETA[n],n=0...31,The redirection table has 
-       * 128-entries in 32 registers
-        */ 
-       for(i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
-               if (i < ETH_RSS_RETA_NUM_ENTRIES/2) 
-                       mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
+       if (reta_size != ETH_RSS_RETA_SIZE_128) {
+               PMD_DRV_LOG(ERR, "The size of the hash lookup table configured "
+                       "(%d) doesn't match the number the hardware supports "
+                       "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < reta_size; i += IXGBE_4_BIT_WIDTH) {
+               idx = i / RTE_RETA_GROUP_SIZE;
+               shift = i % RTE_RETA_GROUP_SIZE;
+               mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+                                               IXGBE_4_BIT_MASK);
+               if (!mask)
+                       continue;
+               if (mask == IXGBE_4_BIT_MASK)
+                       r = 0;
                else
-                       mask = (uint8_t)((reta_conf->mask_hi >> 
-                               (i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF);
-               if (mask != 0) {
-                       reta = 0;
-                       if (mask != 0xF)
-                               reta = IXGBE_READ_REG(hw,IXGBE_RETA(i >> 2));
-
-                       for (j = 0; j < 4; j++) {
-                               if (mask & (0x1 << j)) {
-                                       if (mask != 0xF)
-                                               reta &= ~(0xFF << 8 * j);
-                                       reta |= reta_conf->reta[i + j] << 8*j;
-                               }
-                       }
-                       IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2),reta);
+                       r = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+               for (j = 0, reta = 0; j < IXGBE_4_BIT_WIDTH; j++) {
+                       if (mask & (0x1 << j))
+                               reta |= reta_conf[idx].reta[shift + j] <<
+                                                       (CHAR_BIT * j);
+                       else
+                               reta |= r & (IXGBE_8_BIT_MASK <<
+                                               (CHAR_BIT * j));
                }
+               IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
        }
 
        return 0;
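
A caller-side sketch of the new RETA layout, assuming the generic
rte_eth_dev_rss_reta_update() ethdev wrapper and an illustrative 4-queue RSS
setup: one rte_eth_rss_reta_entry64 covers RTE_RETA_GROUP_SIZE (64) entries,
so a 128-entry table needs two of them.

	#include <string.h>
	#include <rte_ethdev.h>

	/* Sketch: spread the 128 RETA entries round-robin over 4 RX queues. */
	static int
	example_reta_setup(uint8_t port_id)
	{
		struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
							  RTE_RETA_GROUP_SIZE];
		uint16_t i;

		memset(reta_conf, 0, sizeof(reta_conf));
		for (i = 0; i < ETH_RSS_RETA_SIZE_128; i++) {
			/* mark the entry as valid in the per-group mask... */
			reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
				1ULL << (i % RTE_RETA_GROUP_SIZE);
			/* ...and point it at one of the 4 queues */
			reta_conf[i / RTE_RETA_GROUP_SIZE].reta[
				i % RTE_RETA_GROUP_SIZE] = i % 4;
		}
		return rte_eth_dev_rss_reta_update(port_id, reta_conf,
						   ETH_RSS_RETA_SIZE_128);
	}
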
@@ -2433,36 +2798,40 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
 
 static int
 ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
-                               struct rte_eth_rss_reta *reta_conf)
+                        struct rte_eth_rss_reta_entry64 *reta_conf,
+                        uint16_t reta_size)
 {
-       uint8_t i,j,mask;
+       uint8_t i, j, mask;
        uint32_t reta;
-       struct ixgbe_hw *hw =
-                       IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       
+       uint16_t idx, shift;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
        PMD_INIT_FUNC_TRACE();
-       /* 
-        * Read Redirection Table RETA[n],n=0...31,The redirection table has 
-        * 128-entries in 32 registers
-        */
-       for(i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i += 4) {
-               if (i < ETH_RSS_RETA_NUM_ENTRIES/2)
-                       mask = (uint8_t)((reta_conf->mask_lo >> i) & 0xF);
-               else
-                       mask = (uint8_t)((reta_conf->mask_hi >> 
-                               (i - ETH_RSS_RETA_NUM_ENTRIES/2)) & 0xF);
-
-               if (mask != 0) {
-                       reta = IXGBE_READ_REG(hw,IXGBE_RETA(i >> 2));
-                       for (j = 0; j < 4; j++) {
-                               if (mask & (0x1 << j))
-                                       reta_conf->reta[i + j] = 
-                                               (uint8_t)((reta >> 8 * j) & 0xFF);
-                       } 
+       if (reta_size != ETH_RSS_RETA_SIZE_128) {
+               PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+                       "(%d) doesn't match the number hardware can support "
+                       "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IXGBE_4_BIT_WIDTH) {
+               idx = i / RTE_RETA_GROUP_SIZE;
+               shift = i % RTE_RETA_GROUP_SIZE;
+               mask = (uint8_t)((reta_conf[idx].mask >> shift) &
+                                               IXGBE_4_BIT_MASK);
+               if (!mask)
+                       continue;
+
+               reta = IXGBE_READ_REG(hw, IXGBE_RETA(i >> 2));
+               for (j = 0; j < IXGBE_4_BIT_WIDTH; j++) {
+                       if (mask & (0x1 << j))
+                               reta_conf[idx].reta[shift + j] =
+                                       ((reta >> (CHAR_BIT * j)) &
+                                               IXGBE_8_BIT_MASK);
                }
        }
 
-       return 0;               
+       return 0;
 }
 
 static void
@@ -2483,13 +2852,59 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
        ixgbe_clear_rar(hw, index);
 }
 
+static int
+ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+       uint32_t hlreg0;
+       uint32_t maxfrs;
+       struct ixgbe_hw *hw;
+       struct rte_eth_dev_info dev_info;
+       uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+       ixgbe_dev_info_get(dev, &dev_info);
+
+       /* check that mtu is within the allowed range */
+       if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+               return -EINVAL;
+
+       /* refuse mtu that requires the support of scattered packets when this
+        * feature has not been enabled before. */
+       if (!dev->data->scattered_rx &&
+           (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
+            dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+
+       /* switch to jumbo mode if needed */
+       if (frame_size > ETHER_MAX_LEN) {
+               dev->data->dev_conf.rxmode.jumbo_frame = 1;
+               hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+       } else {
+               dev->data->dev_conf.rxmode.jumbo_frame = 0;
+               hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+       /* update max frame size */
+       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+       maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+       maxfrs &= 0x0000FFFF;
+       maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
+       IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
+
+       return 0;
+}
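
For example, requesting an MTU of 9000 through the generic wrapper gives
frame_size = 9000 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 9018 bytes;
that exceeds ETHER_MAX_LEN (1518), so the code above enables JUMBOEN and
writes 9018 into the upper 16 bits of MAXFRS. A minimal sketch, with the
port id and MTU value purely illustrative:

	#include <rte_ethdev.h>

	/* Sketch: switch a port to a 9000-byte MTU (9018-byte frames). */
	static int
	example_set_jumbo_mtu(uint8_t port_id)
	{
		return rte_eth_dev_set_mtu(port_id, 9000);
	}
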
+
 /*
  * Virtual Function operations
  */
 static void
 ixgbevf_intr_disable(struct ixgbe_hw *hw)
 {
-       PMD_INIT_LOG(DEBUG, "ixgbevf_intr_disable");
+       PMD_INIT_FUNC_TRACE();
 
        /* Clear interrupt mask to stop from interrupts being generated */
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
@@ -2502,8 +2917,8 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 {
        struct rte_eth_conf* conf = &dev->data->dev_conf;
 
-       PMD_INIT_LOG(DEBUG, "\nConfigured Virtual Function port id: %d\n",
-               dev->data->port_id);
+       PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
+                    dev->data->port_id);
 
        /*
         * VF has no ability to enable/disable HW CRC
@@ -2511,12 +2926,12 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
         */
 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
        if (!conf->rxmode.hw_strip_crc) {
-               PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n");
+               PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
                conf->rxmode.hw_strip_crc = 1;
        }
 #else
        if (conf->rxmode.hw_strip_crc) {
-               PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip\n");
+               PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
                conf->rxmode.hw_strip_crc = 0;
        }
 #endif
@@ -2527,13 +2942,14 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 static int
 ixgbevf_dev_start(struct rte_eth_dev *dev)
 {
-       struct ixgbe_hw *hw = 
+       struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int err, mask = 0;
-       
-       PMD_INIT_LOG(DEBUG, "ixgbevf_dev_start");
+
+       PMD_INIT_FUNC_TRACE();
 
        hw->mac.ops.reset_hw(hw);
+       hw->mac.get_link_status = true;
 
        /* negotiate mailbox API version to use with the PF. */
        ixgbevf_negotiate_api(hw);
@@ -2543,11 +2959,11 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
        /* This can fail when allocating mbufs for descriptor rings */
        err = ixgbevf_dev_rx_init(dev);
        if (err) {
-               PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)\n", err);
+               PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
                ixgbe_dev_clear_queues(dev);
                return err;
        }
-       
+
        /* Set vfta */
        ixgbevf_set_vfta_all(dev,1);
 
@@ -2566,17 +2982,20 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       PMD_INIT_LOG(DEBUG, "ixgbevf_dev_stop");
-               
+       PMD_INIT_FUNC_TRACE();
+
        hw->adapter_stopped = TRUE;
        ixgbe_stop_adapter(hw);
 
-       /* 
-         * Clear what we set, but we still keep shadow_vfta to 
+       /*
+         * Clear what we set, but we still keep shadow_vfta to
          * restore after device starts
          */
        ixgbevf_set_vfta_all(dev,0);
 
+       /* Clear stored conf */
+       dev->data->scattered_rx = 0;
+
        ixgbe_dev_clear_queues(dev);
 }
 
@@ -2585,7 +3004,7 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       PMD_INIT_LOG(DEBUG, "ixgbevf_dev_close");
+       PMD_INIT_FUNC_TRACE();
 
        ixgbe_reset_hw(hw);
 
@@ -2626,7 +3045,7 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
        uint32_t vid_idx = 0;
        uint32_t vid_bit = 0;
        int ret = 0;
-       
+
        PMD_INIT_FUNC_TRACE();
 
        /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
@@ -2655,14 +3074,14 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
        uint32_t ctrl;
 
        PMD_INIT_FUNC_TRACE();
-       
+
        if(queue >= hw->mac.max_rx_queues)
                return;
 
        ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
        if(on)
                ctrl |= IXGBE_RXDCTL_VME;
-       else 
+       else
                ctrl &= ~IXGBE_RXDCTL_VME;
        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
 
@@ -2690,36 +3109,36 @@ static int
 ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
 {
        uint32_t reg_val;
-       
+
        /* we only need to do this if VMDq is enabled */
        reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
        if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
-               PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting\n");
+               PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
                return (-1);
        }
-       
+
        return 0;
 }
 
-static uint32_t 
+static uint32_t
 ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
 {
        uint32_t vector = 0;
        switch (hw->mac.mc_filter_type) {
        case 0:   /* use bits [47:36] of the address */
-               vector = ((uc_addr->addr_bytes[4] >> 4) | 
+               vector = ((uc_addr->addr_bytes[4] >> 4) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 4));
                break;
        case 1:   /* use bits [46:35] of the address */
-               vector = ((uc_addr->addr_bytes[4] >> 3) | 
+               vector = ((uc_addr->addr_bytes[4] >> 3) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 5));
                break;
        case 2:   /* use bits [45:34] of the address */
-               vector = ((uc_addr->addr_bytes[4] >> 2) | 
+               vector = ((uc_addr->addr_bytes[4] >> 2) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 6));
                break;
        case 3:   /* use bits [43:32] of the address */
-               vector = ((uc_addr->addr_bytes[4]) | 
+               vector = ((uc_addr->addr_bytes[4]) |
                        (((uint16_t)uc_addr->addr_bytes[5]) << 8));
                break;
        default:  /* Invalid mc_filter_type */
@@ -2731,7 +3150,7 @@ ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
        return vector;
 }
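
A worked example of the vector computation for mc_filter_type 0, with the
MAC address chosen purely for illustration:

	/*
	 * MAC 00:11:22:33:45:67 -> addr_bytes[4] = 0x45, addr_bytes[5] = 0x67
	 * vector = (0x45 >> 4) | (0x67 << 4) = 0x004 | 0x670 = 0x674
	 * ixgbe_uc_hash_table_set() below then derives
	 * uta_idx = 0x674 >> 5 = 0x33 and uta_shift = 0x674 & 0x1F = 0x14.
	 */
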
 
-static int 
+static int
 ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
                               uint8_t on)
 {
@@ -2744,24 +3163,24 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
        const uint32_t ixgbe_uta_bit_shift = 5;
        const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
        const uint32_t bit1 = 0x1;
-       
+
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ixgbe_uta_info *uta_info =
                IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
-       
+
        /* The UTA table only exists on 82599 hardware and newer */
        if (hw->mac.type < ixgbe_mac_82599EB)
                return (-ENOTSUP);
-       
+
        vector = ixgbe_uta_vector(hw,mac_addr);
        uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
        uta_shift = vector & ixgbe_uta_bit_mask;
-       
+
        rc = ((uta_info->uta_shadow[uta_idx] >> uta_shift & bit1) != 0);
        if(rc == on)
                return 0;
-       
+
        reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
        if (on) {
                uta_info->uta_in_use++;
@@ -2772,15 +3191,15 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
                reg_val &= ~(bit1 << uta_shift);
                uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
        }
-       
+
        IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
-       
+
        if (uta_info->uta_in_use > 0)
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
                                IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
        else
                IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type);
-       
+
        return 0;
 }
 
@@ -2796,7 +3215,7 @@ ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
        /* The UTA table only exists on 82599 hardware and newer */
        if (hw->mac.type < ixgbe_mac_82599EB)
                return (-ENOTSUP);
-       
+
        if(on) {
                for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
                        uta_info->uta_shadow[i] = ~0;
@@ -2809,44 +3228,55 @@ ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
                }
        }
        return 0;
-       
+
+}
+
+uint32_t
+ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
+{
+       uint32_t new_val = orig_val;
+
+       if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+               new_val |= IXGBE_VMOLR_AUPE;
+       if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+               new_val |= IXGBE_VMOLR_ROMPE;
+       if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+               new_val |= IXGBE_VMOLR_ROPE;
+       if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+               new_val |= IXGBE_VMOLR_BAM;
+       if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+               new_val |= IXGBE_VMOLR_MPE;
+
+       return new_val;
 }
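
A minimal sketch of how the conversion composes VMOLR bits (the rx_mask
combination is an arbitrary example):

	static uint32_t
	example_vmolr_bits(void)
	{
		/* accept untagged packets and broadcast on a zeroed VMOLR */
		return ixgbe_convert_vm_rx_mask_to_val(ETH_VMDQ_ACCEPT_UNTAG |
					ETH_VMDQ_ACCEPT_BROADCAST, 0);
		/* result: IXGBE_VMOLR_AUPE | IXGBE_VMOLR_BAM */
	}
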
+
 static int
 ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
                               uint16_t rx_mask, uint8_t on)
 {
        int val = 0;
-       
+
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
-       
+
        if (hw->mac.type == ixgbe_mac_82598EB) {
                PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
-                       " on 82599 hardware and newer\n");
+                            " on 82599 hardware and newer");
                return (-ENOTSUP);
        }
        if (ixgbe_vmdq_mode_check(hw) < 0)
                return (-ENOTSUP);
 
-       if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG )
-               val |= IXGBE_VMOLR_AUPE;
-       if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC )
-               val |= IXGBE_VMOLR_ROMPE;
-       if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
-               val |= IXGBE_VMOLR_ROPE;
-       if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
-               val |= IXGBE_VMOLR_BAM;
-       if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
-               val |= IXGBE_VMOLR_MPE;
+       val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
 
        if (on)
                vmolr |= val;
-       else 
+       else
                vmolr &= ~val;
 
        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
-       
+
        return 0;
 }
 
@@ -2856,13 +3286,13 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
        uint32_t reg,addr;
        uint32_t val;
        const uint8_t bit1 = 0x1;
-       
+
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        if (ixgbe_vmdq_mode_check(hw) < 0)
                return (-ENOTSUP);
-       
+
        addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
        reg = IXGBE_READ_REG(hw, addr);
        val = bit1 << pool;
@@ -2871,9 +3301,9 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
                reg |= val;
        else
                reg &= ~val;
-       
+
        IXGBE_WRITE_REG(hw, addr,reg);
-       
+
        return 0;
 }
 
@@ -2883,13 +3313,13 @@ ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
        uint32_t reg,addr;
        uint32_t val;
        const uint8_t bit1 = 0x1;
-       
+
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        if (ixgbe_vmdq_mode_check(hw) < 0)
                return (-ENOTSUP);
-       
+
        addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
        reg = IXGBE_READ_REG(hw, addr);
        val = bit1 << pool;
@@ -2898,13 +3328,13 @@ ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
                reg |= val;
        else
                reg &= ~val;
-       
+
        IXGBE_WRITE_REG(hw, addr,reg);
-       
+
        return 0;
 }
 
-static int 
+static int
 ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
                        uint64_t pool_mask, uint8_t vlan_on)
 {
@@ -2912,14 +3342,14 @@ ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
        uint16_t pool_idx;
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       
+
        if (ixgbe_vmdq_mode_check(hw) < 0)
                return (-ENOTSUP);
        for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
-               if (pool_mask & ((uint64_t)(1ULL << pool_idx)))
-                       ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on);
-                       if (ret < 0)
-                               return ret;
+               if (pool_mask & ((uint64_t)(1ULL << pool_idx))) {
+                       ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx, vlan_on);
+                       if (ret < 0)
+                               return ret;
+               }
        }
 
        return ret;
@@ -2927,7 +3357,7 @@ ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
 
 static int
 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
-                       struct rte_eth_vmdq_mirror_conf *mirror_conf, 
+                       struct rte_eth_vmdq_mirror_conf *mirror_conf,
                        uint8_t rule_id, uint8_t on)
 {
        uint32_t mr_ctl,vlvf;
@@ -2938,7 +3368,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
        uint8_t i = 0;
        int reg_index = 0;
        uint64_t vlan_mask = 0;
-       
+
        const uint8_t pool_mask_offset = 32;
        const uint8_t vlan_mask_offset = 32;
        const uint8_t dst_pool_offset = 8;
@@ -2981,7 +3411,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
                if (on) {
                        mv_lsb = vlan_mask & 0xFFFFFFFF;
                        mv_msb = vlan_mask >> vlan_mask_offset;
-                       
+
                        mr_info->mr_conf[rule_id].vlan.vlan_mask =
                                                mirror_conf->vlan.vlan_mask;
                        for(i = 0 ;i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
@@ -2999,23 +3429,23 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
        }
 
        /*
-        * if enable pool mirror, write related pool mask register,if disable 
+        * if pool mirror is enabled, write the related pool mask register;
         * if it is disabled, clear the PFMRVM register
         */
        if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
-               if (on) { 
+               if (on) {
                        mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
                        mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
-                       mr_info->mr_conf[rule_id].pool_mask = 
+                       mr_info->mr_conf[rule_id].pool_mask =
                                        mirror_conf->pool_mask;
-                       
+
                } else {
                        mp_lsb = 0;
                        mp_msb = 0;
                        mr_info->mr_conf[rule_id].pool_mask = 0;
                }
        }
-       
+
        /* read mirror control register and recalculate it */
        mr_ctl = IXGBE_READ_REG(hw,IXGBE_MRCTL(rule_id));
 
@@ -3031,7 +3461,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 
        /* write mirror control register */
        IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
-       
+
        /* write pool mirror control register */
        if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
                IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
@@ -3048,19 +3478,19 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
        return 0;
 }
 
-static int 
+static int
 ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
 {
        int mr_ctl = 0;
        uint32_t lsb_val = 0;
        uint32_t msb_val = 0;
        const uint8_t rule_mr_offset = 4;
-       
+
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct ixgbe_mirror_info *mr_info = 
+       struct ixgbe_mirror_info *mr_info =
                (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
-       
+
        if (ixgbe_vmdq_mode_check(hw) < 0)
                return (-ENOTSUP);
 
@@ -3081,6 +3511,717 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
        return 0;
 }
 
+static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+       uint16_t queue_idx, uint16_t tx_rate)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t rf_dec, rf_int;
+       uint32_t bcnrc_val;
+       uint16_t link_speed = dev->data->dev_link.link_speed;
+
+       if (queue_idx >= hw->mac.max_tx_queues)
+               return -EINVAL;
+
+       if (tx_rate != 0) {
+               /* Calculate the rate factor values to set */
+               rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
+               rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
+               rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
+
+               bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
+               bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
+                               IXGBE_RTTBCNRC_RF_INT_MASK_M);
+               bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
+       } else {
+               bcnrc_val = 0;
+       }
+
+       /*
+        * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+        * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
+        * set as 0x4.
+        */
+       if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
+               (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
+                               IXGBE_MAX_JUMBO_FRAME_SIZE))
+               IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+                       IXGBE_MMW_SIZE_JUMBO_FRAME);
+       else
+               IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+                       IXGBE_MMW_SIZE_DEFAULT);
+
+       /* Set RTTBCNRC of queue X */
+       IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
+       IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return 0;
+}
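
The RTTBCNRC value is a fixed-point ratio of link speed to target rate; a
worked example, assuming IXGBE_RTTBCNRC_RF_INT_SHIFT is 14 (a 10.14
fixed-point factor):

	/*
	 * link_speed = 10000 Mbps (10G), tx_rate = 300 Mbps:
	 *   rf_int = 10000 / 300 = 33
	 *   rf_dec = ((10000 % 300) << 14) / 300 = (100 << 14) / 300 = 5461
	 * factor = 33 + 5461/16384 ~= 33.33 = link_speed / tx_rate
	 */
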
+
+static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+       uint16_t tx_rate, uint64_t q_msk)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_vf_info *vfinfo =
+               *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+       uint8_t  nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+       uint32_t queue_stride =
+               IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+       uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
+       uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
+       uint16_t total_rate = 0;
+
+       if (queue_end >= hw->mac.max_tx_queues)
+               return -EINVAL;
+
+       if (vfinfo != NULL) {
+               for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) {
+                       if (vf_idx == vf)
+                               continue;
+                       for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
+                               idx++)
+                               total_rate += vfinfo[vf_idx].tx_rate[idx];
+               }
+       } else
+               return -EINVAL;
+
+       /* Store tx_rate for this vf. */
+       for (idx = 0; idx < nb_q_per_pool; idx++) {
+               if (((uint64_t)0x1 << idx) & q_msk) {
+                       if (vfinfo[vf].tx_rate[idx] != tx_rate)
+                               vfinfo[vf].tx_rate[idx] = tx_rate;
+                       total_rate += tx_rate;
+               }
+       }
+
+       if (total_rate > dev->data->dev_link.link_speed) {
+               /*
+                * Reset the stored TX rate of the VF if the total
+                * exceeds the link speed.
+                */
+               memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
+               return -EINVAL;
+       }
+
+       /* Set RTTBCNRC of each queue/pool for vf X  */
+       for (; queue_idx <= queue_end; queue_idx++) {
+               if (0x1 & q_msk)
+                       ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
+               q_msk = q_msk >> 1;
+       }
+
+       return 0;
+}
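
From an application this path is reached through the generic rate-limit
wrapper; a sketch assuming the rte_eth_set_vf_rate_limit() API, with the VF
number, rate and queue mask purely illustrative:

	#include <rte_ethdev.h>

	/* Sketch: cap VF 1 to 1000 Mbps on its first two queues. */
	static int
	example_cap_vf_rate(uint8_t port_id)
	{
		return rte_eth_set_vf_rate_limit(port_id, 1 /* vf */,
						 1000 /* Mbps */, 0x3 /* q_msk */);
	}
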
+
+static void
+ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+                    __attribute__((unused)) uint32_t index,
+                    __attribute__((unused)) uint32_t pool)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int diag;
+
+       /*
+        * On a 82599 VF, adding the same MAC addr again is not an idempotent
+        * operation. Trap this case to avoid exhausting the [very limited]
+        * set of PF resources used to store VF MAC addresses.
+        */
+       if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
+               return;
+       diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
+       if (diag == 0)
+               return;
+       PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag);
+}
+
+static void
+ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
+       struct ether_addr *mac_addr;
+       uint32_t i;
+       int diag;
+
+       /*
+        * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
+        * not support the deletion of a given MAC address.
+        * Instead, it imposes to delete all MAC addresses, then to add again
+        * Instead, it requires deleting all MAC addresses, then adding back
+        * all MAC addresses except the one to be deleted.
+       (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
+
+       /*
+        * Add back all MAC addresses, except the deleted one and the
+        * permanent MAC address.
+        */
+       for (i = 0, mac_addr = dev->data->mac_addrs;
+            i < hw->mac.num_rar_entries; i++, mac_addr++) {
+               /* Skip the deleted MAC address */
+               if (i == index)
+                       continue;
+               /* Skip NULL MAC addresses */
+               if (is_zero_ether_addr(mac_addr))
+                       continue;
+               /* Skip the permanent MAC address */
+               if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
+                       continue;
+               diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
+               if (diag != 0)
+                       PMD_DRV_LOG(ERR,
+                                   "Re-adding MAC address "
+                                   "%02x:%02x:%02x:%02x:%02x:%02x failed: "
+                                   "diag=%d",
+                                   mac_addr->addr_bytes[0],
+                                   mac_addr->addr_bytes[1],
+                                   mac_addr->addr_bytes[2],
+                                   mac_addr->addr_bytes[3],
+                                   mac_addr->addr_bytes[4],
+                                   mac_addr->addr_bytes[5],
+                                   diag);
+       }
+}
+
+/*
+ * add syn filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_add_syn_filter(struct rte_eth_dev *dev,
+                       struct rte_syn_filter *filter, uint16_t rx_queue)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t synqf;
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       if (rx_queue >= IXGBE_MAX_RX_QUEUE_NUM)
+               return -EINVAL;
+
+       synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+
+       if (synqf & IXGBE_SYN_FILTER_ENABLE)
+               return -EINVAL;
+
+       synqf = (uint32_t)(((rx_queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
+               IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
+
+       if (filter->hig_pri)
+               synqf |= IXGBE_SYN_FILTER_SYNQFP;
+       else
+               synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
+
+       IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+       return 0;
+}
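
A caller-side sketch, assuming the legacy filter API exposes this through an
rte_eth_dev_add_syn_filter() wrapper (the queue number is illustrative):

	#include <rte_ethdev.h>

	/* Sketch: steer TCP SYN packets to RX queue 4 with high priority. */
	static int
	example_add_syn_filter(uint8_t port_id)
	{
		struct rte_syn_filter filter = { .hig_pri = 1 };

		return rte_eth_dev_add_syn_filter(port_id, &filter, 4);
	}
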
+
+/*
+ * remove syn filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_remove_syn_filter(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t synqf;
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+
+       synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
+
+       IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+       return 0;
+}
+
+/*
+ * get the syn filter's info
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the returned filter.
+ * *rx_queue: pointer to the queue id the filter is assigned to.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_get_syn_filter(struct rte_eth_dev *dev,
+                       struct rte_syn_filter *filter, uint16_t *rx_queue)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t synqf;
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+       if (synqf & IXGBE_SYN_FILTER_ENABLE) {
+               filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
+               *rx_queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
+               return 0;
+       }
+       return -ENOENT;
+}
+
+static inline enum ixgbe_5tuple_protocol
+convert_protocol_type(uint8_t protocol_value)
+{
+       if (protocol_value == IPPROTO_TCP)
+               return IXGBE_FILTER_PROTOCOL_TCP;
+       else if (protocol_value == IPPROTO_UDP)
+               return IXGBE_FILTER_PROTOCOL_UDP;
+       else if (protocol_value == IPPROTO_SCTP)
+               return IXGBE_FILTER_PROTOCOL_SCTP;
+       else
+               return IXGBE_FILTER_PROTOCOL_NONE;
+}
+
+static inline uint8_t
+revert_protocol_type(enum ixgbe_5tuple_protocol protocol)
+{
+       if (protocol == IXGBE_FILTER_PROTOCOL_TCP)
+               return IPPROTO_TCP;
+       else if (protocol == IXGBE_FILTER_PROTOCOL_UDP)
+               return IPPROTO_UDP;
+       else if (protocol == IXGBE_FILTER_PROTOCOL_SCTP)
+               return IPPROTO_SCTP;
+       else
+               return 0;
+}
+
+/*
+ * add a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter is allocated at.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+                       struct rte_5tuple_filter *filter, uint16_t rx_queue)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t ftqf, sdpqf = 0;
+       uint32_t l34timir = 0;
+       uint8_t mask = 0xff;
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       if (index >= IXGBE_MAX_FTQF_FILTERS ||
+               rx_queue >= IXGBE_MAX_RX_QUEUE_NUM ||
+               filter->priority > IXGBE_5TUPLE_MAX_PRI ||
+               filter->priority < IXGBE_5TUPLE_MIN_PRI)
+               return -EINVAL;  /* index, queue or priority out of range. */
+
+       if (filter->tcp_flags) {
+               PMD_INIT_LOG(INFO, "82599EB does not support tcp flags in 5tuple");
+               return -EINVAL;
+       }
+
+       ftqf = IXGBE_READ_REG(hw, IXGBE_FTQF(index));
+       if (ftqf & IXGBE_FTQF_QUEUE_ENABLE)
+               return -EINVAL;  /* filter index is in use. */
+
+       ftqf = 0;
+       sdpqf = (uint32_t)(filter->dst_port << IXGBE_SDPQF_DSTPORT_SHIFT);
+       sdpqf = sdpqf | (filter->src_port & IXGBE_SDPQF_SRCPORT);
+
+       ftqf |= (uint32_t)(convert_protocol_type(filter->protocol) &
+               IXGBE_FTQF_PROTOCOL_MASK);
+       ftqf |= (uint32_t)((filter->priority & IXGBE_FTQF_PRIORITY_MASK) <<
+               IXGBE_FTQF_PRIORITY_SHIFT);
+       if (filter->src_ip_mask == 0) /* 0 means compare. */
+               mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
+       if (filter->dst_ip_mask == 0)
+               mask &= IXGBE_FTQF_DEST_ADDR_MASK;
+       if (filter->src_port_mask == 0)
+               mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
+       if (filter->dst_port_mask == 0)
+               mask &= IXGBE_FTQF_DEST_PORT_MASK;
+       if (filter->protocol_mask == 0)
+               mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
+       ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
+       ftqf |= IXGBE_FTQF_POOL_MASK_EN;
+       ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
+
+       IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), filter->dst_ip);
+       IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), filter->src_ip);
+       IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), sdpqf);
+       IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), ftqf);
+
+       l34timir |= IXGBE_L34T_IMIR_RESERVE;
+       l34timir |= (uint32_t)(rx_queue << IXGBE_L34T_IMIR_QUEUE_SHIFT);
+       IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), l34timir);
+       return 0;
+}
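
A caller-side sketch, assuming the legacy rte_eth_dev_add_5tuple_filter()
wrapper; recall from the code above that a field mask of 0 means "compare
this field" and 1 means "ignore it" (index, port and queue values are
illustrative):

	#include <netinet/in.h>
	#include <rte_ethdev.h>

	/* Sketch: match TCP packets to destination port 80, steer to queue 2. */
	static int
	example_add_http_filter(uint8_t port_id)
	{
		struct rte_5tuple_filter f = {
			.dst_port = 80,          /* dst_port_mask = 0: compared */
			.protocol = IPPROTO_TCP, /* protocol_mask = 0: compared */
			.src_ip_mask = 1,        /* ignore source IP */
			.dst_ip_mask = 1,        /* ignore destination IP */
			.src_port_mask = 1,      /* ignore source port */
			.priority = 1,
		};

		return rte_eth_dev_add_5tuple_filter(port_id, 0 /* index */,
						     &f, 2 /* rx_queue */);
	}
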
+
+/*
+ * remove a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter is allocated at.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+                       uint16_t index)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       if (index >= IXGBE_MAX_FTQF_FILTERS)
+               return -EINVAL;  /* filter index is out of range. */
+
+       IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
+       return 0;
+}
+
+/*
+ * get a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter is allocated at.
+ * filter: pointer to the returned filter.
+ * *rx_queue: pointer to the queue id the filter is assigned to.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+                       struct rte_5tuple_filter *filter, uint16_t *rx_queue)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t sdpqf, ftqf, l34timir;
+       uint8_t mask;
+       enum ixgbe_5tuple_protocol proto;
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       if (index >= IXGBE_MAX_FTQF_FILTERS)
+               return -EINVAL;  /* filter index is out of range. */
+
+       ftqf = IXGBE_READ_REG(hw, IXGBE_FTQF(index));
+       if (ftqf & IXGBE_FTQF_QUEUE_ENABLE) {
+               proto = (enum ixgbe_5tuple_protocol)(ftqf & IXGBE_FTQF_PROTOCOL_MASK);
+               filter->protocol = revert_protocol_type(proto);
+               filter->priority = (ftqf >> IXGBE_FTQF_PRIORITY_SHIFT) &
+                                       IXGBE_FTQF_PRIORITY_MASK;
+               mask = (uint8_t)((ftqf >> IXGBE_FTQF_5TUPLE_MASK_SHIFT) &
+                                       IXGBE_FTQF_5TUPLE_MASK_MASK);
+               filter->src_ip_mask =
+                       (mask & IXGBE_FTQF_SOURCE_ADDR_MASK) ? 1 : 0;
+               filter->dst_ip_mask =
+                       (mask & IXGBE_FTQF_DEST_ADDR_MASK) ? 1 : 0;
+               filter->src_port_mask =
+                       (mask & IXGBE_FTQF_SOURCE_PORT_MASK) ? 1 : 0;
+               filter->dst_port_mask =
+                       (mask & IXGBE_FTQF_DEST_PORT_MASK) ? 1 : 0;
+               filter->protocol_mask =
+                       (mask & IXGBE_FTQF_PROTOCOL_COMP_MASK) ? 1 : 0;
+
+               sdpqf = IXGBE_READ_REG(hw, IXGBE_SDPQF(index));
+               filter->dst_port = (sdpqf & IXGBE_SDPQF_DSTPORT) >>
+                                       IXGBE_SDPQF_DSTPORT_SHIFT;
+               filter->src_port = sdpqf & IXGBE_SDPQF_SRCPORT;
+               filter->dst_ip = IXGBE_READ_REG(hw, IXGBE_DAQF(index));
+               filter->src_ip = IXGBE_READ_REG(hw, IXGBE_SAQF(index));
+
+               l34timir = IXGBE_READ_REG(hw, IXGBE_L34T_IMIR(index));
+               *rx_queue = (l34timir & IXGBE_L34T_IMIR_QUEUE) >>
+                                       IXGBE_L34T_IMIR_QUEUE_SHIFT;
+               return 0;
+       }
+       return -ENOENT;
+}
+
+static int
+ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+       struct ixgbe_hw *hw;
+       uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+               return -EINVAL;
+
+       /* refuse mtu that requires the support of scattered packets when this
+        * feature has not been enabled before. */
+       if (!dev->data->scattered_rx &&
+           (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
+            dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+               return -EINVAL;
+
+       /*
+        * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
+        * request of the version 2.0 of the mailbox API.
+        * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
+        * of the mailbox API.
+        * This IXGBE_VF_SET_LPE request won't work with ixgbe pf drivers
+        * prior to 3.11.33 which contains the following change:
+        * "ixgbe: Enable jumbo frames support w/ SR-IOV"
+        */
+       ixgbevf_rlpml_set_vf(hw, max_frame);
+
+       /* update max frame size */
+       dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
+       return 0;
+}
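
As a worked bound: with the common 2048-byte mbuf data room and the default
128-byte RTE_PKTMBUF_HEADROOM, the scattered-rx check above refuses any
max_frame over 1912 bytes (1912 + 2 * IXGBE_VLAN_TAG_SIZE = 1920 = 2048 -
128). A minimal sketch that stays inside that bound:

	#include <rte_ethdev.h>

	/* Sketch: raise a VF MTU to 1700, i.e. max_frame = 1718 bytes. */
	static int
	example_set_vf_mtu(uint8_t port_id)
	{
		return rte_eth_dev_set_mtu(port_id, 1700);
	}
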
+
+#define MAC_TYPE_FILTER_SUP(type)    do {\
+       if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
+               (type) != ixgbe_mac_X550)\
+               return -ENOTSUP;\
+} while (0)
+
+static inline int
+ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
+                       uint16_t ethertype)
+{
+       int i;
+
+       for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+               if (filter_info->ethertype_filters[i] == ethertype &&
+                   (filter_info->ethertype_mask & (1 << i)))
+                       return i;
+       }
+       return -1;
+}
+
+static inline int
+ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
+                       uint16_t ethertype)
+{
+       int i;
+
+       for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+               if (!(filter_info->ethertype_mask & (1 << i))) {
+                       filter_info->ethertype_mask |= 1 << i;
+                       filter_info->ethertype_filters[i] = ethertype;
+                       return i;
+               }
+       }
+       return -1;
+}
+
+static inline int
+ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
+                       uint8_t idx)
+{
+       if (idx >= IXGBE_MAX_ETQF_FILTERS)
+               return -1;
+       filter_info->ethertype_mask &= ~(1 << idx);
+       filter_info->ethertype_filters[idx] = 0;
+       return idx;
+}
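
Together these three helpers implement a small slot allocator over the ETQF
bitmap; a sketch of the expected behaviour (the ethertype values are
arbitrary):

	static void
	example_etqf_slots(void)
	{
		struct ixgbe_filter_info info = { .ethertype_mask = 0 };
		int a, b;

		a = ixgbe_ethertype_filter_insert(&info, 0x88F7); /* slot 0 */
		b = ixgbe_ethertype_filter_insert(&info, 0x8906); /* slot 1 */
		/* lookup returns the slot index, or -1 when absent */
		b = ixgbe_ethertype_filter_lookup(&info, 0x8906); /* -> 1 */
		/* freeing slot 0 makes it the next one handed out */
		ixgbe_ethertype_filter_remove(&info, (uint8_t)a);
		(void)b;
	}
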
+
+static int
+ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+                       struct rte_eth_ethertype_filter *filter,
+                       bool add)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_filter_info *filter_info =
+               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+       uint32_t etqf = 0;
+       uint32_t etqs = 0;
+       int ret;
+
+       if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
+               return -EINVAL;
+
+       if (filter->ether_type == ETHER_TYPE_IPv4 ||
+               filter->ether_type == ETHER_TYPE_IPv6) {
+               PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
+                       " ethertype filter.", filter->ether_type);
+               return -EINVAL;
+       }
+
+       if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+               PMD_DRV_LOG(ERR, "mac compare is unsupported.");
+               return -EINVAL;
+       }
+       if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+               PMD_DRV_LOG(ERR, "drop option is unsupported.");
+               return -EINVAL;
+       }
+
+       ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
+       if (ret >= 0 && add) {
+               PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
+                           filter->ether_type);
+               return -EEXIST;
+       }
+       if (ret < 0 && !add) {
+               PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+                           filter->ether_type);
+               return -ENOENT;
+       }
+
+       if (add) {
+               ret = ixgbe_ethertype_filter_insert(filter_info,
+                       filter->ether_type);
+               if (ret < 0) {
+                       PMD_DRV_LOG(ERR, "ethertype filters are full.");
+                       return -ENOSYS;
+               }
+               etqf = IXGBE_ETQF_FILTER_EN;
+               etqf |= (uint32_t)filter->ether_type;
+               etqs |= (uint32_t)((filter->queue <<
+                                   IXGBE_ETQS_RX_QUEUE_SHIFT) &
+                                   IXGBE_ETQS_RX_QUEUE);
+               etqs |= IXGBE_ETQS_QUEUE_EN;
+       } else {
+               ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
+               if (ret < 0)
+                       return -ENOSYS;
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
+       IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return 0;
+}
+
+static int
+ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
+                       struct rte_eth_ethertype_filter *filter)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_filter_info *filter_info =
+               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+       uint32_t etqf, etqs;
+       int ret;
+
+       ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
+       if (ret < 0) {
+               PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+                           filter->ether_type);
+               return -ENOENT;
+       }
+
+       etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
+       if (etqf & IXGBE_ETQF_FILTER_EN) {
+               etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
+               filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
+               filter->flags = 0;
+               filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
+                              IXGBE_ETQS_RX_QUEUE_SHIFT;
+               return 0;
+       }
+       return -ENOENT;
+}
+
+/*
+ * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op: operation to be taken.
+ * @arg: a pointer to the specific structure corresponding to the filter_op
+ */
+static int
+ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
+                               enum rte_filter_op filter_op,
+                               void *arg)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int ret;
+
+       MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+       if (filter_op == RTE_ETH_FILTER_NOP)
+               return 0;
+
+       if (arg == NULL) {
+               PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+                           filter_op);
+               return -EINVAL;
+       }
+
+       switch (filter_op) {
+       case RTE_ETH_FILTER_ADD:
+               ret = ixgbe_add_del_ethertype_filter(dev,
+                       (struct rte_eth_ethertype_filter *)arg,
+                       TRUE);
+               break;
+       case RTE_ETH_FILTER_DELETE:
+               ret = ixgbe_add_del_ethertype_filter(dev,
+                       (struct rte_eth_ethertype_filter *)arg,
+                       FALSE);
+               break;
+       case RTE_ETH_FILTER_GET:
+               ret = ixgbe_get_ethertype_filter(dev,
+                       (struct rte_eth_ethertype_filter *)arg);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+               ret = -EINVAL;
+               break;
+       }
+       return ret;
+}
+
+static int
+ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
+                    enum rte_filter_type filter_type,
+                    enum rte_filter_op filter_op,
+                    void *arg)
+{
+       int ret = -EINVAL;
+
+       switch (filter_type) {
+       case RTE_ETH_FILTER_ETHERTYPE:
+               ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
+               break;
+       default:
+               PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+                                                       filter_type);
+               break;
+       }
+
+       return ret;
+}
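
Applications reach the ethertype path through this generic entry point; a
minimal sketch (the LLDP ethertype 0x88CC and the queue are illustrative):

	#include <rte_ethdev.h>

	/* Sketch: steer LLDP frames to RX queue 0 via the filter-ctrl API. */
	static int
	example_add_lldp_filter(uint8_t port_id)
	{
		struct rte_eth_ethertype_filter f = {
			.ether_type = 0x88CC,
			.flags = 0,
			.queue = 0,
		};

		return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
					       RTE_ETH_FILTER_ADD, &f);
	}
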
+
 static struct rte_driver rte_ixgbe_driver = {
        .type = PMD_PDEV,
        .init = rte_ixgbe_pmd_init,