ixgbe: allow unsupported SFP
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index eb31bd9..a147d46 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -39,6 +39,7 @@
 #include <unistd.h>
 #include <stdarg.h>
 #include <inttypes.h>
+#include <netinet/in.h>
 #include <rte_byteorder.h>
 #include <rte_common.h>
 #include <rte_cycles.h>
@@ -88,6 +89,8 @@
 #define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
 #define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
 
+#define IXGBE_MMW_SIZE_DEFAULT        0x4
+#define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
 
 #define IXGBEVF_PMD_NAME "rte_ixgbevf_pmd" /* PMD name */
 
@@ -98,6 +101,8 @@ static int eth_ixgbe_dev_init(struct eth_driver *eth_drv,
 static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
 static int  ixgbe_dev_start(struct rte_eth_dev *dev);
 static void ixgbe_dev_stop(struct rte_eth_dev *dev);
+static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
+static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
 static void ixgbe_dev_close(struct rte_eth_dev *dev);
 static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
 static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
@@ -114,6 +119,9 @@ static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                                             uint8_t is_rx);
 static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
                                struct rte_eth_dev_info *dev_info);
+
+static int ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+
 static int ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
                uint16_t vlan_id, int on);
 static void ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid_id);
@@ -129,8 +137,10 @@ static void ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev);
 
 static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
 static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
-static int  ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
-               struct rte_eth_fc_conf *fc_conf);
+static int ixgbe_flow_ctrl_get(struct rte_eth_dev *dev,
+                              struct rte_eth_fc_conf *fc_conf);
+static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
+                              struct rte_eth_fc_conf *fc_conf);
 static int ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
                struct rte_eth_pfc_conf *pfc_conf);
 static int ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
@@ -183,10 +193,34 @@ static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 static int ixgbe_mirror_rule_reset(struct rte_eth_dev *dev,
                uint8_t rule_id);
 
+static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+               uint16_t queue_idx, uint16_t tx_rate);
+static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+               uint16_t tx_rate, uint64_t q_msk);
+
 static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
                                 struct ether_addr *mac_addr,
                                 uint32_t index, uint32_t pool);
 static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
+static int ixgbe_add_syn_filter(struct rte_eth_dev *dev,
+                       struct rte_syn_filter *filter, uint16_t rx_queue);
+static int ixgbe_remove_syn_filter(struct rte_eth_dev *dev);
+static int ixgbe_get_syn_filter(struct rte_eth_dev *dev,
+                       struct rte_syn_filter *filter, uint16_t *rx_queue);
+static int ixgbe_add_ethertype_filter(struct rte_eth_dev *dev, uint16_t index,
+                       struct rte_ethertype_filter *filter, uint16_t rx_queue);
+static int ixgbe_remove_ethertype_filter(struct rte_eth_dev *dev,
+                       uint16_t index);
+static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev, uint16_t index,
+                       struct rte_ethertype_filter *filter, uint16_t *rx_queue);
+static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+                       struct rte_5tuple_filter *filter, uint16_t rx_queue);
+static int ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+                       uint16_t index);
+static int ixgbe_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+                       struct rte_5tuple_filter *filter, uint16_t *rx_queue);
+
+static int ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
 
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
@@ -252,6 +286,8 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .dev_configure        = ixgbe_dev_configure,
        .dev_start            = ixgbe_dev_start,
        .dev_stop             = ixgbe_dev_stop,
+       .dev_set_link_up      = ixgbe_dev_set_link_up,
+       .dev_set_link_down    = ixgbe_dev_set_link_down,
        .dev_close            = ixgbe_dev_close,
        .promiscuous_enable   = ixgbe_dev_promiscuous_enable,
        .promiscuous_disable  = ixgbe_dev_promiscuous_disable,
@@ -262,6 +298,7 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .stats_reset          = ixgbe_dev_stats_reset,
        .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
        .dev_infos_get        = ixgbe_dev_info_get,
+       .mtu_set              = ixgbe_dev_mtu_set,
        .vlan_filter_set      = ixgbe_vlan_filter_set,
        .vlan_tpid_set        = ixgbe_vlan_tpid_set,
        .vlan_offload_set     = ixgbe_vlan_offload_set,
@@ -278,6 +315,7 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .tx_queue_release     = ixgbe_dev_tx_queue_release,
        .dev_led_on           = ixgbe_dev_led_on,
        .dev_led_off          = ixgbe_dev_led_off,
+       .flow_ctrl_get        = ixgbe_flow_ctrl_get,
        .flow_ctrl_set        = ixgbe_flow_ctrl_set,
        .priority_flow_ctrl_set = ixgbe_priority_flow_ctrl_set,
        .mac_addr_add         = ixgbe_add_rar,
@@ -290,6 +328,8 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .set_vf_rx            = ixgbe_set_pool_rx,
        .set_vf_tx            = ixgbe_set_pool_tx,
        .set_vf_vlan_filter   = ixgbe_set_pool_vlan_filter,
+       .set_queue_rate_limit = ixgbe_set_queue_rate_limit,
+       .set_vf_rate_limit    = ixgbe_set_vf_rate_limit,
        .fdir_add_signature_filter    = ixgbe_fdir_add_signature_filter,
        .fdir_update_signature_filter = ixgbe_fdir_update_signature_filter,
        .fdir_remove_signature_filter = ixgbe_fdir_remove_signature_filter,
@@ -313,6 +353,15 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
 #endif /* RTE_NIC_BYPASS */
        .rss_hash_update      = ixgbe_dev_rss_hash_update,
        .rss_hash_conf_get    = ixgbe_dev_rss_hash_conf_get,
+       .add_syn_filter          = ixgbe_add_syn_filter,
+       .remove_syn_filter       = ixgbe_remove_syn_filter,
+       .get_syn_filter          = ixgbe_get_syn_filter,
+       .add_ethertype_filter    = ixgbe_add_ethertype_filter,
+       .remove_ethertype_filter = ixgbe_remove_ethertype_filter,
+       .get_ethertype_filter    = ixgbe_get_ethertype_filter,
+       .add_5tuple_filter       = ixgbe_add_5tuple_filter,
+       .remove_5tuple_filter    = ixgbe_remove_5tuple_filter,
+       .get_5tuple_filter       = ixgbe_get_5tuple_filter,
 };
 
 /*
@@ -329,6 +378,7 @@ static struct eth_dev_ops ixgbevf_eth_dev_ops = {
        .stats_reset          = ixgbevf_dev_stats_reset,
        .dev_close            = ixgbevf_dev_close,
        .dev_infos_get        = ixgbe_dev_info_get,
+       .mtu_set              = ixgbevf_dev_set_mtu,
        .vlan_filter_set      = ixgbevf_vlan_filter_set,
        .vlan_strip_queue_set = ixgbevf_vlan_strip_queue_set,
        .vlan_offload_set     = ixgbevf_vlan_offload_set,
@@ -497,12 +547,13 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
        if ((hw->mac.type != ixgbe_mac_82599EB) && (hw->mac.type != ixgbe_mac_X540))
                return -ENOSYS;
 
-       PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d\n",
-                    (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx);
+       PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d",
+                    (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+                    queue_id, stat_idx);
 
        n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
        if (n >= IXGBE_NB_STAT_MAPPING_REGS) {
-               PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded\n");
+               PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
                return -EIO;
        }
        offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
@@ -522,19 +573,20 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
        else
                stat_mappings->rqsmr[n] |= qsmr_mask;
 
-       PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d\n"
-                    "%s[%d] = 0x%08x\n",
-                    (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX", queue_id, stat_idx,
-                    is_rx ? "RQSMR" : "TQSM",n, is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
+       PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d",
+                    (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+                    queue_id, stat_idx);
+       PMD_INIT_LOG(INFO, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
+                    is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
 
        /* Now write the mapping in the appropriate register */
        if (is_rx) {
-               PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d\n",
+               PMD_INIT_LOG(INFO, "Write 0x%x to RX IXGBE stat mapping reg:%d",
                             stat_mappings->rqsmr[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
        }
        else {
-               PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d\n",
+               PMD_INIT_LOG(INFO, "Write 0x%x to TX IXGBE stat mapping reg:%d",
                             stat_mappings->tqsm[n], n);
                IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
        }
@@ -617,7 +669,7 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
         */
        mask = IXGBE_GSSR_PHY0_SM << hw->bus.func;
        if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
-                  DEBUGOUT1("SWFW phy%d lock released", hw->bus.func);
+               PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released", hw->bus.func);
        }
        ixgbe_release_swfw_semaphore(hw, mask);
 
@@ -629,7 +681,7 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
         */
        mask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM;
        if (ixgbe_acquire_swfw_semaphore(hw, mask) < 0) {
-                  DEBUGOUT("SWFW common locks released");
+               PMD_DRV_LOG(DEBUG, "SWFW common locks released");
        }
        ixgbe_release_swfw_semaphore(hw, mask);
 }
@@ -675,11 +727,9 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
-#ifdef RTE_LIBRTE_IXGBE_ALLOW_UNSUPPORTED_SFP
        hw->allow_unsupported_sfp = 1;
-#endif
 
-       /* Initialize the shared code */
+       /* Initialize the shared code (base driver) */
 #ifdef RTE_NIC_BYPASS
        diag = ixgbe_bypass_init_shared_code(hw);
 #else
@@ -741,11 +791,12 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        if (diag == IXGBE_ERR_EEPROM_VERSION) {
                PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
                    "LOM.  Please be aware there may be issues associated "
-                   "with your hardware.\n If you are experiencing problems "
+                   "with your hardware.");
+               PMD_INIT_LOG(ERR, "If you are experiencing problems "
                    "please contact your Intel or hardware representative "
-                   "who provided you with this hardware.\n");
+                   "who provided you with this hardware.");
        } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
-               PMD_INIT_LOG(ERR, "Unsupported SFP+ Module\n");
+               PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
        if (diag) {
                PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
                return -EIO;
@@ -799,12 +850,11 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        IXGBE_WRITE_FLUSH(hw);
 
        if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
-               PMD_INIT_LOG(DEBUG,
-                            "MAC: %d, PHY: %d, SFP+: %d<n",
+               PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d",
                             (int) hw->mac.type, (int) hw->phy.type,
                             (int) hw->phy.sfp_type);
        else
-               PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d\n",
+               PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d",
                             (int) hw->mac.type, (int) hw->phy.type);
 
        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
@@ -883,7 +933,7 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
                IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
        struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
 
-       PMD_INIT_LOG(DEBUG, "eth_ixgbevf_dev_init");
+       PMD_INIT_FUNC_TRACE();
 
        eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
        eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
@@ -910,7 +960,7 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
        /* initialize the hw strip bitmap*/
        memset(hwstrip, 0, sizeof(*hwstrip));
 
-       /* Initialize the shared code */
+       /* Initialize the shared code (base driver) */
        diag = ixgbe_init_shared_code(hw);
        if (diag != IXGBE_SUCCESS) {
                PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
@@ -962,16 +1012,15 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
                        eth_dev->data->mac_addrs = NULL;
                        return diag;
                }
-               RTE_LOG(INFO, PMD,
-                       "\tVF MAC address not assigned by Host PF\n"
-                       "\tAssign randomly generated MAC address "
-                       "%02x:%02x:%02x:%02x:%02x:%02x\n",
-                       perm_addr->addr_bytes[0],
-                       perm_addr->addr_bytes[1],
-                       perm_addr->addr_bytes[2],
-                       perm_addr->addr_bytes[3],
-                       perm_addr->addr_bytes[4],
-                       perm_addr->addr_bytes[5]);
+               PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
+               PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
+                            "%02x:%02x:%02x:%02x:%02x:%02x",
+                            perm_addr->addr_bytes[0],
+                            perm_addr->addr_bytes[1],
+                            perm_addr->addr_bytes[2],
+                            perm_addr->addr_bytes[3],
+                            perm_addr->addr_bytes[4],
+                            perm_addr->addr_bytes[5]);
        }
 
        /* Copy the permanent MAC address */
@@ -988,9 +1037,9 @@ eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
                        return (-EIO);
        }
 
-       PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x mac.type=%s\n",
-                        eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id,
-                        "ixgbe_mac_82599_vf");
+       PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
+                    eth_dev->data->port_id, pci_dev->id.vendor_id,
+                    pci_dev->id.device_id, "ixgbe_mac_82599_vf");
 
        return 0;
 }
@@ -999,7 +1048,7 @@ static struct eth_driver rte_ixgbe_pmd = {
        {
                .name = "rte_ixgbe_pmd",
                .id_table = pci_id_ixgbe_map,
-               .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
+               .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        },
        .eth_dev_init = eth_ixgbe_dev_init,
        .dev_private_size = sizeof(struct ixgbe_adapter),
@@ -1012,7 +1061,7 @@ static struct eth_driver rte_ixgbevf_pmd = {
        {
                .name = "rte_ixgbevf_pmd",
                .id_table = pci_id_ixgbevf_map,
-               .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
+               .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        },
        .eth_dev_init = eth_ixgbevf_dev_init,
        .dev_private_size = sizeof(struct ixgbe_adapter),
@@ -1040,7 +1089,7 @@ rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unuse
 static int
 rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
 {
-       DEBUGFUNC("rte_ixgbevf_pmd_init");
+       PMD_INIT_FUNC_TRACE();
 
        rte_eth_driver_register(&rte_ixgbevf_pmd);
        return (0);
@@ -1355,19 +1404,22 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_vf_info *vfinfo =
+               *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
        int err, link_up = 0, negotiate = 0;
        uint32_t speed = 0;
        int mask = 0;
        int status;
+       uint16_t vf, idx;
 
        PMD_INIT_FUNC_TRACE();
 
        /* IXGBE devices don't support half duplex */
        if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
                        (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
-               PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu\n",
-                               dev->data->dev_conf.link_duplex,
-                               dev->data->port_id);
+               PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
+                            dev->data->dev_conf.link_duplex,
+                            dev->data->port_id);
                return -EINVAL;
        }
 
@@ -1391,7 +1443,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        /* This can fail when allocating mbufs for descriptor rings */
        err = ixgbe_dev_rx_init(dev);
        if (err) {
-               PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n");
+               PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
                goto error;
        }
 
@@ -1438,13 +1490,13 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
                speed = IXGBE_LINK_SPEED_10GB_FULL;
                break;
        default:
-               PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu\n",
-                               dev->data->dev_conf.link_speed,
-                               dev->data->port_id);
+               PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu",
+                            dev->data->dev_conf.link_speed,
+                            dev->data->port_id);
                goto error;
        }
 
-       err = ixgbe_setup_link(hw, speed, negotiate, link_up);
+       err = ixgbe_setup_link(hw, speed, link_up);
        if (err)
                goto error;
 
@@ -1475,6 +1527,16 @@ skip_link_setup:
                        goto error;
        }
 
+       /* Restore vf rate limit */
+       if (vfinfo != NULL) {
+               for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
+                       for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
+                               if (vfinfo[vf].tx_rate[idx] != 0)
+                                       ixgbe_set_vf_rate_limit(dev, vf,
+                                               vfinfo[vf].tx_rate[idx],
+                                               1 << idx);
+       }
+
        ixgbe_restore_statistics_mapping(dev);
 
        return (0);
@@ -1524,6 +1586,60 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
        rte_ixgbe_dev_atomic_write_link_status(dev, &link);
 }
 
+/*
+ * Set device link up: enable tx laser.
+ */
+static int
+ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       if (hw->mac.type == ixgbe_mac_82599EB) {
+#ifdef RTE_NIC_BYPASS
+               if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+                       /* Not supported in bypass mode */
+                       PMD_INIT_LOG(ERR, "Set link up is not supported "
+                                    "by device id 0x%x", hw->device_id);
+                       return -ENOTSUP;
+               }
+#endif
+               /* Turn on the laser */
+               ixgbe_enable_tx_laser(hw);
+               return 0;
+       }
+
+       PMD_INIT_LOG(ERR, "Set link up is not supported by device id 0x%x",
+                    hw->device_id);
+       return -ENOTSUP;
+}
+
+/*
+ * Set device link down: disable tx laser.
+ */
+static int
+ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       if (hw->mac.type == ixgbe_mac_82599EB) {
+#ifdef RTE_NIC_BYPASS
+               if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
+                       /* Not supported in bypass mode */
+                       PMD_INIT_LOG(ERR, "Set link down is not supported "
+                                    "by device id 0x%x", hw->device_id);
+                       return -ENOTSUP;
+               }
+#endif
+               /* Turn off the laser */
+               ixgbe_disable_tx_laser(hw);
+               return 0;
+       }
+
+       PMD_INIT_LOG(ERR, "Set link down is not supported by device id 0x%x",
+                    hw->device_id);
+       return -ENOTSUP;
+}
+
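
For illustration, these callbacks are reached through the generic ethdev
layer; a minimal application-side sketch, assuming the
rte_eth_dev_set_link_up()/rte_eth_dev_set_link_down() wrappers introduced
alongside this patch series (the port id and helper name are hypothetical):

	#include <rte_ethdev.h>

	/* Bounce the link on a port. The PMD returns -ENOTSUP on MACs
	 * other than 82599EB and on the bypass adapter, so check both
	 * calls. */
	static int
	bounce_link(uint8_t port_id)
	{
		int ret;

		ret = rte_eth_dev_set_link_down(port_id); /* tx laser off */
		if (ret < 0)
			return ret;
		return rte_eth_dev_set_link_up(port_id); /* tx laser on */
	}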
 /*
  * Reset and stop device.
  */
@@ -1706,9 +1822,15 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        }
 
        /* Rx Errors */
-       stats->ierrors = total_missed_rx + hw_stats->crcerrs +
-               hw_stats->rlec;
-
+       stats->ibadcrc  = hw_stats->crcerrs;
+       stats->ibadlen  = hw_stats->rlec + hw_stats->ruc + hw_stats->roc;
+       stats->imissed  = total_missed_rx;
+       stats->ierrors  = stats->ibadcrc +
+                         stats->ibadlen +
+                         stats->imissed +
+                         hw_stats->illerrc + hw_stats->errbc;
+
+       /* Tx Errors */
        stats->oerrors  = 0;
 
        /* XON/XOFF pause frames */
@@ -2052,7 +2174,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
        struct rte_eth_link link;
        int intr_enable_delay = false;
 
-       PMD_DRV_LOG(DEBUG, "intr action type %d\n", intr->flags);
+       PMD_DRV_LOG(DEBUG, "intr action type %d", intr->flags);
 
        if (intr->flags & IXGBE_FLAG_MAILBOX) {
                ixgbe_pf_mbx_process(dev);
@@ -2129,7 +2251,7 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
                _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
        }
 
-       PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]\n", eicr);
+       PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
        ixgbe_enable_intr(dev);
        rte_intr_enable(&(dev->pci_dev->intr_handle));
 }
@@ -2173,6 +2295,55 @@ ixgbe_dev_led_off(struct rte_eth_dev *dev)
        return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
 }
 
+static int
+ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+       struct ixgbe_hw *hw;
+       uint32_t mflcn_reg;
+       uint32_t fccfg_reg;
+       int rx_pause;
+       int tx_pause;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       fc_conf->pause_time = hw->fc.pause_time;
+       fc_conf->high_water = hw->fc.high_water[0];
+       fc_conf->low_water = hw->fc.low_water[0];
+       fc_conf->send_xon = hw->fc.send_xon;
+       fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
+
+       /*
+        * Return rx_pause status according to actual setting of
+        * MFLCN register.
+        */
+       mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+       if (mflcn_reg & (IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_RFCE))
+               rx_pause = 1;
+       else
+               rx_pause = 0;
+
+       /*
+        * Return tx_pause status according to actual setting of
+        * FCCFG register.
+        */
+       fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+       if (fccfg_reg & (IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY))
+               tx_pause = 1;
+       else
+               tx_pause = 0;
+
+       if (rx_pause && tx_pause)
+               fc_conf->mode = RTE_FC_FULL;
+       else if (rx_pause)
+               fc_conf->mode = RTE_FC_RX_PAUSE;
+       else if (tx_pause)
+               fc_conf->mode = RTE_FC_TX_PAUSE;
+       else
+               fc_conf->mode = RTE_FC_NONE;
+
+       return 0;
+}
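
As a usage sketch, an application can read back the mode derived above
through the generic API; this assumes the rte_eth_dev_flow_ctrl_get()
wrapper added together with this callback (the helper name is hypothetical):

	#include <string.h>
	#include <rte_ethdev.h>

	/* Return 1 if the port currently honours received PAUSE frames,
	 * 0 if not, negative on error. */
	static int
	rx_pause_enabled(uint8_t port_id)
	{
		struct rte_eth_fc_conf fc_conf;
		int ret;

		memset(&fc_conf, 0, sizeof(fc_conf));
		ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
		if (ret < 0)
			return ret;
		return (fc_conf.mode == RTE_FC_FULL ||
			fc_conf.mode == RTE_FC_RX_PAUSE);
	}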
+
 static int
 ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 {
@@ -2191,8 +2362,10 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        PMD_INIT_FUNC_TRACE();
 
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       if (fc_conf->autoneg != !hw->fc.disable_fc_autoneg)
+               return -ENOTSUP;
        rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
-       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
+       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
 
        /*
         * At least reserve one Ethernet frame for watermark
@@ -2201,8 +2374,8 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
        if ((fc_conf->high_water > max_high_water) ||
                (fc_conf->high_water < fc_conf->low_water)) {
-               PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
-               PMD_INIT_LOG(ERR, "High_water must <=  0x%x\n", max_high_water);
+               PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+               PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
                return (-EINVAL);
        }
 
@@ -2234,7 +2407,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
                return 0;
        }
 
-       PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x \n", err);
+       PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x", err);
        return -EIO;
 }
 
@@ -2264,13 +2437,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
        if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
                 /* High/Low water can not be 0 */
                if( (!hw->fc.high_water[tc_num])|| (!hw->fc.low_water[tc_num])) {
-                       PMD_INIT_LOG(ERR,"Invalid water mark configuration\n");
+                       PMD_INIT_LOG(ERR, "Invalid water mark configuration");
                        ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                        goto out;
                }
 
                if(hw->fc.low_water[tc_num] >= hw->fc.high_water[tc_num]) {
-                       PMD_INIT_LOG(ERR,"Invalid water mark configuration\n");
+                       PMD_INIT_LOG(ERR, "Invalid water mark configuration");
                        ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                        goto out;
                }
@@ -2336,7 +2509,7 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
                fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
                break;
        default:
-               DEBUGOUT("Flow control param set incorrectly\n");
+               PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
                ret_val = IXGBE_ERR_CONFIG;
                goto out;
                break;
@@ -2414,16 +2587,16 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
        ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_RX_CONFIG, map);
        tc_num = map[pfc_conf->priority];
        rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc_num));
-       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
+       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
        /*
         * At least reserve one Ethernet frame for watermark
         * high_water/low_water in kilo bytes for ixgbe
         */
        max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
        if ((pfc_conf->fc.high_water > max_high_water) ||
-               (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
-               PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
-               PMD_INIT_LOG(ERR, "High_water must <=  0x%x\n", max_high_water);
+           (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
+               PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+               PMD_INIT_LOG(ERR, "High_water must be <= 0x%x", max_high_water);
                return (-EINVAL);
        }
 
@@ -2439,7 +2612,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
        if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
                return 0;
 
-       PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x \n", err);
+       PMD_INIT_LOG(ERR, "ixgbe_dcb_pfc_enable = 0x%x", err);
        return -EIO;
 }
 
@@ -2534,13 +2707,59 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
        ixgbe_clear_rar(hw, index);
 }
 
+static int
+ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+       uint32_t hlreg0;
+       uint32_t maxfrs;
+       struct ixgbe_hw *hw;
+       struct rte_eth_dev_info dev_info;
+       uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+       ixgbe_dev_info_get(dev, &dev_info);
+
+       /* check that mtu is within the allowed range */
+       if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+               return -EINVAL;
+
+       /* refuse mtu that requires the support of scattered packets when this
+        * feature has not been enabled before. */
+       if (!dev->data->scattered_rx &&
+           (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
+            dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+
+       /* switch to jumbo mode if needed */
+       if (frame_size > ETHER_MAX_LEN) {
+               dev->data->dev_conf.rxmode.jumbo_frame = 1;
+               hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+       } else {
+               dev->data->dev_conf.rxmode.jumbo_frame = 0;
+               hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+       /* update max frame size */
+       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+       maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+       maxfrs &= 0x0000FFFF;
+       maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
+       IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
+
+       return 0;
+}
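
A worked example of the frame-size arithmetic above, assuming the usual
ETHER_HDR_LEN (14) and ETHER_CRC_LEN (4) values: an MTU of 1500 yields a
1518-byte frame (exactly ETHER_MAX_LEN, so jumbo mode stays off), while an
MTU of 9000 yields 9018 bytes and turns it on. A standalone sketch:

	#include <stdint.h>

	#define HDR_PLUS_CRC	(14 + 4) /* ETHER_HDR_LEN + ETHER_CRC_LEN */

	/* 1500 -> 1518 (jumbo off), 9000 -> 9018 (jumbo on). MAXFRS then
	 * carries the resulting maximum frame size in its upper 16 bits,
	 * hence the "maxfrs |= frame_size << 16" above. */
	static inline uint32_t
	mtu_to_frame_size(uint16_t mtu)
	{
		return (uint32_t)mtu + HDR_PLUS_CRC;
	}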
+
 /*
  * Virtual Function operations
  */
 static void
 ixgbevf_intr_disable(struct ixgbe_hw *hw)
 {
-       PMD_INIT_LOG(DEBUG, "ixgbevf_intr_disable");
+       PMD_INIT_FUNC_TRACE();
 
        /* Clear interrupt mask to stop from interrupts being generated */
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
@@ -2553,8 +2772,8 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
 {
        struct rte_eth_conf* conf = &dev->data->dev_conf;
 
-       PMD_INIT_LOG(DEBUG, "\nConfigured Virtual Function port id: %d\n",
-               dev->data->port_id);
+       PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
+                    dev->data->port_id);
 
        /*
         * VF has no ability to enable/disable HW CRC
@@ -2562,12 +2781,12 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
         */
 #ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
        if (!conf->rxmode.hw_strip_crc) {
-               PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n");
+               PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip");
                conf->rxmode.hw_strip_crc = 1;
        }
 #else
        if (conf->rxmode.hw_strip_crc) {
-               PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip\n");
+               PMD_INIT_LOG(INFO, "VF can't enable HW CRC Strip");
                conf->rxmode.hw_strip_crc = 0;
        }
 #endif
@@ -2582,7 +2801,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int err, mask = 0;
 
-       PMD_INIT_LOG(DEBUG, "ixgbevf_dev_start");
+       PMD_INIT_FUNC_TRACE();
 
        hw->mac.ops.reset_hw(hw);
 
@@ -2594,7 +2813,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
        /* This can fail when allocating mbufs for descriptor rings */
        err = ixgbevf_dev_rx_init(dev);
        if (err) {
-               PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)\n", err);
+               PMD_INIT_LOG(ERR, "Unable to initialize RX hardware (%d)", err);
                ixgbe_dev_clear_queues(dev);
                return err;
        }
@@ -2617,7 +2836,7 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       PMD_INIT_LOG(DEBUG, "ixgbevf_dev_stop");
+       PMD_INIT_FUNC_TRACE();
 
        hw->adapter_stopped = TRUE;
        ixgbe_stop_adapter(hw);
@@ -2636,7 +2855,7 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       PMD_INIT_LOG(DEBUG, "ixgbevf_dev_close");
+       PMD_INIT_FUNC_TRACE();
 
        ixgbe_reset_hw(hw);
 
@@ -2745,7 +2964,7 @@ ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
        /* we only need to do this if VMDq is enabled */
        reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
        if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
-               PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting\n");
+               PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
                return (-1);
        }
 
@@ -2874,7 +3093,7 @@ ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
 
        if (hw->mac.type == ixgbe_mac_82598EB) {
                PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
-                       " on 82599 hardware and newer\n");
+                            " on 82599 hardware and newer");
                return (-ENOTSUP);
        }
        if (ixgbe_vmdq_mode_check(hw) < 0)
@@ -3132,6 +3351,108 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
        return 0;
 }
 
+static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+       uint16_t queue_idx, uint16_t tx_rate)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t rf_dec, rf_int;
+       uint32_t bcnrc_val;
+       uint16_t link_speed = dev->data->dev_link.link_speed;
+
+       if (queue_idx >= hw->mac.max_tx_queues)
+               return -EINVAL;
+
+       if (tx_rate != 0) {
+               /* Calculate the rate factor values to set */
+               rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
+               rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
+               rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
+
+               bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
+               bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
+                               IXGBE_RTTBCNRC_RF_INT_MASK_M);
+               bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
+       } else {
+               bcnrc_val = 0;
+       }
+
+       /*
+        * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+        * register. MMW_SIZE=0x014 if 9728-byte jumbo frames are supported,
+        * otherwise set it to 0x4.
+        */
+       if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
+               (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
+                               IXGBE_MAX_JUMBO_FRAME_SIZE))
+               IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+                       IXGBE_MMW_SIZE_JUMBO_FRAME);
+       else
+               IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+                       IXGBE_MMW_SIZE_DEFAULT);
+
+       /* Set RTTBCNRC of queue X */
+       IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
+       IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return 0;
+}
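
The value written to RTTBCNRC is link_speed/tx_rate in fixed point: the
integer part sits above bit 14 and a 14-bit fraction below it
(IXGBE_RTTBCNRC_RF_INT_SHIFT is 14). A standalone worked example; the 10 Gb/s
link speed and 300 Mb/s cap are illustrative:

	#include <stdint.h>
	#include <stdio.h>

	#define RF_INT_SHIFT 14	/* mirrors IXGBE_RTTBCNRC_RF_INT_SHIFT */

	int
	main(void)
	{
		uint32_t link_speed = 10000; /* Mb/s, as in dev_link.link_speed */
		uint32_t tx_rate = 300;      /* requested cap in Mb/s */
		uint32_t rf_int = link_speed / tx_rate;
		uint32_t rf_dec = ((link_speed % tx_rate) << RF_INT_SHIFT)
				  / tx_rate;

		/* prints "rate factor = 33 + 5461/16384 (~33.333)" */
		printf("rate factor = %u + %u/%u (~%.3f)\n", rf_int, rf_dec,
		       1u << RF_INT_SHIFT,
		       rf_int + (double)rf_dec / (1u << RF_INT_SHIFT));
		return 0;
	}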
+
+static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+       uint16_t tx_rate, uint64_t q_msk)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_vf_info *vfinfo =
+               *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+       uint8_t  nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+       uint32_t queue_stride =
+               IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+       uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
+       uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
+       uint16_t total_rate = 0;
+
+       if (queue_end >= hw->mac.max_tx_queues)
+               return -EINVAL;
+
+       if (vfinfo != NULL) {
+               for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) {
+                       if (vf_idx == vf)
+                               continue;
+                       for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
+                               idx++)
+                               total_rate += vfinfo[vf_idx].tx_rate[idx];
+               }
+       } else
+               return -EINVAL;
+
+       /* Store tx_rate for this vf. */
+       for (idx = 0; idx < nb_q_per_pool; idx++) {
+               if (((uint64_t)0x1 << idx) & q_msk) {
+                       if (vfinfo[vf].tx_rate[idx] != tx_rate)
+                               vfinfo[vf].tx_rate[idx] = tx_rate;
+                       total_rate += tx_rate;
+               }
+       }
+
+       if (total_rate > dev->data->dev_link.link_speed) {
+               /*
+                * Reset the stored TX rates of the VF if the total
+                * would exceed the link speed.
+                */
+               memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
+               return -EINVAL;
+       }
+
+       /* Set RTTBCNRC of each queue/pool for vf X */
+       for (; queue_idx <= queue_end; queue_idx++) {
+               if (0x1 & q_msk)
+                       ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
+               q_msk = q_msk >> 1;
+       }
+
+       return 0;
+}
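
For illustration, q_msk selects queues inside the VF's pool: bit i limits
queue queue_idx + i, as the final loop above shows. A hypothetical
application-side call, assuming the rte_eth_set_vf_rate_limit() wrapper of
this API generation:

	#include <rte_ethdev.h>

	/* Cap the first two queues of VF 3 on port 0 at 100 Mb/s each;
	 * q_msk = 0x3 selects queues 0 and 1 of the VF's pool. */
	static int
	cap_vf3(void)
	{
		return rte_eth_set_vf_rate_limit(0, 3, 100, 0x3);
	}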
+
 static void
 ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                     __attribute__((unused)) uint32_t index,
@@ -3201,6 +3522,452 @@ ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
        }
 }
 
+/*
+ * add syn filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_add_syn_filter(struct rte_eth_dev *dev,
+                       struct rte_syn_filter *filter, uint16_t rx_queue)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t synqf;
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       if (rx_queue >= IXGBE_MAX_RX_QUEUE_NUM)
+               return -EINVAL;
+
+       synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+
+       if (synqf & IXGBE_SYN_FILTER_ENABLE)
+               return -EINVAL;
+
+       synqf = (uint32_t)(((rx_queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
+               IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
+
+       if (filter->hig_pri)
+               synqf |= IXGBE_SYN_FILTER_SYNQFP;
+       else
+               synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
+
+       IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+       return 0;
+}
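
A hypothetical usage sketch, assuming the rte_eth_dev_add_syn_filter()
wrapper of this API generation: steer TCP SYN packets on port 0 to rx
queue 2, with hig_pri giving the SYN filter precedence over other L3/L4
filters:

	#include <rte_ethdev.h>

	static int
	steer_syn_packets(void)
	{
		struct rte_syn_filter filter = { .hig_pri = 1 };

		return rte_eth_dev_add_syn_filter(0 /* port */, &filter,
						  2 /* rx queue */);
	}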
+
+/*
+ * remove syn filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_remove_syn_filter(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t synqf;
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+
+       synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
+
+       IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+       return 0;
+}
+
+/*
+ * get the syn filter's info
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer used to return the filter.
+ * *rx_queue: pointer to the queue id the filter is assigned to.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_get_syn_filter(struct rte_eth_dev *dev,
+                       struct rte_syn_filter *filter, uint16_t *rx_queue)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t synqf;
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+       if (synqf & IXGBE_SYN_FILTER_ENABLE) {
+               filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
+               *rx_queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
+               return 0;
+       }
+       return -ENOENT;
+}
+
+/*
+ * add an ethertype filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index at which the filter is added.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_add_ethertype_filter(struct rte_eth_dev *dev,
+                       uint16_t index, struct rte_ethertype_filter *filter,
+                       uint16_t rx_queue)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t etqf, etqs = 0;
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       if (index >= IXGBE_MAX_ETQF_FILTERS ||
+               rx_queue >= IXGBE_MAX_RX_QUEUE_NUM)
+               return -EINVAL;
+
+       etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(index));
+       if (etqf & IXGBE_ETQF_FILTER_EN)
+               return -EINVAL;  /* filter index is in use. */
+
+       etqf = 0;
+       etqf |= IXGBE_ETQF_FILTER_EN;
+       etqf |= (uint32_t)filter->ethertype;
+
+       if (filter->priority_en) {
+               if (filter->priority > IXGBE_ETQF_MAX_PRI)
+                       return -EINVAL;
+               etqf |= (uint32_t)((filter->priority << IXGBE_ETQF_SHIFT) &
+                       IXGBE_ETQF_UP);
+               etqf |= IXGBE_ETQF_UP_EN;
+       }
+       etqs |= (uint32_t)((rx_queue << IXGBE_ETQS_RX_QUEUE_SHIFT) &
+               IXGBE_ETQS_RX_QUEUE);
+       etqs |= IXGBE_ETQS_QUEUE_EN;
+
+       IXGBE_WRITE_REG(hw, IXGBE_ETQF(index), etqf);
+       IXGBE_WRITE_REG(hw, IXGBE_ETQS(index), etqs);
+       return 0;
+}
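
A hypothetical usage sketch, assuming the matching
rte_eth_dev_add_ethertype_filter() wrapper: steer LLDP frames (ethertype
0x88CC) on port 0 to rx queue 1 through filter index 0:

	#include <rte_ethdev.h>

	static int
	steer_lldp(void)
	{
		struct rte_ethertype_filter filter = {
			.ethertype = 0x88CC, /* LLDP */
			.priority_en = 0,    /* no 802.1p priority match */
		};

		return rte_eth_dev_add_ethertype_filter(0 /* port */,
							0 /* index */,
							&filter,
							1 /* rx queue */);
	}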
+
+/*
+ * remove an ethertype filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index of the filter to remove.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_remove_ethertype_filter(struct rte_eth_dev *dev,
+                       uint16_t index)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       if (index >= IXGBE_MAX_ETQF_FILTERS)
+               return -EINVAL;
+
+       IXGBE_WRITE_REG(hw, IXGBE_ETQF(index), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_ETQS(index), 0);
+
+       return 0;
+}
+
+/*
+ * get an ethertype filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index of the filter to get.
+ * filter: pointer used to return the filter.
+ * *rx_queue: pointer to the queue id the filter is assigned to.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
+                       uint16_t index, struct rte_ethertype_filter *filter,
+                       uint16_t *rx_queue)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t etqf, etqs;
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       if (index >= IXGBE_MAX_ETQF_FILTERS)
+               return -EINVAL;
+
+       etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(index));
+       etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(index));
+       if (etqf & IXGBE_ETQF_FILTER_EN) {
+               filter->ethertype = etqf & IXGBE_ETQF_ETHERTYPE;
+               filter->priority_en = (etqf & IXGBE_ETQF_UP_EN) ? 1 : 0;
+               if (filter->priority_en)
+                       filter->priority = (etqf & IXGBE_ETQF_UP) >> 16;
+               *rx_queue = (etqs & IXGBE_ETQS_RX_QUEUE) >> IXGBE_ETQS_RX_QUEUE_SHIFT;
+               return 0;
+       }
+       return -ENOENT;
+}
+
+static inline enum ixgbe_5tuple_protocol
+convert_protocol_type(uint8_t protocol_value)
+{
+       if (protocol_value == IPPROTO_TCP)
+               return IXGBE_FILTER_PROTOCOL_TCP;
+       else if (protocol_value == IPPROTO_UDP)
+               return IXGBE_FILTER_PROTOCOL_UDP;
+       else if (protocol_value == IPPROTO_SCTP)
+               return IXGBE_FILTER_PROTOCOL_SCTP;
+       else
+               return IXGBE_FILTER_PROTOCOL_NONE;
+}
+
+static inline uint8_t
+revert_protocol_type(enum ixgbe_5tuple_protocol protocol)
+{
+       if (protocol == IXGBE_FILTER_PROTOCOL_TCP)
+               return IPPROTO_TCP;
+       else if (protocol == IXGBE_FILTER_PROTOCOL_UDP)
+               return IPPROTO_UDP;
+       else if (protocol == IXGBE_FILTER_PROTOCOL_SCTP)
+               return IPPROTO_SCTP;
+       else
+               return 0;
+}
+
+/*
+ * add a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index at which the filter is added.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+                       struct rte_5tuple_filter *filter, uint16_t rx_queue)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t ftqf, sdpqf = 0;
+       uint32_t l34timir = 0;
+       uint8_t mask = 0xff;
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       if (index >= IXGBE_MAX_FTQF_FILTERS ||
+               rx_queue >= IXGBE_MAX_RX_QUEUE_NUM ||
+               filter->priority > IXGBE_5TUPLE_MAX_PRI ||
+               filter->priority < IXGBE_5TUPLE_MIN_PRI)
+               return -EINVAL;  /* index, queue or priority out of range. */
+
+       if (filter->tcp_flags) {
+               PMD_INIT_LOG(INFO, "82599EB does not support tcp flags in 5tuple");
+               return -EINVAL;
+       }
+
+       ftqf = IXGBE_READ_REG(hw, IXGBE_FTQF(index));
+       if (ftqf & IXGBE_FTQF_QUEUE_ENABLE)
+               return -EINVAL;  /* filter index is in use. */
+
+       ftqf = 0;
+       sdpqf = (uint32_t)(filter->dst_port << IXGBE_SDPQF_DSTPORT_SHIFT);
+       sdpqf = sdpqf | (filter->src_port & IXGBE_SDPQF_SRCPORT);
+
+       ftqf |= (uint32_t)(convert_protocol_type(filter->protocol) &
+               IXGBE_FTQF_PROTOCOL_MASK);
+       ftqf |= (uint32_t)((filter->priority & IXGBE_FTQF_PRIORITY_MASK) <<
+               IXGBE_FTQF_PRIORITY_SHIFT);
+       if (filter->src_ip_mask == 0) /* 0 means compare. */
+               mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
+       if (filter->dst_ip_mask == 0)
+               mask &= IXGBE_FTQF_DEST_ADDR_MASK;
+       if (filter->src_port_mask == 0)
+               mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
+       if (filter->dst_port_mask == 0)
+               mask &= IXGBE_FTQF_DEST_PORT_MASK;
+       if (filter->protocol_mask == 0)
+               mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
+       ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
+       ftqf |= IXGBE_FTQF_POOL_MASK_EN;
+       ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
+
+       IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), filter->dst_ip);
+       IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), filter->src_ip);
+       IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), sdpqf);
+       IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), ftqf);
+
+       l34timir |= IXGBE_L34T_IMIR_RESERVE;
+       l34timir |= (uint32_t)(rx_queue << IXGBE_L34T_IMIR_QUEUE_SHIFT);
+       IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), l34timir);
+       return 0;
+}
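
The FTQF mask derivation above inverts the user's view: a 0 in a *_mask
field means "compare this field", which clears the corresponding bit of the
5-bit hardware mask, while a set bit leaves the field wildcarded. A
standalone sketch of the same logic; the constants mirror the ixgbe_type.h
IXGBE_FTQF_*_MASK values and are repeated here only for illustration:

	#include <stdint.h>

	#define SRC_ADDR_MASK	0x1E	/* each value clears exactly one bit */
	#define DST_ADDR_MASK	0x1D
	#define SRC_PORT_MASK	0x1B
	#define DST_PORT_MASK	0x17
	#define PROTO_COMP_MASK	0x0F

	static uint8_t
	ftqf_mask(int cmp_src_ip, int cmp_dst_ip, int cmp_src_port,
		  int cmp_dst_port, int cmp_proto)
	{
		uint8_t mask = 0xff; /* fully wildcarded; low 5 bits used */

		if (cmp_src_ip)
			mask &= SRC_ADDR_MASK;
		if (cmp_dst_ip)
			mask &= DST_ADDR_MASK;
		if (cmp_src_port)
			mask &= SRC_PORT_MASK;
		if (cmp_dst_port)
			mask &= DST_PORT_MASK;
		if (cmp_proto)
			mask &= PROTO_COMP_MASK;
		return mask; /* compare everything -> low bits all zero */
	}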
+
+/*
+ * remove a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index of the filter to remove.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+                       uint16_t index)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       if (index >= IXGBE_MAX_FTQF_FILTERS)
+               return -EINVAL;  /* filter index is out of range. */
+
+       IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
+       return 0;
+}
+
+/*
+ * get a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index of the filter to get.
+ * filter: pointer used to return the filter.
+ * *rx_queue: pointer to the queue id the filter is assigned to.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+ixgbe_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+                       struct rte_5tuple_filter *filter, uint16_t *rx_queue)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t sdpqf, ftqf, l34timir;
+       uint8_t mask;
+       enum ixgbe_5tuple_protocol proto;
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       if (index >= IXGBE_MAX_FTQF_FILTERS)
+               return -EINVAL;  /* filter index is out of range. */
+
+       ftqf = IXGBE_READ_REG(hw, IXGBE_FTQF(index));
+       if (ftqf & IXGBE_FTQF_QUEUE_ENABLE) {
+               proto = (enum ixgbe_5tuple_protocol)(ftqf & IXGBE_FTQF_PROTOCOL_MASK);
+               filter->protocol = revert_protocol_type(proto);
+               filter->priority = (ftqf >> IXGBE_FTQF_PRIORITY_SHIFT) &
+                                       IXGBE_FTQF_PRIORITY_MASK;
+               mask = (uint8_t)((ftqf >> IXGBE_FTQF_5TUPLE_MASK_SHIFT) &
+                                       IXGBE_FTQF_5TUPLE_MASK_MASK);
+               filter->src_ip_mask =
+                       (mask & IXGBE_FTQF_SOURCE_ADDR_MASK) ? 1 : 0;
+               filter->dst_ip_mask =
+                       (mask & IXGBE_FTQF_DEST_ADDR_MASK) ? 1 : 0;
+               filter->src_port_mask =
+                       (mask & IXGBE_FTQF_SOURCE_PORT_MASK) ? 1 : 0;
+               filter->dst_port_mask =
+                       (mask & IXGBE_FTQF_DEST_PORT_MASK) ? 1 : 0;
+               filter->protocol_mask =
+                       (mask & IXGBE_FTQF_PROTOCOL_COMP_MASK) ? 1 : 0;
+
+               sdpqf = IXGBE_READ_REG(hw, IXGBE_SDPQF(index));
+               filter->dst_port = (sdpqf & IXGBE_SDPQF_DSTPORT) >>
+                                       IXGBE_SDPQF_DSTPORT_SHIFT;
+               filter->src_port = sdpqf & IXGBE_SDPQF_SRCPORT;
+               filter->dst_ip = IXGBE_READ_REG(hw, IXGBE_DAQF(index));
+               filter->src_ip = IXGBE_READ_REG(hw, IXGBE_SAQF(index));
+
+               l34timir = IXGBE_READ_REG(hw, IXGBE_L34T_IMIR(index));
+               *rx_queue = (l34timir & IXGBE_L34T_IMIR_QUEUE) >>
+                                       IXGBE_L34T_IMIR_QUEUE_SHIFT;
+               return 0;
+       }
+       return -ENOENT;
+}
+
+static int
+ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+       struct ixgbe_hw *hw;
+       uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+               return -EINVAL;
+
+       /* refuse mtu that requires the support of scattered packets when this
+        * feature has not been enabled before. */
+       if (!dev->data->scattered_rx &&
+           (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
+            dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+               return -EINVAL;
+
+       /*
+        * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
+        * request of version 2.0 of the mailbox API.
+        * For now, use the IXGBE_VF_SET_LPE request of version 1.0
+        * of the mailbox API.
+        * This IXGBE_VF_SET_LPE request won't work with ixgbe PF drivers
+        * prior to 3.11.33, which contains the following change:
+        * "ixgbe: Enable jumbo frames support w/ SR-IOV"
+        */
+       ixgbevf_rlpml_set_vf(hw, max_frame);
+
+       /* update max frame size */
+       dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
+       return 0;
+}
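
A hypothetical VF-side call for completeness: rte_eth_dev_set_mtu()
dispatches to the callback above and, as the comment notes, the
IXGBE_VF_SET_LPE mailbox request only takes effect with a recent enough
ixgbe PF driver:

	#include <rte_ethdev.h>

	static int
	enable_vf_jumbo(uint8_t vf_port_id)
	{
		return rte_eth_dev_set_mtu(vf_port_id, 9000); /* 9018B frames */
	}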
+
 static struct rte_driver rte_ixgbe_driver = {
        .type = PMD_PDEV,
        .init = rte_ixgbe_pmd_init,