ethdev: make driver-only headers private
[dpdk.git] / drivers / net / txgbe / txgbe_ethdev.c
index 5515fb3..23f9d17 100644 (file)
@@ -7,7 +7,7 @@
 #include <stdint.h>
 #include <string.h>
 #include <rte_common.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_pci.h>
 
 #include <rte_interrupts.h>
 #include <rte_log.h>
 #include "base/txgbe.h"
 #include "txgbe_ethdev.h"
 #include "txgbe_rxtx.h"
+#include "txgbe_regs_group.h"
+
+/* General register dump table.
+ * NOTE(review): fields appear to be {base address, count, stride, name}
+ * per entry, terminated by an all-zero sentinel — confirm against
+ * txgbe_regs_group.h.
+ */
+static const struct reg_info txgbe_regs_general[] = {
+       {TXGBE_RST, 1, 1, "TXGBE_RST"},
+       {TXGBE_STAT, 1, 1, "TXGBE_STAT"},
+       {TXGBE_PORTCTL, 1, 1, "TXGBE_PORTCTL"},
+       {TXGBE_SDP, 1, 1, "TXGBE_SDP"},
+       {TXGBE_SDPCTL, 1, 1, "TXGBE_SDPCTL"},
+       {TXGBE_LEDCTL, 1, 1, "TXGBE_LEDCTL"},
+       {0, 0, 0, ""}
+};
+
+/* The tables below contain only the terminating sentinel entry.
+ * NOTE(review): presumably placeholders for register groups not yet
+ * enumerated — the NULL-terminated group list still references them.
+ */
+static const struct reg_info txgbe_regs_nvm[] = {
+       {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_interrupt[] = {
+       {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_fctl_others[] = {
+       {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_rxdma[] = {
+       {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_rx[] = {
+       {0, 0, 0, ""}
+};
+
+/* Tx register dump table; const-qualified like every sibling table
+ * (it is only read through the const pointers in txgbe_regs_others).
+ */
+static const struct reg_info txgbe_regs_tx[] = {
+       {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_wakeup[] = {
+       {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_dcb[] = {
+       {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_mac[] = {
+       {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_diagnostic[] = {
+       {0, 0, 0, ""},
+};
 
+/* PF registers: NULL-terminated list of all register dump groups above,
+ * in dump order.
+ */
+static const struct reg_info *txgbe_regs_others[] = {
+                               txgbe_regs_general,
+                               txgbe_regs_nvm,
+                               txgbe_regs_interrupt,
+                               txgbe_regs_fctl_others,
+                               txgbe_regs_rxdma,
+                               txgbe_regs_rx,
+                               txgbe_regs_tx,
+                               txgbe_regs_wakeup,
+                               txgbe_regs_dcb,
+                               txgbe_regs_mac,
+                               txgbe_regs_diagnostic,
+                               NULL};
+
+static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
+static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
+static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
+static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
 static int  txgbe_dev_set_link_up(struct rte_eth_dev *dev);
 static int  txgbe_dev_set_link_down(struct rte_eth_dev *dev);
 static int txgbe_dev_close(struct rte_eth_dev *dev);
@@ -43,6 +113,9 @@ static void txgbe_dev_interrupt_handler(void *param);
 static void txgbe_dev_interrupt_delayed_handler(void *param);
 static void txgbe_configure_msix(struct rte_eth_dev *dev);
 
+static int txgbe_filter_restore(struct rte_eth_dev *dev);
+static void txgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
+
 #define TXGBE_SET_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
@@ -336,6 +409,43 @@ txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
        return 0;
 }
 
+/*
+ * Fill dcb_config with driver defaults: all traffic classes enabled,
+ * bandwidth split evenly between TCs, PFC disabled, every user
+ * priority mapped to TC0, and all DCB capabilities advertised.
+ */
+static void
+txgbe_dcb_init(struct txgbe_hw *hw, struct txgbe_dcb_config *dcb_config)
+{
+       int i;
+       u8 bwgp;
+       struct txgbe_dcb_tc_config *tc;
+
+       UNREFERENCED_PARAMETER(hw);
+
+       dcb_config->num_tcs.pg_tcs = TXGBE_DCB_TC_MAX;
+       dcb_config->num_tcs.pfc_tcs = TXGBE_DCB_TC_MAX;
+       /* even split of 100%; "+ (i & 1)" gives odd TCs the rounding
+        * remainder so the percentages sum back to 100
+        */
+       bwgp = (u8)(100 / TXGBE_DCB_TC_MAX);
+       for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+               tc = &dcb_config->tc_config[i];
+               tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = i;
+               tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwgp + (i & 1);
+               tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = i;
+               tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwgp + (i & 1);
+               tc->pfc = txgbe_dcb_pfc_disabled;
+       }
+
+       /* Initialize default user to priority mapping, UPx->TC0 */
+       tc = &dcb_config->tc_config[0];
+       tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
+       tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
+       for (i = 0; i < TXGBE_DCB_BWG_MAX; i++) {
+               dcb_config->bw_percentage[i][TXGBE_DCB_TX_CONFIG] = 100;
+               dcb_config->bw_percentage[i][TXGBE_DCB_RX_CONFIG] = 100;
+       }
+       dcb_config->rx_pba_cfg = txgbe_dcb_pba_equal;
+       dcb_config->pfc_mode_enable = false;
+       dcb_config->vt_mode = true;
+       dcb_config->round_robin_enable = false;
+       /* support all DCB capabilities */
+       dcb_config->support.capabilities = 0xFF;
+}
+
 /*
  * Ensure that all locks are released before first NVM or PHY access
  */
@@ -366,15 +476,21 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
        struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
        struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
+       struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
+       struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        const struct rte_memzone *mz;
        uint32_t ctrl_ext;
        uint16_t csum;
-       int err;
+       int err, i, ret;
 
        PMD_INIT_FUNC_TRACE();
 
        eth_dev->dev_ops = &txgbe_eth_dev_ops;
+       eth_dev->rx_queue_count       = txgbe_dev_rx_queue_count;
+       eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
+       eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
        eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
        eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
        eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
@@ -431,6 +547,26 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
        /* Unlock any pending hardware semaphore */
        txgbe_swfw_lock_reset(hw);
 
+#ifdef RTE_LIB_SECURITY
+       /* Initialize security_ctx only for primary process*/
+       if (txgbe_ipsec_ctx_create(eth_dev))
+               return -ENOMEM;
+#endif
+
+       /* Initialize DCB configuration*/
+       memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
+       txgbe_dcb_init(hw, dcb_config);
+
+       /* Get Hardware Flow Control setting */
+       hw->fc.requested_mode = txgbe_fc_full;
+       hw->fc.current_mode = txgbe_fc_full;
+       hw->fc.pause_time = TXGBE_FC_PAUSE_TIME;
+       for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+               hw->fc.low_water[i] = TXGBE_FC_XON_LOTH;
+               hw->fc.high_water[i] = TXGBE_FC_XOFF_HITH;
+       }
+       hw->fc.send_xon = 1;
+
        err = hw->rom.init_params(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
@@ -517,7 +653,14 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
        memset(hwstrip, 0, sizeof(*hwstrip));
 
        /* initialize PF if max_vfs not zero */
-       txgbe_pf_host_init(eth_dev);
+       ret = txgbe_pf_host_init(eth_dev);
+       if (ret) {
+               rte_free(eth_dev->data->mac_addrs);
+               eth_dev->data->mac_addrs = NULL;
+               rte_free(eth_dev->data->hash_mac_addrs);
+               eth_dev->data->hash_mac_addrs = NULL;
+               return ret;
+       }
 
        ctrl_ext = rd32(hw, TXGBE_PORTCTL);
        /* let hardware know driver is loaded */
@@ -548,6 +691,28 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
        /* enable support intr */
        txgbe_enable_intr(eth_dev);
 
+       /* initialize filter info */
+       memset(filter_info, 0,
+              sizeof(struct txgbe_filter_info));
+
+       /* initialize 5tuple filter list */
+       TAILQ_INIT(&filter_info->fivetuple_list);
+
+       /* initialize flow director filter list & hash */
+       txgbe_fdir_filter_init(eth_dev);
+
+       /* initialize l2 tunnel filter list & hash */
+       txgbe_l2_tn_filter_init(eth_dev);
+
+       /* initialize flow filter lists */
+       txgbe_filterlist_init();
+
+       /* initialize bandwidth configuration info */
+       memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
+
+       /* initialize Traffic Manager configuration */
+       txgbe_tm_conf_init(eth_dev);
+
        return 0;
 }
 
@@ -564,6 +729,135 @@ eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
        return 0;
 }
 
+/* Drain and release every 5-tuple filter, then clear the usage bitmap. */
+static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
+       struct txgbe_5tuple_filter *node;
+
+       for (node = TAILQ_FIRST(&filter_info->fivetuple_list);
+            node != NULL;
+            node = TAILQ_FIRST(&filter_info->fivetuple_list)) {
+               TAILQ_REMOVE(&filter_info->fivetuple_list, node, entries);
+               rte_free(node);
+       }
+       memset(filter_info->fivetuple_mask, 0,
+              sizeof(uint32_t) * TXGBE_5TUPLE_ARRAY_SIZE);
+
+       return 0;
+}
+
+/*
+ * Release all flow director filter resources: the lookup hash, the
+ * hash map array, and every filter entry still queued on the list.
+ */
+static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+       struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
+       struct txgbe_fdir_filter *fdir_filter;
+
+       /* rte_free()/rte_hash_free() are NULL-safe, so no guards needed;
+        * clear the pointers so a repeated uninit cannot double-free.
+        */
+       rte_free(fdir_info->hash_map);
+       fdir_info->hash_map = NULL;
+       rte_hash_free(fdir_info->hash_handle);
+       fdir_info->hash_handle = NULL;
+
+       while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+               TAILQ_REMOVE(&fdir_info->fdir_list,
+                            fdir_filter,
+                            entries);
+               rte_free(fdir_filter);
+       }
+
+       return 0;
+}
+
+/*
+ * Release all L2 tunnel filter resources: the lookup hash, the hash
+ * map array, and every filter entry still queued on the list.
+ */
+static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+       struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
+       struct txgbe_l2_tn_filter *l2_tn_filter;
+
+       /* rte_free()/rte_hash_free() are NULL-safe, so no guards needed;
+        * clear the pointers so a repeated uninit cannot double-free.
+        */
+       rte_free(l2_tn_info->hash_map);
+       l2_tn_info->hash_map = NULL;
+       rte_hash_free(l2_tn_info->hash_handle);
+       l2_tn_info->hash_handle = NULL;
+
+       while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+               TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
+                            l2_tn_filter,
+                            entries);
+               rte_free(l2_tn_filter);
+       }
+
+       return 0;
+}
+
+/*
+ * Set up the flow director filter list and its lookup hash.
+ * Returns 0 on success, -EINVAL if the hash table cannot be created,
+ * -ENOMEM if the hash map allocation fails (no resources are leaked).
+ */
+static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
+{
+       struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
+       char fdir_hash_name[RTE_HASH_NAMESIZE];
+       struct rte_hash_parameters fdir_hash_params = {
+               .name = fdir_hash_name,
+               .entries = TXGBE_MAX_FDIR_FILTER_NUM,
+               .key_len = sizeof(struct txgbe_atr_input),
+               .hash_func = rte_hash_crc,
+               .hash_func_init_val = 0,
+               .socket_id = rte_socket_id(),
+       };
+
+       TAILQ_INIT(&fdir_info->fdir_list);
+       snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+                "fdir_%s", TDEV_NAME(eth_dev));
+       fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
+       if (!fdir_info->hash_handle) {
+               PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+               return -EINVAL;
+       }
+       fdir_info->hash_map = rte_zmalloc("txgbe",
+                                         sizeof(struct txgbe_fdir_filter *) *
+                                         TXGBE_MAX_FDIR_FILTER_NUM,
+                                         0);
+       if (!fdir_info->hash_map) {
+               PMD_INIT_LOG(ERR,
+                            "Failed to allocate memory for fdir hash map!");
+               /* don't leak the hash table created above */
+               rte_hash_free(fdir_info->hash_handle);
+               fdir_info->hash_handle = NULL;
+               return -ENOMEM;
+       }
+       fdir_info->mask_added = FALSE;
+
+       return 0;
+}
+
+/*
+ * Set up the L2 tunnel filter list and its lookup hash.
+ * Returns 0 on success, -EINVAL if the hash table cannot be created,
+ * -ENOMEM if the hash map allocation fails (no resources are leaked).
+ */
+static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
+{
+       struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
+       char l2_tn_hash_name[RTE_HASH_NAMESIZE];
+       struct rte_hash_parameters l2_tn_hash_params = {
+               .name = l2_tn_hash_name,
+               .entries = TXGBE_MAX_L2_TN_FILTER_NUM,
+               .key_len = sizeof(struct txgbe_l2_tn_key),
+               .hash_func = rte_hash_crc,
+               .hash_func_init_val = 0,
+               .socket_id = rte_socket_id(),
+       };
+
+       TAILQ_INIT(&l2_tn_info->l2_tn_list);
+       snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
+                "l2_tn_%s", TDEV_NAME(eth_dev));
+       l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
+       if (!l2_tn_info->hash_handle) {
+               PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
+               return -EINVAL;
+       }
+       l2_tn_info->hash_map = rte_zmalloc("txgbe",
+                                  sizeof(struct txgbe_l2_tn_filter *) *
+                                  TXGBE_MAX_L2_TN_FILTER_NUM,
+                                  0);
+       if (!l2_tn_info->hash_map) {
+               PMD_INIT_LOG(ERR,
+                       "Failed to allocate memory for L2 TN hash map!");
+               /* don't leak the hash table created above */
+               rte_hash_free(l2_tn_info->hash_handle);
+               l2_tn_info->hash_handle = NULL;
+               return -ENOMEM;
+       }
+       l2_tn_info->e_tag_en = FALSE;
+       l2_tn_info->e_tag_fwd_en = FALSE;
+       l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;
+
+       return 0;
+}
+
 static int
 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
@@ -1260,6 +1554,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
        int status;
        uint16_t vf, idx;
        uint32_t *link_speeds;
+       struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
 
        PMD_INIT_FUNC_TRACE();
 
@@ -1346,6 +1641,17 @@ txgbe_dev_start(struct rte_eth_dev *dev)
                txgbe_vmdq_vlan_hw_filter_enable(dev);
        }
 
+       /* Configure DCB hw */
+       txgbe_configure_pb(dev);
+       txgbe_configure_port(dev);
+       txgbe_configure_dcb(dev);
+
+       if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+               err = txgbe_fdir_configure(dev);
+               if (err)
+                       goto error;
+       }
+
        /* Restore vf rate limit */
        if (vfinfo != NULL) {
                for (vf = 0; vf < pci_dev->max_vfs; vf++)
@@ -1449,6 +1755,13 @@ skip_link_setup:
 
        /* resume enabled intr since hw reset */
        txgbe_enable_intr(dev);
+       txgbe_l2_tunnel_conf(dev);
+       txgbe_filter_restore(dev);
+
+       if (tm_conf->root && !tm_conf->committed)
+               PMD_DRV_LOG(WARNING,
+                           "please call hierarchy_commit() "
+                           "before starting the port");
 
        /*
         * Update link status right before return, because it may
@@ -1476,11 +1789,13 @@ static int
 txgbe_dev_stop(struct rte_eth_dev *dev)
 {
        struct rte_eth_link link;
+       struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        int vf;
+       struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
 
        if (hw->adapter_stopped)
                return 0;
@@ -1533,6 +1848,10 @@ txgbe_dev_stop(struct rte_eth_dev *dev)
                intr_handle->intr_vec = NULL;
        }
 
+       /* reset hierarchy commit */
+       tm_conf->committed = false;
+
+       adapter->rss_reta_updated = 0;
        wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
 
        hw->adapter_stopped = true;
@@ -1635,6 +1954,25 @@ txgbe_dev_close(struct rte_eth_dev *dev)
        rte_free(dev->data->hash_mac_addrs);
        dev->data->hash_mac_addrs = NULL;
 
+       /* remove all the fdir filters & hash */
+       txgbe_fdir_filter_uninit(dev);
+
+       /* remove all the L2 tunnel filters & hash */
+       txgbe_l2_tn_filter_uninit(dev);
+
+       /* Remove all ntuple filters of the device */
+       txgbe_ntuple_filter_uninit(dev);
+
+       /* clear all the filters list */
+       txgbe_filterlist_flush();
+
+       /* Remove all Traffic Manager configuration */
+       txgbe_tm_conf_uninit(dev);
+
+#ifdef RTE_LIB_SECURITY
+       rte_free(dev->security_ctx);
+#endif
+
        return ret;
 }
 
@@ -2001,9 +2339,8 @@ txgbe_get_offset_by_id(uint32_t id, uint32_t *offset)
                        nb * (TXGBE_NB_QP_STATS * sizeof(uint64_t));
                return 0;
        }
-       id -= TXGBE_NB_QP_STATS * TXGBE_MAX_QP;
 
-       return -(int)(id + 1);
+       return -1;
 }
 
 static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
@@ -2160,6 +2497,27 @@ txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
        return 0;
 }
 
+/*
+ * Report the NVM "etrack" version as a hex string in fw_version.
+ * Returns 0 on success, the required buffer size (including the
+ * terminating '\0') when fw_size is too small, or -EINVAL if
+ * formatting fails.
+ */
+static int
+txgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       u16 eeprom_verh, eeprom_verl;
+       u32 etrack_id;
+       int ret;
+
+       hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_H, &eeprom_verh);
+       hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_L, &eeprom_verl);
+
+       etrack_id = (eeprom_verh << 16) | eeprom_verl;
+       ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
+       /* snprintf returns a negative value on encoding error; without this
+        * check the (u32)ret comparison below would wrap to a huge value
+        */
+       if (ret < 0)
+               return -EINVAL;
+
+       ret += 1; /* add the size of '\0' */
+       if (fw_size < (u32)ret)
+               return ret;
+
+       return 0;
+}
+
 static int
 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
@@ -2339,6 +2697,65 @@ txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
        return txgbe_dev_link_update_share(dev, wait_to_complete);
 }
 
+/* Enter promiscuous mode: accept all unicast and multicast frames. */
+static int
+txgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t psrctl = rd32(hw, TXGBE_PSRCTL);
+
+       psrctl |= TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP;
+       wr32(hw, TXGBE_PSRCTL, psrctl);
+
+       return 0;
+}
+
+/* Leave promiscuous mode; keep multicast promiscuity if all-multicast
+ * is still active, otherwise clear it too.
+ */
+static int
+txgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t psrctl = rd32(hw, TXGBE_PSRCTL);
+
+       psrctl &= ~TXGBE_PSRCTL_UCP;
+       if (dev->data->all_multicast == 1)
+               psrctl |= TXGBE_PSRCTL_MCP;
+       else
+               psrctl &= ~TXGBE_PSRCTL_MCP;
+       wr32(hw, TXGBE_PSRCTL, psrctl);
+
+       return 0;
+}
+
+/* Accept all multicast frames by setting MCP in PSRCTL. */
+static int
+txgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t psrctl = rd32(hw, TXGBE_PSRCTL) | TXGBE_PSRCTL_MCP;
+
+       wr32(hw, TXGBE_PSRCTL, psrctl);
+
+       return 0;
+}
+
+/* Stop accepting all multicast frames, unless promiscuous mode still
+ * requires multicast promiscuity.
+ */
+static int
+txgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t psrctl;
+
+       if (dev->data->promiscuous == 1)
+               return 0; /* must remain in all_multicast mode */
+
+       psrctl = rd32(hw, TXGBE_PSRCTL);
+       psrctl &= ~TXGBE_PSRCTL_MCP;
+       wr32(hw, TXGBE_PSRCTL, psrctl);
+
+       return 0;
+}
+
 /**
  * It clears the interrupt causes and enables the interrupt.
  * It will be called once only during nic initialized.
@@ -2633,96 +3050,396 @@ txgbe_dev_interrupt_handler(void *param)
 }
 
 static int
-txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
-                               uint32_t index, uint32_t pool)
+txgbe_dev_led_on(struct rte_eth_dev *dev)
 {
-       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
-       uint32_t enable_addr = 1;
+       struct txgbe_hw *hw;
 
-       return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
-                            pool, enable_addr);
+       hw = TXGBE_DEV_HW(dev);
+       return txgbe_led_on(hw, 4) == 0 ? 0 : -ENOTSUP;
 }
 
-static void
-txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
+static int
+txgbe_dev_led_off(struct rte_eth_dev *dev)
 {
-       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_hw *hw;
 
-       txgbe_clear_rar(hw, index);
+       hw = TXGBE_DEV_HW(dev);
+       return txgbe_led_off(hw, 4) == 0 ? 0 : -ENOTSUP;
 }
 
 static int
-txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
+txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 {
-       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+       struct txgbe_hw *hw;
+       uint32_t mflcn_reg;
+       uint32_t fccfg_reg;
+       int rx_pause;
+       int tx_pause;
 
-       txgbe_remove_rar(dev, 0);
-       txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
+       hw = TXGBE_DEV_HW(dev);
+
+       fc_conf->pause_time = hw->fc.pause_time;
+       fc_conf->high_water = hw->fc.high_water[0];
+       fc_conf->low_water = hw->fc.low_water[0];
+       fc_conf->send_xon = hw->fc.send_xon;
+       fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
+
+       /*
+        * Return rx_pause status according to actual setting of
+        * RXFCCFG register.
+        */
+       mflcn_reg = rd32(hw, TXGBE_RXFCCFG);
+       if (mflcn_reg & (TXGBE_RXFCCFG_FC | TXGBE_RXFCCFG_PFC))
+               rx_pause = 1;
+       else
+               rx_pause = 0;
+
+       /*
+        * Return tx_pause status according to actual setting of
+        * TXFCCFG register.
+        */
+       fccfg_reg = rd32(hw, TXGBE_TXFCCFG);
+       if (fccfg_reg & (TXGBE_TXFCCFG_FC | TXGBE_TXFCCFG_PFC))
+               tx_pause = 1;
+       else
+               tx_pause = 0;
+
+       if (rx_pause && tx_pause)
+               fc_conf->mode = RTE_FC_FULL;
+       else if (rx_pause)
+               fc_conf->mode = RTE_FC_RX_PAUSE;
+       else if (tx_pause)
+               fc_conf->mode = RTE_FC_TX_PAUSE;
+       else
+               fc_conf->mode = RTE_FC_NONE;
 
        return 0;
 }
 
-static uint32_t
-txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
+static int
+txgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 {
-       uint32_t vector = 0;
+       struct txgbe_hw *hw;
+       int err;
+       uint32_t rx_buf_size;
+       uint32_t max_high_water;
+       enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
+               txgbe_fc_none,
+               txgbe_fc_rx_pause,
+               txgbe_fc_tx_pause,
+               txgbe_fc_full
+       };
 
-       switch (hw->mac.mc_filter_type) {
-       case 0:   /* use bits [47:36] of the address */
-               vector = ((uc_addr->addr_bytes[4] >> 4) |
-                       (((uint16_t)uc_addr->addr_bytes[5]) << 4));
-               break;
-       case 1:   /* use bits [46:35] of the address */
-               vector = ((uc_addr->addr_bytes[4] >> 3) |
-                       (((uint16_t)uc_addr->addr_bytes[5]) << 5));
-               break;
-       case 2:   /* use bits [45:34] of the address */
-               vector = ((uc_addr->addr_bytes[4] >> 2) |
-                       (((uint16_t)uc_addr->addr_bytes[5]) << 6));
-               break;
-       case 3:   /* use bits [43:32] of the address */
-               vector = ((uc_addr->addr_bytes[4]) |
-                       (((uint16_t)uc_addr->addr_bytes[5]) << 8));
-               break;
-       default:  /* Invalid mc_filter_type */
-               break;
+       PMD_INIT_FUNC_TRACE();
+
+       hw = TXGBE_DEV_HW(dev);
+       rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(0));
+       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
+
+       /*
+        * At least reserve one Ethernet frame for watermark
+        * high_water/low_water in kilo bytes for txgbe
+        */
+       max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
+       if (fc_conf->high_water > max_high_water ||
+           fc_conf->high_water < fc_conf->low_water) {
+               PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+               PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
+               return -EINVAL;
        }
 
-       /* vector can only be 12-bits or boundary will be exceeded */
-       vector &= 0xFFF;
-       return vector;
+       hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[fc_conf->mode];
+       hw->fc.pause_time     = fc_conf->pause_time;
+       hw->fc.high_water[0]  = fc_conf->high_water;
+       hw->fc.low_water[0]   = fc_conf->low_water;
+       hw->fc.send_xon       = fc_conf->send_xon;
+       hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
+
+       err = txgbe_fc_enable(hw);
+
+       /* Not negotiated is not an error case */
+       if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED) {
+               wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_CTL_MASK,
+                     (fc_conf->mac_ctrl_frame_fwd
+                      ? TXGBE_MACRXFLT_CTL_NOPS : TXGBE_MACRXFLT_CTL_DROP));
+               txgbe_flush(hw);
+
+               return 0;
+       }
+
+       PMD_INIT_LOG(ERR, "txgbe_fc_enable = 0x%x", err);
+       return -EIO;
 }
 
 static int
-txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
-                       struct rte_ether_addr *mac_addr, uint8_t on)
+txgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
+               struct rte_eth_pfc_conf *pfc_conf)
 {
-       uint32_t vector;
-       uint32_t uta_idx;
-       uint32_t reg_val;
-       uint32_t uta_mask;
-       uint32_t psrctl;
-
+       int err;
+       uint32_t rx_buf_size;
+       uint32_t max_high_water;
+       uint8_t tc_num;
+       uint8_t  map[TXGBE_DCB_UP_MAX] = { 0 };
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
-       struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
+       struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
 
-       /* The UTA table only exists on pf hardware */
-       if (hw->mac.type < txgbe_mac_raptor)
-               return -ENOTSUP;
+       enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
+               txgbe_fc_none,
+               txgbe_fc_rx_pause,
+               txgbe_fc_tx_pause,
+               txgbe_fc_full
+       };
 
-       vector = txgbe_uta_vector(hw, mac_addr);
-       uta_idx = (vector >> 5) & 0x7F;
-       uta_mask = 0x1UL << (vector & 0x1F);
+       PMD_INIT_FUNC_TRACE();
 
-       if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
+       txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
+       tc_num = map[pfc_conf->priority];
+       rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(tc_num));
+       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
+       /*
+        * At least reserve one Ethernet frame for watermark
+        * high_water/low_water in kilo bytes for txgbe
+        */
+       max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
+       if (pfc_conf->fc.high_water > max_high_water ||
+           pfc_conf->fc.high_water <= pfc_conf->fc.low_water) {
+               PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+               PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
+               return -EINVAL;
+       }
+
+       hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[pfc_conf->fc.mode];
+       hw->fc.pause_time = pfc_conf->fc.pause_time;
+       hw->fc.send_xon = pfc_conf->fc.send_xon;
+       hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
+       hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
+
+       err = txgbe_dcb_pfc_enable(hw, tc_num);
+
+       /* Not negotiated is not an error case */
+       if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED)
                return 0;
 
-       reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
-       if (on) {
-               uta_info->uta_in_use++;
-               reg_val |= uta_mask;
-               uta_info->uta_shadow[uta_idx] |= uta_mask;
-       } else {
+       PMD_INIT_LOG(ERR, "txgbe_dcb_pfc_enable = 0x%x", err);
+       return -EIO;
+}
+
+/*
+ * Update the 128-entry RSS redirection table from reta_conf.
+ * Entries are written four at a time since each RSSTBL register packs
+ * four 8-bit queue indices. Marks the adapter so a later device reset
+ * knows the table was user-modified.
+ */
+int
+txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
+                         struct rte_eth_rss_reta_entry64 *reta_conf,
+                         uint16_t reta_size)
+{
+       uint8_t i, j, mask;
+       uint32_t reta;
+       uint16_t idx, shift;
+       struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (!txgbe_rss_update_sp(hw->mac.type)) {
+               PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
+                       "NIC.");
+               return -ENOTSUP;
+       }
+
+       if (reta_size != ETH_RSS_RETA_SIZE_128) {
+               PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+                       "(%d) doesn't match the number hardware can supported "
+                       "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < reta_size; i += 4) {
+               idx = i / RTE_RETA_GROUP_SIZE;
+               shift = i % RTE_RETA_GROUP_SIZE;
+               /* 4-bit mask selects which of the four bytes to update */
+               mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
+               if (!mask)
+                       continue;
+
+               /* read-modify-write one 32-bit table register */
+               reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
+               for (j = 0; j < 4; j++) {
+                       if (RS8(mask, j, 0x1)) {
+                               reta  &= ~(MS32(8 * j, 0xFF));
+                               reta |= LS32(reta_conf[idx].reta[shift + j],
+                                               8 * j, 0xFF);
+                       }
+               }
+               wr32a(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
+       }
+       adapter->rss_reta_updated = 1;
+
+       return 0;
+}
+
+/*
+ * Read the 128-entry RSS redirection table into reta_conf, four
+ * 8-bit queue indices per 32-bit RSSTBL register. Only entries whose
+ * bit is set in the per-group mask are filled in.
+ */
+int
+txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
+                        struct rte_eth_rss_reta_entry64 *reta_conf,
+                        uint16_t reta_size)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint8_t i, j, mask;
+       uint32_t reta;
+       uint16_t idx, shift;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (reta_size != ETH_RSS_RETA_SIZE_128) {
+               PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+                       "(%d) doesn't match the number hardware can supported "
+                       "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < reta_size; i += 4) {
+               idx = i / RTE_RETA_GROUP_SIZE;
+               shift = i % RTE_RETA_GROUP_SIZE;
+               /* 4-bit mask selects which of the four bytes to report */
+               mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
+               if (!mask)
+                       continue;
+
+               reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
+               for (j = 0; j < 4; j++) {
+                       if (RS8(mask, j, 0x1))
+                               reta_conf[idx].reta[shift + j] =
+                                       (uint16_t)RS32(reta, 8 * j, 0xFF);
+               }
+       }
+
+       return 0;
+}
+
+/* Program receive address register RAR[index] with mac_addr, bound to
+ * the given VMDq pool; returns the txgbe_set_rar() status.
+ */
+static int
+txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
+                               uint32_t index, uint32_t pool)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t enable_addr = 1;
+
+       return txgbe_set_rar(hw, index, mac_addr->addr_bytes,
+                            pool, enable_addr);
+}
+
+/* Clear the receive address register at the given index. */
+static void
+txgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+       txgbe_clear_rar(hw, index);
+}
+
+/*
+ * Replace the default MAC address stored in RAR[0].
+ * NOTE(review): the txgbe_add_rar() return value is ignored, so a
+ * failed hardware write is not reported to the caller — confirm this
+ * is intended.
+ */
+static int
+txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
+{
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+       txgbe_remove_rar(dev, 0);
+       txgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
+
+       return 0;
+}
+
+/*
+ * Validate and apply a new MTU: rejects values outside
+ * [RTE_ETHER_MIN_MTU, max_rx_pktlen], refuses changes that would need
+ * scattered Rx on a running port, then programs the max frame size.
+ */
+static int
+txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct rte_eth_dev_info dev_info;
+       uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+       struct rte_eth_dev_data *dev_data = dev->data;
+       int ret;
+
+       ret = txgbe_dev_info_get(dev, &dev_info);
+       if (ret != 0)
+               return ret;
+
+       /* check that mtu is within the allowed range */
+       if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
+               return -EINVAL;
+
+       /* If device is started, refuse mtu that requires the support of
+        * scattered packets when this feature has not been enabled before.
+        */
+       if (dev_data->dev_started && !dev_data->scattered_rx &&
+           (frame_size + 2 * TXGBE_VLAN_TAG_SIZE >
+            dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
+               PMD_INIT_LOG(ERR, "Stop port first.");
+               return -EINVAL;
+       }
+
+       /* update max frame size */
+       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+       /* NOTE(review): when hw->mode is set the hardware maximum is
+        * programmed instead of frame_size — presumably a special
+        * operating mode; confirm against base/txgbe HW docs.
+        */
+       if (hw->mode)
+               wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+                       TXGBE_FRAME_SIZE_MAX);
+       else
+               wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+                       TXGBE_FRMSZ_MAX(frame_size));
+
+       return 0;
+}
+
+/*
+ * Compute the 12-bit UTA hash vector for an address, selecting which
+ * address bits feed the hash according to hw->mac.mc_filter_type.
+ */
+static uint32_t
+txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
+{
+       uint32_t vector = 0;
+
+       switch (hw->mac.mc_filter_type) {
+       case 0:   /* use bits [47:36] of the address */
+               vector = ((uc_addr->addr_bytes[4] >> 4) |
+                       (((uint16_t)uc_addr->addr_bytes[5]) << 4));
+               break;
+       case 1:   /* use bits [46:35] of the address */
+               vector = ((uc_addr->addr_bytes[4] >> 3) |
+                       (((uint16_t)uc_addr->addr_bytes[5]) << 5));
+               break;
+       case 2:   /* use bits [45:34] of the address */
+               vector = ((uc_addr->addr_bytes[4] >> 2) |
+                       (((uint16_t)uc_addr->addr_bytes[5]) << 6));
+               break;
+       case 3:   /* use bits [43:32] of the address */
+               vector = ((uc_addr->addr_bytes[4]) |
+                       (((uint16_t)uc_addr->addr_bytes[5]) << 8));
+               break;
+       default:  /* Invalid mc_filter_type */
+               break;
+       }
+
+       /* vector can only be 12-bits or boundary will be exceeded */
+       vector &= 0xFFF;
+       return vector;
+}
+
+static int
+txgbe_uc_hash_table_set(struct rte_eth_dev *dev,
+                       struct rte_ether_addr *mac_addr, uint8_t on)
+{
+       uint32_t vector;
+       uint32_t uta_idx;
+       uint32_t reg_val;
+       uint32_t uta_mask;
+       uint32_t psrctl;
+
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_uta_info *uta_info = TXGBE_DEV_UTA_INFO(dev);
+
+       /* The UTA table only exists on pf hardware */
+       if (hw->mac.type < txgbe_mac_raptor)
+               return -ENOTSUP;
+
+       vector = txgbe_uta_vector(hw, mac_addr);
+       uta_idx = (vector >> 5) & 0x7F;
+       uta_mask = 0x1UL << (vector & 0x1F);
+
+       if (!!on == !!(uta_info->uta_shadow[uta_idx] & uta_mask))
+               return 0;
+
+       reg_val = rd32(hw, TXGBE_UCADDRTBL(uta_idx));
+       if (on) {
+               uta_info->uta_in_use++;
+               reg_val |= uta_mask;
+               uta_info->uta_shadow[uta_idx] |= uta_mask;
+       } else {
                uta_info->uta_in_use--;
                reg_val &= ~uta_mask;
                uta_info->uta_shadow[uta_idx] &= ~uta_mask;
@@ -2962,30 +3679,1476 @@ txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
        return 0;
 }
 
-static u8 *
-txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
-                       u8 **mc_addr_ptr, u32 *vmdq)
+int
+txgbe_syn_filter_set(struct rte_eth_dev *dev,
+                       struct rte_eth_syn_filter *filter,
+                       bool add)
 {
-       u8 *mc_addr;
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+       uint32_t syn_info;
+       uint32_t synqf;
 
-       *vmdq = 0;
-       mc_addr = *mc_addr_ptr;
-       *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
-       return mc_addr;
+       if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
+               return -EINVAL;
+
+       syn_info = filter_info->syn_info;
+
+       if (add) {
+               if (syn_info & TXGBE_SYNCLS_ENA)
+                       return -EINVAL;
+               synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue);
+               synqf |= TXGBE_SYNCLS_ENA;
+
+               if (filter->hig_pri)
+                       synqf |= TXGBE_SYNCLS_HIPRIO;
+               else
+                       synqf &= ~TXGBE_SYNCLS_HIPRIO;
+       } else {
+               synqf = rd32(hw, TXGBE_SYNCLS);
+               if (!(syn_info & TXGBE_SYNCLS_ENA))
+                       return -ENOENT;
+               synqf &= ~(TXGBE_SYNCLS_QPID_MASK | TXGBE_SYNCLS_ENA);
+       }
+
+       filter_info->syn_info = synqf;
+       wr32(hw, TXGBE_SYNCLS, synqf);
+       txgbe_flush(hw);
+       return 0;
 }
 
-int
-txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
-                         struct rte_ether_addr *mc_addr_set,
-                         uint32_t nb_mc_addr)
+static inline enum txgbe_5tuple_protocol
+convert_protocol_type(uint8_t protocol_value)
 {
-       struct txgbe_hw *hw;
-       u8 *mc_addr_list;
+       if (protocol_value == IPPROTO_TCP)
+               return TXGBE_5TF_PROT_TCP;
+       else if (protocol_value == IPPROTO_UDP)
+               return TXGBE_5TF_PROT_UDP;
+       else if (protocol_value == IPPROTO_SCTP)
+               return TXGBE_5TF_PROT_SCTP;
+       else
+               return TXGBE_5TF_PROT_NONE;
+}
 
-       hw = TXGBE_DEV_HW(dev);
-       mc_addr_list = (u8 *)mc_addr_set;
-       return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
-                                        txgbe_dev_addr_list_itr, TRUE);
+/* inject a 5-tuple filter to HW
+ *
+ * Programs the hardware slot reserved in filter->index with the 5-tuple
+ * (src/dst IP, src/dst port, protocol) and steers matches to filter->queue.
+ * The caller must have allocated the slot (see txgbe_add_5tuple_filter).
+ */
+static inline void
+txgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
+                          struct txgbe_5tuple_filter *filter)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       int i;
+       uint32_t ftqf, sdpqf;
+       uint32_t l34timir = 0;
+       uint32_t mask = TXGBE_5TFCTL0_MASK;
+
+       i = filter->index;
+       /* Pack destination and source L4 ports into one register value. */
+       sdpqf = TXGBE_5TFPORT_DST(be_to_le16(filter->filter_info.dst_port));
+       sdpqf |= TXGBE_5TFPORT_SRC(be_to_le16(filter->filter_info.src_port));
+
+       ftqf = TXGBE_5TFCTL0_PROTO(filter->filter_info.proto);
+       ftqf |= TXGBE_5TFCTL0_PRI(filter->filter_info.priority);
+       /* Clear the ignore-mask bit for every field that must be compared. */
+       if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
+               mask &= ~TXGBE_5TFCTL0_MSADDR;
+       if (filter->filter_info.dst_ip_mask == 0)
+               mask &= ~TXGBE_5TFCTL0_MDADDR;
+       if (filter->filter_info.src_port_mask == 0)
+               mask &= ~TXGBE_5TFCTL0_MSPORT;
+       if (filter->filter_info.dst_port_mask == 0)
+               mask &= ~TXGBE_5TFCTL0_MDPORT;
+       if (filter->filter_info.proto_mask == 0)
+               mask &= ~TXGBE_5TFCTL0_MPROTO;
+       ftqf |= mask;
+       ftqf |= TXGBE_5TFCTL0_MPOOL;
+       ftqf |= TXGBE_5TFCTL0_ENA;
+
+       /* Write address/port registers first, control (enable) register last. */
+       wr32(hw, TXGBE_5TFDADDR(i), be_to_le32(filter->filter_info.dst_ip));
+       wr32(hw, TXGBE_5TFSADDR(i), be_to_le32(filter->filter_info.src_ip));
+       wr32(hw, TXGBE_5TFPORT(i), sdpqf);
+       wr32(hw, TXGBE_5TFCTL0(i), ftqf);
+
+       /* Map matching packets to the requested Rx queue. */
+       l34timir |= TXGBE_5TFCTL1_QP(filter->queue);
+       wr32(hw, TXGBE_5TFCTL1(i), l34timir);
+}
+
+/*
+ * add a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter allocates.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter assigned to.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+txgbe_add_5tuple_filter(struct rte_eth_dev *dev,
+                       struct txgbe_5tuple_filter *filter)
+{
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+       int i, idx, shift;
+
+       /*
+        * look for an unused 5tuple filter index,
+        * and insert the filter to list.
+        */
+       for (i = 0; i < TXGBE_MAX_FTQF_FILTERS; i++) {
+               /* fivetuple_mask is a bitmap: word idx, bit shift within it */
+               idx = i / (sizeof(uint32_t) * NBBY);
+               shift = i % (sizeof(uint32_t) * NBBY);
+               if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
+                       filter_info->fivetuple_mask[idx] |= 1 << shift;
+                       filter->index = i;
+                       TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
+                                         filter,
+                                         entries);
+                       break;
+               }
+       }
+       /* NOTE(review): -ENOSYS is an odd errno for a full table (-ENOSPC
+        * would read better), but it is kept as-is for behavior stability.
+        */
+       if (i >= TXGBE_MAX_FTQF_FILTERS) {
+               PMD_DRV_LOG(ERR, "5tuple filters are full.");
+               return -ENOSYS;
+       }
+
+       /* Slot reserved in software; now program the hardware registers. */
+       txgbe_inject_5tuple_filter(dev, filter);
+
+       return 0;
+}
+
+/*
+ * remove a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: the pointer of the filter will be removed.
+ */
+static void
+txgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+                       struct txgbe_5tuple_filter *filter)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+       uint16_t index = filter->index;
+
+       /* Release the slot in the allocation bitmap, unlink and free the
+        * software node (filter must not be dereferenced afterwards).
+        */
+       filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
+                               ~(1 << (index % (sizeof(uint32_t) * NBBY)));
+       TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
+       rte_free(filter);
+
+       /* Clear every hardware register of this filter slot. */
+       wr32(hw, TXGBE_5TFDADDR(index), 0);
+       wr32(hw, TXGBE_5TFSADDR(index), 0);
+       wr32(hw, TXGBE_5TFPORT(index), 0);
+       wr32(hw, TXGBE_5TFCTL0(index), 0);
+       wr32(hw, TXGBE_5TFCTL1(index), 0);
+}
+
+/* Find a filter in the list whose filter_info matches *key byte-for-byte;
+ * return the node, or NULL if no match exists.
+ */
+static inline struct txgbe_5tuple_filter *
+txgbe_5tuple_filter_lookup(struct txgbe_5tuple_filter_list *filter_list,
+                       struct txgbe_5tuple_filter_info *key)
+{
+       struct txgbe_5tuple_filter *it;
+
+       TAILQ_FOREACH(it, filter_list, entries) {
+               if (memcmp(key, &it->filter_info,
+                       sizeof(struct txgbe_5tuple_filter_info)) == 0) {
+                       return it;
+               }
+       }
+       return NULL;
+}
+
+/* translate elements in struct rte_eth_ntuple_filter
+ * to struct txgbe_5tuple_filter_info
+ *
+ * Note the mask-sense inversion: in rte_eth_ntuple_filter a full mask
+ * (UINT32_MAX/UINT16_MAX/UINT8_MAX) means "compare this field", while in
+ * txgbe_5tuple_filter_info a mask of 0 means compare and 1 means ignore
+ * (see txgbe_inject_5tuple_filter). Partial masks are rejected.
+ *
+ * Returns 0 on success, -EINVAL on any out-of-range or unsupported value.
+ */
+static inline int
+ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
+                       struct txgbe_5tuple_filter_info *filter_info)
+{
+       if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM ||
+               filter->priority > TXGBE_5TUPLE_MAX_PRI ||
+               filter->priority < TXGBE_5TUPLE_MIN_PRI)
+               return -EINVAL;
+
+       switch (filter->dst_ip_mask) {
+       case UINT32_MAX:
+               filter_info->dst_ip_mask = 0;
+               filter_info->dst_ip = filter->dst_ip;
+               break;
+       case 0:
+               filter_info->dst_ip_mask = 1;
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
+               return -EINVAL;
+       }
+
+       switch (filter->src_ip_mask) {
+       case UINT32_MAX:
+               filter_info->src_ip_mask = 0;
+               filter_info->src_ip = filter->src_ip;
+               break;
+       case 0:
+               filter_info->src_ip_mask = 1;
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "invalid src_ip mask.");
+               return -EINVAL;
+       }
+
+       switch (filter->dst_port_mask) {
+       case UINT16_MAX:
+               filter_info->dst_port_mask = 0;
+               filter_info->dst_port = filter->dst_port;
+               break;
+       case 0:
+               filter_info->dst_port_mask = 1;
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "invalid dst_port mask.");
+               return -EINVAL;
+       }
+
+       switch (filter->src_port_mask) {
+       case UINT16_MAX:
+               filter_info->src_port_mask = 0;
+               filter_info->src_port = filter->src_port;
+               break;
+       case 0:
+               filter_info->src_port_mask = 1;
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "invalid src_port mask.");
+               return -EINVAL;
+       }
+
+       switch (filter->proto_mask) {
+       case UINT8_MAX:
+               filter_info->proto_mask = 0;
+               /* Map the IP protocol number to the HW encoding. */
+               filter_info->proto =
+                       convert_protocol_type(filter->proto);
+               break;
+       case 0:
+               filter_info->proto_mask = 1;
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "invalid protocol mask.");
+               return -EINVAL;
+       }
+
+       filter_info->priority = (uint8_t)filter->priority;
+       return 0;
+}
+
+/*
+ * add or delete a ntuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ * add: if true, add filter, if false, remove filter
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+int
+txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
+                       struct rte_eth_ntuple_filter *ntuple_filter,
+                       bool add)
+{
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+       struct txgbe_5tuple_filter_info filter_5tuple;
+       struct txgbe_5tuple_filter *filter;
+       int ret;
+
+       /* Only the plain 5-tuple flag combination is supported. */
+       if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
+               PMD_DRV_LOG(ERR, "only 5tuple is supported.");
+               return -EINVAL;
+       }
+
+       memset(&filter_5tuple, 0, sizeof(struct txgbe_5tuple_filter_info));
+       ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
+       if (ret < 0)
+               return ret;
+
+       /* Reject duplicate adds and deletes of unknown filters. */
+       filter = txgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
+                                        &filter_5tuple);
+       if (filter != NULL && add) {
+               PMD_DRV_LOG(ERR, "filter exists.");
+               return -EEXIST;
+       }
+       if (filter == NULL && !add) {
+               PMD_DRV_LOG(ERR, "filter doesn't exist.");
+               return -ENOENT;
+       }
+
+       if (add) {
+               filter = rte_zmalloc("txgbe_5tuple_filter",
+                               sizeof(struct txgbe_5tuple_filter), 0);
+               if (filter == NULL)
+                       return -ENOMEM;
+               rte_memcpy(&filter->filter_info,
+                                &filter_5tuple,
+                                sizeof(struct txgbe_5tuple_filter_info));
+               filter->queue = ntuple_filter->queue;
+               /* On failure the node was never linked; free it here. */
+               ret = txgbe_add_5tuple_filter(dev, filter);
+               if (ret < 0) {
+                       rte_free(filter);
+                       return ret;
+               }
+       } else {
+               /* Unlinks, frees the node and clears the HW slot. */
+               txgbe_remove_5tuple_filter(dev, filter);
+       }
+
+       return 0;
+}
+
+/* Add (add == true) or remove an ethertype filter that steers frames of a
+ * given EtherType to a Rx queue via the ETFLT/ETCLS register pair.
+ * MAC-compare and drop options are not supported; IPv4/IPv6 EtherTypes are
+ * rejected. Returns 0 on success or a negative errno.
+ */
+int
+txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+                       struct rte_eth_ethertype_filter *filter,
+                       bool add)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+       uint32_t etqf = 0;
+       uint32_t etqs = 0;
+       int ret;
+       struct txgbe_ethertype_filter ethertype_filter;
+
+       if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
+               return -EINVAL;
+
+       if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+           filter->ether_type == RTE_ETHER_TYPE_IPV6) {
+               PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
+                       " ethertype filter.", filter->ether_type);
+               return -EINVAL;
+       }
+
+       if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+               PMD_DRV_LOG(ERR, "mac compare is unsupported.");
+               return -EINVAL;
+       }
+       if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+               PMD_DRV_LOG(ERR, "drop option is unsupported.");
+               return -EINVAL;
+       }
+
+       /* ret is the existing slot index, or negative if not present. */
+       ret = txgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
+       if (ret >= 0 && add) {
+               PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
+                           filter->ether_type);
+               return -EEXIST;
+       }
+       if (ret < 0 && !add) {
+               PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+                           filter->ether_type);
+               return -ENOENT;
+       }
+
+       if (add) {
+               etqf = TXGBE_ETFLT_ENA;
+               etqf |= TXGBE_ETFLT_ETID(filter->ether_type);
+               etqs |= TXGBE_ETCLS_QPID(filter->queue);
+               etqs |= TXGBE_ETCLS_QENA;
+
+               ethertype_filter.ethertype = filter->ether_type;
+               ethertype_filter.etqf = etqf;
+               ethertype_filter.etqs = etqs;
+               ethertype_filter.conf = FALSE;
+               /* insert returns the allocated slot index (>= 0) */
+               ret = txgbe_ethertype_filter_insert(filter_info,
+                                                   &ethertype_filter);
+               if (ret < 0) {
+                       PMD_DRV_LOG(ERR, "ethertype filters are full.");
+                       return -ENOSPC;
+               }
+       } else {
+               ret = txgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
+               if (ret < 0)
+                       return -ENOSYS;
+       }
+       /* ret indexes the slot; etqf/etqs are 0 on removal (disable). */
+       wr32(hw, TXGBE_ETFLT(ret), etqf);
+       wr32(hw, TXGBE_ETCLS(ret), etqs);
+       txgbe_flush(hw);
+
+       return 0;
+}
+
+/* Legacy filter-ctrl entry point: only RTE_ETH_FILTER_GENERIC / GET is
+ * supported, which hands back the rte_flow ops table for this PMD.
+ */
+static int
+txgbe_dev_filter_ctrl(__rte_unused struct rte_eth_dev *dev,
+                    enum rte_filter_type filter_type,
+                    enum rte_filter_op filter_op,
+                    void *arg)
+{
+       int ret = 0;
+
+       switch (filter_type) {
+       case RTE_ETH_FILTER_GENERIC:
+               if (filter_op != RTE_ETH_FILTER_GET)
+                       return -EINVAL;
+               /* caller receives a pointer to the driver's flow ops */
+               *(const void **)arg = &txgbe_flow_ops;
+               break;
+       default:
+               PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+                                                       filter_type);
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* Iterator callback for txgbe_update_mc_addr_list(): returns the current
+ * MAC address and advances *mc_addr_ptr by one rte_ether_addr. VMDq pool
+ * is always reported as 0.
+ */
+static u8 *
+txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
+                       u8 **mc_addr_ptr, u32 *vmdq)
+{
+       u8 *mc_addr;
+
+       *vmdq = 0;
+       mc_addr = *mc_addr_ptr;
+       *mc_addr_ptr = (mc_addr + sizeof(struct rte_ether_addr));
+       return mc_addr;
+}
+
+/* Replace the device's multicast address list with mc_addr_set
+ * (nb_mc_addr entries), walking it via txgbe_dev_addr_list_itr.
+ */
+int
+txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
+                         struct rte_ether_addr *mc_addr_set,
+                         uint32_t nb_mc_addr)
+{
+       struct txgbe_hw *hw;
+       u8 *mc_addr_list;
+
+       hw = TXGBE_DEV_HW(dev);
+       mc_addr_list = (u8 *)mc_addr_set;
+       return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
+                                        txgbe_dev_addr_list_itr, TRUE);
+}
+
+/* Read the 64-bit system-time cycle counter (TSTIMEL low / TSTIMEH high). */
+static uint64_t
+txgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint64_t systime_cycles;
+
+       /* read low word first, then merge in the high word */
+       systime_cycles = (uint64_t)rd32(hw, TXGBE_TSTIMEL);
+       systime_cycles |= (uint64_t)rd32(hw, TXGBE_TSTIMEH) << 32;
+
+       return systime_cycles;
+}
+
+/* Read the latched 64-bit Rx PTP timestamp from TSRXSTMPL/TSRXSTMPH. */
+static uint64_t
+txgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint64_t rx_tstamp_cycles;
+
+       /* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
+       rx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSRXSTMPL);
+       rx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSRXSTMPH) << 32;
+
+       return rx_tstamp_cycles;
+}
+
+/* Read the latched 64-bit Tx PTP timestamp from TSTXSTMPL/TSTXSTMPH. */
+static uint64_t
+txgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint64_t tx_tstamp_cycles;
+
+       /* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
+       tx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSTXSTMPL);
+       tx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSTXSTMPH) << 32;
+
+       return tx_tstamp_cycles;
+}
+
+/* Initialize the sys/Rx/Tx software timecounters and program the hardware
+ * time increment (TSTIMEINC) according to the current link speed, so that
+ * cycle counts convert to nanoseconds at the right rate.
+ */
+static void
+txgbe_start_timecounters(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+       struct rte_eth_link link;
+       uint32_t incval = 0;
+       uint32_t shift = 0;
+
+       /* Get current link speed. */
+       txgbe_dev_link_update(dev, 1);
+       rte_eth_linkstatus_get(dev, &link);
+
+       /* Pick the per-speed increment value and fractional shift. */
+       switch (link.link_speed) {
+       case ETH_SPEED_NUM_100M:
+               incval = TXGBE_INCVAL_100;
+               shift = TXGBE_INCVAL_SHIFT_100;
+               break;
+       case ETH_SPEED_NUM_1G:
+               incval = TXGBE_INCVAL_1GB;
+               shift = TXGBE_INCVAL_SHIFT_1GB;
+               break;
+       case ETH_SPEED_NUM_10G:
+       default:
+               /* 10G values also serve as the fallback for unknown speeds */
+               incval = TXGBE_INCVAL_10GB;
+               shift = TXGBE_INCVAL_SHIFT_10GB;
+               break;
+       }
+
+       wr32(hw, TXGBE_TSTIMEINC, TXGBE_TSTIMEINC_VP(incval, 2));
+
+       /* Restart all three timecounters from zero with the new scaling. */
+       memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
+       memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+       memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+       adapter->systime_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
+       adapter->systime_tc.cc_shift = shift;
+       adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
+
+       adapter->rx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
+       adapter->rx_tstamp_tc.cc_shift = shift;
+       adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+       adapter->tx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
+       adapter->tx_tstamp_tc.cc_shift = shift;
+       adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+}
+
+/* Shift all three software timecounters by delta nanoseconds (no hardware
+ * register is touched; adjustment is purely in the timecounter state).
+ */
+static int
+txgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+       struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+
+       adapter->systime_tc.nsec += delta;
+       adapter->rx_tstamp_tc.nsec += delta;
+       adapter->tx_tstamp_tc.nsec += delta;
+
+       return 0;
+}
+
+/* Set the software timecounters to the absolute time *ts (converted to
+ * nanoseconds); hardware counters are left running unchanged.
+ */
+static int
+txgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+       uint64_t ns;
+       struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+
+       ns = rte_timespec_to_ns(ts);
+       /* Set the timecounters to a new value. */
+       adapter->systime_tc.nsec = ns;
+       adapter->rx_tstamp_tc.nsec = ns;
+       adapter->tx_tstamp_tc.nsec = ns;
+
+       return 0;
+}
+
+/* Read the current device time: sample the HW cycle counter, convert it
+ * through the systime timecounter, and return it as a timespec.
+ */
+static int
+txgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+       uint64_t ns, systime_cycles;
+       struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+
+       systime_cycles = txgbe_read_systime_cyclecounter(dev);
+       ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
+       *ts = rte_ns_to_timespec(ns);
+
+       return 0;
+}
+
+/* Enable IEEE 1588 (PTP) timestamping: reset and restart the system time,
+ * install the L2 EtherType filter for 1588 frames, and turn on Rx and Tx
+ * timestamp capture. Always returns 0.
+ */
+static int
+txgbe_timesync_enable(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t tsync_ctl;
+
+       /* Stop the timesync system time. */
+       wr32(hw, TXGBE_TSTIMEINC, 0x0);
+       /* Reset the timesync system time value. */
+       wr32(hw, TXGBE_TSTIMEL, 0x0);
+       wr32(hw, TXGBE_TSTIMEH, 0x0);
+
+       /* Re-arms TSTIMEINC based on current link speed. */
+       txgbe_start_timecounters(dev);
+
+       /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+       wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588),
+               RTE_ETHER_TYPE_1588 | TXGBE_ETFLT_ENA | TXGBE_ETFLT_1588);
+
+       /* Enable timestamping of received PTP packets. */
+       tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
+       tsync_ctl |= TXGBE_TSRXCTL_ENA;
+       wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
+
+       /* Enable timestamping of transmitted PTP packets. */
+       tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
+       tsync_ctl |= TXGBE_TSTXCTL_ENA;
+       wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
+
+       txgbe_flush(hw);
+
+       return 0;
+}
+
+/* Disable IEEE 1588 (PTP) timestamping: reverse of txgbe_timesync_enable,
+ * tearing down Tx/Rx capture, the 1588 EtherType filter and the time
+ * increment. Always returns 0.
+ */
+static int
+txgbe_timesync_disable(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t tsync_ctl;
+
+       /* Disable timestamping of transmitted PTP packets. */
+       tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
+       tsync_ctl &= ~TXGBE_TSTXCTL_ENA;
+       wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
+
+       /* Disable timestamping of received PTP packets. */
+       tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
+       tsync_ctl &= ~TXGBE_TSRXCTL_ENA;
+       wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
+
+       /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+       wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0);
+
+       /* Stop incrementating the System Time registers. */
+       wr32(hw, TXGBE_TSTIMEINC, 0);
+
+       return 0;
+}
+
+/* Read the latched Rx PTP timestamp. Returns -EINVAL if no valid
+ * timestamp is pending (TSRXCTL_VLD clear), otherwise converts the
+ * latched cycles to a timespec via the Rx timecounter.
+ */
+static int
+txgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+                                struct timespec *timestamp,
+                                uint32_t flags __rte_unused)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+       uint32_t tsync_rxctl;
+       uint64_t rx_tstamp_cycles;
+       uint64_t ns;
+
+       tsync_rxctl = rd32(hw, TXGBE_TSRXCTL);
+       if ((tsync_rxctl & TXGBE_TSRXCTL_VLD) == 0)
+               return -EINVAL;
+
+       rx_tstamp_cycles = txgbe_read_rx_tstamp_cyclecounter(dev);
+       ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
+       *timestamp = rte_ns_to_timespec(ns);
+
+       return  0;
+}
+
+/* Read the latched Tx PTP timestamp. Returns -EINVAL if no valid
+ * timestamp is pending (TSTXCTL_VLD clear), otherwise converts the
+ * latched cycles to a timespec via the Tx timecounter.
+ */
+static int
+txgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+                                struct timespec *timestamp)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+       uint32_t tsync_txctl;
+       uint64_t tx_tstamp_cycles;
+       uint64_t ns;
+
+       tsync_txctl = rd32(hw, TXGBE_TSTXCTL);
+       if ((tsync_txctl & TXGBE_TSTXCTL_VLD) == 0)
+               return -EINVAL;
+
+       tx_tstamp_cycles = txgbe_read_tx_tstamp_cyclecounter(dev);
+       ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
+       *timestamp = rte_ns_to_timespec(ns);
+
+       return 0;
+}
+
+/* Count the total number of 32-bit registers described by the
+ * NULL-terminated txgbe_regs_others group table.
+ */
+static int
+txgbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
+{
+       int count = 0;
+       int g_ind = 0;
+       const struct reg_info *reg_group;
+       const struct reg_info **reg_set = txgbe_regs_others;
+
+       while ((reg_group = reg_set[g_ind++]))
+               count += txgbe_regs_group_count(reg_group);
+
+       return count;
+}
+
+/* ethtool-style register dump. With data == NULL, only reports the dump
+ * length/width. Otherwise performs a full dump (partial dumps are not
+ * supported and return -ENOTSUP).
+ */
+static int
+txgbe_get_regs(struct rte_eth_dev *dev,
+             struct rte_dev_reg_info *regs)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t *data = regs->data;
+       int g_ind = 0;
+       int count = 0;
+       const struct reg_info *reg_group;
+       const struct reg_info **reg_set = txgbe_regs_others;
+
+       if (data == NULL) {
+               regs->length = txgbe_get_reg_length(dev);
+               regs->width = sizeof(uint32_t);
+               return 0;
+       }
+
+       /* Support only full register dump */
+       if (regs->length == 0 ||
+           regs->length == (uint32_t)txgbe_get_reg_length(dev)) {
+               /* version encodes mac type, revision and device id */
+               regs->version = hw->mac.type << 24 |
+                               hw->revision_id << 16 |
+                               hw->device_id;
+               while ((reg_group = reg_set[g_ind++]))
+                       count += txgbe_read_regs_group(dev, &data[count],
+                                                     reg_group);
+               return 0;
+       }
+
+       return -ENOTSUP;
+}
+
+/* Report EEPROM size in bytes (rom.word_size is in 16-bit words). */
+static int
+txgbe_get_eeprom_length(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+       /* Return unit is byte count */
+       return hw->rom.word_size * 2;
+}
+
+/* Read a range of the EEPROM. Byte offset/length from in_eeprom are
+ * converted to 16-bit word units and bounds-checked against the ROM size.
+ */
+static int
+txgbe_get_eeprom(struct rte_eth_dev *dev,
+               struct rte_dev_eeprom_info *in_eeprom)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_rom_info *eeprom = &hw->rom;
+       uint16_t *data = in_eeprom->data;
+       int first, length;
+
+       /* convert byte offset/length to word units */
+       first = in_eeprom->offset >> 1;
+       length = in_eeprom->length >> 1;
+       if (first > hw->rom.word_size ||
+           ((first + length) > hw->rom.word_size))
+               return -EINVAL;
+
+       in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+       return eeprom->readw_buffer(hw, first, length, data);
+}
+
+/* Write a range of the EEPROM; mirror of txgbe_get_eeprom (byte units in,
+ * 16-bit word units to the ROM ops, same bounds checking).
+ */
+static int
+txgbe_set_eeprom(struct rte_eth_dev *dev,
+               struct rte_dev_eeprom_info *in_eeprom)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_rom_info *eeprom = &hw->rom;
+       uint16_t *data = in_eeprom->data;
+       int first, length;
+
+       /* convert byte offset/length to word units */
+       first = in_eeprom->offset >> 1;
+       length = in_eeprom->length >> 1;
+       if (first > hw->rom.word_size ||
+           ((first + length) > hw->rom.word_size))
+               return -EINVAL;
+
+       in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+       return eeprom->writew_buffer(hw,  first, length, data);
+}
+
+/* Identify the plugged SFP module's EEPROM layout: reports SFF-8472 when
+ * the module supports it and does not need an address-mode change,
+ * otherwise falls back to SFF-8079. Returns -EIO on I2C read failure.
+ */
+static int
+txgbe_get_module_info(struct rte_eth_dev *dev,
+                     struct rte_eth_dev_module_info *modinfo)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t status;
+       uint8_t sff8472_rev, addr_mode;
+       bool page_swap = false;
+
+       /* Check whether we support SFF-8472 or not */
+       status = hw->phy.read_i2c_eeprom(hw,
+                                            TXGBE_SFF_SFF_8472_COMP,
+                                            &sff8472_rev);
+       if (status != 0)
+               return -EIO;
+
+       /* addressing mode is not supported */
+       status = hw->phy.read_i2c_eeprom(hw,
+                                            TXGBE_SFF_SFF_8472_SWAP,
+                                            &addr_mode);
+       if (status != 0)
+               return -EIO;
+
+       if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) {
+               PMD_DRV_LOG(ERR,
+                           "Address change required to access page 0xA2, "
+                           "but not supported. Please report the module "
+                           "type to the driver maintainers.");
+               page_swap = true;
+       }
+
+       if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap) {
+               /* We have a SFP, but it does not support SFF-8472 */
+               modinfo->type = RTE_ETH_MODULE_SFF_8079;
+               modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+       } else {
+               /* We have a SFP which supports a revision of SFF-8472. */
+               modinfo->type = RTE_ETH_MODULE_SFF_8472;
+               modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
+       }
+
+       return 0;
+}
+
+/* Read info->length bytes of the SFP module EEPROM starting at
+ * info->offset, one byte per I2C transaction. Offsets below the SFF-8079
+ * length come from the base page, higher offsets from the SFF-8472
+ * diagnostics area. Returns -EINVAL for zero length, -EIO on I2C failure.
+ */
+static int
+txgbe_get_module_eeprom(struct rte_eth_dev *dev,
+                       struct rte_dev_eeprom_info *info)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t status = TXGBE_ERR_PHY_ADDR_INVALID;
+       uint8_t databyte = 0xFF;
+       uint8_t *data = info->data;
+       uint32_t i = 0;
+
+       if (info->length == 0)
+               return -EINVAL;
+
+       for (i = info->offset; i < info->offset + info->length; i++) {
+               if (i < RTE_ETH_MODULE_SFF_8079_LEN)
+                       status = hw->phy.read_i2c_eeprom(hw, i, &databyte);
+               else
+                       status = hw->phy.read_i2c_sff8472(hw, i, &databyte);
+
+               if (status != 0)
+                       return -EIO;
+
+               data[i - info->offset] = databyte;
+       }
+
+       return 0;
+}
+
+/* Whether this MAC type supports updating RSS config from a secondary
+ * process (true only for raptor here).
+ */
+bool
+txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
+{
+       switch (mac_type) {
+       case txgbe_mac_raptor:
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+/* Fill in rte_eth_dcb_info from the driver's DCB configuration: number of
+ * TCs, priority-to-TC mapping, per-TC Rx/Tx queue ranges (layout depends on
+ * whether VMDq/VT is enabled and on the 4-TC vs 8-TC mode), and per-TC Tx
+ * bandwidth percentages. Always returns 0.
+ */
+static int
+txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
+                       struct rte_eth_dcb_info *dcb_info)
+{
+       struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
+       struct txgbe_dcb_tc_config *tc;
+       struct rte_eth_dcb_tc_queue_mapping *tc_queue;
+       uint8_t nb_tcs;
+       uint8_t i, j;
+
+       /* Only report multiple TCs when DCB Rx mode is configured. */
+       if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+               dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
+       else
+               dcb_info->nb_tcs = 1;
+
+       tc_queue = &dcb_info->tc_queue;
+       nb_tcs = dcb_info->nb_tcs;
+
+       if (dcb_config->vt_mode) { /* vt is enabled */
+               struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+                               &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+               for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+                       dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
+               if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
+                       /* SR-IOV active: one queue per TC in pool 0 */
+                       for (j = 0; j < nb_tcs; j++) {
+                               tc_queue->tc_rxq[0][j].base = j;
+                               tc_queue->tc_rxq[0][j].nb_queue = 1;
+                               tc_queue->tc_txq[0][j].base = j;
+                               tc_queue->tc_txq[0][j].nb_queue = 1;
+                       }
+               } else {
+                       /* queues laid out pool-major: pool i, TC j */
+                       for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+                               for (j = 0; j < nb_tcs; j++) {
+                                       tc_queue->tc_rxq[i][j].base =
+                                               i * nb_tcs + j;
+                                       tc_queue->tc_rxq[i][j].nb_queue = 1;
+                                       tc_queue->tc_txq[i][j].base =
+                                               i * nb_tcs + j;
+                                       tc_queue->tc_txq[i][j].nb_queue = 1;
+                               }
+                       }
+               }
+       } else { /* vt is disabled */
+               struct rte_eth_dcb_rx_conf *rx_conf =
+                               &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+               for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+                       dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
+               /* Fixed hardware queue layouts for 4-TC and 8-TC modes. */
+               if (dcb_info->nb_tcs == ETH_4_TCS) {
+                       for (i = 0; i < dcb_info->nb_tcs; i++) {
+                               dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
+                               dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+                       }
+                       dcb_info->tc_queue.tc_txq[0][0].base = 0;
+                       dcb_info->tc_queue.tc_txq[0][1].base = 64;
+                       dcb_info->tc_queue.tc_txq[0][2].base = 96;
+                       dcb_info->tc_queue.tc_txq[0][3].base = 112;
+                       dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
+                       dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+                       dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+                       dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+               } else if (dcb_info->nb_tcs == ETH_8_TCS) {
+                       for (i = 0; i < dcb_info->nb_tcs; i++) {
+                               dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
+                               dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+                       }
+                       dcb_info->tc_queue.tc_txq[0][0].base = 0;
+                       dcb_info->tc_queue.tc_txq[0][1].base = 32;
+                       dcb_info->tc_queue.tc_txq[0][2].base = 64;
+                       dcb_info->tc_queue.tc_txq[0][3].base = 80;
+                       dcb_info->tc_queue.tc_txq[0][4].base = 96;
+                       dcb_info->tc_queue.tc_txq[0][5].base = 104;
+                       dcb_info->tc_queue.tc_txq[0][6].base = 112;
+                       dcb_info->tc_queue.tc_txq[0][7].base = 120;
+                       dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
+                       dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+                       dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+                       dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+                       dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
+                       dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
+                       dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
+                       dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
+               }
+       }
+       /* Report each TC's configured Tx bandwidth group percentage. */
+       for (i = 0; i < dcb_info->nb_tcs; i++) {
+               tc = &dcb_config->tc_config[i];
+               dcb_info->tc_bws[i] = tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent;
+       }
+       return 0;
+}
+
+/* Update e-tag ether type.
+ *
+ * Programs @ether_type into the E-tag Ethertype field of the EXTAG
+ * register via read-modify-write.  Always returns 0.
+ */
+static int
+txgbe_update_e_tag_eth_type(struct txgbe_hw *hw,
+                           uint16_t ether_type)
+{
+       uint32_t etag_etype;
+
+       /* Clear the current Ethertype field, then set the new value. */
+       etag_etype = rd32(hw, TXGBE_EXTAG);
+       etag_etype &= ~TXGBE_EXTAG_ETAG_MASK;
+       etag_etype |= ether_type;
+       wr32(hw, TXGBE_EXTAG, etag_etype);
+       txgbe_flush(hw);
+
+       return 0;
+}
+
+/* Enable e-tag tunnel.
+ *
+ * Sets the E-tag enable bit in the port control register.
+ * Always returns 0.
+ */
+static int
+txgbe_e_tag_enable(struct txgbe_hw *hw)
+{
+       uint32_t etag_etype;
+
+       etag_etype = rd32(hw, TXGBE_PORTCTL);
+       etag_etype |= TXGBE_PORTCTL_ETAG;
+       wr32(hw, TXGBE_PORTCTL, etag_etype);
+       txgbe_flush(hw);
+
+       return 0;
+}
+
+/* Remove the E-tag forwarding entry matching l2_tunnel->tunnel_id.
+ *
+ * Scans the receive-address (RAR) table for a valid E-tag entry whose
+ * tag equals the requested tunnel id, clears that entry and its VMDq
+ * pool mapping, and stops at the first match.  Entry 0 is skipped
+ * (presumably reserved for the port MAC address -- TODO confirm).
+ * Always returns 0, whether or not a matching entry was found.
+ */
+static int
+txgbe_e_tag_filter_del(struct rte_eth_dev *dev,
+                      struct txgbe_l2_tunnel_conf  *l2_tunnel)
+{
+       int ret = 0;
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t i, rar_entries;
+       uint32_t rar_low, rar_high;
+
+       rar_entries = hw->mac.num_rar_entries;
+
+       for (i = 1; i < rar_entries; i++) {
+               /* Select RAR entry i, then read its high/low words. */
+               wr32(hw, TXGBE_ETHADDRIDX, i);
+               rar_high = rd32(hw, TXGBE_ETHADDRH);
+               rar_low  = rd32(hw, TXGBE_ETHADDRL);
+               if ((rar_high & TXGBE_ETHADDRH_VLD) &&
+                   (rar_high & TXGBE_ETHADDRH_ETAG) &&
+                   (TXGBE_ETHADDRL_ETAG(rar_low) ==
+                    l2_tunnel->tunnel_id)) {
+                       /* Invalidate the entry and drop its pool binding. */
+                       wr32(hw, TXGBE_ETHADDRL, 0);
+                       wr32(hw, TXGBE_ETHADDRH, 0);
+
+                       txgbe_clear_vmdq(hw, i, BIT_MASK32);
+
+                       return ret;
+               }
+       }
+
+       return ret;
+}
+
+/* Install an E-tag forwarding entry for l2_tunnel.
+ *
+ * Deletes any existing entry for the same tunnel id first, then claims
+ * the first free RAR entry (entry 0 is skipped, matching the delete
+ * path): the entry is bound to l2_tunnel->pool via VMDq and marked
+ * valid + E-tag with the tunnel id in the low word.
+ *
+ * Returns 0 on success, -EINVAL if the RAR table is full.
+ */
+static int
+txgbe_e_tag_filter_add(struct rte_eth_dev *dev,
+                      struct txgbe_l2_tunnel_conf *l2_tunnel)
+{
+       int ret = 0;
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t i, rar_entries;
+       uint32_t rar_low, rar_high;
+
+       /* One entry for one tunnel. Try to remove potential existing entry. */
+       txgbe_e_tag_filter_del(dev, l2_tunnel);
+
+       rar_entries = hw->mac.num_rar_entries;
+
+       for (i = 1; i < rar_entries; i++) {
+               wr32(hw, TXGBE_ETHADDRIDX, i);
+               rar_high = rd32(hw, TXGBE_ETHADDRH);
+               if (rar_high & TXGBE_ETHADDRH_VLD) {
+                       continue;
+               } else {
+                       /* Free slot found: bind pool, then program entry. */
+                       txgbe_set_vmdq(hw, i, l2_tunnel->pool);
+                       rar_high = TXGBE_ETHADDRH_VLD | TXGBE_ETHADDRH_ETAG;
+                       rar_low = l2_tunnel->tunnel_id;
+
+                       wr32(hw, TXGBE_ETHADDRL, rar_low);
+                       wr32(hw, TXGBE_ETHADDRH, rar_high);
+
+                       return ret;
+               }
+       }
+
+       PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full."
+                    " Please remove a rule before adding a new one.");
+       return -EINVAL;
+}
+
+/* Look up an L2 tunnel filter node by key.
+ *
+ * Returns the stored filter node, or NULL when the key is not in the
+ * hash table.
+ */
+static inline struct txgbe_l2_tn_filter *
+txgbe_l2_tn_filter_lookup(struct txgbe_l2_tn_info *l2_tn_info,
+                         struct txgbe_l2_tn_key *key)
+{
+       int ret;
+
+       /* rte_hash_lookup() returns the key index on hit, negative on miss. */
+       ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
+       if (ret < 0)
+               return NULL;
+
+       return l2_tn_info->hash_map[ret];
+}
+
+/* Insert an L2 tunnel filter node into the software tables.
+ *
+ * Adds the node's key to the hash table, records the node in hash_map
+ * at the index returned by rte_hash_add_key(), and appends it to the
+ * filter list.  Returns 0 on success, a negative rte_hash error
+ * otherwise (the node is not freed here; the caller owns it).
+ */
+static inline int
+txgbe_insert_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
+                         struct txgbe_l2_tn_filter *l2_tn_filter)
+{
+       int ret;
+
+       ret = rte_hash_add_key(l2_tn_info->hash_handle,
+                              &l2_tn_filter->key);
+
+       if (ret < 0) {
+               PMD_DRV_LOG(ERR,
+                           "Failed to insert L2 tunnel filter"
+                           " to hash table %d!",
+                           ret);
+               return ret;
+       }
+
+       /* On success ret is the key's slot index; mirror the node there. */
+       l2_tn_info->hash_map[ret] = l2_tn_filter;
+
+       TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+
+       return 0;
+}
+
+/* Remove an L2 tunnel filter node from the software tables.
+ *
+ * Deletes the key from the hash table, unlinks the node from the
+ * filter list and frees it.  Returns 0 on success, a negative
+ * rte_hash error when the key does not exist.
+ */
+static inline int
+txgbe_remove_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
+                         struct txgbe_l2_tn_key *key)
+{
+       int ret;
+       struct txgbe_l2_tn_filter *l2_tn_filter;
+
+       ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
+
+       if (ret < 0) {
+               PMD_DRV_LOG(ERR,
+                           "No such L2 tunnel filter to delete %d!",
+                           ret);
+               return ret;
+       }
+
+       /* ret is the deleted key's slot index; reclaim the mirrored node. */
+       l2_tn_filter = l2_tn_info->hash_map[ret];
+       l2_tn_info->hash_map[ret] = NULL;
+
+       TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+       rte_free(l2_tn_filter);
+
+       return 0;
+}
+
+/* Add l2 tunnel filter.
+ *
+ * When @restore is false this is a new rule: it is first recorded in
+ * the software hash/list (rejecting duplicates with -EINVAL), then
+ * programmed into hardware; on a hardware failure the software entry
+ * is rolled back.  When @restore is true the software tables already
+ * hold the rule (replay after reset/start) and only the hardware
+ * programming is redone.
+ *
+ * Only RTE_L2_TUNNEL_TYPE_E_TAG is supported; other types return
+ * -EINVAL.
+ */
+int
+txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
+                              struct txgbe_l2_tunnel_conf *l2_tunnel,
+                              bool restore)
+{
+       int ret;
+       struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+       struct txgbe_l2_tn_key key;
+       struct txgbe_l2_tn_filter *node;
+
+       if (!restore) {
+               key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+               key.tn_id = l2_tunnel->tunnel_id;
+
+               node = txgbe_l2_tn_filter_lookup(l2_tn_info, &key);
+
+               if (node) {
+                       PMD_DRV_LOG(ERR,
+                                   "The L2 tunnel filter already exists!");
+                       return -EINVAL;
+               }
+
+               node = rte_zmalloc("txgbe_l2_tn",
+                                  sizeof(struct txgbe_l2_tn_filter),
+                                  0);
+               if (!node)
+                       return -ENOMEM;
+
+               rte_memcpy(&node->key,
+                                &key,
+                                sizeof(struct txgbe_l2_tn_key));
+               node->pool = l2_tunnel->pool;
+               ret = txgbe_insert_l2_tn_filter(l2_tn_info, node);
+               if (ret < 0) {
+                       rte_free(node);
+                       return ret;
+               }
+       }
+
+       switch (l2_tunnel->l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       /* HW programming failed for a new rule: undo the SW bookkeeping. */
+       if (!restore && ret < 0)
+               (void)txgbe_remove_l2_tn_filter(l2_tn_info, &key);
+
+       return ret;
+}
+
+/* Delete l2 tunnel filter.
+ *
+ * Removes the rule from the software hash/list first (failing with the
+ * rte_hash error when it does not exist), then clears the hardware
+ * entry.  Only RTE_L2_TUNNEL_TYPE_E_TAG is supported; other types
+ * return -EINVAL (note: the software entry has already been removed
+ * at that point).
+ */
+int
+txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
+                              struct txgbe_l2_tunnel_conf *l2_tunnel)
+{
+       int ret;
+       struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+       struct txgbe_l2_tn_key key;
+
+       key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+       key.tn_id = l2_tunnel->tunnel_id;
+       ret = txgbe_remove_l2_tn_filter(l2_tn_info, &key);
+       if (ret < 0)
+               return ret;
+
+       switch (l2_tunnel->l2_tunnel_type) {
+       case RTE_L2_TUNNEL_TYPE_E_TAG:
+               ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* Enable or disable E-tag based pool forwarding.
+ *
+ * Read-modify-writes the POOLCTL mode field: when @en is true the
+ * E-tag mode is selected, otherwise the field is left cleared.
+ * Always returns 0.
+ */
+static int
+txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
+{
+       int ret = 0;
+       uint32_t ctrl;
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+       ctrl = rd32(hw, TXGBE_POOLCTL);
+       ctrl &= ~TXGBE_POOLCTL_MODE_MASK;
+       if (en)
+               ctrl |= TXGBE_PSRPOOL_MODE_ETAG;
+       wr32(hw, TXGBE_POOLCTL, ctrl);
+
+       return ret;
+}
+
+/* Add UDP tunneling port.
+ *
+ * Programs the UDP destination port used to recognize VXLAN, Geneve or
+ * Teredo tunnel traffic into the corresponding port register.  For
+ * VXLAN both the VXLANPORT and VXLANPORTGPE registers receive the same
+ * port (presumably so VXLAN-GPE shares it -- TODO confirm).  Port 0 is
+ * rejected, as are unknown tunnel types, with -EINVAL.
+ */
+static int
+txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+                             struct rte_eth_udp_tunnel *udp_tunnel)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       int ret = 0;
+
+       if (udp_tunnel == NULL)
+               return -EINVAL;
+
+       switch (udp_tunnel->prot_type) {
+       case RTE_TUNNEL_TYPE_VXLAN:
+               if (udp_tunnel->udp_port == 0) {
+                       PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
+               wr32(hw, TXGBE_VXLANPORTGPE, udp_tunnel->udp_port);
+               break;
+       case RTE_TUNNEL_TYPE_GENEVE:
+               if (udp_tunnel->udp_port == 0) {
+                       PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed.");
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
+               break;
+       case RTE_TUNNEL_TYPE_TEREDO:
+               if (udp_tunnel->udp_port == 0) {
+                       PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed.");
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       /* Flush unconditionally, even on the error paths above. */
+       txgbe_flush(hw);
+
+       return ret;
+}
+
+/* Remove UDP tunneling port.
+ *
+ * Clears the tunnel-recognition UDP port register for the given type,
+ * but only if the currently programmed port matches the requested one;
+ * a mismatch fails with -EINVAL.  For VXLAN only VXLANPORT is checked,
+ * while both VXLANPORT and VXLANPORTGPE are cleared (mirroring the add
+ * path, which writes the same port to both).
+ */
+static int
+txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+                             struct rte_eth_udp_tunnel *udp_tunnel)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       int ret = 0;
+       uint16_t cur_port;
+
+       if (udp_tunnel == NULL)
+               return -EINVAL;
+
+       switch (udp_tunnel->prot_type) {
+       case RTE_TUNNEL_TYPE_VXLAN:
+               cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
+               if (cur_port != udp_tunnel->udp_port) {
+                       PMD_DRV_LOG(ERR, "Port %u does not exist.",
+                                       udp_tunnel->udp_port);
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_VXLANPORT, 0);
+               wr32(hw, TXGBE_VXLANPORTGPE, 0);
+               break;
+       case RTE_TUNNEL_TYPE_GENEVE:
+               cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
+               if (cur_port != udp_tunnel->udp_port) {
+                       PMD_DRV_LOG(ERR, "Port %u does not exist.",
+                                       udp_tunnel->udp_port);
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_GENEVEPORT, 0);
+               break;
+       case RTE_TUNNEL_TYPE_TEREDO:
+               cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
+               if (cur_port != udp_tunnel->udp_port) {
+                       PMD_DRV_LOG(ERR, "Port %u does not exist.",
+                                       udp_tunnel->udp_port);
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_TEREDOPORT, 0);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       /* Flush unconditionally, even on the error paths above. */
+       txgbe_flush(hw);
+
+       return ret;
+}
+
+/* restore n-tuple filter
+ *
+ * Re-injects every 5-tuple filter kept in the software list back into
+ * hardware (used after a device reset/start).
+ */
+static inline void
+txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
+{
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+       struct txgbe_5tuple_filter *node;
+
+       TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
+               txgbe_inject_5tuple_filter(dev, node);
+       }
+}
+
+/* restore ethernet type filter
+ *
+ * Replays every ethertype filter slot whose bit is set in
+ * ethertype_mask, rewriting the cached ETFLT/ETCLS register values
+ * into hardware.
+ */
+static inline void
+txgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+       int i;
+
+       for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
+               if (filter_info->ethertype_mask & (1 << i)) {
+                       wr32(hw, TXGBE_ETFLT(i),
+                                       filter_info->ethertype_filters[i].etqf);
+                       wr32(hw, TXGBE_ETCLS(i),
+                                       filter_info->ethertype_filters[i].etqs);
+                       txgbe_flush(hw);
+               }
+       }
+}
+
+/* restore SYN filter
+ *
+ * Rewrites the cached SYN classifier value into the SYNCLS register,
+ * but only when the cached value has the enable bit set.
+ */
+static inline void
+txgbe_syn_filter_restore(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+       uint32_t synqf;
+
+       synqf = filter_info->syn_info;
+
+       if (synqf & TXGBE_SYNCLS_ENA) {
+               wr32(hw, TXGBE_SYNCLS, synqf);
+               txgbe_flush(hw);
+       }
+}
+
+/* restore L2 tunnel filter
+ *
+ * Replays every L2 tunnel filter kept in the software list into
+ * hardware via txgbe_dev_l2_tunnel_filter_add() with restore=TRUE
+ * (so the software tables are not modified again).
+ */
+static inline void
+txgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
+{
+       struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+       struct txgbe_l2_tn_filter *node;
+       struct txgbe_l2_tunnel_conf l2_tn_conf;
+
+       TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
+               l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
+               l2_tn_conf.tunnel_id      = node->key.tn_id;
+               l2_tn_conf.pool           = node->pool;
+               (void)txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
+       }
+}
+
+/* restore rss filter
+ *
+ * Re-applies the cached RSS flow configuration, if one was stored
+ * (queue_num != 0 marks a valid cached config).
+ */
+static inline void
+txgbe_rss_filter_restore(struct rte_eth_dev *dev)
+{
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+       if (filter_info->rss_info.conf.queue_num)
+               txgbe_config_rss_filter(dev,
+                       &filter_info->rss_info, TRUE);
+}
+
+/* Restore all software-cached filters (n-tuple, ethertype, SYN, flow
+ * director, L2 tunnel, RSS) into hardware.  Always returns 0.
+ */
+static int
+txgbe_filter_restore(struct rte_eth_dev *dev)
+{
+       txgbe_ntuple_filter_restore(dev);
+       txgbe_ethertype_filter_restore(dev);
+       txgbe_syn_filter_restore(dev);
+       txgbe_fdir_filter_restore(dev);
+       txgbe_l2_tn_filter_restore(dev);
+       txgbe_rss_filter_restore(dev);
+
+       return 0;
+}
+
+/* Apply the cached L2 tunnel (E-tag) configuration to hardware:
+ * E-tag enable, E-tag pool forwarding, and the E-tag Ethertype.
+ */
+static void
+txgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
+{
+       struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+       if (l2_tn_info->e_tag_en)
+               (void)txgbe_e_tag_enable(hw);
+
+       if (l2_tn_info->e_tag_fwd_en)
+               (void)txgbe_e_tag_forwarding_en_dis(dev, 1);
+
+       (void)txgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
+}
+
+/* remove all the n-tuple filters
+ *
+ * Pops each 5-tuple filter off the software list and removes it via
+ * txgbe_remove_5tuple_filter() until the list is empty.
+ */
+void
+txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
+{
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+       struct txgbe_5tuple_filter *p_5tuple;
+
+       while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
+               txgbe_remove_5tuple_filter(dev, p_5tuple);
+}
+
+/* remove all the ether type filters
+ *
+ * Clears every in-use ethertype filter slot -- both the software
+ * bookkeeping and the ETFLT/ETCLS hardware registers -- except slots
+ * whose .conf flag is set (presumably flow-API managed entries that
+ * are cleaned up elsewhere -- TODO confirm).
+ */
+void
+txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+       int i;
+
+       for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
+               if (filter_info->ethertype_mask & (1 << i) &&
+                   !filter_info->ethertype_filters[i].conf) {
+                       (void)txgbe_ethertype_filter_remove(filter_info,
+                                                           (uint8_t)i);
+                       wr32(hw, TXGBE_ETFLT(i), 0);
+                       wr32(hw, TXGBE_ETCLS(i), 0);
+                       txgbe_flush(hw);
+               }
+       }
+}
+
+/* remove the SYN filter
+ *
+ * If a SYN filter is enabled, drops the cached value and zeroes the
+ * SYNCLS register.
+ */
+void
+txgbe_clear_syn_filter(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+       if (filter_info->syn_info & TXGBE_SYNCLS_ENA) {
+               filter_info->syn_info = 0;
+
+               wr32(hw, TXGBE_SYNCLS, 0);
+               txgbe_flush(hw);
+       }
+}
+
+/* remove all the L2 tunnel filters
+ *
+ * Pops each L2 tunnel filter off the software list and deletes it
+ * (software tables and hardware entry) via
+ * txgbe_dev_l2_tunnel_filter_del().  Stops and returns the error of
+ * the first failing delete; returns 0 when the list is emptied.
+ */
+int
+txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
+{
+       struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+       struct txgbe_l2_tn_filter *l2_tn_filter;
+       struct txgbe_l2_tunnel_conf l2_tn_conf;
+       int ret = 0;
+
+       while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+               l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
+               l2_tn_conf.tunnel_id      = l2_tn_filter->key.tn_id;
+               l2_tn_conf.pool           = l2_tn_filter->pool;
+               ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
 }
 
 static const struct eth_dev_ops txgbe_eth_dev_ops = {
@@ -2997,6 +5160,10 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
        .dev_set_link_down          = txgbe_dev_set_link_down,
        .dev_close                  = txgbe_dev_close,
        .dev_reset                  = txgbe_dev_reset,
+       .promiscuous_enable         = txgbe_dev_promiscuous_enable,
+       .promiscuous_disable        = txgbe_dev_promiscuous_disable,
+       .allmulticast_enable        = txgbe_dev_allmulticast_enable,
+       .allmulticast_disable       = txgbe_dev_allmulticast_disable,
        .link_update                = txgbe_dev_link_update,
        .stats_get                  = txgbe_dev_stats_get,
        .xstats_get                 = txgbe_dev_xstats_get,
@@ -3006,7 +5173,9 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
        .xstats_get_names           = txgbe_dev_xstats_get_names,
        .xstats_get_names_by_id     = txgbe_dev_xstats_get_names_by_id,
        .queue_stats_mapping_set    = txgbe_dev_queue_stats_mapping_set,
+       .fw_version_get             = txgbe_fw_version_get,
        .dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
+       .mtu_set                    = txgbe_dev_mtu_set,
        .vlan_filter_set            = txgbe_vlan_filter_set,
        .vlan_tpid_set              = txgbe_vlan_tpid_set,
        .vlan_offload_set           = txgbe_vlan_offload_set,
@@ -3021,15 +5190,43 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
        .rx_queue_release           = txgbe_dev_rx_queue_release,
        .tx_queue_setup             = txgbe_dev_tx_queue_setup,
        .tx_queue_release           = txgbe_dev_tx_queue_release,
+       .dev_led_on                 = txgbe_dev_led_on,
+       .dev_led_off                = txgbe_dev_led_off,
+       .flow_ctrl_get              = txgbe_flow_ctrl_get,
+       .flow_ctrl_set              = txgbe_flow_ctrl_set,
+       .priority_flow_ctrl_set     = txgbe_priority_flow_ctrl_set,
        .mac_addr_add               = txgbe_add_rar,
        .mac_addr_remove            = txgbe_remove_rar,
        .mac_addr_set               = txgbe_set_default_mac_addr,
        .uc_hash_table_set          = txgbe_uc_hash_table_set,
        .uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
        .set_queue_rate_limit       = txgbe_set_queue_rate_limit,
+       .reta_update                = txgbe_dev_rss_reta_update,
+       .reta_query                 = txgbe_dev_rss_reta_query,
+       .rss_hash_update            = txgbe_dev_rss_hash_update,
+       .rss_hash_conf_get          = txgbe_dev_rss_hash_conf_get,
+       .filter_ctrl                = txgbe_dev_filter_ctrl,
        .set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
        .rxq_info_get               = txgbe_rxq_info_get,
        .txq_info_get               = txgbe_txq_info_get,
+       .timesync_enable            = txgbe_timesync_enable,
+       .timesync_disable           = txgbe_timesync_disable,
+       .timesync_read_rx_timestamp = txgbe_timesync_read_rx_timestamp,
+       .timesync_read_tx_timestamp = txgbe_timesync_read_tx_timestamp,
+       .get_reg                    = txgbe_get_regs,
+       .get_eeprom_length          = txgbe_get_eeprom_length,
+       .get_eeprom                 = txgbe_get_eeprom,
+       .set_eeprom                 = txgbe_set_eeprom,
+       .get_module_info            = txgbe_get_module_info,
+       .get_module_eeprom          = txgbe_get_module_eeprom,
+       .get_dcb_info               = txgbe_dev_get_dcb_info,
+       .timesync_adjust_time       = txgbe_timesync_adjust_time,
+       .timesync_read_time         = txgbe_timesync_read_time,
+       .timesync_write_time        = txgbe_timesync_write_time,
+       .udp_tunnel_port_add        = txgbe_dev_udp_tunnel_port_add,
+       .udp_tunnel_port_del        = txgbe_dev_udp_tunnel_port_del,
+       .tm_ops_get                 = txgbe_tm_ops_get,
+       .tx_done_cleanup            = txgbe_dev_tx_done_cleanup,
 };
 
 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);