mbuf: mark old VLAN offload flags as deprecated
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 5f1ba8f..0b0f9db 100644
@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2020
+ * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd.
+ * Copyright(c) 2010-2017 Intel Corporation
  */
 
 #include <stdio.h>
@@ -7,7 +8,7 @@
 #include <stdint.h>
 #include <string.h>
 #include <rte_common.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_pci.h>
 
 #include <rte_interrupts.h>
 #include <rte_log.h>
@@ -16,6 +17,7 @@
 #include <rte_memory.h>
 #include <rte_eal.h>
 #include <rte_alarm.h>
+#include <rte_kvargs.h>
 
 #include "txgbe_logs.h"
 #include "base/txgbe.h"
@@ -88,6 +90,8 @@ static const struct reg_info *txgbe_regs_others[] = {
                                txgbe_regs_diagnostic,
                                NULL};
 
+static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
+static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
 static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
 static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
 static int  txgbe_dev_set_link_up(struct rte_eth_dev *dev);
@@ -103,8 +107,10 @@ static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
 static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
 static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
+static int txgbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
 static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
-static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
+static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev,
+                                     struct rte_intr_handle *handle);
 static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
                                      struct rte_intr_handle *handle);
 static void txgbe_dev_interrupt_handler(void *param);
@@ -112,6 +118,7 @@ static void txgbe_dev_interrupt_delayed_handler(void *param);
 static void txgbe_configure_msix(struct rte_eth_dev *dev);
 
 static int txgbe_filter_restore(struct rte_eth_dev *dev);
+static void txgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
 
 #define TXGBE_SET_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
@@ -135,8 +142,8 @@ static int txgbe_filter_restore(struct rte_eth_dev *dev);
  * The set of PCI devices this driver supports
  */
 static const struct rte_pci_id pci_id_txgbe_map[] = {
-       { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
-       { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
+       { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_SP1000) },
+       { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820) },
        { .vendor_id = 0, /* sentinel */ },
 };
 
@@ -466,6 +473,71 @@ txgbe_swfw_lock_reset(struct txgbe_hw *hw)
        hw->mac.release_swfw_sync(hw, mask);
 }
 
+static int
+txgbe_handle_devarg(__rte_unused const char *key, const char *value,
+                 void *extra_args)
+{
+       uint16_t *n = extra_args;
+
+       if (value == NULL || extra_args == NULL)
+               return -EINVAL;
+
+       *n = (uint16_t)strtoul(value, NULL, 10);
+       if (*n == USHRT_MAX && errno == ERANGE)
+               return -1;
+
+       return 0;
+}
+
+static void
+txgbe_parse_devargs(struct txgbe_hw *hw, struct rte_devargs *devargs)
+{
+       struct rte_kvargs *kvlist;
+       u16 auto_neg = 1;
+       u16 poll = 0;
+       u16 present = 1;
+       u16 sgmii = 0;
+       u16 ffe_set = 0;
+       u16 ffe_main = 27;
+       u16 ffe_pre = 8;
+       u16 ffe_post = 44;
+
+       if (devargs == NULL)
+               goto null;
+
+       kvlist = rte_kvargs_parse(devargs->args, txgbe_valid_arguments);
+       if (kvlist == NULL)
+               goto null;
+
+       rte_kvargs_process(kvlist, TXGBE_DEVARG_BP_AUTO,
+                          &txgbe_handle_devarg, &auto_neg);
+       rte_kvargs_process(kvlist, TXGBE_DEVARG_KR_POLL,
+                          &txgbe_handle_devarg, &poll);
+       rte_kvargs_process(kvlist, TXGBE_DEVARG_KR_PRESENT,
+                          &txgbe_handle_devarg, &present);
+       rte_kvargs_process(kvlist, TXGBE_DEVARG_KX_SGMII,
+                          &txgbe_handle_devarg, &sgmii);
+       rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_SET,
+                          &txgbe_handle_devarg, &ffe_set);
+       rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_MAIN,
+                          &txgbe_handle_devarg, &ffe_main);
+       rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_PRE,
+                          &txgbe_handle_devarg, &ffe_pre);
+       rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_POST,
+                          &txgbe_handle_devarg, &ffe_post);
+       rte_kvargs_free(kvlist);
+
+null:
+       hw->devarg.auto_neg = auto_neg;
+       hw->devarg.poll = poll;
+       hw->devarg.present = present;
+       hw->devarg.sgmii = sgmii;
+       hw->phy.ffe_set = ffe_set;
+       hw->phy.ffe_main = ffe_main;
+       hw->phy.ffe_pre = ffe_pre;
+       hw->phy.ffe_post = ffe_post;
+}
+
 static int
 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 {
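
Note: the new devargs are parsed with rte_kvargs; all keys share the single
txgbe_handle_devarg() callback because every value is a small unsigned
integer, and the defaults above apply whenever a key (or the whole devargs
string) is absent. A usage sketch, assuming the TXGBE_DEVARG_* macros expand
to the key names "auto_neg", "poll", "present", "sgmii" and "ffe_*" (PCI
address hypothetical):

    dpdk-testpmd -a 0000:01:00.0,auto_neg=1,poll=0,present=1,ffe_set=0 -- -i
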
@@ -534,6 +606,7 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
        hw->isb_dma = TMZ_PADDR(mz);
        hw->isb_mem = TMZ_VADDR(mz);
 
+       txgbe_parse_devargs(hw, pci_dev->device.devargs);
        /* Initialize the shared code (base driver) */
        err = txgbe_init_shared_code(hw);
        if (err != 0) {
@@ -544,6 +617,12 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
        /* Unlock any pending hardware semaphore */
        txgbe_swfw_lock_reset(hw);
 
+#ifdef RTE_LIB_SECURITY
+       /* Initialize security_ctx only for primary process */
+       if (txgbe_ipsec_ctx_create(eth_dev))
+               return -ENOMEM;
+#endif
+
        /* Initialize DCB configuration*/
        memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
        txgbe_dcb_init(hw, dcb_config);
@@ -689,12 +768,21 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
        /* initialize 5tuple filter list */
        TAILQ_INIT(&filter_info->fivetuple_list);
 
+       /* initialize flow director filter list & hash */
+       txgbe_fdir_filter_init(eth_dev);
+
        /* initialize l2 tunnel filter list & hash */
        txgbe_l2_tn_filter_init(eth_dev);
 
+       /* initialize flow filter lists */
+       txgbe_filterlist_init();
+
        /* initialize bandwidth configuration info */
        memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
 
+       /* initialize Traffic Manager configuration */
+       txgbe_tm_conf_init(eth_dev);
+
        return 0;
 }
 
@@ -728,6 +816,26 @@ static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
        return 0;
 }
 
+static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+       struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
+       struct txgbe_fdir_filter *fdir_filter;
+
+       if (fdir_info->hash_map)
+               rte_free(fdir_info->hash_map);
+       if (fdir_info->hash_handle)
+               rte_hash_free(fdir_info->hash_handle);
+
+       while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+               TAILQ_REMOVE(&fdir_info->fdir_list,
+                            fdir_filter,
+                            entries);
+               rte_free(fdir_filter);
+       }
+
+       return 0;
+}
+
 static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
 {
        struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
@@ -748,6 +856,41 @@ static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
        return 0;
 }
 
+static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
+{
+       struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
+       char fdir_hash_name[RTE_HASH_NAMESIZE];
+       struct rte_hash_parameters fdir_hash_params = {
+               .name = fdir_hash_name,
+               .entries = TXGBE_MAX_FDIR_FILTER_NUM,
+               .key_len = sizeof(struct txgbe_atr_input),
+               .hash_func = rte_hash_crc,
+               .hash_func_init_val = 0,
+               .socket_id = rte_socket_id(),
+       };
+
+       TAILQ_INIT(&fdir_info->fdir_list);
+       snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+                "fdir_%s", TDEV_NAME(eth_dev));
+       fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
+       if (!fdir_info->hash_handle) {
+               PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+               return -EINVAL;
+       }
+       fdir_info->hash_map = rte_zmalloc("txgbe",
+                                         sizeof(struct txgbe_fdir_filter *) *
+                                         TXGBE_MAX_FDIR_FILTER_NUM,
+                                         0);
+       if (!fdir_info->hash_map) {
+               PMD_INIT_LOG(ERR,
+                            "Failed to allocate memory for fdir hash map!");
+               return -ENOMEM;
+       }
+       fdir_info->mask_added = FALSE;
+
+       return 0;
+}
+
 static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
 {
        struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
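
Note: the flow director table pairs an rte_hash keyed on struct
txgbe_atr_input with a flat hash_map[] of filter pointers, the same pattern
the l2_tn code uses. A minimal lookup sketch under that assumption (the
helper below is hypothetical, not part of this hunk):

    static struct txgbe_fdir_filter *
    txgbe_fdir_filter_lookup(struct txgbe_hw_fdir_info *fdir_info,
                             struct txgbe_atr_input *input)
    {
            /* rte_hash_lookup() returns the key's slot index (>= 0) on a
             * hit; that slot indexes the parallel hash_map[] array. */
            int ret = rte_hash_lookup(fdir_info->hash_handle,
                                      (const void *)input);
            if (ret < 0)
                    return NULL;
            return fdir_info->hash_map[ret];
    }
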
@@ -789,32 +932,10 @@ static int
 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
 {
-       struct rte_eth_dev *pf_ethdev;
-       struct rte_eth_devargs eth_da;
-       int retval;
-
-       if (pci_dev->device.devargs) {
-               retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
-                               &eth_da);
-               if (retval)
-                       return retval;
-       } else {
-               memset(&eth_da, 0, sizeof(eth_da));
-       }
-
-       retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
+       return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                        sizeof(struct txgbe_adapter),
                        eth_dev_pci_specific_init, pci_dev,
                        eth_txgbe_dev_init, NULL);
-
-       if (retval || eth_da.nb_representor_ports < 1)
-               return retval;
-
-       pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
-       if (pf_ethdev == NULL)
-               return -ENODEV;
-
-       return 0;
 }
 
 static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
@@ -823,7 +944,7 @@ static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
 
        ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (!ethdev)
-               return -ENODEV;
+               return 0;
 
        return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
 }
@@ -877,7 +998,7 @@ txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
        rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
        rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
        rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
-       if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+       if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
                restart = (rxcfg & TXGBE_RXCFG_ENA) &&
                        !(rxcfg & TXGBE_RXCFG_VLAN);
                rxcfg |= TXGBE_RXCFG_VLAN;
@@ -912,7 +1033,7 @@ txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
        vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
        qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
        switch (vlan_type) {
-       case ETH_VLAN_TYPE_INNER:
+       case RTE_ETH_VLAN_TYPE_INNER:
                if (vlan_ext) {
                        wr32m(hw, TXGBE_VLANCTL,
                                TXGBE_VLANCTL_TPID_MASK,
@@ -932,7 +1053,7 @@ txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
                                TXGBE_TAGTPID_LSB(tpid));
                }
                break;
-       case ETH_VLAN_TYPE_OUTER:
+       case RTE_ETH_VLAN_TYPE_OUTER:
                if (vlan_ext) {
                        /* Only the high 16-bits is valid */
                        wr32m(hw, TXGBE_EXTAG,
@@ -1017,10 +1138,10 @@ txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 
        if (on) {
                rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-               rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+               rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        } else {
                rxq->vlan_flags = PKT_RX_VLAN;
-               rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+               rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        }
 }
 
@@ -1066,7 +1187,6 @@ txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
 
        ctrl = rd32(hw, TXGBE_PORTCTL);
        ctrl &= ~TXGBE_PORTCTL_VLANEXT;
-       ctrl &= ~TXGBE_PORTCTL_QINQ;
        wr32(hw, TXGBE_PORTCTL, ctrl);
 }
 
@@ -1074,17 +1194,38 @@ static void
 txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 {
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
-       struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-       struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
        uint32_t ctrl;
 
        PMD_INIT_FUNC_TRACE();
 
        ctrl  = rd32(hw, TXGBE_PORTCTL);
        ctrl |= TXGBE_PORTCTL_VLANEXT;
-       if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP ||
-           txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
-               ctrl |= TXGBE_PORTCTL_QINQ;
+       wr32(hw, TXGBE_PORTCTL, ctrl);
+}
+
+static void
+txgbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t ctrl;
+
+       PMD_INIT_FUNC_TRACE();
+
+       ctrl = rd32(hw, TXGBE_PORTCTL);
+       ctrl &= ~TXGBE_PORTCTL_QINQ;
+       wr32(hw, TXGBE_PORTCTL, ctrl);
+}
+
+static void
+txgbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t ctrl;
+
+       PMD_INIT_FUNC_TRACE();
+
+       ctrl  = rd32(hw, TXGBE_PORTCTL);
+       ctrl |= TXGBE_PORTCTL_QINQ | TXGBE_PORTCTL_VLANEXT;
        wr32(hw, TXGBE_PORTCTL, ctrl);
 }
 
@@ -1099,7 +1240,7 @@ txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
 
-               if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+               if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        txgbe_vlan_strip_queue_set(dev, i, 1);
                else
                        txgbe_vlan_strip_queue_set(dev, i, 0);
@@ -1113,17 +1254,17 @@ txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
        struct rte_eth_rxmode *rxmode;
        struct txgbe_rx_queue *rxq;
 
-       if (mask & ETH_VLAN_STRIP_MASK) {
+       if (mask & RTE_ETH_VLAN_STRIP_MASK) {
                rxmode = &dev->data->dev_conf.rxmode;
-               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
-                               rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+                               rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
                        }
                else
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
-                               rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+                               rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
                        }
        }
 }
@@ -1134,23 +1275,30 @@ txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
        struct rte_eth_rxmode *rxmode;
        rxmode = &dev->data->dev_conf.rxmode;
 
-       if (mask & ETH_VLAN_STRIP_MASK)
+       if (mask & RTE_ETH_VLAN_STRIP_MASK)
                txgbe_vlan_hw_strip_config(dev);
 
-       if (mask & ETH_VLAN_FILTER_MASK) {
-               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+       if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                        txgbe_vlan_hw_filter_enable(dev);
                else
                        txgbe_vlan_hw_filter_disable(dev);
        }
 
-       if (mask & ETH_VLAN_EXTEND_MASK) {
-               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+       if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
                        txgbe_vlan_hw_extend_enable(dev);
                else
                        txgbe_vlan_hw_extend_disable(dev);
        }
 
+       if (mask & RTE_ETH_QINQ_STRIP_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+                       txgbe_qinq_hw_strip_enable(dev);
+               else
+                       txgbe_qinq_hw_strip_disable(dev);
+       }
+
        return 0;
 }
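
Note: QinQ stripping is now driven by its own RTE_ETH_QINQ_STRIP_MASK branch
instead of being folded into the VLAN-extend path. An application would
toggle it through the standard VLAN offload API, e.g. (port assumed already
configured):

    /* read the current offload mask, enable QinQ strip, write it back */
    int mask = rte_eth_dev_get_vlan_offload(port_id);
    mask |= RTE_ETH_QINQ_STRIP_OFFLOAD;
    rte_eth_dev_set_vlan_offload(port_id, mask);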
 
@@ -1183,10 +1331,10 @@ txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
        switch (nb_rx_q) {
        case 1:
        case 2:
-               RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+               RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
                break;
        case 4:
-               RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+               RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
                break;
        default:
                return -EINVAL;
@@ -1209,18 +1357,18 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
        if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
                /* check multi-queue mode */
                switch (dev_conf->rxmode.mq_mode) {
-               case ETH_MQ_RX_VMDQ_DCB:
-                       PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+               case RTE_ETH_MQ_RX_VMDQ_DCB:
+                       PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
                        break;
-               case ETH_MQ_RX_VMDQ_DCB_RSS:
+               case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* DCB/RSS VMDQ in SRIOV mode, not implement yet */
                        PMD_INIT_LOG(ERR, "SRIOV active,"
                                        " unsupported mq_mode rx %d.",
                                        dev_conf->rxmode.mq_mode);
                        return -EINVAL;
-               case ETH_MQ_RX_RSS:
-               case ETH_MQ_RX_VMDQ_RSS:
-                       dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+               case RTE_ETH_MQ_RX_RSS:
+               case RTE_ETH_MQ_RX_VMDQ_RSS:
+                       dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
                        if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
                                if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
                                        PMD_INIT_LOG(ERR, "SRIOV is active,"
@@ -1230,13 +1378,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
                                        return -EINVAL;
                                }
                        break;
-               case ETH_MQ_RX_VMDQ_ONLY:
-               case ETH_MQ_RX_NONE:
+               case RTE_ETH_MQ_RX_VMDQ_ONLY:
+               case RTE_ETH_MQ_RX_NONE:
                        /* if nothing mq mode configure, use default scheme */
                        dev->data->dev_conf.rxmode.mq_mode =
-                               ETH_MQ_RX_VMDQ_ONLY;
+                               RTE_ETH_MQ_RX_VMDQ_ONLY;
                        break;
-               default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+               default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB */
                        /* SRIOV only works in VMDq enable mode */
                        PMD_INIT_LOG(ERR, "SRIOV is active,"
                                        " wrong mq_mode rx %d.",
@@ -1245,13 +1393,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
                }
 
                switch (dev_conf->txmode.mq_mode) {
-               case ETH_MQ_TX_VMDQ_DCB:
-                       PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
-                       dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+               case RTE_ETH_MQ_TX_VMDQ_DCB:
+                       PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+                       dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
                        break;
-               default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+               default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */
                        dev->data->dev_conf.txmode.mq_mode =
-                               ETH_MQ_TX_VMDQ_ONLY;
+                               RTE_ETH_MQ_TX_VMDQ_ONLY;
                        break;
                }
 
@@ -1266,13 +1414,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
                        return -EINVAL;
                }
        } else {
-               if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+               if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
                        PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
                                          " not supported.");
                        return -EINVAL;
                }
                /* check configuration for vmdb+dcb mode */
-               if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+               if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_conf *conf;
 
                        if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -1281,15 +1429,15 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
                                return -EINVAL;
                        }
                        conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
-                       if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-                              conf->nb_queue_pools == ETH_32_POOLS)) {
+                       if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+                              conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
                                PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
                                                " nb_queue_pools must be %d or %d.",
-                                               ETH_16_POOLS, ETH_32_POOLS);
+                                               RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
                                return -EINVAL;
                        }
                }
-               if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+               if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_tx_conf *conf;
 
                        if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -1298,39 +1446,39 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
                                return -EINVAL;
                        }
                        conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
-                       if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-                              conf->nb_queue_pools == ETH_32_POOLS)) {
+                       if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+                              conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
                                PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
                                                " nb_queue_pools != %d and"
                                                " nb_queue_pools != %d.",
-                                               ETH_16_POOLS, ETH_32_POOLS);
+                                               RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
                                return -EINVAL;
                        }
                }
 
                /* For DCB mode check our configuration before we go further */
-               if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+               if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) {
                        const struct rte_eth_dcb_rx_conf *conf;
 
                        conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
-                       if (!(conf->nb_tcs == ETH_4_TCS ||
-                              conf->nb_tcs == ETH_8_TCS)) {
+                       if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+                              conf->nb_tcs == RTE_ETH_8_TCS)) {
                                PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
                                                " and nb_tcs != %d.",
-                                               ETH_4_TCS, ETH_8_TCS);
+                                               RTE_ETH_4_TCS, RTE_ETH_8_TCS);
                                return -EINVAL;
                        }
                }
 
-               if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+               if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
                        const struct rte_eth_dcb_tx_conf *conf;
 
                        conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
-                       if (!(conf->nb_tcs == ETH_4_TCS ||
-                              conf->nb_tcs == ETH_8_TCS)) {
+                       if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+                              conf->nb_tcs == RTE_ETH_8_TCS)) {
                                PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
                                                " and nb_tcs != %d.",
-                                               ETH_4_TCS, ETH_8_TCS);
+                                               RTE_ETH_4_TCS, RTE_ETH_8_TCS);
                                return -EINVAL;
                        }
                }
@@ -1347,8 +1495,8 @@ txgbe_dev_configure(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
 
-       if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-               dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+       if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+               dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
        /* multiple queue mode checking */
        ret  = txgbe_check_mq_mode(dev);
@@ -1381,6 +1529,7 @@ txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
        gpie |= TXGBE_GPIOBIT_6;
        wr32(hw, TXGBE_GPIOINTEN, gpie);
        intr->mask_misc |= TXGBE_ICRMISC_GPIO;
+       intr->mask_misc |= TXGBE_ICRMISC_ANDONE;
 }
 
 int
@@ -1481,20 +1630,10 @@ txgbe_dev_start(struct rte_eth_dev *dev)
        int status;
        uint16_t vf, idx;
        uint32_t *link_speeds;
+       struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
 
        PMD_INIT_FUNC_TRACE();
 
-       /* TXGBE devices don't support:
-        *    - half duplex (checked afterwards for valid speeds)
-        *    - fixed speed: TODO implement
-        */
-       if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
-               PMD_INIT_LOG(ERR,
-               "Invalid link_speeds for port %u, fix speed not supported",
-                               dev->data->port_id);
-               return -EINVAL;
-       }
-
        /* Stop the link setup handler before resetting the HW. */
        rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
 
@@ -1515,6 +1654,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
                return -1;
        hw->mac.start_hw(hw);
        hw->mac.get_link_status = true;
+       hw->dev_start = true;
 
        /* configure PF module if SRIOV enabled */
        txgbe_pf_host_configure(dev);
@@ -1554,15 +1694,15 @@ txgbe_dev_start(struct rte_eth_dev *dev)
                goto error;
        }
 
-       mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-               ETH_VLAN_EXTEND_MASK;
+       mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+               RTE_ETH_VLAN_EXTEND_MASK;
        err = txgbe_vlan_offload_config(dev, mask);
        if (err) {
                PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
                goto error;
        }
 
-       if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+       if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
                /* Enable vlan filtering for VMDq */
                txgbe_vmdq_vlan_hw_filter_enable(dev);
        }
@@ -1572,6 +1712,12 @@ txgbe_dev_start(struct rte_eth_dev *dev)
        txgbe_configure_port(dev);
        txgbe_configure_dcb(dev);
 
+       if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+               err = txgbe_fdir_configure(dev);
+               if (err)
+                       goto error;
+       }
+
        /* Restore vf rate limit */
        if (vfinfo != NULL) {
                for (vf = 0; vf < pci_dev->max_vfs; vf++)
@@ -1607,7 +1753,8 @@ txgbe_dev_start(struct rte_eth_dev *dev)
                hw->mac.enable_tx_laser(hw);
        }
 
-       err = hw->mac.check_link(hw, &speed, &link_up, 0);
+       if ((hw->subsystem_device_id & 0xFF) != TXGBE_DEV_ID_KR_KX_KX4)
+               err = hw->mac.check_link(hw, &speed, &link_up, 0);
        if (err)
                goto error;
        dev->data->dev_link.link_status = link_up;
@@ -1616,30 +1763,30 @@ txgbe_dev_start(struct rte_eth_dev *dev)
        if (err)
                goto error;
 
-       allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
-                       ETH_LINK_SPEED_10G;
+       allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+                       RTE_ETH_LINK_SPEED_10G;
 
        link_speeds = &dev->data->dev_conf.link_speeds;
-       if (*link_speeds & ~allowed_speeds) {
+       if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
                PMD_INIT_LOG(ERR, "Invalid link setting");
                goto error;
        }
 
        speed = 0x0;
-       if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+       if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
                speed = (TXGBE_LINK_SPEED_100M_FULL |
                         TXGBE_LINK_SPEED_1GB_FULL |
                         TXGBE_LINK_SPEED_10GB_FULL);
        } else {
-               if (*link_speeds & ETH_LINK_SPEED_10G)
+               if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
                        speed |= TXGBE_LINK_SPEED_10GB_FULL;
-               if (*link_speeds & ETH_LINK_SPEED_5G)
+               if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
                        speed |= TXGBE_LINK_SPEED_5GB_FULL;
-               if (*link_speeds & ETH_LINK_SPEED_2_5G)
+               if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
                        speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
-               if (*link_speeds & ETH_LINK_SPEED_1G)
+               if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
                        speed |= TXGBE_LINK_SPEED_1GB_FULL;
-               if (*link_speeds & ETH_LINK_SPEED_100M)
+               if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
                        speed |= TXGBE_LINK_SPEED_100M_FULL;
        }
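
Note: the fixed-speed rejection removed earlier in txgbe_dev_start() is
superseded by the shifted comparison above: RTE_ETH_LINK_SPEED_FIXED is bit 0
of link_speeds, so shifting both masks right by one validates only the real
speed bits. For example:

    /* accepted now: fixed 10G */
    link_speeds = RTE_ETH_LINK_SPEED_FIXED | RTE_ETH_LINK_SPEED_10G;
    /* still rejected: 40G is not in allowed_speeds */
    link_speeds = RTE_ETH_LINK_SPEED_40G;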
 
@@ -1650,6 +1797,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 skip_link_setup:
 
        if (rte_intr_allow_others(intr_handle)) {
+               txgbe_dev_misc_interrupt_setup(dev);
                /* check if lsc interrupt is enabled */
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        txgbe_dev_lsc_interrupt_setup(dev, TRUE);
@@ -1675,8 +1823,14 @@ skip_link_setup:
 
        /* resume enabled intr since hw reset */
        txgbe_enable_intr(dev);
+       txgbe_l2_tunnel_conf(dev);
        txgbe_filter_restore(dev);
 
+       if (tm_conf->root && !tm_conf->committed)
+               PMD_DRV_LOG(WARNING,
+                           "please call hierarchy_commit() "
+                           "before starting the port");
+
        /*
         * Update link status right before return, because it may
         * start link configuration process in a separate thread.
@@ -1709,6 +1863,7 @@ txgbe_dev_stop(struct rte_eth_dev *dev)
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        int vf;
+       struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
 
        if (hw->adapter_stopped)
                return 0;
@@ -1761,11 +1916,15 @@ txgbe_dev_stop(struct rte_eth_dev *dev)
                intr_handle->intr_vec = NULL;
        }
 
+       /* reset hierarchy commit */
+       tm_conf->committed = false;
+
        adapter->rss_reta_updated = 0;
        wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
 
        hw->adapter_stopped = true;
        dev->data->dev_started = 0;
+       hw->dev_start = false;
 
        return 0;
 }
@@ -1864,12 +2023,25 @@ txgbe_dev_close(struct rte_eth_dev *dev)
        rte_free(dev->data->hash_mac_addrs);
        dev->data->hash_mac_addrs = NULL;
 
+       /* remove all the fdir filters & hash */
+       txgbe_fdir_filter_uninit(dev);
+
        /* remove all the L2 tunnel filters & hash */
        txgbe_l2_tn_filter_uninit(dev);
 
        /* Remove all ntuple filters of the device */
        txgbe_ntuple_filter_uninit(dev);
 
+       /* clear all the filters list */
+       txgbe_filterlist_flush();
+
+       /* Remove all Traffic Manager configuration */
+       txgbe_tm_conf_uninit(dev);
+
+#ifdef RTE_LIB_SECURITY
+       rte_free(dev->security_ctx);
+#endif
+
        return ret;
 }
 
@@ -1977,6 +2149,7 @@ txgbe_read_stats_registers(struct txgbe_hw *hw,
 
        hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
        hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
+       hw_stats->rx_dma_drop += rd32(hw, TXGBE_DMARXDROP);
        hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);
 
        /* MAC Stats */
@@ -2125,7 +2298,8 @@ txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        }
 
        /* Rx Errors */
-       stats->imissed  = hw_stats->rx_total_missed_packets;
+       stats->imissed  = hw_stats->rx_total_missed_packets +
+                         hw_stats->rx_dma_drop;
        stats->ierrors  = hw_stats->rx_crc_errors +
                          hw_stats->rx_mac_short_packet_dropped +
                          hw_stats->rx_length_errors +
@@ -2267,8 +2441,8 @@ static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
 }
 
 static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
-       struct rte_eth_xstat_name *xstats_names,
        const uint64_t *ids,
+       struct rte_eth_xstat_name *xstats_names,
        unsigned int limit)
 {
        unsigned int i;
@@ -2398,18 +2572,17 @@ static int
 txgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
 {
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
-       u16 eeprom_verh, eeprom_verl;
        u32 etrack_id;
        int ret;
 
-       hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_H, &eeprom_verh);
-       hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_L, &eeprom_verl);
+       hw->phy.get_fw_version(hw, &etrack_id);
 
-       etrack_id = (eeprom_verh << 16) | eeprom_verl;
        ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
+       if (ret < 0)
+               return -EINVAL;
 
        ret += 1; /* add the size of '\0' */
-       if (fw_size < (u32)ret)
+       if (fw_size < (size_t)ret)
                return ret;
        else
                return 0;
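
Note: snprintf() returns the formatted length excluding the terminating NUL
(negative only on error, which is now handled), so ret + 1 is the buffer size
the caller needs. Returning that size when fw_size is too small follows the
rte_eth_dev_fw_version_get() contract:

    char fw[32];
    int need = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
    if (need > 0)
            printf("buffer too small, %d bytes required\n", need);
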
@@ -2428,7 +2601,7 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->max_mac_addrs = hw->mac.num_rar_entries;
        dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
        dev_info->max_vfs = pci_dev->max_vfs;
-       dev_info->max_vmdq_pools = ETH_64_POOLS;
+       dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
        dev_info->vmdq_queue_num = dev_info->max_rx_queues;
        dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
        dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
@@ -2461,11 +2634,11 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->tx_desc_lim = tx_desc_lim;
 
        dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
-       dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+       dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
        dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
 
-       dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
-       dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+       dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+       dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
 
        /* Driver-preferred Rx/Tx parameters */
        dev_info->default_rxportconf.burst_size = 32;
@@ -2522,10 +2695,11 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
        int wait = 1;
 
        memset(&link, 0, sizeof(link));
-       link.link_status = ETH_LINK_DOWN;
-       link.link_speed = ETH_SPEED_NUM_NONE;
-       link.link_duplex = ETH_LINK_HALF_DUPLEX;
-       link.link_autoneg = ETH_LINK_AUTONEG;
+       link.link_status = RTE_ETH_LINK_DOWN;
+       link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+       link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+       link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+                       RTE_ETH_LINK_AUTONEG);
 
        hw->mac.get_link_status = true;
 
@@ -2539,49 +2713,54 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
        err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
 
        if (err != 0) {
-               link.link_speed = ETH_SPEED_NUM_100M;
-               link.link_duplex = ETH_LINK_FULL_DUPLEX;
+               link.link_speed = RTE_ETH_SPEED_NUM_100M;
+               link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
                return rte_eth_linkstatus_set(dev, &link);
        }
 
        if (link_up == 0) {
-               if (hw->phy.media_type == txgbe_media_type_fiber) {
+               if ((hw->subsystem_device_id & 0xFF) ==
+                               TXGBE_DEV_ID_KR_KX_KX4) {
+                       hw->mac.bp_down_event(hw);
+               } else if (hw->phy.media_type == txgbe_media_type_fiber) {
                        intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
                        rte_eal_alarm_set(10,
                                txgbe_dev_setup_link_alarm_handler, dev);
                }
                return rte_eth_linkstatus_set(dev, &link);
+       } else if (!hw->dev_start) {
+               return rte_eth_linkstatus_set(dev, &link);
        }
 
        intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
-       link.link_status = ETH_LINK_UP;
-       link.link_duplex = ETH_LINK_FULL_DUPLEX;
+       link.link_status = RTE_ETH_LINK_UP;
+       link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
        switch (link_speed) {
        default:
        case TXGBE_LINK_SPEED_UNKNOWN:
-               link.link_duplex = ETH_LINK_FULL_DUPLEX;
-               link.link_speed = ETH_SPEED_NUM_100M;
+               link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+               link.link_speed = RTE_ETH_SPEED_NUM_100M;
                break;
 
        case TXGBE_LINK_SPEED_100M_FULL:
-               link.link_speed = ETH_SPEED_NUM_100M;
+               link.link_speed = RTE_ETH_SPEED_NUM_100M;
                break;
 
        case TXGBE_LINK_SPEED_1GB_FULL:
-               link.link_speed = ETH_SPEED_NUM_1G;
+               link.link_speed = RTE_ETH_SPEED_NUM_1G;
                break;
 
        case TXGBE_LINK_SPEED_2_5GB_FULL:
-               link.link_speed = ETH_SPEED_NUM_2_5G;
+               link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
                break;
 
        case TXGBE_LINK_SPEED_5GB_FULL:
-               link.link_speed = ETH_SPEED_NUM_5G;
+               link.link_speed = RTE_ETH_SPEED_NUM_5G;
                break;
 
        case TXGBE_LINK_SPEED_10GB_FULL:
-               link.link_speed = ETH_SPEED_NUM_10G;
+               link.link_speed = RTE_ETH_SPEED_NUM_10G;
                break;
        }
 
@@ -2680,6 +2859,20 @@ txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
        return 0;
 }
 
+static int
+txgbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
+{
+       struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+       u64 mask;
+
+       mask = TXGBE_ICR_MASK;
+       mask &= (1ULL << TXGBE_MISC_VEC_ID);
+       intr->mask |= mask;
+       intr->mask_misc |= TXGBE_ICRMISC_GPIO;
+       intr->mask_misc |= TXGBE_ICRMISC_ANDONE;
+       return 0;
+}
+
 /**
  * It clears the interrupt causes and enables the interrupt.
  * It will be called once only during nic initialized.
@@ -2695,9 +2888,11 @@ static int
 txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
 {
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+       u64 mask;
 
-       intr->mask[0] |= TXGBE_ICR_MASK;
-       intr->mask[1] |= TXGBE_ICR_MASK;
+       mask = TXGBE_ICR_MASK;
+       mask &= ~((1ULL << TXGBE_RX_VEC_START) - 1);
+       intr->mask |= mask;
 
        return 0;
 }
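
Note: intr->mask is now a single 64-bit vector bitmap shared by the misc and
Rx/Tx vectors. Assuming the usual vector layout (TXGBE_MISC_VEC_ID == 0 and
TXGBE_RX_VEC_START == 1), the two setup helpers partition TXGBE_ICR_MASK:

    /* misc setup keeps only the misc vector bit ...           */
    mask = TXGBE_ICR_MASK & (1ULL << TXGBE_MISC_VEC_ID);
    /* ... rxq setup keeps the bits from TXGBE_RX_VEC_START up */
    mask = TXGBE_ICR_MASK & ~((1ULL << TXGBE_RX_VEC_START) - 1);
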
@@ -2734,12 +2929,17 @@ txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
  *  - On failure, a negative value.
  */
 static int
-txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
+txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev,
+                               struct rte_intr_handle *intr_handle)
 {
        uint32_t eicr;
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
 
+       if (intr_handle->type != RTE_INTR_HANDLE_UIO &&
+                       intr_handle->type != RTE_INTR_HANDLE_VFIO_MSIX)
+               wr32(hw, TXGBE_PX_INTA, 1);
+
        /* clear all cause mask */
        txgbe_disable_intr(hw);
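
Note: the new PX_INTA write acknowledges a legacy INTx interrupt before the
cause registers are read; it is skipped for UIO and VFIO-MSIX handles, which
do not need that ack.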
 
@@ -2753,6 +2953,9 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
        if (eicr & TXGBE_ICRMISC_LSC)
                intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
 
+       if (eicr & TXGBE_ICRMISC_ANDONE)
+               intr->flags |= TXGBE_FLAG_NEED_AN_CONFIG;
+
        if (eicr & TXGBE_ICRMISC_VFMBX)
                intr->flags |= TXGBE_FLAG_MAILBOX;
 
@@ -2787,7 +2990,7 @@ txgbe_dev_link_status_print(struct rte_eth_dev *dev)
                PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
                                        (int)(dev->data->port_id),
                                        (unsigned int)link.link_speed,
-                       link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+                       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
                                        "full-duplex" : "half-duplex");
        } else {
                PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -2830,6 +3033,13 @@ txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
                intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
        }
 
+       if (intr->flags & TXGBE_FLAG_NEED_AN_CONFIG) {
+               if (hw->devarg.auto_neg == 1 && hw->devarg.poll == 0) {
+                       hw->mac.kr_handle(hw);
+                       intr->flags &= ~TXGBE_FLAG_NEED_AN_CONFIG;
+               }
+       }
+
        if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
                struct rte_eth_link link;
 
@@ -2843,6 +3053,11 @@ txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
                        /* handle it 1 sec later, wait it being stable */
                        timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
                /* likely to down */
+               else if ((hw->subsystem_device_id & 0xFF) ==
+                               TXGBE_DEV_ID_KR_KX_KX4 &&
+                               hw->devarg.auto_neg == 1)
+                       /* handle it 2 sec later for backplane AN73 */
+                       timeout = 2000;
                else
                        /* handle it 4 sec later, wait it being stable */
                        timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
@@ -2853,10 +3068,12 @@ txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
                                      (void *)dev) < 0) {
                        PMD_DRV_LOG(ERR, "Error setting alarm");
                } else {
-                       /* remember original mask */
-                       intr->mask_misc_orig = intr->mask_misc;
                        /* only disable lsc interrupt */
                        intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
+
+                       intr->mask_orig = intr->mask;
+                       /* only disable all misc interrupts */
+                       intr->mask &= ~(1ULL << TXGBE_MISC_VEC_ID);
                }
        }
 
@@ -2917,8 +3134,10 @@ txgbe_dev_interrupt_delayed_handler(void *param)
        }
 
        /* restore original mask */
-       intr->mask_misc = intr->mask_misc_orig;
-       intr->mask_misc_orig = 0;
+       intr->mask_misc |= TXGBE_ICRMISC_LSC;
+
+       intr->mask = intr->mask_orig;
+       intr->mask_orig = 0;
 
        PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
        txgbe_enable_intr(dev);
@@ -2942,7 +3161,7 @@ txgbe_dev_interrupt_handler(void *param)
 {
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 
-       txgbe_dev_interrupt_get_status(dev);
+       txgbe_dev_interrupt_get_status(dev, dev->intr_handle);
        txgbe_dev_interrupt_action(dev, dev->intr_handle);
 }
 
@@ -3002,13 +3221,13 @@ txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
                tx_pause = 0;
 
        if (rx_pause && tx_pause)
-               fc_conf->mode = RTE_FC_FULL;
+               fc_conf->mode = RTE_ETH_FC_FULL;
        else if (rx_pause)
-               fc_conf->mode = RTE_FC_RX_PAUSE;
+               fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
        else if (tx_pause)
-               fc_conf->mode = RTE_FC_TX_PAUSE;
+               fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
        else
-               fc_conf->mode = RTE_FC_NONE;
+               fc_conf->mode = RTE_ETH_FC_NONE;
 
        return 0;
 }
@@ -3140,21 +3359,21 @@ txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
                return -ENOTSUP;
        }
 
-       if (reta_size != ETH_RSS_RETA_SIZE_128) {
+       if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
                PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
                        "(%d) doesn't match the number hardware can supported "
-                       "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+                       "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
                return -EINVAL;
        }
 
        for (i = 0; i < reta_size; i += 4) {
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
                mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
                if (!mask)
                        continue;
 
-               reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
+               reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2);
                for (j = 0; j < 4; j++) {
                        if (RS8(mask, j, 0x1)) {
                                reta  &= ~(MS32(8 * j, 0xFF));
@@ -3162,7 +3381,7 @@ txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
                                                8 * j, 0xFF);
                        }
                }
-               wr32a(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
+               wr32at(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
        }
        adapter->rss_reta_updated = 1;
 
@@ -3181,21 +3400,21 @@ txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 
        PMD_INIT_FUNC_TRACE();
 
-       if (reta_size != ETH_RSS_RETA_SIZE_128) {
+       if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
                PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
                        "(%d) doesn't match the number hardware can supported "
-                       "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+                       "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
                return -EINVAL;
        }
 
        for (i = 0; i < reta_size; i += 4) {
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
                mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
                if (!mask)
                        continue;
 
-               reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
+               reta = rd32at(hw, TXGBE_REG_RSSTBL, i >> 2);
                for (j = 0; j < 4; j++) {
                        if (RS8(mask, j, 0x1))
                                reta_conf[idx].reta[shift + j] =
@@ -3240,18 +3459,8 @@ static int
 txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
-       struct rte_eth_dev_info dev_info;
        uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
        struct rte_eth_dev_data *dev_data = dev->data;
-       int ret;
-
-       ret = txgbe_dev_info_get(dev, &dev_info);
-       if (ret != 0)
-               return ret;
-
-       /* check that mtu is within the allowed range */
-       if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
-               return -EINVAL;
 
        /* If device is started, refuse mtu that requires the support of
         * scattered packets when this feature has not been enabled before.
@@ -3263,9 +3472,6 @@ txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
                return -EINVAL;
        }
 
-       /* update max frame size */
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
        if (hw->mode)
                wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
                        TXGBE_FRAME_SIZE_MAX);
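
Note: the MTU range check and the max_rx_pkt_len update are dropped because,
as of this ethdev rework, rte_eth_dev_set_mtu() validates the MTU against the
bounds reported in dev_info and tracks the frame size generically, leaving
only driver-specific checks in the PMD. The application-facing call is
unchanged:

    ret = rte_eth_dev_set_mtu(port_id, 9000);  /* e.g. a jumbo MTU */
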
@@ -3370,12 +3576,12 @@ txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
                return -ENOTSUP;
 
        if (on) {
-               for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+               for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
                        uta_info->uta_shadow[i] = ~0;
                        wr32(hw, TXGBE_UCADDRTBL(i), ~0);
                }
        } else {
-               for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+               for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
                        uta_info->uta_shadow[i] = 0;
                        wr32(hw, TXGBE_UCADDRTBL(i), 0);
                }
@@ -3399,15 +3605,15 @@ txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
 {
        uint32_t new_val = orig_val;
 
-       if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+       if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
                new_val |= TXGBE_POOLETHCTL_UTA;
-       if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+       if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
                new_val |= TXGBE_POOLETHCTL_MCHA;
-       if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+       if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
                new_val |= TXGBE_POOLETHCTL_UCHA;
-       if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+       if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
                new_val |= TXGBE_POOLETHCTL_BCA;
-       if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+       if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
                new_val |= TXGBE_POOLETHCTL_MCP;
 
        return new_val;
@@ -3973,27 +4179,11 @@ txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
 }
 
 static int
-txgbe_dev_filter_ctrl(__rte_unused struct rte_eth_dev *dev,
-                    enum rte_filter_type filter_type,
-                    enum rte_filter_op filter_op,
-                    void *arg)
+txgbe_dev_flow_ops_get(__rte_unused struct rte_eth_dev *dev,
+                      const struct rte_flow_ops **ops)
 {
-       int ret = 0;
-
-       switch (filter_type) {
-       case RTE_ETH_FILTER_GENERIC:
-               if (filter_op != RTE_ETH_FILTER_GET)
-                       return -EINVAL;
-               *(const void **)arg = &txgbe_flow_ops;
-               break;
-       default:
-               PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
-                                                       filter_type);
-               ret = -EINVAL;
-               break;
-       }
-
-       return ret;
+       *ops = &txgbe_flow_ops;
+       return 0;
 }
 
 static u8 *
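
Note: the generic filter_ctrl callback (and its RTE_ETH_FILTER_GENERIC /
RTE_ETH_FILTER_GET handshake) is replaced by the dedicated flow_ops_get
callback; ethdev fetches the rte_flow ops directly and applications keep
using the normal rte_flow API, e.g. (attr/pattern/actions elided):

    struct rte_flow_error error;
    struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
                                            actions, &error);
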
@@ -4018,7 +4208,7 @@ txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
 
        hw = TXGBE_DEV_HW(dev);
        mc_addr_list = (u8 *)mc_addr_set;
-       return txgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
+       return hw->mac.update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
                                         txgbe_dev_addr_list_itr, TRUE);
 }
 
@@ -4074,15 +4264,15 @@ txgbe_start_timecounters(struct rte_eth_dev *dev)
        rte_eth_linkstatus_get(dev, &link);
 
        switch (link.link_speed) {
-       case ETH_SPEED_NUM_100M:
+       case RTE_ETH_SPEED_NUM_100M:
                incval = TXGBE_INCVAL_100;
                shift = TXGBE_INCVAL_SHIFT_100;
                break;
-       case ETH_SPEED_NUM_1G:
+       case RTE_ETH_SPEED_NUM_1G:
                incval = TXGBE_INCVAL_1GB;
                shift = TXGBE_INCVAL_SHIFT_1GB;
                break;
-       case ETH_SPEED_NUM_10G:
+       case RTE_ETH_SPEED_NUM_10G:
        default:
                incval = TXGBE_INCVAL_10GB;
                shift = TXGBE_INCVAL_SHIFT_10GB;
@@ -4421,6 +4611,7 @@ txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
 {
        switch (mac_type) {
        case txgbe_mac_raptor:
+       case txgbe_mac_raptor_vf:
                return 1;
        default:
                return 0;
@@ -4437,7 +4628,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
        uint8_t nb_tcs;
        uint8_t i, j;
 
-       if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+       if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
                dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
        else
                dcb_info->nb_tcs = 1;
@@ -4448,7 +4639,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
        if (dcb_config->vt_mode) { /* vt is enabled */
                struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
                                &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
-               for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+               for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
                        dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
                if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
                        for (j = 0; j < nb_tcs; j++) {
@@ -4472,9 +4663,9 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
        } else { /* vt is disabled */
                struct rte_eth_dcb_rx_conf *rx_conf =
                                &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
-               for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+               for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
                        dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
-               if (dcb_info->nb_tcs == ETH_4_TCS) {
+               if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
                        for (i = 0; i < dcb_info->nb_tcs; i++) {
                                dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
                                dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -4487,7 +4678,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
                        dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
                        dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
                        dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
-               } else if (dcb_info->nb_tcs == ETH_8_TCS) {
+               } else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
                        for (i = 0; i < dcb_info->nb_tcs; i++) {
                                dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
                                dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -4517,6 +4708,384 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
        return 0;
 }
 
+/* Update e-tag ether type */
+static int
+txgbe_update_e_tag_eth_type(struct txgbe_hw *hw,
+                           uint16_t ether_type)
+{
+       uint32_t etag_etype;
+
+       etag_etype = rd32(hw, TXGBE_EXTAG);
+       etag_etype &= ~TXGBE_EXTAG_ETAG_MASK;
+       etag_etype |= ether_type;
+       wr32(hw, TXGBE_EXTAG, etag_etype);
+       txgbe_flush(hw);
+
+       return 0;
+}
+
+/* Enable e-tag tunnel */
+static int
+txgbe_e_tag_enable(struct txgbe_hw *hw)
+{
+       uint32_t port_ctl;
+
+       port_ctl = rd32(hw, TXGBE_PORTCTL);
+       port_ctl |= TXGBE_PORTCTL_ETAG;
+       wr32(hw, TXGBE_PORTCTL, port_ctl);
+       txgbe_flush(hw);
+
+       return 0;
+}
+
+static int
+txgbe_e_tag_filter_del(struct rte_eth_dev *dev,
+                      struct txgbe_l2_tunnel_conf *l2_tunnel)
+{
+       int ret = 0;
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t i, rar_entries;
+       uint32_t rar_low, rar_high;
+
+       rar_entries = hw->mac.num_rar_entries;
+
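+       /* Scan the receive address table; entry 0 is skipped (it
+        * typically holds the port MAC address). An E-tag entry is
+        * valid, flagged as E-tag, and stores the tunnel ID in the
+        * low register; clear it and its VMDq pool binding.
+        */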
+       for (i = 1; i < rar_entries; i++) {
+               wr32(hw, TXGBE_ETHADDRIDX, i);
+               rar_high = rd32(hw, TXGBE_ETHADDRH);
+               rar_low  = rd32(hw, TXGBE_ETHADDRL);
+               if ((rar_high & TXGBE_ETHADDRH_VLD) &&
+                   (rar_high & TXGBE_ETHADDRH_ETAG) &&
+                   (TXGBE_ETHADDRL_ETAG(rar_low) ==
+                    l2_tunnel->tunnel_id)) {
+                       wr32(hw, TXGBE_ETHADDRL, 0);
+                       wr32(hw, TXGBE_ETHADDRH, 0);
+
+                       txgbe_clear_vmdq(hw, i, BIT_MASK32);
+
+                       return ret;
+               }
+       }
+
+       return ret;
+}
+
+static int
+txgbe_e_tag_filter_add(struct rte_eth_dev *dev,
+                      struct txgbe_l2_tunnel_conf *l2_tunnel)
+{
+       int ret = 0;
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t i, rar_entries;
+       uint32_t rar_low, rar_high;
+
+       /* One entry for one tunnel. Try to remove potential existing entry. */
+       txgbe_e_tag_filter_del(dev, l2_tunnel);
+
+       rar_entries = hw->mac.num_rar_entries;
+
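+       /* Claim the first free (invalid) entry: bind it to the pool
+        * via VMDq, program the tunnel ID, and mark it VLD | ETAG.
+        */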
+       for (i = 1; i < rar_entries; i++) {
+               wr32(hw, TXGBE_ETHADDRIDX, i);
+               rar_high = rd32(hw, TXGBE_ETHADDRH);
+               if (rar_high & TXGBE_ETHADDRH_VLD)
+                       continue;
+
+               txgbe_set_vmdq(hw, i, l2_tunnel->pool);
+               rar_high = TXGBE_ETHADDRH_VLD | TXGBE_ETHADDRH_ETAG;
+               rar_low = l2_tunnel->tunnel_id;
+
+               wr32(hw, TXGBE_ETHADDRL, rar_low);
+               wr32(hw, TXGBE_ETHADDRH, rar_high);
+
+               return ret;
+       }
+
+       PMD_INIT_LOG(NOTICE, "The E-tag forwarding rule table is full."
+                    " Please remove a rule before adding a new one.");
+       return -EINVAL;
+}
+
+static inline struct txgbe_l2_tn_filter *
+txgbe_l2_tn_filter_lookup(struct txgbe_l2_tn_info *l2_tn_info,
+                         struct txgbe_l2_tn_key *key)
+{
+       int ret;
+
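+       /* rte_hash_lookup() returns the key's slot index on success;
+        * the same index addresses hash_map[].
+        */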
+       ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
+       if (ret < 0)
+               return NULL;
+
+       return l2_tn_info->hash_map[ret];
+}
+
+static inline int
+txgbe_insert_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
+                         struct txgbe_l2_tn_filter *l2_tn_filter)
+{
+       int ret;
+
+       ret = rte_hash_add_key(l2_tn_info->hash_handle,
+                              &l2_tn_filter->key);
+
+       if (ret < 0) {
+               PMD_DRV_LOG(ERR,
+                           "Failed to insert L2 tunnel filter"
+                           " into hash table: %d!",
+                           ret);
+               return ret;
+       }
+
+       l2_tn_info->hash_map[ret] = l2_tn_filter;
+
+       TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+
+       return 0;
+}
+
+static inline int
+txgbe_remove_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
+                         struct txgbe_l2_tn_key *key)
+{
+       int ret;
+       struct txgbe_l2_tn_filter *l2_tn_filter;
+
+       ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
+
+       if (ret < 0) {
+               PMD_DRV_LOG(ERR,
+                           "No such L2 tunnel filter to delete: %d!",
+                           ret);
+               return ret;
+       }
+
+       l2_tn_filter = l2_tn_info->hash_map[ret];
+       l2_tn_info->hash_map[ret] = NULL;
+
+       TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+       rte_free(l2_tn_filter);
+
+       return 0;
+}
+
+/* Add l2 tunnel filter */
+int
+txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
+                              struct txgbe_l2_tunnel_conf *l2_tunnel,
+                              bool restore)
+{
+       int ret;
+       struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+       struct txgbe_l2_tn_key key;
+       struct txgbe_l2_tn_filter *node;
+
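+       /* On restore (e.g. replaying filters after a restart) the node
+        * is already in the software list; only reprogram hardware.
+        */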
+       if (!restore) {
+               key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+               key.tn_id = l2_tunnel->tunnel_id;
+
+               node = txgbe_l2_tn_filter_lookup(l2_tn_info, &key);
+
+               if (node) {
+                       PMD_DRV_LOG(ERR,
+                                   "The L2 tunnel filter already exists!");
+                       return -EINVAL;
+               }
+
+               node = rte_zmalloc("txgbe_l2_tn",
+                                  sizeof(struct txgbe_l2_tn_filter),
+                                  0);
+               if (!node)
+                       return -ENOMEM;
+
+               rte_memcpy(&node->key, &key, sizeof(struct txgbe_l2_tn_key));
+               node->pool = l2_tunnel->pool;
+               ret = txgbe_insert_l2_tn_filter(l2_tn_info, node);
+               if (ret < 0) {
+                       rte_free(node);
+                       return ret;
+               }
+       }
+
+       switch (l2_tunnel->l2_tunnel_type) {
+       case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
+               ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       if (!restore && ret < 0)
+               (void)txgbe_remove_l2_tn_filter(l2_tn_info, &key);
+
+       return ret;
+}
+
+/* Delete l2 tunnel filter */
+int
+txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
+                              struct txgbe_l2_tunnel_conf *l2_tunnel)
+{
+       int ret;
+       struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+       struct txgbe_l2_tn_key key;
+
+       key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+       key.tn_id = l2_tunnel->tunnel_id;
+       ret = txgbe_remove_l2_tn_filter(l2_tn_info, &key);
+       if (ret < 0)
+               return ret;
+
+       switch (l2_tunnel->l2_tunnel_type) {
+       case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
+               ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+static int
+txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
+{
+       int ret = 0;
+       uint32_t ctrl;
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
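+       /* Select E-tag based forwarding in the pool control register;
+        * with 'en' false the mode field is simply cleared back to
+        * its default.
+        */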
+       ctrl = rd32(hw, TXGBE_POOLCTL);
+       ctrl &= ~TXGBE_POOLCTL_MODE_MASK;
+       if (en)
+               ctrl |= TXGBE_PSRPOOL_MODE_ETAG;
+       wr32(hw, TXGBE_POOLCTL, ctrl);
+
+       return ret;
+}
+
+/* Add UDP tunneling port */
+static int
+txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+                             struct rte_eth_udp_tunnel *udp_tunnel)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       int ret = 0;
+
+       if (udp_tunnel == NULL)
+               return -EINVAL;
+
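+       /* Each tunnel type has a single port register, so writing a
+        * new port replaces any previously configured one.
+        */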
+       switch (udp_tunnel->prot_type) {
+       case RTE_ETH_TUNNEL_TYPE_VXLAN:
+               if (udp_tunnel->udp_port == 0) {
+                       PMD_DRV_LOG(ERR, "Adding VxLAN port 0 is not allowed.");
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
+               break;
+       case RTE_ETH_TUNNEL_TYPE_GENEVE:
+               if (udp_tunnel->udp_port == 0) {
+                       PMD_DRV_LOG(ERR, "Adding Geneve port 0 is not allowed.");
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
+               break;
+       case RTE_ETH_TUNNEL_TYPE_TEREDO:
+               if (udp_tunnel->udp_port == 0) {
+                       PMD_DRV_LOG(ERR, "Adding Teredo port 0 is not allowed.");
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
+               break;
+       case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
+               if (udp_tunnel->udp_port == 0) {
+                       PMD_DRV_LOG(ERR, "Adding VxLAN-GPE port 0 is not allowed.");
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_VXLANPORTGPE, udp_tunnel->udp_port);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       txgbe_flush(hw);
+
+       return ret;
+}
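+
+/*
+ * Usage sketch (application side): these callbacks are reached through
+ * the generic ethdev API, e.g. to register the IANA-assigned VXLAN
+ * port:
+ *
+ *     struct rte_eth_udp_tunnel tunnel = {
+ *             .udp_port = 4789,
+ *             .prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN,
+ *     };
+ *     int ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
+ */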
+
+/* Remove UDP tunneling port */
+static int
+txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+                             struct rte_eth_udp_tunnel *udp_tunnel)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       int ret = 0;
+       uint16_t cur_port;
+
+       if (udp_tunnel == NULL)
+               return -EINVAL;
+
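+       /* Deletion must name the currently programmed port; a mismatch
+        * is rejected rather than clearing the register.
+        */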
+       switch (udp_tunnel->prot_type) {
+       case RTE_ETH_TUNNEL_TYPE_VXLAN:
+               cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
+               if (cur_port != udp_tunnel->udp_port) {
+                       PMD_DRV_LOG(ERR, "Port %u does not exist.",
+                                       udp_tunnel->udp_port);
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_VXLANPORT, 0);
+               break;
+       case RTE_ETH_TUNNEL_TYPE_GENEVE:
+               cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
+               if (cur_port != udp_tunnel->udp_port) {
+                       PMD_DRV_LOG(ERR, "Port %u does not exist.",
+                                       udp_tunnel->udp_port);
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_GENEVEPORT, 0);
+               break;
+       case RTE_ETH_TUNNEL_TYPE_TEREDO:
+               cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
+               if (cur_port != udp_tunnel->udp_port) {
+                       PMD_DRV_LOG(ERR, "Port %u does not exist.",
+                                       udp_tunnel->udp_port);
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_TEREDOPORT, 0);
+               break;
+       case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
+               cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORTGPE);
+               if (cur_port != udp_tunnel->udp_port) {
+                       PMD_DRV_LOG(ERR, "Port %u does not exist.",
+                                       udp_tunnel->udp_port);
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_VXLANPORTGPE, 0);
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "Invalid tunnel type");
+               ret = -EINVAL;
+               break;
+       }
+
+       txgbe_flush(hw);
+
+       return ret;
+}
+
 /* restore n-tuple filter */
 static inline void
 txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
@@ -4564,12 +5133,124 @@ txgbe_syn_filter_restore(struct rte_eth_dev *dev)
        }
 }
 
+/* restore L2 tunnel filter */
+static inline void
+txgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
+{
+       struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+       struct txgbe_l2_tn_filter *node;
+       struct txgbe_l2_tunnel_conf l2_tn_conf;
+
+       TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
+               l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
+               l2_tn_conf.tunnel_id      = node->key.tn_id;
+               l2_tn_conf.pool           = node->pool;
+               (void)txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
+       }
+}
+
+/* restore rss filter */
+static inline void
+txgbe_rss_filter_restore(struct rte_eth_dev *dev)
+{
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+       if (filter_info->rss_info.conf.queue_num)
+               txgbe_config_rss_filter(dev,
+                       &filter_info->rss_info, TRUE);
+}
+
 static int
 txgbe_filter_restore(struct rte_eth_dev *dev)
 {
        txgbe_ntuple_filter_restore(dev);
        txgbe_ethertype_filter_restore(dev);
        txgbe_syn_filter_restore(dev);
+       txgbe_fdir_filter_restore(dev);
+       txgbe_l2_tn_filter_restore(dev);
+       txgbe_rss_filter_restore(dev);
+
+       return 0;
+}
+
+static void
+txgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
+{
+       struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+       if (l2_tn_info->e_tag_en)
+               (void)txgbe_e_tag_enable(hw);
+
+       if (l2_tn_info->e_tag_fwd_en)
+               (void)txgbe_e_tag_forwarding_en_dis(dev, 1);
+
+       (void)txgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
+}
+
+/* remove all the n-tuple filters */
+void
+txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
+{
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+       struct txgbe_5tuple_filter *p_5tuple;
+
+       while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
+               txgbe_remove_5tuple_filter(dev, p_5tuple);
+}
+
+/* remove all the ether type filters */
+void
+txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+       int i;
+
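+       /* Entries flagged with .conf were installed by driver
+        * configuration and are expected to survive this cleanup,
+        * so only the remaining entries are removed.
+        */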
+       for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
+               if (filter_info->ethertype_mask & (1 << i) &&
+                   !filter_info->ethertype_filters[i].conf) {
+                       (void)txgbe_ethertype_filter_remove(filter_info,
+                                                           (uint8_t)i);
+                       wr32(hw, TXGBE_ETFLT(i), 0);
+                       wr32(hw, TXGBE_ETCLS(i), 0);
+                       txgbe_flush(hw);
+               }
+       }
+}
+
+/* remove the SYN filter */
+void
+txgbe_clear_syn_filter(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+       if (filter_info->syn_info & TXGBE_SYNCLS_ENA) {
+               filter_info->syn_info = 0;
+
+               wr32(hw, TXGBE_SYNCLS, 0);
+               txgbe_flush(hw);
+       }
+}
+
+/* remove all the L2 tunnel filters */
+int
+txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
+{
+       struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+       struct txgbe_l2_tn_filter *l2_tn_filter;
+       struct txgbe_l2_tunnel_conf l2_tn_conf;
+       int ret = 0;
+
+       while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+               l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
+               l2_tn_conf.tunnel_id      = l2_tn_filter->key.tn_id;
+               l2_tn_conf.pool           = l2_tn_filter->pool;
+               ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
+               if (ret < 0)
+                       return ret;
+       }
 
        return 0;
 }
@@ -4628,7 +5309,7 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
        .reta_query                 = txgbe_dev_rss_reta_query,
        .rss_hash_update            = txgbe_dev_rss_hash_update,
        .rss_hash_conf_get          = txgbe_dev_rss_hash_conf_get,
-       .filter_ctrl                = txgbe_dev_filter_ctrl,
+       .flow_ops_get               = txgbe_dev_flow_ops_get,
        .set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
        .rxq_info_get               = txgbe_rxq_info_get,
        .txq_info_get               = txgbe_txq_info_get,
@@ -4646,23 +5327,36 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
        .timesync_adjust_time       = txgbe_timesync_adjust_time,
        .timesync_read_time         = txgbe_timesync_read_time,
        .timesync_write_time        = txgbe_timesync_write_time,
+       .udp_tunnel_port_add        = txgbe_dev_udp_tunnel_port_add,
+       .udp_tunnel_port_del        = txgbe_dev_udp_tunnel_port_del,
+       .tm_ops_get                 = txgbe_tm_ops_get,
        .tx_done_cleanup            = txgbe_dev_tx_done_cleanup,
 };
 
 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
-
-RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
-RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);
+RTE_PMD_REGISTER_PARAM_STRING(net_txgbe,
+                             TXGBE_DEVARG_BP_AUTO "=<0|1>"
+                             TXGBE_DEVARG_KR_POLL "=<0|1>"
+                             TXGBE_DEVARG_KR_PRESENT "=<0|1>"
+                             TXGBE_DEVARG_KX_SGMII "=<0|1>"
+                             TXGBE_DEVARG_FFE_SET "=<0-4>"
+                             TXGBE_DEVARG_FFE_MAIN "=<uint16>"
+                             TXGBE_DEVARG_FFE_PRE "=<uint16>"
+                             TXGBE_DEVARG_FFE_POST "=<uint16>");
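+
+/*
+ * Example devargs (assuming the TXGBE_DEVARG_* macros expand to
+ * "auto_neg", "poll", "present", "sgmii", "ffe_set", "ffe_main",
+ * "ffe_pre" and "ffe_post", per their definitions earlier in this
+ * file):
+ *     dpdk-testpmd -a 0000:01:00.0,auto_neg=1,ffe_set=1 -- -i
+ */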
+
+RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_init, init, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_driver, driver, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_bp, bp, NOTICE);
 
 #ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
-       RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
+       RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_rx, rx, DEBUG);
 #endif
 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
-       RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
+       RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_tx, tx, DEBUG);
 #endif
 
 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
-       RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
+       RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_tx_free, tx_free, DEBUG);
 #endif