mbuf: mark old VLAN offload flags as deprecated
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 90137d0..0b0f9db 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2020
+ * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd.
+ * Copyright(c) 2010-2017 Intel Corporation
  */
 
 #include <stdio.h>
@@ -16,6 +17,7 @@
 #include <rte_memory.h>
 #include <rte_eal.h>
 #include <rte_alarm.h>
+#include <rte_kvargs.h>
 
 #include "txgbe_logs.h"
 #include "base/txgbe.h"
@@ -105,8 +107,10 @@ static void txgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
 static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
 static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
+static int txgbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
 static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
-static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
+static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev,
+                                     struct rte_intr_handle *handle);
 static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
                                      struct rte_intr_handle *handle);
 static void txgbe_dev_interrupt_handler(void *param);
@@ -138,8 +142,8 @@ static void txgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
  * The set of PCI devices this driver supports
  */
 static const struct rte_pci_id pci_id_txgbe_map[] = {
-       { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_RAPTOR_SFP) },
-       { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820_SFP) },
+       { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_SP1000) },
+       { RTE_PCI_DEVICE(PCI_VENDOR_ID_WANGXUN, TXGBE_DEV_ID_WX1820) },
        { .vendor_id = 0, /* sentinel */ },
 };
 
@@ -469,6 +473,71 @@ txgbe_swfw_lock_reset(struct txgbe_hw *hw)
        hw->mac.release_swfw_sync(hw, mask);
 }
 
+static int
+txgbe_handle_devarg(__rte_unused const char *key, const char *value,
+                 void *extra_args)
+{
+       uint16_t *n = extra_args;
+
+       if (value == NULL || extra_args == NULL)
+               return -EINVAL;
+
+       *n = (uint16_t)strtoul(value, NULL, 10);
+       if (*n == USHRT_MAX && errno == ERANGE)
+               return -1;
+
+       return 0;
+}
+
+static void
+txgbe_parse_devargs(struct txgbe_hw *hw, struct rte_devargs *devargs)
+{
+       struct rte_kvargs *kvlist;
+       u16 auto_neg = 1;
+       u16 poll = 0;
+       u16 present = 1;
+       u16 sgmii = 0;
+       u16 ffe_set = 0;
+       u16 ffe_main = 27;
+       u16 ffe_pre = 8;
+       u16 ffe_post = 44;
+
+       if (devargs == NULL)
+               goto null;
+
+       kvlist = rte_kvargs_parse(devargs->args, txgbe_valid_arguments);
+       if (kvlist == NULL)
+               goto null;
+
+       rte_kvargs_process(kvlist, TXGBE_DEVARG_BP_AUTO,
+                          &txgbe_handle_devarg, &auto_neg);
+       rte_kvargs_process(kvlist, TXGBE_DEVARG_KR_POLL,
+                          &txgbe_handle_devarg, &poll);
+       rte_kvargs_process(kvlist, TXGBE_DEVARG_KR_PRESENT,
+                          &txgbe_handle_devarg, &present);
+       rte_kvargs_process(kvlist, TXGBE_DEVARG_KX_SGMII,
+                          &txgbe_handle_devarg, &sgmii);
+       rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_SET,
+                          &txgbe_handle_devarg, &ffe_set);
+       rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_MAIN,
+                          &txgbe_handle_devarg, &ffe_main);
+       rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_PRE,
+                          &txgbe_handle_devarg, &ffe_pre);
+       rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_POST,
+                          &txgbe_handle_devarg, &ffe_post);
+       rte_kvargs_free(kvlist);
+
+null:
+       hw->devarg.auto_neg = auto_neg;
+       hw->devarg.poll = poll;
+       hw->devarg.present = present;
+       hw->devarg.sgmii = sgmii;
+       hw->phy.ffe_set = ffe_set;
+       hw->phy.ffe_main = ffe_main;
+       hw->phy.ffe_pre = ffe_pre;
+       hw->phy.ffe_post = ffe_post;
+}
+
 static int
 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 {
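
Note that txgbe_handle_devarg() above trusts errno after strtoul()
without clearing it first, so a stale ERANGE from an earlier libc call
could trip the range check. A minimal standalone sketch of the same
parsing with errno reset (hypothetical helper, not the driver's code):

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Parse a decimal devarg value into a uint16_t, rejecting empty
     * strings and out-of-range input explicitly. */
    static int
    parse_u16_devarg(const char *value, uint16_t *out)
    {
            char *end;
            unsigned long v;

            if (value == NULL || out == NULL)
                    return -EINVAL;

            errno = 0;                      /* clear any stale ERANGE */
            v = strtoul(value, &end, 10);
            if (errno == ERANGE || end == value || v > UINT16_MAX)
                    return -EINVAL;

            *out = (uint16_t)v;
            return 0;
    }

With the kvlist in place, the backplane options are passed on the EAL
allow list, e.g. -a 0000:01:00.0,auto_neg=1,poll=0,ffe_set=1 (key names
per the TXGBE_DEVARG_* macros above).
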
@@ -537,6 +606,7 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
        hw->isb_dma = TMZ_PADDR(mz);
        hw->isb_mem = TMZ_VADDR(mz);
 
+       txgbe_parse_devargs(hw, pci_dev->device.devargs);
        /* Initialize the shared code (base driver) */
        err = txgbe_init_shared_code(hw);
        if (err != 0) {
@@ -862,32 +932,10 @@ static int
 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
 {
-       struct rte_eth_dev *pf_ethdev;
-       struct rte_eth_devargs eth_da;
-       int retval;
-
-       if (pci_dev->device.devargs) {
-               retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
-                               &eth_da);
-               if (retval)
-                       return retval;
-       } else {
-               memset(&eth_da, 0, sizeof(eth_da));
-       }
-
-       retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
+       return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                        sizeof(struct txgbe_adapter),
                        eth_dev_pci_specific_init, pci_dev,
                        eth_txgbe_dev_init, NULL);
-
-       if (retval || eth_da.nb_representor_ports < 1)
-               return retval;
-
-       pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
-       if (pf_ethdev == NULL)
-               return -ENODEV;
-
-       return 0;
 }
 
 static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
@@ -896,7 +944,7 @@ static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
 
        ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (!ethdev)
-               return -ENODEV;
+               return 0;
 
        return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
 }
@@ -950,7 +998,7 @@ txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
        rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
        rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
        rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
-       if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+       if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
                restart = (rxcfg & TXGBE_RXCFG_ENA) &&
                        !(rxcfg & TXGBE_RXCFG_VLAN);
                rxcfg |= TXGBE_RXCFG_VLAN;
@@ -985,7 +1033,7 @@ txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
        vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
        qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
        switch (vlan_type) {
-       case ETH_VLAN_TYPE_INNER:
+       case RTE_ETH_VLAN_TYPE_INNER:
                if (vlan_ext) {
                        wr32m(hw, TXGBE_VLANCTL,
                                TXGBE_VLANCTL_TPID_MASK,
@@ -1005,7 +1053,7 @@ txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
                                TXGBE_TAGTPID_LSB(tpid));
                }
                break;
-       case ETH_VLAN_TYPE_OUTER:
+       case RTE_ETH_VLAN_TYPE_OUTER:
                if (vlan_ext) {
                        /* Only the high 16-bits is valid */
                        wr32m(hw, TXGBE_EXTAG,
@@ -1090,10 +1138,10 @@ txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 
        if (on) {
                rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-               rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+               rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        } else {
                rxq->vlan_flags = PKT_RX_VLAN;
-               rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+               rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        }
 }
 
@@ -1139,7 +1187,6 @@ txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
 
        ctrl = rd32(hw, TXGBE_PORTCTL);
        ctrl &= ~TXGBE_PORTCTL_VLANEXT;
-       ctrl &= ~TXGBE_PORTCTL_QINQ;
        wr32(hw, TXGBE_PORTCTL, ctrl);
 }
 
@@ -1147,17 +1194,38 @@ static void
 txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 {
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
-       struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-       struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
        uint32_t ctrl;
 
        PMD_INIT_FUNC_TRACE();
 
        ctrl  = rd32(hw, TXGBE_PORTCTL);
        ctrl |= TXGBE_PORTCTL_VLANEXT;
-       if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP ||
-           txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
-               ctrl |= TXGBE_PORTCTL_QINQ;
+       wr32(hw, TXGBE_PORTCTL, ctrl);
+}
+
+static void
+txgbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t ctrl;
+
+       PMD_INIT_FUNC_TRACE();
+
+       ctrl = rd32(hw, TXGBE_PORTCTL);
+       ctrl &= ~TXGBE_PORTCTL_QINQ;
+       wr32(hw, TXGBE_PORTCTL, ctrl);
+}
+
+static void
+txgbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t ctrl;
+
+       PMD_INIT_FUNC_TRACE();
+
+       ctrl  = rd32(hw, TXGBE_PORTCTL);
+       ctrl |= TXGBE_PORTCTL_QINQ | TXGBE_PORTCTL_VLANEXT;
        wr32(hw, TXGBE_PORTCTL, ctrl);
 }
 
@@ -1172,7 +1240,7 @@ txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
 
-               if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+               if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        txgbe_vlan_strip_queue_set(dev, i, 1);
                else
                        txgbe_vlan_strip_queue_set(dev, i, 0);
@@ -1186,17 +1254,17 @@ txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
        struct rte_eth_rxmode *rxmode;
        struct txgbe_rx_queue *rxq;
 
-       if (mask & ETH_VLAN_STRIP_MASK) {
+       if (mask & RTE_ETH_VLAN_STRIP_MASK) {
                rxmode = &dev->data->dev_conf.rxmode;
-               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
-                               rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+                               rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
                        }
                else
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
-                               rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+                               rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
                        }
        }
 }
@@ -1207,23 +1275,30 @@ txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
        struct rte_eth_rxmode *rxmode;
        rxmode = &dev->data->dev_conf.rxmode;
 
-       if (mask & ETH_VLAN_STRIP_MASK)
+       if (mask & RTE_ETH_VLAN_STRIP_MASK)
                txgbe_vlan_hw_strip_config(dev);
 
-       if (mask & ETH_VLAN_FILTER_MASK) {
-               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+       if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                        txgbe_vlan_hw_filter_enable(dev);
                else
                        txgbe_vlan_hw_filter_disable(dev);
        }
 
-       if (mask & ETH_VLAN_EXTEND_MASK) {
-               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+       if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
                        txgbe_vlan_hw_extend_enable(dev);
                else
                        txgbe_vlan_hw_extend_disable(dev);
        }
 
+       if (mask & RTE_ETH_QINQ_STRIP_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+                       txgbe_qinq_hw_strip_enable(dev);
+               else
+                       txgbe_qinq_hw_strip_disable(dev);
+       }
+
        return 0;
 }
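
With QinQ stripping split out of VLAN extend above, applications toggle
it through the generic ethdev VLAN offload API. A sketch under the new
RTE_ETH_* names (assumes a configured txgbe port):

    #include <rte_ethdev.h>

    /* Turn on outer-VLAN (QinQ) stripping at runtime; ethdev computes
     * the changed bits and invokes the PMD with RTE_ETH_QINQ_STRIP_MASK
     * set, which lands in txgbe_qinq_hw_strip_enable() above. */
    static int
    enable_qinq_strip(uint16_t port_id)
    {
            int mask = rte_eth_dev_get_vlan_offload(port_id);

            if (mask < 0)
                    return mask;
            mask |= RTE_ETH_QINQ_STRIP_OFFLOAD;
            return rte_eth_dev_set_vlan_offload(port_id, mask);
    }
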
 
@@ -1256,10 +1331,10 @@ txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
        switch (nb_rx_q) {
        case 1:
        case 2:
-               RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+               RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
                break;
        case 4:
-               RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+               RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
                break;
        default:
                return -EINVAL;
@@ -1282,18 +1357,18 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
        if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
                /* check multi-queue mode */
                switch (dev_conf->rxmode.mq_mode) {
-               case ETH_MQ_RX_VMDQ_DCB:
-                       PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+               case RTE_ETH_MQ_RX_VMDQ_DCB:
+                       PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
                        break;
-               case ETH_MQ_RX_VMDQ_DCB_RSS:
+               case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* DCB/RSS VMDQ in SRIOV mode, not implement yet */
                        PMD_INIT_LOG(ERR, "SRIOV active,"
                                        " unsupported mq_mode rx %d.",
                                        dev_conf->rxmode.mq_mode);
                        return -EINVAL;
-               case ETH_MQ_RX_RSS:
-               case ETH_MQ_RX_VMDQ_RSS:
-                       dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+               case RTE_ETH_MQ_RX_RSS:
+               case RTE_ETH_MQ_RX_VMDQ_RSS:
+                       dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
                        if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
                                if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
                                        PMD_INIT_LOG(ERR, "SRIOV is active,"
@@ -1303,13 +1378,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
                                        return -EINVAL;
                                }
                        break;
-               case ETH_MQ_RX_VMDQ_ONLY:
-               case ETH_MQ_RX_NONE:
+               case RTE_ETH_MQ_RX_VMDQ_ONLY:
+               case RTE_ETH_MQ_RX_NONE:
                        /* if nothing mq mode configure, use default scheme */
                        dev->data->dev_conf.rxmode.mq_mode =
-                               ETH_MQ_RX_VMDQ_ONLY;
+                               RTE_ETH_MQ_RX_VMDQ_ONLY;
                        break;
-               default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+               default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB*/
                        /* SRIOV only works in VMDq enable mode */
                        PMD_INIT_LOG(ERR, "SRIOV is active,"
                                        " wrong mq_mode rx %d.",
@@ -1318,13 +1393,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
                }
 
                switch (dev_conf->txmode.mq_mode) {
-               case ETH_MQ_TX_VMDQ_DCB:
-                       PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
-                       dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+               case RTE_ETH_MQ_TX_VMDQ_DCB:
+                       PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+                       dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
                        break;
-               default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+               default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */
                        dev->data->dev_conf.txmode.mq_mode =
-                               ETH_MQ_TX_VMDQ_ONLY;
+                               RTE_ETH_MQ_TX_VMDQ_ONLY;
                        break;
                }
 
@@ -1339,13 +1414,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
                        return -EINVAL;
                }
        } else {
-               if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+               if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
                        PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
                                          " not supported.");
                        return -EINVAL;
                }
                /* check configuration for vmdb+dcb mode */
-               if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+               if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_conf *conf;
 
                        if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -1354,15 +1429,15 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
                                return -EINVAL;
                        }
                        conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
-                       if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-                              conf->nb_queue_pools == ETH_32_POOLS)) {
+                       if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+                              conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
                                PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
                                                " nb_queue_pools must be %d or %d.",
-                                               ETH_16_POOLS, ETH_32_POOLS);
+                                               RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
                                return -EINVAL;
                        }
                }
-               if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+               if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_tx_conf *conf;
 
                        if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -1371,39 +1446,39 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
                                return -EINVAL;
                        }
                        conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
-                       if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-                              conf->nb_queue_pools == ETH_32_POOLS)) {
+                       if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+                              conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
                                PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
                                                " nb_queue_pools != %d and"
                                                " nb_queue_pools != %d.",
-                                               ETH_16_POOLS, ETH_32_POOLS);
+                                               RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
                                return -EINVAL;
                        }
                }
 
                /* For DCB mode check our configuration before we go further */
-               if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+               if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) {
                        const struct rte_eth_dcb_rx_conf *conf;
 
                        conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
-                       if (!(conf->nb_tcs == ETH_4_TCS ||
-                              conf->nb_tcs == ETH_8_TCS)) {
+                       if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+                              conf->nb_tcs == RTE_ETH_8_TCS)) {
                                PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
                                                " and nb_tcs != %d.",
-                                               ETH_4_TCS, ETH_8_TCS);
+                                               RTE_ETH_4_TCS, RTE_ETH_8_TCS);
                                return -EINVAL;
                        }
                }
 
-               if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+               if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
                        const struct rte_eth_dcb_tx_conf *conf;
 
                        conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
-                       if (!(conf->nb_tcs == ETH_4_TCS ||
-                              conf->nb_tcs == ETH_8_TCS)) {
+                       if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+                              conf->nb_tcs == RTE_ETH_8_TCS)) {
                                PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
                                                " and nb_tcs != %d.",
-                                               ETH_4_TCS, ETH_8_TCS);
+                                               RTE_ETH_4_TCS, RTE_ETH_8_TCS);
                                return -EINVAL;
                        }
                }
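
The pool/TC checks above pin down what a valid configuration looks
like. A minimal rte_eth_conf sketch that passes them, with 16 pools and
an illustrative priority-to-TC mapping (values are examples only):

    #include <string.h>
    #include <rte_ethdev.h>

    static void
    fill_vmdq_dcb_conf(struct rte_eth_conf *conf)
    {
            struct rte_eth_vmdq_dcb_conf *vmdq =
                    &conf->rx_adv_conf.vmdq_dcb_conf;
            int i;

            memset(conf, 0, sizeof(*conf));
            conf->rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB;
            /* must be RTE_ETH_16_POOLS or RTE_ETH_32_POOLS */
            vmdq->nb_queue_pools = RTE_ETH_16_POOLS;
            /* spread the 8 user priorities over 4 traffic classes */
            for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
                    vmdq->dcb_tc[i] = i % 4;
    }

The port must also expose TXGBE_VMDQ_DCB_NB_QUEUES Rx queues, per the
nb_rx_q check above.
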
@@ -1420,8 +1495,8 @@ txgbe_dev_configure(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
 
-       if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-               dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+       if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+               dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
        /* multiple queue mode checking */
        ret  = txgbe_check_mq_mode(dev);
@@ -1454,6 +1529,7 @@ txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
        gpie |= TXGBE_GPIOBIT_6;
        wr32(hw, TXGBE_GPIOINTEN, gpie);
        intr->mask_misc |= TXGBE_ICRMISC_GPIO;
+       intr->mask_misc |= TXGBE_ICRMISC_ANDONE;
 }
 
 int
@@ -1558,17 +1634,6 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
 
-       /* TXGBE devices don't support:
-        *    - half duplex (checked afterwards for valid speeds)
-        *    - fixed speed: TODO implement
-        */
-       if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
-               PMD_INIT_LOG(ERR,
-               "Invalid link_speeds for port %u, fix speed not supported",
-                               dev->data->port_id);
-               return -EINVAL;
-       }
-
        /* Stop the link setup handler before resetting the HW. */
        rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
 
@@ -1589,6 +1654,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
                return -1;
        hw->mac.start_hw(hw);
        hw->mac.get_link_status = true;
+       hw->dev_start = true;
 
        /* configure PF module if SRIOV enabled */
        txgbe_pf_host_configure(dev);
@@ -1628,15 +1694,15 @@ txgbe_dev_start(struct rte_eth_dev *dev)
                goto error;
        }
 
-       mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-               ETH_VLAN_EXTEND_MASK;
+       mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+               RTE_ETH_VLAN_EXTEND_MASK;
        err = txgbe_vlan_offload_config(dev, mask);
        if (err) {
                PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
                goto error;
        }
 
-       if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+       if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
                /* Enable vlan filtering for VMDq */
                txgbe_vmdq_vlan_hw_filter_enable(dev);
        }
@@ -1687,7 +1753,8 @@ txgbe_dev_start(struct rte_eth_dev *dev)
                hw->mac.enable_tx_laser(hw);
        }
 
-       err = hw->mac.check_link(hw, &speed, &link_up, 0);
+       if ((hw->subsystem_device_id & 0xFF) != TXGBE_DEV_ID_KR_KX_KX4)
+               err = hw->mac.check_link(hw, &speed, &link_up, 0);
        if (err)
                goto error;
        dev->data->dev_link.link_status = link_up;
@@ -1696,30 +1763,30 @@ txgbe_dev_start(struct rte_eth_dev *dev)
        if (err)
                goto error;
 
-       allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
-                       ETH_LINK_SPEED_10G;
+       allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+                       RTE_ETH_LINK_SPEED_10G;
 
        link_speeds = &dev->data->dev_conf.link_speeds;
-       if (*link_speeds & ~allowed_speeds) {
+       if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
                PMD_INIT_LOG(ERR, "Invalid link setting");
                goto error;
        }
 
        speed = 0x0;
-       if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+       if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
                speed = (TXGBE_LINK_SPEED_100M_FULL |
                         TXGBE_LINK_SPEED_1GB_FULL |
                         TXGBE_LINK_SPEED_10GB_FULL);
        } else {
-               if (*link_speeds & ETH_LINK_SPEED_10G)
+               if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
                        speed |= TXGBE_LINK_SPEED_10GB_FULL;
-               if (*link_speeds & ETH_LINK_SPEED_5G)
+               if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
                        speed |= TXGBE_LINK_SPEED_5GB_FULL;
-               if (*link_speeds & ETH_LINK_SPEED_2_5G)
+               if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
                        speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
-               if (*link_speeds & ETH_LINK_SPEED_1G)
+               if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
                        speed |= TXGBE_LINK_SPEED_1GB_FULL;
-               if (*link_speeds & ETH_LINK_SPEED_100M)
+               if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
                        speed |= TXGBE_LINK_SPEED_100M_FULL;
        }
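
The ((*link_speeds) >> 1) comparison above deliberately shifts out bit
0, RTE_ETH_LINK_SPEED_FIXED, so a fixed-speed request is validated on
its speed bits alone. Two example settings an application might pass
(sketch, not driver code):

    #include <rte_ethdev.h>

    /* Autonegotiate everything: RTE_ETH_LINK_SPEED_AUTONEG is 0. */
    static const struct rte_eth_conf conf_autoneg = {
            .link_speeds = RTE_ETH_LINK_SPEED_AUTONEG,
    };

    /* Force 10G: the FIXED bit plus exactly one speed bit; the driver
     * masks FIXED off before checking against allowed_speeds. */
    static const struct rte_eth_conf conf_fixed_10g = {
            .link_speeds = RTE_ETH_LINK_SPEED_FIXED |
                           RTE_ETH_LINK_SPEED_10G,
    };
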
 
@@ -1730,6 +1797,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 skip_link_setup:
 
        if (rte_intr_allow_others(intr_handle)) {
+               txgbe_dev_misc_interrupt_setup(dev);
                /* check if lsc interrupt is enabled */
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        txgbe_dev_lsc_interrupt_setup(dev, TRUE);
@@ -1856,6 +1924,7 @@ txgbe_dev_stop(struct rte_eth_dev *dev)
 
        hw->adapter_stopped = true;
        dev->data->dev_started = 0;
+       hw->dev_start = false;
 
        return 0;
 }
@@ -2080,6 +2149,7 @@ txgbe_read_stats_registers(struct txgbe_hw *hw,
 
        hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
        hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
+       hw_stats->rx_dma_drop += rd32(hw, TXGBE_DMARXDROP);
        hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);
 
        /* MAC Stats */
@@ -2228,7 +2298,8 @@ txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
        }
 
        /* Rx Errors */
-       stats->imissed  = hw_stats->rx_total_missed_packets;
+       stats->imissed  = hw_stats->rx_total_missed_packets +
+                         hw_stats->rx_dma_drop;
        stats->ierrors  = hw_stats->rx_crc_errors +
                          hw_stats->rx_mac_short_packet_dropped +
                          hw_stats->rx_length_errors +
@@ -2370,8 +2441,8 @@ static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
 }
 
 static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
-       struct rte_eth_xstat_name *xstats_names,
        const uint64_t *ids,
+       struct rte_eth_xstat_name *xstats_names,
        unsigned int limit)
 {
        unsigned int i;
@@ -2501,18 +2572,17 @@ static int
 txgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
 {
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
-       u16 eeprom_verh, eeprom_verl;
        u32 etrack_id;
        int ret;
 
-       hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_H, &eeprom_verh);
-       hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_L, &eeprom_verl);
+       hw->phy.get_fw_version(hw, &etrack_id);
 
-       etrack_id = (eeprom_verh << 16) | eeprom_verl;
        ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
+       if (ret < 0)
+               return -EINVAL;
 
        ret += 1; /* add the size of '\0' */
-       if (fw_size < (u32)ret)
+       if (fw_size < (size_t)ret)
                return ret;
        else
                return 0;
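
The reworked helper above returns 0 on success and, when fw_size is too
small, the byte count the caller should retry with (including the
terminating NUL). Caller-side sketch via the public wrapper:

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    print_fw_version(uint16_t port_id)
    {
            char buf[32];
            int ret = rte_eth_dev_fw_version_get(port_id, buf, sizeof(buf));

            if (ret == 0)
                    printf("firmware: %s\n", buf);
            else if (ret > 0)
                    printf("need a %d-byte buffer for the version\n", ret);
    }
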
@@ -2531,7 +2601,7 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->max_mac_addrs = hw->mac.num_rar_entries;
        dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
        dev_info->max_vfs = pci_dev->max_vfs;
-       dev_info->max_vmdq_pools = ETH_64_POOLS;
+       dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
        dev_info->vmdq_queue_num = dev_info->max_rx_queues;
        dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
        dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
@@ -2564,11 +2634,11 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->tx_desc_lim = tx_desc_lim;
 
        dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
-       dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+       dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
        dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
 
-       dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
-       dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+       dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+       dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
 
        /* Driver-preferred Rx/Tx parameters */
        dev_info->default_rxportconf.burst_size = 32;
@@ -2625,10 +2695,11 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
        int wait = 1;
 
        memset(&link, 0, sizeof(link));
-       link.link_status = ETH_LINK_DOWN;
-       link.link_speed = ETH_SPEED_NUM_NONE;
-       link.link_duplex = ETH_LINK_HALF_DUPLEX;
-       link.link_autoneg = ETH_LINK_AUTONEG;
+       link.link_status = RTE_ETH_LINK_DOWN;
+       link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+       link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
+       link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+                       RTE_ETH_LINK_SPEED_FIXED);
 
        hw->mac.get_link_status = true;
 
@@ -2642,49 +2713,54 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
        err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
 
        if (err != 0) {
-               link.link_speed = ETH_SPEED_NUM_100M;
-               link.link_duplex = ETH_LINK_FULL_DUPLEX;
+               link.link_speed = RTE_ETH_SPEED_NUM_100M;
+               link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
                return rte_eth_linkstatus_set(dev, &link);
        }
 
        if (link_up == 0) {
-               if (hw->phy.media_type == txgbe_media_type_fiber) {
+               if ((hw->subsystem_device_id & 0xFF) ==
+                               TXGBE_DEV_ID_KR_KX_KX4) {
+                       hw->mac.bp_down_event(hw);
+               } else if (hw->phy.media_type == txgbe_media_type_fiber) {
                        intr->flags |= TXGBE_FLAG_NEED_LINK_CONFIG;
                        rte_eal_alarm_set(10,
                                txgbe_dev_setup_link_alarm_handler, dev);
                }
                return rte_eth_linkstatus_set(dev, &link);
+       } else if (!hw->dev_start) {
+               return rte_eth_linkstatus_set(dev, &link);
        }
 
        intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
-       link.link_status = ETH_LINK_UP;
-       link.link_duplex = ETH_LINK_FULL_DUPLEX;
+       link.link_status = RTE_ETH_LINK_UP;
+       link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
        switch (link_speed) {
        default:
        case TXGBE_LINK_SPEED_UNKNOWN:
-               link.link_duplex = ETH_LINK_FULL_DUPLEX;
-               link.link_speed = ETH_SPEED_NUM_100M;
+               link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+               link.link_speed = RTE_ETH_SPEED_NUM_100M;
                break;
 
        case TXGBE_LINK_SPEED_100M_FULL:
-               link.link_speed = ETH_SPEED_NUM_100M;
+               link.link_speed = RTE_ETH_SPEED_NUM_100M;
                break;
 
        case TXGBE_LINK_SPEED_1GB_FULL:
-               link.link_speed = ETH_SPEED_NUM_1G;
+               link.link_speed = RTE_ETH_SPEED_NUM_1G;
                break;
 
        case TXGBE_LINK_SPEED_2_5GB_FULL:
-               link.link_speed = ETH_SPEED_NUM_2_5G;
+               link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
                break;
 
        case TXGBE_LINK_SPEED_5GB_FULL:
-               link.link_speed = ETH_SPEED_NUM_5G;
+               link.link_speed = RTE_ETH_SPEED_NUM_5G;
                break;
 
        case TXGBE_LINK_SPEED_10GB_FULL:
-               link.link_speed = ETH_SPEED_NUM_10G;
+               link.link_speed = RTE_ETH_SPEED_NUM_10G;
                break;
        }
 
@@ -2783,6 +2859,20 @@ txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
        return 0;
 }
 
+static int
+txgbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev)
+{
+       struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+       u64 mask;
+
+       mask = TXGBE_ICR_MASK;
+       mask &= (1ULL << TXGBE_MISC_VEC_ID);
+       intr->mask |= mask;
+       intr->mask_misc |= TXGBE_ICRMISC_GPIO;
+       intr->mask_misc |= TXGBE_ICRMISC_ANDONE;
+       return 0;
+}
+
 /**
  * It clears the interrupt causes and enables the interrupt.
  * It will be called once only during nic initialized.
@@ -2798,9 +2888,11 @@ static int
 txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
 {
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+       u64 mask;
 
-       intr->mask[0] |= TXGBE_ICR_MASK;
-       intr->mask[1] |= TXGBE_ICR_MASK;
+       mask = TXGBE_ICR_MASK;
+       mask &= ~((1ULL << TXGBE_RX_VEC_START) - 1);
+       intr->mask |= mask;
 
        return 0;
 }
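
The two setup helpers above carve one interrupt-enable word in two: the
misc vector keeps only bit TXGBE_MISC_VEC_ID, the queue vectors keep
everything from TXGBE_RX_VEC_START upward. A tiny check of that
arithmetic, assuming the driver's usual values TXGBE_MISC_VEC_ID == 0
and TXGBE_RX_VEC_START == 1:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            const uint64_t icr = 0xFFFFFFFFULL; /* stand-in for TXGBE_ICR_MASK */
            uint64_t misc = icr & (1ULL << 0);          /* misc vector bit */
            uint64_t rxq  = icr & ~((1ULL << 1) - 1);   /* rx vectors */

            assert((misc & rxq) == 0);      /* disjoint enables */
            assert((misc | rxq) == icr);    /* together cover the mask */
            return 0;
    }
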
@@ -2837,12 +2929,17 @@ txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
  *  - On failure, a negative value.
  */
 static int
-txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
+txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev,
+                               struct rte_intr_handle *intr_handle)
 {
        uint32_t eicr;
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
 
+       if (intr_handle->type != RTE_INTR_HANDLE_UIO &&
+                       intr_handle->type != RTE_INTR_HANDLE_VFIO_MSIX)
+               wr32(hw, TXGBE_PX_INTA, 1);
+
        /* clear all cause mask */
        txgbe_disable_intr(hw);
 
@@ -2856,6 +2953,9 @@ txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
        if (eicr & TXGBE_ICRMISC_LSC)
                intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
 
+       if (eicr & TXGBE_ICRMISC_ANDONE)
+               intr->flags |= TXGBE_FLAG_NEED_AN_CONFIG;
+
        if (eicr & TXGBE_ICRMISC_VFMBX)
                intr->flags |= TXGBE_FLAG_MAILBOX;
 
@@ -2890,7 +2990,7 @@ txgbe_dev_link_status_print(struct rte_eth_dev *dev)
                PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
                                        (int)(dev->data->port_id),
                                        (unsigned int)link.link_speed,
-                       link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+                       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
                                        "full-duplex" : "half-duplex");
        } else {
                PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -2933,6 +3033,13 @@ txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
                intr->flags &= ~TXGBE_FLAG_PHY_INTERRUPT;
        }
 
+       if (intr->flags & TXGBE_FLAG_NEED_AN_CONFIG) {
+               if (hw->devarg.auto_neg == 1 && hw->devarg.poll == 0) {
+                       hw->mac.kr_handle(hw);
+                       intr->flags &= ~TXGBE_FLAG_NEED_AN_CONFIG;
+               }
+       }
+
        if (intr->flags & TXGBE_FLAG_NEED_LINK_UPDATE) {
                struct rte_eth_link link;
 
@@ -2946,6 +3053,11 @@ txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
                        /* handle it 1 sec later, wait it being stable */
                        timeout = TXGBE_LINK_UP_CHECK_TIMEOUT;
                /* likely to down */
+               else if ((hw->subsystem_device_id & 0xFF) ==
+                               TXGBE_DEV_ID_KR_KX_KX4 &&
+                               hw->devarg.auto_neg == 1)
+                       /* handle it 2 sec later for backplane AN73 */
+                       timeout = 2000;
                else
                        /* handle it 4 sec later, wait it being stable */
                        timeout = TXGBE_LINK_DOWN_CHECK_TIMEOUT;
@@ -2956,10 +3068,12 @@ txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
                                      (void *)dev) < 0) {
                        PMD_DRV_LOG(ERR, "Error setting alarm");
                } else {
-                       /* remember original mask */
-                       intr->mask_misc_orig = intr->mask_misc;
                        /* only disable lsc interrupt */
                        intr->mask_misc &= ~TXGBE_ICRMISC_LSC;
+
+                       intr->mask_orig = intr->mask;
+                       /* only disable all misc interrupts */
+                       intr->mask &= ~(1ULL << TXGBE_MISC_VEC_ID);
                }
        }
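
The deferred path above disarms the misc vector, then re-checks link
state from an EAL alarm (including the 2000 ms backplane AN73 case). A
stripped-down sketch of that pattern; callback and wrapper names are
hypothetical:

    #include <rte_alarm.h>

    static void
    recheck_link(void *param)
    {
            /* re-read the link here, then re-enable the interrupt */
            (void)param;
    }

    static void
    defer_link_check(void *dev, uint32_t timeout_ms)
    {
            /* rte_eal_alarm_set() takes microseconds, hence * 1000 */
            if (rte_eal_alarm_set(timeout_ms * 1000ULL,
                                  recheck_link, dev) < 0)
                    recheck_link(dev);  /* fall back to immediate handling */
    }
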
 
@@ -3020,8 +3134,10 @@ txgbe_dev_interrupt_delayed_handler(void *param)
        }
 
        /* restore original mask */
-       intr->mask_misc = intr->mask_misc_orig;
-       intr->mask_misc_orig = 0;
+       intr->mask_misc |= TXGBE_ICRMISC_LSC;
+
+       intr->mask = intr->mask_orig;
+       intr->mask_orig = 0;
 
        PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
        txgbe_enable_intr(dev);
@@ -3045,7 +3161,7 @@ txgbe_dev_interrupt_handler(void *param)
 {
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 
-       txgbe_dev_interrupt_get_status(dev);
+       txgbe_dev_interrupt_get_status(dev, dev->intr_handle);
        txgbe_dev_interrupt_action(dev, dev->intr_handle);
 }
 
@@ -3105,13 +3221,13 @@ txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
                tx_pause = 0;
 
        if (rx_pause && tx_pause)
-               fc_conf->mode = RTE_FC_FULL;
+               fc_conf->mode = RTE_ETH_FC_FULL;
        else if (rx_pause)
-               fc_conf->mode = RTE_FC_RX_PAUSE;
+               fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
        else if (tx_pause)
-               fc_conf->mode = RTE_FC_TX_PAUSE;
+               fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
        else
-               fc_conf->mode = RTE_FC_NONE;
+               fc_conf->mode = RTE_ETH_FC_NONE;
 
        return 0;
 }
@@ -3243,16 +3359,16 @@ txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
                return -ENOTSUP;
        }
 
-       if (reta_size != ETH_RSS_RETA_SIZE_128) {
+       if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
                PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
                        "(%d) doesn't match the number hardware can supported "
-                       "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+                       "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
                return -EINVAL;
        }
 
        for (i = 0; i < reta_size; i += 4) {
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
                mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
                if (!mask)
                        continue;
@@ -3284,16 +3400,16 @@ txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 
        PMD_INIT_FUNC_TRACE();
 
-       if (reta_size != ETH_RSS_RETA_SIZE_128) {
+       if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
                PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
                        "(%d) doesn't match the number hardware can supported "
-                       "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+                       "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
                return -EINVAL;
        }
 
        for (i = 0; i < reta_size; i += 4) {
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
                mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
                if (!mask)
                        continue;
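
Both RETA paths above walk the table in RTE_ETH_RETA_GROUP_SIZE chunks
with a per-entry update mask. Caller-side sketch spreading the
128-entry table round-robin over nb_queues queues (illustrative only):

    #include <string.h>
    #include <rte_ethdev.h>

    static int
    spread_reta(uint16_t port_id, uint16_t nb_queues)
    {
            struct rte_eth_rss_reta_entry64 reta[2];
            uint16_t i;

            memset(reta, 0, sizeof(reta));
            for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
                    uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
                    uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

                    reta[idx].mask |= 1ULL << shift;
                    reta[idx].reta[shift] = i % nb_queues;
            }
            return rte_eth_dev_rss_reta_update(port_id, reta,
                                               RTE_ETH_RSS_RETA_SIZE_128);
    }
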
@@ -3343,18 +3459,8 @@ static int
 txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
-       struct rte_eth_dev_info dev_info;
        uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
        struct rte_eth_dev_data *dev_data = dev->data;
-       int ret;
-
-       ret = txgbe_dev_info_get(dev, &dev_info);
-       if (ret != 0)
-               return ret;
-
-       /* check that mtu is within the allowed range */
-       if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
-               return -EINVAL;
 
        /* If device is started, refuse mtu that requires the support of
         * scattered packets when this feature has not been enabled before.
@@ -3366,9 +3472,6 @@ txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
                return -EINVAL;
        }
 
-       /* update max frame size */
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
        if (hw->mode)
                wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
                        TXGBE_FRAME_SIZE_MAX);
@@ -3473,12 +3576,12 @@ txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
                return -ENOTSUP;
 
        if (on) {
-               for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+               for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
                        uta_info->uta_shadow[i] = ~0;
                        wr32(hw, TXGBE_UCADDRTBL(i), ~0);
                }
        } else {
-               for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+               for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
                        uta_info->uta_shadow[i] = 0;
                        wr32(hw, TXGBE_UCADDRTBL(i), 0);
                }
@@ -3502,15 +3605,15 @@ txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
 {
        uint32_t new_val = orig_val;
 
-       if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+       if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
                new_val |= TXGBE_POOLETHCTL_UTA;
-       if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+       if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
                new_val |= TXGBE_POOLETHCTL_MCHA;
-       if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+       if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
                new_val |= TXGBE_POOLETHCTL_UCHA;
-       if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+       if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
                new_val |= TXGBE_POOLETHCTL_BCA;
-       if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+       if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
                new_val |= TXGBE_POOLETHCTL_MCP;
 
        return new_val;
@@ -4076,27 +4179,11 @@ txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
 }
 
 static int
-txgbe_dev_filter_ctrl(__rte_unused struct rte_eth_dev *dev,
-                    enum rte_filter_type filter_type,
-                    enum rte_filter_op filter_op,
-                    void *arg)
+txgbe_dev_flow_ops_get(__rte_unused struct rte_eth_dev *dev,
+                      const struct rte_flow_ops **ops)
 {
-       int ret = 0;
-
-       switch (filter_type) {
-       case RTE_ETH_FILTER_GENERIC:
-               if (filter_op != RTE_ETH_FILTER_GET)
-                       return -EINVAL;
-               *(const void **)arg = &txgbe_flow_ops;
-               break;
-       default:
-               PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
-                                                       filter_type);
-               ret = -EINVAL;
-               break;
-       }
-
-       return ret;
+       *ops = &txgbe_flow_ops;
+       return 0;
 }
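
filter_ctrl with RTE_ETH_FILTER_GENERIC was only ever a vehicle for
handing back the rte_flow ops table; the dedicated callback above drops
that indirection. Roughly how the ethdev flow layer fetches the table
now (simplified sketch, not verbatim ethdev code):

    #include <ethdev_driver.h>

    static inline const struct rte_flow_ops *
    get_flow_ops(struct rte_eth_dev *dev)
    {
            const struct rte_flow_ops *ops = NULL;

            if (dev->dev_ops->flow_ops_get == NULL ||
                dev->dev_ops->flow_ops_get(dev, &ops) != 0)
                    return NULL;
            return ops; /* &txgbe_flow_ops for this driver */
    }
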
 
 static u8 *
@@ -4177,15 +4264,15 @@ txgbe_start_timecounters(struct rte_eth_dev *dev)
        rte_eth_linkstatus_get(dev, &link);
 
        switch (link.link_speed) {
-       case ETH_SPEED_NUM_100M:
+       case RTE_ETH_SPEED_NUM_100M:
                incval = TXGBE_INCVAL_100;
                shift = TXGBE_INCVAL_SHIFT_100;
                break;
-       case ETH_SPEED_NUM_1G:
+       case RTE_ETH_SPEED_NUM_1G:
                incval = TXGBE_INCVAL_1GB;
                shift = TXGBE_INCVAL_SHIFT_1GB;
                break;
-       case ETH_SPEED_NUM_10G:
+       case RTE_ETH_SPEED_NUM_10G:
        default:
                incval = TXGBE_INCVAL_10GB;
                shift = TXGBE_INCVAL_SHIFT_10GB;
@@ -4541,7 +4628,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
        uint8_t nb_tcs;
        uint8_t i, j;
 
-       if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+       if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
                dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
        else
                dcb_info->nb_tcs = 1;
@@ -4552,7 +4639,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
        if (dcb_config->vt_mode) { /* vt is enabled */
                struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
                                &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
-               for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+               for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
                        dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
                if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
                        for (j = 0; j < nb_tcs; j++) {
@@ -4576,9 +4663,9 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
        } else { /* vt is disabled */
                struct rte_eth_dcb_rx_conf *rx_conf =
                                &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
-               for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+               for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
                        dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
-               if (dcb_info->nb_tcs == ETH_4_TCS) {
+               if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
                        for (i = 0; i < dcb_info->nb_tcs; i++) {
                                dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
                                dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -4591,7 +4678,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
                        dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
                        dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
                        dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
-               } else if (dcb_info->nb_tcs == ETH_8_TCS) {
+               } else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
                        for (i = 0; i < dcb_info->nb_tcs; i++) {
                                dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
                                dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -4821,7 +4908,7 @@ txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
        }
 
        switch (l2_tunnel->l2_tunnel_type) {
-       case RTE_L2_TUNNEL_TYPE_E_TAG:
+       case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
                ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
                break;
        default:
@@ -4852,7 +4939,7 @@ txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
                return ret;
 
        switch (l2_tunnel->l2_tunnel_type) {
-       case RTE_L2_TUNNEL_TYPE_E_TAG:
+       case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
                ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
                break;
        default:
@@ -4892,16 +4979,15 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                return -EINVAL;
 
        switch (udp_tunnel->prot_type) {
-       case RTE_TUNNEL_TYPE_VXLAN:
+       case RTE_ETH_TUNNEL_TYPE_VXLAN:
                if (udp_tunnel->udp_port == 0) {
                        PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
                        ret = -EINVAL;
                        break;
                }
                wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
-               wr32(hw, TXGBE_VXLANPORTGPE, udp_tunnel->udp_port);
                break;
-       case RTE_TUNNEL_TYPE_GENEVE:
+       case RTE_ETH_TUNNEL_TYPE_GENEVE:
                if (udp_tunnel->udp_port == 0) {
                        PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed.");
                        ret = -EINVAL;
@@ -4909,7 +4995,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                }
                wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
                break;
-       case RTE_TUNNEL_TYPE_TEREDO:
+       case RTE_ETH_TUNNEL_TYPE_TEREDO:
                if (udp_tunnel->udp_port == 0) {
                        PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed.");
                        ret = -EINVAL;
@@ -4917,6 +5003,14 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                }
                wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
                break;
+       case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
+               if (udp_tunnel->udp_port == 0) {
+                       PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_VXLANPORTGPE, udp_tunnel->udp_port);
+               break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
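
With VXLAN-GPE split out above, each tunnel type owns exactly one port
register. Caller-side sketch registering the IANA-assigned VXLAN-GPE
port (4790):

    #include <rte_ethdev.h>

    static int
    add_vxlan_gpe_port(uint16_t port_id)
    {
            struct rte_eth_udp_tunnel tunnel = {
                    .udp_port = 4790,
                    .prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
            };

            /* reaches txgbe_dev_udp_tunnel_port_add(), which writes
             * TXGBE_VXLANPORTGPE only */
            return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
    }
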
@@ -4941,7 +5035,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                return -EINVAL;
 
        switch (udp_tunnel->prot_type) {
-       case RTE_TUNNEL_TYPE_VXLAN:
+       case RTE_ETH_TUNNEL_TYPE_VXLAN:
                cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
                if (cur_port != udp_tunnel->udp_port) {
                        PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -4950,9 +5044,8 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                        break;
                }
                wr32(hw, TXGBE_VXLANPORT, 0);
-               wr32(hw, TXGBE_VXLANPORTGPE, 0);
                break;
-       case RTE_TUNNEL_TYPE_GENEVE:
+       case RTE_ETH_TUNNEL_TYPE_GENEVE:
                cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
                if (cur_port != udp_tunnel->udp_port) {
                        PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -4962,7 +5055,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                }
                wr32(hw, TXGBE_GENEVEPORT, 0);
                break;
-       case RTE_TUNNEL_TYPE_TEREDO:
+       case RTE_ETH_TUNNEL_TYPE_TEREDO:
                cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
                if (cur_port != udp_tunnel->udp_port) {
                        PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -4972,6 +5065,16 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                }
                wr32(hw, TXGBE_TEREDOPORT, 0);
                break;
+       case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
+               cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORTGPE);
+               if (cur_port != udp_tunnel->udp_port) {
+                       PMD_DRV_LOG(ERR, "Port %u does not exist.",
+                                       udp_tunnel->udp_port);
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_VXLANPORTGPE, 0);
+               break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
@@ -5206,7 +5309,7 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
        .reta_query                 = txgbe_dev_rss_reta_query,
        .rss_hash_update            = txgbe_dev_rss_hash_update,
        .rss_hash_conf_get          = txgbe_dev_rss_hash_conf_get,
-       .filter_ctrl                = txgbe_dev_filter_ctrl,
+       .flow_ops_get               = txgbe_dev_flow_ops_get,
        .set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
        .rxq_info_get               = txgbe_rxq_info_get,
        .txq_info_get               = txgbe_txq_info_get,
@@ -5233,17 +5336,27 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_txgbe, pci_id_txgbe_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_txgbe, "* igb_uio | uio_pci_generic | vfio-pci");
-
-RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
-RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);
+RTE_PMD_REGISTER_PARAM_STRING(net_txgbe,
+                             TXGBE_DEVARG_BP_AUTO "=<0|1>"
+                             TXGBE_DEVARG_KR_POLL "=<0|1>"
+                             TXGBE_DEVARG_KR_PRESENT "=<0|1>"
+                             TXGBE_DEVARG_KX_SGMII "=<0|1>"
+                             TXGBE_DEVARG_FFE_SET "=<0-4>"
+                             TXGBE_DEVARG_FFE_MAIN "=<uint16>"
+                             TXGBE_DEVARG_FFE_PRE "=<uint16>"
+                             TXGBE_DEVARG_FFE_POST "=<uint16>");
+
+RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_init, init, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_driver, driver, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_bp, bp, NOTICE);
 
 #ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
-       RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
+       RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_rx, rx, DEBUG);
 #endif
 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
-       RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
+       RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_tx, tx, DEBUG);
 #endif
 
 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
-       RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
+       RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_tx_free, tx_free, DEBUG);
 #endif