net: add macro for VLAN header length
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 8dbe3da..47d0e6e 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2020
+ * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd.
+ * Copyright(c) 2010-2017 Intel Corporation
  */
 
 #include <stdio.h>
@@ -108,7 +109,8 @@ static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
 static int txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
 static int txgbe_dev_misc_interrupt_setup(struct rte_eth_dev *dev);
 static int txgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
-static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
+static int txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev,
+                                     struct rte_intr_handle *handle);
 static int txgbe_dev_interrupt_action(struct rte_eth_dev *dev,
                                      struct rte_intr_handle *handle);
 static void txgbe_dev_interrupt_handler(void *param);
@@ -493,7 +495,7 @@ txgbe_parse_devargs(struct txgbe_hw *hw, struct rte_devargs *devargs)
        struct rte_kvargs *kvlist;
        u16 auto_neg = 1;
        u16 poll = 0;
-       u16 present = 1;
+       u16 present = 0;
        u16 sgmii = 0;
        u16 ffe_set = 0;
        u16 ffe_main = 27;
@@ -546,7 +548,7 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
        struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
        struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
        struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        const struct rte_memzone *mz;
        uint32_t ctrl_ext;
        uint16_t csum;
@@ -930,34 +932,10 @@ static int
 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
 {
-       struct rte_eth_dev *pf_ethdev;
-       struct rte_eth_devargs eth_da;
-       int retval;
-
-       if (pci_dev->device.devargs) {
-               retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
-                               &eth_da);
-               if (retval)
-                       return retval;
-       } else {
-               memset(&eth_da, 0, sizeof(eth_da));
-       }
-
-       retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
+       return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
                        sizeof(struct txgbe_adapter),
                        eth_dev_pci_specific_init, pci_dev,
                        eth_txgbe_dev_init, NULL);
-
-       if (retval || eth_da.nb_representor_ports < 1)
-               return retval;
-       if (eth_da.type != RTE_ETH_REPRESENTOR_VF)
-               return -ENOTSUP;
-
-       pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
-       if (pf_ethdev == NULL)
-               return -ENODEV;
-
-       return 0;
 }
 
 static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
@@ -966,7 +944,7 @@ static int eth_txgbe_pci_remove(struct rte_pci_device *pci_dev)
 
        ethdev = rte_eth_dev_allocated(pci_dev->device.name);
        if (!ethdev)
-               return -ENODEV;
+               return 0;
 
        return rte_eth_dev_destroy(ethdev, eth_txgbe_dev_uninit);
 }
@@ -1020,7 +998,7 @@ txgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
        rxbal = rd32(hw, TXGBE_RXBAL(rxq->reg_idx));
        rxbah = rd32(hw, TXGBE_RXBAH(rxq->reg_idx));
        rxcfg = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
-       if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+       if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
                restart = (rxcfg & TXGBE_RXCFG_ENA) &&
                        !(rxcfg & TXGBE_RXCFG_VLAN);
                rxcfg |= TXGBE_RXCFG_VLAN;
@@ -1055,7 +1033,7 @@ txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
        vlan_ext = (portctrl & TXGBE_PORTCTL_VLANEXT);
        qinq = vlan_ext && (portctrl & TXGBE_PORTCTL_QINQ);
        switch (vlan_type) {
-       case ETH_VLAN_TYPE_INNER:
+       case RTE_ETH_VLAN_TYPE_INNER:
                if (vlan_ext) {
                        wr32m(hw, TXGBE_VLANCTL,
                                TXGBE_VLANCTL_TPID_MASK,
@@ -1075,7 +1053,7 @@ txgbe_vlan_tpid_set(struct rte_eth_dev *dev,
                                TXGBE_TAGTPID_LSB(tpid));
                }
                break;
-       case ETH_VLAN_TYPE_OUTER:
+       case RTE_ETH_VLAN_TYPE_OUTER:
                if (vlan_ext) {
                        /* Only the high 16 bits are valid */
                        wr32m(hw, TXGBE_EXTAG,
@@ -1159,11 +1137,11 @@ txgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
        rxq = dev->data->rx_queues[queue];
 
        if (on) {
-               rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-               rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+               rxq->vlan_flags = RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
+               rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        } else {
-               rxq->vlan_flags = PKT_RX_VLAN;
-               rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+               rxq->vlan_flags = RTE_MBUF_F_RX_VLAN;
+               rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
        }
 }
 
@@ -1209,7 +1187,6 @@ txgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
 
        ctrl = rd32(hw, TXGBE_PORTCTL);
        ctrl &= ~TXGBE_PORTCTL_VLANEXT;
-       ctrl &= ~TXGBE_PORTCTL_QINQ;
        wr32(hw, TXGBE_PORTCTL, ctrl);
 }
 
@@ -1217,17 +1194,38 @@ static void
 txgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 {
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
-       struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-       struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
        uint32_t ctrl;
 
        PMD_INIT_FUNC_TRACE();
 
        ctrl  = rd32(hw, TXGBE_PORTCTL);
        ctrl |= TXGBE_PORTCTL_VLANEXT;
-       if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP ||
-           txmode->offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
-               ctrl |= TXGBE_PORTCTL_QINQ;
+       wr32(hw, TXGBE_PORTCTL, ctrl);
+}
+
+static void
+txgbe_qinq_hw_strip_disable(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t ctrl;
+
+       PMD_INIT_FUNC_TRACE();
+
+       ctrl = rd32(hw, TXGBE_PORTCTL);
+       ctrl &= ~TXGBE_PORTCTL_QINQ;
+       wr32(hw, TXGBE_PORTCTL, ctrl);
+}
+
+static void
+txgbe_qinq_hw_strip_enable(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t ctrl;
+
+       PMD_INIT_FUNC_TRACE();
+
+       ctrl  = rd32(hw, TXGBE_PORTCTL);
+       ctrl |= TXGBE_PORTCTL_QINQ | TXGBE_PORTCTL_VLANEXT;
        wr32(hw, TXGBE_PORTCTL, ctrl);
 }
 
@@ -1242,7 +1240,7 @@ txgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
 
-               if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+               if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        txgbe_vlan_strip_queue_set(dev, i, 1);
                else
                        txgbe_vlan_strip_queue_set(dev, i, 0);
@@ -1256,17 +1254,17 @@ txgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
        struct rte_eth_rxmode *rxmode;
        struct txgbe_rx_queue *rxq;
 
-       if (mask & ETH_VLAN_STRIP_MASK) {
+       if (mask & RTE_ETH_VLAN_STRIP_MASK) {
                rxmode = &dev->data->dev_conf.rxmode;
-               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
-                               rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+                               rxq->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
                        }
                else
                        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                                rxq = dev->data->rx_queues[i];
-                               rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+                               rxq->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
                        }
        }
 }
@@ -1277,23 +1275,30 @@ txgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
        struct rte_eth_rxmode *rxmode;
        rxmode = &dev->data->dev_conf.rxmode;
 
-       if (mask & ETH_VLAN_STRIP_MASK)
+       if (mask & RTE_ETH_VLAN_STRIP_MASK)
                txgbe_vlan_hw_strip_config(dev);
 
-       if (mask & ETH_VLAN_FILTER_MASK) {
-               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+       if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                        txgbe_vlan_hw_filter_enable(dev);
                else
                        txgbe_vlan_hw_filter_disable(dev);
        }
 
-       if (mask & ETH_VLAN_EXTEND_MASK) {
-               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+       if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
                        txgbe_vlan_hw_extend_enable(dev);
                else
                        txgbe_vlan_hw_extend_disable(dev);
        }
 
+       if (mask & RTE_ETH_QINQ_STRIP_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+                       txgbe_qinq_hw_strip_enable(dev);
+               else
+                       txgbe_qinq_hw_strip_disable(dev);
+       }
+
        return 0;
 }
 
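The new RTE_ETH_QINQ_STRIP_MASK branch above is reached through the generic ethdev VLAN offload API. A minimal application-side sketch, assuming a valid port_id (enable_qinq_strip is an illustrative name, error handling trimmed):

#include <rte_ethdev.h>

static int
enable_qinq_strip(uint16_t port_id)
{
        int mask;

        /* read the port's current VLAN offload flags */
        mask = rte_eth_dev_get_vlan_offload(port_id);
        if (mask < 0)
                return mask;

        /* request QinQ strip on top; ethdev converts the changed bit into
         * RTE_ETH_QINQ_STRIP_MASK and invokes the driver callback above
         */
        return rte_eth_dev_set_vlan_offload(port_id,
                                            mask | RTE_ETH_QINQ_STRIP_OFFLOAD);
}
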
@@ -1326,10 +1331,10 @@ txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
        switch (nb_rx_q) {
        case 1:
        case 2:
-               RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+               RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_64_POOLS;
                break;
        case 4:
-               RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+               RTE_ETH_DEV_SRIOV(dev).active = RTE_ETH_32_POOLS;
                break;
        default:
                return -EINVAL;
@@ -1352,18 +1357,18 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
        if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
                /* check multi-queue mode */
                switch (dev_conf->rxmode.mq_mode) {
-               case ETH_MQ_RX_VMDQ_DCB:
-                       PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+               case RTE_ETH_MQ_RX_VMDQ_DCB:
+                       PMD_INIT_LOG(INFO, "RTE_ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
                        break;
-               case ETH_MQ_RX_VMDQ_DCB_RSS:
+               case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
                        PMD_INIT_LOG(ERR, "SRIOV active,"
                                        " unsupported mq_mode rx %d.",
                                        dev_conf->rxmode.mq_mode);
                        return -EINVAL;
-               case ETH_MQ_RX_RSS:
-               case ETH_MQ_RX_VMDQ_RSS:
-                       dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+               case RTE_ETH_MQ_RX_RSS:
+               case RTE_ETH_MQ_RX_VMDQ_RSS:
+                       dev->data->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_RSS;
                        if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
                                if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
                                        PMD_INIT_LOG(ERR, "SRIOV is active,"
@@ -1373,13 +1378,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
                                        return -EINVAL;
                                }
                        break;
-               case ETH_MQ_RX_VMDQ_ONLY:
-               case ETH_MQ_RX_NONE:
+               case RTE_ETH_MQ_RX_VMDQ_ONLY:
+               case RTE_ETH_MQ_RX_NONE:
                        /* if no mq mode is configured, use the default scheme */
                        dev->data->dev_conf.rxmode.mq_mode =
-                               ETH_MQ_RX_VMDQ_ONLY;
+                               RTE_ETH_MQ_RX_VMDQ_ONLY;
                        break;
-               default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
+               default: /* RTE_ETH_MQ_RX_DCB, RTE_ETH_MQ_RX_DCB_RSS or RTE_ETH_MQ_TX_DCB */
                        /* SRIOV only works in VMDq-enabled mode */
                        PMD_INIT_LOG(ERR, "SRIOV is active,"
                                        " wrong mq_mode rx %d.",
@@ -1388,13 +1393,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
                }
 
                switch (dev_conf->txmode.mq_mode) {
-               case ETH_MQ_TX_VMDQ_DCB:
-                       PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
-                       dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+               case RTE_ETH_MQ_TX_VMDQ_DCB:
+                       PMD_INIT_LOG(INFO, "RTE_ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+                       dev->data->dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
                        break;
-               default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+               default: /* RTE_ETH_MQ_TX_VMDQ_ONLY or RTE_ETH_MQ_TX_NONE */
                        dev->data->dev_conf.txmode.mq_mode =
-                               ETH_MQ_TX_VMDQ_ONLY;
+                               RTE_ETH_MQ_TX_VMDQ_ONLY;
                        break;
                }
 
@@ -1409,13 +1414,13 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
                        return -EINVAL;
                }
        } else {
-               if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+               if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB_RSS) {
                        PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
                                          " not supported.");
                        return -EINVAL;
                }
                /* check configuration for vmdq+dcb mode */
-               if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+               if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_conf *conf;
 
                        if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -1424,15 +1429,15 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
                                return -EINVAL;
                        }
                        conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
-                       if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-                              conf->nb_queue_pools == ETH_32_POOLS)) {
+                       if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+                              conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
                                PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
                                                " nb_queue_pools must be %d or %d.",
-                                               ETH_16_POOLS, ETH_32_POOLS);
+                                               RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
                                return -EINVAL;
                        }
                }
-               if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+               if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_DCB) {
                        const struct rte_eth_vmdq_dcb_tx_conf *conf;
 
                        if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
@@ -1441,39 +1446,39 @@ txgbe_check_mq_mode(struct rte_eth_dev *dev)
                                return -EINVAL;
                        }
                        conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
-                       if (!(conf->nb_queue_pools == ETH_16_POOLS ||
-                              conf->nb_queue_pools == ETH_32_POOLS)) {
+                       if (!(conf->nb_queue_pools == RTE_ETH_16_POOLS ||
+                              conf->nb_queue_pools == RTE_ETH_32_POOLS)) {
                                PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
                                                " nb_queue_pools != %d and"
                                                " nb_queue_pools != %d.",
-                                               ETH_16_POOLS, ETH_32_POOLS);
+                                               RTE_ETH_16_POOLS, RTE_ETH_32_POOLS);
                                return -EINVAL;
                        }
                }
 
                /* For DCB mode check our configuration before we go further */
-               if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+               if (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_DCB) {
                        const struct rte_eth_dcb_rx_conf *conf;
 
                        conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
-                       if (!(conf->nb_tcs == ETH_4_TCS ||
-                              conf->nb_tcs == ETH_8_TCS)) {
+                       if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+                              conf->nb_tcs == RTE_ETH_8_TCS)) {
                                PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
                                                " and nb_tcs != %d.",
-                                               ETH_4_TCS, ETH_8_TCS);
+                                               RTE_ETH_4_TCS, RTE_ETH_8_TCS);
                                return -EINVAL;
                        }
                }
 
-               if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+               if (dev_conf->txmode.mq_mode == RTE_ETH_MQ_TX_DCB) {
                        const struct rte_eth_dcb_tx_conf *conf;
 
                        conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
-                       if (!(conf->nb_tcs == ETH_4_TCS ||
-                              conf->nb_tcs == ETH_8_TCS)) {
+                       if (!(conf->nb_tcs == RTE_ETH_4_TCS ||
+                              conf->nb_tcs == RTE_ETH_8_TCS)) {
                                PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
                                                " and nb_tcs != %d.",
-                                               ETH_4_TCS, ETH_8_TCS);
+                                               RTE_ETH_4_TCS, RTE_ETH_8_TCS);
                                return -EINVAL;
                        }
                }
@@ -1490,8 +1495,8 @@ txgbe_dev_configure(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
 
-       if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-               dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+       if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+               dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
        /* multiple queue mode checking */
        ret  = txgbe_check_mq_mode(dev);
@@ -1615,7 +1620,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
        struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
        struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        uint32_t intr_vector = 0;
        int err;
        bool link_up = false, negotiate = 0;
@@ -1629,17 +1634,6 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
 
-       /* TXGBE devices don't support:
-        *    - half duplex (checked afterwards for valid speeds)
-        *    - fixed speed: TODO implement
-        */
-       if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
-               PMD_INIT_LOG(ERR,
-               "Invalid link_speeds for port %u, fix speed not supported",
-                               dev->data->port_id);
-               return -EINVAL;
-       }
-
        /* Stop the link setup handler before resetting the HW. */
        rte_eal_alarm_cancel(txgbe_dev_setup_link_alarm_handler, dev);
 
@@ -1660,6 +1654,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
                return -1;
        hw->mac.start_hw(hw);
        hw->mac.get_link_status = true;
+       hw->dev_start = true;
 
        /* configure PF module if SRIOV enabled */
        txgbe_pf_host_configure(dev);
@@ -1675,17 +1670,14 @@ txgbe_dev_start(struct rte_eth_dev *dev)
                        return -1;
        }
 
-       if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
-               intr_handle->intr_vec =
-                       rte_zmalloc("intr_vec",
-                                   dev->data->nb_rx_queues * sizeof(int), 0);
-               if (intr_handle->intr_vec == NULL) {
+       if (rte_intr_dp_is_en(intr_handle)) {
+               if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
+                                                  dev->data->nb_rx_queues)) {
                        PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
                                     " intr_vec", dev->data->nb_rx_queues);
                        return -ENOMEM;
                }
        }
-
        /* configure msix for sleep until rx interrupt */
        txgbe_configure_msix(dev);
 
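Since rte_intr_handle is now opaque, per-queue vector bookkeeping goes through accessors instead of direct member access. A small sketch of the pattern used in this hunk, assuming event fds were already enabled on the handle (map_rx_vectors is an illustrative name):

#include <errno.h>
#include <stdint.h>
#include <rte_interrupts.h>

static int
map_rx_vectors(struct rte_intr_handle *intr_handle, uint16_t nb_rx_queues)
{
        uint16_t q;

        /* replaces the old rte_zmalloc() into intr_handle->intr_vec */
        if (rte_intr_vec_list_alloc(intr_handle, "intr_vec", nb_rx_queues))
                return -ENOMEM;

        /* 1:1 queue-to-vector mapping, leaving vector 0 for misc events */
        for (q = 0; q < nb_rx_queues; q++)
                rte_intr_vec_list_index_set(intr_handle, q, q + 1);

        return 0;
}

The matching teardown is rte_intr_vec_list_free(), as used in txgbe_dev_stop() further down.
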
@@ -1699,15 +1691,15 @@ txgbe_dev_start(struct rte_eth_dev *dev)
                goto error;
        }
 
-       mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
-               ETH_VLAN_EXTEND_MASK;
+       mask = RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
+               RTE_ETH_VLAN_EXTEND_MASK;
        err = txgbe_vlan_offload_config(dev, mask);
        if (err) {
                PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
                goto error;
        }
 
-       if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+       if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_ONLY) {
                /* Enable vlan filtering for VMDq */
                txgbe_vmdq_vlan_hw_filter_enable(dev);
        }
@@ -1768,30 +1760,30 @@ txgbe_dev_start(struct rte_eth_dev *dev)
        if (err)
                goto error;
 
-       allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
-                       ETH_LINK_SPEED_10G;
+       allowed_speeds = RTE_ETH_LINK_SPEED_100M | RTE_ETH_LINK_SPEED_1G |
+                       RTE_ETH_LINK_SPEED_10G;
 
        link_speeds = &dev->data->dev_conf.link_speeds;
-       if (*link_speeds & ~allowed_speeds) {
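+       /* shift out bit 0 (RTE_ETH_LINK_SPEED_FIXED) so a fixed-speed
+        * request is no longer rejected as an unsupported speed
+        */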
+       if (((*link_speeds) >> 1) & ~(allowed_speeds >> 1)) {
                PMD_INIT_LOG(ERR, "Invalid link setting");
                goto error;
        }
 
        speed = 0x0;
-       if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
+       if (*link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
                speed = (TXGBE_LINK_SPEED_100M_FULL |
                         TXGBE_LINK_SPEED_1GB_FULL |
                         TXGBE_LINK_SPEED_10GB_FULL);
        } else {
-               if (*link_speeds & ETH_LINK_SPEED_10G)
+               if (*link_speeds & RTE_ETH_LINK_SPEED_10G)
                        speed |= TXGBE_LINK_SPEED_10GB_FULL;
-               if (*link_speeds & ETH_LINK_SPEED_5G)
+               if (*link_speeds & RTE_ETH_LINK_SPEED_5G)
                        speed |= TXGBE_LINK_SPEED_5GB_FULL;
-               if (*link_speeds & ETH_LINK_SPEED_2_5G)
+               if (*link_speeds & RTE_ETH_LINK_SPEED_2_5G)
                        speed |= TXGBE_LINK_SPEED_2_5GB_FULL;
-               if (*link_speeds & ETH_LINK_SPEED_1G)
+               if (*link_speeds & RTE_ETH_LINK_SPEED_1G)
                        speed |= TXGBE_LINK_SPEED_1GB_FULL;
-               if (*link_speeds & ETH_LINK_SPEED_100M)
+               if (*link_speeds & RTE_ETH_LINK_SPEED_100M)
                        speed |= TXGBE_LINK_SPEED_100M_FULL;
        }
 
@@ -1866,7 +1858,7 @@ txgbe_dev_stop(struct rte_eth_dev *dev)
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        int vf;
        struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
 
@@ -1916,10 +1908,7 @@ txgbe_dev_stop(struct rte_eth_dev *dev)
 
        /* Clean datapath event and queue/vec mapping */
        rte_intr_efd_disable(intr_handle);
-       if (intr_handle->intr_vec != NULL) {
-               rte_free(intr_handle->intr_vec);
-               intr_handle->intr_vec = NULL;
-       }
+       rte_intr_vec_list_free(intr_handle);
 
        /* reset hierarchy commit */
        tm_conf->committed = false;
@@ -1929,6 +1918,7 @@ txgbe_dev_stop(struct rte_eth_dev *dev)
 
        hw->adapter_stopped = true;
        dev->data->dev_started = 0;
+       hw->dev_start = false;
 
        return 0;
 }
@@ -1981,7 +1971,7 @@ txgbe_dev_close(struct rte_eth_dev *dev)
 {
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        int retries = 0;
        int ret;
 
@@ -2445,8 +2435,8 @@ static int txgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
 }
 
 static int txgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
-       struct rte_eth_xstat_name *xstats_names,
        const uint64_t *ids,
+       struct rte_eth_xstat_name *xstats_names,
        unsigned int limit)
 {
        unsigned int i;
@@ -2605,8 +2595,9 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->max_mac_addrs = hw->mac.num_rar_entries;
        dev_info->max_hash_mac_addrs = TXGBE_VMDQ_NUM_UC_MAC;
        dev_info->max_vfs = pci_dev->max_vfs;
-       dev_info->max_vmdq_pools = ETH_64_POOLS;
+       dev_info->max_vmdq_pools = RTE_ETH_64_POOLS;
        dev_info->vmdq_queue_num = dev_info->max_rx_queues;
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
        dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
        dev_info->rx_offload_capa = (txgbe_get_rx_port_offloads(dev) |
                                     dev_info->rx_queue_offload_capa);
@@ -2638,11 +2629,11 @@ txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->tx_desc_lim = tx_desc_lim;
 
        dev_info->hash_key_size = TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
-       dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+       dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
        dev_info->flow_type_rss_offloads = TXGBE_RSS_OFFLOAD_ALL;
 
-       dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
-       dev_info->speed_capa |= ETH_LINK_SPEED_100M;
+       dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_10G;
+       dev_info->speed_capa |= RTE_ETH_LINK_SPEED_100M;
 
        /* Driver-preferred Rx/Tx parameters */
        dev_info->default_rxportconf.burst_size = 32;
@@ -2699,10 +2690,11 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
        int wait = 1;
 
        memset(&link, 0, sizeof(link));
-       link.link_status = ETH_LINK_DOWN;
-       link.link_speed = ETH_SPEED_NUM_NONE;
-       link.link_duplex = ETH_LINK_HALF_DUPLEX;
-       link.link_autoneg = ETH_LINK_AUTONEG;
+       link.link_status = RTE_ETH_LINK_DOWN;
+       link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+       link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
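+       /* autoneg is reported off when a fixed speed was requested */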
+       link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+                       RTE_ETH_LINK_SPEED_FIXED);
 
        hw->mac.get_link_status = true;
 
@@ -2716,8 +2708,8 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
        err = hw->mac.check_link(hw, &link_speed, &link_up, wait);
 
        if (err != 0) {
-               link.link_speed = ETH_SPEED_NUM_100M;
-               link.link_duplex = ETH_LINK_FULL_DUPLEX;
+               link.link_speed = RTE_ETH_SPEED_NUM_100M;
+               link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
                return rte_eth_linkstatus_set(dev, &link);
        }
 
@@ -2731,37 +2723,39 @@ txgbe_dev_link_update_share(struct rte_eth_dev *dev,
                                txgbe_dev_setup_link_alarm_handler, dev);
                }
                return rte_eth_linkstatus_set(dev, &link);
+       } else if (!hw->dev_start) {
+               return rte_eth_linkstatus_set(dev, &link);
        }
 
        intr->flags &= ~TXGBE_FLAG_NEED_LINK_CONFIG;
-       link.link_status = ETH_LINK_UP;
-       link.link_duplex = ETH_LINK_FULL_DUPLEX;
+       link.link_status = RTE_ETH_LINK_UP;
+       link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
        switch (link_speed) {
        default:
        case TXGBE_LINK_SPEED_UNKNOWN:
-               link.link_duplex = ETH_LINK_FULL_DUPLEX;
-               link.link_speed = ETH_SPEED_NUM_100M;
+               link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+               link.link_speed = RTE_ETH_SPEED_NUM_100M;
                break;
 
        case TXGBE_LINK_SPEED_100M_FULL:
-               link.link_speed = ETH_SPEED_NUM_100M;
+               link.link_speed = RTE_ETH_SPEED_NUM_100M;
                break;
 
        case TXGBE_LINK_SPEED_1GB_FULL:
-               link.link_speed = ETH_SPEED_NUM_1G;
+               link.link_speed = RTE_ETH_SPEED_NUM_1G;
                break;
 
        case TXGBE_LINK_SPEED_2_5GB_FULL:
-               link.link_speed = ETH_SPEED_NUM_2_5G;
+               link.link_speed = RTE_ETH_SPEED_NUM_2_5G;
                break;
 
        case TXGBE_LINK_SPEED_5GB_FULL:
-               link.link_speed = ETH_SPEED_NUM_5G;
+               link.link_speed = RTE_ETH_SPEED_NUM_5G;
                break;
 
        case TXGBE_LINK_SPEED_10GB_FULL:
-               link.link_speed = ETH_SPEED_NUM_10G;
+               link.link_speed = RTE_ETH_SPEED_NUM_10G;
                break;
        }
 
@@ -2930,12 +2924,17 @@ txgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
  *  - On failure, a negative value.
  */
 static int
-txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
+txgbe_dev_interrupt_get_status(struct rte_eth_dev *dev,
+                               struct rte_intr_handle *intr_handle)
 {
        uint32_t eicr;
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
 
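+       /* ack the controller (PX_INTA) unless UIO or VFIO MSI-X is in use */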
+       if (rte_intr_type_get(intr_handle) != RTE_INTR_HANDLE_UIO &&
+               rte_intr_type_get(intr_handle) != RTE_INTR_HANDLE_VFIO_MSIX)
+               wr32(hw, TXGBE_PX_INTA, 1);
+
        /* clear all cause mask */
        txgbe_disable_intr(hw);
 
@@ -2986,7 +2985,7 @@ txgbe_dev_link_status_print(struct rte_eth_dev *dev)
                PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
                                        (int)(dev->data->port_id),
                                        (unsigned int)link.link_speed,
-                       link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+                       link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
                                        "full-duplex" : "half-duplex");
        } else {
                PMD_INIT_LOG(INFO, " Port %d: Link Down",
@@ -3099,7 +3098,7 @@ txgbe_dev_interrupt_delayed_handler(void *param)
 {
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        uint32_t eicr;
@@ -3157,7 +3156,7 @@ txgbe_dev_interrupt_handler(void *param)
 {
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 
-       txgbe_dev_interrupt_get_status(dev);
+       txgbe_dev_interrupt_get_status(dev, dev->intr_handle);
        txgbe_dev_interrupt_action(dev, dev->intr_handle);
 }
 
@@ -3217,13 +3216,13 @@ txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
                tx_pause = 0;
 
        if (rx_pause && tx_pause)
-               fc_conf->mode = RTE_FC_FULL;
+               fc_conf->mode = RTE_ETH_FC_FULL;
        else if (rx_pause)
-               fc_conf->mode = RTE_FC_RX_PAUSE;
+               fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
        else if (tx_pause)
-               fc_conf->mode = RTE_FC_TX_PAUSE;
+               fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
        else
-               fc_conf->mode = RTE_FC_NONE;
+               fc_conf->mode = RTE_ETH_FC_NONE;
 
        return 0;
 }
@@ -3355,16 +3354,16 @@ txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
                return -ENOTSUP;
        }
 
-       if (reta_size != ETH_RSS_RETA_SIZE_128) {
+       if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
                PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
                        "(%d) doesn't match the number the hardware can support "
-                       "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+                       "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
                return -EINVAL;
        }
 
        for (i = 0; i < reta_size; i += 4) {
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
                mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
                if (!mask)
                        continue;
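
The idx/shift split above mirrors how an application fills struct rte_eth_rss_reta_entry64. A minimal sketch under the renamed macros, spreading the 128-entry table over four queues (spread_rss_over_4_queues and the queue count are illustrative):

#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>

static int
spread_rss_over_4_queues(uint16_t port_id)
{
        struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_128 /
                                                  RTE_ETH_RETA_GROUP_SIZE];
        uint16_t i, idx, shift;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
                idx = i / RTE_ETH_RETA_GROUP_SIZE;   /* same split as above */
                shift = i % RTE_ETH_RETA_GROUP_SIZE;
                reta_conf[idx].mask |= UINT64_C(1) << shift;
                reta_conf[idx].reta[shift] = i % 4;  /* queues 0..3 round-robin */
        }
        return rte_eth_dev_rss_reta_update(port_id, reta_conf,
                                           RTE_ETH_RSS_RETA_SIZE_128);
}
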
@@ -3396,16 +3395,16 @@ txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
 
        PMD_INIT_FUNC_TRACE();
 
-       if (reta_size != ETH_RSS_RETA_SIZE_128) {
+       if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
                PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
                        "(%d) doesn't match the number the hardware can support "
-                       "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+                       "(%d)", reta_size, RTE_ETH_RSS_RETA_SIZE_128);
                return -EINVAL;
        }
 
        for (i = 0; i < reta_size; i += 4) {
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
                mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
                if (!mask)
                        continue;
@@ -3455,32 +3454,19 @@ static int
 txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
-       struct rte_eth_dev_info dev_info;
        uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
        struct rte_eth_dev_data *dev_data = dev->data;
-       int ret;
-
-       ret = txgbe_dev_info_get(dev, &dev_info);
-       if (ret != 0)
-               return ret;
-
-       /* check that mtu is within the allowed range */
-       if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
-               return -EINVAL;
 
        /* If the device is started, refuse an MTU that requires the support
         * of scattered packets when this feature has not been enabled before.
         */
        if (dev_data->dev_started && !dev_data->scattered_rx &&
-           (frame_size + 2 * TXGBE_VLAN_TAG_SIZE >
+           (frame_size + 2 * RTE_VLAN_HLEN >
             dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
                PMD_INIT_LOG(ERR, "Stop port first.");
                return -EINVAL;
        }
 
-       /* update max frame size */
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
        if (hw->mode)
                wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
                        TXGBE_FRAME_SIZE_MAX);
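
For concreteness, the arithmetic behind the scattered-Rx check above, assuming the standard macro values (RTE_ETHER_HDR_LEN is 14, RTE_ETHER_CRC_LEN is 4, and RTE_VLAN_HLEN, the macro this series adds, is 4); rx_buf_needed is an illustrative helper:

#include <rte_ether.h>

/* MTU 1500: 1500 + 14 + 4 = 1518 bytes of frame; room for a stacked pair
 * of VLAN tags adds 2 * 4 = 8, so the Rx buffer minus RTE_PKTMBUF_HEADROOM
 * must cover 1526 bytes or scattered Rx is required.
 */
static inline uint32_t
rx_buf_needed(uint16_t mtu)
{
        return mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
               2 * RTE_VLAN_HLEN;
}
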
@@ -3585,12 +3571,12 @@ txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
                return -ENOTSUP;
 
        if (on) {
-               for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+               for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
                        uta_info->uta_shadow[i] = ~0;
                        wr32(hw, TXGBE_UCADDRTBL(i), ~0);
                }
        } else {
-               for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+               for (i = 0; i < RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
                        uta_info->uta_shadow[i] = 0;
                        wr32(hw, TXGBE_UCADDRTBL(i), 0);
                }
@@ -3614,15 +3600,15 @@ txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
 {
        uint32_t new_val = orig_val;
 
-       if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+       if (rx_mask & RTE_ETH_VMDQ_ACCEPT_UNTAG)
                new_val |= TXGBE_POOLETHCTL_UTA;
-       if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+       if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
                new_val |= TXGBE_POOLETHCTL_MCHA;
-       if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+       if (rx_mask & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
                new_val |= TXGBE_POOLETHCTL_UCHA;
-       if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+       if (rx_mask & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
                new_val |= TXGBE_POOLETHCTL_BCA;
-       if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+       if (rx_mask & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
                new_val |= TXGBE_POOLETHCTL_MCP;
 
        return new_val;
@@ -3632,7 +3618,7 @@ static int
 txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        uint32_t mask;
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
 
@@ -3714,7 +3700,7 @@ static void
 txgbe_configure_msix(struct rte_eth_dev *dev)
 {
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        uint32_t queue_id, base = TXGBE_MISC_VEC_ID;
        uint32_t vec = TXGBE_MISC_VEC_ID;
@@ -3748,8 +3734,10 @@ txgbe_configure_msix(struct rte_eth_dev *dev)
                        queue_id++) {
                        /* by default, 1:1 mapping */
                        txgbe_set_ivar_map(hw, 0, queue_id, vec);
-                       intr_handle->intr_vec[queue_id] = vec;
-                       if (vec < base + intr_handle->nb_efd - 1)
+                       rte_intr_vec_list_index_set(intr_handle,
+                                                          queue_id, vec);
+                       if (vec < base + rte_intr_nb_efd_get(intr_handle)
+                           - 1)
                                vec++;
                }
 
@@ -4273,15 +4261,15 @@ txgbe_start_timecounters(struct rte_eth_dev *dev)
        rte_eth_linkstatus_get(dev, &link);
 
        switch (link.link_speed) {
-       case ETH_SPEED_NUM_100M:
+       case RTE_ETH_SPEED_NUM_100M:
                incval = TXGBE_INCVAL_100;
                shift = TXGBE_INCVAL_SHIFT_100;
                break;
-       case ETH_SPEED_NUM_1G:
+       case RTE_ETH_SPEED_NUM_1G:
                incval = TXGBE_INCVAL_1GB;
                shift = TXGBE_INCVAL_SHIFT_1GB;
                break;
-       case ETH_SPEED_NUM_10G:
+       case RTE_ETH_SPEED_NUM_10G:
        default:
                incval = TXGBE_INCVAL_10GB;
                shift = TXGBE_INCVAL_SHIFT_10GB;
@@ -4637,7 +4625,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
        uint8_t nb_tcs;
        uint8_t i, j;
 
-       if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+       if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_DCB_FLAG)
                dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
        else
                dcb_info->nb_tcs = 1;
@@ -4648,7 +4636,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
        if (dcb_config->vt_mode) { /* vt is enabled */
                struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
                                &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
-               for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+               for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
                        dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
                if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
                        for (j = 0; j < nb_tcs; j++) {
@@ -4672,9 +4660,9 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
        } else { /* vt is disabled */
                struct rte_eth_dcb_rx_conf *rx_conf =
                                &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
-               for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+               for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
                        dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
-               if (dcb_info->nb_tcs == ETH_4_TCS) {
+               if (dcb_info->nb_tcs == RTE_ETH_4_TCS) {
                        for (i = 0; i < dcb_info->nb_tcs; i++) {
                                dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
                                dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -4687,7 +4675,7 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
                        dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
                        dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
                        dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
-               } else if (dcb_info->nb_tcs == ETH_8_TCS) {
+               } else if (dcb_info->nb_tcs == RTE_ETH_8_TCS) {
                        for (i = 0; i < dcb_info->nb_tcs; i++) {
                                dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
                                dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
@@ -4917,7 +4905,7 @@ txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
        }
 
        switch (l2_tunnel->l2_tunnel_type) {
-       case RTE_L2_TUNNEL_TYPE_E_TAG:
+       case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
                ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
                break;
        default:
@@ -4948,7 +4936,7 @@ txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
                return ret;
 
        switch (l2_tunnel->l2_tunnel_type) {
-       case RTE_L2_TUNNEL_TYPE_E_TAG:
+       case RTE_ETH_L2_TUNNEL_TYPE_E_TAG:
                ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
                break;
        default:
@@ -4988,16 +4976,15 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                return -EINVAL;
 
        switch (udp_tunnel->prot_type) {
-       case RTE_TUNNEL_TYPE_VXLAN:
+       case RTE_ETH_TUNNEL_TYPE_VXLAN:
                if (udp_tunnel->udp_port == 0) {
                        PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
                        ret = -EINVAL;
                        break;
                }
                wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
-               wr32(hw, TXGBE_VXLANPORTGPE, udp_tunnel->udp_port);
                break;
-       case RTE_TUNNEL_TYPE_GENEVE:
+       case RTE_ETH_TUNNEL_TYPE_GENEVE:
                if (udp_tunnel->udp_port == 0) {
                        PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed.");
                        ret = -EINVAL;
@@ -5005,7 +4992,7 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                }
                wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
                break;
-       case RTE_TUNNEL_TYPE_TEREDO:
+       case RTE_ETH_TUNNEL_TYPE_TEREDO:
                if (udp_tunnel->udp_port == 0) {
                        PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed.");
                        ret = -EINVAL;
@@ -5013,6 +5000,14 @@ txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
                }
                wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
                break;
+       case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
+               if (udp_tunnel->udp_port == 0) {
+                       PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_VXLANPORTGPE, udp_tunnel->udp_port);
+               break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
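
The new RTE_ETH_TUNNEL_TYPE_VXLAN_GPE case gives the GPE port its own register instead of shadowing the plain VXLAN one. A minimal application-side sketch, assuming a valid port_id (4790 is the IANA-assigned VXLAN-GPE port; add_vxlan_gpe_port is an illustrative name):

#include <rte_ethdev.h>

static int
add_vxlan_gpe_port(uint16_t port_id)
{
        struct rte_eth_udp_tunnel tunnel = {
                .udp_port = 4790,
                .prot_type = RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
        };

        /* reaches txgbe_dev_udp_tunnel_port_add() above and programs
         * TXGBE_VXLANPORTGPE rather than TXGBE_VXLANPORT
         */
        return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}
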
@@ -5037,7 +5032,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                return -EINVAL;
 
        switch (udp_tunnel->prot_type) {
-       case RTE_TUNNEL_TYPE_VXLAN:
+       case RTE_ETH_TUNNEL_TYPE_VXLAN:
                cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
                if (cur_port != udp_tunnel->udp_port) {
                        PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5046,9 +5041,8 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                        break;
                }
                wr32(hw, TXGBE_VXLANPORT, 0);
-               wr32(hw, TXGBE_VXLANPORTGPE, 0);
                break;
-       case RTE_TUNNEL_TYPE_GENEVE:
+       case RTE_ETH_TUNNEL_TYPE_GENEVE:
                cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
                if (cur_port != udp_tunnel->udp_port) {
                        PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5058,7 +5052,7 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                }
                wr32(hw, TXGBE_GENEVEPORT, 0);
                break;
-       case RTE_TUNNEL_TYPE_TEREDO:
+       case RTE_ETH_TUNNEL_TYPE_TEREDO:
                cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
                if (cur_port != udp_tunnel->udp_port) {
                        PMD_DRV_LOG(ERR, "Port %u does not exist.",
@@ -5068,6 +5062,16 @@ txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
                }
                wr32(hw, TXGBE_TEREDOPORT, 0);
                break;
+       case RTE_ETH_TUNNEL_TYPE_VXLAN_GPE:
+               cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORTGPE);
+               if (cur_port != udp_tunnel->udp_port) {
+                       PMD_DRV_LOG(ERR, "Port %u does not exist.",
+                                       udp_tunnel->udp_port);
+                       ret = -EINVAL;
+                       break;
+               }
+               wr32(hw, TXGBE_VXLANPORTGPE, 0);
+               break;
        default:
                PMD_DRV_LOG(ERR, "Invalid tunnel type");
                ret = -EINVAL;
@@ -5339,17 +5343,17 @@ RTE_PMD_REGISTER_PARAM_STRING(net_txgbe,
                              TXGBE_DEVARG_FFE_PRE "=<uint16>"
                              TXGBE_DEVARG_FFE_POST "=<uint16>");
 
-RTE_LOG_REGISTER(txgbe_logtype_init, pmd.net.txgbe.init, NOTICE);
-RTE_LOG_REGISTER(txgbe_logtype_driver, pmd.net.txgbe.driver, NOTICE);
-RTE_LOG_REGISTER(txgbe_logtype_bp, pmd.net.txgbe.bp, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_init, init, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_driver, driver, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_bp, bp, NOTICE);
 
 #ifdef RTE_LIBRTE_TXGBE_DEBUG_RX
-       RTE_LOG_REGISTER(txgbe_logtype_rx, pmd.net.txgbe.rx, DEBUG);
+       RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_rx, rx, DEBUG);
 #endif
 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX
-       RTE_LOG_REGISTER(txgbe_logtype_tx, pmd.net.txgbe.tx, DEBUG);
+       RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_tx, tx, DEBUG);
 #endif
 
 #ifdef RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
-       RTE_LOG_REGISTER(txgbe_logtype_tx_free, pmd.net.txgbe.tx_free, DEBUG);
+       RTE_LOG_REGISTER_SUFFIX(txgbe_logtype_tx_free, tx_free, DEBUG);
 #endif