drivers/net: fix exposing internal headers
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 988a68f..05a7dd8 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -24,9 +24,6 @@
 #include <rte_ip.h>
 #include <rte_net.h>
 
-#include "iavf_log.h"
-#include "base/iavf_prototype.h"
-#include "base/iavf_type.h"
 #include "iavf.h"
 #include "iavf_rxtx.h"
 
@@ -92,71 +89,70 @@ check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
        return 0;
 }
 
-#ifdef RTE_LIBRTE_AVF_INC_VECTOR
 static inline bool
-check_rx_vec_allow(struct avf_rx_queue *rxq)
+check_rx_vec_allow(struct iavf_rx_queue *rxq)
 {
-       if (rxq->rx_free_thresh >= AVF_VPMD_RX_MAX_BURST &&
+       if (rxq->rx_free_thresh >= IAVF_VPMD_RX_MAX_BURST &&
            rxq->nb_rx_desc % rxq->rx_free_thresh == 0) {
                PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq.");
-               return TRUE;
+               return true;
        }
 
        PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq.");
-       return FALSE;
+       return false;
 }
 
 static inline bool
-check_tx_vec_allow(struct avf_tx_queue *txq)
+check_tx_vec_allow(struct iavf_tx_queue *txq)
 {
-       if (!(txq->offloads & AVF_NO_VECTOR_FLAGS) &&
-           txq->rs_thresh >= AVF_VPMD_TX_MAX_BURST &&
-           txq->rs_thresh <= AVF_VPMD_TX_MAX_FREE_BUF) {
+       if (!(txq->offloads & IAVF_NO_VECTOR_FLAGS) &&
+           txq->rs_thresh >= IAVF_VPMD_TX_MAX_BURST &&
+           txq->rs_thresh <= IAVF_VPMD_TX_MAX_FREE_BUF) {
                PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq.");
-               return TRUE;
+               return true;
        }
        PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq.");
-       return FALSE;
+       return false;
 }
-#endif
 
 static inline bool
-check_rx_bulk_allow(struct avf_rx_queue *rxq)
+check_rx_bulk_allow(struct iavf_rx_queue *rxq)
 {
-       int ret = TRUE;
+       int ret = true;
 
-       if (!(rxq->rx_free_thresh >= AVF_RX_MAX_BURST)) {
+       if (!(rxq->rx_free_thresh >= IAVF_RX_MAX_BURST)) {
                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
                             "rxq->rx_free_thresh=%d, "
-                            "AVF_RX_MAX_BURST=%d",
-                            rxq->rx_free_thresh, AVF_RX_MAX_BURST);
-               ret = FALSE;
+                            "IAVF_RX_MAX_BURST=%d",
+                            rxq->rx_free_thresh, IAVF_RX_MAX_BURST);
+               ret = false;
        } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
                             "rxq->nb_rx_desc=%d, "
                             "rxq->rx_free_thresh=%d",
                             rxq->nb_rx_desc, rxq->rx_free_thresh);
-               ret = FALSE;
+               ret = false;
        }
        return ret;
 }
 
 static inline void
-reset_rx_queue(struct avf_rx_queue *rxq)
+reset_rx_queue(struct iavf_rx_queue *rxq)
 {
-       uint16_t len, i;
+       uint16_t len;
+       uint32_t i;
 
        if (!rxq)
                return;
 
-       len = rxq->nb_rx_desc + AVF_RX_MAX_BURST;
+       len = rxq->nb_rx_desc + IAVF_RX_MAX_BURST;
 
-       for (i = 0; i < len * sizeof(union avf_rx_desc); i++)
+       for (i = 0; i < len * sizeof(union iavf_rx_desc); i++)
                ((volatile char *)rxq->rx_ring)[i] = 0;
 
        memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
 
-       for (i = 0; i < AVF_RX_MAX_BURST; i++)
+       for (i = 0; i < IAVF_RX_MAX_BURST; i++)
                rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
 
        /* for rx bulk */
@@ -171,10 +167,11 @@ reset_rx_queue(struct avf_rx_queue *rxq)
 }
 
 static inline void
-reset_tx_queue(struct avf_tx_queue *txq)
+reset_tx_queue(struct iavf_tx_queue *txq)
 {
-       struct avf_tx_entry *txe;
-       uint16_t i, prev, size;
+       struct iavf_tx_entry *txe;
+       uint32_t i, size;
+       uint16_t prev;
 
        if (!txq) {
                PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
@@ -182,14 +179,14 @@ reset_tx_queue(struct avf_tx_queue *txq)
        }
 
        txe = txq->sw_ring;
-       size = sizeof(struct avf_tx_desc) * txq->nb_tx_desc;
+       size = sizeof(struct iavf_tx_desc) * txq->nb_tx_desc;
        for (i = 0; i < size; i++)
                ((volatile char *)txq->tx_ring)[i] = 0;
 
        prev = (uint16_t)(txq->nb_tx_desc - 1);
        for (i = 0; i < txq->nb_tx_desc; i++) {
                txq->tx_ring[i].cmd_type_offset_bsz =
-                       rte_cpu_to_le_64(AVF_TX_DESC_DTYPE_DESC_DONE);
+                       rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
                txe[i].mbuf =  NULL;
                txe[i].last_id = i;
                txe[prev].next_id = i;
@@ -207,9 +204,9 @@ reset_tx_queue(struct avf_tx_queue *txq)
 }
 
 static int
-alloc_rxq_mbufs(struct avf_rx_queue *rxq)
+alloc_rxq_mbufs(struct iavf_rx_queue *rxq)
 {
-       volatile union avf_rx_desc *rxd;
+       volatile union iavf_rx_desc *rxd;
        struct rte_mbuf *mbuf = NULL;
        uint64_t dma_addr;
        uint16_t i;
@@ -233,7 +230,7 @@ alloc_rxq_mbufs(struct avf_rx_queue *rxq)
                rxd = &rxq->rx_ring[i];
                rxd->read.pkt_addr = dma_addr;
                rxd->read.hdr_addr = 0;
-#ifndef RTE_LIBRTE_AVF_16BYTE_RX_DESC
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
                rxd->read.rsvd1 = 0;
                rxd->read.rsvd2 = 0;
 #endif
@@ -245,7 +242,7 @@ alloc_rxq_mbufs(struct avf_rx_queue *rxq)
 }
 
 static inline void
-release_rxq_mbufs(struct avf_rx_queue *rxq)
+release_rxq_mbufs(struct iavf_rx_queue *rxq)
 {
        uint16_t i;
 
@@ -272,7 +269,7 @@ release_rxq_mbufs(struct avf_rx_queue *rxq)
 }
 
 static inline void
-release_txq_mbufs(struct avf_tx_queue *txq)
+release_txq_mbufs(struct iavf_tx_queue *txq)
 {
        uint16_t i;
 
@@ -289,24 +286,27 @@ release_txq_mbufs(struct avf_tx_queue *txq)
        }
 }
 
-static const struct avf_rxq_ops def_rxq_ops = {
+static const struct iavf_rxq_ops def_rxq_ops = {
        .release_mbufs = release_rxq_mbufs,
 };
 
-static const struct avf_txq_ops def_txq_ops = {
+static const struct iavf_txq_ops def_txq_ops = {
        .release_mbufs = release_txq_mbufs,
 };
 
 int
-avf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                       uint16_t nb_desc, unsigned int socket_id,
                       const struct rte_eth_rxconf *rx_conf,
                       struct rte_mempool *mp)
 {
-       struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct avf_adapter *ad =
-               AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-       struct avf_rx_queue *rxq;
+       struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct iavf_adapter *ad =
+               IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       struct iavf_info *vf =
+               IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct iavf_vsi *vsi = &vf->vsi;
+       struct iavf_rx_queue *rxq;
        const struct rte_memzone *mz;
        uint32_t ring_size;
        uint16_t len;
@@ -314,9 +314,9 @@ avf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
        PMD_INIT_FUNC_TRACE();
 
-       if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
-           nb_desc > AVF_MAX_RING_DESC ||
-           nb_desc < AVF_MIN_RING_DESC) {
+       if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
+           nb_desc > IAVF_MAX_RING_DESC ||
+           nb_desc < IAVF_MIN_RING_DESC) {
                PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is "
                             "invalid", nb_desc);
                return -EINVAL;
@@ -324,20 +324,20 @@ avf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
        /* Check free threshold */
        rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
-                        AVF_DEFAULT_RX_FREE_THRESH :
+                        IAVF_DEFAULT_RX_FREE_THRESH :
                         rx_conf->rx_free_thresh;
        if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
                return -EINVAL;
 
        /* Free memory if needed */
        if (dev->data->rx_queues[queue_idx]) {
-               avf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
+               iavf_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
                dev->data->rx_queues[queue_idx] = NULL;
        }
 
        /* Allocate the rx queue data structure */
-       rxq = rte_zmalloc_socket("avf rxq",
-                                sizeof(struct avf_rx_queue),
+       rxq = rte_zmalloc_socket("iavf rxq",
+                                sizeof(struct iavf_rx_queue),
                                 RTE_CACHE_LINE_SIZE,
                                 socket_id);
        if (!rxq) {
@@ -346,6 +346,14 @@ avf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                return -ENOMEM;
        }
 
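+       /* Use the flexible descriptor format when the PF advertises the
+        * VIRTCHNL flex Rx descriptor capability and supports the OVS
+        * profile; otherwise fall back to the legacy 32-byte descriptor.
+        */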
+       if (vf->vf_res->vf_cap_flags &
+           VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
+           vf->supported_rxdid & BIT(IAVF_RXDID_COMMS_OVS_1)) {
+               rxq->rxdid = IAVF_RXDID_COMMS_OVS_1;
+       } else {
+               rxq->rxdid = IAVF_RXDID_LEGACY_1;
+       }
+
        rxq->mp = mp;
        rxq->nb_rx_desc = nb_desc;
        rxq->rx_free_thresh = rx_free_thresh;
@@ -354,14 +362,15 @@ avf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        rxq->crc_len = 0; /* crc stripping by default */
        rxq->rx_deferred_start = rx_conf->rx_deferred_start;
        rxq->rx_hdr_len = 0;
+       rxq->vsi = vsi;
 
        len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
-       rxq->rx_buf_len = RTE_ALIGN(len, (1 << AVF_RXQ_CTX_DBUFF_SHIFT));
+       rxq->rx_buf_len = RTE_ALIGN(len, (1 << IAVF_RXQ_CTX_DBUFF_SHIFT));
 
        /* Allocate the software ring. */
-       len = nb_desc + AVF_RX_MAX_BURST;
+       len = nb_desc + IAVF_RX_MAX_BURST;
        rxq->sw_ring =
-               rte_zmalloc_socket("avf rx sw ring",
+               rte_zmalloc_socket("iavf rx sw ring",
                                   sizeof(struct rte_mbuf *) * len,
                                   RTE_CACHE_LINE_SIZE,
                                   socket_id);
@@ -374,11 +383,11 @@ avf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        /* Allocate the maximum number of RX ring hardware descriptors with
         * a little more to support bulk allocation.
         */
-       len = AVF_MAX_RING_DESC + AVF_RX_MAX_BURST;
-       ring_size = RTE_ALIGN(len * sizeof(union avf_rx_desc),
-                             AVF_DMA_MEM_ALIGN);
+       len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
+       ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
+                             IAVF_DMA_MEM_ALIGN);
        mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
-                                     ring_size, AVF_RING_BASE_ALIGN,
+                                     ring_size, IAVF_RING_BASE_ALIGN,
                                      socket_id);
        if (!mz) {
                PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
@@ -389,16 +398,16 @@ avf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        /* Zero all the descriptors in the ring. */
        memset(mz->addr, 0, ring_size);
        rxq->rx_ring_phys_addr = mz->iova;
-       rxq->rx_ring = (union avf_rx_desc *)mz->addr;
+       rxq->rx_ring = (union iavf_rx_desc *)mz->addr;
 
        rxq->mz = mz;
        reset_rx_queue(rxq);
-       rxq->q_set = TRUE;
+       rxq->q_set = true;
        dev->data->rx_queues[queue_idx] = rxq;
-       rxq->qrx_tail = hw->hw_addr + AVF_QRX_TAIL1(rxq->queue_id);
+       rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
        rxq->ops = &def_rxq_ops;
 
-       if (check_rx_bulk_allow(rxq) == TRUE) {
+       if (check_rx_bulk_allow(rxq) == true) {
                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
                             "satisfied. Rx Burst Bulk Alloc function will be "
                             "used on port=%d, queue=%d.",
@@ -411,22 +420,21 @@ avf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                ad->rx_bulk_alloc_allowed = false;
        }
 
-#ifdef RTE_LIBRTE_AVF_INC_VECTOR
-       if (check_rx_vec_allow(rxq) == FALSE)
+       if (check_rx_vec_allow(rxq) == false)
                ad->rx_vec_allowed = false;
-#endif
+
        return 0;
 }
 
 int
-avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
+iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
                       uint16_t queue_idx,
                       uint16_t nb_desc,
                       unsigned int socket_id,
                       const struct rte_eth_txconf *tx_conf)
 {
-       struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct avf_tx_queue *txq;
+       struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct iavf_tx_queue *txq;
        const struct rte_memzone *mz;
        uint32_t ring_size;
        uint16_t tx_rs_thresh, tx_free_thresh;
@@ -436,9 +444,9 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
-       if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
-           nb_desc > AVF_MAX_RING_DESC ||
-           nb_desc < AVF_MIN_RING_DESC) {
+       if (nb_desc % IAVF_ALIGN_RING_DESC != 0 ||
+           nb_desc > IAVF_MAX_RING_DESC ||
+           nb_desc < IAVF_MIN_RING_DESC) {
                PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is "
                            "invalid", nb_desc);
                return -EINVAL;
@@ -452,13 +460,13 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        /* Free memory if needed. */
        if (dev->data->tx_queues[queue_idx]) {
-               avf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
+               iavf_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
                dev->data->tx_queues[queue_idx] = NULL;
        }
 
        /* Allocate the TX queue data structure. */
-       txq = rte_zmalloc_socket("avf txq",
-                                sizeof(struct avf_tx_queue),
+       txq = rte_zmalloc_socket("iavf txq",
+                                sizeof(struct iavf_tx_queue),
                                 RTE_CACHE_LINE_SIZE,
                                 socket_id);
        if (!txq) {
@@ -477,8 +485,8 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        /* Allocate software ring */
        txq->sw_ring =
-               rte_zmalloc_socket("avf tx sw ring",
-                                  sizeof(struct avf_tx_entry) * nb_desc,
+               rte_zmalloc_socket("iavf tx sw ring",
+                                  sizeof(struct iavf_tx_entry) * nb_desc,
                                   RTE_CACHE_LINE_SIZE,
                                   socket_id);
        if (!txq->sw_ring) {
@@ -488,10 +496,10 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
        }
 
        /* Allocate TX hardware ring descriptors. */
-       ring_size = sizeof(struct avf_tx_desc) * AVF_MAX_RING_DESC;
-       ring_size = RTE_ALIGN(ring_size, AVF_DMA_MEM_ALIGN);
+       ring_size = sizeof(struct iavf_tx_desc) * IAVF_MAX_RING_DESC;
+       ring_size = RTE_ALIGN(ring_size, IAVF_DMA_MEM_ALIGN);
        mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
-                                     ring_size, AVF_RING_BASE_ALIGN,
+                                     ring_size, IAVF_RING_BASE_ALIGN,
                                      socket_id);
        if (!mz) {
                PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
@@ -500,33 +508,31 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
                return -ENOMEM;
        }
        txq->tx_ring_phys_addr = mz->iova;
-       txq->tx_ring = (struct avf_tx_desc *)mz->addr;
+       txq->tx_ring = (struct iavf_tx_desc *)mz->addr;
 
        txq->mz = mz;
        reset_tx_queue(txq);
-       txq->q_set = TRUE;
+       txq->q_set = true;
        dev->data->tx_queues[queue_idx] = txq;
-       txq->qtx_tail = hw->hw_addr + AVF_QTX_TAIL1(queue_idx);
+       txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
        txq->ops = &def_txq_ops;
 
-#ifdef RTE_LIBRTE_AVF_INC_VECTOR
-       if (check_tx_vec_allow(txq) == FALSE) {
-               struct avf_adapter *ad =
-                       AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       if (check_tx_vec_allow(txq) == false) {
+               struct iavf_adapter *ad =
+                       IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
                ad->tx_vec_allowed = false;
        }
-#endif
 
        return 0;
 }
 
 int
-avf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-       struct avf_adapter *adapter =
-               AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-       struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct avf_rx_queue *rxq;
+       struct iavf_adapter *adapter =
+               IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct iavf_rx_queue *rxq;
        int err = 0;
 
        PMD_DRV_FUNC_TRACE();
@@ -545,11 +551,11 @@ avf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        rte_wmb();
 
        /* Init the RX tail register. */
-       AVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
-       AVF_WRITE_FLUSH(hw);
+       IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+       IAVF_WRITE_FLUSH(hw);
 
        /* Ready to switch the queue on */
-       err = avf_switch_queue(adapter, rx_queue_id, TRUE, TRUE);
+       err = iavf_switch_queue(adapter, rx_queue_id, true, true);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
                            rx_queue_id);
@@ -561,12 +567,12 @@ avf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 }
 
 int
-avf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-       struct avf_adapter *adapter =
-               AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-       struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct avf_tx_queue *txq;
+       struct iavf_adapter *adapter =
+               IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct iavf_tx_queue *txq;
        int err = 0;
 
        PMD_DRV_FUNC_TRACE();
@@ -577,11 +583,11 @@ avf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
        txq = dev->data->tx_queues[tx_queue_id];
 
        /* Init the TX tail register. */
-       AVF_PCI_REG_WRITE(txq->qtx_tail, 0);
-       AVF_WRITE_FLUSH(hw);
+       IAVF_PCI_REG_WRITE(txq->qtx_tail, 0);
+       IAVF_WRITE_FLUSH(hw);
 
        /* Ready to switch the queue on */
-       err = avf_switch_queue(adapter, tx_queue_id, FALSE, TRUE);
+       err = iavf_switch_queue(adapter, tx_queue_id, false, true);
 
        if (err)
                PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
@@ -594,11 +600,11 @@ avf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 }
 
 int
-avf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-       struct avf_adapter *adapter =
-               AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-       struct avf_rx_queue *rxq;
+       struct iavf_adapter *adapter =
+               IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       struct iavf_rx_queue *rxq;
        int err;
 
        PMD_DRV_FUNC_TRACE();
@@ -606,7 +612,7 @@ avf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        if (rx_queue_id >= dev->data->nb_rx_queues)
                return -EINVAL;
 
-       err = avf_switch_queue(adapter, rx_queue_id, TRUE, FALSE);
+       err = iavf_switch_queue(adapter, rx_queue_id, true, false);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
                            rx_queue_id);
@@ -622,11 +628,11 @@ avf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 }
 
 int
-avf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-       struct avf_adapter *adapter =
-               AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-       struct avf_tx_queue *txq;
+       struct iavf_adapter *adapter =
+               IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       struct iavf_tx_queue *txq;
        int err;
 
        PMD_DRV_FUNC_TRACE();
@@ -634,7 +640,7 @@ avf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
        if (tx_queue_id >= dev->data->nb_tx_queues)
                return -EINVAL;
 
-       err = avf_switch_queue(adapter, tx_queue_id, FALSE, FALSE);
+       err = iavf_switch_queue(adapter, tx_queue_id, false, false);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
                            tx_queue_id);
@@ -650,9 +656,9 @@ avf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 }
 
 void
-avf_dev_rx_queue_release(void *rxq)
+iavf_dev_rx_queue_release(void *rxq)
 {
-       struct avf_rx_queue *q = (struct avf_rx_queue *)rxq;
+       struct iavf_rx_queue *q = (struct iavf_rx_queue *)rxq;
 
        if (!q)
                return;
@@ -664,9 +670,9 @@ avf_dev_rx_queue_release(void *rxq)
 }
 
 void
-avf_dev_tx_queue_release(void *txq)
+iavf_dev_tx_queue_release(void *txq)
 {
-       struct avf_tx_queue *q = (struct avf_tx_queue *)txq;
+       struct iavf_tx_queue *q = (struct iavf_tx_queue *)txq;
 
        if (!q)
                return;
@@ -678,16 +684,16 @@ avf_dev_tx_queue_release(void *txq)
 }
 
 void
-avf_stop_queues(struct rte_eth_dev *dev)
+iavf_stop_queues(struct rte_eth_dev *dev)
 {
-       struct avf_adapter *adapter =
-               AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-       struct avf_rx_queue *rxq;
-       struct avf_tx_queue *txq;
+       struct iavf_adapter *adapter =
+               IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       struct iavf_rx_queue *rxq;
+       struct iavf_tx_queue *txq;
        int ret, i;
 
        /* Stop All queues */
-       ret = avf_disable_queues(adapter);
+       ret = iavf_disable_queues(adapter);
        if (ret)
                PMD_DRV_LOG(WARNING, "Fail to stop queues");
 
@@ -710,10 +716,10 @@ avf_stop_queues(struct rte_eth_dev *dev)
 }
 
 static inline void
-avf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union avf_rx_desc *rxdp)
+iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
 {
        if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
-               (1 << AVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
+               (1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
                mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
                mb->vlan_tci =
                        rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
@@ -722,31 +728,49 @@ avf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union avf_rx_desc *rxdp)
        }
 }
 
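+/* Extract the stripped VLAN tag, if any, from a flex Rx descriptor */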
+static inline void
+iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
+                         volatile union iavf_rx_flex_desc *rxdp)
+{
+       if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
+               (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
+               mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+               mb->vlan_tci =
+                       rte_le_to_cpu_16(rxdp->wb.l2tag1);
+       } else {
+               mb->vlan_tci = 0;
+       }
+}
+
 /* Translate the rx descriptor status and error fields to pkt flags */
 static inline uint64_t
-avf_rxd_to_pkt_flags(uint64_t qword)
+iavf_rxd_to_pkt_flags(uint64_t qword)
 {
        uint64_t flags;
-       uint64_t error_bits = (qword >> AVF_RXD_QW1_ERROR_SHIFT);
+       uint64_t error_bits = (qword >> IAVF_RXD_QW1_ERROR_SHIFT);
 
-#define AVF_RX_ERR_BITS 0x3f
+#define IAVF_RX_ERR_BITS 0x3f
 
        /* Check if RSS_HASH */
-       flags = (((qword >> AVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
-                                       AVF_RX_DESC_FLTSTAT_RSS_HASH) ==
-                       AVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
+       flags = (((qword >> IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT) &
+                                       IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
+                       IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
 
-       if (likely((error_bits & AVF_RX_ERR_BITS) == 0)) {
+       /* Check if FDIR Match */
+       flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
+                               PKT_RX_FDIR : 0);
+
+       if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
                flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
                return flags;
        }
 
-       if (unlikely(error_bits & (1 << AVF_RX_DESC_ERROR_IPE_SHIFT)))
+       if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_IPE_SHIFT)))
                flags |= PKT_RX_IP_CKSUM_BAD;
        else
                flags |= PKT_RX_IP_CKSUM_GOOD;
 
-       if (unlikely(error_bits & (1 << AVF_RX_DESC_ERROR_L4E_SHIFT)))
+       if (unlikely(error_bits & (1 << IAVF_RX_DESC_ERROR_L4E_SHIFT)))
                flags |= PKT_RX_L4_CKSUM_BAD;
        else
                flags |= PKT_RX_L4_CKSUM_GOOD;
@@ -756,14 +780,125 @@ avf_rxd_to_pkt_flags(uint64_t qword)
        return flags;
 }
 
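+/* Fill the mbuf flow director ID from the legacy Rx descriptor and
+ * return the corresponding PKT_RX_FDIR_ID flag.
+ */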
+static inline uint64_t
+iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb)
+{
+       uint64_t flags = 0;
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+       uint16_t flexbh;
+
+       flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
+               IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
+               IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK;
+
+       if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
+               mb->hash.fdir.hi =
+                       rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
+               flags |= PKT_RX_FDIR_ID;
+       }
+#else
+       mb->hash.fdir.hi =
+               rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
+       flags |= PKT_RX_FDIR_ID;
+#endif
+       return flags;
+}
+
+/* Fill the mbuf RSS hash and FDIR ID fields from the Rx flex descriptor */
+static inline void
+iavf_rxd_to_pkt_fields(struct rte_mbuf *mb,
+                      volatile union iavf_rx_flex_desc *rxdp)
+{
+       volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
+                       (volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+       uint16_t stat_err;
+
+       stat_err = rte_le_to_cpu_16(desc->status_error0);
+       if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
+               mb->ol_flags |= PKT_RX_RSS_HASH;
+               mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
+       }
+#endif
+
+       if (desc->flow_id != 0xFFFFFFFF) {
+               mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+               mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+       }
+}
+
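+/* Status/error bits in a flex Rx descriptor that indicate a checksum or
+ * general receive error.
+ */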
+#define IAVF_RX_FLEX_ERR0_BITS \
+       ((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) |       \
+        (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |  \
+        (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |  \
+        (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) | \
+        (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) |        \
+        (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
+
+/* Rx L3/L4 checksum */
+static inline uint64_t
+iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
+{
+       uint64_t flags = 0;
+
+       /* check if HW has decoded the packet and checksum */
+       if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S))))
+               return 0;
+
+       if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
+               flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
+               return flags;
+       }
+
+       if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
+               flags |= PKT_RX_IP_CKSUM_BAD;
+       else
+               flags |= PKT_RX_IP_CKSUM_GOOD;
+
+       if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
+               flags |= PKT_RX_L4_CKSUM_BAD;
+       else
+               flags |= PKT_RX_L4_CKSUM_GOOD;
+
+       if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
+               flags |= PKT_RX_EIP_CKSUM_BAD;
+
+       return flags;
+}
+
+/* If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register. Update the RDT with the value of the last processed RX
+ * descriptor minus 1, to guarantee that the RDT register is never
+ * equal to the RDH register, which creates a "full" ring situation
+ * from the hardware point of view.
+ */
+static inline void
+iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
+{
+       nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+
+       if (nb_hold > rxq->rx_free_thresh) {
+               PMD_RX_LOG(DEBUG,
+                          "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
+                          rxq->port_id, rxq->queue_id, rx_id, nb_hold);
+               rx_id = (uint16_t)((rx_id == 0) ?
+                       (rxq->nb_rx_desc - 1) : (rx_id - 1));
+               IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+               nb_hold = 0;
+       }
+       rxq->nb_rx_hold = nb_hold;
+}
+
 /* implement recv_pkts */
 uint16_t
-avf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
-       volatile union avf_rx_desc *rx_ring;
-       volatile union avf_rx_desc *rxdp;
-       struct avf_rx_queue *rxq;
-       union avf_rx_desc rxd;
+       volatile union iavf_rx_desc *rx_ring;
+       volatile union iavf_rx_desc *rxdp;
+       struct iavf_rx_queue *rxq;
+       union iavf_rx_desc rxd;
        struct rte_mbuf *rxe;
        struct rte_eth_dev *dev;
        struct rte_mbuf *rxm;
@@ -775,42 +910,25 @@ avf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        uint16_t rx_id, nb_hold;
        uint64_t dma_addr;
        uint64_t pkt_flags;
-       static const uint32_t ptype_tbl[UINT8_MAX + 1] __rte_cache_aligned = {
-               /* [0] reserved */
-               [1] = RTE_PTYPE_L2_ETHER,
-               /* [2] - [21] reserved */
-               [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_FRAG,
-               [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_NONFRAG,
-               [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_UDP,
-               /* [25] reserved */
-               [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_TCP,
-               [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_SCTP,
-               [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_ICMP,
-               /* All others reserved */
-       };
+       const uint32_t *ptype_tbl;
 
        nb_rx = 0;
        nb_hold = 0;
        rxq = rx_queue;
        rx_id = rxq->rx_tail;
        rx_ring = rxq->rx_ring;
+       ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 
        while (nb_rx < nb_pkts) {
                rxdp = &rx_ring[rx_id];
                qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
-               rx_status = (qword1 & AVF_RXD_QW1_STATUS_MASK) >>
-                           AVF_RXD_QW1_STATUS_SHIFT;
+               rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
+                           IAVF_RXD_QW1_STATUS_SHIFT;
 
                /* Check the DD bit first */
-               if (!(rx_status & (1 << AVF_RX_DESC_STATUS_DD_SHIFT)))
+               if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
                        break;
-               AVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
+               IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
 
                nmb = rte_mbuf_raw_alloc(rxq->mp);
                if (unlikely(!nmb)) {
@@ -840,14 +958,13 @@ avf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                        rte_prefetch0(rxq->sw_ring[rx_id]);
                }
                rxm = rxe;
-               rxe = nmb;
                dma_addr =
                        rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
                rxdp->read.hdr_addr = 0;
                rxdp->read.pkt_addr = dma_addr;
 
-               rx_packet_len = ((qword1 & AVF_RXD_QW1_LENGTH_PBUF_MASK) >>
-                               AVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
+               rx_packet_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
+                               IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
 
                rxm->data_off = RTE_PKTMBUF_HEADROOM;
                rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
@@ -857,50 +974,284 @@ avf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                rxm->data_len = rx_packet_len;
                rxm->port = rxq->port_id;
                rxm->ol_flags = 0;
-               avf_rxd_to_vlan_tci(rxm, &rxd);
-               pkt_flags = avf_rxd_to_pkt_flags(qword1);
+               iavf_rxd_to_vlan_tci(rxm, &rxd);
+               pkt_flags = iavf_rxd_to_pkt_flags(qword1);
                rxm->packet_type =
                        ptype_tbl[(uint8_t)((qword1 &
-                       AVF_RXD_QW1_PTYPE_MASK) >> AVF_RXD_QW1_PTYPE_SHIFT)];
+                       IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
 
                if (pkt_flags & PKT_RX_RSS_HASH)
                        rxm->hash.rss =
                                rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
 
+               if (pkt_flags & PKT_RX_FDIR)
+                       pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);
+
                rxm->ol_flags |= pkt_flags;
 
                rx_pkts[nb_rx++] = rxm;
        }
        rxq->rx_tail = rx_id;
 
-       /* If the number of free RX descriptors is greater than the RX free
-        * threshold of the queue, advance the receive tail register of queue.
-        * Update that register with the value of the last processed RX
-        * descriptor minus 1.
-        */
-       nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
-       if (nb_hold > rxq->rx_free_thresh) {
-               PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-                          "nb_hold=%u nb_rx=%u",
-                          rxq->port_id, rxq->queue_id,
-                          rx_id, nb_hold, nb_rx);
-               rx_id = (uint16_t)((rx_id == 0) ?
-                       (rxq->nb_rx_desc - 1) : (rx_id - 1));
-               AVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
-               nb_hold = 0;
+       iavf_update_rx_tail(rxq, nb_hold, rx_id);
+
+       return nb_rx;
+}
+
+/* implement recv_pkts for flexible Rx descriptor */
+uint16_t
+iavf_recv_pkts_flex_rxd(void *rx_queue,
+                       struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+       volatile union iavf_rx_desc *rx_ring;
+       volatile union iavf_rx_flex_desc *rxdp;
+       struct iavf_rx_queue *rxq;
+       union iavf_rx_flex_desc rxd;
+       struct rte_mbuf *rxe;
+       struct rte_eth_dev *dev;
+       struct rte_mbuf *rxm;
+       struct rte_mbuf *nmb;
+       uint16_t nb_rx;
+       uint16_t rx_stat_err0;
+       uint16_t rx_packet_len;
+       uint16_t rx_id, nb_hold;
+       uint64_t dma_addr;
+       uint64_t pkt_flags;
+       const uint32_t *ptype_tbl;
+
+       nb_rx = 0;
+       nb_hold = 0;
+       rxq = rx_queue;
+       rx_id = rxq->rx_tail;
+       rx_ring = rxq->rx_ring;
+       ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+       while (nb_rx < nb_pkts) {
+               rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
+               rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
+
+               /* Check the DD bit first */
+               if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
+                       break;
+               IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
+
+               nmb = rte_mbuf_raw_alloc(rxq->mp);
+               if (unlikely(!nmb)) {
+                       dev = &rte_eth_devices[rxq->port_id];
+                       dev->data->rx_mbuf_alloc_failed++;
+                       PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+                                  "queue_id=%u", rxq->port_id, rxq->queue_id);
+                       break;
+               }
+
+               rxd = *rxdp;
+               nb_hold++;
+               rxe = rxq->sw_ring[rx_id];
+               rx_id++;
+               if (unlikely(rx_id == rxq->nb_rx_desc))
+                       rx_id = 0;
+
+               /* Prefetch next mbuf */
+               rte_prefetch0(rxq->sw_ring[rx_id]);
+
+               /* When next RX descriptor is on a cache line boundary,
+                * prefetch the next 4 RX descriptors and next 8 pointers
+                * to mbufs.
+                */
+               if ((rx_id & 0x3) == 0) {
+                       rte_prefetch0(&rx_ring[rx_id]);
+                       rte_prefetch0(rxq->sw_ring[rx_id]);
+               }
+               rxm = rxe;
+               dma_addr =
+                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+               rxdp->read.hdr_addr = 0;
+               rxdp->read.pkt_addr = dma_addr;
+
+               rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
+                               IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
+
+               rxm->data_off = RTE_PKTMBUF_HEADROOM;
+               rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
+               rxm->nb_segs = 1;
+               rxm->next = NULL;
+               rxm->pkt_len = rx_packet_len;
+               rxm->data_len = rx_packet_len;
+               rxm->port = rxq->port_id;
+               rxm->ol_flags = 0;
+               rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
+                       rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
+               iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
+               iavf_rxd_to_pkt_fields(rxm, &rxd);
+               pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
+               rxm->ol_flags |= pkt_flags;
+
+               rx_pkts[nb_rx++] = rxm;
        }
-       rxq->nb_rx_hold = nb_hold;
+       rxq->rx_tail = rx_id;
+
+       iavf_update_rx_tail(rxq, nb_hold, rx_id);
+
+       return nb_rx;
+}
+
+/* implement recv_scattered_pkts for flexible Rx descriptor */
+uint16_t
+iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
+                                 uint16_t nb_pkts)
+{
+       struct iavf_rx_queue *rxq = rx_queue;
+       union iavf_rx_flex_desc rxd;
+       struct rte_mbuf *rxe;
+       struct rte_mbuf *first_seg = rxq->pkt_first_seg;
+       struct rte_mbuf *last_seg = rxq->pkt_last_seg;
+       struct rte_mbuf *nmb, *rxm;
+       uint16_t rx_id = rxq->rx_tail;
+       uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
+       struct rte_eth_dev *dev;
+       uint16_t rx_stat_err0;
+       uint64_t dma_addr;
+       uint64_t pkt_flags;
+
+       volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
+       volatile union iavf_rx_flex_desc *rxdp;
+       const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+       while (nb_rx < nb_pkts) {
+               rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
+               rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
+
+               /* Check the DD bit */
+               if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
+                       break;
+               IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
+
+               nmb = rte_mbuf_raw_alloc(rxq->mp);
+               if (unlikely(!nmb)) {
+                       PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+                                  "queue_id=%u", rxq->port_id, rxq->queue_id);
+                       dev = &rte_eth_devices[rxq->port_id];
+                       dev->data->rx_mbuf_alloc_failed++;
+                       break;
+               }
+
+               rxd = *rxdp;
+               nb_hold++;
+               rxe = rxq->sw_ring[rx_id];
+               rx_id++;
+               if (rx_id == rxq->nb_rx_desc)
+                       rx_id = 0;
+
+               /* Prefetch next mbuf */
+               rte_prefetch0(rxq->sw_ring[rx_id]);
+
+               /* When next RX descriptor is on a cache line boundary,
+                * prefetch the next 4 RX descriptors and next 8 pointers
+                * to mbufs.
+                */
+               if ((rx_id & 0x3) == 0) {
+                       rte_prefetch0(&rx_ring[rx_id]);
+                       rte_prefetch0(rxq->sw_ring[rx_id]);
+               }
+
+               rxm = rxe;
+               dma_addr =
+                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+
+               /* Set data buffer address and data length of the mbuf */
+               rxdp->read.hdr_addr = 0;
+               rxdp->read.pkt_addr = dma_addr;
+               rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
+                               IAVF_RX_FLX_DESC_PKT_LEN_M;
+               rxm->data_len = rx_packet_len;
+               rxm->data_off = RTE_PKTMBUF_HEADROOM;
+
+               /* If this is the first buffer of the received packet, set the
+                * pointer to the first mbuf of the packet and initialize its
+                * context. Otherwise, update the total length and the number
+                * of segments of the current scattered packet, and update the
+                * pointer to the last mbuf of the current packet.
+                */
+               if (!first_seg) {
+                       first_seg = rxm;
+                       first_seg->nb_segs = 1;
+                       first_seg->pkt_len = rx_packet_len;
+               } else {
+                       first_seg->pkt_len =
+                               (uint16_t)(first_seg->pkt_len +
+                                               rx_packet_len);
+                       first_seg->nb_segs++;
+                       last_seg->next = rxm;
+               }
+
+               /* If this is not the last buffer of the received packet,
+                * update the pointer to the last mbuf of the current scattered
+                * packet and continue to parse the RX ring.
+                */
+               if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
+                       last_seg = rxm;
+                       continue;
+               }
+
+               /* This is the last buffer of the received packet. If the CRC
+                * is not stripped by the hardware:
+                *  - Subtract the CRC length from the total packet length.
+                *  - If the last buffer only contains the whole CRC or a part
+                *  of it, free the mbuf associated to the last buffer. If part
+                *  of the CRC is also contained in the previous mbuf, subtract
+                *  the length of that CRC part from the data length of the
+                *  previous mbuf.
+                */
+               rxm->next = NULL;
+               if (unlikely(rxq->crc_len > 0)) {
+                       first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
+                       if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
+                               rte_pktmbuf_free_seg(rxm);
+                               first_seg->nb_segs--;
+                               last_seg->data_len =
+                                       (uint16_t)(last_seg->data_len -
+                                       (RTE_ETHER_CRC_LEN - rx_packet_len));
+                               last_seg->next = NULL;
+                       } else {
+                               rxm->data_len = (uint16_t)(rx_packet_len -
+                                                       RTE_ETHER_CRC_LEN);
+                       }
+               }
+
+               first_seg->port = rxq->port_id;
+               first_seg->ol_flags = 0;
+               first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
+                       rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
+               iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
+               iavf_rxd_to_pkt_fields(first_seg, &rxd);
+               pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
+
+               first_seg->ol_flags |= pkt_flags;
+
+               /* Prefetch data of first segment, if configured to do so. */
+               rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
+                                         first_seg->data_off));
+               rx_pkts[nb_rx++] = first_seg;
+               first_seg = NULL;
+       }
+
+       /* Record index of the next RX descriptor to probe. */
+       rxq->rx_tail = rx_id;
+       rxq->pkt_first_seg = first_seg;
+       rxq->pkt_last_seg = last_seg;
+
+       iavf_update_rx_tail(rxq, nb_hold, rx_id);
 
        return nb_rx;
 }
 
 /* implement recv_scattered_pkts  */
 uint16_t
-avf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                        uint16_t nb_pkts)
 {
-       struct avf_rx_queue *rxq = rx_queue;
-       union avf_rx_desc rxd;
+       struct iavf_rx_queue *rxq = rx_queue;
+       union iavf_rx_desc rxd;
        struct rte_mbuf *rxe;
        struct rte_mbuf *first_seg = rxq->pkt_first_seg;
        struct rte_mbuf *last_seg = rxq->pkt_last_seg;
@@ -913,38 +1264,20 @@ avf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        uint64_t dma_addr;
        uint64_t pkt_flags;
 
-       volatile union avf_rx_desc *rx_ring = rxq->rx_ring;
-       volatile union avf_rx_desc *rxdp;
-       static const uint32_t ptype_tbl[UINT8_MAX + 1] __rte_cache_aligned = {
-               /* [0] reserved */
-               [1] = RTE_PTYPE_L2_ETHER,
-               /* [2] - [21] reserved */
-               [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_FRAG,
-               [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_NONFRAG,
-               [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_UDP,
-               /* [25] reserved */
-               [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_TCP,
-               [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_SCTP,
-               [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_ICMP,
-               /* All others reserved */
-       };
+       volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
+       volatile union iavf_rx_desc *rxdp;
+       const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 
        while (nb_rx < nb_pkts) {
                rxdp = &rx_ring[rx_id];
                qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
-               rx_status = (qword1 & AVF_RXD_QW1_STATUS_MASK) >>
-                           AVF_RXD_QW1_STATUS_SHIFT;
+               rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
+                           IAVF_RXD_QW1_STATUS_SHIFT;
 
                /* Check the DD bit */
-               if (!(rx_status & (1 << AVF_RX_DESC_STATUS_DD_SHIFT)))
+               if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
                        break;
-               AVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
+               IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
 
                nmb = rte_mbuf_raw_alloc(rxq->mp);
                if (unlikely(!nmb)) {
@@ -975,15 +1308,14 @@ avf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                }
 
                rxm = rxe;
-               rxe = nmb;
                dma_addr =
                        rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
 
                /* Set data buffer address and data length of the mbuf */
                rxdp->read.hdr_addr = 0;
                rxdp->read.pkt_addr = dma_addr;
-               rx_packet_len = (qword1 & AVF_RXD_QW1_LENGTH_PBUF_MASK) >>
-                                AVF_RXD_QW1_LENGTH_PBUF_SHIFT;
+               rx_packet_len = (qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
+                                IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
                rxm->data_len = rx_packet_len;
                rxm->data_off = RTE_PKTMBUF_HEADROOM;
 
@@ -1009,7 +1341,7 @@ avf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 * update the pointer to the last mbuf of the current scattered
                 * packet and continue to parse the RX ring.
                 */
-               if (!(rx_status & (1 << AVF_RX_DESC_STATUS_EOF_SHIFT))) {
+               if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_EOF_SHIFT))) {
                        last_seg = rxm;
                        continue;
                }
@@ -1025,31 +1357,34 @@ avf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 */
                rxm->next = NULL;
                if (unlikely(rxq->crc_len > 0)) {
-                       first_seg->pkt_len -= ETHER_CRC_LEN;
-                       if (rx_packet_len <= ETHER_CRC_LEN) {
+                       first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
+                       if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
                                rte_pktmbuf_free_seg(rxm);
                                first_seg->nb_segs--;
                                last_seg->data_len =
                                        (uint16_t)(last_seg->data_len -
-                                       (ETHER_CRC_LEN - rx_packet_len));
+                                       (RTE_ETHER_CRC_LEN - rx_packet_len));
                                last_seg->next = NULL;
                        } else
                                rxm->data_len = (uint16_t)(rx_packet_len -
-                                                               ETHER_CRC_LEN);
+                                                       RTE_ETHER_CRC_LEN);
                }
 
                first_seg->port = rxq->port_id;
                first_seg->ol_flags = 0;
-               avf_rxd_to_vlan_tci(first_seg, &rxd);
-               pkt_flags = avf_rxd_to_pkt_flags(qword1);
+               iavf_rxd_to_vlan_tci(first_seg, &rxd);
+               pkt_flags = iavf_rxd_to_pkt_flags(qword1);
                first_seg->packet_type =
                        ptype_tbl[(uint8_t)((qword1 &
-                       AVF_RXD_QW1_PTYPE_MASK) >> AVF_RXD_QW1_PTYPE_SHIFT)];
+                       IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT)];
 
                if (pkt_flags & PKT_RX_RSS_HASH)
                        first_seg->hash.rss =
                                rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
 
+               if (pkt_flags & PKT_RX_FDIR)
+                       pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);
+
                first_seg->ol_flags |= pkt_flags;
 
                /* Prefetch data of first segment, if configured to do so. */
@@ -1064,125 +1399,168 @@ avf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        rxq->pkt_first_seg = first_seg;
        rxq->pkt_last_seg = last_seg;
 
-       /* If the number of free RX descriptors is greater than the RX free
-        * threshold of the queue, advance the Receive Descriptor Tail (RDT)
-        * register. Update the RDT with the value of the last processed RX
-        * descriptor minus 1, to guarantee that the RDT register is never
-        * equal to the RDH register, which creates a "full" ring situtation
-        * from the hardware point of view.
+       iavf_update_rx_tail(rxq, nb_hold, rx_id);
+
+       return nb_rx;
+}
+
+#define IAVF_LOOK_AHEAD 8
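+/* Scan up to IAVF_RX_MAX_BURST flex descriptors, IAVF_LOOK_AHEAD at a time,
+ * and stage completed mbufs in rx_stage for the bulk Rx path.
+ */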
+static inline int
+iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
+{
+       volatile union iavf_rx_flex_desc *rxdp;
+       struct rte_mbuf **rxep;
+       struct rte_mbuf *mb;
+       uint16_t stat_err0;
+       uint16_t pkt_len;
+       int32_t s[IAVF_LOOK_AHEAD], nb_dd;
+       int32_t i, j, nb_rx = 0;
+       uint64_t pkt_flags;
+       const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+       rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
+       rxep = &rxq->sw_ring[rxq->rx_tail];
+
+       stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
+
+       /* Make sure there is at least 1 packet to receive */
+       if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
+               return 0;
+
+       /* Scan LOOK_AHEAD descriptors at a time to determine which
+        * descriptors reference packets that are ready to be received.
         */
-       nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
-       if (nb_hold > rxq->rx_free_thresh) {
-               PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-                          "nb_hold=%u nb_rx=%u",
-                          rxq->port_id, rxq->queue_id,
-                          rx_id, nb_hold, nb_rx);
-               rx_id = (uint16_t)(rx_id == 0 ?
-                       (rxq->nb_rx_desc - 1) : (rx_id - 1));
-               AVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
-               nb_hold = 0;
+       for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
+            rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
+               /* Read desc statuses backwards to avoid race condition */
+               for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--)
+                       s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
+
+               rte_smp_rmb();
+
+               /* Compute how many status bits were set */
+               for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
+                       nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
+
+               nb_rx += nb_dd;
+
+               /* Translate descriptor info to mbuf parameters */
+               for (j = 0; j < nb_dd; j++) {
+                       IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
+                                         rxq->rx_tail +
+                                         i * IAVF_LOOK_AHEAD + j);
+
+                       mb = rxep[j];
+                       pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
+                               IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
+                       mb->data_len = pkt_len;
+                       mb->pkt_len = pkt_len;
+                       mb->ol_flags = 0;
+
+                       mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
+                               rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
+                       iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
+                       iavf_rxd_to_pkt_fields(mb, &rxdp[j]);
+                       stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
+                       pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
+
+                       mb->ol_flags |= pkt_flags;
+               }
+
+               for (j = 0; j < IAVF_LOOK_AHEAD; j++)
+                       rxq->rx_stage[i + j] = rxep[j];
+
+               if (nb_dd != IAVF_LOOK_AHEAD)
+                       break;
        }
-       rxq->nb_rx_hold = nb_hold;
+
+       /* Clear software ring entries */
+       for (i = 0; i < nb_rx; i++)
+               rxq->sw_ring[rxq->rx_tail + i] = NULL;
 
        return nb_rx;
 }
 
-#define AVF_LOOK_AHEAD 8
 static inline int
-avf_rx_scan_hw_ring(struct avf_rx_queue *rxq)
+iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
 {
-       volatile union avf_rx_desc *rxdp;
+       volatile union iavf_rx_desc *rxdp;
        struct rte_mbuf **rxep;
        struct rte_mbuf *mb;
        uint16_t pkt_len;
        uint64_t qword1;
        uint32_t rx_status;
-       int32_t s[AVF_LOOK_AHEAD], nb_dd;
+       int32_t s[IAVF_LOOK_AHEAD], nb_dd;
        int32_t i, j, nb_rx = 0;
        uint64_t pkt_flags;
-       static const uint32_t ptype_tbl[UINT8_MAX + 1] __rte_cache_aligned = {
-               /* [0] reserved */
-               [1] = RTE_PTYPE_L2_ETHER,
-               /* [2] - [21] reserved */
-               [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_FRAG,
-               [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_NONFRAG,
-               [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_UDP,
-               /* [25] reserved */
-               [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_TCP,
-               [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_SCTP,
-               [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_ICMP,
-               /* All others reserved */
-       };
+       const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 
        rxdp = &rxq->rx_ring[rxq->rx_tail];
        rxep = &rxq->sw_ring[rxq->rx_tail];
 
        qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
-       rx_status = (qword1 & AVF_RXD_QW1_STATUS_MASK) >>
-                   AVF_RXD_QW1_STATUS_SHIFT;
+       rx_status = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
+                   IAVF_RXD_QW1_STATUS_SHIFT;
 
        /* Make sure there is at least 1 packet to receive */
-       if (!(rx_status & (1 << AVF_RX_DESC_STATUS_DD_SHIFT)))
+       if (!(rx_status & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
                return 0;
 
        /* Scan LOOK_AHEAD descriptors at a time to determine which
         * descriptors reference packets that are ready to be received.
         */
-       for (i = 0; i < AVF_RX_MAX_BURST; i += AVF_LOOK_AHEAD,
-            rxdp += AVF_LOOK_AHEAD, rxep += AVF_LOOK_AHEAD) {
+       for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
+            rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
                /* Read desc statuses backwards to avoid race condition */
-               for (j = AVF_LOOK_AHEAD - 1; j >= 0; j--) {
+               for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--) {
                        qword1 = rte_le_to_cpu_64(
                                rxdp[j].wb.qword1.status_error_len);
-                       s[j] = (qword1 & AVF_RXD_QW1_STATUS_MASK) >>
-                              AVF_RXD_QW1_STATUS_SHIFT;
+                       s[j] = (qword1 & IAVF_RXD_QW1_STATUS_MASK) >>
+                              IAVF_RXD_QW1_STATUS_SHIFT;
                }
 
                rte_smp_rmb();
 
                /* Compute how many status bits were set */
-               for (j = 0, nb_dd = 0; j < AVF_LOOK_AHEAD; j++)
-                       nb_dd += s[j] & (1 << AVF_RX_DESC_STATUS_DD_SHIFT);
+               for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
+                       nb_dd += s[j] & (1 << IAVF_RX_DESC_STATUS_DD_SHIFT);
 
                nb_rx += nb_dd;
 
                /* Translate descriptor info to mbuf parameters */
                for (j = 0; j < nb_dd; j++) {
-                       AVF_DUMP_RX_DESC(rxq, &rxdp[j],
-                                        rxq->rx_tail + i * AVF_LOOK_AHEAD + j);
+                       IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
+                                        rxq->rx_tail + i * IAVF_LOOK_AHEAD + j);
 
                        mb = rxep[j];
                        qword1 = rte_le_to_cpu_64
                                        (rxdp[j].wb.qword1.status_error_len);
-                       pkt_len = ((qword1 & AVF_RXD_QW1_LENGTH_PBUF_MASK) >>
-                                 AVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
+                       pkt_len = ((qword1 & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
+                                 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
                        mb->data_len = pkt_len;
                        mb->pkt_len = pkt_len;
                        mb->ol_flags = 0;
-                       avf_rxd_to_vlan_tci(mb, &rxdp[j]);
-                       pkt_flags = avf_rxd_to_pkt_flags(qword1);
+                       iavf_rxd_to_vlan_tci(mb, &rxdp[j]);
+                       pkt_flags = iavf_rxd_to_pkt_flags(qword1);
                        mb->packet_type =
                                ptype_tbl[(uint8_t)((qword1 &
-                               AVF_RXD_QW1_PTYPE_MASK) >>
-                               AVF_RXD_QW1_PTYPE_SHIFT)];
+                               IAVF_RXD_QW1_PTYPE_MASK) >>
+                               IAVF_RXD_QW1_PTYPE_SHIFT)];
 
                        if (pkt_flags & PKT_RX_RSS_HASH)
                                mb->hash.rss = rte_le_to_cpu_32(
                                        rxdp[j].wb.qword0.hi_dword.rss);
 
+                       if (pkt_flags & PKT_RX_FDIR)
+                               pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
+
                        mb->ol_flags |= pkt_flags;
                }
 
-               for (j = 0; j < AVF_LOOK_AHEAD; j++)
+               for (j = 0; j < IAVF_LOOK_AHEAD; j++)
                        rxq->rx_stage[i + j] = rxep[j];
 
-               if (nb_dd != AVF_LOOK_AHEAD)
+               if (nb_dd != IAVF_LOOK_AHEAD)
                        break;
        }
 
@@ -1194,7 +1572,7 @@ avf_rx_scan_hw_ring(struct avf_rx_queue *rxq)
 }
 
 static inline uint16_t
-avf_rx_fill_from_stage(struct avf_rx_queue *rxq,
+iavf_rx_fill_from_stage(struct iavf_rx_queue *rxq,
                       struct rte_mbuf **rx_pkts,
                       uint16_t nb_pkts)
 {
@@ -1213,9 +1591,9 @@ avf_rx_fill_from_stage(struct avf_rx_queue *rxq,
 }
 
 static inline int
-avf_rx_alloc_bufs(struct avf_rx_queue *rxq)
+iavf_rx_alloc_bufs(struct iavf_rx_queue *rxq)
 {
-       volatile union avf_rx_desc *rxdp;
+       volatile union iavf_rx_desc *rxdp;
        struct rte_mbuf **rxep;
        struct rte_mbuf *mb;
        uint16_t alloc_idx, i;
@@ -1252,7 +1630,7 @@ avf_rx_alloc_bufs(struct avf_rx_queue *rxq)
 
        /* Update rx tail register */
        rte_wmb();
-       AVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
+       IAVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
 
        rxq->rx_free_trigger =
                (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
@@ -1265,22 +1643,25 @@ avf_rx_alloc_bufs(struct avf_rx_queue *rxq)
 static inline uint16_t
 rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
-       struct avf_rx_queue *rxq = (struct avf_rx_queue *)rx_queue;
+       struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
        uint16_t nb_rx = 0;
 
        if (!nb_pkts)
                return 0;
 
        if (rxq->rx_nb_avail)
-               return avf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+               return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
 
-       nb_rx = (uint16_t)avf_rx_scan_hw_ring(rxq);
+       if (rxq->rxdid == IAVF_RXDID_COMMS_OVS_1)
+               nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq);
+       else
+               nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
        rxq->rx_next_avail = 0;
        rxq->rx_nb_avail = nb_rx;
        rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
 
        if (rxq->rx_tail > rxq->rx_free_trigger) {
-               if (avf_rx_alloc_bufs(rxq) != 0) {
+               if (iavf_rx_alloc_bufs(rxq) != 0) {
                        uint16_t i, j;
 
                        /* TODO: count rx_mbuf_alloc_failed here */
@@ -1302,13 +1683,13 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                   rxq->rx_tail, nb_rx);
 
        if (rxq->rx_nb_avail)
-               return avf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
+               return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
 
        return 0;
 }
 
 static uint16_t
-avf_recv_pkts_bulk_alloc(void *rx_queue,
+iavf_recv_pkts_bulk_alloc(void *rx_queue,
                         struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts)
 {
@@ -1317,11 +1698,11 @@ avf_recv_pkts_bulk_alloc(void *rx_queue,
        if (unlikely(nb_pkts == 0))
                return 0;
 
-       if (likely(nb_pkts <= AVF_RX_MAX_BURST))
+       if (likely(nb_pkts <= IAVF_RX_MAX_BURST))
                return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
 
        while (nb_pkts) {
-               n = RTE_MIN(nb_pkts, AVF_RX_MAX_BURST);
+               n = RTE_MIN(nb_pkts, IAVF_RX_MAX_BURST);
                count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
                nb_rx = (uint16_t)(nb_rx + count);
                nb_pkts = (uint16_t)(nb_pkts - count);
@@ -1333,15 +1714,15 @@ avf_recv_pkts_bulk_alloc(void *rx_queue,
 }
 
 static inline int
-avf_xmit_cleanup(struct avf_tx_queue *txq)
+iavf_xmit_cleanup(struct iavf_tx_queue *txq)
 {
-       struct avf_tx_entry *sw_ring = txq->sw_ring;
+       struct iavf_tx_entry *sw_ring = txq->sw_ring;
        uint16_t last_desc_cleaned = txq->last_desc_cleaned;
        uint16_t nb_tx_desc = txq->nb_tx_desc;
        uint16_t desc_to_clean_to;
        uint16_t nb_tx_to_clean;
 
-       volatile struct avf_tx_desc *txd = txq->tx_ring;
+       volatile struct iavf_tx_desc *txd = txq->tx_ring;
 
        desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
        if (desc_to_clean_to >= nb_tx_desc)
@@ -1349,8 +1730,8 @@ avf_xmit_cleanup(struct avf_tx_queue *txq)
 
        desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
        if ((txd[desc_to_clean_to].cmd_type_offset_bsz &
-                       rte_cpu_to_le_64(AVF_TXD_QW1_DTYPE_MASK)) !=
-                       rte_cpu_to_le_64(AVF_TX_DESC_DTYPE_DESC_DONE)) {
+                       rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK)) !=
+                       rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE)) {
                PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done "
                                "(port=%d queue=%d)", desc_to_clean_to,
                                txq->port_id, txq->queue_id);
@@ -1374,7 +1755,7 @@ avf_xmit_cleanup(struct avf_tx_queue *txq)
 
 /* Check if the context descriptor is needed for TX offloading */
 static inline uint16_t
-avf_calc_context_desc(uint64_t flags)
+iavf_calc_context_desc(uint64_t flags)
 {
        static uint64_t mask = PKT_TX_TCP_SEG;
 
@@ -1382,53 +1763,53 @@ avf_calc_context_desc(uint64_t flags)
 }
 
 static inline void
-avf_txd_enable_checksum(uint64_t ol_flags,
+iavf_txd_enable_checksum(uint64_t ol_flags,
                        uint32_t *td_cmd,
                        uint32_t *td_offset,
-                       union avf_tx_offload tx_offload)
+                       union iavf_tx_offload tx_offload)
 {
        /* Set MACLEN */
        *td_offset |= (tx_offload.l2_len >> 1) <<
-                     AVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+                     IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
 
        /* Enable L3 checksum offloads */
        if (ol_flags & PKT_TX_IP_CKSUM) {
-               *td_cmd |= AVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
+               *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
                *td_offset |= (tx_offload.l3_len >> 2) <<
-                             AVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+                             IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
        } else if (ol_flags & PKT_TX_IPV4) {
-               *td_cmd |= AVF_TX_DESC_CMD_IIPT_IPV4;
+               *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
                *td_offset |= (tx_offload.l3_len >> 2) <<
-                             AVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+                             IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
        } else if (ol_flags & PKT_TX_IPV6) {
-               *td_cmd |= AVF_TX_DESC_CMD_IIPT_IPV6;
+               *td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
                *td_offset |= (tx_offload.l3_len >> 2) <<
-                             AVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+                             IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
        }
 
        if (ol_flags & PKT_TX_TCP_SEG) {
-               *td_cmd |= AVF_TX_DESC_CMD_L4T_EOFT_TCP;
+               *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
                *td_offset |= (tx_offload.l4_len >> 2) <<
-                             AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+                             IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
                return;
        }
 
        /* Enable L4 checksum offloads */
        switch (ol_flags & PKT_TX_L4_MASK) {
        case PKT_TX_TCP_CKSUM:
-               *td_cmd |= AVF_TX_DESC_CMD_L4T_EOFT_TCP;
-               *td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
-                             AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+               *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+               *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+                             IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
                break;
        case PKT_TX_SCTP_CKSUM:
-               *td_cmd |= AVF_TX_DESC_CMD_L4T_EOFT_SCTP;
-               *td_offset |= (sizeof(struct sctp_hdr) >> 2) <<
-                             AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+               *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
+               *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+                             IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
                break;
        case PKT_TX_UDP_CKSUM:
-               *td_cmd |= AVF_TX_DESC_CMD_L4T_EOFT_UDP;
-               *td_offset |= (sizeof(struct udp_hdr) >> 2) <<
-                             AVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+               *td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
+               *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+                             IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
                break;
        default:
                break;
@@ -1439,7 +1820,7 @@ avf_txd_enable_checksum(uint64_t ol_flags,
  * support IP -> L4 and IP -> IP -> L4
  */
 static inline uint64_t
-avf_set_tso_ctx(struct rte_mbuf *mbuf, union avf_tx_offload tx_offload)
+iavf_set_tso_ctx(struct rte_mbuf *mbuf, union iavf_tx_offload tx_offload)
 {
        uint64_t ctx_desc = 0;
        uint32_t cd_cmd, hdr_len, cd_tso_len;
@@ -1449,46 +1830,43 @@ avf_set_tso_ctx(struct rte_mbuf *mbuf, union avf_tx_offload tx_offload)
                return ctx_desc;
        }
 
-       /* in case of non tunneling packet, the outer_l2_len and
-        * outer_l3_len must be 0.
-        */
        hdr_len = tx_offload.l2_len +
                  tx_offload.l3_len +
                  tx_offload.l4_len;
 
-       cd_cmd = AVF_TX_CTX_DESC_TSO;
+       cd_cmd = IAVF_TX_CTX_DESC_TSO;
        cd_tso_len = mbuf->pkt_len - hdr_len;
-       ctx_desc |= ((uint64_t)cd_cmd << AVF_TXD_CTX_QW1_CMD_SHIFT) |
-                    ((uint64_t)cd_tso_len << AVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
-                    ((uint64_t)mbuf->tso_segsz << AVF_TXD_CTX_QW1_MSS_SHIFT);
+       ctx_desc |= ((uint64_t)cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
+                    ((uint64_t)cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+                    ((uint64_t)mbuf->tso_segsz << IAVF_TXD_CTX_QW1_MSS_SHIFT);
 
        return ctx_desc;
 }
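
iavf_set_tso_ctx() above packs the TSO command, the L4 payload length (packet length minus the L2/L3/L4 headers) and the MSS into a single 64-bit context qword. A hedged arithmetic sketch of that packing, using placeholder shift values and macro names rather than the driver's real IAVF_TXD_CTX_QW1_* definitions:

/*
 * Sketch only: the shift values below are placeholders chosen for the
 * example, not the hardware encoding.
 */
#include <stdint.h>
#include <stdio.h>

#define CMD_SHIFT      4ULL     /* placeholder for IAVF_TXD_CTX_QW1_CMD_SHIFT */
#define TSO_LEN_SHIFT 30ULL     /* placeholder for IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT */
#define MSS_SHIFT     50ULL     /* placeholder for IAVF_TXD_CTX_QW1_MSS_SHIFT */
#define CTX_DESC_TSO   1ULL     /* placeholder for IAVF_TX_CTX_DESC_TSO */

int main(void)
{
        uint32_t l2_len = 14, l3_len = 20, l4_len = 20; /* Ethernet + IPv4 + TCP */
        uint32_t pkt_len = 9014, tso_segsz = 1460;

        uint32_t hdr_len = l2_len + l3_len + l4_len;    /* 54 bytes of headers */
        uint32_t cd_tso_len = pkt_len - hdr_len;        /* 8960 bytes of payload */

        uint64_t qw = (CTX_DESC_TSO << CMD_SHIFT) |
                      ((uint64_t)cd_tso_len << TSO_LEN_SHIFT) |
                      ((uint64_t)tso_segsz << MSS_SHIFT);

        printf("hdr=%u payload=%u ctx qword=0x%016llx\n",
               hdr_len, cd_tso_len, (unsigned long long)qw);
        return 0;
}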
 
 /* Construct the tx flags */
 static inline uint64_t
-avf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
+iavf_build_ctob(uint32_t td_cmd, uint32_t td_offset, unsigned int size,
               uint32_t td_tag)
 {
-       return rte_cpu_to_le_64(AVF_TX_DESC_DTYPE_DATA |
-                               ((uint64_t)td_cmd  << AVF_TXD_QW1_CMD_SHIFT) |
+       return rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
+                               ((uint64_t)td_cmd  << IAVF_TXD_QW1_CMD_SHIFT) |
                                ((uint64_t)td_offset <<
-                                AVF_TXD_QW1_OFFSET_SHIFT) |
+                                IAVF_TXD_QW1_OFFSET_SHIFT) |
                                ((uint64_t)size  <<
-                                AVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
+                                IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
                                ((uint64_t)td_tag  <<
-                                AVF_TXD_QW1_L2TAG1_SHIFT));
+                                IAVF_TXD_QW1_L2TAG1_SHIFT));
 }
 
 /* TX function */
 uint16_t
-avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-       volatile struct avf_tx_desc *txd;
-       volatile struct avf_tx_desc *txr;
-       struct avf_tx_queue *txq;
-       struct avf_tx_entry *sw_ring;
-       struct avf_tx_entry *txe, *txn;
+       volatile struct iavf_tx_desc *txd;
+       volatile struct iavf_tx_desc *txr;
+       struct iavf_tx_queue *txq;
+       struct iavf_tx_entry *sw_ring;
+       struct iavf_tx_entry *txe, *txn;
        struct rte_mbuf *tx_pkt;
        struct rte_mbuf *m_seg;
        uint16_t tx_id;
@@ -1502,7 +1880,7 @@ avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        uint16_t tx_last;
        uint16_t slen;
        uint64_t buf_dma_addr;
-       union avf_tx_offload tx_offload = {0};
+       union iavf_tx_offload tx_offload = {0};
 
        txq = tx_queue;
        sw_ring = txq->sw_ring;
@@ -1512,7 +1890,7 @@ avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
        /* Check if the descriptor ring needs to be cleaned. */
        if (txq->nb_free < txq->free_thresh)
-               avf_xmit_cleanup(txq);
+               iavf_xmit_cleanup(txq);
 
        for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
                td_cmd = 0;
@@ -1529,7 +1907,7 @@ avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                tx_offload.tso_segsz = tx_pkt->tso_segsz;
 
                /* Calculate the number of context descriptors needed. */
-               nb_ctx = avf_calc_context_desc(ol_flags);
+               nb_ctx = iavf_calc_context_desc(ol_flags);
 
                /* The number of descriptors that must be allocated for
                 * a packet equals to the number of the segments of that
@@ -1547,14 +1925,14 @@ avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                           txq->port_id, txq->queue_id, tx_id, tx_last);
 
                if (nb_used > txq->nb_free) {
-                       if (avf_xmit_cleanup(txq)) {
+                       if (iavf_xmit_cleanup(txq)) {
                                if (nb_tx == 0)
                                        return 0;
                                goto end_of_tx;
                        }
                        if (unlikely(nb_used > txq->rs_thresh)) {
                                while (nb_used > txq->nb_free) {
-                                       if (avf_xmit_cleanup(txq)) {
+                                       if (iavf_xmit_cleanup(txq)) {
                                                if (nb_tx == 0)
                                                        return 0;
                                                goto end_of_tx;
@@ -1565,7 +1943,7 @@ avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
                /* Descriptor based VLAN insertion */
                if (ol_flags & PKT_TX_VLAN_PKT) {
-                       td_cmd |= AVF_TX_DESC_CMD_IL2TAG1;
+                       td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
                        td_tag = tx_pkt->vlan_tci;
                }
 
@@ -1575,14 +1953,17 @@ avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                td_cmd |= 0x04;
 
                /* Enable checksum offloading */
-               if (ol_flags & AVF_TX_CKSUM_OFFLOAD_MASK)
-                       avf_txd_enable_checksum(ol_flags, &td_cmd,
+               if (ol_flags & IAVF_TX_CKSUM_OFFLOAD_MASK)
+                       iavf_txd_enable_checksum(ol_flags, &td_cmd,
                                                &td_offset, tx_offload);
 
                if (nb_ctx) {
                        /* Setup TX context descriptor if required */
                        uint64_t cd_type_cmd_tso_mss =
-                               AVF_TX_DESC_DTYPE_CONTEXT;
+                               IAVF_TX_DESC_DTYPE_CONTEXT;
+                       volatile struct iavf_tx_context_desc *ctx_txd =
+                               (volatile struct iavf_tx_context_desc *)
+                                                       &txr[tx_id];
 
                        txn = &sw_ring[txe->next_id];
                        RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
@@ -1594,9 +1975,12 @@ avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        /* TSO enabled */
                        if (ol_flags & PKT_TX_TCP_SEG)
                                cd_type_cmd_tso_mss |=
-                                       avf_set_tso_ctx(tx_pkt, tx_offload);
+                                       iavf_set_tso_ctx(tx_pkt, tx_offload);
 
-                       AVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
+                       ctx_txd->type_cmd_tso_mss =
+                               rte_cpu_to_le_64(cd_type_cmd_tso_mss);
+
+                       IAVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
                        txe->last_id = tx_last;
                        tx_id = txe->next_id;
                        txe = txn;
@@ -1615,12 +1999,12 @@ avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        slen = m_seg->data_len;
                        buf_dma_addr = rte_mbuf_data_iova(m_seg);
                        txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
-                       txd->cmd_type_offset_bsz = avf_build_ctob(td_cmd,
+                       txd->cmd_type_offset_bsz = iavf_build_ctob(td_cmd,
                                                                  td_offset,
                                                                  slen,
                                                                  td_tag);
 
-                       AVF_DUMP_TX_DESC(txq, txd, tx_id);
+                       IAVF_DUMP_TX_DESC(txq, txd, tx_id);
                        txe->last_id = tx_last;
                        tx_id = txe->next_id;
                        txe = txn;
@@ -1628,7 +2012,7 @@ avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                } while (m_seg);
 
                /* The last packet data descriptor needs End Of Packet (EOP) */
-               td_cmd |= AVF_TX_DESC_CMD_EOP;
+               td_cmd |= IAVF_TX_DESC_CMD_EOP;
                txq->nb_used = (uint16_t)(txq->nb_used + nb_used);
                txq->nb_free = (uint16_t)(txq->nb_free - nb_used);
 
@@ -1637,7 +2021,7 @@ avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                                   "%4u (port=%d queue=%d)",
                                   tx_last, txq->port_id, txq->queue_id);
 
-                       td_cmd |= AVF_TX_DESC_CMD_RS;
+                       td_cmd |= IAVF_TX_DESC_CMD_RS;
 
                        /* Update txq RS bit counters */
                        txq->nb_used = 0;
@@ -1645,8 +2029,8 @@ avf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
                txd->cmd_type_offset_bsz |=
                        rte_cpu_to_le_64(((uint64_t)td_cmd) <<
-                                        AVF_TXD_QW1_CMD_SHIFT);
-               AVF_DUMP_TX_DESC(txq, txd, tx_id);
+                                        IAVF_TXD_QW1_CMD_SHIFT);
+               IAVF_DUMP_TX_DESC(txq, txd, tx_id);
        }
 
 end_of_tx:
@@ -1655,36 +2039,15 @@ end_of_tx:
        PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
                   txq->port_id, txq->queue_id, tx_id, nb_tx);
 
-       AVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
+       IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
        txq->tx_tail = tx_id;
 
        return nb_tx;
 }
 
-static uint16_t
-avf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
-                 uint16_t nb_pkts)
-{
-       uint16_t nb_tx = 0;
-       struct avf_tx_queue *txq = (struct avf_tx_queue *)tx_queue;
-
-       while (nb_pkts) {
-               uint16_t ret, num;
-
-               num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
-               ret = avf_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], num);
-               nb_tx += ret;
-               nb_pkts -= ret;
-               if (ret < num)
-                       break;
-       }
-
-       return nb_tx;
-}
-
 /* TX prep functions */
 uint16_t
-avf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
              uint16_t nb_pkts)
 {
        int i, ret;
@@ -1695,34 +2058,34 @@ avf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
                m = tx_pkts[i];
                ol_flags = m->ol_flags;
 
-               /* Check condition for nb_segs > AVF_TX_MAX_MTU_SEG. */
+               /* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
                if (!(ol_flags & PKT_TX_TCP_SEG)) {
-                       if (m->nb_segs > AVF_TX_MAX_MTU_SEG) {
-                               rte_errno = -EINVAL;
+                       if (m->nb_segs > IAVF_TX_MAX_MTU_SEG) {
+                               rte_errno = EINVAL;
                                return i;
                        }
-               } else if ((m->tso_segsz < AVF_MIN_TSO_MSS) ||
-                          (m->tso_segsz > AVF_MAX_TSO_MSS)) {
+               } else if ((m->tso_segsz < IAVF_MIN_TSO_MSS) ||
+                          (m->tso_segsz > IAVF_MAX_TSO_MSS)) {
                        /* MSS outside the range is considered malicious */
-                       rte_errno = -EINVAL;
+                       rte_errno = EINVAL;
                        return i;
                }
 
-               if (ol_flags & AVF_TX_OFFLOAD_NOTSUP_MASK) {
-                       rte_errno = -ENOTSUP;
+               if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
+                       rte_errno = ENOTSUP;
                        return i;
                }
 
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
                ret = rte_validate_tx_offload(m);
                if (ret != 0) {
-                       rte_errno = ret;
+                       rte_errno = -ret;
                        return i;
                }
 #endif
                ret = rte_net_intel_cksum_prepare(m);
                if (ret != 0) {
-                       rte_errno = ret;
+                       rte_errno = -ret;
                        return i;
                }
        }
@@ -1732,77 +2095,124 @@ avf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 
 /* choose rx function */
 void
-avf_set_rx_function(struct rte_eth_dev *dev)
+iavf_set_rx_function(struct rte_eth_dev *dev)
 {
-       struct avf_adapter *adapter =
-               AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-       struct avf_rx_queue *rxq;
+       struct iavf_adapter *adapter =
+               IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+#ifdef RTE_ARCH_X86
+       struct iavf_rx_queue *rxq;
        int i;
+       bool use_avx2 = false;
 
-       if (adapter->rx_vec_allowed) {
-               if (dev->data->scattered_rx) {
-                       PMD_DRV_LOG(DEBUG, "Using Vector Scattered Rx callback"
-                                   " (port=%d).", dev->data->port_id);
-                       dev->rx_pkt_burst = avf_recv_scattered_pkts_vec;
-               } else {
-                       PMD_DRV_LOG(DEBUG, "Using Vector Rx callback"
-                                   " (port=%d).", dev->data->port_id);
-                       dev->rx_pkt_burst = avf_recv_pkts_vec;
-               }
+       if (!iavf_rx_vec_dev_check(dev)) {
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        rxq = dev->data->rx_queues[i];
-                       if (!rxq)
-                               continue;
-                       avf_rxq_vec_setup(rxq);
+                       (void)iavf_rxq_vec_setup(rxq);
                }
-       } else if (dev->data->scattered_rx) {
+
+               if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+                   rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
+                       use_avx2 = true;
+
+               if (dev->data->scattered_rx) {
+                       PMD_DRV_LOG(DEBUG,
+                                   "Using %sVector Scattered Rx (port %d).",
+                                   use_avx2 ? "avx2 " : "",
+                                   dev->data->port_id);
+                       if (vf->vf_res->vf_cap_flags &
+                               VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+                               dev->rx_pkt_burst = use_avx2 ?
+                                       iavf_recv_scattered_pkts_vec_avx2_flex_rxd :
+                                       iavf_recv_scattered_pkts_vec_flex_rxd;
+                       else
+                               dev->rx_pkt_burst = use_avx2 ?
+                                       iavf_recv_scattered_pkts_vec_avx2 :
+                                       iavf_recv_scattered_pkts_vec;
+               } else {
+                       PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
+                                   use_avx2 ? "avx2 " : "",
+                                   dev->data->port_id);
+                       if (vf->vf_res->vf_cap_flags &
+                               VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+                               dev->rx_pkt_burst = use_avx2 ?
+                                       iavf_recv_pkts_vec_avx2_flex_rxd :
+                                       iavf_recv_pkts_vec_flex_rxd;
+                       else
+                               dev->rx_pkt_burst = use_avx2 ?
+                                       iavf_recv_pkts_vec_avx2 :
+                                       iavf_recv_pkts_vec;
+               }
+
+               return;
+       }
+#endif
+
+       if (dev->data->scattered_rx) {
                PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
                            dev->data->port_id);
-               dev->rx_pkt_burst = avf_recv_scattered_pkts;
+               if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+                       dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
+               else
+                       dev->rx_pkt_burst = iavf_recv_scattered_pkts;
        } else if (adapter->rx_bulk_alloc_allowed) {
                PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
                            dev->data->port_id);
-               dev->rx_pkt_burst = avf_recv_pkts_bulk_alloc;
+               dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
        } else {
                PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
                            dev->data->port_id);
-               dev->rx_pkt_burst = avf_recv_pkts;
+               if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+                       dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
+               else
+                       dev->rx_pkt_burst = iavf_recv_pkts;
        }
 }
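
iavf_set_rx_function() picks the receive burst handler once at configuration time: a vector path when the queues and CPU flags allow it, a flex-descriptor variant when VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC was negotiated, and a scalar fallback otherwise. A minimal sketch of that selection pattern (all demo_* names are invented for the example, not driver symbols):

/* Sketch only: select a burst function pointer from capability flags. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint16_t (*rx_burst_t)(void *rxq, void **pkts, uint16_t n);

static uint16_t demo_rx_scalar(void *q, void **p, uint16_t n)
{ (void)q; (void)p; return n; }
static uint16_t demo_rx_vector(void *q, void **p, uint16_t n)
{ (void)q; (void)p; return n; }

static rx_burst_t demo_select_rx(bool vector_ok, bool scattered)
{
        if (vector_ok && !scattered)
                return demo_rx_vector;  /* fast path when preconditions hold */
        return demo_rx_scalar;          /* safe fallback otherwise */
}

int main(void)
{
        rx_burst_t rx = demo_select_rx(true, false);

        printf("burst returned %u\n", (unsigned int)rx(NULL, NULL, 32));
        return 0;
}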
 
 /* choose tx function */
 void
-avf_set_tx_function(struct rte_eth_dev *dev)
+iavf_set_tx_function(struct rte_eth_dev *dev)
 {
-       struct avf_adapter *adapter =
-               AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-       struct avf_tx_queue *txq;
+#ifdef RTE_ARCH_X86
+       struct iavf_tx_queue *txq;
        int i;
+       bool use_avx2 = false;
 
-       if (adapter->tx_vec_allowed) {
-               PMD_DRV_LOG(DEBUG, "Using Vector Tx callback (port=%d).",
-                           dev->data->port_id);
-               dev->tx_pkt_burst = avf_xmit_pkts_vec;
-               dev->tx_pkt_prepare = NULL;
+       if (!iavf_tx_vec_dev_check(dev)) {
                for (i = 0; i < dev->data->nb_tx_queues; i++) {
                        txq = dev->data->tx_queues[i];
                        if (!txq)
                                continue;
-                       avf_txq_vec_setup(txq);
+                       iavf_txq_vec_setup(txq);
                }
-       } else {
-               PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
+
+               if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+                   rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
+                       use_avx2 = true;
+
+               PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
+                           use_avx2 ? "avx2 " : "",
                            dev->data->port_id);
-               dev->tx_pkt_burst = avf_xmit_pkts;
-               dev->tx_pkt_prepare = avf_prep_pkts;
+               dev->tx_pkt_burst = use_avx2 ?
+                                   iavf_xmit_pkts_vec_avx2 :
+                                   iavf_xmit_pkts_vec;
+               dev->tx_pkt_prepare = NULL;
+
+               return;
        }
+#endif
+
+       PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
+                   dev->data->port_id);
+       dev->tx_pkt_burst = iavf_xmit_pkts;
+       dev->tx_pkt_prepare = iavf_prep_pkts;
 }
 
 void
-avf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                     struct rte_eth_rxq_info *qinfo)
 {
-       struct avf_rx_queue *rxq;
+       struct iavf_rx_queue *rxq;
 
        rxq = dev->data->rx_queues[queue_id];
 
@@ -1811,15 +2221,15 @@ avf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        qinfo->nb_desc = rxq->nb_rx_desc;
 
        qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
-       qinfo->conf.rx_drop_en = TRUE;
+       qinfo->conf.rx_drop_en = true;
        qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
 }
 
 void
-avf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                     struct rte_eth_txq_info *qinfo)
 {
-       struct avf_tx_queue *txq;
+       struct iavf_tx_queue *txq;
 
        txq = dev->data->tx_queues[queue_id];
 
@@ -1833,25 +2243,26 @@ avf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 
 /* Get the number of used descriptors of a rx queue */
 uint32_t
-avf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
+iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
 {
-#define AVF_RXQ_SCAN_INTERVAL 4
-       volatile union avf_rx_desc *rxdp;
-       struct avf_rx_queue *rxq;
+#define IAVF_RXQ_SCAN_INTERVAL 4
+       volatile union iavf_rx_desc *rxdp;
+       struct iavf_rx_queue *rxq;
        uint16_t desc = 0;
 
        rxq = dev->data->rx_queues[queue_id];
        rxdp = &rxq->rx_ring[rxq->rx_tail];
+
        while ((desc < rxq->nb_rx_desc) &&
               ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
-                AVF_RXD_QW1_STATUS_MASK) >> AVF_RXD_QW1_STATUS_SHIFT) &
-              (1 << AVF_RX_DESC_STATUS_DD_SHIFT)) {
+                IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
+              (1 << IAVF_RX_DESC_STATUS_DD_SHIFT)) {
                /* Check the DD bit of one Rx descriptor in every group of 4,
                 * to avoid polling the ring too frequently and degrading
                 * performance too much.
                 */
-               desc += AVF_RXQ_SCAN_INTERVAL;
-               rxdp += AVF_RXQ_SCAN_INTERVAL;
+               desc += IAVF_RXQ_SCAN_INTERVAL;
+               rxdp += IAVF_RXQ_SCAN_INTERVAL;
                if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
                        rxdp = &(rxq->rx_ring[rxq->rx_tail +
                                        desc - rxq->nb_rx_desc]);
@@ -1861,9 +2272,9 @@ avf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
 }
 
 int
-avf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
+iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
 {
-       struct avf_rx_queue *rxq = rx_queue;
+       struct iavf_rx_queue *rxq = rx_queue;
        volatile uint64_t *status;
        uint64_t mask;
        uint32_t desc;
@@ -1879,8 +2290,8 @@ avf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
                desc -= rxq->nb_rx_desc;
 
        status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
-       mask = rte_le_to_cpu_64((1ULL << AVF_RX_DESC_STATUS_DD_SHIFT)
-               << AVF_RXD_QW1_STATUS_SHIFT);
+       mask = rte_le_to_cpu_64((1ULL << IAVF_RX_DESC_STATUS_DD_SHIFT)
+               << IAVF_RXD_QW1_STATUS_SHIFT);
        if (*status & mask)
                return RTE_ETH_RX_DESC_DONE;
 
@@ -1888,9 +2299,9 @@ avf_dev_rx_desc_status(void *rx_queue, uint16_t offset)
 }
 
 int
-avf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
+iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
 {
-       struct avf_tx_queue *txq = tx_queue;
+       struct iavf_tx_queue *txq = tx_queue;
        volatile uint64_t *status;
        uint64_t mask, expect;
        uint32_t desc;
@@ -1909,47 +2320,550 @@ avf_dev_tx_desc_status(void *tx_queue, uint16_t offset)
        }
 
        status = &txq->tx_ring[desc].cmd_type_offset_bsz;
-       mask = rte_le_to_cpu_64(AVF_TXD_QW1_DTYPE_MASK);
+       mask = rte_le_to_cpu_64(IAVF_TXD_QW1_DTYPE_MASK);
        expect = rte_cpu_to_le_64(
-                AVF_TX_DESC_DTYPE_DESC_DONE << AVF_TXD_QW1_DTYPE_SHIFT);
+                IAVF_TX_DESC_DTYPE_DESC_DONE << IAVF_TXD_QW1_DTYPE_SHIFT);
        if ((*status & mask) == expect)
                return RTE_ETH_TX_DESC_DONE;
 
        return RTE_ETH_TX_DESC_FULL;
 }
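
iavf_dev_tx_desc_status() above reports a slot as reusable once the descriptor-type field of the writeback qword equals the DESC_DONE value. A small self-contained sketch of that mask-and-compare check, using an invented field layout instead of the real IAVF_TXD_QW1_* encoding:

/* Sketch only: the mask and "done" value are placeholders for the example. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_DTYPE_MASK 0xFULL  /* placeholder for IAVF_TXD_QW1_DTYPE_MASK */
#define DEMO_DESC_DONE  0xFULL  /* placeholder for IAVF_TX_DESC_DTYPE_DESC_DONE */

static int demo_tx_desc_done(uint64_t cmd_type_offset_bsz)
{
        return (cmd_type_offset_bsz & DEMO_DTYPE_MASK) == DEMO_DESC_DONE;
}

int main(void)
{
        uint64_t in_flight = 0x123456789abcdef1ULL; /* dtype field != done */
        uint64_t completed = 0x123456789abcdeffULL; /* dtype field == done */

        printf("in flight: %s\n", demo_tx_desc_done(in_flight) ? "done" : "full");
        printf("completed: %s\n", demo_tx_desc_done(completed) ? "done" : "full");
        return 0;
}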
 
-__rte_weak uint16_t
-avf_recv_pkts_vec(__rte_unused void *rx_queue,
-                 __rte_unused struct rte_mbuf **rx_pkts,
-                 __rte_unused uint16_t nb_pkts)
-{
-       return 0;
-}
-
-__rte_weak uint16_t
-avf_recv_scattered_pkts_vec(__rte_unused void *rx_queue,
-                           __rte_unused struct rte_mbuf **rx_pkts,
-                           __rte_unused uint16_t nb_pkts)
-{
-       return 0;
-}
-
-__rte_weak uint16_t
-avf_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
-                        __rte_unused struct rte_mbuf **tx_pkts,
-                        __rte_unused uint16_t nb_pkts)
+const uint32_t *
+iavf_get_default_ptype_table(void)
 {
-       return 0;
-}
+       static const uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE]
+               __rte_cache_aligned = {
+               /* L2 types */
+               /* [0] reserved */
+               [1] = RTE_PTYPE_L2_ETHER,
+               [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
+               /* [3] - [5] reserved */
+               [6] = RTE_PTYPE_L2_ETHER_LLDP,
+               /* [7] - [10] reserved */
+               [11] = RTE_PTYPE_L2_ETHER_ARP,
+               /* [12] - [21] reserved */
+
+               /* Non tunneled IPv4 */
+               [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_L4_FRAG,
+               [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_L4_NONFRAG,
+               [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_L4_UDP,
+               /* [25] reserved */
+               [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_L4_TCP,
+               [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_L4_SCTP,
+               [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_L4_ICMP,
+
+               /* IPv4 --> IPv4 */
+               [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_IP |
+                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_FRAG,
+               [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_IP |
+                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_NONFRAG,
+               [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_IP |
+                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_UDP,
+               /* [32] reserved */
+               [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_IP |
+                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_TCP,
+               [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_IP |
+                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_SCTP,
+               [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_IP |
+                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_ICMP,
+
+               /* IPv4 --> IPv6 */
+               [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_IP |
+                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_FRAG,
+               [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_IP |
+                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_NONFRAG,
+               [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_IP |
+                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_UDP,
+               /* [39] reserved */
+               [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_IP |
+                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_TCP,
+               [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_IP |
+                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_SCTP,
+               [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_IP |
+                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_ICMP,
+
+               /* IPv4 --> GRE/Teredo/VXLAN */
+               [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT,
+
+               /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
+               [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT |
+                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_FRAG,
+               [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT |
+                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_NONFRAG,
+               [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT |
+                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_UDP,
+               /* [47] reserved */
+               [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT |
+                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_TCP,
+               [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT |
+                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_SCTP,
+               [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT |
+                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_ICMP,
+
+               /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
+               [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT |
+                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_FRAG,
+               [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT |
+                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_NONFRAG,
+               [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT |
+                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_UDP,
+               /* [54] reserved */
+               [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT |
+                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_TCP,
+               [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT |
+                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_SCTP,
+               [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT |
+                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_ICMP,
+
+               /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
+               [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
+
+               /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
+               [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_FRAG,
+               [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_NONFRAG,
+               [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_UDP,
+               /* [62] reserved */
+               [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_TCP,
+               [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_SCTP,
+               [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_ICMP,
+
+               /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
+               [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_FRAG,
+               [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_NONFRAG,
+               [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_UDP,
+               /* [69] reserved */
+               [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_TCP,
+               [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_SCTP,
+               [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                      RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_ICMP,
+               /* [73] - [87] reserved */
+
+               /* Non tunneled IPv6 */
+               [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_L4_FRAG,
+               [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_L4_NONFRAG,
+               [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_L4_UDP,
+               /* [91] reserved */
+               [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_L4_TCP,
+               [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_L4_SCTP,
+               [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_L4_ICMP,
+
+               /* IPv6 --> IPv4 */
+               [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_IP |
+                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_FRAG,
+               [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_IP |
+                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_NONFRAG,
+               [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_IP |
+                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_UDP,
+               /* [98] reserved */
+               [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                      RTE_PTYPE_TUNNEL_IP |
+                      RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                      RTE_PTYPE_INNER_L4_TCP,
+               [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_IP |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_SCTP,
+               [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_IP |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_ICMP,
+
+               /* IPv6 --> IPv6 */
+               [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_IP |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_FRAG,
+               [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_IP |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_NONFRAG,
+               [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_IP |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_UDP,
+               /* [105] reserved */
+               [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_IP |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_TCP,
+               [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_IP |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_SCTP,
+               [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_IP |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_ICMP,
+
+               /* IPv6 --> GRE/Teredo/VXLAN */
+               [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT,
+
+               /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
+               [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_FRAG,
+               [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_NONFRAG,
+               [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_UDP,
+               /* [113] reserved */
+               [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_TCP,
+               [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_SCTP,
+               [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_ICMP,
+
+               /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
+               [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_FRAG,
+               [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_NONFRAG,
+               [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_UDP,
+               /* [120] reserved */
+               [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_TCP,
+               [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_SCTP,
+               [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_ICMP,
+
+               /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
+               [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
+
+               /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
+               [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_FRAG,
+               [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_NONFRAG,
+               [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_UDP,
+               /* [128] reserved */
+               [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_TCP,
+               [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_SCTP,
+               [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_ICMP,
+
+               /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
+               [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_FRAG,
+               [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_NONFRAG,
+               [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_UDP,
+               /* [135] reserved */
+               [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_TCP,
+               [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_SCTP,
+               [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_ICMP,
+               /* [139] - [299] reserved */
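+               /*
+                * Indices not listed here (including the reserved ranges
+                * above) are left zero-initialized and therefore resolve
+                * to RTE_PTYPE_UNKNOWN.
+                */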
+
+               /* PPPoE */
+               [300] = RTE_PTYPE_L2_ETHER_PPPOE,
+               [301] = RTE_PTYPE_L2_ETHER_PPPOE,
+
+               /* PPPoE --> IPv4 */
+               [302] = RTE_PTYPE_L2_ETHER_PPPOE |
+                       RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_FRAG,
+               [303] = RTE_PTYPE_L2_ETHER_PPPOE |
+                       RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_NONFRAG,
+               [304] = RTE_PTYPE_L2_ETHER_PPPOE |
+                       RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_UDP,
+               [305] = RTE_PTYPE_L2_ETHER_PPPOE |
+                       RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_TCP,
+               [306] = RTE_PTYPE_L2_ETHER_PPPOE |
+                       RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_SCTP,
+               [307] = RTE_PTYPE_L2_ETHER_PPPOE |
+                       RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_ICMP,
 
-__rte_weak int
-avf_rxq_vec_setup(__rte_unused struct avf_rx_queue *rxq)
-{
-       return -1;
-}
+               /* PPPoE --> IPv6 */
+               [308] = RTE_PTYPE_L2_ETHER_PPPOE |
+                       RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_FRAG,
+               [309] = RTE_PTYPE_L2_ETHER_PPPOE |
+                       RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_NONFRAG,
+               [310] = RTE_PTYPE_L2_ETHER_PPPOE |
+                       RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_UDP,
+               [311] = RTE_PTYPE_L2_ETHER_PPPOE |
+                       RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_TCP,
+               [312] = RTE_PTYPE_L2_ETHER_PPPOE |
+                       RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_SCTP,
+               [313] = RTE_PTYPE_L2_ETHER_PPPOE |
+                       RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_ICMP,
+               /* [314] - [324] reserved */
+
+               /* IPv4/IPv6 --> GTPC/GTPU */
+               [325] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPC,
+               [326] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPC,
+               [327] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPC,
+               [328] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPC,
+               [329] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPU,
+               [330] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPU,
+
+               /* IPv4 --> GTPU --> IPv4 */
+               [331] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPU |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_FRAG,
+               [332] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPU |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_NONFRAG,
+               [333] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPU |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_UDP,
+               [334] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPU |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_TCP,
+               [335] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPU |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_ICMP,
+
+               /* IPv6 --> GTPU --> IPv4 */
+               [336] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPU |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_FRAG,
+               [337] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPU |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_NONFRAG,
+               [338] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPU |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_UDP,
+               [339] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPU |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_TCP,
+               [340] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPU |
+                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_ICMP,
+
+               /* IPv4 --> GTPU --> IPv6 */
+               [341] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPU |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_FRAG,
+               [342] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPU |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_NONFRAG,
+               [343] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPU |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_UDP,
+               [344] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPU |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_TCP,
+               [345] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPU |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_ICMP,
+
+               /* IPv6 --> GTPU --> IPv6 */
+               [346] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPU |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_FRAG,
+               [347] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPU |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_NONFRAG,
+               [348] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPU |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_UDP,
+               [349] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPU |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_TCP,
+               [350] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_TUNNEL_GTPU |
+                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_INNER_L4_ICMP,
+               /* All others reserved */
+       };
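+       /*
+        * The table maps the hardware PTYPE index reported in the Rx
+        * descriptor to the corresponding RTE_PTYPE_* flags, so the Rx
+        * paths can fill mbuf->packet_type with a single lookup.
+        */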
 
-__rte_weak int
-avf_txq_vec_setup(__rte_unused struct avf_tx_queue *txq)
-{
-       return -1;
+       return ptype_tbl;
 }