net/qede: remove dead code
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 665a07b..ea264f5 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
  */
 
 #include <rte_net.h>
@@ -28,7 +26,7 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
        }
        rxq->sw_rx_ring[idx].mbuf = new_mb;
        rxq->sw_rx_ring[idx].page_offset = 0;
-       mapping = rte_mbuf_data_dma_addr_default(new_mb);
+       mapping = rte_mbuf_data_iova_default(new_mb);
        /* Advance PROD and get BD pointer */
        rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
        rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
@@ -37,35 +35,108 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
        return 0;
 }
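The only functional change in this hunk is the rename from rte_mbuf_data_dma_addr_default() to rte_mbuf_data_iova_default(), following DPDK's move from "DMA address" to IOVA terminology; the returned address is used exactly as before. A minimal sketch of what the two BD writes above amount to, assuming U64_HI()/U64_LO() are the usual 64-bit split helpers:

    /* Split the 64-bit IOVA into the BD's two little-endian 32-bit words. */
    dma_addr_t mapping = rte_mbuf_data_iova_default(new_mb);
    rx_bd->addr.hi = rte_cpu_to_le_32((uint32_t)(mapping >> 32));         /* U64_HI() */
    rx_bd->addr.lo = rte_cpu_to_le_32((uint32_t)(mapping & 0xffffffffU)); /* U64_LO() */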
 
+#define QEDE_MAX_BULK_ALLOC_COUNT 512
+
+static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
+{
+       void *obj_p[QEDE_MAX_BULK_ALLOC_COUNT] __rte_cache_aligned;
+       struct rte_mbuf *mbuf = NULL;
+       struct eth_rx_bd *rx_bd;
+       dma_addr_t mapping;
+       int i, ret = 0;
+       uint16_t idx;
+
+       if (count > QEDE_MAX_BULK_ALLOC_COUNT)
+               count = QEDE_MAX_BULK_ALLOC_COUNT;
+
+       ret = rte_mempool_get_bulk(rxq->mb_pool, obj_p, count);
+       if (unlikely(ret)) {
+               PMD_RX_LOG(ERR, rxq,
+                          "Failed to allocate %d rx buffers "
+                           "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
+                           count,
+                           rxq->sw_rx_prod & NUM_RX_BDS(rxq),
+                           rxq->sw_rx_cons & NUM_RX_BDS(rxq),
+                           rte_mempool_avail_count(rxq->mb_pool),
+                           rte_mempool_in_use_count(rxq->mb_pool));
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < count; i++) {
+               mbuf = obj_p[i];
+               if (likely(i < count - 1))
+                       rte_prefetch0(obj_p[i + 1]);
+
+               idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
+               rxq->sw_rx_ring[idx].mbuf = mbuf;
+               rxq->sw_rx_ring[idx].page_offset = 0;
+               mapping = rte_mbuf_data_iova_default(mbuf);
+               rx_bd = (struct eth_rx_bd *)
+                       ecore_chain_produce(&rxq->rx_bd_ring);
+               rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
+               rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
+               rxq->sw_rx_prod++;
+       }
+
+       return 0;
+}
+
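How this helper is used: the Rx burst handlers added further below never refill the ring inline; they count consumed BDs in rxq->rx_alloc_count and replenish them in a single bulk call at the start of the next poll. A condensed sketch of that calling pattern, with the error handling trimmed (see the handlers below for the full version):

    /* Deferred bulk refill, as done at the top of the Rx burst handlers. */
    if (rxq->rx_alloc_count) {
            if (qede_alloc_rx_bulk_mbufs(rxq, rxq->rx_alloc_count) == 0) {
                    qede_update_rx_prod(qdev, rxq); /* publish new PROD to HW */
                    rxq->rx_alloc_count = 0;
            }
    }
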
+/* Criteria for calculating Rx buffer size -
+ * 1) rx_buf_size should not exceed the size of mbuf
+ * 2) In scattered_rx mode - minimum rx_buf_size should be
+ *    (MTU + Maximum L2 Header Size + 2) / ETH_RX_MAX_BUFF_PER_PKT
+ * 3) In regular mode - minimum rx_buf_size should be
+ *    (MTU + Maximum L2 Header Size + 2)
+ *    In the above cases +2 corresponds to the 2 bytes of padding in
+ *    front of the L2 header.
+ * 4) rx_buf_size should be cache-line-size aligned. So, considering
+ *    criterion 1, we adjust the size to floor instead of ceil, so that
+ *    we don't exceed the mbuf size while rounding rx_buf_size.
+ */
 int
-qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
-                   uint16_t nb_desc, unsigned int socket_id,
-                   __rte_unused const struct rte_eth_rxconf *rx_conf,
-                   struct rte_mempool *mp)
+qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
+                     uint16_t max_frame_size)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-       struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-       struct qede_rx_queue *rxq;
-       uint16_t max_rx_pkt_len;
-       uint16_t bufsz;
-       size_t size;
-       int rc;
+       int rx_buf_size;
 
-       PMD_INIT_FUNC_TRACE(edev);
+       if (dev->data->scattered_rx) {
+               /* Per HW limitation, only ETH_RX_MAX_BUFF_PER_PKT buffers
+                * can be used for a single packet, so make sure the mbuf
+                * size is sufficient for this.
+                */
+               if ((mbufsz * ETH_RX_MAX_BUFF_PER_PKT) <
+                    (max_frame_size + QEDE_ETH_OVERHEAD)) {
+                       DP_ERR(edev, "mbuf size %d is not enough to hold max fragments (%d) for max rx packet length (%d)\n",
+                              mbufsz, ETH_RX_MAX_BUFF_PER_PKT, max_frame_size);
+                       return -EINVAL;
+               }
 
-       /* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
-       if (!rte_is_power_of_2(nb_desc)) {
-               DP_ERR(edev, "Ring size %u is not power of 2\n",
-                         nb_desc);
-               return -EINVAL;
+               rx_buf_size = RTE_MAX(mbufsz,
+                                     (max_frame_size + QEDE_ETH_OVERHEAD) /
+                                      ETH_RX_MAX_BUFF_PER_PKT);
+       } else {
+               rx_buf_size = max_frame_size + QEDE_ETH_OVERHEAD;
        }
 
-       /* Free memory prior to re-allocation if needed... */
-       if (dev->data->rx_queues[queue_idx] != NULL) {
-               qede_rx_queue_release(dev->data->rx_queues[queue_idx]);
-               dev->data->rx_queues[queue_idx] = NULL;
-       }
+       /* Align to cache-line size if needed */
+       return QEDE_FLOOR_TO_CACHE_LINE_SIZE(rx_buf_size);
+}
+
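A worked example of the criteria above, using illustrative values only (2048-byte mbuf data room, 9018-byte max frame; QEDE_ETH_OVERHEAD and ETH_RX_MAX_BUFF_PER_PKT are assumed here to be 24 and 5, not quoted from the headers):

    /* Scattered mode: a packet may span up to ETH_RX_MAX_BUFF_PER_PKT BDs. */
    rx_buf_size = RTE_MAX(2048, (9018 + 24) / 5);   /* max(2048, 1808) = 2048 */
    /* Regular mode: the whole frame must fit into a single buffer. */
    rx_buf_size = 9018 + 24;                        /* 9042 */
    /* Both results are then floor-aligned to the cache line; with a 64-byte
     * line, 9042 becomes 9024. Flooring keeps the result <= the mbuf size.
     */
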
+static struct qede_rx_queue *
+qede_alloc_rx_queue_mem(struct rte_eth_dev *dev,
+                       uint16_t queue_idx,
+                       uint16_t nb_desc,
+                       unsigned int socket_id,
+                       struct rte_mempool *mp,
+                       uint16_t bufsz)
+{
+       struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       struct qede_rx_queue *rxq;
+       size_t size;
+       int rc;
 
        /* First allocate the rx queue data structure */
        rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
@@ -74,7 +145,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        if (!rxq) {
                DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
                          socket_id);
-               return -ENOMEM;
+               return NULL;
        }
 
        rxq->qdev = qdev;
@@ -82,24 +153,9 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        rxq->nb_rx_desc = nb_desc;
        rxq->queue_id = queue_idx;
        rxq->port_id = dev->data->port_id;
-       max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
-       qdev->mtu = max_rx_pkt_len;
 
-       /* Fix up RX buffer size */
-       bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
-       if ((rxmode->enable_scatter)                    ||
-           (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
-               if (!dev->data->scattered_rx) {
-                       DP_INFO(edev, "Forcing scatter-gather mode\n");
-                       dev->data->scattered_rx = 1;
-               }
-       }
-       if (dev->data->scattered_rx)
-               rxq->rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
-       else
-               rxq->rx_buf_size = qdev->mtu + QEDE_ETH_OVERHEAD;
-       /* Align to cache-line size if needed */
-       rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
+       rxq->rx_buf_size = bufsz;
 
        DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
                qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
@@ -109,11 +165,10 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
                                             RTE_CACHE_LINE_SIZE, socket_id);
        if (!rxq->sw_rx_ring) {
-               DP_NOTICE(edev, false,
-                         "Unable to alloc memory for sw_rx_ring on socket %u\n",
-                         socket_id);
+               DP_ERR(edev, "Memory allocation failed for sw_rx_ring on"
+                      " socket %u\n", socket_id);
                rte_free(rxq);
-               return -ENOMEM;
+               return NULL;
        }
 
        /* Allocate FW Rx ring  */
@@ -127,12 +182,11 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                            NULL);
 
        if (rc != ECORE_SUCCESS) {
-               DP_NOTICE(edev, false,
-                         "Unable to alloc memory for rxbd ring on socket %u\n",
-                         socket_id);
+               DP_ERR(edev, "Memory allocation failed for RX BD ring"
+                      " on socket %u\n", socket_id);
                rte_free(rxq->sw_rx_ring);
                rte_free(rxq);
-               return -ENOMEM;
+               return NULL;
        }
 
        /* Allocate FW completion ring */
@@ -146,20 +200,93 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                            NULL);
 
        if (rc != ECORE_SUCCESS) {
-               DP_NOTICE(edev, false,
-                         "Unable to alloc memory for cqe ring on socket %u\n",
-                         socket_id);
+               DP_ERR(edev, "Memory allocation failed for RX CQE ring"
+                      " on socket %u\n", socket_id);
                qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
                rte_free(rxq->sw_rx_ring);
                rte_free(rxq);
-               return -ENOMEM;
+               return NULL;
+       }
+
+       return rxq;
+}
+
+int
+qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
+                   uint16_t nb_desc, unsigned int socket_id,
+                   __rte_unused const struct rte_eth_rxconf *rx_conf,
+                   struct rte_mempool *mp)
+{
+       struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+       struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+       struct qede_rx_queue *rxq;
+       uint16_t max_rx_pkt_len;
+       uint16_t bufsz;
+       int rc;
+
+       PMD_INIT_FUNC_TRACE(edev);
+
+       /* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
+       if (!rte_is_power_of_2(nb_desc)) {
+               DP_ERR(edev, "Ring size %u is not power of 2\n",
+                         nb_desc);
+               return -EINVAL;
+       }
+
+       /* Free memory prior to re-allocation if needed... */
+       if (dev->data->rx_queues[qid] != NULL) {
+               qede_rx_queue_release(dev->data->rx_queues[qid]);
+               dev->data->rx_queues[qid] = NULL;
+       }
+
+       max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
+
+       /* Fix up RX buffer size */
+       bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+       /* Cache-align the mbuf size to simplify the rx_buf_size calculation */
+       bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
+       if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
+           (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
+               if (!dev->data->scattered_rx) {
+                       DP_INFO(edev, "Forcing scatter-gather mode\n");
+                       dev->data->scattered_rx = 1;
+               }
        }
 
-       dev->data->rx_queues[queue_idx] = rxq;
-       qdev->fp_array[queue_idx].rxq = rxq;
+       rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pkt_len);
+       if (rc < 0)
+               return rc;
+
+       bufsz = rc;
+
+       if (ECORE_IS_CMT(edev)) {
+               rxq = qede_alloc_rx_queue_mem(dev, qid * 2, nb_desc,
+                                             socket_id, mp, bufsz);
+               if (!rxq)
+                       return -ENOMEM;
+
+               qdev->fp_array[qid * 2].rxq = rxq;
+               rxq = qede_alloc_rx_queue_mem(dev, qid * 2 + 1, nb_desc,
+                                             socket_id, mp, bufsz);
+               if (!rxq)
+                       return -ENOMEM;
+
+               qdev->fp_array[qid * 2 + 1].rxq = rxq;
+               /* Provide the per-engine fp struct as the Rx queue */
+               dev->data->rx_queues[qid] = &qdev->fp_array_cmt[qid];
+       } else {
+               rxq = qede_alloc_rx_queue_mem(dev, qid, nb_desc,
+                                             socket_id, mp, bufsz);
+               if (!rxq)
+                       return -ENOMEM;
+
+               dev->data->rx_queues[qid] = rxq;
+               qdev->fp_array[qid].rxq = rxq;
+       }
 
        DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
-                 queue_idx, nb_desc, qdev->mtu, socket_id);
+                 qid, nb_desc, rxq->rx_buf_size, socket_id);
 
        return 0;
 }
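On CMT (two-engine) adapters each ethdev queue index thus fans out to a pair of hardware fastpath queues, and what lands in dev->data->rx_queues[] (and, below, tx_queues[]) is the per-engine qede_fastpath_cmt wrapper rather than a single queue. A hypothetical helper, not part of the driver, that spells out the index mapping:

    /* Hypothetical: ethdev queue qid maps to one HW queue per engine. */
    static inline void qede_cmt_hw_queue_ids(uint16_t qid,
                                             uint16_t *engine0, uint16_t *engine1)
    {
            *engine0 = qid * 2;     /* served by hwfn 0 (queue_id % num_hwfns) */
            *engine1 = qid * 2 + 1; /* served by hwfn 1 */
    }
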
@@ -190,14 +317,35 @@ static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
        }
 }
 
+static void _qede_rx_queue_release(struct qede_dev *qdev,
+                                  struct ecore_dev *edev,
+                                  struct qede_rx_queue *rxq)
+{
+       qede_rx_queue_release_mbufs(rxq);
+       qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
+       qdev->ops->common->chain_free(edev, &rxq->rx_comp_ring);
+       rte_free(rxq->sw_rx_ring);
+       rte_free(rxq);
+}
+
 void qede_rx_queue_release(void *rx_queue)
 {
        struct qede_rx_queue *rxq = rx_queue;
+       struct qede_fastpath_cmt *fp_cmt;
+       struct qede_dev *qdev;
+       struct ecore_dev *edev;
 
        if (rxq) {
-               qede_rx_queue_release_mbufs(rxq);
-               rte_free(rxq->sw_rx_ring);
-               rte_free(rxq);
+               qdev = rxq->qdev;
+               edev = QEDE_INIT_EDEV(qdev);
+               PMD_INIT_FUNC_TRACE(edev);
+               if (ECORE_IS_CMT(edev)) {
+                       fp_cmt = rx_queue;
+                       _qede_rx_queue_release(qdev, edev, fp_cmt->fp0->rxq);
+                       _qede_rx_queue_release(qdev, edev, fp_cmt->fp1->rxq);
+               } else {
+                       _qede_rx_queue_release(qdev, edev, rxq);
+               }
        }
 }
 
@@ -211,8 +359,8 @@ static int qede_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
        int hwfn_index;
        int rc;
 
-       if (rx_queue_id < eth_dev->data->nb_rx_queues) {
-               rxq = eth_dev->data->rx_queues[rx_queue_id];
+       if (rx_queue_id < qdev->num_rx_queues) {
+               rxq = qdev->fp_array[rx_queue_id].rxq;
                hwfn_index = rx_queue_id % edev->num_hwfns;
                p_hwfn = &edev->hwfns[hwfn_index];
                rc = ecore_eth_rx_queue_stop(p_hwfn, rxq->handle,
@@ -234,32 +382,18 @@ static int qede_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
        return rc;
 }
 
-int
-qede_tx_queue_setup(struct rte_eth_dev *dev,
-                   uint16_t queue_idx,
-                   uint16_t nb_desc,
-                   unsigned int socket_id,
-                   const struct rte_eth_txconf *tx_conf)
+static struct qede_tx_queue *
+qede_alloc_tx_queue_mem(struct rte_eth_dev *dev,
+                       uint16_t queue_idx,
+                       uint16_t nb_desc,
+                       unsigned int socket_id,
+                       const struct rte_eth_txconf *tx_conf)
 {
        struct qede_dev *qdev = dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct qede_tx_queue *txq;
        int rc;
 
-       PMD_INIT_FUNC_TRACE(edev);
-
-       if (!rte_is_power_of_2(nb_desc)) {
-               DP_ERR(edev, "Ring size %u is not power of 2\n",
-                      nb_desc);
-               return -EINVAL;
-       }
-
-       /* Free memory prior to re-allocation if needed... */
-       if (dev->data->tx_queues[queue_idx] != NULL) {
-               qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
-               dev->data->tx_queues[queue_idx] = NULL;
-       }
-
        txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
 
@@ -267,7 +401,7 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,
                DP_ERR(edev,
                       "Unable to allocate memory for txq on socket %u",
                       socket_id);
-               return -ENOMEM;
+               return NULL;
        }
 
        txq->nb_tx_desc = nb_desc;
@@ -287,7 +421,7 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,
                       "Unable to allocate memory for txbd ring on socket %u",
                       socket_id);
                qede_tx_queue_release(txq);
-               return -ENOMEM;
+               return NULL;
        }
 
        /* Allocate software ring */
@@ -302,7 +436,7 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,
                       socket_id);
                qdev->ops->common->chain_free(edev, &txq->tx_pbl);
                qede_tx_queue_release(txq);
-               return -ENOMEM;
+               return NULL;
        }
 
        txq->queue_id = queue_idx;
@@ -313,12 +447,61 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,
            tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
            (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
 
-       dev->data->tx_queues[queue_idx] = txq;
-       qdev->fp_array[queue_idx].txq = txq;
-
        DP_INFO(edev,
                  "txq %u num_desc %u tx_free_thresh %u socket %u\n",
                  queue_idx, nb_desc, txq->tx_free_thresh, socket_id);
+       return txq;
+}
+
+int
+qede_tx_queue_setup(struct rte_eth_dev *dev,
+                   uint16_t queue_idx,
+                   uint16_t nb_desc,
+                   unsigned int socket_id,
+                   const struct rte_eth_txconf *tx_conf)
+{
+       struct qede_dev *qdev = dev->data->dev_private;
+       struct ecore_dev *edev = &qdev->edev;
+       struct qede_tx_queue *txq;
+
+       PMD_INIT_FUNC_TRACE(edev);
+
+       if (!rte_is_power_of_2(nb_desc)) {
+               DP_ERR(edev, "Ring size %u is not power of 2\n",
+                      nb_desc);
+               return -EINVAL;
+       }
+
+       /* Free memory prior to re-allocation if needed... */
+       if (dev->data->tx_queues[queue_idx] != NULL) {
+               qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
+               dev->data->tx_queues[queue_idx] = NULL;
+       }
+
+       if (ECORE_IS_CMT(edev)) {
+               txq = qede_alloc_tx_queue_mem(dev, queue_idx * 2, nb_desc,
+                                             socket_id, tx_conf);
+               if (!txq)
+                       return -ENOMEM;
+
+               qdev->fp_array[queue_idx * 2].txq = txq;
+               txq = qede_alloc_tx_queue_mem(dev, (queue_idx * 2) + 1, nb_desc,
+                                             socket_id, tx_conf);
+               if (!txq)
+                       return -ENOMEM;
+
+               qdev->fp_array[(queue_idx * 2) + 1].txq = txq;
+               dev->data->tx_queues[queue_idx] =
+                                       &qdev->fp_array_cmt[queue_idx];
+       } else {
+               txq = qede_alloc_tx_queue_mem(dev, queue_idx, nb_desc,
+                                             socket_id, tx_conf);
+               if (!txq)
+                       return -ENOMEM;
+
+               dev->data->tx_queues[queue_idx] = txq;
+               qdev->fp_array[queue_idx].txq = txq;
+       }
 
        return 0;
 }
@@ -348,14 +531,35 @@ static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
        }
 }
 
+static void _qede_tx_queue_release(struct qede_dev *qdev,
+                                  struct ecore_dev *edev,
+                                  struct qede_tx_queue *txq)
+{
+       qede_tx_queue_release_mbufs(txq);
+       qdev->ops->common->chain_free(edev, &txq->tx_pbl);
+       rte_free(txq->sw_tx_ring);
+       rte_free(txq);
+}
+
 void qede_tx_queue_release(void *tx_queue)
 {
        struct qede_tx_queue *txq = tx_queue;
+       struct qede_fastpath_cmt *fp_cmt;
+       struct qede_dev *qdev;
+       struct ecore_dev *edev;
 
        if (txq) {
-               qede_tx_queue_release_mbufs(txq);
-               rte_free(txq->sw_tx_ring);
-               rte_free(txq);
+               qdev = txq->qdev;
+               edev = QEDE_INIT_EDEV(qdev);
+               PMD_INIT_FUNC_TRACE(edev);
+
+               if (ECORE_IS_CMT(edev)) {
+                       fp_cmt = tx_queue;
+                       _qede_tx_queue_release(qdev, edev, fp_cmt->fp0->txq);
+                       _qede_tx_queue_release(qdev, edev, fp_cmt->fp1->txq);
+               } else {
+                       _qede_tx_queue_release(qdev, edev, txq);
+               }
        }
 }
 
@@ -389,10 +593,13 @@ qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
 
 int qede_alloc_fp_resc(struct qede_dev *qdev)
 {
-       struct ecore_dev *edev = &qdev->edev;
+       struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qede_fastpath *fp;
        uint32_t num_sbs;
        uint16_t sb_idx;
+       int i;
+
+       PMD_INIT_FUNC_TRACE(edev);
 
        if (IS_VF(edev))
                ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
@@ -416,6 +623,28 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
        memset((void *)qdev->fp_array, 0, QEDE_RXTX_MAX(qdev) *
                        sizeof(*qdev->fp_array));
 
+       if (ECORE_IS_CMT(edev)) {
+               qdev->fp_array_cmt = rte_calloc("fp_cmt",
+                                               QEDE_RXTX_MAX(qdev) / 2,
+                                               sizeof(*qdev->fp_array_cmt),
+                                               RTE_CACHE_LINE_SIZE);
+
+               if (!qdev->fp_array_cmt) {
+                       DP_ERR(edev, "fp array for CMT allocation failed\n");
+                       return -ENOMEM;
+               }
+
+               memset((void *)qdev->fp_array_cmt, 0,
+                      (QEDE_RXTX_MAX(qdev) / 2) * sizeof(*qdev->fp_array_cmt));
+
+               /* Establish the mapping between fp_array and fp_array_cmt */
+               for (i = 0; i < QEDE_RXTX_MAX(qdev) / 2; i++) {
+                       qdev->fp_array_cmt[i].qdev = qdev;
+                       qdev->fp_array_cmt[i].fp0 = &qdev->fp_array[i * 2];
+                       qdev->fp_array_cmt[i].fp1 = &qdev->fp_array[i * 2 + 1];
+               }
+       }
+
        for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
                fp = &qdev->fp_array[sb_idx];
                fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info),
@@ -440,8 +669,6 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qede_fastpath *fp;
-       struct qede_rx_queue *rxq;
-       struct qede_tx_queue *txq;
        uint16_t sb_idx;
        uint8_t i;
 
@@ -464,21 +691,13 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                if (eth_dev->data->rx_queues[i]) {
                        qede_rx_queue_release(eth_dev->data->rx_queues[i]);
-                       rxq = eth_dev->data->rx_queues[i];
-                       qdev->ops->common->chain_free(edev,
-                                                     &rxq->rx_bd_ring);
-                       qdev->ops->common->chain_free(edev,
-                                                     &rxq->rx_comp_ring);
                        eth_dev->data->rx_queues[i] = NULL;
                }
        }
 
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                if (eth_dev->data->tx_queues[i]) {
-                       txq = eth_dev->data->tx_queues[i];
                        qede_tx_queue_release(eth_dev->data->tx_queues[i]);
-                       qdev->ops->common->chain_free(edev,
-                                                     &txq->tx_pbl);
                        eth_dev->data->tx_queues[i] = NULL;
                }
        }
@@ -486,6 +705,10 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
        if (qdev->fp_array)
                rte_free(qdev->fp_array);
        qdev->fp_array = NULL;
+
+       if (qdev->fp_array_cmt)
+               rte_free(qdev->fp_array_cmt);
+       qdev->fp_array_cmt = NULL;
 }
 
 static inline void
@@ -537,9 +760,9 @@ qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
        int hwfn_index;
        int rc;
 
-       if (rx_queue_id < eth_dev->data->nb_rx_queues) {
+       if (rx_queue_id < qdev->num_rx_queues) {
                fp = &qdev->fp_array[rx_queue_id];
-               rxq = eth_dev->data->rx_queues[rx_queue_id];
+               rxq = fp->rxq;
                /* Allocate buffers for the Rx ring */
                for (j = 0; j < rxq->nb_rx_desc; j++) {
                        rc = qede_alloc_rx_buffer(rxq);
@@ -553,9 +776,10 @@ qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
                ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
                /* Prepare ramrod */
                memset(&params, 0, sizeof(params));
-               params.queue_id = rx_queue_id;
+               params.queue_id = rx_queue_id / edev->num_hwfns;
                params.vport_id = 0;
-               params.sb = fp->sb_info->igu_sb_id;
+               params.stats_id = params.vport_id;
+               params.p_sb = fp->sb_info;
                DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
                                fp->rxq->queue_id, fp->sb_info->igu_sb_id);
                params.sb_idx = RX_PI;
@@ -579,7 +803,7 @@ qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
                fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
                fp->rxq->handle = ret_params.p_handle;
 
-               fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
+               fp->rxq->hw_cons_ptr = &fp->sb_info->sb_pi_array[RX_PI];
                qede_update_rx_prod(qdev, fp->rxq);
                eth_dev->data->rx_queue_state[rx_queue_id] =
                        RTE_ETH_QUEUE_STATE_STARTED;
@@ -607,13 +831,14 @@ qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
        int hwfn_index;
        int rc;
 
-       if (tx_queue_id < eth_dev->data->nb_tx_queues) {
-               txq = eth_dev->data->tx_queues[tx_queue_id];
+       if (tx_queue_id < qdev->num_tx_queues) {
                fp = &qdev->fp_array[tx_queue_id];
+               txq = fp->txq;
                memset(&params, 0, sizeof(params));
-               params.queue_id = tx_queue_id;
+               params.queue_id = tx_queue_id / edev->num_hwfns;
                params.vport_id = 0;
-               params.sb = fp->sb_info->igu_sb_id;
+               params.stats_id = params.vport_id;
+               params.p_sb = fp->sb_info;
                DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
                                fp->txq->queue_id, fp->sb_info->igu_sb_id);
                params.sb_idx = TX_PI(0); /* tc = 0 */
@@ -636,7 +861,7 @@ qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
                txq->doorbell_addr = ret_params.p_doorbell;
                txq->handle = ret_params.p_handle;
 
-               txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[TX_PI(0)];
+               txq->hw_cons_ptr = &fp->sb_info->sb_pi_array[TX_PI(0)];
                SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST,
                                DB_DEST_XCM);
                SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
@@ -704,7 +929,6 @@ qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
                qede_free_tx_pkt(txq);
 }
 
-
 static int qede_drain_txq(struct qede_dev *qdev,
                          struct qede_tx_queue *txq, bool allow_drain)
 {
@@ -740,7 +964,6 @@ static int qede_drain_txq(struct qede_dev *qdev,
        return 0;
 }
 
-
 /* Stops a given TX queue in the HW */
 static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 {
@@ -751,8 +974,8 @@ static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
        int hwfn_index;
        int rc;
 
-       if (tx_queue_id < eth_dev->data->nb_tx_queues) {
-               txq = eth_dev->data->tx_queues[tx_queue_id];
+       if (tx_queue_id < qdev->num_tx_queues) {
+               txq = qdev->fp_array[tx_queue_id].txq;
                /* Drain txq */
                if (qede_drain_txq(qdev, txq, true))
                        return -1; /* For the lack of retcodes */
@@ -781,15 +1004,15 @@ int qede_start_queues(struct rte_eth_dev *eth_dev)
 {
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        uint8_t id;
-       int rc;
+       int rc = -1;
 
-       for_each_rss(id) {
+       for (id = 0; id < qdev->num_rx_queues; id++) {
                rc = qede_rx_queue_start(eth_dev, id);
                if (rc != ECORE_SUCCESS)
                        return -1;
        }
 
-       for_each_tss(id) {
+       for (id = 0; id < qdev->num_tx_queues; id++) {
                rc = qede_tx_queue_start(eth_dev, id);
                if (rc != ECORE_SUCCESS)
                        return -1;
@@ -804,21 +1027,25 @@ void qede_stop_queues(struct rte_eth_dev *eth_dev)
        uint8_t id;
 
        /* Stopping RX/TX queues */
-       for_each_tss(id) {
+       for (id = 0; id < qdev->num_tx_queues; id++)
                qede_tx_queue_stop(eth_dev, id);
-       }
 
-       for_each_rss(id) {
+       for (id = 0; id < qdev->num_rx_queues; id++)
                qede_rx_queue_stop(eth_dev, id);
-       }
 }
 
-static bool qede_tunn_exist(uint16_t flag)
+static inline bool qede_tunn_exist(uint16_t flag)
 {
        return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
                    PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
 }
 
+static inline uint8_t qede_check_tunn_csum_l3(uint16_t flag)
+{
+       return !!((PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+               PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT) & flag);
+}
+
 /*
  * qede_check_tunn_csum_l4:
  * Returns:
@@ -845,6 +1072,129 @@ static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
        return 0;
 }
 
+/* Returns outer L2, L3 and L4 packet_type for tunneled packets */
+static inline uint32_t qede_rx_cqe_to_pkt_type_outer(struct rte_mbuf *m)
+{
+       uint32_t packet_type = RTE_PTYPE_UNKNOWN;
+       struct rte_ether_hdr *eth_hdr;
+       struct rte_ipv4_hdr *ipv4_hdr;
+       struct rte_ipv6_hdr *ipv6_hdr;
+       struct rte_vlan_hdr *vlan_hdr;
+       uint16_t ethertype;
+       bool vlan_tagged = 0;
+       uint16_t len;
+
+       eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+       len = sizeof(struct rte_ether_hdr);
+       ethertype = rte_cpu_to_be_16(eth_hdr->ether_type);
+
+       /* Note: Valid only if VLAN stripping is disabled */
+       if (ethertype == RTE_ETHER_TYPE_VLAN) {
+               vlan_tagged = 1;
+               vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
+               len += sizeof(struct rte_vlan_hdr);
+               ethertype = rte_cpu_to_be_16(vlan_hdr->eth_proto);
+       }
+
+       if (ethertype == RTE_ETHER_TYPE_IPV4) {
+               packet_type |= RTE_PTYPE_L3_IPV4;
+               ipv4_hdr = rte_pktmbuf_mtod_offset(m,
+                                       struct rte_ipv4_hdr *, len);
+               if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
+                       packet_type |= RTE_PTYPE_L4_TCP;
+               else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
+                       packet_type |= RTE_PTYPE_L4_UDP;
+       } else if (ethertype == RTE_ETHER_TYPE_IPV6) {
+               packet_type |= RTE_PTYPE_L3_IPV6;
+               ipv6_hdr = rte_pktmbuf_mtod_offset(m,
+                                               struct rte_ipv6_hdr *, len);
+               if (ipv6_hdr->proto == IPPROTO_TCP)
+                       packet_type |= RTE_PTYPE_L4_TCP;
+               else if (ipv6_hdr->proto == IPPROTO_UDP)
+                       packet_type |= RTE_PTYPE_L4_UDP;
+       }
+
+       if (vlan_tagged)
+               packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
+       else
+               packet_type |= RTE_PTYPE_L2_ETHER;
+
+       return packet_type;
+}
+
+static inline uint32_t qede_rx_cqe_to_pkt_type_inner(uint16_t flags)
+{
+       uint16_t val;
+
+       /* Lookup table */
+       static const uint32_t
+       ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
+               [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_INNER_L3_IPV4          |
+                                      RTE_PTYPE_INNER_L2_ETHER,
+               [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_INNER_L3_IPV6          |
+                                      RTE_PTYPE_INNER_L2_ETHER,
+               [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_INNER_L3_IPV4      |
+                                          RTE_PTYPE_INNER_L4_TCP       |
+                                          RTE_PTYPE_INNER_L2_ETHER,
+               [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_INNER_L3_IPV6      |
+                                          RTE_PTYPE_INNER_L4_TCP       |
+                                          RTE_PTYPE_INNER_L2_ETHER,
+               [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_INNER_L3_IPV4      |
+                                          RTE_PTYPE_INNER_L4_UDP       |
+                                          RTE_PTYPE_INNER_L2_ETHER,
+               [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_INNER_L3_IPV6      |
+                                          RTE_PTYPE_INNER_L4_UDP       |
+                                          RTE_PTYPE_INNER_L2_ETHER,
+               /* Frags with no VLAN */
+               [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_INNER_L3_IPV4     |
+                                           RTE_PTYPE_INNER_L4_FRAG     |
+                                           RTE_PTYPE_INNER_L2_ETHER,
+               [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_INNER_L3_IPV6     |
+                                           RTE_PTYPE_INNER_L4_FRAG     |
+                                           RTE_PTYPE_INNER_L2_ETHER,
+               /* VLANs */
+               [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_INNER_L3_IPV4     |
+                                           RTE_PTYPE_INNER_L2_ETHER_VLAN,
+               [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_INNER_L3_IPV6     |
+                                           RTE_PTYPE_INNER_L2_ETHER_VLAN,
+               [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
+                                               RTE_PTYPE_INNER_L4_TCP  |
+                                               RTE_PTYPE_INNER_L2_ETHER_VLAN,
+               [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
+                                               RTE_PTYPE_INNER_L4_TCP  |
+                                               RTE_PTYPE_INNER_L2_ETHER_VLAN,
+               [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
+                                               RTE_PTYPE_INNER_L4_UDP  |
+                                               RTE_PTYPE_INNER_L2_ETHER_VLAN,
+               [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
+                                               RTE_PTYPE_INNER_L4_UDP  |
+                                               RTE_PTYPE_INNER_L2_ETHER_VLAN,
+               /* Frags with VLAN */
+               [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
+                                                RTE_PTYPE_INNER_L4_FRAG |
+                                                RTE_PTYPE_INNER_L2_ETHER_VLAN,
+               [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
+                                                RTE_PTYPE_INNER_L4_FRAG |
+                                                RTE_PTYPE_INNER_L2_ETHER_VLAN,
+       };
+
+       /* Bits (0..3) provide the L3/L4 protocol type */
+       /* Bits (4,5) provide the frag and VLAN info */
+       val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
+              PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
+              (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
+               PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
+              (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
+               PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
+               (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
+                PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
+
+       if (val < QEDE_PKT_TYPE_MAX)
+               return ptype_lkup_tbl[val];
+
+       return RTE_PTYPE_UNKNOWN;
+}
+
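Both this table and the non-tunnel table below are indexed the same way: the CQE parse flags are masked down to a 6-bit value in which bits 0..3 carry the L3/L4 protocol type and bits 4..5 the frag/VLAN indication. A condensed sketch of that index computation, equivalent to the mask-building expression in these functions assuming the bit layout the comments describe:

    /* Sketch: reduce the CQE parse flags to the 6-bit ptype table index. */
    static inline uint16_t qede_ptype_idx(uint16_t flags)
    {
            return flags & 0x3f; /* bits 0..3: L3/L4; bit 4: frag; bit 5: VLAN */
    }
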
 static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
 {
        uint16_t val;
@@ -852,30 +1202,74 @@ static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
        /* Lookup table */
        static const uint32_t
        ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
-               [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4,
-               [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6,
-               [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
-               [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
-               [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
-               [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+               [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER,
+               [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER,
+               [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4    |
+                                          RTE_PTYPE_L4_TCP     |
+                                          RTE_PTYPE_L2_ETHER,
+               [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6    |
+                                          RTE_PTYPE_L4_TCP     |
+                                          RTE_PTYPE_L2_ETHER,
+               [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4    |
+                                          RTE_PTYPE_L4_UDP     |
+                                          RTE_PTYPE_L2_ETHER,
+               [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6    |
+                                          RTE_PTYPE_L4_UDP     |
+                                          RTE_PTYPE_L2_ETHER,
+               /* Frags with no VLAN */
+               [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_L3_IPV4   |
+                                           RTE_PTYPE_L4_FRAG   |
+                                           RTE_PTYPE_L2_ETHER,
+               [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_L3_IPV6   |
+                                           RTE_PTYPE_L4_FRAG   |
+                                           RTE_PTYPE_L2_ETHER,
+               /* VLANs */
+               [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_L3_IPV4           |
+                                           RTE_PTYPE_L2_ETHER_VLAN,
+               [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_L3_IPV6           |
+                                           RTE_PTYPE_L2_ETHER_VLAN,
+               [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_L3_IPV4       |
+                                               RTE_PTYPE_L4_TCP        |
+                                               RTE_PTYPE_L2_ETHER_VLAN,
+               [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_L3_IPV6       |
+                                               RTE_PTYPE_L4_TCP        |
+                                               RTE_PTYPE_L2_ETHER_VLAN,
+               [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_L3_IPV4       |
+                                               RTE_PTYPE_L4_UDP        |
+                                               RTE_PTYPE_L2_ETHER_VLAN,
+               [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_L3_IPV6       |
+                                               RTE_PTYPE_L4_UDP        |
+                                               RTE_PTYPE_L2_ETHER_VLAN,
+               /* Frags with VLAN */
+               [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_L3_IPV4      |
+                                                RTE_PTYPE_L4_FRAG      |
+                                                RTE_PTYPE_L2_ETHER_VLAN,
+               [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_L3_IPV6      |
+                                                RTE_PTYPE_L4_FRAG      |
+                                                RTE_PTYPE_L2_ETHER_VLAN,
        };
 
        /* Bits (0..3) provide the L3/L4 protocol type */
+       /* Bits (4,5) provide the frag and VLAN info */
        val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
               PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
               (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
-               PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT)) & flags;
+               PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
+              (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
+               PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
+               (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
+                PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
 
        if (val < QEDE_PKT_TYPE_MAX)
-               return ptype_lkup_tbl[val] | RTE_PTYPE_L2_ETHER;
-       else
-               return RTE_PTYPE_UNKNOWN;
+               return ptype_lkup_tbl[val];
+
+       return RTE_PTYPE_UNKNOWN;
 }
 
 static inline uint8_t
 qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
 {
-       struct ipv4_hdr *ip;
+       struct rte_ipv4_hdr *ip;
        uint16_t pkt_csum;
        uint16_t calc_csum;
        uint16_t val;
@@ -886,8 +1280,8 @@ qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
        if (unlikely(val)) {
                m->packet_type = qede_rx_cqe_to_pkt_type(flag);
                if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
-                       ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
-                                          sizeof(struct ether_hdr));
+                       ip = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
+                                          sizeof(struct rte_ether_hdr));
                        pkt_csum = ip->hdr_checksum;
                        ip->hdr_checksum = 0;
                        calc_csum = rte_ipv4_cksum(ip);
@@ -911,14 +1305,14 @@ qede_reuse_page(__rte_unused struct qede_dev *qdev,
                struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
 {
        struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
-       uint16_t idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+       uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
        struct qede_rx_entry *curr_prod;
        dma_addr_t new_mapping;
 
        curr_prod = &rxq->sw_rx_ring[idx];
        *curr_prod = *curr_cons;
 
-       new_mapping = rte_mbuf_data_dma_addr_default(curr_prod->mbuf) +
+       new_mapping = rte_mbuf_data_iova_default(curr_prod->mbuf) +
                      curr_prod->page_offset;
 
        rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
@@ -1017,17 +1411,17 @@ static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
                [QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,
                [QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
                [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =
-                               RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER,
+                               RTE_PTYPE_TUNNEL_GENEVE,
                [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =
-                               RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L2_ETHER,
+                               RTE_PTYPE_TUNNEL_GRE,
                [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =
-                               RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L2_ETHER,
+                               RTE_PTYPE_TUNNEL_VXLAN,
                [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =
-                               RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER,
+                               RTE_PTYPE_TUNNEL_GENEVE,
                [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =
-                               RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L2_ETHER,
+                               RTE_PTYPE_TUNNEL_GRE,
                [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =
-                               RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L2_ETHER,
+                               RTE_PTYPE_TUNNEL_VXLAN,
                [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =
                                RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
                [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =
@@ -1083,7 +1477,7 @@ qede_process_sg_pkts(void *p_rxq,  struct rte_mbuf *rx_mb,
                                                        pkt_len;
                if (unlikely(!cur_size)) {
                        PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
-                                  " left for mapping jumbo", num_segs);
+                                  " left for mapping jumbo\n", num_segs);
                        qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
                        return -EINVAL;
                }
@@ -1101,38 +1495,300 @@ qede_process_sg_pkts(void *p_rxq,  struct rte_mbuf *rx_mb,
        return 0;
 }
 
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+static inline void
+print_rx_bd_info(struct rte_mbuf *m, struct qede_rx_queue *rxq,
+                uint8_t bitfield)
+{
+       PMD_RX_LOG(INFO, rxq,
+               "len 0x%04x bf 0x%04x hash_val 0x%x"
+               " ol_flags 0x%04lx l2=%s l3=%s l4=%s tunn=%s"
+               " inner_l2=%s inner_l3=%s inner_l4=%s\n",
+               m->data_len, bitfield, m->hash.rss,
+               (unsigned long)m->ol_flags,
+               rte_get_ptype_l2_name(m->packet_type),
+               rte_get_ptype_l3_name(m->packet_type),
+               rte_get_ptype_l4_name(m->packet_type),
+               rte_get_ptype_tunnel_name(m->packet_type),
+               rte_get_ptype_inner_l2_name(m->packet_type),
+               rte_get_ptype_inner_l3_name(m->packet_type),
+               rte_get_ptype_inner_l4_name(m->packet_type));
+}
+#endif
+
 uint16_t
-qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+qede_recv_pkts_regular(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
+       struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
+       register struct rte_mbuf *rx_mb = NULL;
        struct qede_rx_queue *rxq = p_rxq;
        struct qede_dev *qdev = rxq->qdev;
        struct ecore_dev *edev = &qdev->edev;
-       uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
-       uint16_t rx_pkt = 0;
        union eth_rx_cqe *cqe;
-       struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
-       register struct rte_mbuf *rx_mb = NULL;
-       register struct rte_mbuf *seg1 = NULL;
+       uint64_t ol_flags;
        enum eth_rx_cqe_type cqe_type;
-       uint16_t pkt_len = 0; /* Sum of all BD segments */
+       int rss_enable = qdev->rss_enable;
+       int rx_alloc_count = 0;
+       uint32_t packet_type;
+       uint32_t rss_hash;
+       uint16_t vlan_tci, port_id;
+       uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index, num_rx_bds;
+       uint16_t rx_pkt = 0;
+       uint16_t pkt_len = 0;
        uint16_t len; /* Length of first BD */
-       uint8_t num_segs = 1;
        uint16_t preload_idx;
        uint16_t parse_flag;
 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
        uint8_t bitfield_val;
-       enum rss_hash_type htype;
 #endif
-       uint8_t tunn_parse_flag;
-       uint8_t j;
-       struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa;
-       uint64_t ol_flags;
+       uint8_t offset, flags, bd_num;
+
+       /* Allocate buffers that we used in previous loop */
+       if (rxq->rx_alloc_count) {
+               if (unlikely(qede_alloc_rx_bulk_mbufs(rxq,
+                            rxq->rx_alloc_count))) {
+                       struct rte_eth_dev *dev;
+
+                       PMD_RX_LOG(ERR, rxq,
+                                  "New buffer allocation failed,"
+                                  " dropping incoming packet\n");
+                       dev = &rte_eth_devices[rxq->port_id];
+                       dev->data->rx_mbuf_alloc_failed +=
+                                                       rxq->rx_alloc_count;
+                       rxq->rx_alloc_errors += rxq->rx_alloc_count;
+                       return 0;
+               }
+               qede_update_rx_prod(qdev, rxq);
+               rxq->rx_alloc_count = 0;
+       }
+
+       hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
+       sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+       rte_rmb();
+
+       if (hw_comp_cons == sw_comp_cons)
+               return 0;
+
+       num_rx_bds = NUM_RX_BDS(rxq);
+       port_id = rxq->port_id;
+
+       while (sw_comp_cons != hw_comp_cons) {
+               ol_flags = 0;
+               packet_type = RTE_PTYPE_UNKNOWN;
+               vlan_tci = 0;
+               rss_hash = 0;
+
+               /* Get the CQE from the completion ring */
+               cqe =
+                   (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
+               cqe_type = cqe->fast_path_regular.type;
+               PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
+
+               if (likely(cqe_type == ETH_RX_CQE_TYPE_REGULAR)) {
+                       fp_cqe = &cqe->fast_path_regular;
+               } else {
+                       if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
+                               PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
+                               ecore_eth_cqe_completion
+                                       (&edev->hwfns[rxq->queue_id %
+                                                     edev->num_hwfns],
+                                        (struct eth_slow_path_rx_cqe *)cqe);
+                       }
+                       goto next_cqe;
+               }
+
+               /* Get the data from the SW ring */
+               sw_rx_index = rxq->sw_rx_cons & num_rx_bds;
+               rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
+               assert(rx_mb != NULL);
+
+               parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
+               offset = fp_cqe->placement_offset;
+               len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
+               pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
+               vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
+               rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
+               bd_num = fp_cqe->bd_num;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+               bitfield_val = fp_cqe->bitfields;
+#endif
+
+               if (unlikely(qede_tunn_exist(parse_flag))) {
+                       PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
+                       if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
+                               PMD_RX_LOG(ERR, rxq,
+                                           "L4 csum failed, flags = 0x%x\n",
+                                           parse_flag);
+                               rxq->rx_hw_errors++;
+                               ol_flags |= PKT_RX_L4_CKSUM_BAD;
+                       } else {
+                               ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+                       }
+
+                       if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
+                               PMD_RX_LOG(ERR, rxq,
+                                       "Outer L3 csum failed, flags = 0x%x\n",
+                                       parse_flag);
+                               rxq->rx_hw_errors++;
+                               ol_flags |= PKT_RX_EIP_CKSUM_BAD;
+                       } else {
+                               ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+                       }
+
+                       flags = fp_cqe->tunnel_pars_flags.flags;
+
+                       /* Tunnel_type */
+                       packet_type =
+                               qede_rx_cqe_to_tunn_pkt_type(flags);
+
+                       /* Inner header */
+                       packet_type |=
+                             qede_rx_cqe_to_pkt_type_inner(parse_flag);
+
+                       /* Outer L3/L4 types are not available in the CQE.
+                        * Need to add the offset to parse them correctly.
+                        */
+                       rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
+                       packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
+               } else {
+                       packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
+               }
+
+               /* Common handling for non-tunnel packets and for inner
+                * headers in the case of tunnel.
+                */
+               if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
+                       PMD_RX_LOG(ERR, rxq,
+                                   "L4 csum failed, flags = 0x%x\n",
+                                   parse_flag);
+                       rxq->rx_hw_errors++;
+                       ol_flags |= PKT_RX_L4_CKSUM_BAD;
+               } else {
+                       ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+               }
+               if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
+                       PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
+                                  parse_flag);
+                       rxq->rx_hw_errors++;
+                       ol_flags |= PKT_RX_IP_CKSUM_BAD;
+               } else {
+                       ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+               }
+
+               if (unlikely(CQE_HAS_VLAN(parse_flag) ||
+                            CQE_HAS_OUTER_VLAN(parse_flag))) {
+                       /* Note: FW doesn't indicate Q-in-Q packet */
+                       ol_flags |= PKT_RX_VLAN;
+                       if (qdev->vlan_strip_flg) {
+                               ol_flags |= PKT_RX_VLAN_STRIPPED;
+                               rx_mb->vlan_tci = vlan_tci;
+                       }
+               }
+
+               if (rss_enable) {
+                       ol_flags |= PKT_RX_RSS_HASH;
+                       rx_mb->hash.rss = rss_hash;
+               }
+
+               rx_alloc_count++;
+               qede_rx_bd_ring_consume(rxq);
+
+               /* Prefetch next mbuf while processing current one. */
+               preload_idx = rxq->sw_rx_cons & num_rx_bds;
+               rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
+
+               /* Update rest of the MBUF fields */
+               rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
+               rx_mb->port = port_id;
+               rx_mb->ol_flags = ol_flags;
+               rx_mb->data_len = len;
+               rx_mb->packet_type = packet_type;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+               print_rx_bd_info(rx_mb, rxq, bitfield_val);
+#endif
+               rx_mb->nb_segs = bd_num;
+               rx_mb->pkt_len = pkt_len;
+
+               rx_pkts[rx_pkt] = rx_mb;
+               rx_pkt++;
+
+next_cqe:
+               ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
+               sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+               if (rx_pkt == nb_pkts) {
+                       PMD_RX_LOG(DEBUG, rxq,
+                                  "Budget reached nb_pkts=%u received=%u",
+                                  rx_pkt, nb_pkts);
+                       break;
+               }
+       }
+
+       /* Request number of buffers to be allocated in the next loop */
+       rxq->rx_alloc_count = rx_alloc_count;
+
+       rxq->rcv_pkts += rx_pkt;
+       rxq->rx_segs += rx_pkt;
+       PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
+
+       return rx_pkt;
+}
+
+uint16_t
+qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+       struct qede_rx_queue *rxq = p_rxq;
+       struct qede_dev *qdev = rxq->qdev;
+       struct ecore_dev *edev = &qdev->edev;
+       uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
+       uint16_t rx_pkt = 0;
+       union eth_rx_cqe *cqe;
+       struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
+       register struct rte_mbuf *rx_mb = NULL;
+       register struct rte_mbuf *seg1 = NULL;
+       enum eth_rx_cqe_type cqe_type;
+       uint16_t pkt_len = 0; /* Sum of all BD segments */
+       uint16_t len; /* Length of first BD */
+       uint8_t num_segs = 1;
+       uint16_t preload_idx;
+       uint16_t parse_flag;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+       uint8_t bitfield_val;
+#endif
+       uint8_t tunn_parse_flag;
+       struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa;
+       uint64_t ol_flags;
        uint32_t packet_type;
        uint16_t vlan_tci;
        bool tpa_start_flg;
        uint8_t offset, tpa_agg_idx, flags;
        struct qede_agg_info *tpa_info = NULL;
        uint32_t rss_hash;
+       int rx_alloc_count = 0;
+
+       /* Allocate buffers that we used in previous loop */
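+       /* Doing this in one shot lets qede_alloc_rx_bulk_mbufs() amortize
+        * the mempool cost across the whole batch instead of paying it per
+        * packet.
+        */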
+       if (rxq->rx_alloc_count) {
+               if (unlikely(qede_alloc_rx_bulk_mbufs(rxq,
+                            rxq->rx_alloc_count))) {
+                       struct rte_eth_dev *dev;
+
+                       PMD_RX_LOG(ERR, rxq,
+                                  "New buffer allocation failed, "
+                                  "dropping incoming packet\n");
+                       dev = &rte_eth_devices[rxq->port_id];
+                       dev->data->rx_mbuf_alloc_failed +=
+                                                       rxq->rx_alloc_count;
+                       rxq->rx_alloc_errors += rxq->rx_alloc_count;
+                       return 0;
+               }
+               qede_update_rx_prod(qdev, rxq);
+               rxq->rx_alloc_count = 0;
+       }
 
        hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
        sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
@@ -1166,17 +1822,17 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                        /* Mark it as LRO packet */
                        ol_flags |= PKT_RX_LRO;
                        /* In split mode, seg_len is the same as len_on_first_bd
-                        * and ext_bd_len_list will be empty since there are
+                        * and bw_ext_bd_len_list will be empty since there are
                         * no additional buffers
                         */
                        PMD_RX_LOG(INFO, rxq,
-                           "TPA start[%d] - len_on_first_bd %d header %d"
-                           " [bd_list[0] %d], [seg_len %d]\n",
-                           cqe_start_tpa->tpa_agg_index,
-                           rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
-                           cqe_start_tpa->header_len,
-                           rte_le_to_cpu_16(cqe_start_tpa->ext_bd_len_list[0]),
-                           rte_le_to_cpu_16(cqe_start_tpa->seg_len));
+                        "TPA start[%d] - len_on_first_bd %d header %d"
+                        " [bd_list[0] %d], [seg_len %d]\n",
+                        cqe_start_tpa->tpa_agg_index,
+                        rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
+                        cqe_start_tpa->header_len,
+                        rte_le_to_cpu_16(cqe_start_tpa->bw_ext_bd_len_list[0]),
+                        rte_le_to_cpu_16(cqe_start_tpa->seg_len));
 
                break;
                case ETH_RX_CQE_TYPE_TPA_CONT:
@@ -1215,8 +1871,6 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                        rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
                        bitfield_val = fp_cqe->bitfields;
-                       htype = (uint8_t)GET_FIELD(bitfield_val,
-                                       ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
 #endif
                } else {
                        parse_flag =
@@ -1227,8 +1881,6 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                        vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag);
 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
                        bitfield_val = cqe_start_tpa->bitfields;
-                       htype = (uint8_t)GET_FIELD(bitfield_val,
-                               ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE);
 #endif
                        rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash);
                }
@@ -1240,73 +1892,84 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                                            parse_flag);
                                rxq->rx_hw_errors++;
                                ol_flags |= PKT_RX_L4_CKSUM_BAD;
-                       } else {
-                               ol_flags |= PKT_RX_L4_CKSUM_GOOD;
-                               if (tpa_start_flg)
-                                       flags =
-                                        cqe_start_tpa->tunnel_pars_flags.flags;
-                               else
-                                       flags = fp_cqe->tunnel_pars_flags.flags;
-                               tunn_parse_flag = flags;
-                               packet_type =
-                               qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
-                       }
-               } else {
-                       PMD_RX_LOG(INFO, rxq, "Rx non-tunneled packet\n");
-                       if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
-                               PMD_RX_LOG(ERR, rxq,
-                                           "L4 csum failed, flags = 0x%x\n",
-                                           parse_flag);
-                               rxq->rx_hw_errors++;
-                               ol_flags |= PKT_RX_L4_CKSUM_BAD;
                        } else {
                                ol_flags |= PKT_RX_L4_CKSUM_GOOD;
                        }
-                       if (unlikely(qede_check_notunn_csum_l3(rx_mb,
-                                                       parse_flag))) {
+
+                       if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
                                PMD_RX_LOG(ERR, rxq,
-                                          "IP csum failed, flags = 0x%x\n",
-                                          parse_flag);
-                               rxq->rx_hw_errors++;
-                               ol_flags |= PKT_RX_IP_CKSUM_BAD;
+                                          "Outer L3 csum failed, flags = 0x%x\n",
+                                          parse_flag);
+                               rxq->rx_hw_errors++;
+                               ol_flags |= PKT_RX_EIP_CKSUM_BAD;
                        } else {
-                               ol_flags |= PKT_RX_IP_CKSUM_GOOD;
-                               packet_type =
-                                       qede_rx_cqe_to_pkt_type(parse_flag);
+                               ol_flags |= PKT_RX_IP_CKSUM_GOOD;
                        }
+
+                       if (tpa_start_flg)
+                               flags = cqe_start_tpa->tunnel_pars_flags.flags;
+                       else
+                               flags = fp_cqe->tunnel_pars_flags.flags;
+                       tunn_parse_flag = flags;
+
+                       /* Tunnel_type */
+                       packet_type =
+                               qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
+
+                       /* Inner header */
+                       packet_type |=
+                             qede_rx_cqe_to_pkt_type_inner(parse_flag);
+
+                       /* Outer L3/L4 types are not available in the CQE, so
+                        * parse them from the packet itself. Set data_off
+                        * first so that the headers are parsed at the correct
+                        * offset.
+                        */
+                       rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
+                       packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
+               } else {
+                       packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
                }
 
-               if (CQE_HAS_VLAN(parse_flag)) {
-                       ol_flags |= PKT_RX_VLAN_PKT;
-                       if (qdev->vlan_strip_flg) {
-                               ol_flags |= PKT_RX_VLAN_STRIPPED;
-                               rx_mb->vlan_tci = vlan_tci;
-                       }
+               /* Common handling for non-tunnel packets and for inner
+                * headers in the case of tunnel.
+                */
+               if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
+                       PMD_RX_LOG(ERR, rxq,
+                                   "L4 csum failed, flags = 0x%x\n",
+                                   parse_flag);
+                       rxq->rx_hw_errors++;
+                       ol_flags |= PKT_RX_L4_CKSUM_BAD;
+               } else {
+                       ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+               }
+               if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
+                       PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
+                                  parse_flag);
+                       rxq->rx_hw_errors++;
+                       ol_flags |= PKT_RX_IP_CKSUM_BAD;
+               } else {
+                       ol_flags |= PKT_RX_IP_CKSUM_GOOD;
                }
-               if (CQE_HAS_OUTER_VLAN(parse_flag)) {
-                       ol_flags |= PKT_RX_QINQ_PKT;
+
+               if (CQE_HAS_VLAN(parse_flag) ||
+                   CQE_HAS_OUTER_VLAN(parse_flag)) {
+                       /* Note: FW doesn't indicate Q-in-Q packet */
+                       ol_flags |= PKT_RX_VLAN;
                        if (qdev->vlan_strip_flg) {
+                               ol_flags |= PKT_RX_VLAN_STRIPPED;
                                rx_mb->vlan_tci = vlan_tci;
-                               ol_flags |= PKT_RX_QINQ_STRIPPED;
                        }
-                       rx_mb->vlan_tci_outer = 0;
                }
+
                /* RSS Hash */
                if (qdev->rss_enable) {
                        ol_flags |= PKT_RX_RSS_HASH;
                        rx_mb->hash.rss = rss_hash;
                }
 
-               if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
-                       PMD_RX_LOG(ERR, rxq,
-                                  "New buffer allocation failed,"
-                                  "dropping incoming packet\n");
-                       qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
-                       rte_eth_devices[rxq->port_id].
-                           data->rx_mbuf_alloc_failed++;
-                       rxq->rx_alloc_errors++;
-                       break;
-               }
+               rx_alloc_count++;
                qede_rx_bd_ring_consume(rxq);
 
                if (!tpa_start_flg && fp_cqe->bd_num > 1) {
@@ -1318,17 +1981,9 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                        if (qede_process_sg_pkts(p_rxq, seg1, num_segs,
                                                 pkt_len - len))
                                goto next_cqe;
-                       for (j = 0; j < num_segs; j++) {
-                               if (qede_alloc_rx_buffer(rxq)) {
-                                       PMD_RX_LOG(ERR, rxq,
-                                               "Buffer allocation failed");
-                                       rte_eth_devices[rxq->port_id].
-                                               data->rx_mbuf_alloc_failed++;
-                                       rxq->rx_alloc_errors++;
-                                       break;
-                               }
-                               rxq->rx_segs++;
-                       }
+
+                       rx_alloc_count += num_segs;
+                       rxq->rx_segs += num_segs;
                }
                rxq->rx_segs++; /* for the first segment */
 
@@ -1342,11 +1997,9 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                rx_mb->ol_flags = ol_flags;
                rx_mb->data_len = len;
                rx_mb->packet_type = packet_type;
-               PMD_RX_LOG(INFO, rxq,
-                          "pkt_type 0x%04x len %u hash_type %d hash_val 0x%x"
-                          " ol_flags 0x%04lx\n",
-                          packet_type, len, htype, rx_mb->hash.rss,
-                          (unsigned long)ol_flags);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+               print_rx_bd_info(rx_mb, rxq, bitfield_val);
+#endif
                if (!tpa_start_flg) {
                        rx_mb->nb_segs = fp_cqe->bd_num;
                        rx_mb->pkt_len = pkt_len;
@@ -1372,7 +2025,8 @@ next_cqe:
                }
        }
 
-       qede_update_rx_prod(qdev, rxq);
+       /* Request number of buffers to be allocated in next loop */
+       rxq->rx_alloc_count = rx_alloc_count;
 
        rxq->rcv_pkts += rx_pkt;
 
@@ -1381,37 +2035,55 @@ next_cqe:
        return rx_pkt;
 }
 
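+/* Rx burst handler for CMT (dual-engine) devices: the burst is split
+ * between the two engines' Rx queues, and any budget engine 0 does not
+ * use is handed over to engine 1.
+ */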
+uint16_t
+qede_recv_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+       struct qede_fastpath_cmt *fp_cmt = p_fp_cmt;
+       uint16_t eng0_pkts, eng1_pkts;
+
+       eng0_pkts = nb_pkts / 2;
+
+       eng0_pkts = qede_recv_pkts(fp_cmt->fp0->rxq, rx_pkts, eng0_pkts);
+
+       eng1_pkts = nb_pkts - eng0_pkts;
+
+       eng1_pkts = qede_recv_pkts(fp_cmt->fp1->rxq, rx_pkts + eng0_pkts,
+                                  eng1_pkts);
+
+       return eng0_pkts + eng1_pkts;
+}
 
 /* Populate scatter gather buffer descriptor fields */
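+/* start_seg is the BD slot at which the first chained segment should be
+ * encoded (0 maps to BD2, 1 to BD3, anything higher to a plain BD).
+ * Callers pass nbds - 1 so that segments land after any BDs they have
+ * already produced themselves (e.g. the LSO path fills BD2/BD3 itself).
+ */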
-static inline uint8_t
+static inline uint16_t
 qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
-                 struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3)
+                 struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3,
+                 uint16_t start_seg)
 {
        struct qede_tx_queue *txq = p_txq;
        struct eth_tx_bd *tx_bd = NULL;
        dma_addr_t mapping;
-       uint8_t nb_segs = 0;
+       uint16_t nb_segs = 0;
 
        /* Check for scattered buffers */
        while (m_seg) {
-               if (nb_segs == 0) {
+               if (start_seg == 0) {
                        if (!*bd2) {
                                *bd2 = (struct eth_tx_2nd_bd *)
                                        ecore_chain_produce(&txq->tx_pbl);
                                memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd));
                                nb_segs++;
                        }
-                       mapping = rte_mbuf_data_dma_addr(m_seg);
+                       mapping = rte_mbuf_data_iova(m_seg);
                        QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
                        PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
-               } else if (nb_segs == 1) {
+               } else if (start_seg == 1) {
                        if (!*bd3) {
                                *bd3 = (struct eth_tx_3rd_bd *)
                                        ecore_chain_produce(&txq->tx_pbl);
                                memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd));
                                nb_segs++;
                        }
-                       mapping = rte_mbuf_data_dma_addr(m_seg);
+                       mapping = rte_mbuf_data_iova(m_seg);
                        QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len);
                        PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len);
                } else {
@@ -1419,10 +2091,11 @@ qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
                                ecore_chain_produce(&txq->tx_pbl);
                        memset(tx_bd, 0, sizeof(*tx_bd));
                        nb_segs++;
-                       mapping = rte_mbuf_data_dma_addr(m_seg);
+                       mapping = rte_mbuf_data_iova(m_seg);
                        QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
                        PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
                }
+               start_seg++;
                m_seg = m_seg->next;
        }
 
@@ -1442,20 +2115,24 @@ print_tx_bd_info(struct qede_tx_queue *txq,
 
        if (bd1)
                PMD_TX_LOG(INFO, txq,
-                          "BD1: nbytes=%u nbds=%u bd_flags=%04x bf=%04x",
-                          rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
-                          bd1->data.bd_flags.bitfields,
-                          rte_cpu_to_le_16(bd1->data.bitfields));
+                  "BD1: nbytes=0x%04x nbds=0x%04x bd_flags=0x%04x bf=0x%04x",
+                  rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
+                  bd1->data.bd_flags.bitfields,
+                  rte_cpu_to_le_16(bd1->data.bitfields));
        if (bd2)
                PMD_TX_LOG(INFO, txq,
-                          "BD2: nbytes=%u bf=%04x\n",
-                          rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1);
+                  "BD2: nbytes=0x%04x bf1=0x%04x bf2=0x%04x tunn_ip=0x%04x\n",
+                  rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1,
+                  bd2->data.bitfields2, bd2->data.tunn_ip_size);
        if (bd3)
                PMD_TX_LOG(INFO, txq,
-                          "BD3: nbytes=%u bf=%04x mss=%u\n",
-                          rte_cpu_to_le_16(bd3->nbytes),
-                          rte_cpu_to_le_16(bd3->data.bitfields),
-                          rte_cpu_to_le_16(bd3->data.lso_mss));
+                  "BD3: nbytes=0x%04x bf=0x%04x MSS=0x%04x "
+                  "tunn_l4_hdr_start_offset_w=0x%04x tunn_hdr_size=0x%04x\n",
+                  rte_cpu_to_le_16(bd3->nbytes),
+                  rte_cpu_to_le_16(bd3->data.bitfields),
+                  rte_cpu_to_le_16(bd3->data.lso_mss),
+                  bd3->data.tunn_l4_hdr_start_offset_w,
+                  bd3->data.tunn_hdr_size_w);
 
        rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf));
        PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf);
@@ -1486,29 +2163,41 @@ qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
                ol_flags = m->ol_flags;
                if (ol_flags & PKT_TX_TCP_SEG) {
                        if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) {
-                               rte_errno = -EINVAL;
+                               rte_errno = EINVAL;
                                break;
                        }
                        /* TBD: confirm it's ~9700B for both? */
                        if (m->tso_segsz > ETH_TX_MAX_NON_LSO_PKT_LEN) {
-                               rte_errno = -EINVAL;
+                               rte_errno = EINVAL;
                                break;
                        }
                } else {
                        if (m->nb_segs >= ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) {
-                               rte_errno = -EINVAL;
+                               rte_errno = EINVAL;
                                break;
                        }
                }
                if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
-                       rte_errno = -ENOTSUP;
+                       /* We support only limited tunnel protocols */
+                       if (ol_flags & PKT_TX_TUNNEL_MASK) {
+                               uint64_t temp;
+
+                               temp = ol_flags & PKT_TX_TUNNEL_MASK;
+                               if (temp == PKT_TX_TUNNEL_VXLAN ||
+                                   temp == PKT_TX_TUNNEL_GENEVE ||
+                                   temp == PKT_TX_TUNNEL_MPLSINUDP ||
+                                   temp == PKT_TX_TUNNEL_GRE)
+                                       continue;
+                       }
+
+                       rte_errno = ENOTSUP;
                        break;
                }
 
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
                ret = rte_validate_tx_offload(m);
                if (ret != 0) {
-                       rte_errno = ret;
+                       rte_errno = -ret;
                        break;
                }
 #endif
@@ -1543,6 +2232,131 @@ qede_mpls_tunn_tx_sanity_check(struct rte_mbuf *mbuf,
 }
 #endif
 
+uint16_t
+qede_xmit_pkts_regular(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+       struct qede_tx_queue *txq = p_txq;
+       struct qede_dev *qdev = txq->qdev;
+       struct ecore_dev *edev = &qdev->edev;
+       struct eth_tx_1st_bd *bd1;
+       struct eth_tx_2nd_bd *bd2;
+       struct eth_tx_3rd_bd *bd3;
+       struct rte_mbuf *m_seg = NULL;
+       struct rte_mbuf *mbuf;
+       struct qede_tx_entry *sw_tx_ring;
+       uint16_t nb_tx_pkts;
+       uint16_t bd_prod;
+       uint16_t idx;
+       uint16_t nb_frags = 0;
+       uint16_t nb_pkt_sent = 0;
+       uint8_t nbds;
+       uint64_t tx_ol_flags;
+       /* BD1 */
+       uint16_t bd1_bf;
+       uint8_t bd1_bd_flags_bf;
+
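+       /* Reclaim completed Tx BDs lazily, only once the number of free BDs
+        * drops below the configured free threshold.
+        */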
+       if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
+               PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
+                          nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
+               qede_process_tx_compl(edev, txq);
+       }
+
+       nb_tx_pkts = nb_pkts;
+       bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
+       sw_tx_ring = txq->sw_tx_ring;
+
+       while (nb_tx_pkts--) {
+               /* Init flags/values */
+               nbds = 0;
+               bd1 = NULL;
+               bd2 = NULL;
+               bd3 = NULL;
+               bd1_bf = 0;
+               bd1_bd_flags_bf = 0;
+               nb_frags = 0;
+
+               mbuf = *tx_pkts++;
+               assert(mbuf);
+
+               /* Check that enough free TX BDs are available for all of
+                * this packet's segments.
+                */
+               if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
+                       break;
+
+               tx_ol_flags = mbuf->ol_flags;
+               bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+
+               if (unlikely(txq->nb_tx_avail <
+                               ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
+                       break;
+               bd1_bf |=
+                      (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
+                       << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+
+               /* Offload the IP checksum in the hardware */
+               if (tx_ol_flags & PKT_TX_IP_CKSUM)
+                       bd1_bd_flags_bf |=
+                               1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+
+               /* L4 checksum offload (tcp or udp) */
+               if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
+                   (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM)))
+                       bd1_bd_flags_bf |=
+                               1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+
+               /* Fill the entry in the SW ring and the BDs in the FW ring */
+               idx = TX_PROD(txq);
+               sw_tx_ring[idx].mbuf = mbuf;
+
+               /* BD1 */
+               bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
+               memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
+               nbds++;
+
+               /* Map MBUF linear data for DMA and set in the BD1 */
+               QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
+                                    mbuf->data_len);
+               bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
+               bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
+
+               /* Handle fragmented MBUF */
+               if (unlikely(mbuf->nb_segs > 1)) {
+                       m_seg = mbuf->next;
+
+                       /* Encode scatter gather buffer descriptors */
+                       nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3,
+                                                    nbds - 1);
+               }
+
+               bd1->data.nbds = nbds + nb_frags;
+
+               txq->nb_tx_avail -= bd1->data.nbds;
+               txq->sw_tx_prod++;
+               bd_prod =
+                   rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+               print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
+#endif
+               nb_pkt_sent++;
+               txq->xmit_pkts++;
+       }
+
+       /* Write value of prod idx into bd_prod */
+       txq->tx_db.data.bd_prod = bd_prod;
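+       /* The doorbell register write below is relaxed, so barriers are used
+        * to order the BD ring stores ahead of it and to flush it afterwards.
+        */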
+       rte_wmb();
+       rte_compiler_barrier();
+       DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
+       rte_wmb();
+
+       /* Check again for Tx completions */
+       qede_process_tx_compl(edev, txq);
+
+       PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
+                  nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());
+
+       return nb_pkt_sent;
+}
+
 uint16_t
 qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
@@ -1625,15 +2439,14 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                 * offloads. Don't rely on pkt_type marked by Rx, instead use
                 * tx_ol_flags to decide.
                 */
-               if (((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
-                                               PKT_TX_TUNNEL_VXLAN) ||
-                   ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
-                                               PKT_TX_TUNNEL_MPLSINUDP)) {
+               tunn_flg = !!(tx_ol_flags & PKT_TX_TUNNEL_MASK);
+
+               if (tunn_flg) {
                        /* Check against max which is Tunnel IPv6 + ext */
                        if (unlikely(txq->nb_tx_avail <
                                ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
                                        break;
-                       tunn_flg = true;
+
                        /* First indicate its a tunnel pkt */
                        bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
                                  ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
@@ -1682,12 +2495,6 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                                inner_l4_hdr_offset = (mbuf->l2_len -
                                        MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2;
 
-                               /* TODO: There's no DPDK flag to request outer
-                                * L4 checksum offload, so we don't do it.
-                                * bd1_bd_flags_bf |=
-                                *      ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
-                                *      ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
-                                */
                                /* Inner L2 size and address type */
                                bd2_bf1 |= (inner_l2_hdr_size &
                                        ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK) <<
@@ -1739,6 +2546,10 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                         * and BD2 onwards for data.
                         */
                        hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
+                       if (tunn_flg)
+                               hdr_size += mbuf->outer_l2_len +
+                                           mbuf->outer_l3_len;
+
                        bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
                        bd1_bd_flags_bf |=
                                        1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
@@ -1759,22 +2570,44 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                }
 
                /* Descriptor based VLAN insertion */
-               if (tx_ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+               if (tx_ol_flags & PKT_TX_VLAN_PKT) {
                        vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
                        bd1_bd_flags_bf |=
                            1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
                }
 
                /* Offload the IP checksum in the hardware */
-               if (tx_ol_flags & PKT_TX_IP_CKSUM)
+               if (tx_ol_flags & PKT_TX_IP_CKSUM) {
                        bd1_bd_flags_bf |=
                                1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+                       /* There's no DPDK flag to request outer-L4 csum
+                        * offload. But for tunnels, if inner L3 or L4 csum
+                        * offload is requested, recalculation of the tunnel
+                        * (outer) L4 header csum must be forced as well.
+                        */
+                       if (tunn_flg && ((tx_ol_flags & PKT_TX_TUNNEL_MASK) !=
+                                                       PKT_TX_TUNNEL_GRE)) {
+                               bd1_bd_flags_bf |=
+                                       ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
+                                       ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+                       }
+               }
 
                /* L4 checksum offload (tcp or udp) */
                if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
                    (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM))) {
                        bd1_bd_flags_bf |=
                                1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+                       /* There's no DPDK flag to request outer-L4 csum
+                        * offload. But for tunnels, if inner L3 or L4 csum
+                        * offload is requested, recalculation of the tunnel
+                        * (outer) L4 header csum must be forced as well.
+                        */
+                       if (tunn_flg) {
+                               bd1_bd_flags_bf |=
+                                       ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
+                                       ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+                       }
                }
 
                /* Fill the entry in the SW ring and the BDs in the FW ring */
@@ -1787,7 +2620,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                nbds++;
 
                /* Map MBUF linear data for DMA and set in the BD1 */
-               QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+               QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
                                     mbuf->data_len);
                bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
                bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
@@ -1800,11 +2633,11 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        nbds++;
 
                        /* BD1 */
-                       QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+                       QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
                                             hdr_size);
                        /* BD2 */
                        QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size +
-                                            rte_mbuf_data_dma_addr(mbuf)),
+                                            rte_mbuf_data_iova(mbuf)),
                                             mbuf->data_len - hdr_size);
                        bd2->data.bitfields1 = rte_cpu_to_le_16(bd2_bf1);
                        if (mplsoudp_flg) {
@@ -1834,17 +2667,17 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
                /* Handle fragmented MBUF */
                m_seg = mbuf->next;
+
                /* Encode scatter gather buffer descriptors if required */
-               nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3);
+               nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3, nbds - 1);
                bd1->data.nbds = nbds + nb_frags;
+
                txq->nb_tx_avail -= bd1->data.nbds;
                txq->sw_tx_prod++;
-               rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
                bd_prod =
                    rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
                print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
-               PMD_TX_LOG(INFO, txq, "lso=%d tunn=%d", lso_flg, tunn_flg);
 #endif
                nb_pkt_sent++;
                txq->xmit_pkts++;
@@ -1866,6 +2699,24 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        return nb_pkt_sent;
 }
 
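+/* Tx counterpart of qede_recv_pkts_cmt: the burst is split between the two
+ * engines, with engine 1 receiving whatever budget engine 0 did not use.
+ */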
+uint16_t
+qede_xmit_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+       struct qede_fastpath_cmt *fp_cmt = p_fp_cmt;
+       uint16_t eng0_pkts, eng1_pkts;
+
+       eng0_pkts = nb_pkts / 2;
+
+       eng0_pkts = qede_xmit_pkts(fp_cmt->fp0->txq, tx_pkts, eng0_pkts);
+
+       eng1_pkts = nb_pkts - eng0_pkts;
+
+       eng1_pkts = qede_xmit_pkts(fp_cmt->fp1->txq, tx_pkts + eng0_pkts,
+                                  eng1_pkts);
+
+       return eng0_pkts + eng1_pkts;
+}
+
 uint16_t
 qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
                     __rte_unused struct rte_mbuf **pkts,
@@ -1873,3 +2724,84 @@ qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
 {
        return 0;
 }
+
+/* This function does a dry-run walk over the completion queue to count the
+ * number of BDs used by the HW, and restores the completion queue to its
+ * original state at the end.
+ */
+static uint16_t
+qede_parse_fp_cqe(struct qede_rx_queue *rxq)
+{
+       uint16_t hw_comp_cons, sw_comp_cons, bd_count = 0;
+       union eth_rx_cqe *cqe, *orig_cqe = NULL;
+
+       hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
+       sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+       if (hw_comp_cons == sw_comp_cons)
+               return 0;
+
+       /* Get the CQE from the completion ring */
+       cqe = (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
+       orig_cqe = cqe;
+
+       while (sw_comp_cons != hw_comp_cons) {
+               switch (cqe->fast_path_regular.type) {
+               case ETH_RX_CQE_TYPE_REGULAR:
+                       bd_count += cqe->fast_path_regular.bd_num;
+                       break;
+               case ETH_RX_CQE_TYPE_TPA_END:
+                       bd_count += cqe->fast_path_tpa_end.num_of_bds;
+                       break;
+               default:
+                       break;
+               }
+
+               cqe = (union eth_rx_cqe *)
+                       ecore_chain_consume(&rxq->rx_comp_ring);
+               sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+       }
+
+       /* revert comp_ring to original state */
+       ecore_chain_set_cons(&rxq->rx_comp_ring, sw_comp_cons, orig_cqe);
+
+       return bd_count;
+}
+
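+/* Report the state of the Rx descriptor at @offset from the current BD
+ * consumer: DONE if the HW has already used the BD, AVAIL if it is posted
+ * to the HW but not yet used, UNAVAIL otherwise.
+ */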
+int
+qede_rx_descriptor_status(void *p_rxq, uint16_t offset)
+{
+       uint16_t hw_bd_cons, sw_bd_cons, sw_bd_prod;
+       uint16_t produced, consumed;
+       struct qede_rx_queue *rxq = p_rxq;
+
+       if (offset > rxq->nb_rx_desc)
+               return -EINVAL;
+
+       sw_bd_cons = ecore_chain_get_cons_idx(&rxq->rx_bd_ring);
+       sw_bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
+
+       /* find BDs used by HW from completion queue elements */
+       hw_bd_cons = sw_bd_cons + qede_parse_fp_cqe(rxq);
+
+       if (hw_bd_cons < sw_bd_cons)
+               /* wraparound case */
+               consumed = (0xffff - sw_bd_cons) + hw_bd_cons;
+       else
+               consumed = hw_bd_cons - sw_bd_cons;
+
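+       /* Worked example of the wraparound arithmetic above (hypothetical
+        * values): sw_bd_cons = 0xfffe and hw_bd_cons = 0x0003 give
+        * consumed = (0xffff - 0xfffe) + 0x0003 = 4.
+        */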
+       if (offset <= consumed)
+               return RTE_ETH_RX_DESC_DONE;
+
+       if (sw_bd_prod < sw_bd_cons)
+               /* wraparound case */
+               produced = (0xffff - sw_bd_cons) + sw_bd_prod;
+       else
+               produced = sw_bd_prod - sw_bd_cons;
+
+       if (offset <= produced)
+               return RTE_ETH_RX_DESC_AVAIL;
+
+       return RTE_ETH_RX_DESC_UNAVAIL;
+}
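+
+/* Usage note: applications do not call this directly; once registered in
+ * eth_dev_ops it backs rte_eth_rx_descriptor_status(port_id, queue_id,
+ * offset).
+ */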