bus/fslmc: add enqueue response read in qbman
author Nipun Gupta <nipun.gupta@nxp.com>
Fri, 22 Feb 2019 11:16:01 +0000 (11:16 +0000)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Fri, 1 Mar 2019 17:17:35 +0000 (18:17 +0100)
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
drivers/bus/fslmc/qbman/qbman_portal.c
drivers/bus/fslmc/rte_bus_fslmc_version.map
drivers/net/dpaa2/dpaa2_ethdev.c
drivers/net/dpaa2/dpaa2_ethdev.h
drivers/net/dpaa2/dpaa2_rxtx.c

diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index f377f24..7bcbde8 100644
@@ -526,6 +526,18 @@ dpaa2_create_dpio_device(int vdev_fd,
                goto err;
        }
 
+       dpio_dev->eqresp = rte_zmalloc(NULL, MAX_EQ_RESP_ENTRIES *
+                                    (sizeof(struct qbman_result) +
+                                    sizeof(struct eqresp_metadata)),
+                                    RTE_CACHE_LINE_SIZE);
+       if (!dpio_dev->eqresp) {
+               DPAA2_BUS_ERR("Memory allocation failed for eqresp");
+               goto err;
+       }
+       dpio_dev->eqresp_meta = (struct eqresp_metadata *)(dpio_dev->eqresp +
+                               MAX_EQ_RESP_ENTRIES);
+
        TAILQ_INSERT_TAIL(&dpio_dev_list, dpio_dev, next);
 
        return 0;
@@ -588,6 +600,41 @@ fail:
        return -1;
 }
 
+uint32_t
+dpaa2_free_eq_descriptors(void)
+{
+       struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
+       struct qbman_result *eqresp;
+       struct eqresp_metadata *eqresp_meta;
+       struct dpaa2_queue *txq;
+
+       while (dpio_dev->eqresp_ci != dpio_dev->eqresp_pi) {
+               eqresp = &dpio_dev->eqresp[dpio_dev->eqresp_ci];
+               eqresp_meta = &dpio_dev->eqresp_meta[dpio_dev->eqresp_ci];
+
+               if (!qbman_result_eqresp_rspid(eqresp))
+                       break;
+
+               if (qbman_result_eqresp_rc(eqresp)) {
+                       txq = eqresp_meta->dpaa2_q;
+                       txq->cb_eqresp_free(dpio_dev->eqresp_ci);
+               }
+               qbman_result_eqresp_set_rspid(eqresp, 0);
+
+               dpio_dev->eqresp_ci + 1 < MAX_EQ_RESP_ENTRIES ?
+                       dpio_dev->eqresp_ci++ : (dpio_dev->eqresp_ci = 0);
+       }
+
+       /* Return one less entry so that PI and CI are never the same in
+        * the case where all the EQ responses are in use.
+        */
+       if (dpio_dev->eqresp_ci > dpio_dev->eqresp_pi)
+               return dpio_dev->eqresp_ci - dpio_dev->eqresp_pi - 1;
+       else
+               return dpio_dev->eqresp_ci - dpio_dev->eqresp_pi +
+                       MAX_EQ_RESP_ENTRIES - 1;
+}
+
 static struct rte_dpaa2_object rte_dpaa2_dpio_obj = {
        .dev_type = DPAA2_IO,
        .create = dpaa2_create_dpio_device,
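dpaa2_free_eq_descriptors() treats eqresp[]/eqresp_meta[] as a circular ring: the Tx path advances eqresp_pi when it claims a response slot, the loop above advances eqresp_ci as completed responses are retired, and the count returned is always one less than the real number of free slots so that PI == CI can only ever mean an empty ring. A minimal standalone sketch of that occupancy arithmetic (illustrative only, not part of the patch; it reuses the MAX_EQ_RESP_ENTRIES value defined in dpaa2_hw_pvt.h):

#include <assert.h>
#include <stdint.h>

#define MAX_EQ_RESP_ENTRIES	(32 + 1)	/* MAX_TX_RING_SLOTS + 1 */

/* Free EQ-response slots, keeping one slot unused so that pi == ci
 * unambiguously means "ring empty".
 */
static uint32_t eqresp_free_count(uint16_t ci, uint16_t pi)
{
	if (ci > pi)
		return ci - pi - 1;
	return ci - pi + MAX_EQ_RESP_ENTRIES - 1;
}

int main(void)
{
	assert(eqresp_free_count(0, 0) == MAX_EQ_RESP_ENTRIES - 1);	/* empty */
	assert(eqresp_free_count(5, 4) == 0);				/* full */
	assert(eqresp_free_count(2, 10) == MAX_EQ_RESP_ENTRIES - 1 - 8);
	return 0;
}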
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
index 4354c76..17e7e4f 100644
@@ -51,4 +51,8 @@ dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage);
 void
 dpaa2_free_dq_storage(struct queue_storage_info_t *q_storage);
 
+/* free the enqueue response descriptors */
+uint32_t
+dpaa2_free_eq_descriptors(void);
+
 #endif /* _DPAA2_HW_DPIO_H_ */
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 626fcbb..4679e93 100644
@@ -34,6 +34,7 @@
 
 /* Maximum number of slots available in TX ring */
 #define MAX_TX_RING_SLOTS                      32
+#define MAX_EQ_RESP_ENTRIES                    (MAX_TX_RING_SLOTS + 1)
 
 /* Maximum number of slots available in RX ring */
 #define DPAA2_EQCR_RING_SIZE           8
 /* EQCR shift to get EQCR size for LX2 (1 << 5 = 32) */
 #define DPAA2_LX2_EQCR_SHIFT           5
 
+/* Flag to identify an ordered queue mbuf */
+#define DPAA2_ENQUEUE_FLAG_ORP         (1ULL << 30)
+/* ORP ID shift and mask */
+#define DPAA2_EQCR_OPRID_SHIFT         16
+#define DPAA2_EQCR_OPRID_MASK          0x3FFF0000
+/* Sequence number shift and mask */
+#define DPAA2_EQCR_SEQNUM_SHIFT                0
+#define DPAA2_EQCR_SEQNUM_MASK         0x0000FFFF
+
 #define DPAA2_SWP_CENA_REGION          0
 #define DPAA2_SWP_CINH_REGION          1
 #define DPAA2_SWP_CENA_MEM_REGION      2
 
 #define DPAA2_DPCI_MAX_QUEUES 2
 
+struct dpaa2_queue;
+
+struct eqresp_metadata {
+       struct dpaa2_queue *dpaa2_q;
+       struct rte_mempool *mp;
+};
+
 struct dpaa2_dpio_dev {
        TAILQ_ENTRY(dpaa2_dpio_dev) next;
                /**< Pointer to Next device instance */
        uint16_t index; /**< Index of an instance in the list */
        rte_atomic16_t ref_count;
                /**< How many thread contexts are sharing this.*/
+       uint16_t eqresp_ci;
+       uint16_t eqresp_pi;
+       struct qbman_result *eqresp;
+       struct eqresp_metadata *eqresp_meta;
        struct fsl_mc_io *dpio; /** handle to DPIO portal object */
        uint16_t token;
        struct qbman_swp *sw_portal; /** SW portal object */
@@ -125,6 +146,8 @@ typedef void (dpaa2_queue_cb_dqrr_t)(struct qbman_swp *swp,
                struct dpaa2_queue *rxq,
                struct rte_event *ev);
 
+typedef void (dpaa2_queue_cb_eqresp_free_t)(uint16_t eqresp_ci);
+
 struct dpaa2_queue {
        struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
        union {
@@ -144,6 +167,7 @@ struct dpaa2_queue {
        };
        struct rte_event ev;
        dpaa2_queue_cb_dqrr_t *cb;
+       dpaa2_queue_cb_eqresp_free_t *cb_eqresp_free;
        struct dpaa2_bp_info *bp_array;
 };
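The eqresp and eqresp_meta fields added to struct dpaa2_dpio_dev above point into a single allocation made in dpaa2_create_dpio_device(): MAX_EQ_RESP_ENTRIES struct qbman_result slots followed immediately by the same number of struct eqresp_metadata slots. A sketch of that layout (illustrative only), using plain calloc() and stand-in types in place of rte_zmalloc() and the real QBMAN structures:

#include <stdint.h>
#include <stdlib.h>

#define MAX_EQ_RESP_ENTRIES	(32 + 1)

/* Stand-ins for the real types, kept only as simple placeholders. */
struct qbman_result { uint8_t dont_manipulate_directly[64]; };
struct eqresp_metadata { void *dpaa2_q; void *mp; };

struct dpio_like {
	struct qbman_result *eqresp;
	struct eqresp_metadata *eqresp_meta;
	uint16_t eqresp_ci, eqresp_pi;
};

/* One zeroed block: N results first, then N metadata entries right after,
 * mirroring the rte_zmalloc() call in dpaa2_create_dpio_device().
 */
static int eqresp_storage_init(struct dpio_like *d)
{
	d->eqresp = calloc(MAX_EQ_RESP_ENTRIES,
			   sizeof(struct qbman_result) +
			   sizeof(struct eqresp_metadata));
	if (d->eqresp == NULL)
		return -1;
	d->eqresp_meta = (struct eqresp_metadata *)(d->eqresp +
						    MAX_EQ_RESP_ENTRIES);
	d->eqresp_ci = 0;
	d->eqresp_pi = 0;
	return 0;
}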
 
diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
index 10c72e0..a9192d3 100644
@@ -212,6 +212,23 @@ struct qbman_result {
                        __le32 rid_tok;
                        __le64 ctx;
                } scn;
+               struct eq_resp {
+                       uint8_t verb;
+                       uint8_t dca;
+                       __le16 seqnum;
+                       __le16 oprid;
+                       uint8_t reserved;
+                       uint8_t rc;
+                       __le32 tgtid;
+                       __le32 tag;
+                       uint16_t qdbin;
+                       uint8_t qpri;
+                       uint8_t reserved1;
+                       __le32 fqid:24;
+                       __le32 rspid:8;
+                       __le64 rsp_addr;
+                       uint8_t fd[32];
+               } eq_resp;
        };
 };
 
@@ -788,7 +805,6 @@ uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn);
        /************/
        /* Enqueues */
        /************/
-
 /* struct qbman_eq_desc - structure of enqueue descriptor */
 struct qbman_eq_desc {
        union {
@@ -956,6 +972,44 @@ void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable);
 void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
                           uint8_t dqrr_idx, int park);
 
+/**
+ * qbman_result_eqresp_fd() - Get fd from enqueue response.
+ * @eqresp: enqueue response.
+ *
+ * Return the fd pointer.
+ */
+struct qbman_fd *qbman_result_eqresp_fd(struct qbman_result *eqresp);
+
+/**
+ * qbman_result_eqresp_set_rspid() - Set the response id in enqueue response.
+ * @eqresp: enqueue response.
+ * @val: values to set into the response id.
+ *
+ * This value is set into the response id before issuing the enqueue command
+ * and gets overwritten by QBMAN once the enqueue command is complete.
+ */
+void qbman_result_eqresp_set_rspid(struct qbman_result *eqresp, uint8_t val);
+
+/**
+ * qbman_result_eqresp_rspid() - Get the response id.
+ * @eqresp: enqueue response.
+ *
+ * Return the response id.
+ *
+ * At enqueue time the user provides the response id. The response id gets
+ * copied into the enqueue response to indicate that the command has
+ * completed and the response has been updated.
+ */
+uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp);
+
+/**
+ * qbman_result_eqresp_rc() - determines if the enqueue command was successful.
+ * @eqresp: enqueue response.
+ *
+ * Return 0 when the command is successful.
+ */
+uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp);
+
 /**
  * qbman_swp_enqueue() - Issue an enqueue command.
  * @s: the software portal used for enqueue.
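Taken together, the new accessors support the flow used by dpaa2_set_enqueue_descriptor() and dpaa2_free_eq_descriptors() in the dpaa2 PMD: leave the slot's response id at zero, point the enqueue descriptor at the slot and give it a non-zero token, then treat a non-zero rspid later as "QBMAN has written the response back". A condensed, illustrative sketch of that flow (not part of the patch; it assumes the portal is affined and the descriptor already targets an FQ/QD, eqresp_iova is the I/O address of the response slot as obtained via DPAA2_VADDR_TO_IOVA() in the driver, and it busy-waits inline only for brevity whereas the driver polls later from dpaa2_free_eq_descriptors()):

#include <fsl_qbman_portal.h>

static int enqueue_with_response(struct qbman_swp *swp,
				 struct qbman_eq_desc *eqdesc,
				 const struct qbman_fd *fd,
				 struct qbman_result *eqresp,
				 uint64_t eqresp_iova)
{
	/* Slot starts with rspid == 0; QBMAN copies the non-zero token from
	 * the descriptor into it when it writes the response back.
	 */
	qbman_result_eqresp_set_rspid(eqresp, 0);
	qbman_eq_desc_set_response(eqdesc, eqresp_iova, 1);
	qbman_eq_desc_set_token(eqdesc, 1);

	if (qbman_swp_enqueue(swp, eqdesc, fd))
		return -1;	/* EQCR full, try again later */

	/* Response has been written once rspid becomes non-zero. */
	while (!qbman_result_eqresp_rspid(eqresp))
		;

	return qbman_result_eqresp_rc(eqresp) ? -1 : 0;
}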
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.c b/drivers/bus/fslmc/qbman/qbman_portal.c
index 14f4b03..f49b180 100644
@@ -1569,6 +1569,32 @@ uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
        return qbman_result_SCN_ctx(scn);
 }
 
+/********************/
+/* Parsing EQ RESP  */
+/********************/
+struct qbman_fd *qbman_result_eqresp_fd(struct qbman_result *eqresp)
+{
+       return (struct qbman_fd *)&eqresp->eq_resp.fd[0];
+}
+
+void qbman_result_eqresp_set_rspid(struct qbman_result *eqresp, uint8_t val)
+{
+       eqresp->eq_resp.rspid = val;
+}
+
+uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp)
+{
+       return eqresp->eq_resp.rspid;
+}
+
+uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp)
+{
+       if (eqresp->eq_resp.rc == 0xE)
+               return 0;
+       else
+               return -1;
+}
+
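The 0xE comparison above suggests that QBMAN writes that specific code into the response's rc field when an enqueue completes successfully, so qbman_result_eqresp_rc() returns 0 only in that case. dpaa2_free_eq_descriptors() relies on this: when a non-zero value comes back (the enqueue was rejected, for example by the order restoration logic), it invokes cb_eqresp_free so the driver itself frees the mbuf that the hardware will never release. Note also that the -1 is truncated to 255 by the uint8_t return type, which is still non-zero for callers that only test it for truth.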
 /******************/
 /* Buffer release */
 /******************/
diff --git a/drivers/bus/fslmc/rte_bus_fslmc_version.map b/drivers/bus/fslmc/rte_bus_fslmc_version.map
index dcc4e08..811a2e7 100644
@@ -120,7 +120,6 @@ DPDK_18.05 {
 
 DPDK_18.11 {
        global:
-
        dpaa2_dqrr_size;
        dpaa2_eqcr_size;
        dpci_get_link_state;
@@ -129,3 +128,18 @@ DPDK_18.11 {
        dpci_set_opr;
 
 } DPDK_18.05;
+
+DPDK_19.05 {
+       global:
+       dpaa2_free_eq_descriptors;
+
+       qbman_eq_desc_set_orp;
+       qbman_eq_desc_set_token;
+       qbman_result_DQ_odpid;
+       qbman_result_DQ_seqnum;
+       qbman_result_eqresp_fd;
+       qbman_result_eqresp_rc;
+       qbman_result_eqresp_rspid;
+       qbman_result_eqresp_set_rspid;
+} DPDK_18.11;
+
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index bc3faa8..0ab43ca 100644
@@ -665,6 +665,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
                        return -ret;
                }
        }
+       dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf;
        dev->data->tx_queues[tx_queue_id] = dpaa2_q;
        return 0;
 }
@@ -894,6 +895,10 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
                dpaa2_eth_setup_irqs(dev, 1);
        }
 
+       /* Change the tx burst function if ordered queues are used */
+       if (priv->en_ordered)
+               dev->tx_pkt_burst = dpaa2_dev_tx_ordered;
+
        return 0;
 }
 
@@ -1793,6 +1798,8 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
                dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
        else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
                dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
+       else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED)
+               dpaa2_ethq->cb = dpaa2_dev_process_ordered_event;
        else
                return -EINVAL;
 
@@ -1807,6 +1814,41 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
                cfg.destination.hold_active = 1;
        }
 
+       if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED &&
+                       !eth_priv->en_ordered) {
+               struct opr_cfg ocfg;
+
+               /* Restoration window size = 256 frames */
+               ocfg.oprrws = 3;
+               /* Restoration window size = 512 frames for LX2 */
+               if (dpaa2_svr_family == SVR_LX2160A)
+                       ocfg.oprrws = 4;
+               /* Auto advance NESN window enabled */
+               ocfg.oa = 1;
+               /* Late arrival window size disabled */
+               ocfg.olws = 0;
+               /* ORL resource exhaustion advance NESN disabled */
+               ocfg.oeane = 0;
+               /* Loose ordering enabled */
+               ocfg.oloe = 1;
+               eth_priv->en_loose_ordered = 1;
+               /* Strict ordering enabled if explicitly set */
+               if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) {
+                       ocfg.oloe = 0;
+                       eth_priv->en_loose_ordered = 0;
+               }
+
+               ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token,
+                                  dpaa2_ethq->tc_index, flow_id,
+                                  OPR_OPT_CREATE, &ocfg);
+               if (ret) {
+                       DPAA2_PMD_ERR("Error setting opr: ret: %d\n", ret);
+                       return ret;
+               }
+
+               eth_priv->en_ordered = 1;
+       }
+
        options |= DPNI_QUEUE_OPT_USER_CTX;
        cfg.user_context = (size_t)(dpaa2_ethq);
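The restoration-window values above are consistent with an encoding of 32 << oprrws frames, i.e. 256 frames for oprrws = 3 and 512 frames for oprrws = 4 on LX2160A, matching the inline comments. Loose ordering (ocfg.oloe = 1) is the default; setting the DPAA2_STRICT_ORDERING_ENABLE environment variable before starting the application switches the port to strict ordering, which is also what makes the ordered Tx path reserve an enqueue-response slot per frame in dpaa2_set_enqueue_descriptor() (see dpaa2_rxtx.c below).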
 
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index 420ad64..313cbe4 100644
@@ -96,15 +96,17 @@ struct dpaa2_dev_priv {
        uint16_t token;
        uint8_t nb_tx_queues;
        uint8_t nb_rx_queues;
+       uint32_t options;
        void *rx_vq[MAX_RX_QUEUES];
        void *tx_vq[MAX_TX_QUEUES];
 
        struct dpaa2_bp_list *bp_list; /**<Attached buffer pool list */
-       uint32_t options;
        uint8_t max_mac_filters;
        uint8_t max_vlan_filters;
        uint8_t num_rx_tc;
        uint8_t flags; /*dpaa2 config flags */
+       uint8_t en_ordered;
+       uint8_t en_loose_ordered;
 };
 
 int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
@@ -135,6 +137,15 @@ void dpaa2_dev_process_atomic_event(struct qbman_swp *swp,
                                    const struct qbman_result *dq,
                                    struct dpaa2_queue *rxq,
                                    struct rte_event *ev);
+void dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
+                                    const struct qbman_fd *fd,
+                                    const struct qbman_result *dq,
+                                    struct dpaa2_queue *rxq,
+                                    struct rte_event *ev);
 uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
+uint16_t dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs,
+                             uint16_t nb_pkts);
 uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
+void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci);
+
 #endif /* _DPAA2_ETHDEV_H */
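On the Rx side, dpaa2_dev_process_ordered_event() (declared above, implemented in dpaa2_rxtx.c below) packs the ORP id and sequence number from the dequeue result into mbuf->seqn together with DPAA2_ENQUEUE_FLAG_ORP; dpaa2_dev_tx_ordered() later unpacks them so the enqueue descriptor can restore the original frame order. A small standalone sketch of that pack/unpack round trip (illustrative only, repeating the constants added to dpaa2_hw_pvt.h):

#include <assert.h>
#include <stdint.h>

#define DPAA2_ENQUEUE_FLAG_ORP		(1ULL << 30)
#define DPAA2_EQCR_OPRID_SHIFT		16
#define DPAA2_EQCR_OPRID_MASK		0x3FFF0000
#define DPAA2_EQCR_SEQNUM_SHIFT		0
#define DPAA2_EQCR_SEQNUM_MASK		0x0000FFFF

/* Rx side: remember the frame's order restoration point id and sequence. */
static uint32_t seqn_pack(uint16_t orpid, uint16_t seqnum)
{
	return DPAA2_ENQUEUE_FLAG_ORP |
	       (((uint32_t)orpid << DPAA2_EQCR_OPRID_SHIFT) &
		DPAA2_EQCR_OPRID_MASK) |
	       (((uint32_t)seqnum << DPAA2_EQCR_SEQNUM_SHIFT) &
		DPAA2_EQCR_SEQNUM_MASK);
}

/* Tx side: recover the values passed to qbman_eq_desc_set_orp(). */
static void seqn_unpack(uint32_t seqn, uint16_t *orpid, uint16_t *seqnum)
{
	*orpid = (seqn & DPAA2_EQCR_OPRID_MASK) >> DPAA2_EQCR_OPRID_SHIFT;
	*seqnum = (seqn & DPAA2_EQCR_SEQNUM_MASK) >> DPAA2_EQCR_SEQNUM_SHIFT;
}

int main(void)
{
	uint16_t orpid, seqnum;

	seqn_unpack(seqn_pack(0x12, 0x345), &orpid, &seqnum);
	assert(orpid == 0x12 && seqnum == 0x345);
	return 0;
}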
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 2d4b9ef..1aa1847 100644
@@ -699,6 +699,33 @@ dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
        DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
 }
 
+void __attribute__((hot))
+dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
+                               const struct qbman_fd *fd,
+                               const struct qbman_result *dq,
+                               struct dpaa2_queue *rxq,
+                               struct rte_event *ev)
+{
+       rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
+               DPAA2_FD_PTA_SIZE + 16));
+
+       ev->flow_id = rxq->ev.flow_id;
+       ev->sub_event_type = rxq->ev.sub_event_type;
+       ev->event_type = RTE_EVENT_TYPE_ETHDEV;
+       ev->op = RTE_EVENT_OP_NEW;
+       ev->sched_type = rxq->ev.sched_type;
+       ev->queue_id = rxq->ev.queue_id;
+       ev->priority = rxq->ev.priority;
+
+       ev->mbuf = eth_fd_to_mbuf(fd);
+
+       ev->mbuf->seqn = DPAA2_ENQUEUE_FLAG_ORP;
+       ev->mbuf->seqn |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
+       ev->mbuf->seqn |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;
+
+       qbman_swp_dqrr_consume(swp, dq);
+}
+
 /*
  * Callback to handle sending packets through WRIOP based interface
  */
@@ -864,6 +891,234 @@ skip_tx:
        return num_tx;
 }
 
+void
+dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci)
+{
+       struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
+       struct qbman_fd *fd;
+       struct rte_mbuf *m;
+
+       fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
+       m = eth_fd_to_mbuf(fd);
+       rte_pktmbuf_free(m);
+}
+
+static void
+dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
+                            struct rte_mbuf *m,
+                            struct qbman_eq_desc *eqdesc)
+{
+       struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
+       struct dpaa2_dev_priv *priv = eth_data->dev_private;
+       struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
+       struct eqresp_metadata *eqresp_meta;
+       uint16_t orpid, seqnum;
+       uint8_t dq_idx;
+
+       qbman_eq_desc_set_qd(eqdesc, priv->qdid, dpaa2_q->flow_id,
+                            dpaa2_q->tc_index);
+
+       if (m->seqn & DPAA2_ENQUEUE_FLAG_ORP) {
+               orpid = (m->seqn & DPAA2_EQCR_OPRID_MASK) >>
+                       DPAA2_EQCR_OPRID_SHIFT;
+               seqnum = (m->seqn & DPAA2_EQCR_SEQNUM_MASK) >>
+                       DPAA2_EQCR_SEQNUM_SHIFT;
+
+               if (!priv->en_loose_ordered) {
+                       qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
+                       qbman_eq_desc_set_response(eqdesc, (uint64_t)
+                               DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
+                               dpio_dev->eqresp_pi]), 1);
+                       qbman_eq_desc_set_token(eqdesc, 1);
+
+                       eqresp_meta = &dpio_dev->eqresp_meta[
+                               dpio_dev->eqresp_pi];
+                       eqresp_meta->dpaa2_q = dpaa2_q;
+                       eqresp_meta->mp = m->pool;
+
+                       dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
+                               dpio_dev->eqresp_pi++ :
+                               (dpio_dev->eqresp_pi = 0);
+               } else {
+                       qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
+               }
+       } else {
+               dq_idx = m->seqn - 1;
+               qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
+               DPAA2_PER_LCORE_DQRR_SIZE--;
+               DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
+       }
+       m->seqn = DPAA2_INVALID_MBUF_SEQN;
+}
+
+/* Callback to handle sending ordered packets through WRIOP based interface */
+uint16_t
+dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+       /* Function to transmit the frames to the given device and VQ */
+       struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+       struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
+       struct dpaa2_dev_priv *priv = eth_data->dev_private;
+       struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
+       struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+       struct rte_mbuf *mi;
+       struct rte_mempool *mp;
+       struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
+       struct qbman_swp *swp;
+       uint32_t frames_to_send, num_free_eq_desc;
+       uint32_t loop, retry_count;
+       int32_t ret;
+       uint16_t num_tx = 0;
+       uint16_t bpid;
+
+       if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+               ret = dpaa2_affine_qbman_swp();
+               if (ret) {
+                       DPAA2_PMD_ERR("Failure in affining portal");
+                       return 0;
+               }
+       }
+       swp = DPAA2_PER_LCORE_PORTAL;
+
+       DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
+                          eth_data, dpaa2_q->fqid);
+
+       /* This would also handle normal and atomic queues as any type
+        * of packet can be enqueued when ordered queues are being used.
+        */
+       while (nb_pkts) {
+               /*Check if the queue is congested*/
+               retry_count = 0;
+               while (qbman_result_SCN_state(dpaa2_q->cscn)) {
+                       retry_count++;
+                       /* Retry for some time before giving up */
+                       if (retry_count > CONG_RETRY_COUNT)
+                               goto skip_tx;
+               }
+
+               frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
+                       dpaa2_eqcr_size : nb_pkts;
+
+               if (!priv->en_loose_ordered) {
+                       if ((*bufs)->seqn & DPAA2_ENQUEUE_FLAG_ORP) {
+                               num_free_eq_desc = dpaa2_free_eq_descriptors();
+                               if (num_free_eq_desc < frames_to_send)
+                                       frames_to_send = num_free_eq_desc;
+                       }
+               }
+
+               for (loop = 0; loop < frames_to_send; loop++) {
+                       /*Prepare enqueue descriptor*/
+                       qbman_eq_desc_clear(&eqdesc[loop]);
+
+                       if ((*bufs)->seqn) {
+                               /* Use only queue 0 for Tx in case of atomic/
+                                * ordered packets as packets can get unordered
+                                * when being transmitted out from the interface
+                                */
+                               dpaa2_set_enqueue_descriptor(order_sendq,
+                                                            (*bufs),
+                                                            &eqdesc[loop]);
+                       } else {
+                               qbman_eq_desc_set_no_orp(&eqdesc[loop],
+                                                        DPAA2_EQ_RESP_ERR_FQ);
+                               qbman_eq_desc_set_qd(&eqdesc[loop], priv->qdid,
+                                                    dpaa2_q->flow_id,
+                                                    dpaa2_q->tc_index);
+                       }
+
+                       if (likely(RTE_MBUF_DIRECT(*bufs))) {
+                               mp = (*bufs)->pool;
+                               /* Check the basic scenario and set
+                                * the FD appropriately here itself.
+                                */
+                               if (likely(mp && mp->ops_index ==
+                                   priv->bp_list->dpaa2_ops_index &&
+                                   (*bufs)->nb_segs == 1 &&
+                                   rte_mbuf_refcnt_read((*bufs)) == 1)) {
+                                       if (unlikely((*bufs)->ol_flags
+                                               & PKT_TX_VLAN_PKT)) {
+                                         ret = rte_vlan_insert(bufs);
+                                         if (ret)
+                                               goto send_n_return;
+                                       }
+                                       DPAA2_MBUF_TO_CONTIG_FD((*bufs),
+                                               &fd_arr[loop],
+                                               mempool_to_bpid(mp));
+                                       bufs++;
+                                       continue;
+                               }
+                       } else {
+                               mi = rte_mbuf_from_indirect(*bufs);
+                               mp = mi->pool;
+                       }
+                       /* Not a hw_pkt pool allocated frame */
+                       if (unlikely(!mp || !priv->bp_list)) {
+                               DPAA2_PMD_ERR("Err: No buffer pool attached");
+                               goto send_n_return;
+                       }
+
+                       if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
+                               DPAA2_PMD_WARN("Non DPAA2 buffer pool");
+                               /* alloc should be from the default buffer pool
+                                * attached to this interface
+                                */
+                               bpid = priv->bp_list->buf_pool.bpid;
+
+                               if (unlikely((*bufs)->nb_segs > 1)) {
+                                       DPAA2_PMD_ERR(
+                                               "S/G not supp for non hw offload buffer");
+                                       goto send_n_return;
+                               }
+                               if (eth_copy_mbuf_to_fd(*bufs,
+                                                       &fd_arr[loop], bpid)) {
+                                       goto send_n_return;
+                               }
+                               /* free the original packet */
+                               rte_pktmbuf_free(*bufs);
+                       } else {
+                               bpid = mempool_to_bpid(mp);
+                               if (unlikely((*bufs)->nb_segs > 1)) {
+                                       if (eth_mbuf_to_sg_fd(*bufs,
+                                                             &fd_arr[loop],
+                                                             bpid))
+                                               goto send_n_return;
+                               } else {
+                                       eth_mbuf_to_fd(*bufs,
+                                                      &fd_arr[loop], bpid);
+                               }
+                       }
+                       bufs++;
+               }
+               loop = 0;
+               while (loop < frames_to_send) {
+                       loop += qbman_swp_enqueue_multiple_desc(swp,
+                                       &eqdesc[loop], &fd_arr[loop],
+                                       frames_to_send - loop);
+               }
+
+               num_tx += frames_to_send;
+               nb_pkts -= frames_to_send;
+       }
+       dpaa2_q->tx_pkts += num_tx;
+       return num_tx;
+
+send_n_return:
+       /* send any already prepared fd */
+       if (loop) {
+               unsigned int i = 0;
+
+               while (i < loop) {
+                       i += qbman_swp_enqueue_multiple_desc(swp, &eqdesc[i],
+                                                            &fd_arr[i], loop - i);
+               }
+               num_tx += loop;
+       }
+skip_tx:
+       dpaa2_q->tx_pkts += num_tx;
+       return num_tx;
+}
+
 /**
  * Dummy DPDK callback for TX.
  *