/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2017,2019 NXP
+ * Copyright 2017,2019-2021 NXP
*/
#include <assert.h>
struct rte_event ev[],
uint16_t nb_events)
{
- struct rte_mbuf *m = (struct rte_mbuf *)ev[0].mbuf;
+ void *txq[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH];
+ struct rte_mbuf *m[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH];
uint8_t qid, i;
RTE_SET_USED(port);
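+ /* Collect the mbuf and resolve the destination Tx queue for each event
+ * so that the whole burst can be handed to the ordered multi-queue Tx
+ * path in a single call below.
+ */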
for (i = 0; i < nb_events; i++) {
- qid = rte_event_eth_tx_adapter_txq_get(m);
- rte_eth_tx_burst(m->port, qid, &m, 1);
+ m[i] = (struct rte_mbuf *)ev[i].mbuf;
+ qid = rte_event_eth_tx_adapter_txq_get(m[i]);
+ txq[i] = rte_eth_devices[m[i]->port].data->tx_queues[qid];
}
+ dpaa2_dev_tx_multi_txq_ordered(txq, m, nb_events);
+
return nb_events;
}
*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
}
+uint16_t
+dpaa2_dev_tx_multi_txq_ordered(void **queue,
+ struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ /* Transmit each frame to its respective Tx queue: queue[i] is the
+ * dpaa2 Tx queue for bufs[i]. Returns the number of frames enqueued.
+ */
+ uint32_t loop, retry_count;
+ int32_t ret;
+ struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+ uint32_t frames_to_send;
+ struct rte_mempool *mp;
+ struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
+ struct dpaa2_queue *dpaa2_q[MAX_TX_RING_SLOTS];
+ struct qbman_swp *swp;
+ uint16_t bpid;
+ struct rte_mbuf *mi;
+ struct rte_eth_dev_data *eth_data;
+ struct dpaa2_dev_priv *priv;
+ struct dpaa2_queue *order_sendq;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Failed to allocate IO portal, tid: %d",
+ rte_gettid());
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
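+ /* Prepare one enqueue descriptor and one frame descriptor per packet;
+ * each packet may target a different Tx queue.
+ */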
+ for (loop = 0; loop < nb_pkts; loop++) {
+ dpaa2_q[loop] = (struct dpaa2_queue *)queue[loop];
+ eth_data = dpaa2_q[loop]->eth_data;
+ priv = eth_data->dev_private;
+ qbman_eq_desc_clear(&eqdesc[loop]);
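+ /* Frames carrying a sequence number on an ordered-enabled port are
+ * enqueued via the ordered send queue descriptor so their order is
+ * restored; all other frames are enqueued without order restoration.
+ */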
+ if (*dpaa2_seqn(*bufs) && priv->en_ordered) {
+ order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
+ dpaa2_set_enqueue_descriptor(order_sendq,
+ (*bufs),
+ &eqdesc[loop]);
+ } else {
+ qbman_eq_desc_set_no_orp(&eqdesc[loop],
+ DPAA2_EQ_RESP_ERR_FQ);
+ qbman_eq_desc_set_fq(&eqdesc[loop],
+ dpaa2_q[loop]->fqid);
+ }
+
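+ /* Do not build an FD for a congested queue; poll its congestion
+ * state for a bounded number of iterations, then send whatever has
+ * been prepared so far.
+ */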
+ retry_count = 0;
+ while (qbman_result_SCN_state(dpaa2_q[loop]->cscn)) {
+ retry_count++;
+ /* Retry for some time before giving up */
+ if (retry_count > CONG_RETRY_COUNT)
+ goto send_frames;
+ }
+
+ if (likely(RTE_MBUF_DIRECT(*bufs))) {
+ mp = (*bufs)->pool;
+ /* Check the basic scenario and set
+ * the FD appropriately here itself.
+ */
+ if (likely(mp && mp->ops_index ==
+ priv->bp_list->dpaa2_ops_index &&
+ (*bufs)->nb_segs == 1 &&
+ rte_mbuf_refcnt_read((*bufs)) == 1)) {
+ if (unlikely((*bufs)->ol_flags
+ & RTE_MBUF_F_TX_VLAN)) {
+ ret = rte_vlan_insert(bufs);
+ if (ret)
+ goto send_frames;
+ }
+ DPAA2_MBUF_TO_CONTIG_FD((*bufs),
+ &fd_arr[loop],
+ mempool_to_bpid(mp));
+ bufs++;
+ continue;
+ }
+ } else {
+ mi = rte_mbuf_from_indirect(*bufs);
+ mp = mi->pool;
+ }
+ /* Not a hw_pkt pool allocated frame */
+ if (unlikely(!mp || !priv->bp_list)) {
+ DPAA2_PMD_ERR("Err: No buffer pool attached");
+ goto send_frames;
+ }
+
+ if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
+ DPAA2_PMD_WARN("Non DPAA2 buffer pool");
+ /* alloc should be from the default buffer pool
+ * attached to this interface
+ */
+ bpid = priv->bp_list->buf_pool.bpid;
+
+ if (unlikely((*bufs)->nb_segs > 1)) {
+ DPAA2_PMD_ERR(
+ "S/G not supp for non hw offload buffer");
+ goto send_frames;
+ }
+ if (eth_copy_mbuf_to_fd(*bufs,
+ &fd_arr[loop], bpid)) {
+ goto send_frames;
+ }
+ /* free the original packet */
+ rte_pktmbuf_free(*bufs);
+ } else {
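+ /* Buffer comes from a DPAA2-managed pool: build the FD directly,
+ * using a scatter-gather FD for multi-segment mbufs.
+ */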
+ bpid = mempool_to_bpid(mp);
+ if (unlikely((*bufs)->nb_segs > 1)) {
+ if (eth_mbuf_to_sg_fd(*bufs,
+ &fd_arr[loop],
+ mp,
+ bpid))
+ goto send_frames;
+ } else {
+ eth_mbuf_to_fd(*bufs,
+ &fd_arr[loop], bpid);
+ }
+ }
+
+ bufs++;
+ }
+
+send_frames:
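+ /* Enqueue whatever was prepared; if nothing could be enqueued, retry
+ * up to DPAA2_MAX_TX_RETRY_COUNT times before giving up and returning
+ * the number of frames actually sent.
+ */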
+ frames_to_send = loop;
+ loop = 0;
+ retry_count = 0;
+ while (loop < frames_to_send) {
+ ret = qbman_swp_enqueue_multiple_desc(swp, &eqdesc[loop],
+ &fd_arr[loop],
+ frames_to_send - loop);
+ if (likely(ret > 0)) {
+ loop += ret;
+ } else {
+ retry_count++;
+ if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
+ break;
+ }
+ }
+
+ return loop;
+}
+
/* Callback to handle sending ordered packets through WRIOP based interface */
uint16_t
dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)