/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 */
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_eventdev.h>

#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"
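
/* Derive the mbuf packet type from the hardware parse results stored in
 * the frame annotation at hw_annot_addr.
 */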
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse(uint64_t hw_annot_addr)
{
	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	PMD_RX_LOG(DEBUG, "annotation = 0x%lx", annotation->word4);

	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
		pkt_type = RTE_PTYPE_L2_ETHER_ARP;
		goto parse_done;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
		pkt_type = RTE_PTYPE_L2_ETHER;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
			     L3_IPV4_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV4;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
				     L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
		   L3_IPV6_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV6;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
				     L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
			     L3_IP_1_MORE_FRAGMENT |
			     L3_IP_N_FIRST_FRAGMENT |
			     L3_IP_N_MORE_FRAGMENT)) {
		pkt_type |= RTE_PTYPE_L4_FRAG;
		goto parse_done;
	} else {
		pkt_type |= RTE_PTYPE_L4_NONFRAG;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_UDP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_TCP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_SCTP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_ICMP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
		pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
	return pkt_type;
}
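
/* Translate annotation status bits (VLAN presence, L3/L4 checksum errors)
 * into mbuf Rx offload flags.
 */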
static inline void __attribute__((hot))
dpaa2_dev_rx_offload(uint64_t hw_annot_addr, struct rte_mbuf *mbuf)
{
	struct dpaa2_annot_hdr *annotation =
		(struct dpaa2_annot_hdr *)hw_annot_addr;

	if (BIT_ISSET_AT_POS(annotation->word3,
			     L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
		mbuf->ol_flags |= PKT_RX_VLAN;

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}
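
/* Convert a scatter/gather frame descriptor received from hardware into a
 * chained mbuf; the buffer that carried the SG table itself is released
 * back to the pool at the end.
 */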
static inline struct rte_mbuf *__attribute__((hot))
eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_sge *sgt, *sge;
	dma_addr_t sg_addr;
	int i = 0;
	uint64_t fd_addr;
	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

	fd_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	/* Get Scatter gather table address */
	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));

	sge = &sgt[i++];
	sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

	/* First Scatter gather entry */
	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	/* Prepare all the metadata for first segment */
	first_seg->buf_addr = (uint8_t *)sg_addr;
	first_seg->ol_flags = 0;
	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
	first_seg->data_len = sge->length & 0x1FFFF;
	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
	first_seg->nb_segs = 1;
	first_seg->next = NULL;

	first_seg->packet_type = dpaa2_dev_rx_parse(
		(uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
		+ DPAA2_FD_PTA_SIZE);
	dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
			     DPAA2_GET_FD_ADDR(fd)) +
			     DPAA2_FD_PTA_SIZE, first_seg);
	rte_mbuf_refcnt_set(first_seg, 1);

	cur_seg = first_seg;
	while (!DPAA2_SG_IS_FINAL(sge)) {
		sge = &sgt[i++];
		sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(
				DPAA2_GET_FLE_ADDR(sge));
		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
		next_seg->buf_addr = (uint8_t *)sg_addr;
		next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
		next_seg->data_len = sge->length & 0x1FFFF;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(next_seg, 1);
		cur_seg->next = next_seg;
		next_seg->next = NULL;
		cur_seg = next_seg;
	}
	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	rte_mbuf_refcnt_set(temp, 1);
	rte_pktmbuf_free_seg(temp);

	return (void *)first_seg;
}
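
/* Convert a single-buffer frame descriptor into an mbuf carved out of the
 * same hardware buffer (no copy).
 */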
static inline struct rte_mbuf *__attribute__((hot))
eth_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* need to repopulate some of the fields,
	 * as they may have changed in last transmission
	 */
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
	mbuf->pkt_len = mbuf->data_len;

	/* Parse the packet */
	/* parse results are after the private - sw annotation area */
	mbuf->packet_type = dpaa2_dev_rx_parse(
		(uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
		+ DPAA2_FD_PTA_SIZE);

	dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
			     DPAA2_GET_FD_ADDR(fd)) +
			     DPAA2_FD_PTA_SIZE, mbuf);

	mbuf->next = NULL;
	rte_mbuf_refcnt_set(mbuf, 1);

	PMD_RX_LOG(DEBUG, "to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	return mbuf;
}
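
/* Build a scatter/gather frame descriptor for a multi-segment mbuf; a
 * buffer allocated from the mbuf pool is used to hold the SG table.
 */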
static int __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		  struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
	struct qbman_sge *sgt, *sge = NULL;
	int i;

	/* First Prepare FD to be transmitted */
	/* Resetting the buffer pool id and offset field */
	fd->simple.bpid_offset = 0;

	temp = rte_pktmbuf_alloc(mbuf->pool);
	if (temp == NULL) {
		PMD_TX_LOG(ERR, "No memory to allocate S/G table");
		return -ENOMEM;
	}

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
	DPAA2_SET_FD_OFFSET(fd, temp->data_off);
	DPAA2_SET_FD_BPID(fd, bpid);
	DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
	/* Set Scatter gather table and Scatter gather entries */
	sgt = (struct qbman_sge *)(
			(uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_GET_FD_OFFSET(fd));

	for (i = 0; i < mbuf->nb_segs; i++) {
		sge = &sgt[i];
		/* Resetting the buffer pool id and offset field */
		sge->fin_bpid_offset = 0;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
		DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
		sge->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			if (rte_mbuf_refcnt_read(cur_seg) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
				rte_mbuf_refcnt_update(cur_seg, -1);
			} else {
				DPAA2_SET_FLE_BPID(sge,
					mempool_to_bpid(cur_seg->pool));
			}
			cur_seg = cur_seg->next;
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * owner buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
			} else {
				DPAA2_SET_FLE_BPID(sge,
						   mempool_to_bpid(mi->pool));
				rte_mbuf_refcnt_update(mi, 1);
			}
			prev_seg = cur_seg;
			cur_seg = cur_seg->next;
			prev_seg->next = NULL;
			rte_pktmbuf_free(prev_seg);
		}
	}
	DPAA2_SG_SET_FINAL(sge, true);
	return 0;
}
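
/* Build a simple frame descriptor directly from a single-segment mbuf that
 * was allocated from a hardware-backed mempool.
 */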
static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid) __attribute__((unused));

static void __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid)
{
	/* Resetting the buffer pool id and offset field */
	fd->simple.bpid_offset = 0;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FD_LEN(fd, mbuf->data_len);
	DPAA2_SET_FD_BPID(fd, bpid);
	DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
	DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);

	PMD_TX_LOG(DEBUG, "mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(mbuf, -1);
		}
	} else {
		struct rte_mbuf *mi;

		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1)
			DPAA2_SET_FD_IVP(fd);
		else
			rte_mbuf_refcnt_update(mi, 1);
		rte_pktmbuf_free(mbuf);
	}
}
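
/* Copy an mbuf that does not come from a hardware-backed pool into a
 * freshly allocated DPAA2 buffer and build a simple frame descriptor
 * for the copy.
 */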
static inline int __attribute__((hot))
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
		    struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *m;
	void *mb = NULL;

	if (rte_dpaa2_mbuf_alloc_bulk(
		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
		PMD_TX_LOG(WARNING, "Unable to allocate DPAA2 buffer");
		return -1;
	}
	m = (struct rte_mbuf *)mb;
	memcpy((char *)m->buf_addr + mbuf->data_off,
	       (void *)((char *)mbuf->buf_addr + mbuf->data_off),
	       mbuf->pkt_len);

	/* Copy required fields */
	m->data_off = mbuf->data_off;
	m->ol_flags = mbuf->ol_flags;
	m->packet_type = mbuf->packet_type;
	m->tx_offload = mbuf->tx_offload;

	/* Resetting the buffer pool id and offset field */
	fd->simple.bpid_offset = 0;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(m));
	DPAA2_SET_FD_LEN(fd, mbuf->data_len);
	DPAA2_SET_FD_BPID(fd, bpid);
	DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
	DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);

	PMD_TX_LOG(DEBUG, " mbuf %p BMAN buf addr %p",
		   (void *)mbuf, mbuf->buf_addr);

	PMD_TX_LOG(DEBUG, " fdaddr =%lx bpid =%d meta =%d off =%d, len =%d",
		   DPAA2_GET_FD_ADDR(fd),
		   DPAA2_GET_FD_BPID(fd),
		   rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		   DPAA2_GET_FD_OFFSET(fd),
		   DPAA2_GET_FD_LEN(fd));

	return 0;
}
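
/* Rx burst callback: issues volatile dequeue (pull) commands on the QBMAN
 * software portal and converts the returned frame descriptors into mbufs.
 * A new pull is issued at the end so frames are already dequeued when the
 * next burst arrives (prefetch mode).
 */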
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function receive frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd[DPAA2_DQRR_RING_SIZE];
	struct qbman_pull_desc pulldesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev *dev = dpaa2_q->dev;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	if (!q_storage->active_dqs) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
					      DPAA2_DQRR_RING_SIZE : nb_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				PMD_RX_LOG(WARNING, "VDQ command is not issued."
					   " QBMAN is busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);
	}
	dq_storage = q_storage->active_dqs;
	/* Check if the previous issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	while (!is_last) {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd[num_rx] = qbman_result_DQ_fd(dq_storage);

		/* Prefetch Annotation address for the parse results */
		rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(fd[num_rx])
				+ DPAA2_FD_PTA_SIZE + 16));

		if (unlikely(DPAA2_FD_GET_FORMAT(fd[num_rx]) == qbman_fd_sg))
			bufs[num_rx] = eth_sg_fd_to_mbuf(fd[num_rx]);
		else
			bufs[num_rx] = eth_fd_to_mbuf(fd[num_rx]);
		bufs[num_rx]->port = dev->data->port_id;

		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
			rte_vlan_strip(bufs[num_rx]);

		dq_storage++;
		num_rx++;
	}

	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
	}
	q_storage->toggle ^= 1;
	dq_storage = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, DPAA2_DQRR_RING_SIZE);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			PMD_RX_LOG(WARNING, "VDQ command is not issued."
				   " QBMAN is busy\n");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);

	dpaa2_q->rx_pkts += num_rx;

	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}
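
/* Fill an rte_event from a received frame descriptor and the event
 * configuration of the Rx queue, then consume the DQRR entry.
 */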
void __attribute__((hot))
dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	ev->mbuf = eth_fd_to_mbuf(fd);

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	qbman_swp_dqrr_consume(swp, dq);
}

/*
 * Callback to handle sending packets through WRIOP based interface
 */
uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to given device and VQ */
	uint32_t loop, retry_count;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	uint32_t frames_to_send;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint16_t bpid;
	struct rte_eth_dev *dev = dpaa2_q->dev;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	PMD_TX_LOG(DEBUG, "===> dev =%p, fqid =%d", dev, dpaa2_q->fqid);

	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
			     dpaa2_q->flow_id, dpaa2_q->tc_index);

	/* Clear the unused FD fields before sending */
	while (nb_pkts) {
		/* Check if the queue is congested */
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto skip_tx;
		}

		frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts;
		for (loop = 0; loop < frames_to_send; loop++) {
			fd_arr[loop].simple.frc = 0;
			DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
			DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL);
			if (RTE_MBUF_DIRECT(*bufs)) {
				mp = (*bufs)->pool;
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}
			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp || !priv->bp_list)) {
				PMD_TX_LOG(ERR, "err: no bpool attached");
				goto send_n_return;
			}

			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				PMD_TX_LOG(ERR, "non hw offload buffer");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				bpid = priv->bp_list->buf_pool.bpid;

				if (unlikely((*bufs)->nb_segs > 1)) {
					PMD_TX_LOG(ERR, "S/G support not added"
						" for non hw offload buffer");
					goto send_n_return;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					goto send_n_return;
				}
				/* free the original packet */
				rte_pktmbuf_free(*bufs);
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							&fd_arr[loop], bpid))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop], bpid);
				}
			}
			bufs++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
					&fd_arr[loop], frames_to_send - loop);
		}

		num_tx += frames_to_send;
		dpaa2_q->tx_pkts += frames_to_send;
		nb_pkts -= frames_to_send;
	}
	return num_tx;

send_n_return:
	/* send any already prepared fd */
	if (loop) {
		unsigned int i = 0;

		while (i < loop) {
			i += qbman_swp_enqueue_multiple(swp, &eqdesc,
							&fd_arr[i], loop - i);
		}
		num_tx += loop;
		dpaa2_q->tx_pkts += loop;
	}
skip_tx:
	return num_tx;
}

/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param queue
 *   Generic pointer to TX queue structure.
 * @param bufs
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 * @return
 *   Number of packets successfully transmitted (<= nb_pkts).
 */
uint16_t
dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)