1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2016-2021 NXP
12 #include <ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
17 #include <rte_hexdump.h>
19 #include <rte_fslmc.h>
20 #include <fslmc_vfio.h>
21 #include <dpaa2_hw_pvt.h>
22 #include <dpaa2_hw_dpio.h>
23 #include <dpaa2_hw_mempool.h>
25 #include "dpaa2_pmd_logs.h"
26 #include "dpaa2_ethdev.h"
27 #include "base/dpaa2_hw_dpni_annot.h"
29 static inline uint32_t __rte_hot
30 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
31 struct dpaa2_annot_hdr *annotation);
33 static void enable_tx_tstamp(struct qbman_fd *fd) __rte_unused;
35 static inline rte_mbuf_timestamp_t *
36 dpaa2_timestamp_dynfield(struct rte_mbuf *mbuf)
38 return RTE_MBUF_DYNFIELD(mbuf,
39 dpaa2_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
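/* Build a contiguous frame descriptor (FD) from a single-segment mbuf:
 * the FD carries the buffer IOVA, the data length/offset and the buffer
 * pool id, while FRC, CTRL and FLC are cleared so no stale annotation or
 * flow-context data is handed to hardware.
 */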
42 #define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) do { \
43 DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
44 DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
45 DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
46 DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
47 DPAA2_SET_FD_FRC(_fd, 0); \
48 DPAA2_RESET_FD_CTRL(_fd); \
49 DPAA2_RESET_FD_FLC(_fd); \
52 static inline void __rte_hot
53 dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
56 uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);
57 struct dpaa2_annot_hdr *annotation =
58 (struct dpaa2_annot_hdr *)hw_annot_addr;
60 m->packet_type = RTE_PTYPE_UNKNOWN;
62 case DPAA2_PKT_TYPE_ETHER:
63 m->packet_type = RTE_PTYPE_L2_ETHER;
65 case DPAA2_PKT_TYPE_IPV4:
66 m->packet_type = RTE_PTYPE_L2_ETHER |
69 case DPAA2_PKT_TYPE_IPV6:
70 m->packet_type = RTE_PTYPE_L2_ETHER |
73 case DPAA2_PKT_TYPE_IPV4_EXT:
74 m->packet_type = RTE_PTYPE_L2_ETHER |
75 RTE_PTYPE_L3_IPV4_EXT;
77 case DPAA2_PKT_TYPE_IPV6_EXT:
78 m->packet_type = RTE_PTYPE_L2_ETHER |
79 RTE_PTYPE_L3_IPV6_EXT;
81 case DPAA2_PKT_TYPE_IPV4_TCP:
82 m->packet_type = RTE_PTYPE_L2_ETHER |
83 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
85 case DPAA2_PKT_TYPE_IPV6_TCP:
86 m->packet_type = RTE_PTYPE_L2_ETHER |
87 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
89 case DPAA2_PKT_TYPE_IPV4_UDP:
90 m->packet_type = RTE_PTYPE_L2_ETHER |
91 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
93 case DPAA2_PKT_TYPE_IPV6_UDP:
94 m->packet_type = RTE_PTYPE_L2_ETHER |
95 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
97 case DPAA2_PKT_TYPE_IPV4_SCTP:
98 m->packet_type = RTE_PTYPE_L2_ETHER |
99 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
101 case DPAA2_PKT_TYPE_IPV6_SCTP:
102 m->packet_type = RTE_PTYPE_L2_ETHER |
103 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
105 case DPAA2_PKT_TYPE_IPV4_ICMP:
106 m->packet_type = RTE_PTYPE_L2_ETHER |
107 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
109 case DPAA2_PKT_TYPE_IPV6_ICMP:
110 m->packet_type = RTE_PTYPE_L2_ETHER |
111 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
114 m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
116 m->hash.rss = fd->simple.flc_hi;
117 m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
119 if (dpaa2_enable_ts[m->port]) {
120 *dpaa2_timestamp_dynfield(m) = annotation->word2;
121 m->ol_flags |= dpaa2_timestamp_rx_dynflag;
122 DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "",
123 *dpaa2_timestamp_dynfield(m));
126 DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
127 "ol_flags =0x%" PRIx64 "",
128 frc, m->packet_type, m->ol_flags);
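/* Detailed (slow) Rx parse path: walks the hardware annotation words
 * (word3/word4/word5/word8) produced by the WRIOP parser to build the
 * packet type, extract the VLAN TCI, detect IP fragments and set the
 * checksum error flags.
 */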
131 static inline uint32_t __rte_hot
132 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
133 struct dpaa2_annot_hdr *annotation)
135 uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
138 DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
139 "(4)=0x%" PRIx64 "\t",
140 annotation->word3, annotation->word4);
142 #if defined(RTE_LIBRTE_IEEE1588)
143 if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP)) {
144 mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
145 mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;
149 if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
150 vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
151 (VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
152 mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
153 mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
154 pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
155 } else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
156 vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
157 (VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
158 mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
159 mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_QINQ;
160 pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
163 if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
164 pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
166 } else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
167 pkt_type |= RTE_PTYPE_L2_ETHER;
172 if (BIT_ISSET_AT_POS(annotation->word3, L2_MPLS_1_PRESENT |
174 pkt_type |= RTE_PTYPE_L2_ETHER_MPLS;
176 if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
177 L3_IPV4_N_PRESENT)) {
178 pkt_type |= RTE_PTYPE_L3_IPV4;
179 if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
180 L3_IP_N_OPT_PRESENT))
181 pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
183 } else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
184 L3_IPV6_N_PRESENT)) {
185 pkt_type |= RTE_PTYPE_L3_IPV6;
186 if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
187 L3_IP_N_OPT_PRESENT))
188 pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
193 if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
194 mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
195 else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
196 mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
198 if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
199 L3_IP_1_MORE_FRAGMENT |
200 L3_IP_N_FIRST_FRAGMENT |
201 L3_IP_N_MORE_FRAGMENT)) {
202 pkt_type |= RTE_PTYPE_L4_FRAG;
205 pkt_type |= RTE_PTYPE_L4_NONFRAG;
208 if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
209 pkt_type |= RTE_PTYPE_L4_UDP;
211 else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
212 pkt_type |= RTE_PTYPE_L4_TCP;
214 else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
215 pkt_type |= RTE_PTYPE_L4_SCTP;
217 else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
218 pkt_type |= RTE_PTYPE_L4_ICMP;
220 else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
221 pkt_type |= RTE_PTYPE_UNKNOWN;
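/* Generic Rx parse path: checksum error flags are taken from annotation
 * word8 and the Rx timestamp from word2; annotation word3 is then checked
 * against a mask of less common parser bits to decide whether the frame
 * needs the detailed (slow) parser, otherwise word4 directly yields one of
 * the common L2/L3/L4 packet-type combinations.
 */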
227 static inline uint32_t __rte_hot
228 dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
230 struct dpaa2_annot_hdr *annotation =
231 (struct dpaa2_annot_hdr *)hw_annot_addr;
233 DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
236 if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
237 mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
238 else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
239 mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
241 if (dpaa2_enable_ts[mbuf->port]) {
242 *dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
243 mbuf->ol_flags |= dpaa2_timestamp_rx_dynflag;
244 DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "",
245 *dpaa2_timestamp_dynfield(mbuf));
248 /* Check detailed parsing requirement */
249 if (annotation->word3 & 0x7FFFFC3FFFF)
250 return dpaa2_dev_rx_parse_slow(mbuf, annotation);
252 /* Return some common types from parse processing */
253 switch (annotation->word4) {
255 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
257 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
258 case DPAA2_L3_IPv4_TCP:
259 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
261 case DPAA2_L3_IPv4_UDP:
262 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
264 case DPAA2_L3_IPv6_TCP:
265 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
267 case DPAA2_L3_IPv6_UDP:
268 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
274 return dpaa2_dev_rx_parse_slow(mbuf, annotation);
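/* Convert a scatter/gather FD into a chained mbuf: the FD address points
 * at a buffer holding the scatter-gather table (SGT) at the FD offset;
 * each SG entry is turned back into an mbuf segment (the mbuf header is
 * stored inline, in front of the data buffer) and linked into the chain,
 * and the buffer that carried the SGT itself is released at the end.
 */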
277 static inline struct rte_mbuf *__rte_hot
278 eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
281 struct qbman_sge *sgt, *sge;
282 size_t sg_addr, fd_addr;
285 struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;
287 fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
288 hw_annot_addr = (void *)(fd_addr + DPAA2_FD_PTA_SIZE);
290 /* Get Scatter gather table address */
291 sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));
294 sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));
296 /* First Scatter gather entry */
297 first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
298 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
299 /* Prepare all the metadata for first segment */
300 first_seg->buf_addr = (uint8_t *)sg_addr;
301 first_seg->ol_flags = 0;
302 first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
303 first_seg->data_len = sge->length & 0x1FFFF;
304 first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
305 first_seg->nb_segs = 1;
306 first_seg->next = NULL;
307 first_seg->port = port_id;
308 if (dpaa2_svr_family == SVR_LX2160A)
309 dpaa2_dev_rx_parse_new(first_seg, fd, hw_annot_addr);
311 first_seg->packet_type =
312 dpaa2_dev_rx_parse(first_seg, hw_annot_addr);
314 rte_mbuf_refcnt_set(first_seg, 1);
316 while (!DPAA2_SG_IS_FINAL(sge)) {
318 sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
319 DPAA2_GET_FLE_ADDR(sge));
320 next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
321 rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
322 next_seg->buf_addr = (uint8_t *)sg_addr;
323 next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
324 next_seg->data_len = sge->length & 0x1FFFF;
325 first_seg->nb_segs += 1;
326 rte_mbuf_refcnt_set(next_seg, 1);
327 cur_seg->next = next_seg;
328 next_seg->next = NULL;
331 temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
332 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
333 rte_mbuf_refcnt_set(temp, 1);
334 rte_pktmbuf_free_seg(temp);
336 return (void *)first_seg;
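/* Convert a contiguous FD back into the mbuf it was built from. The mbuf
 * header lives inline ahead of the data buffer, so it is recovered from
 * the buffer virtual address and the pool's metadata size; conceptually
 * (illustrative sketch only, see DPAA2_INLINE_MBUF_FROM_BUF):
 *
 *	mbuf = (struct rte_mbuf *)((size_t)buf_vaddr - meta_data_size);
 *
 * Only the fields hardware may have changed (offset, length, port) are
 * repopulated before the frame is parsed.
 */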
339 static inline struct rte_mbuf *__rte_hot
340 eth_fd_to_mbuf(const struct qbman_fd *fd,
343 void *v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
344 void *hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
345 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
346 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
348 /* need to repopulate some of the fields,
349  * as they may have changed in the last transmission
353 mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
354 mbuf->data_len = DPAA2_GET_FD_LEN(fd);
355 mbuf->pkt_len = mbuf->data_len;
356 mbuf->port = port_id;
358 rte_mbuf_refcnt_set(mbuf, 1);
360 /* Parse the packet */
361 /* Parse results for LX2 are available in the FRC field of the FD.
362  * For other DPAA2 platforms, parse results are placed after
363  * the private SW annotation area
366 if (dpaa2_svr_family == SVR_LX2160A)
367 dpaa2_dev_rx_parse_new(mbuf, fd, hw_annot_addr);
369 mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, hw_annot_addr);
371 DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
372 "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
373 mbuf, mbuf->buf_addr, mbuf->data_off,
374 DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
375 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
376 DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
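/* Build a scatter/gather FD from a (possibly multi-segment) mbuf chain.
 * The SG table is placed in the headroom of the first direct segment when
 * there is enough room, otherwise in a buffer allocated from the given
 * mempool; each segment becomes one SG entry. Segments whose buffers are
 * still referenced elsewhere (refcount above 1, or external buffers) get
 * the "invalid bpid" bit set so hardware does not free memory that is
 * still owned by software.
 */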
381 static int __rte_noinline __rte_hot
382 eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
384 struct rte_mempool *mp, uint16_t bpid)
386 struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
387 struct qbman_sge *sgt, *sge = NULL;
390 #ifdef RTE_LIBRTE_IEEE1588
391 /* annotation area for timestamp in first buffer */
394 if (RTE_MBUF_DIRECT(mbuf) &&
395 (mbuf->data_off > (mbuf->nb_segs * sizeof(struct qbman_sge)
398 if (rte_mbuf_refcnt_read(temp) > 1) {
399 /* If refcnt > 1, an invalid bpid is set to ensure
400  * the buffer is not freed by HW
402 fd->simple.bpid_offset = 0;
403 DPAA2_SET_FD_IVP(fd);
404 rte_mbuf_refcnt_update(temp, -1);
406 DPAA2_SET_ONLY_FD_BPID(fd, bpid);
408 DPAA2_SET_FD_OFFSET(fd, offset);
410 temp = rte_pktmbuf_alloc(mp);
412 DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
415 DPAA2_SET_ONLY_FD_BPID(fd, bpid);
416 DPAA2_SET_FD_OFFSET(fd, temp->data_off);
418 DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
419 DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
420 DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
421 DPAA2_RESET_FD_FRC(fd);
422 DPAA2_RESET_FD_CTRL(fd);
423 DPAA2_RESET_FD_FLC(fd);
424 /*Set Scatter gather table and Scatter gather entries*/
425 sgt = (struct qbman_sge *)(
426 (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
427 + DPAA2_GET_FD_OFFSET(fd));
429 for (i = 0; i < mbuf->nb_segs; i++) {
431 /*Resetting the buffer pool id and offset field*/
432 sge->fin_bpid_offset = 0;
433 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
434 DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
435 sge->length = cur_seg->data_len;
436 if (RTE_MBUF_DIRECT(cur_seg)) {
437 /* If we are using an inline SGT in the same buffer,
438  * set the FLE FMT as Frame Data Section
440 if (temp == cur_seg) {
441 DPAA2_SG_SET_FORMAT(sge, qbman_fd_list);
442 DPAA2_SET_FLE_IVP(sge);
444 if (rte_mbuf_refcnt_read(cur_seg) > 1) {
445 /* If refcnt > 1, an invalid bpid is set to ensure
446  * the buffer is not freed by HW
448 DPAA2_SET_FLE_IVP(sge);
449 rte_mbuf_refcnt_update(cur_seg, -1);
451 DPAA2_SET_FLE_BPID(sge,
452 mempool_to_bpid(cur_seg->pool));
455 cur_seg = cur_seg->next;
456 } else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
457 DPAA2_SET_FLE_IVP(sge);
458 cur_seg = cur_seg->next;
460 /* Get owner MBUF from indirect buffer */
461 mi = rte_mbuf_from_indirect(cur_seg);
462 if (rte_mbuf_refcnt_read(mi) > 1) {
463 /* If refcnt > 1, an invalid bpid is set to ensure
464  * the owner buffer is not freed by HW
466 DPAA2_SET_FLE_IVP(sge);
468 DPAA2_SET_FLE_BPID(sge,
469 mempool_to_bpid(mi->pool));
470 rte_mbuf_refcnt_update(mi, 1);
473 cur_seg = cur_seg->next;
474 prev_seg->next = NULL;
475 rte_pktmbuf_free(prev_seg);
478 DPAA2_SG_SET_FINAL(sge, true);
483 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
484 struct qbman_fd *fd, uint16_t bpid) __rte_unused;
486 static void __rte_noinline __rte_hot
487 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
488 struct qbman_fd *fd, uint16_t bpid)
490 DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
492 DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
493 "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
494 mbuf, mbuf->buf_addr, mbuf->data_off,
495 DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
496 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
497 DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
498 if (RTE_MBUF_DIRECT(mbuf)) {
499 if (rte_mbuf_refcnt_read(mbuf) > 1) {
500 DPAA2_SET_FD_IVP(fd);
501 rte_mbuf_refcnt_update(mbuf, -1);
503 } else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
504 DPAA2_SET_FD_IVP(fd);
508 mi = rte_mbuf_from_indirect(mbuf);
509 if (rte_mbuf_refcnt_read(mi) > 1)
510 DPAA2_SET_FD_IVP(fd);
512 rte_mbuf_refcnt_update(mi, 1);
513 rte_pktmbuf_free(mbuf);
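/* Copy-based Tx fallback used when the mbuf does not come from a
 * DPAA2-backed mempool: a buffer is taken from the hardware pool for the
 * given bpid, the payload is memcpy'd into it, a few metadata fields are
 * carried over and a contiguous FD is built from the copy. The caller
 * remains responsible for freeing the original mbuf.
 */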
517 static inline int __rte_hot
518 eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
519 struct qbman_fd *fd, uint16_t bpid)
524 if (rte_dpaa2_mbuf_alloc_bulk(
525 rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
526 DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
529 m = (struct rte_mbuf *)mb;
530 memcpy((char *)m->buf_addr + mbuf->data_off,
531 (void *)((char *)mbuf->buf_addr + mbuf->data_off),
534 /* Copy required fields */
535 m->data_off = mbuf->data_off;
536 m->ol_flags = mbuf->ol_flags;
537 m->packet_type = mbuf->packet_type;
538 m->tx_offload = mbuf->tx_offload;
540 DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);
543 "mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
544 " meta: %d, off: %d, len: %d\n",
547 DPAA2_GET_FD_ADDR(fd),
548 DPAA2_GET_FD_BPID(fd),
549 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
550 DPAA2_GET_FD_OFFSET(fd),
551 DPAA2_GET_FD_LEN(fd));
557 dump_err_pkts(struct dpaa2_queue *dpaa2_q)
559 /* Function to receive error frames for a given device and VQ */
560 struct qbman_result *dq_storage;
561 uint32_t fqid = dpaa2_q->fqid;
562 int ret, num_rx = 0, num_pulled;
563 uint8_t pending, status;
564 struct qbman_swp *swp;
565 const struct qbman_fd *fd;
566 struct qbman_pull_desc pulldesc;
567 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
568 uint32_t lcore_id = rte_lcore_id();
569 void *v_addr, *hw_annot_addr;
570 struct dpaa2_fas *fas;
572 if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
573 ret = dpaa2_affine_qbman_swp();
575 DPAA2_PMD_ERR("Failed to allocate IO portal, tid: %d\n",
580 swp = DPAA2_PER_LCORE_PORTAL;
582 dq_storage = dpaa2_q->q_storage[lcore_id].dq_storage[0];
583 qbman_pull_desc_clear(&pulldesc);
584 qbman_pull_desc_set_fq(&pulldesc, fqid);
585 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
586 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
587 qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
590 if (qbman_swp_pull(swp, &pulldesc)) {
591 DPAA2_PMD_DP_DEBUG("VDQ command is not issued. QBMAN is busy\n");
592 /* Portal was busy, try again */
598 /* Check if the previously issued command is completed. */
599 while (!qbman_check_command_complete(dq_storage))
605 /* Loop until the dq_storage is updated with
608 while (!qbman_check_new_result(dq_storage))
611 /* Check whether the last pull command has expired and
612  * set the condition for loop termination
614 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
616 /* Check for valid frame. */
617 status = qbman_result_DQ_flags(dq_storage);
618 if (unlikely((status &
619 QBMAN_DQ_STAT_VALIDFRAME) == 0))
622 fd = qbman_result_DQ_fd(dq_storage);
623 v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
624 hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
627 DPAA2_PMD_ERR("\n\n[%d] error packet on port[%d]:"
628 " fd_off: %d, fd_err: %x, fas_status: %x",
629 rte_lcore_id(), eth_data->port_id,
630 DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ERR(fd),
632 rte_hexdump(stderr, "Error packet", v_addr,
633 DPAA2_GET_FD_OFFSET(fd) + DPAA2_GET_FD_LEN(fd));
640 dpaa2_q->err_pkts += num_rx;
643 /* This function assumes that the caller keeps the same value of nb_pkts
644  * across calls for a given queue; if that is not the case, it is better to
645  * use the non-prefetch version of the Rx call.
646  * It will return the packets as requested in the previous call, without
647  * honoring the current nb_pkts or the space available in bufs.
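/* The prefetch scheme uses the two per-queue dq_storage arrays in a
 * ping-pong fashion (q_storage->toggle): while frames from the previously
 * issued pull request are being processed, the next volatile dequeue
 * command is already issued on the other storage, hiding the QBMAN
 * dequeue latency behind packet processing.
 */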
650 dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
652 /* Function to receive frames for a given device and VQ */
653 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
654 struct qbman_result *dq_storage, *dq_storage1 = NULL;
655 uint32_t fqid = dpaa2_q->fqid;
656 int ret, num_rx = 0, pull_size;
657 uint8_t pending, status;
658 struct qbman_swp *swp;
659 const struct qbman_fd *fd;
660 struct qbman_pull_desc pulldesc;
661 struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
662 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
663 struct dpaa2_dev_priv *priv = eth_data->dev_private;
665 if (unlikely(dpaa2_enable_err_queue))
666 dump_err_pkts(priv->rx_err_vq);
668 if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
669 ret = dpaa2_affine_qbman_ethrx_swp();
671 DPAA2_PMD_ERR("Failure in affining portal");
676 if (unlikely(!rte_dpaa2_bpid_info &&
677 rte_eal_process_type() == RTE_PROC_SECONDARY))
678 rte_dpaa2_bpid_info = dpaa2_q->bp_array;
680 swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
681 pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
682 if (unlikely(!q_storage->active_dqs)) {
683 q_storage->toggle = 0;
684 dq_storage = q_storage->dq_storage[q_storage->toggle];
685 q_storage->last_num_pkts = pull_size;
686 qbman_pull_desc_clear(&pulldesc);
687 qbman_pull_desc_set_numframes(&pulldesc,
688 q_storage->last_num_pkts);
689 qbman_pull_desc_set_fq(&pulldesc, fqid);
690 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
691 (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
692 if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
693 while (!qbman_check_command_complete(
695 DPAA2_PER_LCORE_ETHRX_DPIO->index)))
697 clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
700 if (qbman_swp_pull(swp, &pulldesc)) {
701 DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
702 " QBMAN is busy (1)\n");
703 /* Portal was busy, try again */
708 q_storage->active_dqs = dq_storage;
709 q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
710 set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
714 dq_storage = q_storage->active_dqs;
715 rte_prefetch0((void *)(size_t)(dq_storage));
716 rte_prefetch0((void *)(size_t)(dq_storage + 1));
718 /* Prepare next pull descriptor. This will give space for the
719 * prefetching done on DQRR entries
721 q_storage->toggle ^= 1;
722 dq_storage1 = q_storage->dq_storage[q_storage->toggle];
723 qbman_pull_desc_clear(&pulldesc);
724 qbman_pull_desc_set_numframes(&pulldesc, pull_size);
725 qbman_pull_desc_set_fq(&pulldesc, fqid);
726 qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
727 (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
729 /* Check if the previously issued command is completed.
730  * The SWP also seems to be shared between the Ethernet driver
731  * and the SEC driver.
733 while (!qbman_check_command_complete(dq_storage))
735 if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
736 clear_swp_active_dqs(q_storage->active_dpio_id);
741 /* Loop until the dq_storage is updated with
744 while (!qbman_check_new_result(dq_storage))
746 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
747 /* Check whether the last pull command has expired and
748  * set the condition for loop termination
750 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
752 /* Check for valid frame. */
753 status = qbman_result_DQ_flags(dq_storage);
754 if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
757 fd = qbman_result_DQ_fd(dq_storage);
759 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
760 if (dpaa2_svr_family != SVR_LX2160A) {
761 const struct qbman_fd *next_fd =
762 qbman_result_DQ_fd(dq_storage + 1);
763 /* Prefetch Annotation address for the parse results */
764 rte_prefetch0(DPAA2_IOVA_TO_VADDR((DPAA2_GET_FD_ADDR(
765 next_fd) + DPAA2_FD_PTA_SIZE + 16)));
769 if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
770 bufs[num_rx] = eth_sg_fd_to_mbuf(fd, eth_data->port_id);
772 bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
773 #if defined(RTE_LIBRTE_IEEE1588)
774 if (bufs[num_rx]->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST) {
776 *dpaa2_timestamp_dynfield(bufs[num_rx]);
780 if (eth_data->dev_conf.rxmode.offloads &
781 RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
782 rte_vlan_strip(bufs[num_rx]);
788 if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
789 while (!qbman_check_command_complete(
790 get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
792 clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
794 /* issue a volatile dequeue command for next pull */
796 if (qbman_swp_pull(swp, &pulldesc)) {
797 DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
798 " QBMAN is busy (2)\n");
803 q_storage->active_dqs = dq_storage1;
804 q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
805 set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
807 dpaa2_q->rx_pkts += num_rx;
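/* Event-mode Rx callbacks: each converts a dequeued FD into an mbuf and
 * fills the rte_event from the template stored in the Rx queue. The
 * parallel variant consumes the DQRR entry immediately; the atomic
 * variant keeps the DQRR entry held and records its index in the mbuf
 * sequence number so it can be consumed later; the ordered variant
 * encodes the ODP id and sequence number for order restoration at
 * enqueue time.
 */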
813 dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
814 const struct qbman_fd *fd,
815 const struct qbman_result *dq,
816 struct dpaa2_queue *rxq,
817 struct rte_event *ev)
819 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
820 DPAA2_FD_PTA_SIZE + 16));
822 ev->flow_id = rxq->ev.flow_id;
823 ev->sub_event_type = rxq->ev.sub_event_type;
824 ev->event_type = RTE_EVENT_TYPE_ETHDEV;
825 ev->op = RTE_EVENT_OP_NEW;
826 ev->sched_type = rxq->ev.sched_type;
827 ev->queue_id = rxq->ev.queue_id;
828 ev->priority = rxq->ev.priority;
830 ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
832 qbman_swp_dqrr_consume(swp, dq);
836 dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused,
837 const struct qbman_fd *fd,
838 const struct qbman_result *dq,
839 struct dpaa2_queue *rxq,
840 struct rte_event *ev)
844 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
845 DPAA2_FD_PTA_SIZE + 16));
847 ev->flow_id = rxq->ev.flow_id;
848 ev->sub_event_type = rxq->ev.sub_event_type;
849 ev->event_type = RTE_EVENT_TYPE_ETHDEV;
850 ev->op = RTE_EVENT_OP_NEW;
851 ev->sched_type = rxq->ev.sched_type;
852 ev->queue_id = rxq->ev.queue_id;
853 ev->priority = rxq->ev.priority;
855 ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
857 dqrr_index = qbman_get_dqrr_idx(dq);
858 *dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
859 DPAA2_PER_LCORE_DQRR_SIZE++;
860 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
861 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
865 dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
866 const struct qbman_fd *fd,
867 const struct qbman_result *dq,
868 struct dpaa2_queue *rxq,
869 struct rte_event *ev)
871 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
872 DPAA2_FD_PTA_SIZE + 16));
874 ev->flow_id = rxq->ev.flow_id;
875 ev->sub_event_type = rxq->ev.sub_event_type;
876 ev->event_type = RTE_EVENT_TYPE_ETHDEV;
877 ev->op = RTE_EVENT_OP_NEW;
878 ev->sched_type = rxq->ev.sched_type;
879 ev->queue_id = rxq->ev.queue_id;
880 ev->priority = rxq->ev.priority;
882 ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
884 *dpaa2_seqn(ev->mbuf) = DPAA2_ENQUEUE_FLAG_ORP;
885 *dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
886 *dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;
888 qbman_swp_dqrr_consume(swp, dq);
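/* Non-prefetch Rx burst: issues volatile dequeue commands of up to
 * dpaa2_dqrr_size frames at a time and loops until nb_pkts frames have
 * been pulled or the frame queue runs empty.
 */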
892 dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
894 /* Function to receive frames for a given device and VQ */
895 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
896 struct qbman_result *dq_storage;
897 uint32_t fqid = dpaa2_q->fqid;
898 int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
899 uint8_t pending, status;
900 struct qbman_swp *swp;
901 const struct qbman_fd *fd;
902 struct qbman_pull_desc pulldesc;
903 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
904 struct dpaa2_dev_priv *priv = eth_data->dev_private;
906 if (unlikely(dpaa2_enable_err_queue))
907 dump_err_pkts(priv->rx_err_vq);
909 if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
910 ret = dpaa2_affine_qbman_swp();
913 "Failed to allocate IO portal, tid: %d\n",
918 swp = DPAA2_PER_LCORE_PORTAL;
921 dq_storage = dpaa2_q->q_storage->dq_storage[0];
922 qbman_pull_desc_clear(&pulldesc);
923 qbman_pull_desc_set_fq(&pulldesc, fqid);
924 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
925 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
927 if (next_pull > dpaa2_dqrr_size) {
928 qbman_pull_desc_set_numframes(&pulldesc,
930 next_pull -= dpaa2_dqrr_size;
932 qbman_pull_desc_set_numframes(&pulldesc, next_pull);
937 if (qbman_swp_pull(swp, &pulldesc)) {
939 "VDQ command is not issued. QBMAN is busy\n");
940 /* Portal was busy, try again */
946 rte_prefetch0((void *)((size_t)(dq_storage + 1)));
947 /* Check if the previously issued command is completed. */
948 while (!qbman_check_command_complete(dq_storage))
954 /* Loop until the dq_storage is updated with
957 while (!qbman_check_new_result(dq_storage))
959 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
960 /* Check whether the last pull command has expired and
961  * set the condition for loop termination
963 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
965 /* Check for valid frame. */
966 status = qbman_result_DQ_flags(dq_storage);
967 if (unlikely((status &
968 QBMAN_DQ_STAT_VALIDFRAME) == 0))
971 fd = qbman_result_DQ_fd(dq_storage);
973 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
974 if (dpaa2_svr_family != SVR_LX2160A) {
975 const struct qbman_fd *next_fd =
976 qbman_result_DQ_fd(dq_storage + 1);
978 /* Prefetch Annotation address for the parse
981 rte_prefetch0((DPAA2_IOVA_TO_VADDR(
982 DPAA2_GET_FD_ADDR(next_fd) +
983 DPAA2_FD_PTA_SIZE + 16)));
987 if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
988 bufs[num_rx] = eth_sg_fd_to_mbuf(fd,
991 bufs[num_rx] = eth_fd_to_mbuf(fd,
994 #if defined(RTE_LIBRTE_IEEE1588)
995 if (bufs[num_rx]->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST) {
997 *dpaa2_timestamp_dynfield(bufs[num_rx]);
1001 if (eth_data->dev_conf.rxmode.offloads &
1002 RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
1003 rte_vlan_strip(bufs[num_rx]);
1010 /* Last VDQ provided all packets and more packets are requested */
1011 } while (next_pull && num_pulled == dpaa2_dqrr_size);
1013 dpaa2_q->rx_pkts += num_rx;
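/* Process Tx confirmation frames: the buffer behind each confirmed FD is
 * released back to its buffer pool through a QBMAN release descriptor,
 * and when IEEE 1588 support is enabled the Tx timestamp is read from the
 * frame annotation of the confirmed frame.
 */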
1018 uint16_t dpaa2_dev_tx_conf(void *queue)
1020 /* Function to receive Tx confirmation frames for a given device and VQ */
1021 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1022 struct qbman_result *dq_storage;
1023 uint32_t fqid = dpaa2_q->fqid;
1024 int ret, num_tx_conf = 0, num_pulled;
1025 uint8_t pending, status;
1026 struct qbman_swp *swp;
1027 const struct qbman_fd *fd, *next_fd;
1028 struct qbman_pull_desc pulldesc;
1029 struct qbman_release_desc releasedesc;
1032 #if defined(RTE_LIBRTE_IEEE1588)
1033 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1034 struct dpaa2_dev_priv *priv = eth_data->dev_private;
1035 struct dpaa2_annot_hdr *annotation;
1037 struct rte_mbuf *mbuf;
1040 if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1041 ret = dpaa2_affine_qbman_swp();
1044 "Failed to allocate IO portal, tid: %d\n",
1049 swp = DPAA2_PER_LCORE_PORTAL;
1052 dq_storage = dpaa2_q->q_storage->dq_storage[0];
1053 qbman_pull_desc_clear(&pulldesc);
1054 qbman_pull_desc_set_fq(&pulldesc, fqid);
1055 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1056 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
1058 qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
1061 if (qbman_swp_pull(swp, &pulldesc)) {
1062 DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
1064 /* Portal was busy, try again */
1070 rte_prefetch0((void *)((size_t)(dq_storage + 1)));
1071 /* Check if the previously issued command is completed. */
1072 while (!qbman_check_command_complete(dq_storage))
1078 /* Loop until the dq_storage is updated with
1079 * new token by QBMAN
1081 while (!qbman_check_new_result(dq_storage))
1083 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
1084 /* Check whether the last pull command has expired and
1085  * set the condition for loop termination
1087 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1089 /* Check for valid frame. */
1090 status = qbman_result_DQ_flags(dq_storage);
1091 if (unlikely((status &
1092 QBMAN_DQ_STAT_VALIDFRAME) == 0))
1095 fd = qbman_result_DQ_fd(dq_storage);
1097 next_fd = qbman_result_DQ_fd(dq_storage + 1);
1098 /* Prefetch Annotation address for the parse results */
1099 rte_prefetch0((void *)(size_t)
1100 (DPAA2_GET_FD_ADDR(next_fd) +
1101 DPAA2_FD_PTA_SIZE + 16));
1103 bpid = DPAA2_GET_FD_BPID(fd);
1105 /* Create a release descriptor required for releasing
1106 * buffers into QBMAN
1108 qbman_release_desc_clear(&releasedesc);
1109 qbman_release_desc_set_bpid(&releasedesc, bpid);
1111 buf = DPAA2_GET_FD_ADDR(fd);
1112 /* feed them to bman */
1114 ret = qbman_swp_release(swp, &releasedesc,
1116 } while (ret == -EBUSY);
1121 #if defined(RTE_LIBRTE_IEEE1588)
1122 v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
1123 mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
1124 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
1126 if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
1127 annotation = (struct dpaa2_annot_hdr *)((size_t)
1128 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
1130 priv->tx_timestamp = annotation->word2;
1135 /* Last VDQ provided all packets and more packets are requested */
1136 } while (num_pulled == dpaa2_dqrr_size);
1138 dpaa2_q->rx_pkts += num_tx_conf;
1143 /* Configure the egress frame annotation for timestamp update */
1144 static void enable_tx_tstamp(struct qbman_fd *fd)
1146 struct dpaa2_faead *fd_faead;
1148 /* Set frame annotation status field as valid */
1149 (fd)->simple.frc |= DPAA2_FD_FRC_FASV;
1151 /* Set frame annotation egress action descriptor as valid */
1152 (fd)->simple.frc |= DPAA2_FD_FRC_FAEADV;
1154 /* Set Annotation Length as 128B */
1155 (fd)->simple.ctrl |= DPAA2_FD_CTRL_ASAL;
1157 /* enable update of confirmation frame annotation */
1158 fd_faead = (struct dpaa2_faead *)((size_t)
1159 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
1160 DPAA2_FD_PTA_SIZE + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET);
1161 fd_faead->ctrl = DPAA2_ANNOT_FAEAD_A2V | DPAA2_ANNOT_FAEAD_UPDV |
1162 DPAA2_ANNOT_FAEAD_UPD;
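/* A minimal usage sketch (not part of this driver) of how an application
 * reaches the Rx/Tx burst callbacks below through the generic ethdev API;
 * the port id, queue id and burst size are illustrative only:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *	uint16_t nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);
 *
 *	while (nb_tx < nb_rx)
 *		rte_pktmbuf_free(pkts[nb_tx++]);
 */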
1166 * Callback to handle sending packets through WRIOP based interface
1169 dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1171 /* Function to transmit the frames to the given device and VQ */
1172 uint32_t loop, retry_count;
1174 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1175 struct rte_mbuf *mi;
1176 uint32_t frames_to_send;
1177 struct rte_mempool *mp;
1178 struct qbman_eq_desc eqdesc;
1179 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1180 struct qbman_swp *swp;
1181 uint16_t num_tx = 0;
1183 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1184 struct dpaa2_dev_priv *priv = eth_data->dev_private;
1185 uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1186 struct rte_mbuf **orig_bufs = bufs;
1188 if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1189 ret = dpaa2_affine_qbman_swp();
1192 "Failed to allocate IO portal, tid: %d\n",
1197 swp = DPAA2_PER_LCORE_PORTAL;
1199 DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
1200 eth_data, dpaa2_q->fqid);
1202 #ifdef RTE_LIBRTE_IEEE1588
1203 /* The IEEE 1588 driver needs a pointer to the Tx confirmation queue
1204  * corresponding to the last transmitted packet, for reading
1207 if ((*bufs)->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
1208 priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
1209 dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
1210 priv->tx_timestamp = 0;
1214 /*Prepare enqueue descriptor*/
1215 qbman_eq_desc_clear(&eqdesc);
1216 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1217 qbman_eq_desc_set_fq(&eqdesc, dpaa2_q->fqid);
1219 /*Clear the unused FD fields before sending*/
1221 /*Check if the queue is congested*/
1223 while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1225 /* Retry for some time before giving up */
1226 if (retry_count > CONG_RETRY_COUNT)
1230 frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1231 dpaa2_eqcr_size : nb_pkts;
1233 for (loop = 0; loop < frames_to_send; loop++) {
1234 if (*dpaa2_seqn(*bufs)) {
1235 uint8_t dqrr_index = *dpaa2_seqn(*bufs) - 1;
1237 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
1239 DPAA2_PER_LCORE_DQRR_SIZE--;
1240 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1241 *dpaa2_seqn(*bufs) = DPAA2_INVALID_MBUF_SEQN;
1244 if (likely(RTE_MBUF_DIRECT(*bufs))) {
1246 /* Check the basic scenario and set
1247  * the FD appropriately here.
1249 if (likely(mp && mp->ops_index ==
1250 priv->bp_list->dpaa2_ops_index &&
1251 (*bufs)->nb_segs == 1 &&
1252 rte_mbuf_refcnt_read((*bufs)) == 1)) {
1253 if (unlikely(((*bufs)->ol_flags
1254 & RTE_MBUF_F_TX_VLAN) ||
1255 (eth_data->dev_conf.txmode.offloads
1256 & RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
1257 ret = rte_vlan_insert(bufs);
1261 DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1262 &fd_arr[loop], mempool_to_bpid(mp));
1264 #ifdef RTE_LIBRTE_IEEE1588
1265 enable_tx_tstamp(&fd_arr[loop]);
1270 mi = rte_mbuf_from_indirect(*bufs);
1274 if (unlikely(RTE_MBUF_HAS_EXTBUF(*bufs))) {
1275 if (unlikely((*bufs)->nb_segs > 1)) {
1276 if (eth_mbuf_to_sg_fd(*bufs,
1281 eth_mbuf_to_fd(*bufs,
1285 #ifdef RTE_LIBRTE_IEEE1588
1286 enable_tx_tstamp(&fd_arr[loop]);
1291 /* Not a hw_pkt pool allocated frame */
1292 if (unlikely(!mp || !priv->bp_list)) {
1293 DPAA2_PMD_ERR("Err: No buffer pool attached");
1297 if (unlikely(((*bufs)->ol_flags & RTE_MBUF_F_TX_VLAN) ||
1298 (eth_data->dev_conf.txmode.offloads
1299 & RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
1300 int ret = rte_vlan_insert(bufs);
1304 if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1305 DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1306 /* alloc should be from the default buffer pool
1307 * attached to this interface
1309 bpid = priv->bp_list->buf_pool.bpid;
1311 if (unlikely((*bufs)->nb_segs > 1)) {
1312 DPAA2_PMD_ERR("S/G support not added"
1313 " for non hw offload buffer");
1316 if (eth_copy_mbuf_to_fd(*bufs,
1317 &fd_arr[loop], bpid)) {
1320 /* free the original packet */
1321 rte_pktmbuf_free(*bufs);
1323 bpid = mempool_to_bpid(mp);
1324 if (unlikely((*bufs)->nb_segs > 1)) {
1325 if (eth_mbuf_to_sg_fd(*bufs,
1330 eth_mbuf_to_fd(*bufs,
1331 &fd_arr[loop], bpid);
1334 #ifdef RTE_LIBRTE_IEEE1588
1335 enable_tx_tstamp(&fd_arr[loop]);
1342 while (loop < frames_to_send) {
1343 ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1344 &fd_arr[loop], &flags[loop],
1345 frames_to_send - loop);
1346 if (unlikely(ret < 0)) {
1348 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1362 dpaa2_q->tx_pkts += num_tx;
1365 while (loop < num_tx) {
1366 if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
1367 rte_pktmbuf_free(*orig_bufs);
1375 /* send any already prepared fd */
1381 ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1385 if (unlikely(ret < 0)) {
1387 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1397 dpaa2_q->tx_pkts += num_tx;
1400 while (loop < num_tx) {
1401 if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
1402 rte_pktmbuf_free(*orig_bufs);
1411 dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci)
1413 struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1414 struct qbman_fd *fd;
1417 fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
1419 /* Setting port id does not matter as we are going to free the mbuf */
1420 m = eth_fd_to_mbuf(fd, 0);
1421 rte_pktmbuf_free(m);
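/* Build an enqueue descriptor for order-restoration (ORP) traffic from
 * the sequence number stored in the mbuf: when loose ordering is disabled
 * an enqueue-response entry is reserved so completion can be tracked,
 * otherwise only the ORP id and sequence number are programmed. Frames
 * held in the DQRR (atomic scheduling) instead use DCA so the held DQRR
 * entry is consumed as part of the enqueue.
 */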
1425 dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
1427 struct qbman_eq_desc *eqdesc)
1429 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1430 struct dpaa2_dev_priv *priv = eth_data->dev_private;
1431 struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1432 struct eqresp_metadata *eqresp_meta;
1433 uint16_t orpid, seqnum;
1436 qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid);
1438 if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
1439 orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
1440 DPAA2_EQCR_OPRID_SHIFT;
1441 seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
1442 DPAA2_EQCR_SEQNUM_SHIFT;
1444 if (!priv->en_loose_ordered) {
1445 qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
1446 qbman_eq_desc_set_response(eqdesc, (uint64_t)
1447 DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
1448 dpio_dev->eqresp_pi]), 1);
1449 qbman_eq_desc_set_token(eqdesc, 1);
1451 eqresp_meta = &dpio_dev->eqresp_meta[
1452 dpio_dev->eqresp_pi];
1453 eqresp_meta->dpaa2_q = dpaa2_q;
1454 eqresp_meta->mp = m->pool;
1456 dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
1457 dpio_dev->eqresp_pi++ :
1458 (dpio_dev->eqresp_pi = 0);
1460 qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
1463 dq_idx = *dpaa2_seqn(m) - 1;
1464 qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
1465 DPAA2_PER_LCORE_DQRR_SIZE--;
1466 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
1468 *dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
1472 dpaa2_dev_tx_multi_txq_ordered(void **queue,
1473 struct rte_mbuf **bufs, uint16_t nb_pkts)
1475 /* Function to transmit the frames to their respective queues. */
1476 uint32_t loop, retry_count;
1478 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1479 uint32_t frames_to_send;
1480 struct rte_mempool *mp;
1481 struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1482 struct dpaa2_queue *dpaa2_q[MAX_TX_RING_SLOTS];
1483 struct qbman_swp *swp;
1485 struct rte_mbuf *mi;
1486 struct rte_eth_dev_data *eth_data;
1487 struct dpaa2_dev_priv *priv;
1488 struct dpaa2_queue *order_sendq;
1490 if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1491 ret = dpaa2_affine_qbman_swp();
1494 "Failed to allocate IO portal, tid: %d\n",
1499 swp = DPAA2_PER_LCORE_PORTAL;
1501 for (loop = 0; loop < nb_pkts; loop++) {
1502 dpaa2_q[loop] = (struct dpaa2_queue *)queue[loop];
1503 eth_data = dpaa2_q[loop]->eth_data;
1504 priv = eth_data->dev_private;
1505 qbman_eq_desc_clear(&eqdesc[loop]);
1506 if (*dpaa2_seqn(*bufs) && priv->en_ordered) {
1507 order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
1508 dpaa2_set_enqueue_descriptor(order_sendq,
1512 qbman_eq_desc_set_no_orp(&eqdesc[loop],
1513 DPAA2_EQ_RESP_ERR_FQ);
1514 qbman_eq_desc_set_fq(&eqdesc[loop],
1515 dpaa2_q[loop]->fqid);
1519 while (qbman_result_SCN_state(dpaa2_q[loop]->cscn)) {
1521 /* Retry for some time before giving up */
1522 if (retry_count > CONG_RETRY_COUNT)
1526 if (likely(RTE_MBUF_DIRECT(*bufs))) {
1528 /* Check the basic scenario and set
1529  * the FD appropriately here.
1531 if (likely(mp && mp->ops_index ==
1532 priv->bp_list->dpaa2_ops_index &&
1533 (*bufs)->nb_segs == 1 &&
1534 rte_mbuf_refcnt_read((*bufs)) == 1)) {
1535 if (unlikely((*bufs)->ol_flags
1536 & RTE_MBUF_F_TX_VLAN)) {
1537 ret = rte_vlan_insert(bufs);
1541 DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1543 mempool_to_bpid(mp));
1549 mi = rte_mbuf_from_indirect(*bufs);
1552 /* Not a hw_pkt pool allocated frame */
1553 if (unlikely(!mp || !priv->bp_list)) {
1554 DPAA2_PMD_ERR("Err: No buffer pool attached");
1558 if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1559 DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1560 /* alloc should be from the default buffer pool
1561 * attached to this interface
1563 bpid = priv->bp_list->buf_pool.bpid;
1565 if (unlikely((*bufs)->nb_segs > 1)) {
1567 "S/G not supported for non-HW offload buffer");
1570 if (eth_copy_mbuf_to_fd(*bufs,
1571 &fd_arr[loop], bpid)) {
1574 /* free the original packet */
1575 rte_pktmbuf_free(*bufs);
1577 bpid = mempool_to_bpid(mp);
1578 if (unlikely((*bufs)->nb_segs > 1)) {
1579 if (eth_mbuf_to_sg_fd(*bufs,
1585 eth_mbuf_to_fd(*bufs,
1586 &fd_arr[loop], bpid);
1595 frames_to_send = loop;
1597 while (loop < frames_to_send) {
1598 ret = qbman_swp_enqueue_multiple_desc(swp, &eqdesc[loop],
1600 frames_to_send - loop);
1601 if (likely(ret > 0)) {
1605 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1613 /* Callback to handle sending ordered packets through WRIOP based interface */
1615 dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1617 /* Function to transmit the frames to given device and VQ*/
1618 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1619 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1620 struct dpaa2_dev_priv *priv = eth_data->dev_private;
1621 struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
1622 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1623 struct rte_mbuf *mi;
1624 struct rte_mempool *mp;
1625 struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1626 struct qbman_swp *swp;
1627 uint32_t frames_to_send, num_free_eq_desc;
1628 uint32_t loop, retry_count;
1630 uint16_t num_tx = 0;
1633 if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1634 ret = dpaa2_affine_qbman_swp();
1637 "Failed to allocate IO portal, tid: %d\n",
1642 swp = DPAA2_PER_LCORE_PORTAL;
1644 DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
1645 eth_data, dpaa2_q->fqid);
1647 /* This also handles normal and atomic queues, as any type
1648  * of packet can be enqueued when ordered queues are being used.
1651 /*Check if the queue is congested*/
1653 while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1655 /* Retry for some time before giving up */
1656 if (retry_count > CONG_RETRY_COUNT)
1660 frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1661 dpaa2_eqcr_size : nb_pkts;
1663 if (!priv->en_loose_ordered) {
1664 if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
1665 num_free_eq_desc = dpaa2_free_eq_descriptors();
1666 if (num_free_eq_desc < frames_to_send)
1667 frames_to_send = num_free_eq_desc;
1671 for (loop = 0; loop < frames_to_send; loop++) {
1672 /*Prepare enqueue descriptor*/
1673 qbman_eq_desc_clear(&eqdesc[loop]);
1675 if (*dpaa2_seqn(*bufs)) {
1676 /* Use only queue 0 for Tx of atomic/ordered packets,
1677  * as packets can otherwise get reordered
1678  * when being transmitted out of the interface
1680 dpaa2_set_enqueue_descriptor(order_sendq,
1684 qbman_eq_desc_set_no_orp(&eqdesc[loop],
1685 DPAA2_EQ_RESP_ERR_FQ);
1686 qbman_eq_desc_set_fq(&eqdesc[loop],
1690 if (likely(RTE_MBUF_DIRECT(*bufs))) {
1692 /* Check the basic scenario and set
1693  * the FD appropriately here.
1695 if (likely(mp && mp->ops_index ==
1696 priv->bp_list->dpaa2_ops_index &&
1697 (*bufs)->nb_segs == 1 &&
1698 rte_mbuf_refcnt_read((*bufs)) == 1)) {
1699 if (unlikely((*bufs)->ol_flags
1700 & RTE_MBUF_F_TX_VLAN)) {
1701 ret = rte_vlan_insert(bufs);
1705 DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1707 mempool_to_bpid(mp));
1712 mi = rte_mbuf_from_indirect(*bufs);
1715 /* Not a hw_pkt pool allocated frame */
1716 if (unlikely(!mp || !priv->bp_list)) {
1717 DPAA2_PMD_ERR("Err: No buffer pool attached");
1721 if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1722 DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1723 /* alloc should be from the default buffer pool
1724 * attached to this interface
1726 bpid = priv->bp_list->buf_pool.bpid;
1728 if (unlikely((*bufs)->nb_segs > 1)) {
1730 "S/G not supported for non-HW offload buffer");
1733 if (eth_copy_mbuf_to_fd(*bufs,
1734 &fd_arr[loop], bpid)) {
1737 /* free the original packet */
1738 rte_pktmbuf_free(*bufs);
1740 bpid = mempool_to_bpid(mp);
1741 if (unlikely((*bufs)->nb_segs > 1)) {
1742 if (eth_mbuf_to_sg_fd(*bufs,
1748 eth_mbuf_to_fd(*bufs,
1749 &fd_arr[loop], bpid);
1757 while (loop < frames_to_send) {
1758 ret = qbman_swp_enqueue_multiple_desc(swp,
1759 &eqdesc[loop], &fd_arr[loop],
1760 frames_to_send - loop);
1761 if (unlikely(ret < 0)) {
1763 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1777 dpaa2_q->tx_pkts += num_tx;
1781 /* send any already prepared fd */
1787 ret = qbman_swp_enqueue_multiple_desc(swp,
1788 &eqdesc[loop], &fd_arr[i], loop - i);
1789 if (unlikely(ret < 0)) {
1791 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1801 dpaa2_q->tx_pkts += num_tx;
1806 * Dummy DPDK callback for TX.
1808 * This function is used to temporarily replace the real callback during
1809 * unsafe control operations on the queue, or in case of error.
1812 * Generic pointer to TX queue structure.
1814 * Packets to transmit.
1816 * Number of packets in array.
1819 * Number of packets successfully transmitted (<= pkts_n).
1822 dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1830 #if defined(RTE_TOOLCHAIN_GCC)
1831 #pragma GCC diagnostic push
1832 #pragma GCC diagnostic ignored "-Wcast-qual"
1833 #elif defined(RTE_TOOLCHAIN_CLANG)
1834 #pragma clang diagnostic push
1835 #pragma clang diagnostic ignored "-Wcast-qual"
1838 /* This function loops back all the received packets. */
1840 dpaa2_dev_loopback_rx(void *queue,
1841 struct rte_mbuf **bufs __rte_unused,
1844 /* Function to receive frames for a given device and VQ */
1845 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1846 struct qbman_result *dq_storage, *dq_storage1 = NULL;
1847 uint32_t fqid = dpaa2_q->fqid;
1848 int ret, num_rx = 0, num_tx = 0, pull_size;
1849 uint8_t pending, status;
1850 struct qbman_swp *swp;
1851 struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
1852 struct qbman_pull_desc pulldesc;
1853 struct qbman_eq_desc eqdesc;
1854 struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
1855 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1856 struct dpaa2_dev_priv *priv = eth_data->dev_private;
1857 struct dpaa2_queue *tx_q = priv->tx_vq[0];
1858 /* TODO: currently only the 1st Tx queue is used for loopback */
1860 if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
1861 ret = dpaa2_affine_qbman_ethrx_swp();
1863 DPAA2_PMD_ERR("Failure in affining portal");
1867 swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
1868 pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
1869 if (unlikely(!q_storage->active_dqs)) {
1870 q_storage->toggle = 0;
1871 dq_storage = q_storage->dq_storage[q_storage->toggle];
1872 q_storage->last_num_pkts = pull_size;
1873 qbman_pull_desc_clear(&pulldesc);
1874 qbman_pull_desc_set_numframes(&pulldesc,
1875 q_storage->last_num_pkts);
1876 qbman_pull_desc_set_fq(&pulldesc, fqid);
1877 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1878 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
1879 if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1880 while (!qbman_check_command_complete(
1882 DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1884 clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1887 if (qbman_swp_pull(swp, &pulldesc)) {
1889 "VDQ command not issued. QBMAN busy\n");
1890 /* Portal was busy, try again */
1895 q_storage->active_dqs = dq_storage;
1896 q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
1897 set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
1901 dq_storage = q_storage->active_dqs;
1902 rte_prefetch0((void *)(size_t)(dq_storage));
1903 rte_prefetch0((void *)(size_t)(dq_storage + 1));
1905 /* Prepare next pull descriptor. This will give space for the
1906 * prefetching done on DQRR entries
1908 q_storage->toggle ^= 1;
1909 dq_storage1 = q_storage->dq_storage[q_storage->toggle];
1910 qbman_pull_desc_clear(&pulldesc);
1911 qbman_pull_desc_set_numframes(&pulldesc, pull_size);
1912 qbman_pull_desc_set_fq(&pulldesc, fqid);
1913 qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
1914 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
1916 /*Prepare enqueue descriptor*/
1917 qbman_eq_desc_clear(&eqdesc);
1918 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1919 qbman_eq_desc_set_response(&eqdesc, 0, 0);
1920 qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);
1922 /* Check if the previously issued command is completed.
1923  * The SWP also seems to be shared between the Ethernet driver
1924  * and the SEC driver.
1926 while (!qbman_check_command_complete(dq_storage))
1928 if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
1929 clear_swp_active_dqs(q_storage->active_dpio_id);
1934 /* Loop until the dq_storage is updated with
1935 * new token by QBMAN
1937 while (!qbman_check_new_result(dq_storage))
1939 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
1940 /* Check whether the last pull command has expired and
1941  * set the condition for loop termination
1943 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1945 /* Check for valid frame. */
1946 status = qbman_result_DQ_flags(dq_storage);
1947 if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
1950 fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage);
1956 while (num_tx < num_rx) {
1957 num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
1958 &fd[num_tx], 0, num_rx - num_tx);
1961 if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1962 while (!qbman_check_command_complete(
1963 get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1965 clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1967 /* issue a volatile dequeue command for next pull */
1969 if (qbman_swp_pull(swp, &pulldesc)) {
1970 DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
1971 " QBMAN is busy (2)\n");
1976 q_storage->active_dqs = dq_storage1;
1977 q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
1978 set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
1980 dpaa2_q->rx_pkts += num_rx;
1981 dpaa2_q->tx_pkts += num_tx;
1985 #if defined(RTE_TOOLCHAIN_GCC)
1986 #pragma GCC diagnostic pop
1987 #elif defined(RTE_TOOLCHAIN_CLANG)
1988 #pragma clang diagnostic pop