/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016-2018 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_dev.h>

#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
24 #include "dpaa2_pmd_logs.h"
25 #include "dpaa2_ethdev.h"
26 #include "base/dpaa2_hw_dpni_annot.h"

static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
			struct dpaa2_annot_hdr *annotation);

#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
	DPAA2_SET_FD_FRC(_fd, 0); \
	DPAA2_RESET_FD_CTRL(_fd); \
	DPAA2_RESET_FD_FLC(_fd); \
} while (0)
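
/*
 * Illustrative usage (a sketch, not part of the original sources): for a
 * single-segment direct mbuf "m" backed by a DPAA2 mempool, the macro
 * fills a contiguous frame descriptor in place:
 *
 *	struct qbman_fd fd;
 *
 *	DPAA2_MBUF_TO_CONTIG_FD(m, &fd, mempool_to_bpid(m->pool));
 *
 * FRC, CTRL and FLC are cleared so that stale state from a previous use
 * of the FD is not interpreted by hardware.
 */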

static inline void __attribute__((hot))
dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd)
{
	struct dpaa2_annot_hdr *annotation;
	uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);

	m->packet_type = RTE_PTYPE_UNKNOWN;
	switch (frc) {
	case DPAA2_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA2_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA2_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA2_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
		break;
	case DPAA2_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
		break;
	default:
		m->packet_type = dpaa2_dev_rx_parse_slow(m,
		  (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			 + DPAA2_FD_PTA_SIZE));
	}
	m->hash.rss = fd->simple.flc_hi;
	m->ol_flags |= PKT_RX_RSS_HASH;

	if (dpaa2_enable_ts) {
		annotation = (struct dpaa2_annot_hdr *)
			((size_t)DPAA2_IOVA_TO_VADDR(
			DPAA2_GET_FD_ADDR(fd)) + DPAA2_FD_PTA_SIZE);
		m->timestamp = annotation->word2;
		m->ol_flags |= PKT_RX_TIMESTAMP;
		DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "", m->timestamp);
	}

	DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
		"ol_flags =0x%" PRIx64 "",
		frc, m->packet_type, m->ol_flags);
}
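
/*
 * Slow-path parser: walks the hardware annotation words attached to the
 * frame (L2 indications in word3, L3/L4 indications in word4, frame
 * status in word8) and builds the packet type bit by bit, extracting the
 * VLAN TCI and checksum validation status along the way.
 */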

static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
			struct dpaa2_annot_hdr *annotation)
{
	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
	uint16_t *vlan_tci;

	DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
			"(4)=0x%" PRIx64 "\t",
			annotation->word3, annotation->word4);

	if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= PKT_RX_VLAN;
		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ;
		pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
	}

	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
		goto parse_done;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
			     L3_IPV4_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV4;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
			L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;

	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
		  L3_IPV6_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV6;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
		    L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
	    L3_IP_1_MORE_FRAGMENT |
	    L3_IP_N_FIRST_FRAGMENT |
	    L3_IP_N_MORE_FRAGMENT)) {
		pkt_type |= RTE_PTYPE_L4_FRAG;
		goto parse_done;
	} else {
		pkt_type |= RTE_PTYPE_L4_NONFRAG;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_UDP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_TCP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_SCTP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_ICMP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
		pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
	return pkt_type;
}
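
/*
 * Fast-path parser for non-LX2 platforms: checksum errors are taken from
 * the frame status word, and a handful of common L3/L4 combinations are
 * matched directly against annotation word4. Any bit set in word3 outside
 * the mask below indicates a less common layout (VLAN, ARP, fragments,
 * ...) and forces the slow path above.
 */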
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
{
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
			   annotation->word4);

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

	mbuf->ol_flags |= PKT_RX_TIMESTAMP;
	mbuf->timestamp = annotation->word2;
	DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "", mbuf->timestamp);

	/* Check detailed parsing requirement */
	if (annotation->word3 & 0x7FFFFC3FFFF)
		return dpaa2_dev_rx_parse_slow(mbuf, annotation);

	/* Return some common types from parse processing */
	switch (annotation->word4) {
	case DPAA2_L3_IPv4:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
	case DPAA2_L3_IPv6:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
	case DPAA2_L3_IPv4_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv4_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_UDP;
	case DPAA2_L3_IPv6_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv6_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_UDP;
	default:
		break;
	}

	return dpaa2_dev_rx_parse_slow(mbuf, annotation);
}
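
/*
 * Convert a scatter/gather frame descriptor back into a segmented mbuf
 * chain. The FD points at a scatter gather table (SGT); each entry (FLE)
 * yields one segment, and the inline mbuf metadata is recovered from the
 * buffer pool's meta_data_size. The buffer holding the SGT itself is
 * released once the chain has been rebuilt.
 */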
static inline struct rte_mbuf *__attribute__((hot))
eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_sge *sgt, *sge;
	size_t sg_addr, fd_addr;
	int i = 0;
	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

	fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	/* Get Scatter gather table address */
	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));

	sge = &sgt[i++];
	sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

	/* First Scatter gather entry */
	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	/* Prepare all the metadata for first segment */
	first_seg->buf_addr = (uint8_t *)sg_addr;
	first_seg->ol_flags = 0;
	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
	first_seg->data_len = sge->length & 0x1FFFF;
	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
	first_seg->nb_segs = 1;
	first_seg->next = NULL;
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(first_seg, fd);
	else
		first_seg->packet_type = dpaa2_dev_rx_parse(first_seg,
			(void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			 + DPAA2_FD_PTA_SIZE));

	rte_mbuf_refcnt_set(first_seg, 1);
	cur_seg = first_seg;
	while (!DPAA2_SG_IS_FINAL(sge)) {
		sge = &sgt[i++];
		sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
				DPAA2_GET_FLE_ADDR(sge));
		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
		next_seg->buf_addr = (uint8_t *)sg_addr;
		next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
		next_seg->data_len = sge->length & 0x1FFFF;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(next_seg, 1);
		cur_seg->next = next_seg;
		next_seg->next = NULL;
		cur_seg = next_seg;
	}
	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	rte_mbuf_refcnt_set(temp, 1);
	rte_pktmbuf_free_seg(temp);

	return (void *)first_seg;
}
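
/*
 * Convert a contiguous frame descriptor into a single-segment mbuf. The
 * mbuf header lives inline in the buffer, in front of the data, at a
 * distance given by the pool's meta_data_size.
 */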
static inline struct rte_mbuf *__attribute__((hot))
eth_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* need to repopulate some of the fields,
	 * as they may have changed in last transmission
	 */
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
	mbuf->pkt_len = mbuf->data_len;
	mbuf->next = NULL;
	rte_mbuf_refcnt_set(mbuf, 1);

	/* Parse the packet */
	/* parse results for LX2 are there in FRC field of FD.
	 * For other DPAA2 platforms, parse results are after
	 * the private - sw annotation area
	 */
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(mbuf, fd);
	else
		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf,
			(void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			 + DPAA2_FD_PTA_SIZE));

	DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	return mbuf;
}
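
/*
 * Build a scatter/gather FD from a multi-segment mbuf for transmission. A
 * fresh buffer is taken from the mbuf's pool to hold the SGT. Reference
 * counting decides buffer ownership: when a segment's refcnt is above one
 * (or an indirect segment's owner is still referenced), the FLE gets an
 * invalid bpid so hardware will not free the underlying buffer.
 */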
static int __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		  struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
	struct qbman_sge *sgt, *sge = NULL;
	int i;

	temp = rte_pktmbuf_alloc(mbuf->pool);
	if (temp == NULL) {
		DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
		return -ENOMEM;
	}

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
	DPAA2_SET_ONLY_FD_BPID(fd, bpid);
	DPAA2_SET_FD_OFFSET(fd, temp->data_off);
	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
	DPAA2_RESET_FD_FRC(fd);
	DPAA2_RESET_FD_CTRL(fd);
	/*Set Scatter gather table and Scatter gather entries*/
	sgt = (struct qbman_sge *)(
			(size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_GET_FD_OFFSET(fd));

	for (i = 0; i < mbuf->nb_segs; i++) {
		sge = &sgt[i];
		/*Resetting the buffer pool id and offset field*/
		sge->fin_bpid_offset = 0;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
		DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
		sge->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			if (rte_mbuf_refcnt_read(cur_seg) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
				rte_mbuf_refcnt_update(cur_seg, -1);
			} else
				DPAA2_SET_FLE_BPID(sge,
						mempool_to_bpid(cur_seg->pool));
			cur_seg = cur_seg->next;
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * owner buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
			} else {
				DPAA2_SET_FLE_BPID(sge,
						   mempool_to_bpid(mi->pool));
				rte_mbuf_refcnt_update(mi, 1);
			}
			prev_seg = cur_seg;
			cur_seg = cur_seg->next;
			prev_seg->next = NULL;
			rte_pktmbuf_free(prev_seg);
		}
	}
	DPAA2_SG_SET_FINAL(sge, true);
	return 0;
}
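
/*
 * Contiguous-FD variant of the conversion above, for single-segment
 * mbufs. The same refcount rules apply: a still-referenced buffer gets an
 * invalid bpid in the FD so that hardware does not release it after
 * transmission.
 */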
static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid) __attribute__((unused));

static void __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid)
{
	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);

	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(mbuf, -1);
		}
	} else {
		struct rte_mbuf *mi;

		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1)
			DPAA2_SET_FD_IVP(fd);
		else
			rte_mbuf_refcnt_update(mi, 1);
		rte_pktmbuf_free(mbuf);
	}
}
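
/*
 * Fallback for mbufs that do not come from a DPAA2-managed pool: the
 * payload is copied into a freshly allocated buffer from the interface's
 * hardware pool and the FD is built over that copy.
 */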
static inline int __attribute__((hot))
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
		    struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *m;
	void *mb = NULL;

	if (rte_dpaa2_mbuf_alloc_bulk(
		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
		DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
		return -1;
	}
	m = (struct rte_mbuf *)mb;
	memcpy((char *)m->buf_addr + mbuf->data_off,
	       (void *)((char *)mbuf->buf_addr + mbuf->data_off),
		mbuf->pkt_len);

	/* Copy required fields */
	m->data_off = mbuf->data_off;
	m->ol_flags = mbuf->ol_flags;
	m->packet_type = mbuf->packet_type;
	m->tx_offload = mbuf->tx_offload;

	DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);

	DPAA2_PMD_DP_DEBUG(
		"mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
		" meta: %d, off: %d, len: %d\n",
		(void *)mbuf,
		mbuf->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}
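
/*
 * Rx burst with buffer prefetching. Two DQ storage rings are used in a
 * ping-pong fashion: while frames from the previously issued volatile
 * dequeue (VDQ) command are consumed from one storage, the next VDQ
 * command is already issued on the other, hiding the pull latency.
 */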
/* This function assumes that the caller will keep the same value of
 * nb_pkts across calls per queue. If that is not the case, better use the
 * non-prefetch version of the rx call.
 * It will return the packets as requested in the previous call without
 * honoring the current nb_pkts or bufs space.
 */
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to receive frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, pull_size;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd, *next_fd;
	struct qbman_pull_desc pulldesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;

	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
		ret = dpaa2_affine_qbman_ethrx_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}

	if (unlikely(!rte_dpaa2_bpid_info &&
		     rte_eal_process_type() == RTE_PROC_SECONDARY))
		rte_dpaa2_bpid_info = dpaa2_q->bp_array;

	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(
			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
						   " QBMAN is busy (1)\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare next pull descriptor. This will give space for the
	 * prefetching done on DQRR entries
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/* Check if the previous issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd = qbman_result_DQ_fd(dq_storage);

		if (dpaa2_svr_family != SVR_LX2160A) {
			next_fd = qbman_result_DQ_fd(dq_storage + 1);
			/* Prefetch Annotation address for the parse results */
			rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(
				      next_fd) + DPAA2_FD_PTA_SIZE + 16));
		}

		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
			bufs[num_rx] = eth_sg_fd_to_mbuf(fd);
		else
			bufs[num_rx] = eth_fd_to_mbuf(fd);
		bufs[num_rx]->port = eth_data->port_id;

		if (eth_data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_VLAN_STRIP)
			rte_vlan_strip(bufs[num_rx]);

		dq_storage++;
		num_rx++;
	} while (pending);

	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
	}
	/* issue a volatile dequeue command for next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
					   " QBMAN is busy (2)\n");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);

	dpaa2_q->rx_pkts += num_rx;

	return num_rx;
}
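
/*
 * The three event callbacks below translate frames delivered through the
 * DQRR (eventdev push mode) into rte_event/mbuf pairs. They differ only
 * in scheduling context handling: parallel frames are consumed
 * immediately, atomic frames keep their DQRR entry held until released
 * (the index is parked in mbuf->seqn), and ordered frames record the ODP
 * id and sequence number in mbuf->seqn for later order restoration on Tx.
 */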
void __attribute__((hot))
dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd);

	qbman_swp_dqrr_consume(swp, dq);
}

void __attribute__((hot))
dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
			       const struct qbman_fd *fd,
			       const struct qbman_result *dq,
			       struct dpaa2_queue *rxq,
			       struct rte_event *ev)
{
	uint8_t dqrr_index;

	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd);

	dqrr_index = qbman_get_dqrr_idx(dq);
	ev->mbuf->seqn = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}

void __attribute__((hot))
dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
				const struct qbman_fd *fd,
				const struct qbman_result *dq,
				struct dpaa2_queue *rxq,
				struct rte_event *ev)
{
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd);

	ev->mbuf->seqn = DPAA2_ENQUEUE_FLAG_ORP;
	ev->mbuf->seqn |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
	ev->mbuf->seqn |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;

	qbman_swp_dqrr_consume(swp, dq);
}

/*
 * Callback to handle sending packets through WRIOP based interface
 */
uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to given device and VQ */
	uint32_t loop, retry_count;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	uint32_t frames_to_send;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint16_t bpid;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
			eth_data, dpaa2_q->fqid);

	/*Prepare enqueue descriptor*/
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
			     dpaa2_q->flow_id, dpaa2_q->tc_index);
	/*Clear the unused FD fields before sending*/
	while (nb_pkts) {
		/*Check if the queue is congested*/
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto skip_tx;
		}

		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_pkts;

		for (loop = 0; loop < frames_to_send; loop++) {
			if ((*bufs)->seqn) {
				uint8_t dqrr_index = (*bufs)->seqn - 1;

				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
						dqrr_index;
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
				(*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN;
			}

			if (likely(RTE_MBUF_DIRECT(*bufs))) {
				mp = (*bufs)->pool;
				/* Check the basic scenario and set
				 * the FD appropriately here itself.
				 */
				if (likely(mp && mp->ops_index ==
				    priv->bp_list->dpaa2_ops_index &&
				    (*bufs)->nb_segs == 1 &&
				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
					if (unlikely(((*bufs)->ol_flags
						& PKT_TX_VLAN_PKT) ||
						(eth_data->dev_conf.txmode.offloads
						& DEV_TX_OFFLOAD_VLAN_INSERT))) {
						ret = rte_vlan_insert(bufs);
						if (ret)
							goto send_n_return;
					}
					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
						&fd_arr[loop],
						mempool_to_bpid(mp));
					bufs++;
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}
			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp || !priv->bp_list)) {
				DPAA2_PMD_ERR("Err: No buffer pool attached");
				goto send_n_return;
			}

			if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
				(eth_data->dev_conf.txmode.offloads
				& DEV_TX_OFFLOAD_VLAN_INSERT))) {
				int ret = rte_vlan_insert(bufs);
				if (ret)
					goto send_n_return;
			}
			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				bpid = priv->bp_list->buf_pool.bpid;

				if (unlikely((*bufs)->nb_segs > 1)) {
					DPAA2_PMD_ERR("S/G support not added"
						" for non hw offload buffer");
					goto send_n_return;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					goto send_n_return;
				}
				/* free the original packet */
				rte_pktmbuf_free(*bufs);
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							&fd_arr[loop], bpid))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop], bpid);
				}
			}
			bufs++;
		}

		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
					&fd_arr[loop], &flags[loop],
					frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_pkts -= frames_to_send;
	}
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;

send_n_return:
	/* send any already prepared fd */
	if (loop) {
		unsigned int i = 0;

		while (i < loop) {
			i += qbman_swp_enqueue_multiple(swp, &eqdesc,
							&fd_arr[i],
							&flags[i],
							loop - i);
		}
		num_tx += loop;
	}
skip_tx:
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;
}

static void
dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci)
{
	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
	struct qbman_fd *fd;
	struct rte_mbuf *m;

	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
	m = eth_fd_to_mbuf(fd);
	rte_pktmbuf_free(m);
}
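
/*
 * Fill an enqueue descriptor from the scheduling context carried in
 * mbuf->seqn: for ordered traffic this programs the ORP id and sequence
 * number (optionally requesting an enqueue response when strict ordering
 * is on), while for atomic traffic it arms DCA so that the held DQRR
 * entry is consumed by the enqueue itself.
 */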
static void
dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
			     struct rte_mbuf *m,
			     struct qbman_eq_desc *eqdesc)
{
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
	struct eqresp_metadata *eqresp_meta;
	uint16_t orpid, seqnum;
	uint8_t dq_idx;

	qbman_eq_desc_set_qd(eqdesc, priv->qdid, dpaa2_q->flow_id,
			     dpaa2_q->tc_index);

	if (m->seqn & DPAA2_ENQUEUE_FLAG_ORP) {
		orpid = (m->seqn & DPAA2_EQCR_OPRID_MASK) >>
			DPAA2_EQCR_OPRID_SHIFT;
		seqnum = (m->seqn & DPAA2_EQCR_SEQNUM_MASK) >>
			DPAA2_EQCR_SEQNUM_SHIFT;

		if (!priv->en_loose_ordered) {
			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
			qbman_eq_desc_set_response(eqdesc, (uint64_t)
				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
				dpio_dev->eqresp_pi]), 1);
			qbman_eq_desc_set_token(eqdesc, 1);

			eqresp_meta = &dpio_dev->eqresp_meta[
				dpio_dev->eqresp_pi];
			eqresp_meta->dpaa2_q = dpaa2_q;
			eqresp_meta->mp = m->pool;

			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
				dpio_dev->eqresp_pi++ :
				(dpio_dev->eqresp_pi = 0);
		} else {
			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
		}
	} else {
		dq_idx = m->seqn - 1;
		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
		DPAA2_PER_LCORE_DQRR_SIZE--;
		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
	}
	m->seqn = DPAA2_INVALID_MBUF_SEQN;
}

/* Callback to handle sending ordered packets through WRIOP based interface */
uint16_t
dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	struct qbman_swp *swp;
	uint32_t frames_to_send, num_free_eq_desc;
	uint32_t loop, retry_count;
	int32_t ret;
	uint16_t num_tx = 0;
	uint16_t bpid;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
			eth_data, dpaa2_q->fqid);

	/* This would also handle normal and atomic queues as any type
	 * of packet can be enqueued when ordered queues are being used.
	 */
	while (nb_pkts) {
		/*Check if the queue is congested*/
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto skip_tx;
		}

		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_pkts;

		if (!priv->en_loose_ordered) {
			if ((*bufs)->seqn & DPAA2_ENQUEUE_FLAG_ORP) {
				num_free_eq_desc = dpaa2_free_eq_descriptors();
				if (num_free_eq_desc < frames_to_send)
					frames_to_send = num_free_eq_desc;
			}
		}

		for (loop = 0; loop < frames_to_send; loop++) {
			/*Prepare enqueue descriptor*/
			qbman_eq_desc_clear(&eqdesc[loop]);

			if ((*bufs)->seqn) {
				/* Use only queue 0 for Tx in case of atomic/
				 * ordered packets as packets can get unordered
				 * when being transmitted out from the interface
				 */
				dpaa2_set_enqueue_descriptor(order_sendq,
							     (*bufs),
							     &eqdesc[loop]);
			} else {
				qbman_eq_desc_set_no_orp(&eqdesc[loop],
							 DPAA2_EQ_RESP_ERR_FQ);
				qbman_eq_desc_set_qd(&eqdesc[loop], priv->qdid,
						     dpaa2_q->flow_id,
						     dpaa2_q->tc_index);
			}

			if (likely(RTE_MBUF_DIRECT(*bufs))) {
				mp = (*bufs)->pool;
				/* Check the basic scenario and set
				 * the FD appropriately here itself.
				 */
				if (likely(mp && mp->ops_index ==
				    priv->bp_list->dpaa2_ops_index &&
				    (*bufs)->nb_segs == 1 &&
				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
					if (unlikely((*bufs)->ol_flags
						& PKT_TX_VLAN_PKT)) {
						ret = rte_vlan_insert(bufs);
						if (ret)
							goto send_n_return;
					}
					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
						&fd_arr[loop],
						mempool_to_bpid(mp));
					bufs++;
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}
			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp || !priv->bp_list)) {
				DPAA2_PMD_ERR("Err: No buffer pool attached");
				goto send_n_return;
			}

			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				bpid = priv->bp_list->buf_pool.bpid;

				if (unlikely((*bufs)->nb_segs > 1)) {
					DPAA2_PMD_ERR(
						"S/G not supp for non hw offload buffer");
					goto send_n_return;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					goto send_n_return;
				}
				/* free the original packet */
				rte_pktmbuf_free(*bufs);
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							      &fd_arr[loop],
							      bpid))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop], bpid);
				}
			}
			bufs++;
		}

		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_pkts -= frames_to_send;
	}
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;

send_n_return:
	/* send any already prepared fd */
	if (loop) {
		unsigned int i = 0;

		while (i < loop) {
			i += qbman_swp_enqueue_multiple_desc(swp, &eqdesc[loop],
							&fd_arr[i], loop - i);
		}
		num_tx += loop;
	}
skip_tx:
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;
}

/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param queue
 *   Generic pointer to TX queue structure.
 * @param bufs
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= nb_pkts).
 */
uint16_t
dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	(void)queue;
	(void)bufs;
	(void)nb_pkts;

	return 0;
}

#if defined(RTE_TOOLCHAIN_GCC)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#elif defined(RTE_TOOLCHAIN_CLANG)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
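
/*
 * The loopback path below stores the const FD pointers returned by
 * qbman_result_DQ_fd() into a non-const array so they can be handed back
 * to qbman_swp_enqueue_multiple_fd(); that cast drops the const
 * qualifier, hence the -Wcast-qual suppression above.
 */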

/* This function loops back all the received packets.*/
uint16_t
dpaa2_dev_loopback_rx(void *queue,
		      struct rte_mbuf **bufs __rte_unused,
		      uint16_t nb_pkts)
{
	/* Function to receive frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, num_tx = 0, pull_size;
	uint8_t pending, status;
	struct qbman_swp *swp;
	struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
	struct qbman_pull_desc pulldesc;
	struct qbman_eq_desc eqdesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_queue *tx_q = priv->tx_vq[0];
	/* todo - currently we are using 1st TX queue only for loopback*/

	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
		ret = dpaa2_affine_qbman_ethrx_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(
			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG(
					"VDQ command not issued. QBMAN busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare next pull descriptor. This will give space for the
	 * prefetching done on DQRR entries
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/*Prepare enqueue descriptor*/
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);

	/* Check if the previous issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage);

		dq_storage++;
		num_rx++;
	} while (pending);

	while (num_tx < num_rx) {
		num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
				&fd[num_tx], 0, num_rx - num_tx);
	}

	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
	}
	/* issue a volatile dequeue command for next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
					   " QBMAN is busy (2)\n");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);

	dpaa2_q->rx_pkts += num_rx;
	dpaa2_q->tx_pkts += num_tx;

	return 0;
}

#if defined(RTE_TOOLCHAIN_GCC)
#pragma GCC diagnostic pop
#elif defined(RTE_TOOLCHAIN_CLANG)
#pragma clang diagnostic pop
#endif