1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  * Copyright 2016-2021 NXP
5  *
6  */
12 #include <ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
17 #include <rte_hexdump.h>
19 #include <rte_fslmc.h>
20 #include <fslmc_vfio.h>
21 #include <dpaa2_hw_pvt.h>
22 #include <dpaa2_hw_dpio.h>
23 #include <dpaa2_hw_mempool.h>
25 #include "dpaa2_pmd_logs.h"
26 #include "dpaa2_ethdev.h"
27 #include "base/dpaa2_hw_dpni_annot.h"
29 static inline uint32_t __rte_hot
30 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
31 struct dpaa2_annot_hdr *annotation);
33 static void enable_tx_tstamp(struct qbman_fd *fd) __rte_unused;
35 static inline rte_mbuf_timestamp_t *
36 dpaa2_timestamp_dynfield(struct rte_mbuf *mbuf)
37 {
38 return RTE_MBUF_DYNFIELD(mbuf,
39 dpaa2_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
40 }
42 #define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) do { \
43 DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
44 DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
45 DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
46 DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
47 DPAA2_SET_FD_FRC(_fd, 0); \
48 DPAA2_RESET_FD_CTRL(_fd); \
49 DPAA2_RESET_FD_FLC(_fd); \
50 } while (0)
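/* Parse the frame using the parse summary carried in the FRC field of the
 * FD (used on LX2160A); falls back to the annotation-based slow parser for
 * frame types not covered by the FRC encoding.
 */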
52 static inline void __rte_hot
53 dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
54 void *hw_annot_addr)
55 {
56 uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);
57 struct dpaa2_annot_hdr *annotation =
58 (struct dpaa2_annot_hdr *)hw_annot_addr;
60 m->packet_type = RTE_PTYPE_UNKNOWN;
61 switch (frc) {
62 case DPAA2_PKT_TYPE_ETHER:
63 m->packet_type = RTE_PTYPE_L2_ETHER;
64 break;
65 case DPAA2_PKT_TYPE_IPV4:
66 m->packet_type = RTE_PTYPE_L2_ETHER |
67 RTE_PTYPE_L3_IPV4;
68 break;
69 case DPAA2_PKT_TYPE_IPV6:
70 m->packet_type = RTE_PTYPE_L2_ETHER |
71 RTE_PTYPE_L3_IPV6;
72 break;
73 case DPAA2_PKT_TYPE_IPV4_EXT:
74 m->packet_type = RTE_PTYPE_L2_ETHER |
75 RTE_PTYPE_L3_IPV4_EXT;
76 break;
77 case DPAA2_PKT_TYPE_IPV6_EXT:
78 m->packet_type = RTE_PTYPE_L2_ETHER |
79 RTE_PTYPE_L3_IPV6_EXT;
80 break;
81 case DPAA2_PKT_TYPE_IPV4_TCP:
82 m->packet_type = RTE_PTYPE_L2_ETHER |
83 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
84 break;
85 case DPAA2_PKT_TYPE_IPV6_TCP:
86 m->packet_type = RTE_PTYPE_L2_ETHER |
87 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
88 break;
89 case DPAA2_PKT_TYPE_IPV4_UDP:
90 m->packet_type = RTE_PTYPE_L2_ETHER |
91 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
92 break;
93 case DPAA2_PKT_TYPE_IPV6_UDP:
94 m->packet_type = RTE_PTYPE_L2_ETHER |
95 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
96 break;
97 case DPAA2_PKT_TYPE_IPV4_SCTP:
98 m->packet_type = RTE_PTYPE_L2_ETHER |
99 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
100 break;
101 case DPAA2_PKT_TYPE_IPV6_SCTP:
102 m->packet_type = RTE_PTYPE_L2_ETHER |
103 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
104 break;
105 case DPAA2_PKT_TYPE_IPV4_ICMP:
106 m->packet_type = RTE_PTYPE_L2_ETHER |
107 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
108 break;
109 case DPAA2_PKT_TYPE_IPV6_ICMP:
110 m->packet_type = RTE_PTYPE_L2_ETHER |
111 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
112 break;
113 default:
114 m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
115 }
116 m->hash.rss = fd->simple.flc_hi;
117 m->ol_flags |= PKT_RX_RSS_HASH;
119 if (dpaa2_enable_ts[m->port]) {
120 *dpaa2_timestamp_dynfield(m) = annotation->word2;
121 m->ol_flags |= dpaa2_timestamp_rx_dynflag;
122 DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "",
123 *dpaa2_timestamp_dynfield(m));
124 }
126 DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
127 "ol_flags =0x%" PRIx64 "",
128 frc, m->packet_type, m->ol_flags);
129 }
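/* Annotation-based slow parser: walks the hardware annotation words to build
 * the packet type and to set the VLAN, checksum and fragmentation related
 * mbuf flags.
 */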
131 static inline uint32_t __rte_hot
132 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
133 struct dpaa2_annot_hdr *annotation)
134 {
135 uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
136 uint16_t *vlan_tci;
138 DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
139 "(4)=0x%" PRIx64 "\t",
140 annotation->word3, annotation->word4);
142 #if defined(RTE_LIBRTE_IEEE1588)
143 if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP))
144 mbuf->ol_flags |= PKT_RX_IEEE1588_PTP;
147 if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
148 vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
149 (VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
150 mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
151 mbuf->ol_flags |= PKT_RX_VLAN;
152 pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
153 } else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
154 vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
155 (VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
156 mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
157 mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ;
158 pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
161 if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
162 pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
164 } else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
165 pkt_type |= RTE_PTYPE_L2_ETHER;
170 if (BIT_ISSET_AT_POS(annotation->word3, L2_MPLS_1_PRESENT |
172 pkt_type |= RTE_PTYPE_L2_ETHER_MPLS;
174 if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
175 L3_IPV4_N_PRESENT)) {
176 pkt_type |= RTE_PTYPE_L3_IPV4;
177 if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
178 L3_IP_N_OPT_PRESENT))
179 pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
181 } else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
182 L3_IPV6_N_PRESENT)) {
183 pkt_type |= RTE_PTYPE_L3_IPV6;
184 if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
185 L3_IP_N_OPT_PRESENT))
186 pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
191 if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
192 mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
193 else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
194 mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
196 if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
197 L3_IP_1_MORE_FRAGMENT |
198 L3_IP_N_FIRST_FRAGMENT |
199 L3_IP_N_MORE_FRAGMENT)) {
200 pkt_type |= RTE_PTYPE_L4_FRAG;
203 pkt_type |= RTE_PTYPE_L4_NONFRAG;
206 if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
207 pkt_type |= RTE_PTYPE_L4_UDP;
209 else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
210 pkt_type |= RTE_PTYPE_L4_TCP;
212 else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
213 pkt_type |= RTE_PTYPE_L4_SCTP;
215 else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
216 pkt_type |= RTE_PTYPE_L4_ICMP;
218 else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
219 pkt_type |= RTE_PTYPE_UNKNOWN;
221 parse_done:
222 return pkt_type;
223 }
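/* Common Rx parser: sets checksum/timestamp information from the annotation,
 * returns the packet type directly for the frequent IPv4/IPv6 cases and
 * defers everything else to dpaa2_dev_rx_parse_slow().
 */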
225 static inline uint32_t __rte_hot
226 dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
227 {
228 struct dpaa2_annot_hdr *annotation =
229 (struct dpaa2_annot_hdr *)hw_annot_addr;
231 DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
234 if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
235 mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
236 else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
237 mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
239 if (dpaa2_enable_ts[mbuf->port]) {
240 *dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
241 mbuf->ol_flags |= dpaa2_timestamp_rx_dynflag;
242 DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "",
243 *dpaa2_timestamp_dynfield(mbuf));
244 }
246 /* Check detailed parsing requirement */
247 if (annotation->word3 & 0x7FFFFC3FFFF)
248 return dpaa2_dev_rx_parse_slow(mbuf, annotation);
250 /* Return some common types from parse processing */
251 switch (annotation->word4) {
253 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
255 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
256 case DPAA2_L3_IPv4_TCP:
257 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
259 case DPAA2_L3_IPv4_UDP:
260 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
262 case DPAA2_L3_IPv6_TCP:
263 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
265 case DPAA2_L3_IPv6_UDP:
266 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
272 return dpaa2_dev_rx_parse_slow(mbuf, annotation);
273 }
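/* Convert a scatter/gather FD into a chained mbuf, one segment per SG entry,
 * and run the Rx parser on the first segment's annotation.
 */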
275 static inline struct rte_mbuf *__rte_hot
276 eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
279 struct qbman_sge *sgt, *sge;
280 size_t sg_addr, fd_addr;
283 struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;
285 fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
286 hw_annot_addr = (void *)(fd_addr + DPAA2_FD_PTA_SIZE);
288 /* Get Scatter gather table address */
289 sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));
292 sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));
294 /* First Scatter gather entry */
295 first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
296 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
297 /* Prepare all the metadata for first segment */
298 first_seg->buf_addr = (uint8_t *)sg_addr;
299 first_seg->ol_flags = 0;
300 first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
301 first_seg->data_len = sge->length & 0x1FFFF;
302 first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
303 first_seg->nb_segs = 1;
304 first_seg->next = NULL;
305 first_seg->port = port_id;
306 if (dpaa2_svr_family == SVR_LX2160A)
307 dpaa2_dev_rx_parse_new(first_seg, fd, hw_annot_addr);
309 first_seg->packet_type =
310 dpaa2_dev_rx_parse(first_seg, hw_annot_addr);
312 rte_mbuf_refcnt_set(first_seg, 1);
314 while (!DPAA2_SG_IS_FINAL(sge)) {
316 sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
317 DPAA2_GET_FLE_ADDR(sge));
318 next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
319 rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
320 next_seg->buf_addr = (uint8_t *)sg_addr;
321 next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
322 next_seg->data_len = sge->length & 0x1FFFF;
323 first_seg->nb_segs += 1;
324 rte_mbuf_refcnt_set(next_seg, 1);
325 cur_seg->next = next_seg;
326 next_seg->next = NULL;
329 temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
330 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
331 rte_mbuf_refcnt_set(temp, 1);
332 rte_pktmbuf_free_seg(temp);
334 return (void *)first_seg;
335 }
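/* Convert a contiguous FD back into the in-line mbuf that precedes the data
 * buffer, refreshing the fields the hardware may have changed.
 */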
337 static inline struct rte_mbuf *__rte_hot
338 eth_fd_to_mbuf(const struct qbman_fd *fd,
341 void *v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
342 void *hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
343 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
344 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
346 /* need to repopulate some of the fields,
347 * as they may have changed during the last transmission
348 */
349 mbuf->nb_segs = 1;
350 mbuf->ol_flags = 0;
351 mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
352 mbuf->data_len = DPAA2_GET_FD_LEN(fd);
353 mbuf->pkt_len = mbuf->data_len;
354 mbuf->port = port_id;
356 rte_mbuf_refcnt_set(mbuf, 1);
358 /* Parse the packet */
359 /* Parse results for LX2 are in the FRC field of the FD.
360 * For other DPAA2 platforms, the parse results follow the
361 * private SW annotation area.
362 */
364 if (dpaa2_svr_family == SVR_LX2160A)
365 dpaa2_dev_rx_parse_new(mbuf, fd, hw_annot_addr);
367 mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, hw_annot_addr);
369 DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
370 "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
371 mbuf, mbuf->buf_addr, mbuf->data_off,
372 DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
373 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
374 DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
376 return mbuf;
377 }
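/* Build a scatter/gather FD from a multi-segment mbuf. The SG table is placed
 * in the headroom of a direct mbuf when there is room for it, otherwise in a
 * buffer allocated from the given mempool.
 */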
379 static int __rte_noinline __rte_hot
380 eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
382 struct rte_mempool *mp, uint16_t bpid)
384 struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
385 struct qbman_sge *sgt, *sge = NULL;
388 #ifdef RTE_LIBRTE_IEEE1588
389 /* annotation area for timestamp in first buffer */
392 if (RTE_MBUF_DIRECT(mbuf) &&
393 (mbuf->data_off > (mbuf->nb_segs * sizeof(struct qbman_sge)
396 if (rte_mbuf_refcnt_read(temp) > 1) {
397 /* If refcnt > 1, invalid bpid is set to ensure
398 * buffer is not freed by HW
400 fd->simple.bpid_offset = 0;
401 DPAA2_SET_FD_IVP(fd);
402 rte_mbuf_refcnt_update(temp, -1);
404 DPAA2_SET_ONLY_FD_BPID(fd, bpid);
406 DPAA2_SET_FD_OFFSET(fd, offset);
408 temp = rte_pktmbuf_alloc(mp);
410 DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
413 DPAA2_SET_ONLY_FD_BPID(fd, bpid);
414 DPAA2_SET_FD_OFFSET(fd, temp->data_off);
416 DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
417 DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
418 DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
419 DPAA2_RESET_FD_FRC(fd);
420 DPAA2_RESET_FD_CTRL(fd);
421 DPAA2_RESET_FD_FLC(fd);
422 /*Set Scatter gather table and Scatter gather entries*/
423 sgt = (struct qbman_sge *)(
424 (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
425 + DPAA2_GET_FD_OFFSET(fd));
427 for (i = 0; i < mbuf->nb_segs; i++) {
429 /*Resetting the buffer pool id and offset field*/
430 sge->fin_bpid_offset = 0;
431 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
432 DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
433 sge->length = cur_seg->data_len;
434 if (RTE_MBUF_DIRECT(cur_seg)) {
435 /* if we are using inline SGT in same buffers
436 * set the FLE FMT as Frame Data Section
438 if (temp == cur_seg) {
439 DPAA2_SG_SET_FORMAT(sge, qbman_fd_list);
440 DPAA2_SET_FLE_IVP(sge);
442 if (rte_mbuf_refcnt_read(cur_seg) > 1) {
443 /* If refcnt > 1, invalid bpid is set to ensure
444 * buffer is not freed by HW
446 DPAA2_SET_FLE_IVP(sge);
447 rte_mbuf_refcnt_update(cur_seg, -1);
449 DPAA2_SET_FLE_BPID(sge,
450 mempool_to_bpid(cur_seg->pool));
453 cur_seg = cur_seg->next;
454 } else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
455 DPAA2_SET_FLE_IVP(sge);
456 cur_seg = cur_seg->next;
458 /* Get owner MBUF from indirect buffer */
459 mi = rte_mbuf_from_indirect(cur_seg);
460 if (rte_mbuf_refcnt_read(mi) > 1) {
461 /* If refcnt > 1, invalid bpid is set to ensure
462 * owner buffer is not freed by HW
464 DPAA2_SET_FLE_IVP(sge);
466 DPAA2_SET_FLE_BPID(sge,
467 mempool_to_bpid(mi->pool));
468 rte_mbuf_refcnt_update(mi, 1);
471 cur_seg = cur_seg->next;
472 prev_seg->next = NULL;
473 rte_pktmbuf_free(prev_seg);
476 DPAA2_SG_SET_FINAL(sge, true);
477 return 0;
478 }
480 static void
481 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
482 struct qbman_fd *fd, uint16_t bpid) __rte_unused;
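/* Populate a contiguous FD from a single-segment mbuf and manage buffer
 * ownership (refcount, indirect and external-buffer cases) so that the
 * hardware only frees buffers it actually owns.
 */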
484 static void __rte_noinline __rte_hot
485 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
486 struct qbman_fd *fd, uint16_t bpid)
488 DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
490 DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
491 "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
492 mbuf, mbuf->buf_addr, mbuf->data_off,
493 DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
494 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
495 DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
496 if (RTE_MBUF_DIRECT(mbuf)) {
497 if (rte_mbuf_refcnt_read(mbuf) > 1) {
498 DPAA2_SET_FD_IVP(fd);
499 rte_mbuf_refcnt_update(mbuf, -1);
501 } else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
502 DPAA2_SET_FD_IVP(fd);
506 mi = rte_mbuf_from_indirect(mbuf);
507 if (rte_mbuf_refcnt_read(mi) > 1)
508 DPAA2_SET_FD_IVP(fd);
510 rte_mbuf_refcnt_update(mi, 1);
511 rte_pktmbuf_free(mbuf);
512 }
513 }
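/* Copy path for mbufs that are not backed by a DPAA2 mempool: allocate a
 * buffer from the hardware pool, copy the frame data and the relevant mbuf
 * fields, then build a contiguous FD from the copy.
 */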
515 static inline int __rte_hot
516 eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
517 struct qbman_fd *fd, uint16_t bpid)
522 if (rte_dpaa2_mbuf_alloc_bulk(
523 rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
524 DPAA2_PMD_DP_DEBUG("Unable to allocated DPAA2 buffer\n");
527 m = (struct rte_mbuf *)mb;
528 memcpy((char *)m->buf_addr + mbuf->data_off,
529 (void *)((char *)mbuf->buf_addr + mbuf->data_off),
532 /* Copy required fields */
533 m->data_off = mbuf->data_off;
534 m->ol_flags = mbuf->ol_flags;
535 m->packet_type = mbuf->packet_type;
536 m->tx_offload = mbuf->tx_offload;
538 DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);
541 "mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
542 " meta: %d, off: %d, len: %d\n",
545 DPAA2_GET_FD_ADDR(fd),
546 DPAA2_GET_FD_BPID(fd),
547 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
548 DPAA2_GET_FD_OFFSET(fd),
549 DPAA2_GET_FD_LEN(fd));
551 return 0;
552 }
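/* Drain the Rx error queue and log a hexdump of each faulty frame together
 * with its FD error bits and frame-annotation status.
 */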
555 dump_err_pkts(struct dpaa2_queue *dpaa2_q)
557 /* Function receives frames for a given device and VQ */
558 struct qbman_result *dq_storage;
559 uint32_t fqid = dpaa2_q->fqid;
560 int ret, num_rx = 0, num_pulled;
561 uint8_t pending, status;
562 struct qbman_swp *swp;
563 const struct qbman_fd *fd;
564 struct qbman_pull_desc pulldesc;
565 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
566 uint32_t lcore_id = rte_lcore_id();
567 void *v_addr, *hw_annot_addr;
568 struct dpaa2_fas *fas;
570 if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
571 ret = dpaa2_affine_qbman_swp();
573 DPAA2_PMD_ERR("Failed to allocate IO portal, tid: %d\n",
578 swp = DPAA2_PER_LCORE_PORTAL;
580 dq_storage = dpaa2_q->q_storage[lcore_id].dq_storage[0];
581 qbman_pull_desc_clear(&pulldesc);
582 qbman_pull_desc_set_fq(&pulldesc, fqid);
583 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
584 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
585 qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
588 if (qbman_swp_pull(swp, &pulldesc)) {
589 DPAA2_PMD_DP_DEBUG("VDQ command is not issued.QBMAN is busy\n");
590 /* Portal was busy, try again */
596 /* Check if the previously issued command is completed. */
597 while (!qbman_check_command_complete(dq_storage))
603 /* Loop until the dq_storage is updated with
606 while (!qbman_check_new_result(dq_storage))
609 /* Check whether the last pull command has expired and
610 * set the condition for loop termination.
611 */
612 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
614 /* Check for valid frame. */
615 status = qbman_result_DQ_flags(dq_storage);
616 if (unlikely((status &
617 QBMAN_DQ_STAT_VALIDFRAME) == 0))
620 fd = qbman_result_DQ_fd(dq_storage);
621 v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
622 hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
625 DPAA2_PMD_ERR("\n\n[%d] error packet on port[%d]:"
626 " fd_off: %d, fd_err: %x, fas_status: %x",
627 rte_lcore_id(), eth_data->port_id,
628 DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ERR(fd),
630 rte_hexdump(stderr, "Error packet", v_addr,
631 DPAA2_GET_FD_OFFSET(fd) + DPAA2_GET_FD_LEN(fd));
638 dpaa2_q->err_pkts += num_rx;
641 /* This function assumes that the caller keeps the same value of nb_pkts
642 * across calls for a given queue; if that is not the case, it is better to
643 * use the non-prefetch version of the Rx call.
644 * It returns the number of packets requested in the previous call, without
645 * honoring the current nb_pkts or the available bufs space.
646 */
647 uint16_t
648 dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
650 /* Function receives frames for a given device and VQ */
651 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
652 struct qbman_result *dq_storage, *dq_storage1 = NULL;
653 uint32_t fqid = dpaa2_q->fqid;
654 int ret, num_rx = 0, pull_size;
655 uint8_t pending, status;
656 struct qbman_swp *swp;
657 const struct qbman_fd *fd;
658 struct qbman_pull_desc pulldesc;
659 struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
660 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
661 struct dpaa2_dev_priv *priv = eth_data->dev_private;
663 if (unlikely(dpaa2_enable_err_queue))
664 dump_err_pkts(priv->rx_err_vq);
666 if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
667 ret = dpaa2_affine_qbman_ethrx_swp();
669 DPAA2_PMD_ERR("Failure in affining portal");
674 if (unlikely(!rte_dpaa2_bpid_info &&
675 rte_eal_process_type() == RTE_PROC_SECONDARY))
676 rte_dpaa2_bpid_info = dpaa2_q->bp_array;
678 swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
679 pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
680 if (unlikely(!q_storage->active_dqs)) {
681 q_storage->toggle = 0;
682 dq_storage = q_storage->dq_storage[q_storage->toggle];
683 q_storage->last_num_pkts = pull_size;
684 qbman_pull_desc_clear(&pulldesc);
685 qbman_pull_desc_set_numframes(&pulldesc,
686 q_storage->last_num_pkts);
687 qbman_pull_desc_set_fq(&pulldesc, fqid);
688 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
689 (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
690 if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
691 while (!qbman_check_command_complete(
693 DPAA2_PER_LCORE_ETHRX_DPIO->index)))
695 clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
698 if (qbman_swp_pull(swp, &pulldesc)) {
699 DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
700 " QBMAN is busy (1)\n");
701 /* Portal was busy, try again */
706 q_storage->active_dqs = dq_storage;
707 q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
708 set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
712 dq_storage = q_storage->active_dqs;
713 rte_prefetch0((void *)(size_t)(dq_storage));
714 rte_prefetch0((void *)(size_t)(dq_storage + 1));
716 /* Prepare next pull descriptor. This will give space for the
717 * prefetching done on DQRR entries.
718 */
719 q_storage->toggle ^= 1;
720 dq_storage1 = q_storage->dq_storage[q_storage->toggle];
721 qbman_pull_desc_clear(&pulldesc);
722 qbman_pull_desc_set_numframes(&pulldesc, pull_size);
723 qbman_pull_desc_set_fq(&pulldesc, fqid);
724 qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
725 (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
727 /* Check if the previously issued command is completed.
728 * Also seems like the SWP is shared between the Ethernet Driver
729 * and the SEC driver.
731 while (!qbman_check_command_complete(dq_storage))
733 if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
734 clear_swp_active_dqs(q_storage->active_dpio_id);
739 /* Loop until the dq_storage is updated with
742 while (!qbman_check_new_result(dq_storage))
744 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
745 /* Check whether the last pull command has expired and
746 * set the condition for loop termination.
747 */
748 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
750 /* Check for valid frame. */
751 status = qbman_result_DQ_flags(dq_storage);
752 if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
755 fd = qbman_result_DQ_fd(dq_storage);
757 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
758 if (dpaa2_svr_family != SVR_LX2160A) {
759 const struct qbman_fd *next_fd =
760 qbman_result_DQ_fd(dq_storage + 1);
761 /* Prefetch Annotation address for the parse results */
762 rte_prefetch0(DPAA2_IOVA_TO_VADDR((DPAA2_GET_FD_ADDR(
763 next_fd) + DPAA2_FD_PTA_SIZE + 16)));
767 if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
768 bufs[num_rx] = eth_sg_fd_to_mbuf(fd, eth_data->port_id);
770 bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
771 #if defined(RTE_LIBRTE_IEEE1588)
772 priv->rx_timestamp = *dpaa2_timestamp_dynfield(bufs[num_rx]);
775 if (eth_data->dev_conf.rxmode.offloads &
776 DEV_RX_OFFLOAD_VLAN_STRIP)
777 rte_vlan_strip(bufs[num_rx]);
783 if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
784 while (!qbman_check_command_complete(
785 get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
787 clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
789 /* issue a volatile dequeue command for next pull */
791 if (qbman_swp_pull(swp, &pulldesc)) {
792 DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
793 "QBMAN is busy (2)\n");
798 q_storage->active_dqs = dq_storage1;
799 q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
800 set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
802 dpaa2_q->rx_pkts += num_rx;
804 return num_rx;
805 }
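/* Eventdev Rx callback (parallel): fill the rte_event from the queue's event
 * template, convert the FD to an mbuf and consume the DQRR entry right away.
 */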
808 dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
809 const struct qbman_fd *fd,
810 const struct qbman_result *dq,
811 struct dpaa2_queue *rxq,
812 struct rte_event *ev)
814 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
815 DPAA2_FD_PTA_SIZE + 16));
817 ev->flow_id = rxq->ev.flow_id;
818 ev->sub_event_type = rxq->ev.sub_event_type;
819 ev->event_type = RTE_EVENT_TYPE_ETHDEV;
820 ev->op = RTE_EVENT_OP_NEW;
821 ev->sched_type = rxq->ev.sched_type;
822 ev->queue_id = rxq->ev.queue_id;
823 ev->priority = rxq->ev.priority;
825 ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
827 qbman_swp_dqrr_consume(swp, dq);
828 }
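/* Eventdev Rx callback (atomic): the DQRR entry is held rather than consumed;
 * its index is stored in the mbuf sequence number so it can be consumed later
 * (e.g. via DCA on transmit).
 */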
831 dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused,
832 const struct qbman_fd *fd,
833 const struct qbman_result *dq,
834 struct dpaa2_queue *rxq,
835 struct rte_event *ev)
839 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
840 DPAA2_FD_PTA_SIZE + 16));
842 ev->flow_id = rxq->ev.flow_id;
843 ev->sub_event_type = rxq->ev.sub_event_type;
844 ev->event_type = RTE_EVENT_TYPE_ETHDEV;
845 ev->op = RTE_EVENT_OP_NEW;
846 ev->sched_type = rxq->ev.sched_type;
847 ev->queue_id = rxq->ev.queue_id;
848 ev->priority = rxq->ev.priority;
850 ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
852 dqrr_index = qbman_get_dqrr_idx(dq);
853 *dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
854 DPAA2_PER_LCORE_DQRR_SIZE++;
855 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
856 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
857 }
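/* Eventdev Rx callback (ordered): encode the ODPID and sequence number from
 * the dequeue result into the mbuf sequence number for later order
 * restoration on transmit.
 */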
860 dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
861 const struct qbman_fd *fd,
862 const struct qbman_result *dq,
863 struct dpaa2_queue *rxq,
864 struct rte_event *ev)
866 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
867 DPAA2_FD_PTA_SIZE + 16));
869 ev->flow_id = rxq->ev.flow_id;
870 ev->sub_event_type = rxq->ev.sub_event_type;
871 ev->event_type = RTE_EVENT_TYPE_ETHDEV;
872 ev->op = RTE_EVENT_OP_NEW;
873 ev->sched_type = rxq->ev.sched_type;
874 ev->queue_id = rxq->ev.queue_id;
875 ev->priority = rxq->ev.priority;
877 ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
879 *dpaa2_seqn(ev->mbuf) = DPAA2_ENQUEUE_FLAG_ORP;
880 *dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
881 *dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;
883 qbman_swp_dqrr_consume(swp, dq);
884 }
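/* Non-prefetch Rx burst: issue volatile dequeue commands of up to
 * dpaa2_dqrr_size frames until nb_pkts frames have been pulled or the frame
 * queue runs dry, converting each FD into an mbuf.
 */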
887 dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
889 /* Function receives frames for a given device and VQ */
890 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
891 struct qbman_result *dq_storage;
892 uint32_t fqid = dpaa2_q->fqid;
893 int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
894 uint8_t pending, status;
895 struct qbman_swp *swp;
896 const struct qbman_fd *fd;
897 struct qbman_pull_desc pulldesc;
898 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
899 struct dpaa2_dev_priv *priv = eth_data->dev_private;
901 if (unlikely(dpaa2_enable_err_queue))
902 dump_err_pkts(priv->rx_err_vq);
904 if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
905 ret = dpaa2_affine_qbman_swp();
908 "Failed to allocate IO portal, tid: %d\n",
913 swp = DPAA2_PER_LCORE_PORTAL;
916 dq_storage = dpaa2_q->q_storage->dq_storage[0];
917 qbman_pull_desc_clear(&pulldesc);
918 qbman_pull_desc_set_fq(&pulldesc, fqid);
919 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
920 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
922 if (next_pull > dpaa2_dqrr_size) {
923 qbman_pull_desc_set_numframes(&pulldesc,
925 next_pull -= dpaa2_dqrr_size;
927 qbman_pull_desc_set_numframes(&pulldesc, next_pull);
932 if (qbman_swp_pull(swp, &pulldesc)) {
934 "VDQ command is not issued.QBMAN is busy\n");
935 /* Portal was busy, try again */
941 rte_prefetch0((void *)((size_t)(dq_storage + 1)));
942 /* Check if the previously issued command is completed. */
943 while (!qbman_check_command_complete(dq_storage))
949 /* Loop until the dq_storage is updated with
952 while (!qbman_check_new_result(dq_storage))
954 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
955 /* Check whether the last pull command has expired and
956 * set the condition for loop termination.
957 */
958 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
960 /* Check for valid frame. */
961 status = qbman_result_DQ_flags(dq_storage);
962 if (unlikely((status &
963 QBMAN_DQ_STAT_VALIDFRAME) == 0))
966 fd = qbman_result_DQ_fd(dq_storage);
968 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
969 if (dpaa2_svr_family != SVR_LX2160A) {
970 const struct qbman_fd *next_fd =
971 qbman_result_DQ_fd(dq_storage + 1);
973 /* Prefetch Annotation address for the parse
974 * results.
975 */
976 rte_prefetch0((DPAA2_IOVA_TO_VADDR(
977 DPAA2_GET_FD_ADDR(next_fd) +
978 DPAA2_FD_PTA_SIZE + 16)));
982 if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
983 bufs[num_rx] = eth_sg_fd_to_mbuf(fd,
986 bufs[num_rx] = eth_fd_to_mbuf(fd,
989 if (eth_data->dev_conf.rxmode.offloads &
990 DEV_RX_OFFLOAD_VLAN_STRIP) {
991 rte_vlan_strip(bufs[num_rx]);
998 /* Last VDQ provided all packets and more packets are requested */
999 } while (next_pull && num_pulled == dpaa2_dqrr_size);
1001 dpaa2_q->rx_pkts += num_rx;
1003 return num_rx;
1004 }
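/* Drain the Tx confirmation queue, releasing the transmitted buffers back to
 * their buffer pools and, when IEEE1588 is enabled, recording the Tx
 * timestamp from the frame annotation.
 */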
1006 uint16_t dpaa2_dev_tx_conf(void *queue)
1008 /* Function receives Tx confirmation frames for a given device and VQ */
1009 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1010 struct qbman_result *dq_storage;
1011 uint32_t fqid = dpaa2_q->fqid;
1012 int ret, num_tx_conf = 0, num_pulled;
1013 uint8_t pending, status;
1014 struct qbman_swp *swp;
1015 const struct qbman_fd *fd, *next_fd;
1016 struct qbman_pull_desc pulldesc;
1017 struct qbman_release_desc releasedesc;
1020 #if defined(RTE_LIBRTE_IEEE1588)
1021 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1022 struct dpaa2_dev_priv *priv = eth_data->dev_private;
1023 struct dpaa2_annot_hdr *annotation;
1026 if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1027 ret = dpaa2_affine_qbman_swp();
1030 "Failed to allocate IO portal, tid: %d\n",
1035 swp = DPAA2_PER_LCORE_PORTAL;
1038 dq_storage = dpaa2_q->q_storage->dq_storage[0];
1039 qbman_pull_desc_clear(&pulldesc);
1040 qbman_pull_desc_set_fq(&pulldesc, fqid);
1041 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1042 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
1044 qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
1047 if (qbman_swp_pull(swp, &pulldesc)) {
1048 DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
1050 /* Portal was busy, try again */
1056 rte_prefetch0((void *)((size_t)(dq_storage + 1)));
1057 /* Check if the previously issued command is completed. */
1058 while (!qbman_check_command_complete(dq_storage))
1064 /* Loop until the dq_storage is updated with
1065 * new token by QBMAN
1067 while (!qbman_check_new_result(dq_storage))
1069 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
1070 /* Check whether the last pull command has expired and
1071 * set the condition for loop termination.
1072 */
1073 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1075 /* Check for valid frame. */
1076 status = qbman_result_DQ_flags(dq_storage);
1077 if (unlikely((status &
1078 QBMAN_DQ_STAT_VALIDFRAME) == 0))
1081 fd = qbman_result_DQ_fd(dq_storage);
1083 next_fd = qbman_result_DQ_fd(dq_storage + 1);
1084 /* Prefetch Annotation address for the parse results */
1085 rte_prefetch0((void *)(size_t)
1086 (DPAA2_GET_FD_ADDR(next_fd) +
1087 DPAA2_FD_PTA_SIZE + 16));
1089 bpid = DPAA2_GET_FD_BPID(fd);
1091 /* Create a release descriptor required for releasing
1092 * buffers into QBMAN
1094 qbman_release_desc_clear(&releasedesc);
1095 qbman_release_desc_set_bpid(&releasedesc, bpid);
1097 buf = DPAA2_GET_FD_ADDR(fd);
1098 /* feed them to bman */
1100 ret = qbman_swp_release(swp, &releasedesc,
1102 } while (ret == -EBUSY);
1107 #if defined(RTE_LIBRTE_IEEE1588)
1108 annotation = (struct dpaa2_annot_hdr *)((size_t)
1109 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
1110 DPAA2_FD_PTA_SIZE);
1111 priv->tx_timestamp = annotation->word2;
1115 /* Last VDQ provided all packets and more packets are requested */
1116 } while (num_pulled == dpaa2_dqrr_size);
1118 dpaa2_q->rx_pkts += num_tx_conf;
1123 /* Configure the egress frame annotation for timestamp update */
1124 static void enable_tx_tstamp(struct qbman_fd *fd)
1126 struct dpaa2_faead *fd_faead;
1128 /* Set frame annotation status field as valid */
1129 (fd)->simple.frc |= DPAA2_FD_FRC_FASV;
1131 /* Set frame annotation egress action descriptor as valid */
1132 (fd)->simple.frc |= DPAA2_FD_FRC_FAEADV;
1134 /* Set Annotation Length as 128B */
1135 (fd)->simple.ctrl |= DPAA2_FD_CTRL_ASAL;
1137 /* enable update of confirmation frame annotation */
1138 fd_faead = (struct dpaa2_faead *)((size_t)
1139 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
1140 DPAA2_FD_PTA_SIZE + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET);
1141 fd_faead->ctrl = DPAA2_ANNOT_FAEAD_A2V | DPAA2_ANNOT_FAEAD_UPDV |
1142 DPAA2_ANNOT_FAEAD_UPD;
1146 * Callback to handle sending packets through WRIOP based interface
1149 dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1151 /* Function to transmit the frames to given device and VQ*/
1152 uint32_t loop, retry_count;
1154 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1155 struct rte_mbuf *mi;
1156 uint32_t frames_to_send;
1157 struct rte_mempool *mp;
1158 struct qbman_eq_desc eqdesc;
1159 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1160 struct qbman_swp *swp;
1161 uint16_t num_tx = 0;
1163 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1164 struct dpaa2_dev_priv *priv = eth_data->dev_private;
1165 uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1166 struct rte_mbuf **orig_bufs = bufs;
1168 if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1169 ret = dpaa2_affine_qbman_swp();
1172 "Failed to allocate IO portal, tid: %d\n",
1177 swp = DPAA2_PER_LCORE_PORTAL;
1179 DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
1180 eth_data, dpaa2_q->fqid);
1182 #ifdef RTE_LIBRTE_IEEE1588
1183 /* The IEEE1588 driver needs a pointer to the Tx confirmation queue
1184 * corresponding to the last transmitted packet, for reading
1185 * the timestamp.
1186 */
1187 priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
1188 dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
1191 /*Prepare enqueue descriptor*/
1192 qbman_eq_desc_clear(&eqdesc);
1193 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1194 qbman_eq_desc_set_fq(&eqdesc, dpaa2_q->fqid);
1196 /*Clear the unused FD fields before sending*/
1198 /*Check if the queue is congested*/
1200 while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1202 /* Retry for some time before giving up */
1203 if (retry_count > CONG_RETRY_COUNT)
1207 frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1208 dpaa2_eqcr_size : nb_pkts;
1210 for (loop = 0; loop < frames_to_send; loop++) {
1211 if (*dpaa2_seqn(*bufs)) {
1212 uint8_t dqrr_index = *dpaa2_seqn(*bufs) - 1;
1214 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
1216 DPAA2_PER_LCORE_DQRR_SIZE--;
1217 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1218 *dpaa2_seqn(*bufs) = DPAA2_INVALID_MBUF_SEQN;
1221 if (likely(RTE_MBUF_DIRECT(*bufs))) {
1223 /* Check the basic scenario and set
1224 * the FD appropriately here itself.
1226 if (likely(mp && mp->ops_index ==
1227 priv->bp_list->dpaa2_ops_index &&
1228 (*bufs)->nb_segs == 1 &&
1229 rte_mbuf_refcnt_read((*bufs)) == 1)) {
1230 if (unlikely(((*bufs)->ol_flags
1231 & PKT_TX_VLAN_PKT) ||
1232 (eth_data->dev_conf.txmode.offloads
1233 & DEV_TX_OFFLOAD_VLAN_INSERT))) {
1234 ret = rte_vlan_insert(bufs);
1238 DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1239 &fd_arr[loop], mempool_to_bpid(mp));
1241 #ifdef RTE_LIBRTE_IEEE1588
1242 enable_tx_tstamp(&fd_arr[loop]);
1247 mi = rte_mbuf_from_indirect(*bufs);
1251 if (unlikely(RTE_MBUF_HAS_EXTBUF(*bufs))) {
1252 if (unlikely((*bufs)->nb_segs > 1)) {
1253 if (eth_mbuf_to_sg_fd(*bufs,
1258 eth_mbuf_to_fd(*bufs,
1262 #ifdef RTE_LIBRTE_IEEE1588
1263 enable_tx_tstamp(&fd_arr[loop]);
1268 /* Not a hw_pkt pool allocated frame */
1269 if (unlikely(!mp || !priv->bp_list)) {
1270 DPAA2_PMD_ERR("Err: No buffer pool attached");
1274 if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
1275 (eth_data->dev_conf.txmode.offloads
1276 & DEV_TX_OFFLOAD_VLAN_INSERT))) {
1277 int ret = rte_vlan_insert(bufs);
1281 if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1282 DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1283 /* alloc should be from the default buffer pool
1284 * attached to this interface
1286 bpid = priv->bp_list->buf_pool.bpid;
1288 if (unlikely((*bufs)->nb_segs > 1)) {
1289 DPAA2_PMD_ERR("S/G support not added"
1290 " for non hw offload buffer");
1293 if (eth_copy_mbuf_to_fd(*bufs,
1294 &fd_arr[loop], bpid)) {
1297 /* free the original packet */
1298 rte_pktmbuf_free(*bufs);
1300 bpid = mempool_to_bpid(mp);
1301 if (unlikely((*bufs)->nb_segs > 1)) {
1302 if (eth_mbuf_to_sg_fd(*bufs,
1307 eth_mbuf_to_fd(*bufs,
1308 &fd_arr[loop], bpid);
1311 #ifdef RTE_LIBRTE_IEEE1588
1312 enable_tx_tstamp(&fd_arr[loop]);
1319 while (loop < frames_to_send) {
1320 ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1321 &fd_arr[loop], &flags[loop],
1322 frames_to_send - loop);
1323 if (unlikely(ret < 0)) {
1325 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1339 dpaa2_q->tx_pkts += num_tx;
1342 while (loop < num_tx) {
1343 if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
1344 rte_pktmbuf_free(*orig_bufs);
1352 /* send any already prepared fd */
1358 ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1362 if (unlikely(ret < 0)) {
1364 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1374 dpaa2_q->tx_pkts += num_tx;
1377 while (loop < num_tx) {
1378 if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
1379 rte_pktmbuf_free(*orig_bufs);
1380 orig_bufs++;
1381 loop++;
1382 }
1384 return num_tx;
1385 }
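/* Free the mbuf attached to a completed enqueue-response entry. */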
1388 dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci)
1390 struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1391 struct qbman_fd *fd;
1394 fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
1396 /* Setting port id does not matter as we are to free the mbuf */
1397 m = eth_fd_to_mbuf(fd, 0);
1398 rte_pktmbuf_free(m);
1399 }
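/* Fill an enqueue descriptor for ordered/atomic traffic: program order
 * restoration (ORP) or DQRR consume (DCA) information from the mbuf's
 * sequence number.
 */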
1402 dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
1404 struct qbman_eq_desc *eqdesc)
1406 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1407 struct dpaa2_dev_priv *priv = eth_data->dev_private;
1408 struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1409 struct eqresp_metadata *eqresp_meta;
1410 uint16_t orpid, seqnum;
1413 qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid);
1415 if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
1416 orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
1417 DPAA2_EQCR_OPRID_SHIFT;
1418 seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
1419 DPAA2_EQCR_SEQNUM_SHIFT;
1421 if (!priv->en_loose_ordered) {
1422 qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
1423 qbman_eq_desc_set_response(eqdesc, (uint64_t)
1424 DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
1425 dpio_dev->eqresp_pi]), 1);
1426 qbman_eq_desc_set_token(eqdesc, 1);
1428 eqresp_meta = &dpio_dev->eqresp_meta[
1429 dpio_dev->eqresp_pi];
1430 eqresp_meta->dpaa2_q = dpaa2_q;
1431 eqresp_meta->mp = m->pool;
1433 dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
1434 dpio_dev->eqresp_pi++ :
1435 (dpio_dev->eqresp_pi = 0);
1437 qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
1440 dq_idx = *dpaa2_seqn(m) - 1;
1441 qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
1442 DPAA2_PER_LCORE_DQRR_SIZE--;
1443 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
1445 *dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
1448 /* Callback to handle sending ordered packets through WRIOP based interface */
1450 dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1452 /* Function to transmit the frames to given device and VQ*/
1453 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1454 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1455 struct dpaa2_dev_priv *priv = eth_data->dev_private;
1456 struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
1457 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1458 struct rte_mbuf *mi;
1459 struct rte_mempool *mp;
1460 struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1461 struct qbman_swp *swp;
1462 uint32_t frames_to_send, num_free_eq_desc;
1463 uint32_t loop, retry_count;
1465 uint16_t num_tx = 0;
1468 if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1469 ret = dpaa2_affine_qbman_swp();
1472 "Failed to allocate IO portal, tid: %d\n",
1477 swp = DPAA2_PER_LCORE_PORTAL;
1479 DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
1480 eth_data, dpaa2_q->fqid);
1482 /* This would also handle normal and atomic queues as any type
1483 * of packet can be enqueued when ordered queues are being used.
1486 /*Check if the queue is congested*/
1488 while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1490 /* Retry for some time before giving up */
1491 if (retry_count > CONG_RETRY_COUNT)
1495 frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1496 dpaa2_eqcr_size : nb_pkts;
1498 if (!priv->en_loose_ordered) {
1499 if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
1500 num_free_eq_desc = dpaa2_free_eq_descriptors();
1501 if (num_free_eq_desc < frames_to_send)
1502 frames_to_send = num_free_eq_desc;
1506 for (loop = 0; loop < frames_to_send; loop++) {
1507 /*Prepare enqueue descriptor*/
1508 qbman_eq_desc_clear(&eqdesc[loop]);
1510 if (*dpaa2_seqn(*bufs)) {
1511 /* Use only queue 0 for Tx in case of atomic/
1512 * ordered packets as packets can get unordered
1513 * when being transmitted out from the interface.
1514 */
1515 dpaa2_set_enqueue_descriptor(order_sendq,
1519 qbman_eq_desc_set_no_orp(&eqdesc[loop],
1520 DPAA2_EQ_RESP_ERR_FQ);
1521 qbman_eq_desc_set_fq(&eqdesc[loop],
1525 if (likely(RTE_MBUF_DIRECT(*bufs))) {
1527 /* Check the basic scenario and set
1528 * the FD appropriately here itself.
1530 if (likely(mp && mp->ops_index ==
1531 priv->bp_list->dpaa2_ops_index &&
1532 (*bufs)->nb_segs == 1 &&
1533 rte_mbuf_refcnt_read((*bufs)) == 1)) {
1534 if (unlikely((*bufs)->ol_flags
1535 & PKT_TX_VLAN_PKT)) {
1536 ret = rte_vlan_insert(bufs);
1540 DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1542 mempool_to_bpid(mp));
1547 mi = rte_mbuf_from_indirect(*bufs);
1550 /* Not a hw_pkt pool allocated frame */
1551 if (unlikely(!mp || !priv->bp_list)) {
1552 DPAA2_PMD_ERR("Err: No buffer pool attached");
1556 if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1557 DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1558 /* alloc should be from the default buffer pool
1559 * attached to this interface
1561 bpid = priv->bp_list->buf_pool.bpid;
1563 if (unlikely((*bufs)->nb_segs > 1)) {
1565 "S/G not supp for non hw offload buffer");
1568 if (eth_copy_mbuf_to_fd(*bufs,
1569 &fd_arr[loop], bpid)) {
1572 /* free the original packet */
1573 rte_pktmbuf_free(*bufs);
1575 bpid = mempool_to_bpid(mp);
1576 if (unlikely((*bufs)->nb_segs > 1)) {
1577 if (eth_mbuf_to_sg_fd(*bufs,
1583 eth_mbuf_to_fd(*bufs,
1584 &fd_arr[loop], bpid);
1592 while (loop < frames_to_send) {
1593 ret = qbman_swp_enqueue_multiple_desc(swp,
1594 &eqdesc[loop], &fd_arr[loop],
1595 frames_to_send - loop);
1596 if (unlikely(ret < 0)) {
1598 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1612 dpaa2_q->tx_pkts += num_tx;
1616 /* send any already prepared fd */
1622 ret = qbman_swp_enqueue_multiple_desc(swp,
1623 &eqdesc[loop], &fd_arr[i], loop - i);
1624 if (unlikely(ret < 0)) {
1626 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1636 dpaa2_q->tx_pkts += num_tx;
1640 /**
1641 * Dummy DPDK callback for TX.
1642 *
1643 * This function is used to temporarily replace the real callback during
1644 * unsafe control operations on the queue, or in case of error.
1645 *
1646 * @param queue
1647 *   Generic pointer to TX queue structure.
1648 * @param bufs
1649 *   Packets to transmit.
1650 * @param nb_pkts
1651 *   Number of packets in array.
1652 *
1653 * @return
1654 *   Number of packets successfully transmitted (<= nb_pkts).
1655 */
1657 dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1665 #if defined(RTE_TOOLCHAIN_GCC)
1666 #pragma GCC diagnostic push
1667 #pragma GCC diagnostic ignored "-Wcast-qual"
1668 #elif defined(RTE_TOOLCHAIN_CLANG)
1669 #pragma clang diagnostic push
1670 #pragma clang diagnostic ignored "-Wcast-qual"
1673 /* This function loopbacks all the received packets.*/
1675 dpaa2_dev_loopback_rx(void *queue,
1676 struct rte_mbuf **bufs __rte_unused,
1679 /* Function receives frames for a given device and VQ */
1680 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1681 struct qbman_result *dq_storage, *dq_storage1 = NULL;
1682 uint32_t fqid = dpaa2_q->fqid;
1683 int ret, num_rx = 0, num_tx = 0, pull_size;
1684 uint8_t pending, status;
1685 struct qbman_swp *swp;
1686 struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
1687 struct qbman_pull_desc pulldesc;
1688 struct qbman_eq_desc eqdesc;
1689 struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
1690 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1691 struct dpaa2_dev_priv *priv = eth_data->dev_private;
1692 struct dpaa2_queue *tx_q = priv->tx_vq[0];
1693 /* todo - currently we are using 1st TX queue only for loopback*/
1695 if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
1696 ret = dpaa2_affine_qbman_ethrx_swp();
1698 DPAA2_PMD_ERR("Failure in affining portal");
1702 swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
1703 pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
1704 if (unlikely(!q_storage->active_dqs)) {
1705 q_storage->toggle = 0;
1706 dq_storage = q_storage->dq_storage[q_storage->toggle];
1707 q_storage->last_num_pkts = pull_size;
1708 qbman_pull_desc_clear(&pulldesc);
1709 qbman_pull_desc_set_numframes(&pulldesc,
1710 q_storage->last_num_pkts);
1711 qbman_pull_desc_set_fq(&pulldesc, fqid);
1712 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1713 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
1714 if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1715 while (!qbman_check_command_complete(
1717 DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1719 clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1722 if (qbman_swp_pull(swp, &pulldesc)) {
1724 "VDQ command not issued.QBMAN busy\n");
1725 /* Portal was busy, try again */
1730 q_storage->active_dqs = dq_storage;
1731 q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
1732 set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
1736 dq_storage = q_storage->active_dqs;
1737 rte_prefetch0((void *)(size_t)(dq_storage));
1738 rte_prefetch0((void *)(size_t)(dq_storage + 1));
1740 /* Prepare next pull descriptor. This will give space for the
1741 * prefetching done on DQRR entries.
1742 */
1743 q_storage->toggle ^= 1;
1744 dq_storage1 = q_storage->dq_storage[q_storage->toggle];
1745 qbman_pull_desc_clear(&pulldesc);
1746 qbman_pull_desc_set_numframes(&pulldesc, pull_size);
1747 qbman_pull_desc_set_fq(&pulldesc, fqid);
1748 qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
1749 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
1751 /*Prepare enqueue descriptor*/
1752 qbman_eq_desc_clear(&eqdesc);
1753 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1754 qbman_eq_desc_set_response(&eqdesc, 0, 0);
1755 qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);
1757 /* Check if the previously issued command is completed.
1758 * Also seems like the SWP is shared between the Ethernet Driver
1759 * and the SEC driver.
1761 while (!qbman_check_command_complete(dq_storage))
1763 if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
1764 clear_swp_active_dqs(q_storage->active_dpio_id);
1769 /* Loop until the dq_storage is updated with
1770 * new token by QBMAN
1772 while (!qbman_check_new_result(dq_storage))
1774 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
1775 /* Check whether the last pull command has expired and
1776 * set the condition for loop termination.
1777 */
1778 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1780 /* Check for valid frame. */
1781 status = qbman_result_DQ_flags(dq_storage);
1782 if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
1785 fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage);
1791 while (num_tx < num_rx) {
1792 num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
1793 &fd[num_tx], 0, num_rx - num_tx);
1796 if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1797 while (!qbman_check_command_complete(
1798 get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1800 clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1802 /* issue a volatile dequeue command for next pull */
1804 if (qbman_swp_pull(swp, &pulldesc)) {
1805 DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
1806 "QBMAN is busy (2)\n");
1811 q_storage->active_dqs = dq_storage1;
1812 q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
1813 set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
1815 dpaa2_q->rx_pkts += num_rx;
1816 dpaa2_q->tx_pkts += num_tx;
1820 #if defined(RTE_TOOLCHAIN_GCC)
1821 #pragma GCC diagnostic pop
1822 #elif defined(RTE_TOOLCHAIN_CLANG)
1823 #pragma clang diagnostic pop