/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016-2021 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_dev.h>

#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_pmd_logs.h"
#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"
static inline uint32_t __rte_hot
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
			struct dpaa2_annot_hdr *annotation);

static void enable_tx_tstamp(struct qbman_fd *fd) __rte_unused;
static inline rte_mbuf_timestamp_t *
dpaa2_timestamp_dynfield(struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf,
		dpaa2_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
}
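
/* Populate a contiguous (single buffer) frame descriptor from an mbuf:
 * address, length, buffer pool id and data offset are taken from the mbuf,
 * while the FRC, CTRL and FLC fields are reset before enqueue.
 */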
#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
	DPAA2_SET_FD_FRC(_fd, 0); \
	DPAA2_RESET_FD_CTRL(_fd); \
	DPAA2_RESET_FD_FLC(_fd); \
} while (0)
static inline void __rte_hot
dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
		       void *hw_annot_addr)
{
	uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	m->packet_type = RTE_PTYPE_UNKNOWN;
	switch (frc) {
	case DPAA2_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA2_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA2_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA2_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
		break;
	case DPAA2_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
		break;
	default:
		m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
	}
	m->hash.rss = fd->simple.flc_hi;
	m->ol_flags |= PKT_RX_RSS_HASH;

	if (dpaa2_enable_ts[m->port]) {
		*dpaa2_timestamp_dynfield(m) = annotation->word2;
		m->ol_flags |= dpaa2_timestamp_rx_dynflag;
		DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "",
				*dpaa2_timestamp_dynfield(m));
	}

	DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
		"ol_flags =0x%" PRIx64 "",
		frc, m->packet_type, m->ol_flags);
}
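
/* Slow-path Rx parser: walks the hardware annotation words (frame
 * annotation status and parse results) to build the mbuf packet_type and
 * offload flags when the fast FRC-based classification does not apply.
 */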
static inline uint32_t __rte_hot
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
			struct dpaa2_annot_hdr *annotation)
{
	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
	uint16_t *vlan_tci;

	DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
			"(4)=0x%" PRIx64 "\t",
			annotation->word3, annotation->word4);

#if defined(RTE_LIBRTE_IEEE1588)
	if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP))
		mbuf->ol_flags |= PKT_RX_IEEE1588_PTP;
#endif

	if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= PKT_RX_VLAN;
		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ;
		pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
	}

	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
		goto parse_done;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word3, L2_MPLS_1_PRESENT |
			     L2_MPLS_N_PRESENT))
		pkt_type |= RTE_PTYPE_L2_ETHER_MPLS;

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
			     L3_IPV4_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV4;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
		    L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
		   L3_IPV6_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV6;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
		    L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
	    L3_IP_1_MORE_FRAGMENT |
	    L3_IP_N_FIRST_FRAGMENT |
	    L3_IP_N_MORE_FRAGMENT)) {
		pkt_type |= RTE_PTYPE_L4_FRAG;
		goto parse_done;
	} else {
		pkt_type |= RTE_PTYPE_L4_NONFRAG;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_UDP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_TCP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_SCTP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_ICMP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
		pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
	return pkt_type;
}
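
/* Fast-path Rx parser: records checksum status and the Rx timestamp, then
 * derives the common L2/L3/L4 packet types directly from annotation word4,
 * falling back to dpaa2_dev_rx_parse_slow() for anything less common.
 */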
static inline uint32_t __rte_hot
dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
{
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
			   annotation->word4);

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

	if (dpaa2_enable_ts[mbuf->port]) {
		*dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
		mbuf->ol_flags |= dpaa2_timestamp_rx_dynflag;
		DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "",
				*dpaa2_timestamp_dynfield(mbuf));
	}

	/* Check detailed parsing requirement */
	if (annotation->word3 & 0x7FFFFC3FFFF)
		return dpaa2_dev_rx_parse_slow(mbuf, annotation);

	/* Return some common types from parse processing */
	switch (annotation->word4) {
	case DPAA2_L3_IPv4:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
	case DPAA2_L3_IPv6:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
	case DPAA2_L3_IPv4_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv4_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_UDP;
	case DPAA2_L3_IPv6_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv6_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_UDP;
	default:
		break;
	}

	return dpaa2_dev_rx_parse_slow(mbuf, annotation);
}
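
/* Convert a scatter-gather frame descriptor received from QBMAN into a
 * chained mbuf: each S/G entry becomes one segment, and the buffer that
 * carried the S/G table itself is released as a separate temporary mbuf.
 */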
static inline struct rte_mbuf *__rte_hot
eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
		  int port_id)
{
	struct qbman_sge *sgt, *sge;
	size_t sg_addr, fd_addr;
	int i = 0;
	void *hw_annot_addr;
	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

	fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
	hw_annot_addr = (void *)(fd_addr + DPAA2_FD_PTA_SIZE);

	/* Get Scatter gather table address */
	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));

	sge = &sgt[i++];
	sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

	/* First Scatter gather entry */
	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	/* Prepare all the metadata for first segment */
	first_seg->buf_addr = (uint8_t *)sg_addr;
	first_seg->ol_flags = 0;
	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
	first_seg->data_len = sge->length & 0x1FFFF;
	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
	first_seg->nb_segs = 1;
	first_seg->next = NULL;
	first_seg->port = port_id;
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(first_seg, fd, hw_annot_addr);
	else
		first_seg->packet_type =
			dpaa2_dev_rx_parse(first_seg, hw_annot_addr);

	rte_mbuf_refcnt_set(first_seg, 1);
	cur_seg = first_seg;
	while (!DPAA2_SG_IS_FINAL(sge)) {
		sge = &sgt[i++];
		sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
				DPAA2_GET_FLE_ADDR(sge));
		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
		next_seg->buf_addr = (uint8_t *)sg_addr;
		next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
		next_seg->data_len = sge->length & 0x1FFFF;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(next_seg, 1);
		cur_seg->next = next_seg;
		next_seg->next = NULL;
		cur_seg = next_seg;
	}
	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	rte_mbuf_refcnt_set(temp, 1);
	rte_pktmbuf_free_seg(temp);

	return (void *)first_seg;
}
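
/* Convert a contiguous frame descriptor into a single-segment mbuf located
 * inline in the same buffer, refreshing the fields that may have changed
 * since the buffer was last used for transmission.
 */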
static inline struct rte_mbuf *__rte_hot
eth_fd_to_mbuf(const struct qbman_fd *fd,
	       int port_id)
{
	void *v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
	void *hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* need to repopulate some of the fields,
	 * as they may have changed in last transmission
	 */
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
	mbuf->pkt_len = mbuf->data_len;
	mbuf->port = port_id;
	mbuf->next = NULL;
	rte_mbuf_refcnt_set(mbuf, 1);

	/* Parse the packet */
	/* parse results for LX2 are there in FRC field of FD.
	 * For other DPAA2 platforms, parse results are after
	 * the private - sw annotation area
	 */
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(mbuf, fd, hw_annot_addr);
	else
		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, hw_annot_addr);

	DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	return mbuf;
}
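
/* Build a scatter-gather frame descriptor from a (possibly multi-segment)
 * mbuf. The S/G table is placed in the headroom of the first buffer when
 * there is room for it; otherwise a fresh buffer is allocated from the pool
 * to hold it.
 */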
static int __rte_noinline __rte_hot
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		  struct qbman_fd *fd,
		  struct rte_mempool *mp, uint16_t bpid)
{
	struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
	struct qbman_sge *sgt, *sge = NULL;
	int i, offset = 0;

#ifdef RTE_LIBRTE_IEEE1588
	/* annotation area for timestamp in first buffer */
	offset = 0x64;
#endif
	if (RTE_MBUF_DIRECT(mbuf) &&
	    (mbuf->data_off > (mbuf->nb_segs * sizeof(struct qbman_sge)
	    + offset))) {
		temp = mbuf;
		if (rte_mbuf_refcnt_read(temp) > 1) {
			/* If refcnt > 1, invalid bpid is set to ensure
			 * buffer is not freed by HW
			 */
			fd->simple.bpid_offset = 0;
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(temp, -1);
		} else {
			DPAA2_SET_ONLY_FD_BPID(fd, bpid);
		}
		DPAA2_SET_FD_OFFSET(fd, offset);
	} else {
		temp = rte_pktmbuf_alloc(mp);
		if (temp == NULL) {
			DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
			return -ENOMEM;
		}
		DPAA2_SET_ONLY_FD_BPID(fd, bpid);
		DPAA2_SET_FD_OFFSET(fd, temp->data_off);
	}
	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
	DPAA2_RESET_FD_FRC(fd);
	DPAA2_RESET_FD_CTRL(fd);
	DPAA2_RESET_FD_FLC(fd);
	/*Set Scatter gather table and Scatter gather entries*/
	sgt = (struct qbman_sge *)(
			(size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_GET_FD_OFFSET(fd));

	for (i = 0; i < mbuf->nb_segs; i++) {
		sge = &sgt[i];
		/*Resetting the buffer pool id and offset field*/
		sge->fin_bpid_offset = 0;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
		DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
		sge->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			/* if we are using inline SGT in same buffers
			 * set the FLE FMT as Frame Data Section
			 */
			if (temp == cur_seg) {
				DPAA2_SG_SET_FORMAT(sge, qbman_fd_list);
				DPAA2_SET_FLE_IVP(sge);
			} else {
				if (rte_mbuf_refcnt_read(cur_seg) > 1) {
					/* If refcnt > 1, invalid bpid is set
					 * to ensure buffer is not freed by HW
					 */
					DPAA2_SET_FLE_IVP(sge);
					rte_mbuf_refcnt_update(cur_seg, -1);
				} else {
					DPAA2_SET_FLE_BPID(sge,
						mempool_to_bpid(cur_seg->pool));
				}
			}
			cur_seg = cur_seg->next;
		} else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
			DPAA2_SET_FLE_IVP(sge);
			cur_seg = cur_seg->next;
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * owner buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
			} else {
				DPAA2_SET_FLE_BPID(sge,
						   mempool_to_bpid(mi->pool));
				rte_mbuf_refcnt_update(mi, 1);
			}
			prev_seg = cur_seg;
			cur_seg = cur_seg->next;
			prev_seg->next = NULL;
			rte_pktmbuf_free(prev_seg);
		}
	}
	DPAA2_SG_SET_FINAL(sge, true);
	return 0;
}
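
/* Build a contiguous frame descriptor from a single-segment mbuf, marking
 * the FD with an invalid buffer pool id (IVP) whenever the buffer must not
 * be freed by hardware (shared refcount, external or indirect buffers).
 */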
static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid) __rte_unused;

static void __rte_noinline __rte_hot
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid)
{
	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);

	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(mbuf, -1);
		}
	} else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
		DPAA2_SET_FD_IVP(fd);
	} else {
		struct rte_mbuf *mi;

		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1)
			DPAA2_SET_FD_IVP(fd);
		else
			rte_mbuf_refcnt_update(mi, 1);
		rte_pktmbuf_free(mbuf);
	}
}
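
/* Fallback for buffers that do not come from a DPAA2-backed mempool: copy
 * the packet into a freshly allocated hardware buffer and build the FD
 * around the copy.
 */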
static inline int __rte_hot
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
		    struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *m;
	void *mb = NULL;

	if (rte_dpaa2_mbuf_alloc_bulk(
		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
		DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
		return -1;
	}
	m = (struct rte_mbuf *)mb;
	memcpy((char *)m->buf_addr + mbuf->data_off,
	       (void *)((char *)mbuf->buf_addr + mbuf->data_off),
	       mbuf->pkt_len);

	/* Copy required fields */
	m->data_off = mbuf->data_off;
	m->ol_flags = mbuf->ol_flags;
	m->packet_type = mbuf->packet_type;
	m->tx_offload = mbuf->tx_offload;

	DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);

	DPAA2_PMD_DP_DEBUG(
		"mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
		" meta: %d, off: %d, len: %d\n",
		(void *)mbuf,
		mbuf->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}
/* This function assumes that the caller keeps the same value of nb_pkts
 * across calls for a given queue; if that is not the case, it is better to
 * use the non-prefetch version of the Rx call.
 * It will return the packets as requested in the previous call, without
 * honoring the current nb_pkts or bufs space.
 */
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function receive frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, pull_size;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
#if defined(RTE_LIBRTE_IEEE1588)
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
#endif

	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
		ret = dpaa2_affine_qbman_ethrx_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}

	if (unlikely(!rte_dpaa2_bpid_info &&
		     rte_eal_process_type() == RTE_PROC_SECONDARY))
		rte_dpaa2_bpid_info = dpaa2_q->bp_array;

	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(
			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
						   " QBMAN is busy (1)\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare next pull descriptor. This will give space for the
	 * prefetching done on DQRR entries
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/* Check if the previous issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd = qbman_result_DQ_fd(dq_storage);

#ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
		if (dpaa2_svr_family != SVR_LX2160A) {
			const struct qbman_fd *next_fd =
				qbman_result_DQ_fd(dq_storage + 1);
			/* Prefetch Annotation address for the parse results */
			rte_prefetch0(DPAA2_IOVA_TO_VADDR((DPAA2_GET_FD_ADDR(
				next_fd) + DPAA2_FD_PTA_SIZE + 16)));
		}
#endif

		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
			bufs[num_rx] = eth_sg_fd_to_mbuf(fd, eth_data->port_id);
		else
			bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
#if defined(RTE_LIBRTE_IEEE1588)
		priv->rx_timestamp = *dpaa2_timestamp_dynfield(bufs[num_rx]);
#endif

		if (eth_data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_VLAN_STRIP)
			rte_vlan_strip(bufs[num_rx]);

		dq_storage++;
		num_rx++;
	} while (pending);

	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
	}
	/* issue a volatile dequeue command for next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
					   "QBMAN is busy (2)\n");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);

	dpaa2_q->rx_pkts += num_rx;

	return num_rx;
}
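
/* Eventdev Rx callbacks: translate a dequeue result delivered through the
 * QBMAN portal into an rte_event, in parallel, atomic and ordered flavors.
 */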
void __rte_hot
dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

	qbman_swp_dqrr_consume(swp, dq);
}
void __rte_hot
dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused,
			       const struct qbman_fd *fd,
			       const struct qbman_result *dq,
			       struct dpaa2_queue *rxq,
			       struct rte_event *ev)
{
	uint8_t dqrr_index;

	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

	dqrr_index = qbman_get_dqrr_idx(dq);
	*dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}
void __rte_hot
dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
				const struct qbman_fd *fd,
				const struct qbman_result *dq,
				struct dpaa2_queue *rxq,
				struct rte_event *ev)
{
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

	*dpaa2_seqn(ev->mbuf) = DPAA2_ENQUEUE_FLAG_ORP;
	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_odpid(dq) <<
		DPAA2_EQCR_OPRID_SHIFT;
	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_seqnum(dq) <<
		DPAA2_EQCR_SEQNUM_SHIFT;

	qbman_swp_dqrr_consume(swp, dq);
}
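
/* Non-prefetch Rx handler: issues a fresh volatile dequeue command per
 * burst, so nb_pkts may vary between calls (unlike the prefetch variant).
 */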
uint16_t
dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function receive frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	do {
		dq_storage = dpaa2_q->q_storage->dq_storage[0];
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);

		if (next_pull > dpaa2_dqrr_size) {
			qbman_pull_desc_set_numframes(&pulldesc,
				dpaa2_dqrr_size);
			next_pull -= dpaa2_dqrr_size;
		} else {
			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
			next_pull = 0;
		}

		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG(
					"VDQ command is not issued.QBMAN is busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}

		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
		/* Check if the previous issued command is completed. */
		while (!qbman_check_command_complete(dq_storage))
			;

		num_pulled = 0;
		pending = 1;
		do {
			/* Loop until the dq_storage is updated with
			 * new token by QBMAN
			 */
			while (!qbman_check_new_result(dq_storage))
				;
			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
			/* Check whether Last Pull command is Expired and
			 * setting Condition for Loop termination
			 */
			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
				pending = 0;
				/* Check for valid frame. */
				status = qbman_result_DQ_flags(dq_storage);
				if (unlikely((status &
					QBMAN_DQ_STAT_VALIDFRAME) == 0))
					continue;
			}
			fd = qbman_result_DQ_fd(dq_storage);

#ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
			if (dpaa2_svr_family != SVR_LX2160A) {
				const struct qbman_fd *next_fd =
					qbman_result_DQ_fd(dq_storage + 1);

				/* Prefetch Annotation address for the parse
				 * results.
				 */
				rte_prefetch0((DPAA2_IOVA_TO_VADDR(
					DPAA2_GET_FD_ADDR(next_fd) +
					DPAA2_FD_PTA_SIZE + 16)));
			}
#endif

			if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
				bufs[num_rx] = eth_sg_fd_to_mbuf(fd,
							eth_data->port_id);
			else
				bufs[num_rx] = eth_fd_to_mbuf(fd,
							eth_data->port_id);

			if (eth_data->dev_conf.rxmode.offloads &
					DEV_RX_OFFLOAD_VLAN_STRIP) {
				rte_vlan_strip(bufs[num_rx]);
			}

			dq_storage++;
			num_rx++;
			num_pulled++;
		} while (pending);

	/* Last VDQ provided all packets and more packets are requested */
	} while (next_pull && num_pulled == dpaa2_dqrr_size);

	dpaa2_q->rx_pkts += num_rx;

	return num_rx;
}
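
/* Drain the Tx confirmation queue: release the confirmed buffers back to
 * their buffer pool and, when IEEE1588 is enabled, record the Tx timestamp
 * from the frame annotation.
 */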
uint16_t dpaa2_dev_tx_conf(void *queue)
{
	/* Function receive frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_tx_conf = 0, num_pulled;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd, *next_fd;
	struct qbman_pull_desc pulldesc;
	struct qbman_release_desc releasedesc;
	uint32_t bpid;
	uint64_t buf;
#if defined(RTE_LIBRTE_IEEE1588)
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_annot_hdr *annotation;
#endif

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	do {
		dq_storage = dpaa2_q->q_storage->dq_storage[0];
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);

		qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);

		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
						   "QBMAN is busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}

		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
		/* Check if the previous issued command is completed. */
		while (!qbman_check_command_complete(dq_storage))
			;

		num_pulled = 0;
		pending = 1;
		do {
			/* Loop until the dq_storage is updated with
			 * new token by QBMAN
			 */
			while (!qbman_check_new_result(dq_storage))
				;
			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
			/* Check whether Last Pull command is Expired and
			 * setting Condition for Loop termination
			 */
			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
				pending = 0;
				/* Check for valid frame. */
				status = qbman_result_DQ_flags(dq_storage);
				if (unlikely((status &
					QBMAN_DQ_STAT_VALIDFRAME) == 0))
					continue;
			}
			fd = qbman_result_DQ_fd(dq_storage);

			next_fd = qbman_result_DQ_fd(dq_storage + 1);
			/* Prefetch Annotation address for the parse results */
			rte_prefetch0((void *)(size_t)
				(DPAA2_GET_FD_ADDR(next_fd) +
				 DPAA2_FD_PTA_SIZE + 16));

			bpid = DPAA2_GET_FD_BPID(fd);

			/* Create a release descriptor required for releasing
			 * buffers into QBMAN
			 */
			qbman_release_desc_clear(&releasedesc);
			qbman_release_desc_set_bpid(&releasedesc, bpid);

			buf = DPAA2_GET_FD_ADDR(fd);
			/* feed them to bman */
			do {
				ret = qbman_swp_release(swp, &releasedesc,
							&buf, 1);
			} while (ret == -EBUSY);

			dq_storage++;
			num_tx_conf++;
			num_pulled++;
#if defined(RTE_LIBRTE_IEEE1588)
			annotation = (struct dpaa2_annot_hdr *)((size_t)
				DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
				DPAA2_FD_PTA_SIZE);
			priv->tx_timestamp = annotation->word2;
#endif
		} while (pending);

	/* Last VDQ provided all packets and more packets are requested */
	} while (num_pulled == dpaa2_dqrr_size);

	dpaa2_q->rx_pkts += num_tx_conf;

	return num_tx_conf;
}
/* Configure the egress frame annotation for timestamp update */
static void enable_tx_tstamp(struct qbman_fd *fd)
{
	struct dpaa2_faead *fd_faead;

	/* Set frame annotation status field as valid */
	(fd)->simple.frc |= DPAA2_FD_FRC_FASV;

	/* Set frame annotation egress action descriptor as valid */
	(fd)->simple.frc |= DPAA2_FD_FRC_FAEADV;

	/* Set Annotation Length as 128B */
	(fd)->simple.ctrl |= DPAA2_FD_CTRL_ASAL;

	/* enable update of confirmation frame annotation */
	fd_faead = (struct dpaa2_faead *)((size_t)
			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
			DPAA2_FD_PTA_SIZE + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET);
	fd_faead->ctrl = DPAA2_ANNOT_FAEAD_A2V | DPAA2_ANNOT_FAEAD_UPDV |
			 DPAA2_ANNOT_FAEAD_UPD;
}
/*
 * Callback to handle sending packets through WRIOP based interface
 */
uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to given device and VQ*/
	uint32_t loop, retry_count;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	uint32_t frames_to_send;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint16_t bpid;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
	struct rte_mbuf **orig_bufs = bufs;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
			   eth_data, dpaa2_q->fqid);

#ifdef RTE_LIBRTE_IEEE1588
	/* The IEEE1588 driver needs a pointer to the tx confirmation queue
	 * corresponding to the last packet transmitted for reading
	 * the timestamp
	 */
	priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
	dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
#endif

	/*Prepare enqueue descriptor*/
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_q->fqid);

	/*Clear the unused FD fields before sending*/
	while (nb_pkts) {
		/*Check if the queue is congested*/
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto skip_tx;
		}

		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_pkts;

		for (loop = 0; loop < frames_to_send; loop++) {
			if (*dpaa2_seqn(*bufs)) {
				uint8_t dqrr_index = *dpaa2_seqn(*bufs) - 1;

				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
						dqrr_index;
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
				*dpaa2_seqn(*bufs) = DPAA2_INVALID_MBUF_SEQN;
			}

			if (likely(RTE_MBUF_DIRECT(*bufs))) {
				mp = (*bufs)->pool;
				/* Check the basic scenario and set
				 * the FD appropriately here itself.
				 */
				if (likely(mp && mp->ops_index ==
				    priv->bp_list->dpaa2_ops_index &&
				    (*bufs)->nb_segs == 1 &&
				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
					if (unlikely(((*bufs)->ol_flags
						& PKT_TX_VLAN_PKT) ||
						(eth_data->dev_conf.txmode.offloads
						& DEV_TX_OFFLOAD_VLAN_INSERT))) {
						ret = rte_vlan_insert(bufs);
						if (ret)
							goto send_n_return;
					}
					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
						&fd_arr[loop],
						mempool_to_bpid(mp));
					bufs++;
#ifdef RTE_LIBRTE_IEEE1588
					enable_tx_tstamp(&fd_arr[loop]);
#endif
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}

			if (unlikely(RTE_MBUF_HAS_EXTBUF(*bufs))) {
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							      &fd_arr[loop],
							      mp, 0))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop], 0);
				}
				bufs++;
#ifdef RTE_LIBRTE_IEEE1588
				enable_tx_tstamp(&fd_arr[loop]);
#endif
				continue;
			}

			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp || !priv->bp_list)) {
				DPAA2_PMD_ERR("Err: No buffer pool attached");
				goto send_n_return;
			}

			if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
				(eth_data->dev_conf.txmode.offloads
				& DEV_TX_OFFLOAD_VLAN_INSERT))) {
				int ret = rte_vlan_insert(bufs);
				if (ret)
					goto send_n_return;
			}
			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				bpid = priv->bp_list->buf_pool.bpid;

				if (unlikely((*bufs)->nb_segs > 1)) {
					DPAA2_PMD_ERR("S/G support not added"
						" for non hw offload buffer");
					goto send_n_return;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					goto send_n_return;
				}
				/* free the original packet */
				rte_pktmbuf_free(*bufs);
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							&fd_arr[loop],
							mp, bpid))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop], bpid);
				}
			}
#ifdef RTE_LIBRTE_IEEE1588
			enable_tx_tstamp(&fd_arr[loop]);
#endif
			bufs++;
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
					&fd_arr[loop], &flags[loop],
					frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					nb_pkts -= loop;
					goto send_n_return;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		nb_pkts -= loop;
	}
	dpaa2_q->tx_pkts += num_tx;

	loop = 0;
	while (loop < num_tx) {
		if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
			rte_pktmbuf_free(*orig_bufs);
		orig_bufs++;
		loop++;
	}

	return num_tx;

send_n_return:
	/* send any already prepared fd */
	if (loop) {
		unsigned int i = 0;

		retry_count = 0;
		while (i < loop) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
							 &fd_arr[i],
							 &flags[i],
							 loop - i);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
					break;
			} else {
				i += ret;
				retry_count = 0;
			}
		}
		num_tx += i;
	}
skip_tx:
	dpaa2_q->tx_pkts += num_tx;

	loop = 0;
	while (loop < num_tx) {
		if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
			rte_pktmbuf_free(*orig_bufs);
		orig_bufs++;
		loop++;
	}

	return num_tx;
}
static void
dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci)
{
	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
	struct qbman_fd *fd;
	struct rte_mbuf *m;

	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);

	/* Setting port id does not matter as we are to free the mbuf */
	m = eth_fd_to_mbuf(fd, 0);
	rte_pktmbuf_free(m);
}
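
/* Set up an enqueue descriptor for an atomic or ordered mbuf: ordered
 * frames carry the ODP id and sequence number recovered from dpaa2_seqn(),
 * while atomic frames request DCA consumption of the held DQRR entry.
 */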
static void
dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
			     struct rte_mbuf *m,
			     struct qbman_eq_desc *eqdesc)
{
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
	struct eqresp_metadata *eqresp_meta;
	uint16_t orpid, seqnum;
	uint8_t dq_idx;

	qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid);

	if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
		orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
			DPAA2_EQCR_OPRID_SHIFT;
		seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
			DPAA2_EQCR_SEQNUM_SHIFT;

		if (!priv->en_loose_ordered) {
			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
			qbman_eq_desc_set_response(eqdesc, (uint64_t)
				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
				dpio_dev->eqresp_pi]), 1);
			qbman_eq_desc_set_token(eqdesc, 1);

			eqresp_meta = &dpio_dev->eqresp_meta[
				dpio_dev->eqresp_pi];
			eqresp_meta->dpaa2_q = dpaa2_q;
			eqresp_meta->mp = m->pool;

			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
				dpio_dev->eqresp_pi++ :
				(dpio_dev->eqresp_pi = 0);
		} else {
			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
		}
	} else {
		dq_idx = *dpaa2_seqn(m) - 1;
		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
		DPAA2_PER_LCORE_DQRR_SIZE--;
		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
	}
	*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
}
/* Callback to handle sending ordered packets through WRIOP based interface */
uint16_t
dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to given device and VQ*/
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	struct qbman_swp *swp;
	uint32_t frames_to_send, num_free_eq_desc;
	uint32_t loop, retry_count;
	int32_t ret;
	uint16_t num_tx = 0;
	uint16_t bpid;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
			   eth_data, dpaa2_q->fqid);

	/* This would also handle normal and atomic queues as any type
	 * of packet can be enqueued when ordered queues are being used.
	 */
	while (nb_pkts) {
		/*Check if the queue is congested*/
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto skip_tx;
		}

		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_pkts;

		if (!priv->en_loose_ordered) {
			if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
				num_free_eq_desc = dpaa2_free_eq_descriptors();
				if (num_free_eq_desc < frames_to_send)
					frames_to_send = num_free_eq_desc;
			}
		}

		for (loop = 0; loop < frames_to_send; loop++) {
			/*Prepare enqueue descriptor*/
			qbman_eq_desc_clear(&eqdesc[loop]);

			if (*dpaa2_seqn(*bufs)) {
				/* Use only queue 0 for Tx in case of atomic/
				 * ordered packets as packets can get unordered
				 * when being transmitted out from the interface
				 */
				dpaa2_set_enqueue_descriptor(order_sendq,
							     (*bufs),
							     &eqdesc[loop]);
			} else {
				qbman_eq_desc_set_no_orp(&eqdesc[loop],
							 DPAA2_EQ_RESP_ERR_FQ);
				qbman_eq_desc_set_fq(&eqdesc[loop],
						     dpaa2_q->fqid);
			}

			if (likely(RTE_MBUF_DIRECT(*bufs))) {
				mp = (*bufs)->pool;
				/* Check the basic scenario and set
				 * the FD appropriately here itself.
				 */
				if (likely(mp && mp->ops_index ==
				    priv->bp_list->dpaa2_ops_index &&
				    (*bufs)->nb_segs == 1 &&
				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
					if (unlikely((*bufs)->ol_flags
						& PKT_TX_VLAN_PKT)) {
						ret = rte_vlan_insert(bufs);
						if (ret)
							goto send_n_return;
					}
					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
						&fd_arr[loop],
						mempool_to_bpid(mp));
					bufs++;
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}
			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp || !priv->bp_list)) {
				DPAA2_PMD_ERR("Err: No buffer pool attached");
				goto send_n_return;
			}

			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				bpid = priv->bp_list->buf_pool.bpid;

				if (unlikely((*bufs)->nb_segs > 1)) {
					DPAA2_PMD_ERR(
						"S/G not supp for non hw offload buffer");
					goto send_n_return;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					goto send_n_return;
				}
				/* free the original packet */
				rte_pktmbuf_free(*bufs);
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							      &fd_arr[loop],
							      mp, bpid))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop], bpid);
				}
			}
			bufs++;
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					nb_pkts -= loop;
					goto send_n_return;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		nb_pkts -= loop;
	}
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;

send_n_return:
	/* send any already prepared fd */
	if (loop) {
		unsigned int i = 0;

		retry_count = 0;
		while (i < loop) {
			ret = qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[i], loop - i);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
					break;
			} else {
				i += ret;
				retry_count = 0;
			}
		}
		num_tx += i;
	}
skip_tx:
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;
}
/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param queue
 *   Generic pointer to TX queue structure.
 * @param bufs
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= nb_pkts).
 */
uint16_t
dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	(void)queue;
	(void)bufs;
	(void)nb_pkts;

	return 0;
}
#if defined(RTE_TOOLCHAIN_GCC)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#elif defined(RTE_TOOLCHAIN_CLANG)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
/* This function loops back all the received packets */
uint16_t
dpaa2_dev_loopback_rx(void *queue,
		      struct rte_mbuf **bufs __rte_unused,
		      uint16_t nb_pkts)
{
	/* Function receive frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, num_tx = 0, pull_size;
	uint8_t pending, status;
	struct qbman_swp *swp;
	struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
	struct qbman_pull_desc pulldesc;
	struct qbman_eq_desc eqdesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_queue *tx_q = priv->tx_vq[0];
	/* todo - currently we are using 1st TX queue only for loopback*/

	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
		ret = dpaa2_affine_qbman_ethrx_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(
			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG(
					"VDQ command not issued.QBMAN busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare next pull descriptor. This will give space for the
	 * prefetching done on DQRR entries
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/*Prepare enqueue descriptor*/
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);

	/* Check if the previous issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage);

		dq_storage++;
		num_rx++;
	} while (pending);

	while (num_tx < num_rx) {
		num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
				&fd[num_tx], 0, num_rx - num_tx);
	}

	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
	}
	/* issue a volatile dequeue command for next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
					   "QBMAN is busy (2)\n");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);

	dpaa2_q->rx_pkts += num_rx;
	dpaa2_q->tx_pkts += num_tx;

	return 0;
}
#if defined(RTE_TOOLCHAIN_GCC)
#pragma GCC diagnostic pop
#elif defined(RTE_TOOLCHAIN_CLANG)
#pragma clang diagnostic pop
#endif