/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2020 NXP
 */

#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_pmd_logs.h"
#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"

static inline uint32_t __rte_hot
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
			struct dpaa2_annot_hdr *annotation);

static void enable_tx_tstamp(struct qbman_fd *fd) __rte_unused;

static inline rte_mbuf_timestamp_t *
dpaa2_timestamp_dynfield(struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf,
		dpaa2_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
}

#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
	DPAA2_SET_FD_FRC(_fd, 0); \
	DPAA2_RESET_FD_CTRL(_fd); \
	DPAA2_RESET_FD_FLC(_fd); \
} while (0)
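/* Fast Rx parsing for LX2160A: the hardware parse summary is carried in the
 * FRC field of the frame descriptor, so the packet type can be derived
 * without walking the full annotation area.
 */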
static inline void __rte_hot
dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
		       void *hw_annot_addr)
{
	uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	m->packet_type = RTE_PTYPE_UNKNOWN;
	switch (frc) {
	case DPAA2_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA2_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA2_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA2_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
		break;
	case DPAA2_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
		break;
	default:
		m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
	}
	m->hash.rss = fd->simple.flc_hi;
	m->ol_flags |= PKT_RX_RSS_HASH;

	if (dpaa2_enable_ts[m->port]) {
		*dpaa2_timestamp_dynfield(m) = annotation->word2;
		m->ol_flags |= dpaa2_timestamp_rx_dynflag;
		DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "",
				*dpaa2_timestamp_dynfield(m));
	}

	DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
		"ol_flags =0x%" PRIx64 "",
		frc, m->packet_type, m->ol_flags);
}
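/* Slow-path Rx parsing: walks the hardware annotation words to build the
 * packet type and to set VLAN/QinQ, checksum and fragmentation flags.
 */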
static inline uint32_t __rte_hot
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
			struct dpaa2_annot_hdr *annotation)
{
	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
	uint16_t *vlan_tci;

	DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
			"(4)=0x%" PRIx64 "\t",
			annotation->word3, annotation->word4);

#if defined(RTE_LIBRTE_IEEE1588)
	if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP))
		mbuf->ol_flags |= PKT_RX_IEEE1588_PTP;
#endif

	if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= PKT_RX_VLAN;
		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ;
		pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
	}

	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
		goto parse_done;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word3, L2_MPLS_1_PRESENT |
			     L2_MPLS_N_PRESENT))
		pkt_type |= RTE_PTYPE_L2_ETHER_MPLS;

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
			     L3_IPV4_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV4;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
		    L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;

	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
		   L3_IPV6_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV6;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
		    L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
	    L3_IP_1_MORE_FRAGMENT |
	    L3_IP_N_FIRST_FRAGMENT |
	    L3_IP_N_MORE_FRAGMENT)) {
		pkt_type |= RTE_PTYPE_L4_FRAG;
		goto parse_done;
	} else {
		pkt_type |= RTE_PTYPE_L4_NONFRAG;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_UDP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_TCP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_SCTP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_ICMP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
		pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
	return pkt_type;
}
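/* Rx parse entry point for non-LX2 platforms: sets checksum and timestamp
 * flags, returns the packet type for the frequent cases directly from
 * annotation word4, and falls back to the slow parse otherwise.
 */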
static inline uint32_t __rte_hot
dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
{
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
			   annotation->word4);

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

	if (dpaa2_enable_ts[mbuf->port]) {
		*dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
		mbuf->ol_flags |= dpaa2_timestamp_rx_dynflag;
		DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "",
				*dpaa2_timestamp_dynfield(mbuf));
	}

	/* Check detailed parsing requirement */
	if (annotation->word3 & 0x7FFFFC3FFFF)
		return dpaa2_dev_rx_parse_slow(mbuf, annotation);

	/* Return some common types from parse processing */
	switch (annotation->word4) {
	case DPAA2_L3_IPv4:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
	case DPAA2_L3_IPv6:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
	case DPAA2_L3_IPv4_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv4_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_UDP;
	case DPAA2_L3_IPv6_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv6_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_UDP;
	default:
		break;
	}

	return dpaa2_dev_rx_parse_slow(mbuf, annotation);
}
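/* Convert a scatter-gather frame descriptor received from hardware into a
 * chained mbuf: each SG entry becomes one segment and the buffer holding the
 * SG table itself is released at the end.
 */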
static inline struct rte_mbuf *__rte_hot
eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
		  int port_id)
{
	struct qbman_sge *sgt, *sge;
	size_t sg_addr, fd_addr;
	int i = 0;
	void *hw_annot_addr;
	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

	fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
	hw_annot_addr = (void *)(fd_addr + DPAA2_FD_PTA_SIZE);

	/* Get Scatter gather table address */
	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));

	sge = &sgt[i++];
	sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

	/* First Scatter gather entry */
	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	/* Prepare all the metadata for first segment */
	first_seg->buf_addr = (uint8_t *)sg_addr;
	first_seg->ol_flags = 0;
	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
	first_seg->data_len = sge->length & 0x1FFFF;
	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
	first_seg->nb_segs = 1;
	first_seg->next = NULL;
	first_seg->port = port_id;
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(first_seg, fd, hw_annot_addr);
	else
		first_seg->packet_type =
			dpaa2_dev_rx_parse(first_seg, hw_annot_addr);

	rte_mbuf_refcnt_set(first_seg, 1);
	cur_seg = first_seg;
	while (!DPAA2_SG_IS_FINAL(sge)) {
		sge = &sgt[i++];
		sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
				DPAA2_GET_FLE_ADDR(sge));
		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
		next_seg->buf_addr = (uint8_t *)sg_addr;
		next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
		next_seg->data_len = sge->length & 0x1FFFF;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(next_seg, 1);
		cur_seg->next = next_seg;
		next_seg->next = NULL;
		cur_seg = next_seg;
	}
	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	rte_mbuf_refcnt_set(temp, 1);
	rte_pktmbuf_free_seg(temp);

	return (void *)first_seg;
}
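/* Convert a contiguous (single-buffer) frame descriptor back into the inline
 * mbuf that lives in the same hardware buffer, refreshing the fields that may
 * have changed since the buffer was last used.
 */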
static inline struct rte_mbuf *__rte_hot
eth_fd_to_mbuf(const struct qbman_fd *fd,
	       int port_id)
{
	void *v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
	void *hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* need to repopulate some of the fields,
	 * as they may have changed in last transmission
	 */
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
	mbuf->pkt_len = mbuf->data_len;
	mbuf->port = port_id;

	rte_mbuf_refcnt_set(mbuf, 1);

	/* Parse the packet */
	/* parse results for LX2 are there in FRC field of FD.
	 * For other DPAA2 platforms, parse results are after
	 * the private - sw annotation area
	 */
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(mbuf, fd, hw_annot_addr);
	else
		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, hw_annot_addr);

	DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	return mbuf;
}
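/* Build a scatter-gather frame descriptor for a multi-segment mbuf. A buffer
 * is taken from the mbuf's pool to hold the SG table; data buffers are handed
 * to hardware unless the reference count requires keeping them.
 */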
static int __rte_noinline __rte_hot
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		  struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
	struct qbman_sge *sgt, *sge = NULL;
	int i;

	temp = rte_pktmbuf_alloc(mbuf->pool);
	if (temp == NULL) {
		DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
		return -ENOMEM;
	}

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
	DPAA2_SET_ONLY_FD_BPID(fd, bpid);
	DPAA2_SET_FD_OFFSET(fd, temp->data_off);
	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
	DPAA2_RESET_FD_FRC(fd);
	DPAA2_RESET_FD_CTRL(fd);
	/*Set Scatter gather table and Scatter gather entries*/
	sgt = (struct qbman_sge *)(
			(size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_GET_FD_OFFSET(fd));

	for (i = 0; i < mbuf->nb_segs; i++) {
		sge = &sgt[i];
		/*Resetting the buffer pool id and offset field*/
		sge->fin_bpid_offset = 0;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
		DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
		sge->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			if (rte_mbuf_refcnt_read(cur_seg) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
				rte_mbuf_refcnt_update(cur_seg, -1);
			} else
				DPAA2_SET_FLE_BPID(sge,
						mempool_to_bpid(cur_seg->pool));
			cur_seg = cur_seg->next;
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * owner buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
			} else {
				DPAA2_SET_FLE_BPID(sge,
						   mempool_to_bpid(mi->pool));
				rte_mbuf_refcnt_update(mi, 1);
			}
			prev_seg = cur_seg;
			cur_seg = cur_seg->next;
			prev_seg->next = NULL;
			rte_pktmbuf_free(prev_seg);
		}
	}
	DPAA2_SG_SET_FINAL(sge, true);
	return 0;
}
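/* Build a contiguous frame descriptor for a single-segment mbuf and hand the
 * buffer to hardware, adjusting reference counts (or marking the FD with an
 * invalid bpid) so that shared or indirect buffers are not freed by hardware.
 */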
static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid) __rte_unused;

static void __rte_noinline __rte_hot
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid)
{
	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);

	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(mbuf, -1);
		}
	} else {
		struct rte_mbuf *mi;

		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1)
			DPAA2_SET_FD_IVP(fd);
		else
			rte_mbuf_refcnt_update(mi, 1);
		rte_pktmbuf_free(mbuf);
	}
}
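/* Copy path for mbufs that do not come from a DPAA2-backed mempool: the data
 * is copied into a freshly allocated hardware buffer which is then used to
 * build the frame descriptor.
 */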
static inline int __rte_hot
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
		    struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *m;
	void *mb = NULL;

	if (rte_dpaa2_mbuf_alloc_bulk(
		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
		DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
		return -1;
	}
	m = (struct rte_mbuf *)mb;
	memcpy((char *)m->buf_addr + mbuf->data_off,
		(void *)((char *)mbuf->buf_addr + mbuf->data_off),
		mbuf->pkt_len);

	/* Copy required fields */
	m->data_off = mbuf->data_off;
	m->ol_flags = mbuf->ol_flags;
	m->packet_type = mbuf->packet_type;
	m->tx_offload = mbuf->tx_offload;

	DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);

	DPAA2_PMD_DP_DEBUG(
		"mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
		" meta: %d, off: %d, len: %d\n",
		(void *)mbuf,
		mbuf->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

/* This function assumes that caller will keep the same value for nb_pkts
 * across calls per queue, if that is not the case, better use non-prefetch
 * version of rx call.
 * It will return the packets as requested in previous call without honoring
 * the current nb_pkts or bufs space.
 */
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function receive frames for a given device and VQ*/
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, pull_size;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
#if defined(RTE_LIBRTE_IEEE1588)
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
#endif

	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
		ret = dpaa2_affine_qbman_ethrx_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}

	if (unlikely(!rte_dpaa2_bpid_info &&
		     rte_eal_process_type() == RTE_PROC_SECONDARY))
		rte_dpaa2_bpid_info = dpaa2_q->bp_array;

	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(
			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
						   " QBMAN is busy (1)\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare next pull descriptor. This will give space for the
	 * prefetching done on DQRR entries
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/* Check if the previous issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd = qbman_result_DQ_fd(dq_storage);

#ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
		if (dpaa2_svr_family != SVR_LX2160A) {
			const struct qbman_fd *next_fd =
				qbman_result_DQ_fd(dq_storage + 1);
			/* Prefetch Annotation address for the parse results */
			rte_prefetch0(DPAA2_IOVA_TO_VADDR((DPAA2_GET_FD_ADDR(
				next_fd) + DPAA2_FD_PTA_SIZE + 16)));
		}
#endif

		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
			bufs[num_rx] = eth_sg_fd_to_mbuf(fd, eth_data->port_id);
		else
			bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
#if defined(RTE_LIBRTE_IEEE1588)
		priv->rx_timestamp = *dpaa2_timestamp_dynfield(bufs[num_rx]);
#endif

		if (eth_data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_VLAN_STRIP)
			rte_vlan_strip(bufs[num_rx]);

		dq_storage++;
		num_rx++;
	} while (pending);

	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
	}
	/* issue a volatile dequeue command for next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
					   "QBMAN is busy (2)\n");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);

	dpaa2_q->rx_pkts += num_rx;

	return num_rx;
}
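/* Eventdev Rx callback for parallel queues: fill the rte_event from the queue
 * context, convert the FD to an mbuf and consume the DQRR entry immediately.
 */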
void __rte_hot
dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

	qbman_swp_dqrr_consume(swp, dq);
}
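/* Eventdev Rx callback for atomic queues: the DQRR entry is held rather than
 * consumed, and its index is recorded in the mbuf so it can be released later
 * (for example on the subsequent enqueue).
 */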
void __rte_hot
dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused,
			       const struct qbman_fd *fd,
			       const struct qbman_result *dq,
			       struct dpaa2_queue *rxq,
			       struct rte_event *ev)
{
	uint8_t dqrr_index;

	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

	dqrr_index = qbman_get_dqrr_idx(dq);
	*dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}
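/* Eventdev Rx callback for ordered queues: record the ODP id and sequence
 * number from the dequeue result in the mbuf so that order can be restored on
 * a later enqueue, then consume the DQRR entry.
 */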
void __rte_hot
dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
				const struct qbman_fd *fd,
				const struct qbman_result *dq,
				struct dpaa2_queue *rxq,
				struct rte_event *ev)
{
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

	*dpaa2_seqn(ev->mbuf) = DPAA2_ENQUEUE_FLAG_ORP;
	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;

	qbman_swp_dqrr_consume(swp, dq);
}
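/* Non-prefetch Rx burst: issues a fresh volatile dequeue per call, pulling in
 * chunks of up to dpaa2_dqrr_size frames, instead of reusing a pull prepared
 * by the previous call.
 */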
uint16_t
dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function receive frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	do {
		dq_storage = dpaa2_q->q_storage->dq_storage[0];
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);

		if (next_pull > dpaa2_dqrr_size) {
			qbman_pull_desc_set_numframes(&pulldesc,
				dpaa2_dqrr_size);
			next_pull -= dpaa2_dqrr_size;
		} else {
			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
			next_pull = 0;
		}

		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG(
					"VDQ command is not issued.QBMAN is busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}

		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
		/* Check if the previous issued command is completed. */
		while (!qbman_check_command_complete(dq_storage))
			;

		num_pulled = 0;
		pending = 1;
		do {
			/* Loop until the dq_storage is updated with
			 * new token by QBMAN
			 */
			while (!qbman_check_new_result(dq_storage))
				;
			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
			/* Check whether Last Pull command is Expired and
			 * setting Condition for Loop termination
			 */
			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
				pending = 0;
				/* Check for valid frame. */
				status = qbman_result_DQ_flags(dq_storage);
				if (unlikely((status &
					QBMAN_DQ_STAT_VALIDFRAME) == 0))
					continue;
			}
			fd = qbman_result_DQ_fd(dq_storage);

#ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
			if (dpaa2_svr_family != SVR_LX2160A) {
				const struct qbman_fd *next_fd =
					qbman_result_DQ_fd(dq_storage + 1);

				/* Prefetch Annotation address for the parse
				 * results.
				 */
				rte_prefetch0((DPAA2_IOVA_TO_VADDR(
					DPAA2_GET_FD_ADDR(next_fd) +
					DPAA2_FD_PTA_SIZE + 16)));
			}
#endif

			if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
				bufs[num_rx] = eth_sg_fd_to_mbuf(fd,
							eth_data->port_id);
			else
				bufs[num_rx] = eth_fd_to_mbuf(fd,
							eth_data->port_id);

			if (eth_data->dev_conf.rxmode.offloads &
					DEV_RX_OFFLOAD_VLAN_STRIP) {
				rte_vlan_strip(bufs[num_rx]);
			}

			dq_storage++;
			num_rx++;
			num_pulled++;
		} while (pending);
	/* Last VDQ provided all packets and more packets are requested */
	} while (next_pull && num_pulled == dpaa2_dqrr_size);

	dpaa2_q->rx_pkts += num_rx;

	return num_rx;
}
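/* Drain the Tx confirmation queue: pull confirmed frames, release their
 * buffers back to the hardware buffer pool and, when IEEE 1588 is enabled,
 * record the Tx timestamp from the frame annotation.
 */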
uint16_t dpaa2_dev_tx_conf(void *queue)
{
	/* Function receive frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_tx_conf = 0, num_pulled;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd, *next_fd;
	struct qbman_pull_desc pulldesc;
	struct qbman_release_desc releasedesc;
	uint32_t bpid;
	uint64_t buf;
#if defined(RTE_LIBRTE_IEEE1588)
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_annot_hdr *annotation;
#endif

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	do {
		dq_storage = dpaa2_q->q_storage->dq_storage[0];
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);

		qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);

		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
						   "QBMAN is busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}

		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
		/* Check if the previous issued command is completed. */
		while (!qbman_check_command_complete(dq_storage))
			;

		num_pulled = 0;
		pending = 1;
		do {
			/* Loop until the dq_storage is updated with
			 * new token by QBMAN
			 */
			while (!qbman_check_new_result(dq_storage))
				;
			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
			/* Check whether Last Pull command is Expired and
			 * setting Condition for Loop termination
			 */
			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
				pending = 0;
				/* Check for valid frame. */
				status = qbman_result_DQ_flags(dq_storage);
				if (unlikely((status &
					QBMAN_DQ_STAT_VALIDFRAME) == 0))
					continue;
			}
			fd = qbman_result_DQ_fd(dq_storage);

			next_fd = qbman_result_DQ_fd(dq_storage + 1);
			/* Prefetch Annotation address for the parse results */
			rte_prefetch0((void *)(size_t)
				(DPAA2_GET_FD_ADDR(next_fd) +
				 DPAA2_FD_PTA_SIZE + 16));

			bpid = DPAA2_GET_FD_BPID(fd);

			/* Create a release descriptor required for releasing
			 * buffers into QBMAN
			 */
			qbman_release_desc_clear(&releasedesc);
			qbman_release_desc_set_bpid(&releasedesc, bpid);

			buf = DPAA2_GET_FD_ADDR(fd);
			/* feed them to bman */
			do {
				ret = qbman_swp_release(swp, &releasedesc,
							&buf, 1);
			} while (ret == -EBUSY);

			dq_storage++;
			num_tx_conf++;
			num_pulled++;
#if defined(RTE_LIBRTE_IEEE1588)
			annotation = (struct dpaa2_annot_hdr *)((size_t)
				DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
				DPAA2_FD_PTA_SIZE);
			priv->tx_timestamp = annotation->word2;
#endif
		} while (pending);

	/* Last VDQ provided all packets and more packets are requested */
	} while (num_pulled == dpaa2_dqrr_size);

	dpaa2_q->rx_pkts += num_tx_conf;

	return num_tx_conf;
}

/* Configure the egress frame annotation for timestamp update */
static void enable_tx_tstamp(struct qbman_fd *fd)
{
	struct dpaa2_faead *fd_faead;

	/* Set frame annotation status field as valid */
	(fd)->simple.frc |= DPAA2_FD_FRC_FASV;

	/* Set frame annotation egress action descriptor as valid */
	(fd)->simple.frc |= DPAA2_FD_FRC_FAEADV;

	/* Set Annotation Length as 128B */
	(fd)->simple.ctrl |= DPAA2_FD_CTRL_ASAL;

	/* enable update of confirmation frame annotation */
	fd_faead = (struct dpaa2_faead *)((size_t)
			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
			DPAA2_FD_PTA_SIZE + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET);
	fd_faead->ctrl = DPAA2_ANNOT_FAEAD_A2V | DPAA2_ANNOT_FAEAD_UPDV |
			 DPAA2_ANNOT_FAEAD_UPD;
}

/*
 * Callback to handle sending packets through WRIOP based interface
 */
uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to given device and VQ*/
	uint32_t loop, retry_count;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	uint32_t frames_to_send;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint16_t bpid;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
			   eth_data, dpaa2_q->fqid);

#ifdef RTE_LIBRTE_IEEE1588
	/* IEEE1588 driver need pointer to tx confirmation queue
	 * corresponding to last packet transmitted for reading
	 * the timestamp
	 */
	priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
	dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
#endif

	/*Prepare enqueue descriptor*/
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_q->fqid);

	/*Clear the unused FD fields before sending*/
	while (nb_pkts) {
		/*Check if the queue is congested*/
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto skip_tx;
		}

		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_pkts;

		for (loop = 0; loop < frames_to_send; loop++) {
			if (*dpaa2_seqn(*bufs)) {
				uint8_t dqrr_index = *dpaa2_seqn(*bufs) - 1;

				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
						dqrr_index;
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
				*dpaa2_seqn(*bufs) = DPAA2_INVALID_MBUF_SEQN;
			}

			if (likely(RTE_MBUF_DIRECT(*bufs))) {
				mp = (*bufs)->pool;
				/* Check the basic scenario and set
				 * the FD appropriately here itself.
				 */
				if (likely(mp && mp->ops_index ==
				    priv->bp_list->dpaa2_ops_index &&
				    (*bufs)->nb_segs == 1 &&
				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
					if (unlikely(((*bufs)->ol_flags
						& PKT_TX_VLAN_PKT) ||
						(eth_data->dev_conf.txmode.offloads
						& DEV_TX_OFFLOAD_VLAN_INSERT))) {
						ret = rte_vlan_insert(bufs);
						if (ret)
							goto send_n_return;
					}
					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
						&fd_arr[loop],
						mempool_to_bpid(mp));
#ifdef RTE_LIBRTE_IEEE1588
					enable_tx_tstamp(&fd_arr[loop]);
#endif
					bufs++;
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}
			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp || !priv->bp_list)) {
				DPAA2_PMD_ERR("Err: No buffer pool attached");
				goto send_n_return;
			}

			if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
				(eth_data->dev_conf.txmode.offloads
				& DEV_TX_OFFLOAD_VLAN_INSERT))) {
				int ret = rte_vlan_insert(bufs);
				if (ret)
					goto send_n_return;
			}
			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				bpid = priv->bp_list->buf_pool.bpid;

				if (unlikely((*bufs)->nb_segs > 1)) {
					DPAA2_PMD_ERR("S/G support not added"
						" for non hw offload buffer");
					goto send_n_return;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					goto send_n_return;
				}
				/* free the original packet */
				rte_pktmbuf_free(*bufs);
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							&fd_arr[loop], bpid))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop], bpid);
				}
			}
#ifdef RTE_LIBRTE_IEEE1588
			enable_tx_tstamp(&fd_arr[loop]);
#endif
			bufs++;
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
					&fd_arr[loop], &flags[loop],
					frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					nb_pkts -= loop;
					goto send_n_return;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		nb_pkts -= loop;
	}
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;

send_n_return:
	/* send any already prepared fd */
	if (loop) {
		unsigned int i = 0;

		retry_count = 0;
		while (i < loop) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
					&fd_arr[i], &flags[i], loop - i);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
					break;
			} else {
				i += ret;
				retry_count = 0;
			}
		}
		num_tx += i;
	}
skip_tx:
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;
}
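/* Free the mbuf referenced by the FD stored in the given enqueue-response
 * entry.
 */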
static void
dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci)
{
	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
	struct qbman_fd *fd;
	struct rte_mbuf *m;

	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);

	/* Setting port id does not matter as we are to free the mbuf */
	m = eth_fd_to_mbuf(fd, 0);
	rte_pktmbuf_free(m);
}
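/* Prepare a per-frame enqueue descriptor for ordered traffic: restore the ODP
 * id and sequence number recorded at Rx, optionally request an enqueue
 * response, and release any held DQRR entry via DCA.
 */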
static void
dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
			     struct rte_mbuf *m,
			     struct qbman_eq_desc *eqdesc)
{
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
	struct eqresp_metadata *eqresp_meta;
	uint16_t orpid, seqnum;
	uint8_t dq_idx;

	qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid);

	if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
		orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
			DPAA2_EQCR_OPRID_SHIFT;
		seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
			DPAA2_EQCR_SEQNUM_SHIFT;

		if (!priv->en_loose_ordered) {
			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
			qbman_eq_desc_set_response(eqdesc, (uint64_t)
				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
				dpio_dev->eqresp_pi]), 1);
			qbman_eq_desc_set_token(eqdesc, 1);

			eqresp_meta = &dpio_dev->eqresp_meta[
				dpio_dev->eqresp_pi];
			eqresp_meta->dpaa2_q = dpaa2_q;
			eqresp_meta->mp = m->pool;

			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
				dpio_dev->eqresp_pi++ :
				(dpio_dev->eqresp_pi = 0);
		} else {
			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
		}
	} else {
		dq_idx = *dpaa2_seqn(m) - 1;
		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
		DPAA2_PER_LCORE_DQRR_SIZE--;
		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
	}
	*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
}

/* Callback to handle sending ordered packets through WRIOP based interface */
uint16_t
dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to given device and VQ*/
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	struct qbman_swp *swp;
	uint32_t frames_to_send, num_free_eq_desc;
	uint32_t loop, retry_count;
	int32_t ret;
	uint16_t num_tx = 0;
	uint16_t bpid;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
			   eth_data, dpaa2_q->fqid);

	/* This would also handle normal and atomic queues as any type
	 * of packet can be enqueued when ordered queues are being used.
	 */
	while (nb_pkts) {
		/*Check if the queue is congested*/
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto skip_tx;
		}

		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_pkts;

		if (!priv->en_loose_ordered) {
			if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
				num_free_eq_desc = dpaa2_free_eq_descriptors();
				if (num_free_eq_desc < frames_to_send)
					frames_to_send = num_free_eq_desc;
			}
		}

		for (loop = 0; loop < frames_to_send; loop++) {
			/*Prepare enqueue descriptor*/
			qbman_eq_desc_clear(&eqdesc[loop]);

			if (*dpaa2_seqn(*bufs)) {
				/* Use only queue 0 for Tx in case of atomic/
				 * ordered packets as packets can get unordered
				 * when being transmitted out from the interface
				 */
				dpaa2_set_enqueue_descriptor(order_sendq,
							     (*bufs),
							     &eqdesc[loop]);
			} else {
				qbman_eq_desc_set_no_orp(&eqdesc[loop],
							 DPAA2_EQ_RESP_ERR_FQ);
				qbman_eq_desc_set_fq(&eqdesc[loop],
						     dpaa2_q->fqid);
			}

			if (likely(RTE_MBUF_DIRECT(*bufs))) {
				mp = (*bufs)->pool;
				/* Check the basic scenario and set
				 * the FD appropriately here itself.
				 */
				if (likely(mp && mp->ops_index ==
				    priv->bp_list->dpaa2_ops_index &&
				    (*bufs)->nb_segs == 1 &&
				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
					if (unlikely((*bufs)->ol_flags
						& PKT_TX_VLAN_PKT)) {
						ret = rte_vlan_insert(bufs);
						if (ret)
							goto send_n_return;
					}
					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
						&fd_arr[loop],
						mempool_to_bpid(mp));
					bufs++;
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}
			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp || !priv->bp_list)) {
				DPAA2_PMD_ERR("Err: No buffer pool attached");
				goto send_n_return;
			}

			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				bpid = priv->bp_list->buf_pool.bpid;

				if (unlikely((*bufs)->nb_segs > 1)) {
					DPAA2_PMD_ERR(
						"S/G not supp for non hw offload buffer");
					goto send_n_return;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					goto send_n_return;
				}
				/* free the original packet */
				rte_pktmbuf_free(*bufs);
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							&fd_arr[loop], bpid))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop], bpid);
				}
			}
			bufs++;
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					nb_pkts -= loop;
					goto send_n_return;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		nb_pkts -= loop;
	}
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;

send_n_return:
	/* send any already prepared fd */
	if (loop) {
		unsigned int i = 0;

		retry_count = 0;
		while (i < loop) {
			ret = qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[i], loop - i);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
					break;
			} else {
				i += ret;
				retry_count = 0;
			}
		}
		num_tx += i;
	}
skip_tx:
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;
}

/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param queue
 *   Generic pointer to TX queue structure.
 * @param bufs
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	(void)queue;
	(void)bufs;
	(void)nb_pkts;

	return 0;
}

#if defined(RTE_TOOLCHAIN_GCC)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#elif defined(RTE_TOOLCHAIN_CLANG)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
#endif

/* This function loops back all the received packets.*/
uint16_t
dpaa2_dev_loopback_rx(void *queue,
		      struct rte_mbuf **bufs __rte_unused,
		      uint16_t nb_pkts)
{
	/* Function receive frames for a given device and VQ*/
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, num_tx = 0, pull_size;
	uint8_t pending, status;
	struct qbman_swp *swp;
	struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
	struct qbman_pull_desc pulldesc;
	struct qbman_eq_desc eqdesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_queue *tx_q = priv->tx_vq[0];
	/* todo - currently we are using 1st TX queue only for loopback*/

	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
		ret = dpaa2_affine_qbman_ethrx_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(
			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG(
					"VDQ command not issued.QBMAN busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare next pull descriptor. This will give space for the
	 * prefetching done on DQRR entries
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/*Prepare enqueue descriptor*/
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);

	/* Check if the previous issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage);

		dq_storage++;
		num_rx++;
	} while (pending);

	while (num_tx < num_rx) {
		num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
				&fd[num_tx], 0, num_rx - num_tx);
	}

	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
	}
	/* issue a volatile dequeue command for next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
					   "QBMAN is busy (2)\n");
			/* Portal was busy, try again */
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);

	dpaa2_q->rx_pkts += num_rx;
	dpaa2_q->tx_pkts += num_tx;

	return 0;
}

#if defined(RTE_TOOLCHAIN_GCC)
#pragma GCC diagnostic pop
#elif defined(RTE_TOOLCHAIN_CLANG)
#pragma clang diagnostic pop
#endif