/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2020 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_dev.h>

#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_pmd_logs.h"
#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"
static inline uint32_t __rte_hot
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
			struct dpaa2_annot_hdr *annotation);

static void enable_tx_tstamp(struct qbman_fd *fd) __rte_unused;

static inline rte_mbuf_timestamp_t *
dpaa2_timestamp_dynfield(struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf,
		dpaa2_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
}
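
/* Note: dpaa2_timestamp_dynfield_offset and dpaa2_timestamp_rx_dynflag,
 * used by the helper above, are assumed to be registered elsewhere in the
 * PMD via the rte_mbuf dynamic field/flag API; the helper only computes
 * the per-mbuf location of the hardware Rx timestamp.
 */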
#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
	DPAA2_SET_FD_FRC(_fd, 0); \
	DPAA2_RESET_FD_CTRL(_fd); \
	DPAA2_RESET_FD_FLC(_fd); \
} while (0)
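
/* Illustrative use of the macro above (a sketch, not an additional code
 * path in this driver): a single-segment mbuf is converted in place into
 * a contiguous frame descriptor before enqueue, e.g.
 *
 *	struct qbman_fd fd;
 *	DPAA2_MBUF_TO_CONTIG_FD(mbuf, &fd, mempool_to_bpid(mbuf->pool));
 *
 * after which the FD carries the buffer IOVA, length, data offset and
 * buffer pool id that the hardware needs to transmit, and later free,
 * the buffer.
 */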
static inline void __rte_hot
dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
		       void *hw_annot_addr)
{
	uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	m->packet_type = RTE_PTYPE_UNKNOWN;
	switch (frc) {
	case DPAA2_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA2_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA2_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA2_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
		break;
	case DPAA2_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
		break;
	default:
		m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
	}
	m->hash.rss = fd->simple.flc_hi;
	m->ol_flags |= PKT_RX_RSS_HASH;

	if (dpaa2_enable_ts[m->port]) {
		*dpaa2_timestamp_dynfield(m) = annotation->word2;
		m->ol_flags |= dpaa2_timestamp_rx_dynflag;
		DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "",
				*dpaa2_timestamp_dynfield(m));
	}

	DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
			"ol_flags =0x%" PRIx64 "",
			frc, m->packet_type, m->ol_flags);
}
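
/* Slow-path parser: used when the FRC summary above does not match a
 * known packet type, or on platforms without the FRC summary. It walks
 * the annotation words written by the hardware parser.
 */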
static inline uint32_t __rte_hot
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
			struct dpaa2_annot_hdr *annotation)
{
	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
	uint16_t *vlan_tci;

	DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
			"(4)=0x%" PRIx64 "\t",
			annotation->word3, annotation->word4);

#if defined(RTE_LIBRTE_IEEE1588)
	if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP))
		mbuf->ol_flags |= PKT_RX_IEEE1588_PTP;
#endif

	if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= PKT_RX_VLAN;
		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ;
		pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
	}

	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
		goto parse_done;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
			     L3_IPV4_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV4;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
			L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
		   L3_IPV6_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV6;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
		    L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
	    L3_IP_1_MORE_FRAGMENT |
	    L3_IP_N_FIRST_FRAGMENT |
	    L3_IP_N_MORE_FRAGMENT)) {
		pkt_type |= RTE_PTYPE_L4_FRAG;
		goto parse_done;
	} else {
		pkt_type |= RTE_PTYPE_L4_NONFRAG;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_UDP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_TCP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_SCTP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_ICMP;

	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
		pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
	return pkt_type;
}
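
/* Fast-path parser below: the common packet types are resolved from
 * annotation word4 alone. Word3 is first checked against a bit mask of
 * conditions that need detailed handling (the exact set is hardware
 * defined); any such bit forces the slow path above.
 */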
static inline uint32_t __rte_hot
dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
{
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
			   annotation->word4);

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

	if (dpaa2_enable_ts[mbuf->port]) {
		*dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
		mbuf->ol_flags |= dpaa2_timestamp_rx_dynflag;
		DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "",
				*dpaa2_timestamp_dynfield(mbuf));
	}

	/* Check detailed parsing requirement */
	if (annotation->word3 & 0x7FFFFC3FFFF)
		return dpaa2_dev_rx_parse_slow(mbuf, annotation);

	/* Return some common types from parse processing */
	switch (annotation->word4) {
	case DPAA2_L3_IPv4:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
	case DPAA2_L3_IPv6:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
	case DPAA2_L3_IPv4_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv4_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_UDP;
	case DPAA2_L3_IPv6_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv6_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_UDP;
	default:
		break;
	}

	return dpaa2_dev_rx_parse_slow(mbuf, annotation);
}
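
/* Rx FD-to-mbuf conversion is zero-copy: the mbuf metadata area lives in
 * the buffer headroom reserved by the DPAA2 mempool (meta_data_size), so
 * DPAA2_INLINE_MBUF_FROM_BUF only computes the mbuf address from the
 * buffer address instead of allocating a new mbuf.
 */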
static inline struct rte_mbuf *__rte_hot
eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
		  int port_id)
{
	struct qbman_sge *sgt, *sge;
	size_t sg_addr, fd_addr;
	int i = 0;
	void *hw_annot_addr;
	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

	fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
	hw_annot_addr = (void *)(fd_addr + DPAA2_FD_PTA_SIZE);

	/* Get Scatter gather table address */
	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));
	sge = &sgt[i++];
	sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

	/* First Scatter gather entry */
	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	/* Prepare all the metadata for first segment */
	first_seg->buf_addr = (uint8_t *)sg_addr;
	first_seg->ol_flags = 0;
	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
	first_seg->data_len = sge->length & 0x1FFFF;
	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
	first_seg->nb_segs = 1;
	first_seg->next = NULL;
	first_seg->port = port_id;
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(first_seg, fd, hw_annot_addr);
	else
		first_seg->packet_type =
			dpaa2_dev_rx_parse(first_seg, hw_annot_addr);

	rte_mbuf_refcnt_set(first_seg, 1);
	cur_seg = first_seg;
	while (!DPAA2_SG_IS_FINAL(sge)) {
		sge = &sgt[i++];
		sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
				DPAA2_GET_FLE_ADDR(sge));
		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
		next_seg->buf_addr = (uint8_t *)sg_addr;
		next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
		next_seg->data_len = sge->length & 0x1FFFF;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(next_seg, 1);
		cur_seg->next = next_seg;
		next_seg->next = NULL;
		cur_seg = next_seg;
	}
	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	rte_mbuf_refcnt_set(temp, 1);
	rte_pktmbuf_free_seg(temp);

	return (void *)first_seg;
}
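
/* Contiguous (single buffer) FD to mbuf conversion; the annotation area
 * placed before the packet data holds the hardware parse results.
 */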
static inline struct rte_mbuf *__rte_hot
eth_fd_to_mbuf(const struct qbman_fd *fd,
	       int port_id)
{
	void *v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
	void *hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* need to repopulate some of the fields,
	 * as they may have changed during the last transmission
	 */
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
	mbuf->pkt_len = mbuf->data_len;
	mbuf->port = port_id;
	mbuf->next = NULL;
	rte_mbuf_refcnt_set(mbuf, 1);

	/* Parse the packet */
	/* parse results for LX2 are there in FRC field of FD.
	 * For other DPAA2 platforms, parse results are after
	 * the private - sw annotation area
	 */
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(mbuf, fd, hw_annot_addr);
	else
		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, hw_annot_addr);

	DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	return mbuf;
}
static int __rte_noinline __rte_hot
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		  struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
	struct qbman_sge *sgt, *sge = NULL;
	int i;

	temp = rte_pktmbuf_alloc(mbuf->pool);
	if (temp == NULL) {
		DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
		return -ENOMEM;
	}

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
	DPAA2_SET_ONLY_FD_BPID(fd, bpid);
	DPAA2_SET_FD_OFFSET(fd, temp->data_off);
	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
	DPAA2_RESET_FD_FRC(fd);
	DPAA2_RESET_FD_CTRL(fd);
	/*Set Scatter gather table and Scatter gather entries*/
	sgt = (struct qbman_sge *)(
			(size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_GET_FD_OFFSET(fd));

	for (i = 0; i < mbuf->nb_segs; i++) {
		sge = &sgt[i];
		/*Resetting the buffer pool id and offset field*/
		sge->fin_bpid_offset = 0;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
		DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
		sge->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			if (rte_mbuf_refcnt_read(cur_seg) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
				rte_mbuf_refcnt_update(cur_seg, -1);
			} else
				DPAA2_SET_FLE_BPID(sge,
						mempool_to_bpid(cur_seg->pool));
			cur_seg = cur_seg->next;
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * owner buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
			} else {
				DPAA2_SET_FLE_BPID(sge,
						   mempool_to_bpid(mi->pool));
				rte_mbuf_refcnt_update(mi, 1);
			}
			prev_seg = cur_seg;
			cur_seg = cur_seg->next;
			prev_seg->next = NULL;
			rte_pktmbuf_free(prev_seg);
		}
	}
	DPAA2_SG_SET_FINAL(sge, true);
	return 0;
}
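
/* In the Tx conversion routines, buffer ownership follows the mbuf
 * refcount: with refcnt == 1 the hardware frees the buffer to its pool
 * after transmission, while refcnt > 1 marks the FD/SGE with an invalid
 * pool id (IVP) so the hardware leaves a buffer that the application
 * still references untouched.
 */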
static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid) __rte_unused;

static void __rte_noinline __rte_hot
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid)
{
	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);

	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(mbuf, -1);
		}
	} else {
		struct rte_mbuf *mi;

		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1)
			DPAA2_SET_FD_IVP(fd);
		else
			rte_mbuf_refcnt_update(mi, 1);
		rte_pktmbuf_free(mbuf);
	}
}
static inline int __rte_hot
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
		    struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *m;
	void *mb = NULL;

	if (rte_dpaa2_mbuf_alloc_bulk(
		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
		DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
		return -1;
	}
	m = (struct rte_mbuf *)mb;
	memcpy((char *)m->buf_addr + mbuf->data_off,
	       (void *)((char *)mbuf->buf_addr + mbuf->data_off),
		mbuf->pkt_len);

	/* Copy required fields */
	m->data_off = mbuf->data_off;
	m->ol_flags = mbuf->ol_flags;
	m->packet_type = mbuf->packet_type;
	m->tx_offload = mbuf->tx_offload;

	DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);

	DPAA2_PMD_DP_DEBUG(
		"mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
		" meta: %d, off: %d, len: %d\n",
		(void *)mbuf,
		mbuf->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}
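
/* Prefetch-mode Rx below uses two dequeue result storages in ping-pong
 * fashion: while the results in the active storage are processed, the
 * next volatile dequeue (pull) is already issued on the other storage,
 * hiding the QBMAN round trip behind packet processing.
 */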
/* This function assumes that the caller keeps the same value of nb_pkts
 * across calls per queue. If that is not the case, better use the
 * non-prefetch version of the rx call.
 * It will return the packets as requested in the previous call without
 * honoring the current nb_pkts or bufs space.
 */
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to receive frames for a given device and VQ*/
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, pull_size;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
#if defined(RTE_LIBRTE_IEEE1588)
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
#endif

	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
		ret = dpaa2_affine_qbman_ethrx_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}

	if (unlikely(!rte_dpaa2_bpid_info &&
		     rte_eal_process_type() == RTE_PROC_SECONDARY))
		rte_dpaa2_bpid_info = dpaa2_q->bp_array;

	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(
			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
						   " QBMAN is busy (1)\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare next pull descriptor. This will give space for the
	 * prefetching done on DQRR entries
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/* Check if the previous issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd = qbman_result_DQ_fd(dq_storage);

#ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
		if (dpaa2_svr_family != SVR_LX2160A) {
			const struct qbman_fd *next_fd =
				qbman_result_DQ_fd(dq_storage + 1);
			/* Prefetch Annotation address for the parse results */
			rte_prefetch0(DPAA2_IOVA_TO_VADDR((DPAA2_GET_FD_ADDR(
				next_fd) + DPAA2_FD_PTA_SIZE + 16)));
		}
#endif

		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
			bufs[num_rx] = eth_sg_fd_to_mbuf(fd, eth_data->port_id);
		else
			bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
#if defined(RTE_LIBRTE_IEEE1588)
		priv->rx_timestamp = *dpaa2_timestamp_dynfield(bufs[num_rx]);
#endif

		if (eth_data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_VLAN_STRIP)
			rte_vlan_strip(bufs[num_rx]);

		dq_storage++;
		num_rx++;
	} while (pending);

	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
	}
	/* issue a volatile dequeue command for next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
					   " QBMAN is busy (2)\n");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);

	dpaa2_q->rx_pkts += num_rx;

	return num_rx;
}
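
/* Event-mode Rx callbacks: called per DQRR entry by the DPAA2 event
 * device driver, they turn a dequeued FD into an rte_event/mbuf pair.
 * Parallel events consume the DQRR entry immediately; atomic events keep
 * the DQRR slot held until the application releases it; ordered events
 * record the hardware ODP id and sequence number for later Tx reordering.
 */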
void __rte_hot
dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

	qbman_swp_dqrr_consume(swp, dq);
}

void __rte_hot
dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused,
			       const struct qbman_fd *fd,
			       const struct qbman_result *dq,
			       struct dpaa2_queue *rxq,
			       struct rte_event *ev)
{
	uint8_t dqrr_index;

	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

	dqrr_index = qbman_get_dqrr_idx(dq);
	*dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}

void __rte_hot
dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
				const struct qbman_fd *fd,
				const struct qbman_result *dq,
				struct dpaa2_queue *rxq,
				struct rte_event *ev)
{
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

	*dpaa2_seqn(ev->mbuf) = DPAA2_ENQUEUE_FLAG_ORP;
	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_odpid(dq) <<
		DPAA2_EQCR_OPRID_SHIFT;
	*dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_seqnum(dq) <<
		DPAA2_EQCR_SEQNUM_SHIFT;

	qbman_swp_dqrr_consume(swp, dq);
}
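
/* Non-prefetch Rx: a fresh volatile dequeue is issued on every call, so
 * nb_pkts may vary between calls, at the cost of waiting inline for the
 * pull command to complete.
 */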
uint16_t
dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to receive frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct qbman_pull_desc pulldesc;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	do {
		dq_storage = dpaa2_q->q_storage->dq_storage[0];
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);

		if (next_pull > dpaa2_dqrr_size) {
			qbman_pull_desc_set_numframes(&pulldesc,
				dpaa2_dqrr_size);
			next_pull -= dpaa2_dqrr_size;
		} else {
			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
			next_pull = 0;
		}

		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG(
					"VDQ command is not issued. QBMAN is busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}

		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
		/* Check if the previous issued command is completed. */
		while (!qbman_check_command_complete(dq_storage))
			;

		num_pulled = 0;
		pending = 1;
		do {
			/* Loop until the dq_storage is updated with
			 * new token by QBMAN
			 */
			while (!qbman_check_new_result(dq_storage))
				;
			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
			/* Check whether Last Pull command is Expired and
			 * setting Condition for Loop termination
			 */
			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
				pending = 0;
				/* Check for valid frame. */
				status = qbman_result_DQ_flags(dq_storage);
				if (unlikely((status &
					QBMAN_DQ_STAT_VALIDFRAME) == 0))
					continue;
			}
			fd = qbman_result_DQ_fd(dq_storage);

#ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
			if (dpaa2_svr_family != SVR_LX2160A) {
				const struct qbman_fd *next_fd =
					qbman_result_DQ_fd(dq_storage + 1);

				/* Prefetch Annotation address for the parse
				 * results.
				 */
				rte_prefetch0((DPAA2_IOVA_TO_VADDR(
					DPAA2_GET_FD_ADDR(next_fd) +
					DPAA2_FD_PTA_SIZE + 16)));
			}
#endif

			if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
				bufs[num_rx] = eth_sg_fd_to_mbuf(fd,
							eth_data->port_id);
			else
				bufs[num_rx] = eth_fd_to_mbuf(fd,
							eth_data->port_id);

			if (eth_data->dev_conf.rxmode.offloads &
					DEV_RX_OFFLOAD_VLAN_STRIP) {
				rte_vlan_strip(bufs[num_rx]);
			}

			dq_storage++;
			num_rx++;
			num_pulled++;
		} while (pending);
	/* Last VDQ provided all packets and more packets are requested */
	} while (next_pull && num_pulled == dpaa2_dqrr_size);

	dpaa2_q->rx_pkts += num_rx;

	return num_rx;
}
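
/* Tx confirmation: drains the confirmation queue and releases the
 * transmitted buffers back to their hardware buffer pool through BMAN.
 * With IEEE1588 enabled it also records the egress timestamp that the
 * hardware wrote into the frame annotation.
 */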
uint16_t dpaa2_dev_tx_conf(void *queue)
{
	/* Function to receive tx confirmations for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_tx_conf = 0, num_pulled;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd, *next_fd;
	struct qbman_pull_desc pulldesc;
	struct qbman_release_desc releasedesc;
	uint32_t bpid;
	uint64_t buf;
#if defined(RTE_LIBRTE_IEEE1588)
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_annot_hdr *annotation;
#endif

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	do {
		dq_storage = dpaa2_q->q_storage->dq_storage[0];
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);

		qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);

		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
						   " QBMAN is busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}

		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
		/* Check if the previous issued command is completed. */
		while (!qbman_check_command_complete(dq_storage))
			;

		num_pulled = 0;
		pending = 1;
		do {
			/* Loop until the dq_storage is updated with
			 * new token by QBMAN
			 */
			while (!qbman_check_new_result(dq_storage))
				;
			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
			/* Check whether Last Pull command is Expired and
			 * setting Condition for Loop termination
			 */
			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
				pending = 0;
				/* Check for valid frame. */
				status = qbman_result_DQ_flags(dq_storage);
				if (unlikely((status &
					QBMAN_DQ_STAT_VALIDFRAME) == 0))
					continue;
			}
			fd = qbman_result_DQ_fd(dq_storage);

			next_fd = qbman_result_DQ_fd(dq_storage + 1);
			/* Prefetch Annotation address for the parse results */
			rte_prefetch0((void *)(size_t)
				(DPAA2_GET_FD_ADDR(next_fd) +
				 DPAA2_FD_PTA_SIZE + 16));

			bpid = DPAA2_GET_FD_BPID(fd);

			/* Create a release descriptor required for releasing
			 * buffers into QBMAN
			 */
			qbman_release_desc_clear(&releasedesc);
			qbman_release_desc_set_bpid(&releasedesc, bpid);

			buf = DPAA2_GET_FD_ADDR(fd);
			/* feed them to bman */
			do {
				ret = qbman_swp_release(swp, &releasedesc,
							&buf, 1);
			} while (ret == -EBUSY);

			dq_storage++;
			num_tx_conf++;
			num_pulled++;
#if defined(RTE_LIBRTE_IEEE1588)
			annotation = (struct dpaa2_annot_hdr *)((size_t)
				DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
				DPAA2_FD_PTA_SIZE);
			priv->tx_timestamp = annotation->word2;
#endif
		} while (pending);

	/* Last VDQ provided all packets and more packets are requested */
	} while (num_pulled == dpaa2_dqrr_size);

	dpaa2_q->rx_pkts += num_tx_conf;

	return num_tx_conf;
}
/* Configure the egress frame annotation for timestamp update */
static void enable_tx_tstamp(struct qbman_fd *fd)
{
	struct dpaa2_faead *fd_faead;

	/* Set frame annotation status field as valid */
	(fd)->simple.frc |= DPAA2_FD_FRC_FASV;

	/* Set frame annotation egress action descriptor as valid */
	(fd)->simple.frc |= DPAA2_FD_FRC_FAEADV;

	/* Set Annotation Length as 128B */
	(fd)->simple.ctrl |= DPAA2_FD_CTRL_ASAL;

	/* enable update of confirmation frame annotation */
	fd_faead = (struct dpaa2_faead *)((size_t)
			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
			DPAA2_FD_PTA_SIZE + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET);
	fd_faead->ctrl = DPAA2_ANNOT_FAEAD_A2V | DPAA2_ANNOT_FAEAD_UPDV |
			 DPAA2_ANNOT_FAEAD_UPD;
}
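
/* Tx path below: packets are converted to FDs and pushed through the
 * QBMAN software portal. The queue congestion state notification (CSCN)
 * is polled first, and both the congestion wait and the enqueue itself
 * are retried a bounded number of times before remaining packets are
 * returned to the caller.
 */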
/*
 * Callback to handle sending packets through WRIOP based interface
 */
uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to given device and VQ*/
	uint32_t loop, retry_count;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	uint32_t frames_to_send;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint16_t bpid;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
			   eth_data, dpaa2_q->fqid);

#ifdef RTE_LIBRTE_IEEE1588
	/* IEEE1588 driver needs a pointer to the tx confirmation queue
	 * corresponding to the last packet transmitted, for reading
	 * the timestamp
	 */
	priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
	dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
#endif

	/*Prepare enqueue descriptor*/
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_q->fqid);

	/*Clear the unused FD fields before sending*/
	while (nb_pkts) {
		/*Check if the queue is congested*/
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto skip_tx;
		}

		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_pkts;

		for (loop = 0; loop < frames_to_send; loop++) {
			if (*dpaa2_seqn(*bufs)) {
				uint8_t dqrr_index = *dpaa2_seqn(*bufs) - 1;

				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
						dqrr_index;
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
				*dpaa2_seqn(*bufs) = DPAA2_INVALID_MBUF_SEQN;
			}

			if (likely(RTE_MBUF_DIRECT(*bufs))) {
				mp = (*bufs)->pool;
				/* Check the basic scenario and set
				 * the FD appropriately here itself.
				 */
				if (likely(mp && mp->ops_index ==
				    priv->bp_list->dpaa2_ops_index &&
				    (*bufs)->nb_segs == 1 &&
				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
					if (unlikely(((*bufs)->ol_flags
						& PKT_TX_VLAN_PKT) ||
						(eth_data->dev_conf.txmode.offloads
						& DEV_TX_OFFLOAD_VLAN_INSERT))) {
						ret = rte_vlan_insert(bufs);
						if (ret)
							goto send_n_return;
					}
					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
						&fd_arr[loop],
						mempool_to_bpid(mp));
					bufs++;
#ifdef RTE_LIBRTE_IEEE1588
					enable_tx_tstamp(&fd_arr[loop]);
#endif
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}
			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp || !priv->bp_list)) {
				DPAA2_PMD_ERR("Err: No buffer pool attached");
				goto send_n_return;
			}

			if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
				(eth_data->dev_conf.txmode.offloads
				& DEV_TX_OFFLOAD_VLAN_INSERT))) {
				int ret = rte_vlan_insert(bufs);
				if (ret)
					goto send_n_return;
			}
			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				bpid = priv->bp_list->buf_pool.bpid;

				if (unlikely((*bufs)->nb_segs > 1)) {
					DPAA2_PMD_ERR("S/G support not added"
						" for non hw offload buffer");
					goto send_n_return;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					goto send_n_return;
				}
				/* free the original packet */
				rte_pktmbuf_free(*bufs);
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							&fd_arr[loop], bpid))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid);
				}
			}
#ifdef RTE_LIBRTE_IEEE1588
			enable_tx_tstamp(&fd_arr[loop]);
#endif
			bufs++;
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
					&fd_arr[loop], &flags[loop],
					frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					nb_pkts -= loop;
					goto send_n_return;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		nb_pkts -= loop;
	}
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;

send_n_return:
	/* send any already prepared fd */
	if (loop) {
		unsigned int i = 0;

		retry_count = 0;
		while (i < loop) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
					&fd_arr[i], &flags[i], loop - i);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
					break;
			} else {
				i += ret;
				retry_count = 0;
			}
		}
		num_tx += i;
	}
skip_tx:
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;
}
void
dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci)
{
	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
	struct qbman_fd *fd;
	struct rte_mbuf *m;

	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);

	/* Setting port id does not matter as we are to free the mbuf */
	m = eth_fd_to_mbuf(fd, 0);
	rte_pktmbuf_free(m);
}
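
/* Builds the per-packet enqueue descriptor for atomic/ordered traffic:
 * ordered packets replay the ODP id and sequence number captured at
 * dequeue time (and, with strict ordering, request an enqueue response),
 * while atomic packets piggy-back a DQRR consume (DCA) on the enqueue.
 */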
static void
dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
			     struct rte_mbuf *m,
			     struct qbman_eq_desc *eqdesc)
{
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
	struct eqresp_metadata *eqresp_meta;
	uint16_t orpid, seqnum;
	uint8_t dq_idx;

	qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid);

	if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
		orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
			DPAA2_EQCR_OPRID_SHIFT;
		seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
			DPAA2_EQCR_SEQNUM_SHIFT;

		if (!priv->en_loose_ordered) {
			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
			qbman_eq_desc_set_response(eqdesc, (uint64_t)
				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
				dpio_dev->eqresp_pi]), 1);
			qbman_eq_desc_set_token(eqdesc, 1);

			eqresp_meta = &dpio_dev->eqresp_meta[
				dpio_dev->eqresp_pi];
			eqresp_meta->dpaa2_q = dpaa2_q;
			eqresp_meta->mp = m->pool;

			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
				dpio_dev->eqresp_pi++ :
				(dpio_dev->eqresp_pi = 0);
		} else {
			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
		}
	} else {
		dq_idx = *dpaa2_seqn(m) - 1;
		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
		DPAA2_PER_LCORE_DQRR_SIZE--;
		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
	}
	*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
}
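
/* Ordered Tx differs from plain Tx in that every frame gets its own
 * enqueue descriptor so that ORP/DCA state from the event dequeue can be
 * applied per packet; queue 0 is used for all such traffic to keep a
 * single order restoration context.
 */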
/* Callback to handle sending ordered packets through WRIOP based interface */
uint16_t
dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to given device and VQ*/
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	struct qbman_swp *swp;
	uint32_t frames_to_send, num_free_eq_desc;
	uint32_t loop, retry_count;
	int32_t ret;
	uint16_t num_tx = 0;
	uint16_t bpid;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
			   eth_data, dpaa2_q->fqid);

	/* This would also handle normal and atomic queues as any type
	 * of packet can be enqueued when ordered queues are being used.
	 */
	while (nb_pkts) {
		/*Check if the queue is congested*/
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto skip_tx;
		}

		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_pkts;

		if (!priv->en_loose_ordered) {
			if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
				num_free_eq_desc = dpaa2_free_eq_descriptors();
				if (num_free_eq_desc < frames_to_send)
					frames_to_send = num_free_eq_desc;
			}
		}

		for (loop = 0; loop < frames_to_send; loop++) {
			/*Prepare enqueue descriptor*/
			qbman_eq_desc_clear(&eqdesc[loop]);

			if (*dpaa2_seqn(*bufs)) {
				/* Use only queue 0 for Tx in case of atomic/
				 * ordered packets as packets can get unordered
				 * when being transmitted out from the interface
				 */
				dpaa2_set_enqueue_descriptor(order_sendq,
							     (*bufs),
							     &eqdesc[loop]);
			} else {
				qbman_eq_desc_set_no_orp(&eqdesc[loop],
							 DPAA2_EQ_RESP_ERR_FQ);
				qbman_eq_desc_set_fq(&eqdesc[loop],
						     dpaa2_q->fqid);
			}

			if (likely(RTE_MBUF_DIRECT(*bufs))) {
				mp = (*bufs)->pool;
				/* Check the basic scenario and set
				 * the FD appropriately here itself.
				 */
				if (likely(mp && mp->ops_index ==
				    priv->bp_list->dpaa2_ops_index &&
				    (*bufs)->nb_segs == 1 &&
				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
					if (unlikely((*bufs)->ol_flags
						& PKT_TX_VLAN_PKT)) {
						ret = rte_vlan_insert(bufs);
						if (ret)
							goto send_n_return;
					}
					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
						&fd_arr[loop],
						mempool_to_bpid(mp));
					bufs++;
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}
			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp || !priv->bp_list)) {
				DPAA2_PMD_ERR("Err: No buffer pool attached");
				goto send_n_return;
			}

			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				bpid = priv->bp_list->buf_pool.bpid;

				if (unlikely((*bufs)->nb_segs > 1)) {
					DPAA2_PMD_ERR(
						"S/G not supported for non hw offload buffer");
					goto send_n_return;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					goto send_n_return;
				}
				/* free the original packet */
				rte_pktmbuf_free(*bufs);
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							      &fd_arr[loop],
							      bpid))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop], bpid);
				}
			}
			bufs++;
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					nb_pkts -= loop;
					goto send_n_return;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		nb_pkts -= loop;
	}
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;

send_n_return:
	/* send any already prepared fd */
	if (loop) {
		unsigned int i = 0;

		retry_count = 0;
		while (i < loop) {
			ret = qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[i], &fd_arr[i], loop - i);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
					break;
			} else {
				i += ret;
				retry_count = 0;
			}
		}
		num_tx += i;
	}
skip_tx:
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;
}
/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param queue
 *   Generic pointer to TX queue structure.
 * @param[in] bufs
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= nb_pkts).
 */
uint16_t
dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	(void)queue;
	(void)bufs;
	(void)nb_pkts;

	return 0;
}
#if defined(RTE_TOOLCHAIN_GCC)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#elif defined(RTE_TOOLCHAIN_CLANG)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
#endif

/* This function loopbacks all the received packets.*/
uint16_t
dpaa2_dev_loopback_rx(void *queue,
		      struct rte_mbuf **bufs __rte_unused,
		      uint16_t nb_pkts)
{
	/* Function to receive frames for a given device and VQ*/
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, num_tx = 0, pull_size;
	uint8_t pending, status;
	struct qbman_swp *swp;
	struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
	struct qbman_pull_desc pulldesc;
	struct qbman_eq_desc eqdesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_queue *tx_q = priv->tx_vq[0];
	/* todo - currently we are using 1st TX queue only for loopback*/

	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
		ret = dpaa2_affine_qbman_ethrx_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(
			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG(
					"VDQ command not issued. QBMAN busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare next pull descriptor. This will give space for the
	 * prefetching done on DQRR entries
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/*Prepare enqueue descriptor*/
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);

	/* Check if the previous issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage);

		dq_storage++;
		num_rx++;
	} while (pending);

	while (num_tx < num_rx) {
		num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
				&fd[num_tx], 0, num_rx - num_tx);
	}

	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
	}
	/* issue a volatile dequeue command for next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
					   " QBMAN is busy (2)\n");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);

	dpaa2_q->rx_pkts += num_rx;
	dpaa2_q->tx_pkts += num_tx;

	return 0;
}
#if defined(RTE_TOOLCHAIN_GCC)
#pragma GCC diagnostic pop
#elif defined(RTE_TOOLCHAIN_CLANG)
#pragma clang diagnostic pop
#endif