1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4 * Copyright 2016-2020 NXP
12 #include <rte_ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
18 #include <rte_fslmc.h>
19 #include <fslmc_vfio.h>
20 #include <dpaa2_hw_pvt.h>
21 #include <dpaa2_hw_dpio.h>
22 #include <dpaa2_hw_mempool.h>
24 #include "dpaa2_pmd_logs.h"
25 #include "dpaa2_ethdev.h"
26 #include "base/dpaa2_hw_dpni_annot.h"
28 static inline uint32_t __rte_hot
29 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
30 struct dpaa2_annot_hdr *annotation);
32 static void enable_tx_tstamp(struct qbman_fd *fd) __rte_unused;
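/* Populate a contiguous-format frame descriptor (FD) from a
 * single-segment mbuf: buffer address, length, buffer pool id,
 * data offset, and cleared FRC/CTRL/FLC fields.
 */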
#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
	DPAA2_SET_FD_FRC(_fd, 0); \
	DPAA2_RESET_FD_CTRL(_fd); \
	DPAA2_RESET_FD_FLC(_fd); \
} while (0)
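/* Fast Rx parse: use the parse summary carried in the FRC field of
 * the FD to set the mbuf packet type, and pick up the RSS hash and
 * (optionally) the Rx timestamp from the hardware annotation.
 */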
44 static inline void __rte_hot
dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
		       void *hw_annot_addr)
{
48 uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);
49 struct dpaa2_annot_hdr *annotation =
50 (struct dpaa2_annot_hdr *)hw_annot_addr;
	m->packet_type = RTE_PTYPE_UNKNOWN;
	switch (frc) {
	case DPAA2_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA2_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA2_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA2_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
		break;
	case DPAA2_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
		break;
	default:
		m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
	}
108 m->hash.rss = fd->simple.flc_hi;
109 m->ol_flags |= PKT_RX_RSS_HASH;
111 if (dpaa2_enable_ts[m->port]) {
112 m->timestamp = annotation->word2;
113 m->ol_flags |= PKT_RX_TIMESTAMP;
114 DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "", m->timestamp);
117 DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
118 "ol_flags =0x%" PRIx64 "",
119 frc, m->packet_type, m->ol_flags);
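/* Slow Rx parse: inspect the hardware annotation words to derive the
 * VLAN status, L2/L3/L4 packet types, fragmentation status and
 * checksum errors.
 */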
122 static inline uint32_t __rte_hot
123 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
124 struct dpaa2_annot_hdr *annotation)
126 uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
129 DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
130 "(4)=0x%" PRIx64 "\t",
131 annotation->word3, annotation->word4);
133 #if defined(RTE_LIBRTE_IEEE1588)
134 if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP))
135 mbuf->ol_flags |= PKT_RX_IEEE1588_PTP;
138 if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
139 vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
140 (VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
141 mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
142 mbuf->ol_flags |= PKT_RX_VLAN;
143 pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
144 } else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
145 vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
146 (VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
147 mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
148 mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ;
149 pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
152 if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
153 pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
155 } else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
156 pkt_type |= RTE_PTYPE_L2_ETHER;
161 if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
162 L3_IPV4_N_PRESENT)) {
163 pkt_type |= RTE_PTYPE_L3_IPV4;
164 if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
165 L3_IP_N_OPT_PRESENT))
166 pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
168 } else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
169 L3_IPV6_N_PRESENT)) {
170 pkt_type |= RTE_PTYPE_L3_IPV6;
171 if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
172 L3_IP_N_OPT_PRESENT))
173 pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
178 if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
179 mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
180 else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
181 mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
183 if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
184 L3_IP_1_MORE_FRAGMENT |
185 L3_IP_N_FIRST_FRAGMENT |
186 L3_IP_N_MORE_FRAGMENT)) {
187 pkt_type |= RTE_PTYPE_L4_FRAG;
190 pkt_type |= RTE_PTYPE_L4_NONFRAG;
193 if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
194 pkt_type |= RTE_PTYPE_L4_UDP;
196 else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
197 pkt_type |= RTE_PTYPE_L4_TCP;
199 else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
200 pkt_type |= RTE_PTYPE_L4_SCTP;
202 else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
203 pkt_type |= RTE_PTYPE_L4_ICMP;
205 else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
206 pkt_type |= RTE_PTYPE_UNKNOWN;
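/* Default Rx parse: report checksum errors and the timestamp from the
 * annotation, return common packet types directly, and fall back to
 * the slow parse when detailed parsing is required.
 */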
212 static inline uint32_t __rte_hot
213 dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
215 struct dpaa2_annot_hdr *annotation =
216 (struct dpaa2_annot_hdr *)hw_annot_addr;
218 DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
221 if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
222 mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
223 else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
224 mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
226 mbuf->ol_flags |= PKT_RX_TIMESTAMP;
227 mbuf->timestamp = annotation->word2;
228 DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "", mbuf->timestamp);
230 /* Check detailed parsing requirement */
231 if (annotation->word3 & 0x7FFFFC3FFFF)
232 return dpaa2_dev_rx_parse_slow(mbuf, annotation);
234 /* Return some common types from parse processing */
235 switch (annotation->word4) {
237 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
239 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
240 case DPAA2_L3_IPv4_TCP:
241 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
243 case DPAA2_L3_IPv4_UDP:
244 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
246 case DPAA2_L3_IPv6_TCP:
247 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
249 case DPAA2_L3_IPv6_UDP:
250 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
256 return dpaa2_dev_rx_parse_slow(mbuf, annotation);
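/* Convert a scatter/gather FD into a chained mbuf: walk the S/G table,
 * rebuild each segment's metadata and run the Rx parse on the first
 * segment.
 */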
259 static inline struct rte_mbuf *__rte_hot
260 eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
263 struct qbman_sge *sgt, *sge;
264 size_t sg_addr, fd_addr;
267 struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;
269 fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
270 hw_annot_addr = (void *)(fd_addr + DPAA2_FD_PTA_SIZE);
272 /* Get Scatter gather table address */
273 sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));
276 sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));
278 /* First Scatter gather entry */
279 first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
280 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
281 /* Prepare all the metadata for first segment */
282 first_seg->buf_addr = (uint8_t *)sg_addr;
283 first_seg->ol_flags = 0;
284 first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
285 first_seg->data_len = sge->length & 0x1FFFF;
286 first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
287 first_seg->nb_segs = 1;
288 first_seg->next = NULL;
289 first_seg->port = port_id;
290 if (dpaa2_svr_family == SVR_LX2160A)
291 dpaa2_dev_rx_parse_new(first_seg, fd, hw_annot_addr);
293 first_seg->packet_type =
294 dpaa2_dev_rx_parse(first_seg, hw_annot_addr);
296 rte_mbuf_refcnt_set(first_seg, 1);
298 while (!DPAA2_SG_IS_FINAL(sge)) {
300 sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
301 DPAA2_GET_FLE_ADDR(sge));
302 next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
303 rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
304 next_seg->buf_addr = (uint8_t *)sg_addr;
305 next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
306 next_seg->data_len = sge->length & 0x1FFFF;
307 first_seg->nb_segs += 1;
308 rte_mbuf_refcnt_set(next_seg, 1);
309 cur_seg->next = next_seg;
310 next_seg->next = NULL;
313 temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
314 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
315 rte_mbuf_refcnt_set(temp, 1);
316 rte_pktmbuf_free_seg(temp);
318 return (void *)first_seg;
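/* Convert a contiguous FD into a single-segment mbuf and run the Rx
 * parse (FRC based on LX2, annotation based on other platforms).
 */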
321 static inline struct rte_mbuf *__rte_hot
322 eth_fd_to_mbuf(const struct qbman_fd *fd,
325 void *v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
326 void *hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
327 struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
328 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	/* Some fields need to be repopulated, as they may have
	 * changed during the last transmission.
	 */
335 mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
336 mbuf->data_len = DPAA2_GET_FD_LEN(fd);
337 mbuf->pkt_len = mbuf->data_len;
338 mbuf->port = port_id;
340 rte_mbuf_refcnt_set(mbuf, 1);
342 /* Parse the packet */
	/* Parse results for LX2 are in the FRC field of the FD.
	 * For other DPAA2 platforms, parse results follow the
	 * private software annotation area.
	 */
348 if (dpaa2_svr_family == SVR_LX2160A)
349 dpaa2_dev_rx_parse_new(mbuf, fd, hw_annot_addr);
351 mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, hw_annot_addr);
353 DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
354 "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
355 mbuf, mbuf->buf_addr, mbuf->data_off,
356 DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
357 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
358 DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
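/* Build a scatter/gather FD from a multi-segment mbuf. The S/G table
 * lives in a buffer allocated from the mbuf's own pool; reference
 * counts decide whether the hardware may free each segment (an
 * invalid bpid is set otherwise).
 */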
363 static int __rte_noinline __rte_hot
364 eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
365 struct qbman_fd *fd, uint16_t bpid)
367 struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
368 struct qbman_sge *sgt, *sge = NULL;
371 temp = rte_pktmbuf_alloc(mbuf->pool);
373 DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
377 DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
378 DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
379 DPAA2_SET_ONLY_FD_BPID(fd, bpid);
380 DPAA2_SET_FD_OFFSET(fd, temp->data_off);
381 DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
382 DPAA2_RESET_FD_FRC(fd);
383 DPAA2_RESET_FD_CTRL(fd);
384 /*Set Scatter gather table and Scatter gather entries*/
385 sgt = (struct qbman_sge *)(
386 (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
387 + DPAA2_GET_FD_OFFSET(fd));
389 for (i = 0; i < mbuf->nb_segs; i++) {
391 /*Resetting the buffer pool id and offset field*/
392 sge->fin_bpid_offset = 0;
393 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
394 DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
395 sge->length = cur_seg->data_len;
396 if (RTE_MBUF_DIRECT(cur_seg)) {
397 if (rte_mbuf_refcnt_read(cur_seg) > 1) {
398 /* If refcnt > 1, invalid bpid is set to ensure
399 * buffer is not freed by HW
401 DPAA2_SET_FLE_IVP(sge);
402 rte_mbuf_refcnt_update(cur_seg, -1);
404 DPAA2_SET_FLE_BPID(sge,
405 mempool_to_bpid(cur_seg->pool));
406 cur_seg = cur_seg->next;
408 /* Get owner MBUF from indirect buffer */
409 mi = rte_mbuf_from_indirect(cur_seg);
410 if (rte_mbuf_refcnt_read(mi) > 1) {
411 /* If refcnt > 1, invalid bpid is set to ensure
412 * owner buffer is not freed by HW
414 DPAA2_SET_FLE_IVP(sge);
416 DPAA2_SET_FLE_BPID(sge,
417 mempool_to_bpid(mi->pool));
418 rte_mbuf_refcnt_update(mi, 1);
421 cur_seg = cur_seg->next;
422 prev_seg->next = NULL;
423 rte_pktmbuf_free(prev_seg);
426 DPAA2_SG_SET_FINAL(sge, true);
431 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
432 struct qbman_fd *fd, uint16_t bpid) __rte_unused;
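/* Build a contiguous FD from a single-segment mbuf, handling direct
 * and indirect mbufs so that buffers still referenced elsewhere are
 * not freed by the hardware.
 */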
434 static void __rte_noinline __rte_hot
435 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
436 struct qbman_fd *fd, uint16_t bpid)
438 DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
440 DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
441 "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
442 mbuf, mbuf->buf_addr, mbuf->data_off,
443 DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
444 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
445 DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
446 if (RTE_MBUF_DIRECT(mbuf)) {
447 if (rte_mbuf_refcnt_read(mbuf) > 1) {
448 DPAA2_SET_FD_IVP(fd);
449 rte_mbuf_refcnt_update(mbuf, -1);
454 mi = rte_mbuf_from_indirect(mbuf);
455 if (rte_mbuf_refcnt_read(mi) > 1)
456 DPAA2_SET_FD_IVP(fd);
458 rte_mbuf_refcnt_update(mi, 1);
459 rte_pktmbuf_free(mbuf);
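/* Copy the packet into a buffer taken from the DPAA2 buffer pool and
 * build a contiguous FD from that copy; used when the mbuf does not
 * come from a DPAA2-backed mempool.
 */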
463 static inline int __rte_hot
464 eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
465 struct qbman_fd *fd, uint16_t bpid)
470 if (rte_dpaa2_mbuf_alloc_bulk(
471 rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
		DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
475 m = (struct rte_mbuf *)mb;
476 memcpy((char *)m->buf_addr + mbuf->data_off,
477 (void *)((char *)mbuf->buf_addr + mbuf->data_off),
480 /* Copy required fields */
481 m->data_off = mbuf->data_off;
482 m->ol_flags = mbuf->ol_flags;
483 m->packet_type = mbuf->packet_type;
484 m->tx_offload = mbuf->tx_offload;
486 DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);
489 "mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
490 " meta: %d, off: %d, len: %d\n",
493 DPAA2_GET_FD_ADDR(fd),
494 DPAA2_GET_FD_BPID(fd),
495 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
496 DPAA2_GET_FD_OFFSET(fd),
497 DPAA2_GET_FD_LEN(fd));
/* This function assumes that the caller keeps the same nb_pkts value
 * across calls for a given queue; if that is not the case, the
 * non-prefetch version of the Rx call should be used instead.
 * It returns the number of packets requested in the previous call,
 * without honoring the current nb_pkts or the space available in bufs.
 */
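/* Illustrative usage sketch (queue handle name is hypothetical),
 * polling with a constant burst size per queue:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb = dpaa2_dev_prefetch_rx(rxq, pkts, 32);
 */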
509 dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
	/* Function receives frames for a given device and VQ */
512 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
513 struct qbman_result *dq_storage, *dq_storage1 = NULL;
514 uint32_t fqid = dpaa2_q->fqid;
515 int ret, num_rx = 0, pull_size;
516 uint8_t pending, status;
517 struct qbman_swp *swp;
518 const struct qbman_fd *fd;
519 struct qbman_pull_desc pulldesc;
520 struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
521 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
522 #if defined(RTE_LIBRTE_IEEE1588)
523 struct dpaa2_dev_priv *priv = eth_data->dev_private;
526 if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
527 ret = dpaa2_affine_qbman_ethrx_swp();
529 DPAA2_PMD_ERR("Failure in affining portal");
534 if (unlikely(!rte_dpaa2_bpid_info &&
535 rte_eal_process_type() == RTE_PROC_SECONDARY))
536 rte_dpaa2_bpid_info = dpaa2_q->bp_array;
538 swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
539 pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
540 if (unlikely(!q_storage->active_dqs)) {
541 q_storage->toggle = 0;
542 dq_storage = q_storage->dq_storage[q_storage->toggle];
543 q_storage->last_num_pkts = pull_size;
544 qbman_pull_desc_clear(&pulldesc);
545 qbman_pull_desc_set_numframes(&pulldesc,
546 q_storage->last_num_pkts);
547 qbman_pull_desc_set_fq(&pulldesc, fqid);
548 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
549 (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
550 if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
551 while (!qbman_check_command_complete(
553 DPAA2_PER_LCORE_ETHRX_DPIO->index)))
555 clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
558 if (qbman_swp_pull(swp, &pulldesc)) {
559 DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
560 " QBMAN is busy (1)\n");
561 /* Portal was busy, try again */
566 q_storage->active_dqs = dq_storage;
567 q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
568 set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
572 dq_storage = q_storage->active_dqs;
573 rte_prefetch0((void *)(size_t)(dq_storage));
574 rte_prefetch0((void *)(size_t)(dq_storage + 1));
	/* Prepare the next pull descriptor. This gives space for the
	 * prefetching done on DQRR entries.
	 */
579 q_storage->toggle ^= 1;
580 dq_storage1 = q_storage->dq_storage[q_storage->toggle];
581 qbman_pull_desc_clear(&pulldesc);
582 qbman_pull_desc_set_numframes(&pulldesc, pull_size);
583 qbman_pull_desc_set_fq(&pulldesc, fqid);
584 qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
585 (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
	/* Check whether the previously issued command has completed.
	 * The SWP also appears to be shared between the Ethernet
	 * driver and the SEC driver.
	 */
591 while (!qbman_check_command_complete(dq_storage))
593 if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
594 clear_swp_active_dqs(q_storage->active_dpio_id);
		/* Loop until the dq_storage is updated with a new token by QBMAN */
602 while (!qbman_check_new_result(dq_storage))
604 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether the last pull command has expired and
		 * set the condition for loop termination.
		 */
608 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
610 /* Check for valid frame. */
611 status = qbman_result_DQ_flags(dq_storage);
612 if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
615 fd = qbman_result_DQ_fd(dq_storage);
617 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
618 if (dpaa2_svr_family != SVR_LX2160A) {
619 const struct qbman_fd *next_fd =
620 qbman_result_DQ_fd(dq_storage + 1);
621 /* Prefetch Annotation address for the parse results */
622 rte_prefetch0(DPAA2_IOVA_TO_VADDR((DPAA2_GET_FD_ADDR(
623 next_fd) + DPAA2_FD_PTA_SIZE + 16)));
627 if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
628 bufs[num_rx] = eth_sg_fd_to_mbuf(fd, eth_data->port_id);
630 bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
631 #if defined(RTE_LIBRTE_IEEE1588)
632 priv->rx_timestamp = bufs[num_rx]->timestamp;
635 if (eth_data->dev_conf.rxmode.offloads &
636 DEV_RX_OFFLOAD_VLAN_STRIP)
637 rte_vlan_strip(bufs[num_rx]);
643 if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
644 while (!qbman_check_command_complete(
645 get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
647 clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
649 /* issue a volatile dequeue command for next pull */
651 if (qbman_swp_pull(swp, &pulldesc)) {
652 DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
653 "QBMAN is busy (2)\n");
658 q_storage->active_dqs = dq_storage1;
659 q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
660 set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
662 dpaa2_q->rx_pkts += num_rx;
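/* Event-dev callback for parallel queues: fill the rte_event from the
 * Rx queue's event template, convert the FD to an mbuf and consume the
 * DQRR entry immediately.
 */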
668 dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
669 const struct qbman_fd *fd,
670 const struct qbman_result *dq,
671 struct dpaa2_queue *rxq,
672 struct rte_event *ev)
674 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
675 DPAA2_FD_PTA_SIZE + 16));
677 ev->flow_id = rxq->ev.flow_id;
678 ev->sub_event_type = rxq->ev.sub_event_type;
679 ev->event_type = RTE_EVENT_TYPE_ETHDEV;
680 ev->op = RTE_EVENT_OP_NEW;
681 ev->sched_type = rxq->ev.sched_type;
682 ev->queue_id = rxq->ev.queue_id;
683 ev->priority = rxq->ev.priority;
685 ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
687 qbman_swp_dqrr_consume(swp, dq);
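/* Event-dev callback for atomic queues: as above, but the DQRR entry
 * is held (its index is recorded in the mbuf seqn) and consumed later.
 */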
691 dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused,
692 const struct qbman_fd *fd,
693 const struct qbman_result *dq,
694 struct dpaa2_queue *rxq,
695 struct rte_event *ev)
699 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
700 DPAA2_FD_PTA_SIZE + 16));
702 ev->flow_id = rxq->ev.flow_id;
703 ev->sub_event_type = rxq->ev.sub_event_type;
704 ev->event_type = RTE_EVENT_TYPE_ETHDEV;
705 ev->op = RTE_EVENT_OP_NEW;
706 ev->sched_type = rxq->ev.sched_type;
707 ev->queue_id = rxq->ev.queue_id;
708 ev->priority = rxq->ev.priority;
710 ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
712 dqrr_index = qbman_get_dqrr_idx(dq);
713 ev->mbuf->seqn = dqrr_index + 1;
714 DPAA2_PER_LCORE_DQRR_SIZE++;
715 DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
716 DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
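/* Event-dev callback for ordered queues: encode the ODP id and
 * sequence number from the dequeue result into the mbuf seqn so that
 * order can be restored at enqueue time.
 */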
720 dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
721 const struct qbman_fd *fd,
722 const struct qbman_result *dq,
723 struct dpaa2_queue *rxq,
724 struct rte_event *ev)
726 rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
727 DPAA2_FD_PTA_SIZE + 16));
729 ev->flow_id = rxq->ev.flow_id;
730 ev->sub_event_type = rxq->ev.sub_event_type;
731 ev->event_type = RTE_EVENT_TYPE_ETHDEV;
732 ev->op = RTE_EVENT_OP_NEW;
733 ev->sched_type = rxq->ev.sched_type;
734 ev->queue_id = rxq->ev.queue_id;
735 ev->priority = rxq->ev.priority;
737 ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
739 ev->mbuf->seqn = DPAA2_ENQUEUE_FLAG_ORP;
740 ev->mbuf->seqn |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
741 ev->mbuf->seqn |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;
743 qbman_swp_dqrr_consume(swp, dq);
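/* Non-prefetch Rx: issue volatile dequeue commands in a loop until
 * nb_pkts frames have been received or the queue runs dry.
 */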
747 dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
	/* Function receives frames for a given device and VQ */
750 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
751 struct qbman_result *dq_storage;
752 uint32_t fqid = dpaa2_q->fqid;
753 int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
754 uint8_t pending, status;
755 struct qbman_swp *swp;
756 const struct qbman_fd *fd;
757 struct qbman_pull_desc pulldesc;
758 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
760 if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
761 ret = dpaa2_affine_qbman_swp();
764 "Failed to allocate IO portal, tid: %d\n",
769 swp = DPAA2_PER_LCORE_PORTAL;
772 dq_storage = dpaa2_q->q_storage->dq_storage[0];
773 qbman_pull_desc_clear(&pulldesc);
774 qbman_pull_desc_set_fq(&pulldesc, fqid);
775 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
776 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
778 if (next_pull > dpaa2_dqrr_size) {
779 qbman_pull_desc_set_numframes(&pulldesc,
781 next_pull -= dpaa2_dqrr_size;
783 qbman_pull_desc_set_numframes(&pulldesc, next_pull);
788 if (qbman_swp_pull(swp, &pulldesc)) {
790 "VDQ command is not issued.QBMAN is busy\n");
791 /* Portal was busy, try again */
797 rte_prefetch0((void *)((size_t)(dq_storage + 1)));
		/* Check whether the previously issued command has completed. */
799 while (!qbman_check_command_complete(dq_storage))
			/* Loop until the dq_storage is updated with a new token by QBMAN */
808 while (!qbman_check_new_result(dq_storage))
810 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
			/* Check whether the last pull command has expired and
			 * set the condition for loop termination.
			 */
814 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
816 /* Check for valid frame. */
817 status = qbman_result_DQ_flags(dq_storage);
818 if (unlikely((status &
819 QBMAN_DQ_STAT_VALIDFRAME) == 0))
822 fd = qbman_result_DQ_fd(dq_storage);
824 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
825 if (dpaa2_svr_family != SVR_LX2160A) {
826 const struct qbman_fd *next_fd =
827 qbman_result_DQ_fd(dq_storage + 1);
				/* Prefetch annotation address for the parse results */
832 rte_prefetch0((DPAA2_IOVA_TO_VADDR(
833 DPAA2_GET_FD_ADDR(next_fd) +
834 DPAA2_FD_PTA_SIZE + 16)));
838 if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
839 bufs[num_rx] = eth_sg_fd_to_mbuf(fd,
842 bufs[num_rx] = eth_fd_to_mbuf(fd,
845 if (eth_data->dev_conf.rxmode.offloads &
846 DEV_RX_OFFLOAD_VLAN_STRIP) {
847 rte_vlan_strip(bufs[num_rx]);
854 /* Last VDQ provided all packets and more packets are requested */
855 } while (next_pull && num_pulled == dpaa2_dqrr_size);
857 dpaa2_q->rx_pkts += num_rx;
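/* Drain the Tx confirmation queue and release the confirmed buffers
 * back to the hardware buffer pool (also records the Tx timestamp when
 * IEEE 1588 support is enabled).
 */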
862 uint16_t dpaa2_dev_tx_conf(void *queue)
	/* Function receives Tx confirmation frames for a given device and VQ */
865 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
866 struct qbman_result *dq_storage;
867 uint32_t fqid = dpaa2_q->fqid;
868 int ret, num_tx_conf = 0, num_pulled;
869 uint8_t pending, status;
870 struct qbman_swp *swp;
871 const struct qbman_fd *fd, *next_fd;
872 struct qbman_pull_desc pulldesc;
873 struct qbman_release_desc releasedesc;
876 #if defined(RTE_LIBRTE_IEEE1588)
877 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
878 struct dpaa2_dev_priv *priv = eth_data->dev_private;
879 struct dpaa2_annot_hdr *annotation;
882 if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
883 ret = dpaa2_affine_qbman_swp();
886 "Failed to allocate IO portal, tid: %d\n",
891 swp = DPAA2_PER_LCORE_PORTAL;
894 dq_storage = dpaa2_q->q_storage->dq_storage[0];
895 qbman_pull_desc_clear(&pulldesc);
896 qbman_pull_desc_set_fq(&pulldesc, fqid);
897 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
898 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
900 qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
903 if (qbman_swp_pull(swp, &pulldesc)) {
904 DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
906 /* Portal was busy, try again */
912 rte_prefetch0((void *)((size_t)(dq_storage + 1)));
		/* Check whether the previously issued command has completed. */
914 while (!qbman_check_command_complete(dq_storage))
			/* Loop until the dq_storage is updated with a new token by QBMAN */
923 while (!qbman_check_new_result(dq_storage))
925 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
			/* Check whether the last pull command has expired and
			 * set the condition for loop termination.
			 */
929 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
931 /* Check for valid frame. */
932 status = qbman_result_DQ_flags(dq_storage);
933 if (unlikely((status &
934 QBMAN_DQ_STAT_VALIDFRAME) == 0))
937 fd = qbman_result_DQ_fd(dq_storage);
939 next_fd = qbman_result_DQ_fd(dq_storage + 1);
940 /* Prefetch Annotation address for the parse results */
941 rte_prefetch0((void *)(size_t)
942 (DPAA2_GET_FD_ADDR(next_fd) +
943 DPAA2_FD_PTA_SIZE + 16));
945 bpid = DPAA2_GET_FD_BPID(fd);
			/* Create a release descriptor required for releasing the buffer into QBMAN */
950 qbman_release_desc_clear(&releasedesc);
951 qbman_release_desc_set_bpid(&releasedesc, bpid);
953 buf = DPAA2_GET_FD_ADDR(fd);
954 /* feed them to bman */
956 ret = qbman_swp_release(swp, &releasedesc,
958 } while (ret == -EBUSY);
963 #if defined(RTE_LIBRTE_IEEE1588)
964 annotation = (struct dpaa2_annot_hdr *)((size_t)
965 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
967 priv->tx_timestamp = annotation->word2;
971 /* Last VDQ provided all packets and more packets are requested */
972 } while (num_pulled == dpaa2_dqrr_size);
974 dpaa2_q->rx_pkts += num_tx_conf;
979 /* Configure the egress frame annotation for timestamp update */
980 static void enable_tx_tstamp(struct qbman_fd *fd)
982 struct dpaa2_faead *fd_faead;
984 /* Set frame annotation status field as valid */
985 (fd)->simple.frc |= DPAA2_FD_FRC_FASV;
987 /* Set frame annotation egress action descriptor as valid */
988 (fd)->simple.frc |= DPAA2_FD_FRC_FAEADV;
990 /* Set Annotation Length as 128B */
991 (fd)->simple.ctrl |= DPAA2_FD_CTRL_ASAL;
993 /* enable update of confirmation frame annotation */
994 fd_faead = (struct dpaa2_faead *)((size_t)
995 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
996 DPAA2_FD_PTA_SIZE + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET);
997 fd_faead->ctrl = DPAA2_ANNOT_FAEAD_A2V | DPAA2_ANNOT_FAEAD_UPDV |
998 DPAA2_ANNOT_FAEAD_UPD;
1002 * Callback to handle sending packets through WRIOP based interface
1005 dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
	/* Function to transmit frames to the given device and VQ */
1008 uint32_t loop, retry_count;
1010 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1011 struct rte_mbuf *mi;
1012 uint32_t frames_to_send;
1013 struct rte_mempool *mp;
1014 struct qbman_eq_desc eqdesc;
1015 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1016 struct qbman_swp *swp;
1017 uint16_t num_tx = 0;
1019 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1020 struct dpaa2_dev_priv *priv = eth_data->dev_private;
1021 uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1023 if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1024 ret = dpaa2_affine_qbman_swp();
1027 "Failed to allocate IO portal, tid: %d\n",
1032 swp = DPAA2_PER_LCORE_PORTAL;
1034 DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
1035 eth_data, dpaa2_q->fqid);
1037 #ifdef RTE_LIBRTE_IEEE1588
	/* The IEEE 1588 driver needs a pointer to the Tx confirmation
	 * queue corresponding to the last transmitted packet in order
	 * to read the timestamp.
	 */
1042 priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
1043 dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
1046 /*Prepare enqueue descriptor*/
1047 qbman_eq_desc_clear(&eqdesc);
1048 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1049 qbman_eq_desc_set_fq(&eqdesc, dpaa2_q->fqid);
1051 /*Clear the unused FD fields before sending*/
1053 /*Check if the queue is congested*/
1055 while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1057 /* Retry for some time before giving up */
1058 if (retry_count > CONG_RETRY_COUNT)
1062 frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1063 dpaa2_eqcr_size : nb_pkts;
1065 for (loop = 0; loop < frames_to_send; loop++) {
1066 if ((*bufs)->seqn) {
1067 uint8_t dqrr_index = (*bufs)->seqn - 1;
1069 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
1071 DPAA2_PER_LCORE_DQRR_SIZE--;
1072 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1073 (*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN;
1076 if (likely(RTE_MBUF_DIRECT(*bufs))) {
1078 /* Check the basic scenario and set
1079 * the FD appropriately here itself.
1081 if (likely(mp && mp->ops_index ==
1082 priv->bp_list->dpaa2_ops_index &&
1083 (*bufs)->nb_segs == 1 &&
1084 rte_mbuf_refcnt_read((*bufs)) == 1)) {
1085 if (unlikely(((*bufs)->ol_flags
1086 & PKT_TX_VLAN_PKT) ||
1087 (eth_data->dev_conf.txmode.offloads
1088 & DEV_TX_OFFLOAD_VLAN_INSERT))) {
1089 ret = rte_vlan_insert(bufs);
1093 DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1094 &fd_arr[loop], mempool_to_bpid(mp));
1096 #ifdef RTE_LIBRTE_IEEE1588
1097 enable_tx_tstamp(&fd_arr[loop]);
1102 mi = rte_mbuf_from_indirect(*bufs);
1105 /* Not a hw_pkt pool allocated frame */
1106 if (unlikely(!mp || !priv->bp_list)) {
1107 DPAA2_PMD_ERR("Err: No buffer pool attached");
1111 if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
1112 (eth_data->dev_conf.txmode.offloads
1113 & DEV_TX_OFFLOAD_VLAN_INSERT))) {
1114 int ret = rte_vlan_insert(bufs);
1118 if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1119 DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1120 /* alloc should be from the default buffer pool
1121 * attached to this interface
1123 bpid = priv->bp_list->buf_pool.bpid;
1125 if (unlikely((*bufs)->nb_segs > 1)) {
1126 DPAA2_PMD_ERR("S/G support not added"
1127 " for non hw offload buffer");
1130 if (eth_copy_mbuf_to_fd(*bufs,
1131 &fd_arr[loop], bpid)) {
1134 /* free the original packet */
1135 rte_pktmbuf_free(*bufs);
1137 bpid = mempool_to_bpid(mp);
1138 if (unlikely((*bufs)->nb_segs > 1)) {
1139 if (eth_mbuf_to_sg_fd(*bufs,
1140 &fd_arr[loop], bpid))
1143 eth_mbuf_to_fd(*bufs,
1144 &fd_arr[loop], bpid);
1147 #ifdef RTE_LIBRTE_IEEE1588
1148 enable_tx_tstamp(&fd_arr[loop]);
1155 while (loop < frames_to_send) {
1156 ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1157 &fd_arr[loop], &flags[loop],
1158 frames_to_send - loop);
1159 if (unlikely(ret < 0)) {
1161 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1175 dpaa2_q->tx_pkts += num_tx;
1179 /* send any already prepared fd */
1185 ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1189 if (unlikely(ret < 0)) {
1191 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1201 dpaa2_q->tx_pkts += num_tx;
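/* Free the mbuf referenced by an enqueue-response entry. */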
1206 dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci)
1208 struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1209 struct qbman_fd *fd;
1212 fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
	/* The port id does not matter here since the mbuf is going to be freed */
1215 m = eth_fd_to_mbuf(fd, 0);
1216 rte_pktmbuf_free(m);
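/* Prepare a per-packet enqueue descriptor for ordered/atomic traffic:
 * either program order restoration (ORP) from the mbuf seqn or arm
 * DQRR consumption (DCA) for a held entry.
 */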
1220 dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
1222 struct qbman_eq_desc *eqdesc)
1224 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1225 struct dpaa2_dev_priv *priv = eth_data->dev_private;
1226 struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1227 struct eqresp_metadata *eqresp_meta;
1228 uint16_t orpid, seqnum;
1231 qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid);
1233 if (m->seqn & DPAA2_ENQUEUE_FLAG_ORP) {
1234 orpid = (m->seqn & DPAA2_EQCR_OPRID_MASK) >>
1235 DPAA2_EQCR_OPRID_SHIFT;
1236 seqnum = (m->seqn & DPAA2_EQCR_SEQNUM_MASK) >>
1237 DPAA2_EQCR_SEQNUM_SHIFT;
1239 if (!priv->en_loose_ordered) {
1240 qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
1241 qbman_eq_desc_set_response(eqdesc, (uint64_t)
1242 DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
1243 dpio_dev->eqresp_pi]), 1);
1244 qbman_eq_desc_set_token(eqdesc, 1);
1246 eqresp_meta = &dpio_dev->eqresp_meta[
1247 dpio_dev->eqresp_pi];
1248 eqresp_meta->dpaa2_q = dpaa2_q;
1249 eqresp_meta->mp = m->pool;
1251 dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
1252 dpio_dev->eqresp_pi++ :
1253 (dpio_dev->eqresp_pi = 0);
1255 qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
1258 dq_idx = m->seqn - 1;
1259 qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
1260 DPAA2_PER_LCORE_DQRR_SIZE--;
1261 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
1263 m->seqn = DPAA2_INVALID_MBUF_SEQN;
1266 /* Callback to handle sending ordered packets through WRIOP based interface */
1268 dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
	/* Function to transmit frames to the given device and VQ */
1271 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1272 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1273 struct dpaa2_dev_priv *priv = eth_data->dev_private;
1274 struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
1275 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1276 struct rte_mbuf *mi;
1277 struct rte_mempool *mp;
1278 struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1279 struct qbman_swp *swp;
1280 uint32_t frames_to_send, num_free_eq_desc;
1281 uint32_t loop, retry_count;
1283 uint16_t num_tx = 0;
1286 if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1287 ret = dpaa2_affine_qbman_swp();
1290 "Failed to allocate IO portal, tid: %d\n",
1295 swp = DPAA2_PER_LCORE_PORTAL;
1297 DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
1298 eth_data, dpaa2_q->fqid);
1300 /* This would also handle normal and atomic queues as any type
1301 * of packet can be enqueued when ordered queues are being used.
1304 /*Check if the queue is congested*/
1306 while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1308 /* Retry for some time before giving up */
1309 if (retry_count > CONG_RETRY_COUNT)
1313 frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1314 dpaa2_eqcr_size : nb_pkts;
1316 if (!priv->en_loose_ordered) {
1317 if ((*bufs)->seqn & DPAA2_ENQUEUE_FLAG_ORP) {
1318 num_free_eq_desc = dpaa2_free_eq_descriptors();
1319 if (num_free_eq_desc < frames_to_send)
1320 frames_to_send = num_free_eq_desc;
1324 for (loop = 0; loop < frames_to_send; loop++) {
1325 /*Prepare enqueue descriptor*/
1326 qbman_eq_desc_clear(&eqdesc[loop]);
1328 if ((*bufs)->seqn) {
			/* Use only queue 0 for Tx in case of atomic/
			 * ordered packets, as packets can get reordered
			 * when being transmitted out from the interface.
			 */
1333 dpaa2_set_enqueue_descriptor(order_sendq,
1337 qbman_eq_desc_set_no_orp(&eqdesc[loop],
1338 DPAA2_EQ_RESP_ERR_FQ);
1339 qbman_eq_desc_set_fq(&eqdesc[loop],
1343 if (likely(RTE_MBUF_DIRECT(*bufs))) {
1345 /* Check the basic scenario and set
1346 * the FD appropriately here itself.
1348 if (likely(mp && mp->ops_index ==
1349 priv->bp_list->dpaa2_ops_index &&
1350 (*bufs)->nb_segs == 1 &&
1351 rte_mbuf_refcnt_read((*bufs)) == 1)) {
1352 if (unlikely((*bufs)->ol_flags
1353 & PKT_TX_VLAN_PKT)) {
1354 ret = rte_vlan_insert(bufs);
1358 DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1360 mempool_to_bpid(mp));
1365 mi = rte_mbuf_from_indirect(*bufs);
1368 /* Not a hw_pkt pool allocated frame */
1369 if (unlikely(!mp || !priv->bp_list)) {
1370 DPAA2_PMD_ERR("Err: No buffer pool attached");
1374 if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1375 DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1376 /* alloc should be from the default buffer pool
1377 * attached to this interface
1379 bpid = priv->bp_list->buf_pool.bpid;
1381 if (unlikely((*bufs)->nb_segs > 1)) {
1383 "S/G not supp for non hw offload buffer");
1386 if (eth_copy_mbuf_to_fd(*bufs,
1387 &fd_arr[loop], bpid)) {
1390 /* free the original packet */
1391 rte_pktmbuf_free(*bufs);
1393 bpid = mempool_to_bpid(mp);
1394 if (unlikely((*bufs)->nb_segs > 1)) {
1395 if (eth_mbuf_to_sg_fd(*bufs,
1400 eth_mbuf_to_fd(*bufs,
1401 &fd_arr[loop], bpid);
1409 while (loop < frames_to_send) {
1410 ret = qbman_swp_enqueue_multiple_desc(swp,
1411 &eqdesc[loop], &fd_arr[loop],
1412 frames_to_send - loop);
1413 if (unlikely(ret < 0)) {
1415 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1429 dpaa2_q->tx_pkts += num_tx;
1433 /* send any already prepared fd */
1439 ret = qbman_swp_enqueue_multiple_desc(swp,
1440 &eqdesc[loop], &fd_arr[i], loop - i);
1441 if (unlikely(ret < 0)) {
1443 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1453 dpaa2_q->tx_pkts += num_tx;
1458 * Dummy DPDK callback for TX.
1460 * This function is used to temporarily replace the real callback during
1461 * unsafe control operations on the queue, or in case of error.
1464 * Generic pointer to TX queue structure.
1466 * Packets to transmit.
1468 * Number of packets in array.
1471 * Number of packets successfully transmitted (<= pkts_n).
1474 dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1482 #if defined(RTE_TOOLCHAIN_GCC)
1483 #pragma GCC diagnostic push
1484 #pragma GCC diagnostic ignored "-Wcast-qual"
1485 #elif defined(RTE_TOOLCHAIN_CLANG)
1486 #pragma clang diagnostic push
1487 #pragma clang diagnostic ignored "-Wcast-qual"
/* This function loops back all received packets. */
1492 dpaa2_dev_loopback_rx(void *queue,
1493 struct rte_mbuf **bufs __rte_unused,
	/* Function receives frames for a given device and VQ */
1497 struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1498 struct qbman_result *dq_storage, *dq_storage1 = NULL;
1499 uint32_t fqid = dpaa2_q->fqid;
1500 int ret, num_rx = 0, num_tx = 0, pull_size;
1501 uint8_t pending, status;
1502 struct qbman_swp *swp;
1503 struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
1504 struct qbman_pull_desc pulldesc;
1505 struct qbman_eq_desc eqdesc;
1506 struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
1507 struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1508 struct dpaa2_dev_priv *priv = eth_data->dev_private;
1509 struct dpaa2_queue *tx_q = priv->tx_vq[0];
	/* TODO: currently only the first Tx queue is used for loopback */
1512 if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
1513 ret = dpaa2_affine_qbman_ethrx_swp();
1515 DPAA2_PMD_ERR("Failure in affining portal");
1519 swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
1520 pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
1521 if (unlikely(!q_storage->active_dqs)) {
1522 q_storage->toggle = 0;
1523 dq_storage = q_storage->dq_storage[q_storage->toggle];
1524 q_storage->last_num_pkts = pull_size;
1525 qbman_pull_desc_clear(&pulldesc);
1526 qbman_pull_desc_set_numframes(&pulldesc,
1527 q_storage->last_num_pkts);
1528 qbman_pull_desc_set_fq(&pulldesc, fqid);
1529 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1530 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
1531 if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1532 while (!qbman_check_command_complete(
1534 DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1536 clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1539 if (qbman_swp_pull(swp, &pulldesc)) {
1541 "VDQ command not issued.QBMAN busy\n");
1542 /* Portal was busy, try again */
1547 q_storage->active_dqs = dq_storage;
1548 q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
1549 set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
1553 dq_storage = q_storage->active_dqs;
1554 rte_prefetch0((void *)(size_t)(dq_storage));
1555 rte_prefetch0((void *)(size_t)(dq_storage + 1));
	/* Prepare the next pull descriptor. This gives space for the
	 * prefetching done on DQRR entries.
	 */
1560 q_storage->toggle ^= 1;
1561 dq_storage1 = q_storage->dq_storage[q_storage->toggle];
1562 qbman_pull_desc_clear(&pulldesc);
1563 qbman_pull_desc_set_numframes(&pulldesc, pull_size);
1564 qbman_pull_desc_set_fq(&pulldesc, fqid);
1565 qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
1566 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
1568 /*Prepare enqueue descriptor*/
1569 qbman_eq_desc_clear(&eqdesc);
1570 qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1571 qbman_eq_desc_set_response(&eqdesc, 0, 0);
1572 qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);
	/* Check whether the previously issued command has completed.
	 * The SWP also appears to be shared between the Ethernet
	 * driver and the SEC driver.
	 */
1578 while (!qbman_check_command_complete(dq_storage))
1580 if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
1581 clear_swp_active_dqs(q_storage->active_dpio_id);
		/* Loop until the dq_storage is updated with a new token by QBMAN */
1589 while (!qbman_check_new_result(dq_storage))
1591 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether the last pull command has expired and
		 * set the condition for loop termination.
		 */
1595 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1597 /* Check for valid frame. */
1598 status = qbman_result_DQ_flags(dq_storage);
1599 if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
1602 fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage);
1608 while (num_tx < num_rx) {
1609 num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
1610 &fd[num_tx], 0, num_rx - num_tx);
1613 if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1614 while (!qbman_check_command_complete(
1615 get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1617 clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1619 /* issue a volatile dequeue command for next pull */
1621 if (qbman_swp_pull(swp, &pulldesc)) {
1622 DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
1623 "QBMAN is busy (2)\n");
1628 q_storage->active_dqs = dq_storage1;
1629 q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
1630 set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
1632 dpaa2_q->rx_pkts += num_rx;
1633 dpaa2_q->tx_pkts += num_tx;
1637 #if defined(RTE_TOOLCHAIN_GCC)
1638 #pragma GCC diagnostic pop
1639 #elif defined(RTE_TOOLCHAIN_CLANG)
1640 #pragma clang diagnostic pop