/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016-2018 NXP
 *
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_pmd_logs.h"
#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"

static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
			struct dpaa2_annot_hdr *annotation);

#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
	DPAA2_SET_FD_FRC(_fd, 0); \
	DPAA2_RESET_FD_CTRL(_fd); \
	DPAA2_RESET_FD_FLC(_fd); \
} while (0)
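
/* On LX2160A the hardware parser writes a parse summary into the FRC field
 * of the frame descriptor, so the mbuf packet type can be filled from that
 * summary alone; anything not covered falls back to the annotation-based
 * slow parser.
 */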
static inline void __attribute__((hot))
dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd)
{
	uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);

	m->packet_type = RTE_PTYPE_UNKNOWN;
	switch (frc) {
	case DPAA2_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA2_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA2_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA2_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
		break;
	case DPAA2_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
		break;
	default:
		/* Not summarised in the FRC; use the annotation-based
		 * slow parser.
		 */
		m->packet_type = dpaa2_dev_rx_parse_slow(m,
		  (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			 + DPAA2_FD_PTA_SIZE));
	}
	m->hash.rss = fd->simple.flc_hi;
	m->ol_flags |= PKT_RX_RSS_HASH;
}
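
/* Annotation-based (slow) parser: walk the hardware annotation words to
 * recover VLAN information and build the packet type bit by bit.
 */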
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
			struct dpaa2_annot_hdr *annotation)
{
	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
	uint16_t *vlan_tci;

	DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
			"(4)=0x%" PRIx64 "\t",
			annotation->word3, annotation->word4);

	if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= PKT_RX_VLAN;
		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ;
		pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
	}

	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
		goto parse_done;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
			     L3_IPV4_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV4;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
		    L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
		   L3_IPV6_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV6;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
		    L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
	    L3_IP_1_MORE_FRAGMENT |
	    L3_IP_N_FIRST_FRAGMENT |
	    L3_IP_N_MORE_FRAGMENT)) {
		pkt_type |= RTE_PTYPE_L4_FRAG;
		goto parse_done;
	} else {
		pkt_type |= RTE_PTYPE_L4_NONFRAG;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_UDP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_TCP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_SCTP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_ICMP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
		pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
	return pkt_type;
}
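
/* Fast parse for non-LX2 platforms: set checksum error flags from the frame
 * annotation status, return the common L2/L3/L4 combinations directly from
 * annotation word4, and fall back to the slow parser for everything else.
 */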
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
{
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
			   annotation->word4);

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

	/* Check detailed parsing requirement */
	if (annotation->word3 & 0x7FFFFC3FFFF)
		return dpaa2_dev_rx_parse_slow(mbuf, annotation);

	/* Return some common types from parse processing */
	switch (annotation->word4) {
	case DPAA2_L3_IPv4:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
	case DPAA2_L3_IPv6:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
	case DPAA2_L3_IPv4_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv4_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_UDP;
	case DPAA2_L3_IPv6_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv6_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_UDP;
	default:
		break;
	}

	return dpaa2_dev_rx_parse_slow(mbuf, annotation);
}
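
/* Convert a scatter/gather frame descriptor into an mbuf chain. Each S/G
 * entry points at a buffer carved from an mbuf, so every entry is turned
 * back into its inline mbuf and linked to the previous segment.
 */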
static inline struct rte_mbuf *__attribute__((hot))
eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_sge *sgt, *sge = NULL;
	size_t sg_addr, fd_addr;
	int i = 0;
	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

	fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	/* Get Scatter gather table address */
	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));

	sge = &sgt[i++];
	sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

	/* First Scatter gather entry */
	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	/* Prepare all the metadata for first segment */
	first_seg->buf_addr = (uint8_t *)sg_addr;
	first_seg->ol_flags = 0;
	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
	first_seg->data_len = sge->length & 0x1FFFF;
	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
	first_seg->nb_segs = 1;
	first_seg->next = NULL;
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(first_seg, fd);
	else
		first_seg->packet_type = dpaa2_dev_rx_parse(first_seg,
			(void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			 + DPAA2_FD_PTA_SIZE));

	rte_mbuf_refcnt_set(first_seg, 1);
	cur_seg = first_seg;
	while (!DPAA2_SG_IS_FINAL(sge)) {
		sge = &sgt[i++];
		sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
				DPAA2_GET_FLE_ADDR(sge));
		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
		next_seg->buf_addr = (uint8_t *)sg_addr;
		next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
		next_seg->data_len = sge->length & 0x1FFFF;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(next_seg, 1);
		cur_seg->next = next_seg;
		next_seg->next = NULL;
		cur_seg = next_seg;
	}

	/* Free the buffer that carried the S/G table itself; the packet data
	 * lives in the per-entry buffers linked above.
	 */
	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	rte_mbuf_refcnt_set(temp, 1);
	rte_pktmbuf_free_seg(temp);

	return (void *)first_seg;
}
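
/* Convert a contiguous frame descriptor back into an mbuf. The receive
 * buffer was carved out of an mbuf, so the mbuf header is recovered from
 * the buffer address and the pool's metadata size.
 */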
static inline struct rte_mbuf *__attribute__((hot))
eth_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* need to repopulate some of the fields,
	 * as they may have changed in last transmission
	 */
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
	mbuf->pkt_len = mbuf->data_len;
	mbuf->next = NULL;
	rte_mbuf_refcnt_set(mbuf, 1);

	/* Parse the packet.
	 * Parse results for LX2 are in the FRC field of the FD.
	 * For other DPAA2 platforms, parse results are after
	 * the private SW annotation area.
	 */
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(mbuf, fd);
	else
		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf,
			(void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			 + DPAA2_FD_PTA_SIZE));

	DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	return mbuf;
}
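
/* Build a scatter/gather FD from a multi-segment mbuf: a buffer from the
 * same pool holds the S/G table and one entry is written per segment.
 * Segments with a reference count above one get an invalid bpid so that
 * hardware does not free them.
 */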
static int __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		  struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
	struct qbman_sge *sgt, *sge = NULL;
	int i;

	/* Allocate a buffer to hold the S/G table */
	temp = rte_pktmbuf_alloc(mbuf->pool);
	if (temp == NULL) {
		DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
		return -ENOMEM;
	}

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
	DPAA2_SET_ONLY_FD_BPID(fd, bpid);
	DPAA2_SET_FD_OFFSET(fd, temp->data_off);
	DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
	/* Set Scatter gather table and Scatter gather entries */
	sgt = (struct qbman_sge *)(
			(size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_GET_FD_OFFSET(fd));

	for (i = 0; i < mbuf->nb_segs; i++) {
		sge = &sgt[i];
		/* Reset the buffer pool id and offset field */
		sge->fin_bpid_offset = 0;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
		DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
		sge->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			if (rte_mbuf_refcnt_read(cur_seg) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
				rte_mbuf_refcnt_update(cur_seg, -1);
			} else {
				DPAA2_SET_FLE_BPID(sge,
						mempool_to_bpid(cur_seg->pool));
			}
			cur_seg = cur_seg->next;
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * owner buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
			} else {
				DPAA2_SET_FLE_BPID(sge,
						mempool_to_bpid(mi->pool));
				rte_mbuf_refcnt_update(mi, 1);
			}
			/* Free the indirect mbuf; the owner keeps the data */
			prev_seg = cur_seg;
			cur_seg = cur_seg->next;
			prev_seg->next = NULL;
			rte_pktmbuf_free(prev_seg);
		}
	}
	DPAA2_SG_SET_FINAL(sge, true);
	return 0;
}

static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid) __attribute__((unused));

static void __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid)
{
	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);

	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			/* Invalid bpid keeps HW from freeing a buffer that is
			 * still referenced elsewhere.
			 */
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(mbuf, -1);
		}
	} else {
		struct rte_mbuf *mi;

		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1)
			DPAA2_SET_FD_IVP(fd);
		else
			rte_mbuf_refcnt_update(mi, 1);
		rte_pktmbuf_free(mbuf);
	}
}
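
/* Fallback for mbufs that are not backed by a DPAA2 buffer pool: allocate a
 * buffer from the hardware pool, copy the packet data and the relevant mbuf
 * fields into it, and build a contiguous FD over the copy.
 */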
static inline int __attribute__((hot))
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
		    struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *m;
	void *mb = NULL;

	if (rte_dpaa2_mbuf_alloc_bulk(
		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
		DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
		return -1;
	}
	m = (struct rte_mbuf *)mb;
	memcpy((char *)m->buf_addr + mbuf->data_off,
	       (void *)((char *)mbuf->buf_addr + mbuf->data_off),
	       mbuf->pkt_len);

	/* Copy required fields */
	m->data_off = mbuf->data_off;
	m->ol_flags = mbuf->ol_flags;
	m->packet_type = mbuf->packet_type;
	m->tx_offload = mbuf->tx_offload;

	DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);

	DPAA2_PMD_DP_DEBUG(
		"mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
		" meta: %d, off: %d, len: %d\n",
		(void *)mbuf,
		mbuf->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}

/* This function assumes that the caller keeps the same value of nb_pkts
 * across calls per queue. If that is not the case, better use the
 * non-prefetch version of the Rx call.
 * It will return the packets as requested in the previous call without
 * honoring the current nb_pkts or bufs space.
 */
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function receives frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, pull_size;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd, *next_fd;
	struct qbman_pull_desc pulldesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev *dev = dpaa2_q->dev;

	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
		ret = dpaa2_affine_qbman_ethrx_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(
			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
						   " QBMAN is busy (1)\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare next pull descriptor. This will give space for the
	 * prefetching done on DQRR entries
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/* Check if the previously issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether the last pull command has expired and
		 * set the condition for loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd = qbman_result_DQ_fd(dq_storage);

		if (dpaa2_svr_family != SVR_LX2160A) {
			next_fd = qbman_result_DQ_fd(dq_storage + 1);
			/* Prefetch Annotation address for the parse results */
			rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(
				      next_fd) + DPAA2_FD_PTA_SIZE + 16));
		}

		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
			bufs[num_rx] = eth_sg_fd_to_mbuf(fd);
		else
			bufs[num_rx] = eth_fd_to_mbuf(fd);
		bufs[num_rx]->port = dev->data->port_id;

		if (dev->data->dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_VLAN_STRIP)
			rte_vlan_strip(bufs[num_rx]);

		dq_storage++;
		num_rx++;
	} while (pending);

	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
	}
	/* Issue a volatile dequeue command for the next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
					   " QBMAN is busy (2)\n");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);

	dpaa2_q->rx_pkts += num_rx;

	return num_rx;
}
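
/* Eventdev Rx callbacks: translate a dequeued FD into an rte_event carrying
 * the mbuf, using the event fields configured on the Rx queue.
 */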
void __attribute__((hot))
dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd);

	qbman_swp_dqrr_consume(swp, dq);
}

void __attribute__((hot))
dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
			       const struct qbman_fd *fd,
			       const struct qbman_result *dq,
			       struct dpaa2_queue *rxq,
			       struct rte_event *ev)
{
	uint8_t dqrr_index;

	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd);

	/* The DQRR entry stays held until the atomic context is released;
	 * record which index and mbuf it maps to.
	 */
	dqrr_index = qbman_get_dqrr_idx(dq);
	ev->mbuf->seqn = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}

/*
 * Callback to handle sending packets through WRIOP based interface
 */
uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to given device and VQ */
	uint32_t loop, retry_count;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	uint32_t frames_to_send;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint16_t bpid;
	struct rte_eth_dev *dev = dpaa2_q->dev;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	DPAA2_PMD_DP_DEBUG("===> dev =%p, fqid =%d\n", dev, dpaa2_q->fqid);

	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
			     dpaa2_q->flow_id, dpaa2_q->tc_index);
	/* Clear the unused FD fields before sending */
	while (nb_pkts) {
		/* Check if the queue is congested */
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto skip_tx;
		}

		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_pkts;

		for (loop = 0; loop < frames_to_send; loop++) {
			if ((*bufs)->seqn) {
				/* Release the held DQRR entry via DCA when
				 * this frame is enqueued.
				 */
				uint8_t dqrr_index = (*bufs)->seqn - 1;

				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
						dqrr_index;
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
				(*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN;
			}

			if (likely(RTE_MBUF_DIRECT(*bufs))) {
				mp = (*bufs)->pool;
				/* Check the basic scenario and set
				 * the FD appropriately here itself.
				 */
				if (likely(mp && mp->ops_index ==
				    priv->bp_list->dpaa2_ops_index &&
				    (*bufs)->nb_segs == 1 &&
				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
					if (unlikely(((*bufs)->ol_flags
						& PKT_TX_VLAN_PKT) ||
						(dev->data->dev_conf.txmode.offloads
						& DEV_TX_OFFLOAD_VLAN_INSERT))) {
						ret = rte_vlan_insert(bufs);
						if (ret)
							goto send_n_return;
					}
					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
						&fd_arr[loop],
						mempool_to_bpid(mp));
					bufs++;
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}
			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp || !priv->bp_list)) {
				DPAA2_PMD_ERR("Err: No buffer pool attached");
				goto send_n_return;
			}

			if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
				(dev->data->dev_conf.txmode.offloads
				& DEV_TX_OFFLOAD_VLAN_INSERT))) {
				int ret = rte_vlan_insert(bufs);
				if (ret)
					goto send_n_return;
			}
			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				bpid = priv->bp_list->buf_pool.bpid;

				if (unlikely((*bufs)->nb_segs > 1)) {
					DPAA2_PMD_ERR("S/G support not added"
						" for non hw offload buffer");
					goto send_n_return;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					goto send_n_return;
				}
				/* free the original packet */
				rte_pktmbuf_free(*bufs);
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							&fd_arr[loop], bpid))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop], bpid);
				}
			}
			bufs++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
					&fd_arr[loop], &flags[loop],
					frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_pkts -= frames_to_send;
	}
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;

send_n_return:
	/* send any already prepared fd */
	if (loop) {
		unsigned int i = 0;

		while (i < loop) {
			i += qbman_swp_enqueue_multiple(swp, &eqdesc,
							&fd_arr[i],
							&flags[i], loop - i);
		}
		num_tx += loop;
	}
skip_tx:
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;
}

/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param queue   Generic pointer to TX queue structure.
 * @param bufs    Packets to transmit.
 * @param nb_pkts Number of packets in array.
 * @return Number of packets successfully transmitted (<= nb_pkts).
 */
uint16_t
dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)