/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016-2018 NXP
 *
 */
#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_pmd_logs.h"
#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
            struct dpaa2_annot_hdr *annotation);
#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
    DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
    DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
    DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
    DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
    DPAA2_SET_FD_FRC(_fd, 0); \
    DPAA2_RESET_FD_CTRL(_fd); \
    DPAA2_RESET_FD_FLC(_fd); \
} while (0)
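/*
 * Illustrative use (see dpaa2_dev_tx() below): a single-segment mbuf is
 * converted into a contiguous FD before enqueue, e.g.
 *
 *	DPAA2_MBUF_TO_CONTIG_FD((*bufs), &fd_arr[loop], mempool_to_bpid(mp));
 *
 * The FD then carries the buffer IOVA, length, offset and buffer pool id,
 * while FRC/CTRL/FLC are cleared so no stale context is sent with the frame.
 */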
static inline void __attribute__((hot))
dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd)
{
    uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);

    m->packet_type = RTE_PTYPE_UNKNOWN;
    switch (frc) {
    case DPAA2_PKT_TYPE_ETHER:
        m->packet_type = RTE_PTYPE_L2_ETHER;
        break;
    case DPAA2_PKT_TYPE_IPV4:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV4;
        break;
    case DPAA2_PKT_TYPE_IPV6:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV6;
        break;
    case DPAA2_PKT_TYPE_IPV4_EXT:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV4_EXT;
        break;
    case DPAA2_PKT_TYPE_IPV6_EXT:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV6_EXT;
        break;
    case DPAA2_PKT_TYPE_IPV4_TCP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
        break;
    case DPAA2_PKT_TYPE_IPV6_TCP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
        break;
    case DPAA2_PKT_TYPE_IPV4_UDP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
        break;
    case DPAA2_PKT_TYPE_IPV6_UDP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
        break;
    case DPAA2_PKT_TYPE_IPV4_SCTP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
        break;
    case DPAA2_PKT_TYPE_IPV6_SCTP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
        break;
    case DPAA2_PKT_TYPE_IPV4_ICMP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
        break;
    case DPAA2_PKT_TYPE_IPV6_ICMP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
        break;
    default:
        m->packet_type = dpaa2_dev_rx_parse_slow(m,
          (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
             + DPAA2_FD_PTA_SIZE));
    }
    m->hash.rss = fd->simple.flc_hi;
    m->ol_flags |= PKT_RX_RSS_HASH;
}
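/*
 * On LX2160A the hardware parse summary is delivered in the FRC field of the
 * FD, so the common packet types are derived directly from it above; anything
 * not covered falls back to the annotation-based slow parse. The FLC word is
 * reused to carry the RSS hash for these frames.
 */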
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
            struct dpaa2_annot_hdr *annotation)
{
    uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
    uint16_t *vlan_tci;

    DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
            "(4)=0x%" PRIx64 "\t",
            annotation->word3, annotation->word4);

    if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
        vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
            (VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
        mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
        mbuf->ol_flags |= PKT_RX_VLAN;
        pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
    } else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
        vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
            (VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
        mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
        mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ;
        pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
    }

    if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
        pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
        goto parse_done;
    } else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
        pkt_type |= RTE_PTYPE_L2_ETHER;
    } else {
        goto parse_done;
    }

    if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
                 L3_IPV4_N_PRESENT)) {
        pkt_type |= RTE_PTYPE_L3_IPV4;
        if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
            L3_IP_N_OPT_PRESENT))
            pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
    } else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
           L3_IPV6_N_PRESENT)) {
        pkt_type |= RTE_PTYPE_L3_IPV6;
        if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
            L3_IP_N_OPT_PRESENT))
            pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
    } else {
        goto parse_done;
    }

    if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
        mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
    else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
        mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

    if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
        L3_IP_1_MORE_FRAGMENT |
        L3_IP_N_FIRST_FRAGMENT |
        L3_IP_N_MORE_FRAGMENT)) {
        pkt_type |= RTE_PTYPE_L4_FRAG;
        goto parse_done;
    } else {
        pkt_type |= RTE_PTYPE_L4_NONFRAG;
    }

    if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
        pkt_type |= RTE_PTYPE_L4_UDP;
    else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
        pkt_type |= RTE_PTYPE_L4_TCP;
    else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
        pkt_type |= RTE_PTYPE_L4_SCTP;
    else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
        pkt_type |= RTE_PTYPE_L4_ICMP;
    else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
        pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
    return pkt_type;
}
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
{
    struct dpaa2_annot_hdr *annotation =
            (struct dpaa2_annot_hdr *)hw_annot_addr;

    DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
               annotation->word4);

    if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
        mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
    else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
        mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

    /* Check detailed parsing requirement */
    if (annotation->word3 & 0x7FFFFC3FFFF)
        return dpaa2_dev_rx_parse_slow(mbuf, annotation);

    /* Return some common types from parse processing */
    switch (annotation->word4) {
    case DPAA2_L3_IPv4:
        return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
    case DPAA2_L3_IPv6:
        return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
    case DPAA2_L3_IPv4_TCP:
        return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
                RTE_PTYPE_L4_TCP;
    case DPAA2_L3_IPv4_UDP:
        return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
                RTE_PTYPE_L4_UDP;
    case DPAA2_L3_IPv6_TCP:
        return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
                RTE_PTYPE_L4_TCP;
    case DPAA2_L3_IPv6_UDP:
        return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
                RTE_PTYPE_L4_UDP;
    default:
        break;
    }

    return dpaa2_dev_rx_parse_slow(mbuf, annotation);
}
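/*
 * Summary of the parse strategy above: checksum errors are flagged from the
 * FAS bits in annotation word8; if word3 has any bit set under the
 * 0x7FFFFC3FFFF mask (VLAN, ARP and similar L2 conditions handled only by the
 * slow path) the frame is handed to dpaa2_dev_rx_parse_slow(), otherwise
 * word4 is matched against a handful of common L3/L4 combinations to produce
 * the packet_type quickly.
 */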
static inline struct rte_mbuf *__attribute__((hot))
eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
{
    struct qbman_sge *sgt, *sge;
    size_t sg_addr, fd_addr;
    int i = 0;
    struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

    fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

    /* Get Scatter gather table address */
    sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));

    sge = &sgt[i++];
    sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

    /* First Scatter gather entry */
    first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
        rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
    /* Prepare all the metadata for first segment */
    first_seg->buf_addr = (uint8_t *)sg_addr;
    first_seg->ol_flags = 0;
    first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
    first_seg->data_len = sge->length & 0x1FFFF;
    first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
    first_seg->nb_segs = 1;
    first_seg->next = NULL;
    if (dpaa2_svr_family == SVR_LX2160A)
        dpaa2_dev_rx_parse_new(first_seg, fd);
    else
        first_seg->packet_type = dpaa2_dev_rx_parse(first_seg,
            (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
             + DPAA2_FD_PTA_SIZE));

    rte_mbuf_refcnt_set(first_seg, 1);
    cur_seg = first_seg;
    while (!DPAA2_SG_IS_FINAL(sge)) {
        sge = &sgt[i++];
        sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
                DPAA2_GET_FLE_ADDR(sge));
        next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
            rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
        next_seg->buf_addr = (uint8_t *)sg_addr;
        next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
        next_seg->data_len = sge->length & 0x1FFFF;
        first_seg->nb_segs += 1;
        rte_mbuf_refcnt_set(next_seg, 1);
        cur_seg->next = next_seg;
        next_seg->next = NULL;
        cur_seg = next_seg;
    }
    temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
        rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
    rte_mbuf_refcnt_set(temp, 1);
    rte_pktmbuf_free_seg(temp);

    return (void *)first_seg;
}
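/*
 * In the S/G case the FD points at a scatter/gather table rather than at the
 * packet data: the first SGE becomes the head mbuf, the remaining SGEs are
 * chained until the FINAL bit is seen, and the buffer that held the table
 * itself is released back to its pool at the end.
 */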
static inline struct rte_mbuf *__attribute__((hot))
eth_fd_to_mbuf(const struct qbman_fd *fd)
{
    struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
        DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
        rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

    /* need to repopulate some of the fields,
     * as they may have changed during the last transmission
     */
    mbuf->nb_segs = 1;
    mbuf->ol_flags = 0;
    mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
    mbuf->data_len = DPAA2_GET_FD_LEN(fd);
    mbuf->pkt_len = mbuf->data_len;
    mbuf->next = NULL;
    rte_mbuf_refcnt_set(mbuf, 1);

    /* Parse the packet */
    /* parse results for LX2 are in the FRC field of the FD.
     * For other DPAA2 platforms, parse results are placed after
     * the private SW annotation area
     */
    if (dpaa2_svr_family == SVR_LX2160A)
        dpaa2_dev_rx_parse_new(mbuf, fd);
    else
        mbuf->packet_type = dpaa2_dev_rx_parse(mbuf,
            (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
             + DPAA2_FD_PTA_SIZE));

    DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
        "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
        mbuf, mbuf->buf_addr, mbuf->data_off,
        DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
        rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
        DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

    return mbuf;
}
static int __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
          struct qbman_fd *fd, uint16_t bpid)
{
    struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
    struct qbman_sge *sgt, *sge = NULL;
    int i;

    temp = rte_pktmbuf_alloc(mbuf->pool);
    if (temp == NULL) {
        DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
        return -ENOMEM;
    }

    DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
    DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
    DPAA2_SET_ONLY_FD_BPID(fd, bpid);
    DPAA2_SET_FD_OFFSET(fd, temp->data_off);
    DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
    DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
    /* Set Scatter gather table and Scatter gather entries */
    sgt = (struct qbman_sge *)(
            (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
            + DPAA2_GET_FD_OFFSET(fd));

    for (i = 0; i < mbuf->nb_segs; i++) {
        sge = &sgt[i];
        /* Resetting the buffer pool id and offset field */
        sge->fin_bpid_offset = 0;
        DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
        DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
        sge->length = cur_seg->data_len;
        if (RTE_MBUF_DIRECT(cur_seg)) {
            if (rte_mbuf_refcnt_read(cur_seg) > 1) {
                /* If refcnt > 1, invalid bpid is set to ensure
                 * buffer is not freed by HW
                 */
                DPAA2_SET_FLE_IVP(sge);
                rte_mbuf_refcnt_update(cur_seg, -1);
            } else {
                DPAA2_SET_FLE_BPID(sge,
                        mempool_to_bpid(cur_seg->pool));
            }
            cur_seg = cur_seg->next;
        } else {
            /* Get owner MBUF from indirect buffer */
            mi = rte_mbuf_from_indirect(cur_seg);
            if (rte_mbuf_refcnt_read(mi) > 1) {
                /* If refcnt > 1, invalid bpid is set to ensure
                 * owner buffer is not freed by HW
                 */
                DPAA2_SET_FLE_IVP(sge);
            } else {
                DPAA2_SET_FLE_BPID(sge,
                        mempool_to_bpid(mi->pool));
                rte_mbuf_refcnt_update(mi, 1);
            }
            prev_seg = cur_seg;
            cur_seg = cur_seg->next;
            prev_seg->next = NULL;
            rte_pktmbuf_free(prev_seg);
        }
    }
    DPAA2_SG_SET_FINAL(sge, true);
    return 0;
}
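/*
 * Ownership note for the conversion above: when a segment (or the owner of an
 * indirect segment) has refcnt > 1, the SGE gets an invalid buffer pool id
 * (IVP) so that hardware does not free a buffer still referenced by software;
 * otherwise the real bpid is set and the buffer is freed by hardware after
 * transmission.
 */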
static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
           struct qbman_fd *fd, uint16_t bpid) __attribute__((unused));

static void __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
           struct qbman_fd *fd, uint16_t bpid)
{
    DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);

    DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
        "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
        mbuf, mbuf->buf_addr, mbuf->data_off,
        DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
        rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
        DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
    if (RTE_MBUF_DIRECT(mbuf)) {
        if (rte_mbuf_refcnt_read(mbuf) > 1) {
            DPAA2_SET_FD_IVP(fd);
            rte_mbuf_refcnt_update(mbuf, -1);
        }
    } else {
        struct rte_mbuf *mi;

        mi = rte_mbuf_from_indirect(mbuf);
        if (rte_mbuf_refcnt_read(mi) > 1)
            DPAA2_SET_FD_IVP(fd);
        else
            rte_mbuf_refcnt_update(mi, 1);
        rte_pktmbuf_free(mbuf);
    }
}
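/*
 * Same ownership rule for the contiguous case: a direct mbuf with refcnt > 1
 * keeps its buffer (IVP set, refcnt decremented); for an indirect mbuf the FD
 * references the owner buffer, the owner's refcount is adjusted accordingly
 * and the indirect wrapper itself is freed right away.
 */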
static inline int __attribute__((hot))
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
            struct qbman_fd *fd, uint16_t bpid)
{
    struct rte_mbuf *m;
    void *mb = NULL;

    if (rte_dpaa2_mbuf_alloc_bulk(
        rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
        DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
        return -1;
    }
    m = (struct rte_mbuf *)mb;
    memcpy((char *)m->buf_addr + mbuf->data_off,
           (void *)((char *)mbuf->buf_addr + mbuf->data_off),
           mbuf->pkt_len);

    /* Copy required fields */
    m->data_off = mbuf->data_off;
    m->data_len = mbuf->pkt_len;
    m->pkt_len = mbuf->pkt_len;
    m->ol_flags = mbuf->ol_flags;
    m->packet_type = mbuf->packet_type;
    m->tx_offload = mbuf->tx_offload;

    DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);

    DPAA2_PMD_DP_DEBUG(
        "mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
        " meta: %d, off: %d, len: %d\n",
        (void *)mbuf,
        mbuf->buf_addr,
        DPAA2_GET_FD_ADDR(fd),
        DPAA2_GET_FD_BPID(fd),
        rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
        DPAA2_GET_FD_OFFSET(fd),
        DPAA2_GET_FD_LEN(fd));

    return 0;
}
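/*
 * This copy path is the fallback used by dpaa2_dev_tx() when an mbuf does not
 * come from a DPAA2-managed pool: a buffer is taken from the interface's
 * default pool, the packet data and the relevant fields are copied, and the
 * FD is built over the copy so hardware can own and free it.
 */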
/* This function assumes that the caller keeps the same value of nb_pkts
 * across calls for a given queue; if that is not the case, it is better to
 * use the non-prefetch version of the Rx call.
 * It will return the packets as requested in the previous call without
 * honoring the current nb_pkts or bufs space.
 */
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
    /* Function receives frames for a given device and VQ */
    struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
    struct qbman_result *dq_storage, *dq_storage1 = NULL;
    uint32_t fqid = dpaa2_q->fqid;
    int ret, num_rx = 0, pull_size;
    uint8_t pending, status;
    struct qbman_swp *swp;
    const struct qbman_fd *fd, *next_fd;
    struct qbman_pull_desc pulldesc;
    struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
    struct rte_eth_dev *dev = dpaa2_q->dev;

    if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
        ret = dpaa2_affine_qbman_ethrx_swp();
        if (ret) {
            DPAA2_PMD_ERR("Failure in affining portal");
            return 0;
        }
    }
    swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
    pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
    if (unlikely(!q_storage->active_dqs)) {
        q_storage->toggle = 0;
        dq_storage = q_storage->dq_storage[q_storage->toggle];
        q_storage->last_num_pkts = pull_size;
        qbman_pull_desc_clear(&pulldesc);
        qbman_pull_desc_set_numframes(&pulldesc,
                          q_storage->last_num_pkts);
        qbman_pull_desc_set_fq(&pulldesc, fqid);
        qbman_pull_desc_set_storage(&pulldesc, dq_storage,
            (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
        if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
            while (!qbman_check_command_complete(
                   get_swp_active_dqs(
                   DPAA2_PER_LCORE_ETHRX_DPIO->index)))
                ;
            clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
        }
        while (1) {
            if (qbman_swp_pull(swp, &pulldesc)) {
                DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
                           " QBMAN is busy (1)\n");
                /* Portal was busy, try again */
                continue;
            }
            break;
        }
        q_storage->active_dqs = dq_storage;
        q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
        set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
                   dq_storage);
    }

    dq_storage = q_storage->active_dqs;
    rte_prefetch0((void *)(size_t)(dq_storage));
    rte_prefetch0((void *)(size_t)(dq_storage + 1));

    /* Prepare next pull descriptor. This will give space for the
     * prefetching done on DQRR entries
     */
    q_storage->toggle ^= 1;
    dq_storage1 = q_storage->dq_storage[q_storage->toggle];
    qbman_pull_desc_clear(&pulldesc);
    qbman_pull_desc_set_numframes(&pulldesc, pull_size);
    qbman_pull_desc_set_fq(&pulldesc, fqid);
    qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
        (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

    /* Check if the previously issued command is completed.
     * Also seems like the SWP is shared between the Ethernet Driver
     * and the SEC driver.
     */
    while (!qbman_check_command_complete(dq_storage))
        ;
    if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
        clear_swp_active_dqs(q_storage->active_dpio_id);

    pending = 1;

    do {
        /* Loop until the dq_storage is updated with
         * new token by QBMAN
         */
        while (!qbman_check_new_result(dq_storage))
            ;
        rte_prefetch0((void *)((size_t)(dq_storage + 2)));
        /* Check whether Last Pull command is Expired and
         * setting Condition for Loop termination
         */
        if (qbman_result_DQ_is_pull_complete(dq_storage)) {
            pending = 0;
            /* Check for valid frame. */
            status = qbman_result_DQ_flags(dq_storage);
            if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
                continue;
        }
        fd = qbman_result_DQ_fd(dq_storage);

        if (dpaa2_svr_family != SVR_LX2160A) {
            next_fd = qbman_result_DQ_fd(dq_storage + 1);
            /* Prefetch Annotation address for the parse results */
            rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(
                      next_fd) + DPAA2_FD_PTA_SIZE + 16));
        }

        if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
            bufs[num_rx] = eth_sg_fd_to_mbuf(fd);
        else
            bufs[num_rx] = eth_fd_to_mbuf(fd);
        bufs[num_rx]->port = dev->data->port_id;

        if (dev->data->dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_VLAN_STRIP)
            rte_vlan_strip(bufs[num_rx]);

        dq_storage++;
        num_rx++;
    } while (pending);

    if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
        while (!qbman_check_command_complete(
               get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
            ;
        clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
    }
    /* Issue a volatile dequeue command for the next pull */
    while (1) {
        if (qbman_swp_pull(swp, &pulldesc)) {
            DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
                       " QBMAN is busy (2)\n");
            continue;
        }
        break;
    }
    q_storage->active_dqs = dq_storage1;
    q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
    set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);

    dpaa2_q->rx_pkts += num_rx;

    return num_rx;
}
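/*
 * The prefetch Rx scheme above ping-pongs between two DQ storages: while the
 * results of the previously issued volatile dequeue are being processed, the
 * next pull command is already prepared and issued on the other storage.
 * This is why nb_pkts is expected to stay constant across calls on a given
 * queue, as noted in the comment before the function.
 */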
void __attribute__((hot))
dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
                 const struct qbman_fd *fd,
                 const struct qbman_result *dq,
                 struct dpaa2_queue *rxq,
                 struct rte_event *ev)
{
    rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
        DPAA2_FD_PTA_SIZE + 16));

    ev->flow_id = rxq->ev.flow_id;
    ev->sub_event_type = rxq->ev.sub_event_type;
    ev->event_type = RTE_EVENT_TYPE_ETHDEV;
    ev->op = RTE_EVENT_OP_NEW;
    ev->sched_type = rxq->ev.sched_type;
    ev->queue_id = rxq->ev.queue_id;
    ev->priority = rxq->ev.priority;

    ev->mbuf = eth_fd_to_mbuf(fd);

    qbman_swp_dqrr_consume(swp, dq);
}
void __attribute__((hot))
dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
                   const struct qbman_fd *fd,
                   const struct qbman_result *dq,
                   struct dpaa2_queue *rxq,
                   struct rte_event *ev)
{
    uint8_t dqrr_index;

    rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
        DPAA2_FD_PTA_SIZE + 16));

    ev->flow_id = rxq->ev.flow_id;
    ev->sub_event_type = rxq->ev.sub_event_type;
    ev->event_type = RTE_EVENT_TYPE_ETHDEV;
    ev->op = RTE_EVENT_OP_NEW;
    ev->sched_type = rxq->ev.sched_type;
    ev->queue_id = rxq->ev.queue_id;
    ev->priority = rxq->ev.priority;

    ev->mbuf = eth_fd_to_mbuf(fd);

    dqrr_index = qbman_get_dqrr_idx(dq);
    ev->mbuf->seqn = dqrr_index + 1;
    DPAA2_PER_LCORE_DQRR_SIZE++;
    DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
    DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}
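/*
 * For atomic queues the DQRR entry is deliberately not consumed here: its
 * index is stored in the mbuf seqn and tracked in the per-lcore DQRR_HELD
 * mask, so that the Tx path can release it through DCA by setting
 * QBMAN_ENQUEUE_FLAG_DCA | dqrr_index in the enqueue flags (see
 * dpaa2_dev_tx() below).
 */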
/*
 * Callback to handle sending packets through WRIOP based interface
 */
uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
    /* Function to transmit the frames to given device and VQ */
    uint32_t loop, retry_count;
    int32_t ret;
    struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
    struct rte_mbuf *mi;
    uint32_t frames_to_send;
    struct rte_mempool *mp;
    struct qbman_eq_desc eqdesc;
    struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
    struct qbman_swp *swp;
    uint16_t num_tx = 0;
    uint16_t bpid;
    struct rte_eth_dev *dev = dpaa2_q->dev;
    struct dpaa2_dev_priv *priv = dev->data->dev_private;
    uint32_t flags[MAX_TX_RING_SLOTS] = {0};

    if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
        ret = dpaa2_affine_qbman_swp();
        if (ret) {
            DPAA2_PMD_ERR("Failure in affining portal");
            return 0;
        }
    }
    swp = DPAA2_PER_LCORE_PORTAL;

    DPAA2_PMD_DP_DEBUG("===> dev =%p, fqid =%d\n", dev, dpaa2_q->fqid);

    /* Prepare enqueue descriptor */
    qbman_eq_desc_clear(&eqdesc);
    qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
    qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
                 dpaa2_q->flow_id, dpaa2_q->tc_index);
    /* Clear the unused FD fields before sending */
    while (nb_pkts) {
        /* Check if the queue is congested */
        retry_count = 0;
        while (qbman_result_SCN_state(dpaa2_q->cscn)) {
            retry_count++;
            /* Retry for some time before giving up */
            if (retry_count > CONG_RETRY_COUNT)
                goto skip_tx;
        }

        frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
            dpaa2_eqcr_size : nb_pkts;

        for (loop = 0; loop < frames_to_send; loop++) {
            if ((*bufs)->seqn) {
                uint8_t dqrr_index = (*bufs)->seqn - 1;

                flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
                        dqrr_index;
                DPAA2_PER_LCORE_DQRR_SIZE--;
                DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
                (*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN;
            }

            if (likely(RTE_MBUF_DIRECT(*bufs))) {
                mp = (*bufs)->pool;
                /* Check the basic scenario and set
                 * the FD appropriately here itself.
                 */
                if (likely(mp && mp->ops_index ==
                    priv->bp_list->dpaa2_ops_index &&
                    (*bufs)->nb_segs == 1 &&
                    rte_mbuf_refcnt_read((*bufs)) == 1)) {
                    if (unlikely(((*bufs)->ol_flags
                        & PKT_TX_VLAN_PKT) ||
                        (dev->data->dev_conf.txmode.offloads
                        & DEV_TX_OFFLOAD_VLAN_INSERT))) {
                        ret = rte_vlan_insert(bufs);
                        if (ret)
                            goto send_n_return;
                    }
                    DPAA2_MBUF_TO_CONTIG_FD((*bufs),
                        &fd_arr[loop], mempool_to_bpid(mp));
                    bufs++;
                    continue;
                }
            } else {
                mi = rte_mbuf_from_indirect(*bufs);
                mp = mi->pool;
            }
            /* Not a hw_pkt pool allocated frame */
            if (unlikely(!mp || !priv->bp_list)) {
                DPAA2_PMD_ERR("Err: No buffer pool attached");
                goto send_n_return;
            }

            if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
                (dev->data->dev_conf.txmode.offloads
                & DEV_TX_OFFLOAD_VLAN_INSERT))) {
                int ret = rte_vlan_insert(bufs);
                if (ret)
                    goto send_n_return;
            }
            if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
                DPAA2_PMD_WARN("Non DPAA2 buffer pool");
                /* alloc should be from the default buffer pool
                 * attached to this interface
                 */
                bpid = priv->bp_list->buf_pool.bpid;

                if (unlikely((*bufs)->nb_segs > 1)) {
                    DPAA2_PMD_ERR("S/G support not added"
                        " for non hw offload buffer");
                    goto send_n_return;
                }
                if (eth_copy_mbuf_to_fd(*bufs,
                        &fd_arr[loop], bpid)) {
                    goto send_n_return;
                }
                /* free the original packet */
                rte_pktmbuf_free(*bufs);
            } else {
                bpid = mempool_to_bpid(mp);
                if (unlikely((*bufs)->nb_segs > 1)) {
                    if (eth_mbuf_to_sg_fd(*bufs,
                            &fd_arr[loop], bpid))
                        goto send_n_return;
                } else {
                    eth_mbuf_to_fd(*bufs,
                               &fd_arr[loop], bpid);
                }
            }
            bufs++;
        }
        loop = 0;
        while (loop < frames_to_send) {
            loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
                    &fd_arr[loop], &flags[loop],
                    frames_to_send - loop);
        }

        num_tx += frames_to_send;
        nb_pkts -= frames_to_send;
    }
    dpaa2_q->tx_pkts += num_tx;
    return num_tx;

send_n_return:
    /* send any already prepared fd */
    if (loop) {
        unsigned int i = 0;

        while (i < loop) {
            i += qbman_swp_enqueue_multiple(swp, &eqdesc,
                    &fd_arr[i], &flags[i], loop - i);
        }
        num_tx += loop;
    }
skip_tx:
    dpaa2_q->tx_pkts += num_tx;
    return num_tx;
}
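/*
 * For reference, this is the burst routine reached through rte_eth_tx_burst()
 * on a dpaa2 port; an illustrative caller is simply:
 *
 *	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, mbufs, nb);
 *
 * Congestion on the queue (CSCN state set) is retried up to CONG_RETRY_COUNT
 * times before the function gives up and returns the number of frames it has
 * actually enqueued so far.
 */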
/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param queue
 *   Generic pointer to TX queue structure.
 * @param bufs
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 * @return
 *   Number of packets successfully transmitted (<= nb_pkts).
 */
uint16_t
dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)