/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_pmd_logs.h"
#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"
#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
    DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
    DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
    DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
    DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
    DPAA2_SET_FD_ASAL(_fd, DPAA2_ASAL_VAL); \
} while (0)
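
/*
 * On LX2160A the hardware parse summary is carried in the frame context
 * (FRC) field of the frame descriptor, so the packet type can be derived
 * directly from that value without reading the annotation area.
 */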
static inline void __attribute__((hot))
dpaa2_dev_rx_parse_frc(struct rte_mbuf *m, uint16_t frc)
{
    DPAA2_PMD_DP_DEBUG("frc = 0x%x\t", frc);

    m->packet_type = RTE_PTYPE_UNKNOWN;
    switch (frc) {
    case DPAA2_PKT_TYPE_ETHER:
        m->packet_type = RTE_PTYPE_L2_ETHER;
        break;
    case DPAA2_PKT_TYPE_IPV4:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV4;
        break;
    case DPAA2_PKT_TYPE_IPV6:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV6;
        break;
    case DPAA2_PKT_TYPE_IPV4_EXT:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV4_EXT;
        break;
    case DPAA2_PKT_TYPE_IPV6_EXT:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV6_EXT;
        break;
    case DPAA2_PKT_TYPE_IPV4_TCP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
        break;
    case DPAA2_PKT_TYPE_IPV6_TCP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
        break;
    case DPAA2_PKT_TYPE_IPV4_UDP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
        break;
    case DPAA2_PKT_TYPE_IPV6_UDP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
        break;
    case DPAA2_PKT_TYPE_IPV4_SCTP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
        break;
    case DPAA2_PKT_TYPE_IPV6_SCTP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
        break;
    case DPAA2_PKT_TYPE_IPV4_ICMP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
        break;
    case DPAA2_PKT_TYPE_IPV6_ICMP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
        break;
    case DPAA2_PKT_TYPE_VLAN_1:
    case DPAA2_PKT_TYPE_VLAN_2:
        m->ol_flags |= PKT_RX_VLAN;
        break;
    /* More switch cases can be added */
    /* TODO: Add handling for checksum error check from FRC */
    default:
        m->packet_type = RTE_PTYPE_UNKNOWN;
    }
}
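
/*
 * Slow-path classification: walk the full hardware annotation words to
 * build the packet type when the frame does not match one of the common
 * patterns handled in dpaa2_dev_rx_parse().
 */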
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse_slow(struct dpaa2_annot_hdr *annotation)
{
    uint32_t pkt_type = RTE_PTYPE_UNKNOWN;

    DPAA2_PMD_DP_DEBUG("(slow parse) Annotation = 0x%" PRIx64 "\t",
               annotation->word4);
    if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
        pkt_type = RTE_PTYPE_L2_ETHER_ARP;
        goto parse_done;
    } else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
        pkt_type = RTE_PTYPE_L2_ETHER;
    } else {
        goto parse_done;
    }

    if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
                 L3_IPV4_N_PRESENT)) {
        pkt_type |= RTE_PTYPE_L3_IPV4;
        if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
            L3_IP_N_OPT_PRESENT))
            pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
    } else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
           L3_IPV6_N_PRESENT)) {
        pkt_type |= RTE_PTYPE_L3_IPV6;
        if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
            L3_IP_N_OPT_PRESENT))
            pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
    } else {
        goto parse_done;
    }

    if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
        L3_IP_1_MORE_FRAGMENT |
        L3_IP_N_FIRST_FRAGMENT |
        L3_IP_N_MORE_FRAGMENT)) {
        pkt_type |= RTE_PTYPE_L4_FRAG;
        goto parse_done;
    } else {
        pkt_type |= RTE_PTYPE_L4_NONFRAG;
    }

    if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
        pkt_type |= RTE_PTYPE_L4_UDP;
    else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
        pkt_type |= RTE_PTYPE_L4_TCP;
    else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
        pkt_type |= RTE_PTYPE_L4_SCTP;
    else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
        pkt_type |= RTE_PTYPE_L4_ICMP;
    else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
        pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
    return pkt_type;
}
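
/*
 * Fast-path classification: set the VLAN and checksum offload flags from
 * the annotation, return the packet type directly for the most common
 * L3/L4 combinations, and fall back to the slow parser otherwise.
 */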
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
{
    struct dpaa2_annot_hdr *annotation =
            (struct dpaa2_annot_hdr *)hw_annot_addr;

    DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
               annotation->word4);

    /* Check offloads first */
    if (BIT_ISSET_AT_POS(annotation->word3,
                 L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
        mbuf->ol_flags |= PKT_RX_VLAN;

    if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
        mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
    else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
        mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

    /* Return some common types from parse processing */
    switch (annotation->word4) {
    case DPAA2_L3_IPv4:
        return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
    case DPAA2_L3_IPv6:
        return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
    case DPAA2_L3_IPv4_TCP:
        return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
                RTE_PTYPE_L4_TCP;
    case DPAA2_L3_IPv4_UDP:
        return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
                RTE_PTYPE_L4_UDP;
    case DPAA2_L3_IPv6_TCP:
        return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
                RTE_PTYPE_L4_TCP;
    case DPAA2_L3_IPv6_UDP:
        return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
                RTE_PTYPE_L4_UDP;
    default:
        break;
    }

    return dpaa2_dev_rx_parse_slow(annotation);
}
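
/*
 * Convert a scatter/gather frame descriptor back into a chained mbuf:
 * every S/G entry becomes one segment, and the buffer that carried the
 * S/G table itself is released once the chain has been built.
 */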
static inline struct rte_mbuf *__attribute__((hot))
eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
{
    struct qbman_sge *sgt, *sge;
    size_t sg_addr, fd_addr;
    int i = 0;
    struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

    fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

    /* Get Scatter gather table address */
    sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));
    sge = &sgt[i++];
    sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

    /* First Scatter gather entry */
    first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
        rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
    /* Prepare all the metadata for first segment */
    first_seg->buf_addr = (uint8_t *)sg_addr;
    first_seg->ol_flags = 0;
    first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
    first_seg->data_len = sge->length & 0x1FFFF;
    first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
    first_seg->nb_segs = 1;
    first_seg->next = NULL;
    if (dpaa2_svr_family == SVR_LX2160A)
        dpaa2_dev_rx_parse_frc(first_seg,
                DPAA2_GET_FD_FRC_PARSE_SUM(fd));
    else
        first_seg->packet_type = dpaa2_dev_rx_parse(first_seg,
            (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
             + DPAA2_FD_PTA_SIZE));

    rte_mbuf_refcnt_set(first_seg, 1);
    cur_seg = first_seg;
    while (!DPAA2_SG_IS_FINAL(sge)) {
        sge = &sgt[i++];
        sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
                DPAA2_GET_FLE_ADDR(sge));
        next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
            rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
        next_seg->buf_addr = (uint8_t *)sg_addr;
        next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
        next_seg->data_len = sge->length & 0x1FFFF;
        first_seg->nb_segs += 1;
        rte_mbuf_refcnt_set(next_seg, 1);
        cur_seg->next = next_seg;
        next_seg->next = NULL;
        cur_seg = next_seg;
    }
    temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
        rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
    rte_mbuf_refcnt_set(temp, 1);
    rte_pktmbuf_free_seg(temp);

    return (void *)first_seg;
}
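
/*
 * Convert a contiguous frame descriptor into a single-segment mbuf that
 * lives in the same buffer the hardware filled.
 */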
static inline struct rte_mbuf *__attribute__((hot))
eth_fd_to_mbuf(const struct qbman_fd *fd)
{
    struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
        DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
        rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

    /* need to repopulate some of the fields,
     * as they may have changed in last transmission
     */
    mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
    mbuf->data_len = DPAA2_GET_FD_LEN(fd);
    mbuf->pkt_len = mbuf->data_len;
    rte_mbuf_refcnt_set(mbuf, 1);

    /* Parse the packet */
    /* parse results for LX2 are there in FRC field of FD.
     * For other DPAA2 platforms, parse results are after
     * the private - sw annotation area
     */
    if (dpaa2_svr_family == SVR_LX2160A)
        dpaa2_dev_rx_parse_frc(mbuf, DPAA2_GET_FD_FRC_PARSE_SUM(fd));
    else
        mbuf->packet_type = dpaa2_dev_rx_parse(mbuf,
            (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
             + DPAA2_FD_PTA_SIZE));

    DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
        "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
        mbuf, mbuf->buf_addr, mbuf->data_off,
        DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
        rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
        DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

    return mbuf;
}
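
/*
 * Build a scatter/gather frame descriptor from a multi-segment mbuf: a
 * fresh buffer is taken from the mbuf pool to hold the S/G table, and each
 * segment becomes one S/G entry. Buffers that must not be freed by
 * hardware (refcnt > 1, or owners of indirect mbufs) get an invalid BPID.
 */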
static int __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
          struct qbman_fd *fd, uint16_t bpid)
{
    struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
    struct qbman_sge *sgt, *sge = NULL;
    int i;

    if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
        int ret = rte_vlan_insert(&mbuf);
        if (ret)
            return 1;
    }

    temp = rte_pktmbuf_alloc(mbuf->pool);
    if (temp == NULL) {
        DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
        return -ENOMEM;
    }

    DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
    DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
    DPAA2_SET_ONLY_FD_BPID(fd, bpid);
    DPAA2_SET_FD_OFFSET(fd, temp->data_off);
    DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
    DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
    /*Set Scatter gather table and Scatter gather entries*/
    sgt = (struct qbman_sge *)(
            (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
            + DPAA2_GET_FD_OFFSET(fd));

    for (i = 0; i < mbuf->nb_segs; i++) {
        sge = &sgt[i];
        /*Resetting the buffer pool id and offset field*/
        sge->fin_bpid_offset = 0;
        DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
        DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
        sge->length = cur_seg->data_len;
        if (RTE_MBUF_DIRECT(cur_seg)) {
            if (rte_mbuf_refcnt_read(cur_seg) > 1) {
                /* If refcnt > 1, invalid bpid is set to ensure
                 * buffer is not freed by HW
                 */
                DPAA2_SET_FLE_IVP(sge);
                rte_mbuf_refcnt_update(cur_seg, -1);
            } else
                DPAA2_SET_FLE_BPID(sge,
                        mempool_to_bpid(cur_seg->pool));
            cur_seg = cur_seg->next;
        } else {
            /* Get owner MBUF from indirect buffer */
            mi = rte_mbuf_from_indirect(cur_seg);
            if (rte_mbuf_refcnt_read(mi) > 1) {
                /* If refcnt > 1, invalid bpid is set to ensure
                 * owner buffer is not freed by HW
                 */
                DPAA2_SET_FLE_IVP(sge);
            } else {
                DPAA2_SET_FLE_BPID(sge,
                        mempool_to_bpid(mi->pool));
                rte_mbuf_refcnt_update(mi, 1);
            }
            prev_seg = cur_seg;
            cur_seg = cur_seg->next;
            prev_seg->next = NULL;
            rte_pktmbuf_free(prev_seg);
        }
    }
    DPAA2_SG_SET_FINAL(sge, true);
    return 0;
}
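
/*
 * Build a contiguous frame descriptor from a single-segment mbuf. Direct
 * buffers with refcnt > 1 get an invalid BPID so hardware does not free
 * them; for indirect mbufs the owner's refcnt is adjusted and the clone
 * itself is freed here.
 */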
static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
           struct qbman_fd *fd, uint16_t bpid) __attribute__((unused));

static void __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
           struct qbman_fd *fd, uint16_t bpid)
{
    if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
        if (rte_vlan_insert(&mbuf)) {
            rte_pktmbuf_free(mbuf);
            return;
        }
    }

    DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);

    DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
        "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
        mbuf, mbuf->buf_addr, mbuf->data_off,
        DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
        rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
        DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
    if (RTE_MBUF_DIRECT(mbuf)) {
        if (rte_mbuf_refcnt_read(mbuf) > 1) {
            DPAA2_SET_FD_IVP(fd);
            rte_mbuf_refcnt_update(mbuf, -1);
        }
    } else {
        struct rte_mbuf *mi;

        mi = rte_mbuf_from_indirect(mbuf);
        if (rte_mbuf_refcnt_read(mi) > 1)
            DPAA2_SET_FD_IVP(fd);
        else
            rte_mbuf_refcnt_update(mi, 1);
        rte_pktmbuf_free(mbuf);
    }
}
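
/*
 * Fall-back path for mbufs that do not come from a DPAA2-backed mempool:
 * copy the payload into a freshly allocated hardware buffer and build a
 * contiguous FD around that copy.
 */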
static inline int __attribute__((hot))
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
            struct qbman_fd *fd, uint16_t bpid)
{
    struct rte_mbuf *m;
    void *mb = NULL;

    if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
        int ret = rte_vlan_insert(&mbuf);
        if (ret)
            return 1;
    }

    if (rte_dpaa2_mbuf_alloc_bulk(
        rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
        DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
        return -1;
    }
    m = (struct rte_mbuf *)mb;
    memcpy((char *)m->buf_addr + mbuf->data_off,
           (void *)((char *)mbuf->buf_addr + mbuf->data_off),
           mbuf->pkt_len);

    /* Copy required fields */
    m->data_off = mbuf->data_off;
    m->ol_flags = mbuf->ol_flags;
    m->packet_type = mbuf->packet_type;
    m->tx_offload = mbuf->tx_offload;

    DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);

    DPAA2_PMD_DP_DEBUG(
        "mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
        " meta: %d, off: %d, len: %d\n",
        (void *)mbuf,
        mbuf->buf_addr,
        DPAA2_GET_FD_ADDR(fd),
        DPAA2_GET_FD_BPID(fd),
        rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
        DPAA2_GET_FD_OFFSET(fd),
        DPAA2_GET_FD_LEN(fd));

    return 0;
}
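
/*
 * Receive burst with prefetch: frames are pulled from the frame queue via
 * volatile dequeue commands into per-queue storage, and while one storage
 * is being drained the next pull is already issued so the QBMAN portal
 * keeps working ahead of the software.
 */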
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
    /* Function to receive frames for a given device and VQ */
    struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
    struct qbman_result *dq_storage, *dq_storage1 = NULL;
    uint32_t fqid = dpaa2_q->fqid;
    int ret, num_rx = 0;
    uint8_t pending, status;
    struct qbman_swp *swp;
    const struct qbman_fd *fd, *next_fd;
    struct qbman_pull_desc pulldesc;
    struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
    struct rte_eth_dev *dev = dpaa2_q->dev;

    if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
        ret = dpaa2_affine_qbman_swp();
        if (ret) {
            DPAA2_PMD_ERR("Failure in affining portal");
            return 0;
        }
    }
    swp = DPAA2_PER_LCORE_PORTAL;
    if (unlikely(!q_storage->active_dqs)) {
        q_storage->toggle = 0;
        dq_storage = q_storage->dq_storage[q_storage->toggle];
        q_storage->last_num_pkts = (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
                           DPAA2_DQRR_RING_SIZE : nb_pkts;
        qbman_pull_desc_clear(&pulldesc);
        qbman_pull_desc_set_numframes(&pulldesc,
                          q_storage->last_num_pkts);
        qbman_pull_desc_set_fq(&pulldesc, fqid);
        qbman_pull_desc_set_storage(&pulldesc, dq_storage,
            (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
        if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
            while (!qbman_check_command_complete(
                   get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
                ;
            clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
        }
        while (1) {
            if (qbman_swp_pull(swp, &pulldesc)) {
                DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
                           " QBMAN is busy (1)\n");
                /* Portal was busy, try again */
                continue;
            }
            break;
        }
        q_storage->active_dqs = dq_storage;
        q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
        set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);
    }

    dq_storage = q_storage->active_dqs;
    rte_prefetch0((void *)(size_t)(dq_storage));
    rte_prefetch0((void *)(size_t)(dq_storage + 1));

    /* Prepare next pull descriptor. This will give space for the
     * prefetching done on DQRR entries
     */
    q_storage->toggle ^= 1;
    dq_storage1 = q_storage->dq_storage[q_storage->toggle];
    qbman_pull_desc_clear(&pulldesc);
    qbman_pull_desc_set_numframes(&pulldesc, DPAA2_DQRR_RING_SIZE);
    qbman_pull_desc_set_fq(&pulldesc, fqid);
    qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
        (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

    /* Check if the previously issued command is completed.
     * Also seems like the SWP is shared between the Ethernet Driver
     * and the SEC driver.
     */
    while (!qbman_check_command_complete(dq_storage))
        ;
    if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
        clear_swp_active_dqs(q_storage->active_dpio_id);

    pending = 1;

    do {
        /* Loop until the dq_storage is updated with
         * new token by QBMAN
         */
        while (!qbman_check_new_result(dq_storage))
            ;
        rte_prefetch0((void *)((size_t)(dq_storage + 2)));
        /* Check whether Last Pull command is Expired and
         * setting Condition for Loop termination
         */
        if (qbman_result_DQ_is_pull_complete(dq_storage)) {
            pending = 0;
            /* Check for valid frame. */
            status = qbman_result_DQ_flags(dq_storage);
            if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
                continue;
        }
        fd = qbman_result_DQ_fd(dq_storage);

        next_fd = qbman_result_DQ_fd(dq_storage + 1);
        /* Prefetch Annotation address for the parse results */
        rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(next_fd)
                + DPAA2_FD_PTA_SIZE + 16));

        if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
            bufs[num_rx] = eth_sg_fd_to_mbuf(fd);
        else
            bufs[num_rx] = eth_fd_to_mbuf(fd);
        bufs[num_rx]->port = dev->data->port_id;

        if (dev->data->dev_conf.rxmode.hw_vlan_strip)
            rte_vlan_strip(bufs[num_rx]);

        dq_storage++;
        num_rx++;
    } while (pending);

    if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
        while (!qbman_check_command_complete(
               get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
            ;
        clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
    }
    /* issue a volatile dequeue command for next pull */
    while (1) {
        if (qbman_swp_pull(swp, &pulldesc)) {
            DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
                       "QBMAN is busy (2)\n");
            continue;
        }
        break;
    }
    q_storage->active_dqs = dq_storage1;
    q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
    set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);

    dpaa2_q->rx_pkts += num_rx;

    return num_rx;
}
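
/*
 * Eventdev Rx callbacks: translate a dequeued frame into an rte_event
 * carrying the mbuf. The atomic variant additionally records the DQRR
 * index so the entry can be consumed when the event is released.
 */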
void __attribute__((hot))
dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
                 const struct qbman_fd *fd,
                 const struct qbman_result *dq,
                 struct dpaa2_queue *rxq,
                 struct rte_event *ev)
{
    rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
        DPAA2_FD_PTA_SIZE + 16));

    ev->flow_id = rxq->ev.flow_id;
    ev->sub_event_type = rxq->ev.sub_event_type;
    ev->event_type = RTE_EVENT_TYPE_ETHDEV;
    ev->op = RTE_EVENT_OP_NEW;
    ev->sched_type = rxq->ev.sched_type;
    ev->queue_id = rxq->ev.queue_id;
    ev->priority = rxq->ev.priority;

    ev->mbuf = eth_fd_to_mbuf(fd);

    qbman_swp_dqrr_consume(swp, dq);
}
void __attribute__((hot))
dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
                   const struct qbman_fd *fd,
                   const struct qbman_result *dq,
                   struct dpaa2_queue *rxq,
                   struct rte_event *ev)
{
    uint8_t dqrr_index;

    rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
        DPAA2_FD_PTA_SIZE + 16));

    ev->flow_id = rxq->ev.flow_id;
    ev->sub_event_type = rxq->ev.sub_event_type;
    ev->event_type = RTE_EVENT_TYPE_ETHDEV;
    ev->op = RTE_EVENT_OP_NEW;
    ev->sched_type = rxq->ev.sched_type;
    ev->queue_id = rxq->ev.queue_id;
    ev->priority = rxq->ev.priority;

    ev->mbuf = eth_fd_to_mbuf(fd);

    dqrr_index = qbman_get_dqrr_idx(dq);
    ev->mbuf->seqn = dqrr_index + 1;
    DPAA2_PER_LCORE_DQRR_SIZE++;
    DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
    DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}
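
/*
 * Transmit path notes: the burst is sent in chunks of up to
 * MAX_TX_RING_SLOTS frames. Before each chunk the congestion state
 * notification (CSCN) of the queue is polled, and the burst is aborted
 * after CONG_RETRY_COUNT retries. Each mbuf is converted to a contiguous
 * or scatter/gather FD, or copied into a DPAA2 buffer when it does not
 * belong to a DPAA2 mempool.
 */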
/*
 * Callback to handle sending packets through WRIOP based interface
 */
uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
    /* Function to transmit the frames to given device and VQ */
    uint32_t loop, retry_count;
    int32_t ret;
    struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
    struct rte_mbuf *mi;
    uint32_t frames_to_send;
    struct rte_mempool *mp;
    struct qbman_eq_desc eqdesc;
    struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
    struct qbman_swp *swp;
    uint16_t num_tx = 0;
    uint16_t bpid;
    struct rte_eth_dev *dev = dpaa2_q->dev;
    struct dpaa2_dev_priv *priv = dev->data->dev_private;
    uint32_t flags[MAX_TX_RING_SLOTS] = {0};

    if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
        ret = dpaa2_affine_qbman_swp();
        if (ret) {
            DPAA2_PMD_ERR("Failure in affining portal");
            return 0;
        }
    }
    swp = DPAA2_PER_LCORE_PORTAL;

    DPAA2_PMD_DP_DEBUG("===> dev =%p, fqid =%d\n", dev, dpaa2_q->fqid);

    /*Prepare enqueue descriptor*/
    qbman_eq_desc_clear(&eqdesc);
    qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
    qbman_eq_desc_set_response(&eqdesc, 0, 0);
    qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
                 dpaa2_q->flow_id, dpaa2_q->tc_index);
    /*Clear the unused FD fields before sending*/
    while (nb_pkts) {
        /*Check if the queue is congested*/
        retry_count = 0;
        while (qbman_result_SCN_state(dpaa2_q->cscn)) {
            retry_count++;
            /* Retry for some time before giving up */
            if (retry_count > CONG_RETRY_COUNT)
                goto skip_tx;
        }

        frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts;

        for (loop = 0; loop < frames_to_send; loop++) {
            if ((*bufs)->seqn) {
                uint8_t dqrr_index = (*bufs)->seqn - 1;

                flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
                        dqrr_index;
                DPAA2_PER_LCORE_DQRR_SIZE--;
                DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
                (*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN;
            }

            fd_arr[loop].simple.frc = 0;
            DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
            DPAA2_SET_FD_FLC((&fd_arr[loop]), (size_t)NULL);
            if (likely(RTE_MBUF_DIRECT(*bufs))) {
                mp = (*bufs)->pool;
                /* Check the basic scenario and set
                 * the FD appropriately here itself.
                 */
                if (likely(mp && mp->ops_index ==
                    priv->bp_list->dpaa2_ops_index &&
                    (*bufs)->nb_segs == 1 &&
                    rte_mbuf_refcnt_read((*bufs)) == 1)) {
                    if (unlikely((*bufs)->ol_flags
                        & PKT_TX_VLAN_PKT)) {
                        ret = rte_vlan_insert(bufs);
                        if (ret)
                            goto send_n_return;
                    }
                    DPAA2_MBUF_TO_CONTIG_FD((*bufs),
                    &fd_arr[loop], mempool_to_bpid(mp));
                    bufs++;
                    continue;
                }
            } else {
                mi = rte_mbuf_from_indirect(*bufs);
                mp = mi->pool;
            }
            /* Not a hw_pkt pool allocated frame */
            if (unlikely(!mp || !priv->bp_list)) {
                DPAA2_PMD_ERR("Err: No buffer pool attached");
                goto send_n_return;
            }

            if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
                DPAA2_PMD_WARN("Non DPAA2 buffer pool");
                /* alloc should be from the default buffer pool
                 * attached to this interface
                 */
                bpid = priv->bp_list->buf_pool.bpid;

                if (unlikely((*bufs)->nb_segs > 1)) {
                    DPAA2_PMD_ERR("S/G support not added"
                        " for non hw offload buffer");
                    goto send_n_return;
                }
                if (eth_copy_mbuf_to_fd(*bufs,
                            &fd_arr[loop], bpid)) {
                    goto send_n_return;
                }
                /* free the original packet */
                rte_pktmbuf_free(*bufs);
            } else {
                bpid = mempool_to_bpid(mp);
                if (unlikely((*bufs)->nb_segs > 1)) {
                    if (eth_mbuf_to_sg_fd(*bufs,
                            &fd_arr[loop], bpid))
                        goto send_n_return;
                } else {
                    eth_mbuf_to_fd(*bufs,
                               &fd_arr[loop], bpid);
                }
            }
            bufs++;
        }
        loop = 0;
        while (loop < frames_to_send) {
            loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
                    &fd_arr[loop], &flags[loop],
                    frames_to_send - loop);
        }

        num_tx += frames_to_send;
        nb_pkts -= frames_to_send;
    }
    dpaa2_q->tx_pkts += num_tx;
    return num_tx;

send_n_return:
    /* send any already prepared fd */
    if (loop) {
        unsigned int i = 0;

        while (i < loop) {
            i += qbman_swp_enqueue_multiple(swp, &eqdesc,
                    &fd_arr[i], &flags[i],
                    loop - i);
        }
        num_tx += i;
    }
skip_tx:
    dpaa2_q->tx_pkts += num_tx;
    return num_tx;
}
/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param queue
 *   Generic pointer to TX queue structure.
 * @param[in] bufs
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= nb_pkts).
 */
uint16_t
dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)