/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"

static inline void __attribute__((hot))
dpaa2_dev_rx_parse_frc(struct rte_mbuf *m, uint16_t frc)
{
	PMD_RX_LOG(DEBUG, "frc = 0x%x", frc);

	m->packet_type = RTE_PTYPE_UNKNOWN;
	switch (frc) {
	case DPAA2_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA2_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA2_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA2_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
		break;
	case DPAA2_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
		break;
	case DPAA2_PKT_TYPE_VLAN_1:
	case DPAA2_PKT_TYPE_VLAN_2:
		m->ol_flags |= PKT_RX_VLAN;
		break;
	/* More switch cases can be added */
	/* TODO: Add handling for checksum error check from FRC */
	default:
		m->packet_type = RTE_PTYPE_UNKNOWN;
	}
}

static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse(uint64_t hw_annot_addr)
{
	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	PMD_RX_LOG(DEBUG, "annotation = 0x%lx", annotation->word4);

	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
		pkt_type = RTE_PTYPE_L2_ETHER_ARP;
		goto parse_done;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
		pkt_type = RTE_PTYPE_L2_ETHER;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
			     L3_IPV4_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV4;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
		    L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
		   L3_IPV6_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV6;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
		    L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
	    L3_IP_1_MORE_FRAGMENT |
	    L3_IP_N_FIRST_FRAGMENT |
	    L3_IP_N_MORE_FRAGMENT)) {
		pkt_type |= RTE_PTYPE_L4_FRAG;
		goto parse_done;
	} else {
		pkt_type |= RTE_PTYPE_L4_NONFRAG;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_UDP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_TCP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_SCTP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_ICMP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
		pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
	return pkt_type;
}

static inline void __attribute__((hot))
dpaa2_dev_rx_offload(uint64_t hw_annot_addr, struct rte_mbuf *mbuf)
{
	struct dpaa2_annot_hdr *annotation =
		(struct dpaa2_annot_hdr *)hw_annot_addr;

	if (BIT_ISSET_AT_POS(annotation->word3,
			     L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
		mbuf->ol_flags |= PKT_RX_VLAN;

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}
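
/*
 * Convert a scatter/gather frame descriptor (FD) received from WRIOP into a
 * chain of rte_mbufs. Each S/G entry is translated back to the in-line mbuf
 * located in front of its data buffer; the buffer carrying the S/G table
 * itself is released once the chain has been built.
 */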
static inline struct rte_mbuf *__attribute__((hot))
eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_sge *sgt, *sge;
	dma_addr_t sg_addr;
	int i = 0;
	uint64_t fd_addr;
	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

	fd_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	/* Get Scatter gather table address */
	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));
	sge = &sgt[i++];
	sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

	/* First Scatter gather entry */
	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	/* Prepare all the metadata for first segment */
	first_seg->buf_addr = (uint8_t *)sg_addr;
	first_seg->ol_flags = 0;
	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
	first_seg->data_len = sge->length & 0x1FFFF;
	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
	first_seg->nb_segs = 1;
	first_seg->next = NULL;

	if (dpaa2_svr_family == SVR_LX2160A) {
		dpaa2_dev_rx_parse_frc(first_seg,
				DPAA2_GET_FD_FRC_PARSE_SUM(fd));
	} else {
		first_seg->packet_type = dpaa2_dev_rx_parse(
			(uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_FD_PTA_SIZE);
		dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
			DPAA2_GET_FD_ADDR(fd)) +
			DPAA2_FD_PTA_SIZE, first_seg);
	}
	rte_mbuf_refcnt_set(first_seg, 1);

	cur_seg = first_seg;
	while (!DPAA2_SG_IS_FINAL(sge)) {
		sge = &sgt[i++];
		sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(
				DPAA2_GET_FLE_ADDR(sge));
		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
		next_seg->buf_addr = (uint8_t *)sg_addr;
		next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
		next_seg->data_len = sge->length & 0x1FFFF;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(next_seg, 1);
		cur_seg->next = next_seg;
		next_seg->next = NULL;
		cur_seg = next_seg;
	}

	/* Release the buffer that held the S/G table itself */
	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	rte_mbuf_refcnt_set(temp, 1);
	rte_pktmbuf_free_seg(temp);

	return (void *)first_seg;
}
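
/*
 * Convert a single-buffer frame descriptor into an rte_mbuf. Only the fields
 * that the hardware (or a previous transmission) may have modified are
 * repopulated here.
 */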
static inline struct rte_mbuf *__attribute__((hot))
eth_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* need to repopulate some of the fields,
	 * as they may have changed in last transmission
	 */
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
	mbuf->pkt_len = mbuf->data_len;

	/* Parse the packet */
	/* parse results for LX2 are in the FRC field of the FD.
	 * For other DPAA2 platforms, parse results are after
	 * the private - sw annotation area
	 */
	if (dpaa2_svr_family == SVR_LX2160A) {
		dpaa2_dev_rx_parse_frc(mbuf, DPAA2_GET_FD_FRC_PARSE_SUM(fd));
	} else {
		mbuf->packet_type = dpaa2_dev_rx_parse(
			(uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_FD_PTA_SIZE);
		dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
			     DPAA2_GET_FD_ADDR(fd)) +
			     DPAA2_FD_PTA_SIZE, mbuf);
	}

	mbuf->next = NULL;
	rte_mbuf_refcnt_set(mbuf, 1);

	PMD_RX_LOG(DEBUG, "to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%lx, meta = %d  bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	return mbuf;
}
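
/*
 * Build a scatter/gather frame descriptor from a (possibly multi-segment)
 * mbuf. A buffer from the mbuf pool holds the S/G table and each segment
 * becomes one S/G entry. Segments with refcnt > 1, and indirect mbufs, get
 * an invalid BPID so that hardware does not free the underlying buffer.
 */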
static int __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		  struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
	struct qbman_sge *sgt, *sge = NULL;
	int i;

	/* First Prepare FD to be transmitted */
	/* Resetting the buffer pool id and offset field */
	fd->simple.bpid_offset = 0;

	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
		int ret = rte_vlan_insert(&mbuf);

		if (ret)
			return 1;
	}

	temp = rte_pktmbuf_alloc(mbuf->pool);
	if (temp == NULL) {
		PMD_TX_LOG(ERR, "No memory to allocate S/G table");
		return -ENOMEM;
	}

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
	DPAA2_SET_FD_OFFSET(fd, temp->data_off);
	DPAA2_SET_FD_BPID(fd, bpid);
	DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
	/* Set Scatter gather table and Scatter gather entries */
	sgt = (struct qbman_sge *)(
			(uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_GET_FD_OFFSET(fd));

	for (i = 0; i < mbuf->nb_segs; i++) {
		sge = &sgt[i];
		/* Resetting the buffer pool id and offset field */
		sge->fin_bpid_offset = 0;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
		DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
		sge->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			if (rte_mbuf_refcnt_read(cur_seg) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
				rte_mbuf_refcnt_update(cur_seg, -1);
			} else {
				DPAA2_SET_FLE_BPID(sge,
						mempool_to_bpid(cur_seg->pool));
			}
			cur_seg = cur_seg->next;
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * owner buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
			} else {
				DPAA2_SET_FLE_BPID(sge,
						mempool_to_bpid(mi->pool));
				rte_mbuf_refcnt_update(mi, 1);
			}
			prev_seg = cur_seg;
			cur_seg = cur_seg->next;
			prev_seg->next = NULL;
			rte_pktmbuf_free(prev_seg);
		}
	}
	DPAA2_SG_SET_FINAL(sge, true);
	return 0;
}

static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid) __attribute__((unused));

static void __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid)
{
	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
		if (rte_vlan_insert(&mbuf)) {
			rte_pktmbuf_free(mbuf);
			return;
		}
	}

	/* Resetting the buffer pool id and offset field */
	fd->simple.bpid_offset = 0;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FD_LEN(fd, mbuf->data_len);
	DPAA2_SET_FD_BPID(fd, bpid);
	DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
	DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);

	PMD_TX_LOG(DEBUG, "mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%lx, meta = %d  bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			/* Invalid bpid: HW must not free this buffer */
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(mbuf, -1);
		}
	} else {
		struct rte_mbuf *mi;

		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1)
			DPAA2_SET_FD_IVP(fd);
		else
			rte_mbuf_refcnt_update(mi, 1);
		rte_pktmbuf_free(mbuf);
	}
}
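
/*
 * Slow Tx path for mbufs that were not allocated from a DPAA2 backed
 * mempool: allocate a buffer from the interface's default pool, copy the
 * packet data and relevant metadata into it, and build the frame descriptor
 * around that copy.
 */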
static inline int __attribute__((hot))
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
		    struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *m;
	void *mb = NULL;

	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
		int ret = rte_vlan_insert(&mbuf);

		if (ret)
			return 1;
	}

	if (rte_dpaa2_mbuf_alloc_bulk(
		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
		PMD_TX_LOG(WARNING, "Unable to allocate DPAA2 buffer");
		return -1;
	}
	m = (struct rte_mbuf *)mb;
	memcpy((char *)m->buf_addr + mbuf->data_off,
	       (void *)((char *)mbuf->buf_addr + mbuf->data_off),
	       mbuf->pkt_len);

	/* Copy required fields */
	m->data_off = mbuf->data_off;
	m->ol_flags = mbuf->ol_flags;
	m->packet_type = mbuf->packet_type;
	m->tx_offload = mbuf->tx_offload;

	/* Resetting the buffer pool id and offset field */
	fd->simple.bpid_offset = 0;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(m));
	DPAA2_SET_FD_LEN(fd, mbuf->data_len);
	DPAA2_SET_FD_BPID(fd, bpid);
	DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
	DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);

	PMD_TX_LOG(DEBUG, " mbuf %p BMAN buf addr %p",
		   (void *)mbuf, mbuf->buf_addr);

	PMD_TX_LOG(DEBUG, " fdaddr =%lx bpid =%d meta =%d off =%d, len =%d",
		   DPAA2_GET_FD_ADDR(fd),
		   DPAA2_GET_FD_BPID(fd),
		   rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		   DPAA2_GET_FD_OFFSET(fd),
		   DPAA2_GET_FD_LEN(fd));

	return 0;
}
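
/*
 * Rx burst callback. Dequeue storages are used in ping-pong fashion: the
 * volatile dequeue (pull) command issued on the previous call is consumed
 * here, and a new pull command is issued on the other storage before
 * returning, so frames are already available when the next burst arrives.
 */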
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function receives frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd[DPAA2_DQRR_RING_SIZE];
	struct qbman_pull_desc pulldesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev *dev = dpaa2_q->dev;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	if (!q_storage->active_dqs) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
					      DPAA2_DQRR_RING_SIZE : nb_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				PMD_RX_LOG(WARNING, "VDQ command is not issued."
					   " QBMAN is busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	/* Check if the previously issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	while (!is_last) {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));
		/* Check whether the last pull command has expired and
		 * set the condition for loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd[num_rx] = qbman_result_DQ_fd(dq_storage);

		/* Prefetch Annotation address for the parse results */
		rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(fd[num_rx])
				+ DPAA2_FD_PTA_SIZE + 16));

		if (unlikely(DPAA2_FD_GET_FORMAT(fd[num_rx]) == qbman_fd_sg))
			bufs[num_rx] = eth_sg_fd_to_mbuf(fd[num_rx]);
		else
			bufs[num_rx] = eth_fd_to_mbuf(fd[num_rx]);
		bufs[num_rx]->port = dev->data->port_id;

		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
			rte_vlan_strip(bufs[num_rx]);

		dq_storage++;
		num_rx++;
	}

	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
	}
	q_storage->toggle ^= 1;
	dq_storage = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, DPAA2_DQRR_RING_SIZE);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
	/* Issue a volatile dequeue command for the next burst. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			PMD_RX_LOG(WARNING, "VDQ command is not issued."
				   " QBMAN is busy\n");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);

	dpaa2_q->rx_pkts += num_rx;

	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}

void __attribute__((hot))
dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	ev->mbuf = eth_fd_to_mbuf(fd);

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	qbman_swp_dqrr_consume(swp, dq);
}

/*
 * Callback to handle sending packets through WRIOP based interface
 */
uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to given device and VQ */
	uint32_t loop, retry_count;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	uint32_t frames_to_send;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint16_t bpid;
	struct rte_eth_dev *dev = dpaa2_q->dev;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	PMD_TX_LOG(DEBUG, "===> dev =%p, fqid =%d", dev, dpaa2_q->fqid);

	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
			     dpaa2_q->flow_id, dpaa2_q->tc_index);

	/* Clear the unused FD fields before sending */
	while (nb_pkts) {
		/* Check if the queue is congested */
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto skip_tx;
		}

		frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts;

		for (loop = 0; loop < frames_to_send; loop++) {
			fd_arr[loop].simple.frc = 0;
			DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
			DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL);
			if (RTE_MBUF_DIRECT(*bufs)) {
				mp = (*bufs)->pool;
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}
			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp || !priv->bp_list)) {
				PMD_TX_LOG(ERR, "err: no bpool attached");
				goto send_n_return;
			}

			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				PMD_TX_LOG(ERR, "non hw offload buffer");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				bpid = priv->bp_list->buf_pool.bpid;

				if (unlikely((*bufs)->nb_segs > 1)) {
					PMD_TX_LOG(ERR, "S/G support not added"
						" for non hw offload buffer");
					goto send_n_return;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					goto send_n_return;
				}
				/* free the original packet */
				rte_pktmbuf_free(*bufs);
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							&fd_arr[loop], bpid))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop], bpid);
				}
			}
			bufs++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
					&fd_arr[loop], frames_to_send - loop);
		}

		num_tx += frames_to_send;
		dpaa2_q->tx_pkts += frames_to_send;
		nb_pkts -= frames_to_send;
	}
	return num_tx;

send_n_return:
	/* send any already prepared fd */
	if (loop) {
		unsigned int i = 0;

		while (i < loop) {
			i += qbman_swp_enqueue_multiple(swp, &eqdesc,
							&fd_arr[i], loop - i);
		}
		num_tx += loop;
		dpaa2_q->tx_pkts += loop;
	}
skip_tx:
	return num_tx;
}

/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 * @param queue
 *   Generic pointer to TX queue structure.
 * @param bufs
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 * @return
 *   Number of packets successfully transmitted (<= nb_pkts).
 */
uint16_t
dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)