/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"
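
/*
 * Populate a contiguous (single-buffer) QBMAN frame descriptor from an mbuf:
 * buffer IOVA, data length, buffer pool id, data offset and the ASAL value.
 */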
#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
	DPAA2_SET_FD_ASAL(_fd, DPAA2_ASAL_VAL); \
} while (0)
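
/*
 * Translate the hardware parse summary carried in the FRC word of the frame
 * descriptor into DPDK RTE_PTYPE_* values; used on SoCs (e.g. LX2160A) that
 * report parse results through the FRC.
 */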
static inline void __attribute__((hot))
dpaa2_dev_rx_parse_frc(struct rte_mbuf *m, uint16_t frc)
{
	PMD_RX_LOG(DEBUG, "frc = 0x%x ", frc);

	m->packet_type = RTE_PTYPE_UNKNOWN;
	switch (frc) {
	case DPAA2_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA2_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA2_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA2_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
		break;
	case DPAA2_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
		break;
	case DPAA2_PKT_TYPE_VLAN_1:
	case DPAA2_PKT_TYPE_VLAN_2:
		m->ol_flags |= PKT_RX_VLAN;
		break;
	/* More switch cases can be added */
	/* TODO: Add handling for checksum error check from FRC */
	default:
		m->packet_type = RTE_PTYPE_UNKNOWN;
	}
}
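
/*
 * Slow-path packet-type extraction: inspect the individual L2/L3/L4 presence
 * bits in the hardware annotation when the fast lookup on annotation word4
 * does not match one of the common combinations.
 */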
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse_slow(uint64_t hw_annot_addr)
{
	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	PMD_RX_LOG(DEBUG, "annotation = 0x%lx ", annotation->word4);
	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
		pkt_type = RTE_PTYPE_L2_ETHER_ARP;
		goto parse_done;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
		pkt_type = RTE_PTYPE_L2_ETHER;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
			     L3_IPV4_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV4;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
		    L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
		   L3_IPV6_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV6;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
		    L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
	    L3_IP_1_MORE_FRAGMENT |
	    L3_IP_N_FIRST_FRAGMENT |
	    L3_IP_N_MORE_FRAGMENT)) {
		pkt_type |= RTE_PTYPE_L4_FRAG;
		goto parse_done;
	} else {
		pkt_type |= RTE_PTYPE_L4_NONFRAG;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_UDP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_TCP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_SCTP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_ICMP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
		pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
	return pkt_type;
}
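
/*
 * Fast-path packet-type extraction: match annotation word4 against a few
 * common L3/L4 combinations and fall back to dpaa2_dev_rx_parse_slow() for
 * everything else.
 */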
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse(uint64_t hw_annot_addr)
{
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	PMD_RX_LOG(DEBUG, "annotation = 0x%lx ", annotation->word4);

	/* Return some common types from parse processing */
	switch (annotation->word4) {
	case DPAA2_L3_IPv4:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
	case DPAA2_L3_IPv6:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
	case DPAA2_L3_IPv4_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv4_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_UDP;
	case DPAA2_L3_IPv6_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv6_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_UDP;
	default:
		PMD_RX_LOG(DEBUG, "Slow parse of the parse results\n");
		break;
	}

	return dpaa2_dev_rx_parse_slow(hw_annot_addr);
}
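
/*
 * Set Rx offload flags from the hardware annotation: VLAN presence and the
 * L3/L4 checksum error status bits.
 */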
static inline void __attribute__((hot))
dpaa2_dev_rx_offload(uint64_t hw_annot_addr, struct rte_mbuf *mbuf)
{
	struct dpaa2_annot_hdr *annotation =
		(struct dpaa2_annot_hdr *)hw_annot_addr;

	if (BIT_ISSET_AT_POS(annotation->word3,
			     L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
		mbuf->ol_flags |= PKT_RX_VLAN;

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}
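
/*
 * Convert a scatter/gather frame descriptor into an mbuf chain. Each SG
 * entry points at a buffer whose mbuf metadata lives in the buffer itself,
 * so segments are recovered without fresh allocations; the buffer holding
 * the SG table is released once the chain is built.
 */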
static inline struct rte_mbuf *__attribute__((hot))
eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_sge *sgt, *sge;
	uint64_t fd_addr, sg_addr;
	int i = 0;
	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

	fd_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	/* Get Scatter gather table address */
	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));

	sge = &sgt[i++];
	sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

	/* First Scatter gather entry */
	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* Prepare all the metadata for first segment */
	first_seg->buf_addr = (uint8_t *)sg_addr;
	first_seg->ol_flags = 0;
	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
	first_seg->data_len = sge->length & 0x1FFFF;
	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
	first_seg->nb_segs = 1;
	first_seg->next = NULL;

	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_frc(first_seg,
				DPAA2_GET_FD_FRC_PARSE_SUM(fd));
	else {
		first_seg->packet_type = dpaa2_dev_rx_parse(
			(uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_FD_PTA_SIZE);
		dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
			DPAA2_GET_FD_ADDR(fd)) +
			DPAA2_FD_PTA_SIZE, first_seg);
	}
	rte_mbuf_refcnt_set(first_seg, 1);
	cur_seg = first_seg;
	while (!DPAA2_SG_IS_FINAL(sge)) {
		sge = &sgt[i++];
		sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(
				DPAA2_GET_FLE_ADDR(sge));
		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
		next_seg->buf_addr = (uint8_t *)sg_addr;
		next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
		next_seg->data_len = sge->length & 0x1FFFF;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(next_seg, 1);
		cur_seg->next = next_seg;
		next_seg->next = NULL;
		cur_seg = next_seg;
	}
	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	rte_mbuf_refcnt_set(temp, 1);
	rte_pktmbuf_free_seg(temp);

	return (void *)first_seg;
}
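
/*
 * Convert a contiguous (single-buffer) frame descriptor into an mbuf. The
 * mbuf metadata is resident in the received buffer, so only the fields the
 * hardware may have changed are repopulated.
 */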
static inline struct rte_mbuf *__attribute__((hot))
eth_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* need to repopulate some of the fields,
	 * as they may have changed during the last transmission
	 */
	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
	mbuf->pkt_len = mbuf->data_len;
	rte_mbuf_refcnt_set(mbuf, 1);

	/* Parse the packet */
	/* parse results for LX2 are there in FRC field of FD.
	 * For other DPAA2 platforms, parse results are after
	 * the private - sw annotation area
	 */
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_frc(mbuf, DPAA2_GET_FD_FRC_PARSE_SUM(fd));
	else {
		mbuf->packet_type = dpaa2_dev_rx_parse(
			(uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_FD_PTA_SIZE);
		dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
			DPAA2_GET_FD_ADDR(fd)) +
			DPAA2_FD_PTA_SIZE, mbuf);
	}

	PMD_RX_LOG(DEBUG, "to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	return mbuf;
}
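
/*
 * Build a scatter/gather frame descriptor for a multi-segment mbuf: a buffer
 * from the mbuf's pool holds the SG table and one SG entry is filled per
 * segment. Reference counts decide whether the hardware may free the
 * underlying buffers; otherwise an invalid bpid is set on the entry.
 */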
static int __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		  struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
	struct qbman_sge *sgt, *sge = NULL;
	int i;

	/* First Prepare FD to be transmitted */
	/* Resetting the buffer pool id and offset field */
	fd->simple.bpid_offset = 0;

	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
		int ret = rte_vlan_insert(&mbuf);

		if (ret)
			return 1;
	}

	temp = rte_pktmbuf_alloc(mbuf->pool);
	if (temp == NULL) {
		PMD_TX_LOG(ERR, "No memory to allocate S/G table");
		return -ENOMEM;
	}

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
	DPAA2_SET_FD_OFFSET(fd, temp->data_off);
	DPAA2_SET_FD_BPID(fd, bpid);
	DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
	/* Set Scatter gather table and Scatter gather entries */
	sgt = (struct qbman_sge *)(
			(uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_GET_FD_OFFSET(fd));

	for (i = 0; i < mbuf->nb_segs; i++) {
		sge = &sgt[i];
		/* Resetting the buffer pool id and offset field */
		sge->fin_bpid_offset = 0;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
		DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
		sge->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			if (rte_mbuf_refcnt_read(cur_seg) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
				rte_mbuf_refcnt_update(cur_seg, -1);
			} else
				DPAA2_SET_FLE_BPID(sge,
						mempool_to_bpid(cur_seg->pool));
			cur_seg = cur_seg->next;
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * owner buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
			} else {
				DPAA2_SET_FLE_BPID(sge,
						   mempool_to_bpid(mi->pool));
				rte_mbuf_refcnt_update(mi, 1);
			}
			prev_seg = cur_seg;
			cur_seg = cur_seg->next;
			prev_seg->next = NULL;
			rte_pktmbuf_free(prev_seg);
		}
	}
	DPAA2_SG_SET_FINAL(sge, true);
	return 0;
}
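
/*
 * Build a contiguous frame descriptor for a single-segment mbuf that already
 * lives in a hardware-backed buffer pool. For direct mbufs with refcnt > 1
 * and for indirect mbufs, an invalid bpid is set so the hardware does not
 * free a buffer that is still referenced elsewhere.
 */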
static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid) __attribute__((unused));

static void __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid)
{
	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
		if (rte_vlan_insert(&mbuf)) {
			rte_pktmbuf_free(mbuf);
			return;
		}
	}

	/* Resetting the buffer pool id and offset field */
	fd->simple.bpid_offset = 0;

	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);

	PMD_TX_LOG(DEBUG, "mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(mbuf, -1);
		}
	} else {
		struct rte_mbuf *mi;

		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1)
			DPAA2_SET_FD_IVP(fd);
		else
			rte_mbuf_refcnt_update(mi, 1);
		rte_pktmbuf_free(mbuf);
	}
}
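
/*
 * Fallback Tx path for mbufs that do not come from a DPAA2-backed mempool:
 * allocate a buffer from the pool behind the given bpid, copy the packet
 * data and the relevant metadata into it, and build the FD from that copy.
 */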
static inline int __attribute__((hot))
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
		    struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *m;
	void *mb = NULL;

	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
		int ret = rte_vlan_insert(&mbuf);

		if (ret)
			return 1;
	}

	if (rte_dpaa2_mbuf_alloc_bulk(
		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
		PMD_TX_LOG(WARNING, "Unable to allocate DPAA2 buffer");
		return -1;
	}
	m = (struct rte_mbuf *)mb;
	memcpy((char *)m->buf_addr + mbuf->data_off,
	       (void *)((char *)mbuf->buf_addr + mbuf->data_off),
	       mbuf->pkt_len);

	/* Copy required fields */
	m->data_off = mbuf->data_off;
	m->ol_flags = mbuf->ol_flags;
	m->packet_type = mbuf->packet_type;
	m->tx_offload = mbuf->tx_offload;

	/* Resetting the buffer pool id and offset field */
	fd->simple.bpid_offset = 0;

	DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);

	PMD_TX_LOG(DEBUG, " mbuf %p BMAN buf addr %p",
		   (void *)mbuf, mbuf->buf_addr);

	PMD_TX_LOG(DEBUG, " fdaddr =%lx bpid =%d meta =%d off =%d, len =%d",
		   DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}
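
/*
 * Rx burst callback. Pull (volatile dequeue) commands are issued one burst
 * ahead: result storage is ping-ponged through q_storage->toggle, so frames
 * from the previous pull are processed while the next pull is already in
 * flight.
 */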
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function receive frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd[DPAA2_DQRR_RING_SIZE], *next_fd;
	struct qbman_pull_desc pulldesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev *dev = dpaa2_q->dev;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	if (!q_storage->active_dqs) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
					      DPAA2_DQRR_RING_SIZE : nb_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				PMD_RX_LOG(WARNING, "VDQ command is not issued."
					   " QBMAN is busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);
	}
	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));
	/* Check if the previous issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	while (!is_last) {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((uint64_t)(dq_storage + 2)));
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd[num_rx] = qbman_result_DQ_fd(dq_storage);

		next_fd = qbman_result_DQ_fd(dq_storage + 1);
		/* Prefetch Annotation address for the parse results */
		rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(next_fd)
				+ DPAA2_FD_PTA_SIZE + 16));

		if (unlikely(DPAA2_FD_GET_FORMAT(fd[num_rx]) == qbman_fd_sg))
			bufs[num_rx] = eth_sg_fd_to_mbuf(fd[num_rx]);
		else
			bufs[num_rx] = eth_fd_to_mbuf(fd[num_rx]);
		bufs[num_rx]->port = dev->data->port_id;

		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
			rte_vlan_strip(bufs[num_rx]);

		dq_storage++;
		num_rx++;
	}

	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
	}
	q_storage->toggle ^= 1;
	dq_storage = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, DPAA2_DQRR_RING_SIZE);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			PMD_RX_LOG(WARNING, "VDQ command is not issued."
				   " QBMAN is busy\n");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);

	dpaa2_q->rx_pkts += num_rx;

	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}
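
/*
 * Fill a DPDK event from a hardware dequeue entry in parallel-queue mode:
 * convert the FD to an mbuf, copy the event fields configured on the Rx
 * queue, and consume the DQRR entry.
 */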
void __attribute__((hot))
dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	ev->mbuf = eth_fd_to_mbuf(fd);

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	qbman_swp_dqrr_consume(swp, dq);
}

/*
 * Callback to handle sending packets through WRIOP based interface
 */
uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to given device and VQ */
	uint32_t loop, retry_count;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	uint32_t frames_to_send;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint16_t bpid;
	struct rte_eth_dev *dev = dpaa2_q->dev;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	PMD_TX_LOG(DEBUG, "===> dev =%p, fqid =%d", dev, dpaa2_q->fqid);

	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
			     dpaa2_q->flow_id, dpaa2_q->tc_index);

	/* Clear the unused FD fields before sending */
	while (nb_pkts) {
		/* Check if the queue is congested */
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto skip_tx;
		}

		frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts;

		for (loop = 0; loop < frames_to_send; loop++) {
			fd_arr[loop].simple.frc = 0;
			DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
			DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL);
			if (likely(RTE_MBUF_DIRECT(*bufs))) {
				mp = (*bufs)->pool;
				/* Check the basic scenario and set
				 * the FD appropriately here itself.
				 */
				if (likely(mp && mp->ops_index ==
				    priv->bp_list->dpaa2_ops_index &&
				    (*bufs)->nb_segs == 1 &&
				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
					if (unlikely((*bufs)->ol_flags
						& PKT_TX_VLAN_PKT)) {
						ret = rte_vlan_insert(bufs);
						if (ret)
							goto send_n_return;
					}
					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
					&fd_arr[loop], mempool_to_bpid(mp));
					bufs++;
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}
			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp || !priv->bp_list)) {
				PMD_TX_LOG(ERR, "err: no bpool attached");
				goto send_n_return;
			}

			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				PMD_TX_LOG(ERR, "non hw offload buffer");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				bpid = priv->bp_list->buf_pool.bpid;

				if (unlikely((*bufs)->nb_segs > 1)) {
					PMD_TX_LOG(ERR, "S/G support not added"
						" for non hw offload buffer");
					goto send_n_return;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					goto send_n_return;
				}
				/* free the original packet */
				rte_pktmbuf_free(*bufs);
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							&fd_arr[loop], bpid))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop], bpid);
				}
			}
			bufs++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
					&fd_arr[loop], frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_pkts -= frames_to_send;
	}
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;

send_n_return:
	/* send any already prepared fd */
	if (loop) {
		unsigned int i = 0;

		while (i < loop) {
			i += qbman_swp_enqueue_multiple(swp, &eqdesc,
							&fd_arr[i], loop - i);
		}
		num_tx += loop;
	}
skip_tx:
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;
}

/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 * @param queue
 *   Generic pointer to TX queue structure.
 * @param bufs
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 * @return
 *   Number of packets successfully transmitted (<= nb_pkts).
 */
uint16_t
dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)