/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *
 */
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include <rte_fslmc.h>
#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_eventdev.h>

#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"
#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
	DPAA2_SET_FD_ASAL(_fd, DPAA2_ASAL_VAL); \
} while (0)
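
/* Derive the mbuf packet type directly from the frame context (FRC) word
 * of the frame descriptor. Used on LX2160A, where the hardware parse
 * summary is reported in the FRC field instead of the annotation area.
 */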
static inline void __attribute__((hot))
dpaa2_dev_rx_parse_frc(struct rte_mbuf *m, uint16_t frc)
{
	PMD_RX_LOG(DEBUG, "frc = 0x%x ", frc);

	m->packet_type = RTE_PTYPE_UNKNOWN;
	switch (frc) {
	case DPAA2_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA2_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA2_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA2_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
		break;
	case DPAA2_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
		break;
	case DPAA2_PKT_TYPE_VLAN_1:
	case DPAA2_PKT_TYPE_VLAN_2:
		m->ol_flags |= PKT_RX_VLAN;
		break;
	/* More switch cases can be added */
	/* TODO: Add handling for checksum error check from FRC */
	default:
		m->packet_type = RTE_PTYPE_UNKNOWN;
	}
}
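
/* Walk the full hardware annotation words to build the packet type bit by
 * bit (L2, L3, L4, fragments, IP options). This is the fallback used when
 * the frame does not match one of the common cases handled by
 * dpaa2_dev_rx_parse() below.
 */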
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse_slow(uint64_t hw_annot_addr)
{
	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	PMD_RX_LOG(DEBUG, "annotation = 0x%lx ", annotation->word4);
	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
		pkt_type = RTE_PTYPE_L2_ETHER_ARP;
		goto parse_done;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
		pkt_type = RTE_PTYPE_L2_ETHER;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
			     L3_IPV4_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV4;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
		    L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
		   L3_IPV6_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV6;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
		    L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
	    L3_IP_1_MORE_FRAGMENT |
	    L3_IP_N_FIRST_FRAGMENT |
	    L3_IP_N_MORE_FRAGMENT)) {
		pkt_type |= RTE_PTYPE_L4_FRAG;
		goto parse_done;
	} else {
		pkt_type |= RTE_PTYPE_L4_NONFRAG;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_UDP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_TCP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_SCTP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_ICMP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
		pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
	return pkt_type;
}
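
/* Fast-path parse: match the whole annotation word4 against a handful of
 * common L3/L4 combinations and fall back to the bit-by-bit slow parse
 * for anything else.
 */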
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse(uint64_t hw_annot_addr)
{
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	PMD_RX_LOG(DEBUG, "annotation = 0x%lx ", annotation->word4);

	/* Return some common types from parse processing */
	switch (annotation->word4) {
	case DPAA2_L3_IPv4:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
	case DPAA2_L3_IPv6:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
	case DPAA2_L3_IPv4_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv4_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
				RTE_PTYPE_L4_UDP;
	case DPAA2_L3_IPv6_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv6_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
				RTE_PTYPE_L4_UDP;
	default:
		PMD_RX_LOG(DEBUG, "Using slow parse for the parse results\n");
		break;
	}

	return dpaa2_dev_rx_parse_slow(hw_annot_addr);
}
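
/* Translate the frame annotation status bits into rte_mbuf offload flags:
 * VLAN presence and L3/L4 checksum errors reported by the hardware.
 */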
static inline void __attribute__((hot))
dpaa2_dev_rx_offload(uint64_t hw_annot_addr, struct rte_mbuf *mbuf)
{
	struct dpaa2_annot_hdr *annotation =
		(struct dpaa2_annot_hdr *)hw_annot_addr;

	if (BIT_ISSET_AT_POS(annotation->word3,
			     L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
		mbuf->ol_flags |= PKT_RX_VLAN;

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}
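
/* Convert a scatter/gather frame descriptor received from QBMAN into a
 * chained rte_mbuf. Each S/G entry becomes one segment; the mbuf metadata
 * sits inline in front of each data buffer (meta_data_size before it).
 */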
static inline struct rte_mbuf *__attribute__((hot))
eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_sge *sgt, *sge;
	dma_addr_t sg_addr;
	int i = 0;
	uint64_t fd_addr;
	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

	fd_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	/* Get Scatter gather table address */
	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));

	sge = &sgt[i++];
	sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

	/* First Scatter gather entry */
	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* Prepare all the metadata for first segment */
	first_seg->buf_addr = (uint8_t *)sg_addr;
	first_seg->ol_flags = 0;
	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
	first_seg->data_len = sge->length & 0x1FFFF;
	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
	first_seg->nb_segs = 1;
	first_seg->next = NULL;

	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_frc(first_seg,
					DPAA2_GET_FD_FRC_PARSE_SUM(fd));
	else {
		first_seg->packet_type = dpaa2_dev_rx_parse(
			(uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_FD_PTA_SIZE);
		dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
				     DPAA2_GET_FD_ADDR(fd)) +
				     DPAA2_FD_PTA_SIZE, first_seg);
	}
	rte_mbuf_refcnt_set(first_seg, 1);

	cur_seg = first_seg;
	while (!DPAA2_SG_IS_FINAL(sge)) {
		sge = &sgt[i++];
		sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(
				DPAA2_GET_FLE_ADDR(sge));
		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
		next_seg->buf_addr = (uint8_t *)sg_addr;
		next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
		next_seg->data_len = sge->length & 0x1FFFF;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(next_seg, 1);
		cur_seg->next = next_seg;
		next_seg->next = NULL;
		cur_seg = next_seg;
	}

	/* Release the buffer that carried the S/G table itself */
	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	rte_mbuf_refcnt_set(temp, 1);
	rte_pktmbuf_free_seg(temp);

	return (void *)first_seg;
}
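
/* Convert a single-buffer (contiguous) frame descriptor into an rte_mbuf.
 * The mbuf metadata sits just before the packet buffer, so only the fields
 * that may have changed since the buffer was last used are refreshed here.
 */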
static inline struct rte_mbuf *__attribute__((hot))
eth_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* need to repopulate some of the fields,
	 * as they may have changed in last transmission
	 */
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
	mbuf->pkt_len = mbuf->data_len;
	mbuf->next = NULL;
	rte_mbuf_refcnt_set(mbuf, 1);

	/* Parse the packet */
	/* parse results for LX2 are there in FRC field of FD.
	 * For other DPAA2 platforms, parse results are after
	 * the private - sw annotation area
	 */
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_frc(mbuf, DPAA2_GET_FD_FRC_PARSE_SUM(fd));
	else {
		mbuf->packet_type = dpaa2_dev_rx_parse(
			(uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_FD_PTA_SIZE);
		dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
				     DPAA2_GET_FD_ADDR(fd)) +
				     DPAA2_FD_PTA_SIZE, mbuf);
	}

	PMD_RX_LOG(DEBUG, "to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	return mbuf;
}
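
/* Build a scatter/gather frame descriptor from a multi-segment mbuf for
 * transmission. A fresh buffer is allocated from the mbuf's pool to hold
 * the S/G table; each segment keeps its own buffer, and the bpid (or the
 * "invalid bpid" bit for shared/indirect buffers) tells hardware whether
 * it may free that buffer after transmission.
 */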
static int __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		  struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
	struct qbman_sge *sgt, *sge = NULL;
	int i;

	/* First prepare FD to be transmitted */
	/* Resetting the buffer pool id and offset field */
	fd->simple.bpid_offset = 0;

	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
		int ret = rte_vlan_insert(&mbuf);
		if (ret)
			return 1;
	}

	temp = rte_pktmbuf_alloc(mbuf->pool);
	if (temp == NULL) {
		PMD_TX_LOG(ERR, "No memory to allocate S/G table");
		return -ENOMEM;
	}

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
	DPAA2_SET_FD_OFFSET(fd, temp->data_off);
	DPAA2_SET_FD_BPID(fd, bpid);
	DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
	/* Set Scatter gather table and Scatter gather entries */
	sgt = (struct qbman_sge *)(
			(uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_GET_FD_OFFSET(fd));

	for (i = 0; i < mbuf->nb_segs; i++) {
		sge = &sgt[i];
		/* Resetting the buffer pool id and offset field */
		sge->fin_bpid_offset = 0;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
		DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
		sge->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			if (rte_mbuf_refcnt_read(cur_seg) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
				rte_mbuf_refcnt_update(cur_seg, -1);
			} else
				DPAA2_SET_FLE_BPID(sge,
						mempool_to_bpid(cur_seg->pool));
			cur_seg = cur_seg->next;
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * owner buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
			} else {
				DPAA2_SET_FLE_BPID(sge,
						   mempool_to_bpid(mi->pool));
				rte_mbuf_refcnt_update(mi, 1);
			}
			prev_seg = cur_seg;
			cur_seg = cur_seg->next;
			prev_seg->next = NULL;
			rte_pktmbuf_free(prev_seg);
		}
	}
	DPAA2_SG_SET_FINAL(sge, true);
	return 0;
}
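
/* Fill a contiguous frame descriptor from a single-segment mbuf. Direct
 * buffers with refcnt 1 are handed to hardware for freeing via their bpid;
 * shared or indirect buffers get the "invalid bpid" bit so hardware leaves
 * them alone, and the reference counts are adjusted accordingly.
 */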
static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid) __attribute__((unused));

static void __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid)
{
	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
		if (rte_vlan_insert(&mbuf)) {
			rte_pktmbuf_free(mbuf);
			return;
		}
	}

	/* Resetting the buffer pool id and offset field */
	fd->simple.bpid_offset = 0;

	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);

	PMD_TX_LOG(DEBUG, "mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(mbuf, -1);
		}
	} else {
		struct rte_mbuf *mi;

		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1)
			DPAA2_SET_FD_IVP(fd);
		else
			rte_mbuf_refcnt_update(mi, 1);
		rte_pktmbuf_free(mbuf);
	}
}
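
/* Copy the packet into a freshly allocated buffer from the DPAA2 hardware
 * pool associated with bpid, then build a contiguous FD over the copy.
 * Used when the packet was not allocated from a hardware-backed mempool.
 */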
static inline int __attribute__((hot))
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
		    struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *m;
	void *mb = NULL;

	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
		int ret = rte_vlan_insert(&mbuf);
		if (ret)
			return 1;
	}

	if (rte_dpaa2_mbuf_alloc_bulk(
		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
		PMD_TX_LOG(WARNING, "Unable to allocate DPAA2 buffer");
		return -1;
	}
	m = (struct rte_mbuf *)mb;
	memcpy((char *)m->buf_addr + mbuf->data_off,
	       (void *)((char *)mbuf->buf_addr + mbuf->data_off),
	       mbuf->pkt_len);

	/* Copy required fields */
	m->data_off = mbuf->data_off;
	m->data_len = mbuf->data_len;
	m->pkt_len = mbuf->pkt_len;
	m->ol_flags = mbuf->ol_flags;
	m->packet_type = mbuf->packet_type;
	m->tx_offload = mbuf->tx_offload;

	/* Resetting the buffer pool id and offset field */
	fd->simple.bpid_offset = 0;

	DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);

	PMD_TX_LOG(DEBUG, " mbuf %p BMAN buf addr %p",
		   (void *)mbuf, mbuf->buf_addr);

	PMD_TX_LOG(DEBUG, " fdaddr =%lx bpid =%d meta =%d off =%d, len =%d",
		   DPAA2_GET_FD_ADDR(fd),
		   DPAA2_GET_FD_BPID(fd),
		   rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		   DPAA2_GET_FD_OFFSET(fd),
		   DPAA2_GET_FD_LEN(fd));

	return 0;
}
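
/* Receive burst with prefetching: frames are dequeued for one queue through
 * QBMAN volatile dequeue (pull) commands. While the current result storage
 * is being processed, a further pull is issued into the alternate storage,
 * so the next batch is already in flight when this one is consumed.
 */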
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function receive frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage, *dq_storage1 = 0;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, next_pull = 0, num_pulled, num_to_pull;
	uint8_t pending, is_repeat, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd, *next_fd;
	struct qbman_pull_desc pulldesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev *dev = dpaa2_q->dev;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	/* if the original request for this q was from another portal */
	if (unlikely(DPAA2_PER_LCORE_DPIO->index !=
			q_storage->active_dpio_id)) {
		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
			while (!qbman_check_command_complete(get_swp_active_dqs
				(DPAA2_PER_LCORE_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
		}
		q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
	}

	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
					    DPAA2_DQRR_RING_SIZE : nb_pkts;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				PMD_RX_LOG(WARNING,
					"VDQ command not issued. QBMAN busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);
	}

	/* pkt to pull in current pull request */
	num_to_pull = q_storage->last_num_pkts;

	/* Number of packets requested is more than current pull request */
	if (nb_pkts > num_to_pull)
		next_pull = nb_pkts - num_to_pull;

	dq_storage = q_storage->active_dqs;
	/* Check if the previous issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

repeat:
	is_repeat = 0;

	/* issue the deq command one more time to get another set of packets */
	if (next_pull) {
		q_storage->toggle ^= 1;
		dq_storage1 = q_storage->dq_storage[q_storage->toggle];
		qbman_pull_desc_clear(&pulldesc);

		if (next_pull > DPAA2_DQRR_RING_SIZE) {
			qbman_pull_desc_set_numframes(&pulldesc,
					DPAA2_DQRR_RING_SIZE);
			next_pull = next_pull - DPAA2_DQRR_RING_SIZE;
			q_storage->last_num_pkts = DPAA2_DQRR_RING_SIZE;
		} else {
			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
			q_storage->last_num_pkts = next_pull;
			next_pull = 0;
		}
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
			(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				PMD_RX_LOG(WARNING,
					"VDQ command not issued. QBMAN busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}

		is_repeat = 1;
		q_storage->active_dqs = dq_storage1;
		set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
	}

	rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));

	num_pulled = 0;
	pending = 1;

	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((uint64_t)(dq_storage + 2)));
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd = qbman_result_DQ_fd(dq_storage);

		next_fd = qbman_result_DQ_fd(dq_storage + 1);
		/* Prefetch Annotation address for the parse results */
		rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(next_fd)
				+ DPAA2_FD_PTA_SIZE + 16));

		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
			bufs[num_rx] = eth_sg_fd_to_mbuf(fd);
		else
			bufs[num_rx] = eth_fd_to_mbuf(fd);
		bufs[num_rx]->port = dev->data->port_id;

		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
			rte_vlan_strip(bufs[num_rx]);

		dq_storage++;
		num_rx++;
		num_pulled++;
	} while (pending);

	/* Another VDQ request pending and this request returned full */
	if (is_repeat) {
		/* all packets pulled from this pull request */
		if (num_pulled == num_to_pull) {
			/* pkt to pull in current pull request */
			num_to_pull = q_storage->last_num_pkts;

			dq_storage = dq_storage1;

			while (!qbman_check_command_complete(dq_storage))
				;
			goto repeat;
		} else {
			/* if this request did not return all pkts */
			goto next_time;
		}
	}

	q_storage->toggle ^= 1;
	dq_storage = q_storage->dq_storage[q_storage->toggle];
	q_storage->last_num_pkts = (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
				    DPAA2_DQRR_RING_SIZE : nb_pkts;
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, q_storage->last_num_pkts);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
		(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
	/* issue a volatile dequeue command for next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			PMD_RX_LOG(WARNING, "VDQ command is not issued. "
				   "QBMAN is busy\n");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage;
	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);

next_time:
	dpaa2_q->rx_pkts += num_rx;

	return num_rx;
}
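
/* Fill an rte_event from a frame dequeued in parallel (unordered) mode on
 * behalf of the DPAA2 eventdev; the DQRR entry is consumed immediately.
 */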
void __attribute__((hot))
dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	ev->mbuf = eth_fd_to_mbuf(fd);

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	qbman_swp_dqrr_consume(swp, dq);
}
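
/* Atomic-queue flavour of the event callback: the DQRR entry is held (not
 * consumed) and its index is recorded in the mbuf sequence number so the
 * entry can be released on the matching enqueue in dpaa2_dev_tx().
 */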
void dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
				    const struct qbman_fd *fd,
				    const struct qbman_result *dq,
				    struct dpaa2_queue *rxq,
				    struct rte_event *ev)
{
	uint8_t dqrr_index = qbman_get_dqrr_idx(dq);

	ev->mbuf = eth_fd_to_mbuf(fd);

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf->seqn = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}
/*
 * Callback to handle sending packets through WRIOP based interface
 */
uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to given device and VQ */
	uint32_t loop, retry_count;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	uint32_t frames_to_send;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint16_t bpid;
	struct rte_eth_dev *dev = dpaa2_q->dev;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	PMD_TX_LOG(DEBUG, "===> dev =%p, fqid =%d", dev, dpaa2_q->fqid);

	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
			     dpaa2_q->flow_id, dpaa2_q->tc_index);
	/* Clear the unused FD fields before sending */
	while (nb_pkts) {
		/* Check if the queue is congested */
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto skip_tx;
		}

		frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts;

		for (loop = 0; loop < frames_to_send; loop++) {
			if ((*bufs)->seqn) {
				uint8_t dqrr_index = (*bufs)->seqn - 1;

				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
						dqrr_index;
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
				(*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN;
			}

			fd_arr[loop].simple.frc = 0;
			DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
			DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL);
			if (likely(RTE_MBUF_DIRECT(*bufs))) {
				mp = (*bufs)->pool;
				/* Check the basic scenario and set
				 * the FD appropriately here itself.
				 */
				if (likely(mp && mp->ops_index ==
					priv->bp_list->dpaa2_ops_index &&
					(*bufs)->nb_segs == 1 &&
					rte_mbuf_refcnt_read((*bufs)) == 1)) {
					if (unlikely((*bufs)->ol_flags
						& PKT_TX_VLAN_PKT)) {
						ret = rte_vlan_insert(bufs);
						if (ret)
							goto send_n_return;
					}
					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
						&fd_arr[loop],
						mempool_to_bpid(mp));
					bufs++;
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}
			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp || !priv->bp_list)) {
				PMD_TX_LOG(ERR, "err: no bpool attached");
				goto send_n_return;
			}

			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				PMD_TX_LOG(ERR, "non hw offload buffer");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				bpid = priv->bp_list->buf_pool.bpid;

				if (unlikely((*bufs)->nb_segs > 1)) {
					PMD_TX_LOG(ERR, "S/G support not added"
						" for non hw offload buffer");
					goto send_n_return;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					goto send_n_return;
				}
				/* free the original packet */
				rte_pktmbuf_free(*bufs);
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							&fd_arr[loop], bpid))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop], bpid);
				}
			}
			bufs++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
					&fd_arr[loop], &flags[loop],
					frames_to_send - loop);
		}

		num_tx += frames_to_send;
		nb_pkts -= frames_to_send;
	}
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;

send_n_return:
	/* send any already prepared fd */
	if (loop) {
		unsigned int i = 0;

		while (i < loop) {
			i += qbman_swp_enqueue_multiple(swp, &eqdesc,
							&fd_arr[i], &flags[i],
							loop - i);
		}
		num_tx += loop;
	}
skip_tx:
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;
}
/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param queue   Generic pointer to TX queue structure.
 * @param bufs    Packets to transmit.
 * @param nb_pkts Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= nb_pkts).
 */
uint16_t
dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)