/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2019 NXP
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_pmd_logs.h"
#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
			struct dpaa2_annot_hdr *annotation);

static void enable_tx_tstamp(struct qbman_fd *fd) __attribute__((unused));
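
/* Populate a simple (contiguous) frame descriptor from a single-segment
 * mbuf: buffer address, length, buffer pool id and data offset, with the
 * FRC, CTRL and FLC fields reset.
 */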
#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
	DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
	DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
	DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
	DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
	DPAA2_SET_FD_FRC(_fd, 0); \
	DPAA2_RESET_FD_CTRL(_fd); \
	DPAA2_RESET_FD_FLC(_fd); \
} while (0)
static inline void __attribute__((hot))
dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd)
{
	struct dpaa2_annot_hdr *annotation;
	uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);

	m->packet_type = RTE_PTYPE_UNKNOWN;
	switch (frc) {
	case DPAA2_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA2_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA2_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA2_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA2_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA2_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA2_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA2_PKT_TYPE_IPV4_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
		break;
	case DPAA2_PKT_TYPE_IPV6_ICMP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
		break;
	default:
		m->packet_type = dpaa2_dev_rx_parse_slow(m,
		  (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_FD_PTA_SIZE));
	}
	m->hash.rss = fd->simple.flc_hi;
	m->ol_flags |= PKT_RX_RSS_HASH;

	if (dpaa2_enable_ts == PMD_DPAA2_ENABLE_TS) {
		annotation = (struct dpaa2_annot_hdr *)
			((size_t)DPAA2_IOVA_TO_VADDR(
			DPAA2_GET_FD_ADDR(fd)) + DPAA2_FD_PTA_SIZE);
		m->timestamp = annotation->word2;
		m->ol_flags |= PKT_RX_TIMESTAMP;
		DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "", m->timestamp);
	}

	DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
		"ol_flags =0x%" PRIx64 "",
		frc, m->packet_type, m->ol_flags);
}
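
/* Slow-path Rx parser: derives the packet type and offload flags from the
 * hardware annotation words (frame annotation status, L2/L3/L4 presence
 * bits) when the FRC summary is not sufficient.
 */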
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
			struct dpaa2_annot_hdr *annotation)
{
	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
	uint16_t *vlan_tci;

	DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
			"(4)=0x%" PRIx64 "\t",
			annotation->word3, annotation->word4);

#if defined(RTE_LIBRTE_IEEE1588)
	if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP))
		mbuf->ol_flags |= PKT_RX_IEEE1588_PTP;
#endif

	if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= PKT_RX_VLAN;
		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
		vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
			(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
		mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ;
		pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
	}

	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
		goto parse_done;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
		pkt_type |= RTE_PTYPE_L2_ETHER;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
			     L3_IPV4_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV4;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
			L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
		   L3_IPV6_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV6;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
		    L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
	    L3_IP_1_MORE_FRAGMENT |
	    L3_IP_N_FIRST_FRAGMENT |
	    L3_IP_N_MORE_FRAGMENT)) {
		pkt_type |= RTE_PTYPE_L4_FRAG;
		goto parse_done;
	} else {
		pkt_type |= RTE_PTYPE_L4_NONFRAG;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_UDP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_TCP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_SCTP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_ICMP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
		pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
	return pkt_type;
}
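
/* Combined Rx parser for non-LX2 platforms: trusts the annotation for the
 * checksum and timestamp results, and falls back to the slow parser when
 * word3 indicates anything beyond plain Ethernet/IP/TCP/UDP framing.
 */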
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
{
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
			   annotation->word4);

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

	mbuf->ol_flags |= PKT_RX_TIMESTAMP;
	mbuf->timestamp = annotation->word2;
	DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "", mbuf->timestamp);

	/* Check detailed parsing requirement */
	if (annotation->word3 & 0x7FFFFC3FFFF)
		return dpaa2_dev_rx_parse_slow(mbuf, annotation);

	/* Return some common types from parse processing */
	switch (annotation->word4) {
	case DPAA2_L3_IPv4:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
	case DPAA2_L3_IPv6:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
	case DPAA2_L3_IPv4_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv4_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_UDP;
	case DPAA2_L3_IPv6_TCP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_TCP;
	case DPAA2_L3_IPv6_UDP:
		return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_UDP;
	default:
		break;
	}

	return dpaa2_dev_rx_parse_slow(mbuf, annotation);
}
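
/* Build a chained mbuf from a scatter/gather frame descriptor: the first
 * SGE becomes the head segment (carrying packet level metadata), each
 * following SGE is linked as a new segment, and the buffer holding the
 * S/G table itself is released back to its pool.
 */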
static inline struct rte_mbuf *__attribute__((hot))
eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
		  int port_id)
{
	struct qbman_sge *sgt, *sge;
	size_t sg_addr, fd_addr;
	int i = 0;
	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

	fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	/* Get Scatter gather table address */
	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));

	sge = &sgt[i++];
	sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

	/* First Scatter gather entry */
	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	/* Prepare all the metadata for first segment */
	first_seg->buf_addr = (uint8_t *)sg_addr;
	first_seg->ol_flags = 0;
	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
	first_seg->data_len = sge->length & 0x1FFFF;
	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
	first_seg->nb_segs = 1;
	first_seg->next = NULL;
	first_seg->port = port_id;
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(first_seg, fd);
	else
		first_seg->packet_type = dpaa2_dev_rx_parse(first_seg,
			(void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			 + DPAA2_FD_PTA_SIZE));

	rte_mbuf_refcnt_set(first_seg, 1);
	cur_seg = first_seg;
	while (!DPAA2_SG_IS_FINAL(sge)) {
		sge = &sgt[i++];
		sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
				DPAA2_GET_FLE_ADDR(sge));
		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
		next_seg->buf_addr = (uint8_t *)sg_addr;
		next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
		next_seg->data_len = sge->length & 0x1FFFF;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(next_seg, 1);
		cur_seg->next = next_seg;
		next_seg->next = NULL;
		cur_seg = next_seg;
	}
	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	rte_mbuf_refcnt_set(temp, 1);
	rte_pktmbuf_free_seg(temp);

	return (void *)first_seg;
}
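
/* Convert a simple (contiguous) frame descriptor back into the in-place
 * mbuf that precedes the hardware buffer.
 */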
static inline struct rte_mbuf *__attribute__((hot))
eth_fd_to_mbuf(const struct qbman_fd *fd,
	       int port_id)
{
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* need to repopulate some of the fields,
	 * as they may have changed in last transmission
	 */
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
	mbuf->pkt_len = mbuf->data_len;
	mbuf->port = port_id;
	mbuf->next = NULL;
	rte_mbuf_refcnt_set(mbuf, 1);

	/* Parse the packet */
	/* parse results for LX2 are there in FRC field of FD.
	 * For other DPAA2 platforms, parse results are after
	 * the private - sw annotation area
	 */
	if (dpaa2_svr_family == SVR_LX2160A)
		dpaa2_dev_rx_parse_new(mbuf, fd);
	else
		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf,
			(void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			 + DPAA2_FD_PTA_SIZE));

	DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	return mbuf;
}
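
/* Build a scatter/gather frame descriptor from a multi-segment mbuf. A
 * buffer is borrowed from the mbuf's pool to hold the S/G table; reference
 * counts decide whether hardware may free each segment (a valid bpid) or
 * must leave it alone (the invalid bpid, IVP).
 */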
static int __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		  struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
	struct qbman_sge *sgt, *sge = NULL;
	int i;

	temp = rte_pktmbuf_alloc(mbuf->pool);
	if (temp == NULL) {
		DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
		return -ENOMEM;
	}

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
	DPAA2_SET_ONLY_FD_BPID(fd, bpid);
	DPAA2_SET_FD_OFFSET(fd, temp->data_off);
	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
	DPAA2_RESET_FD_FRC(fd);
	DPAA2_RESET_FD_CTRL(fd);
	/*Set Scatter gather table and Scatter gather entries*/
	sgt = (struct qbman_sge *)(
			(size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
			+ DPAA2_GET_FD_OFFSET(fd));

	for (i = 0; i < mbuf->nb_segs; i++) {
		sge = &sgt[i];
		/*Resetting the buffer pool id and offset field*/
		sge->fin_bpid_offset = 0;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
		DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
		sge->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			if (rte_mbuf_refcnt_read(cur_seg) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
				rte_mbuf_refcnt_update(cur_seg, -1);
			} else {
				DPAA2_SET_FLE_BPID(sge,
						mempool_to_bpid(cur_seg->pool));
			}
			cur_seg = cur_seg->next;
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * owner buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
			} else {
				DPAA2_SET_FLE_BPID(sge,
						   mempool_to_bpid(mi->pool));
				rte_mbuf_refcnt_update(mi, 1);
			}
			prev_seg = cur_seg;
			cur_seg = cur_seg->next;
			prev_seg->next = NULL;
			rte_pktmbuf_free(prev_seg);
		}
	}
	DPAA2_SG_SET_FINAL(sge, true);
	return 0;
}
static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid) __attribute__((unused));

static void __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid)
{
	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);

	DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(mbuf, -1);
		}
	} else {
		struct rte_mbuf *mi;

		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1)
			DPAA2_SET_FD_IVP(fd);
		else
			rte_mbuf_refcnt_update(mi, 1);
		rte_pktmbuf_free(mbuf);
	}
}
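
/* Copy the packet into a fresh buffer taken from the port's hardware pool
 * and build a contiguous FD over the copy. Used when the mbuf comes from a
 * pool that QBMAN cannot release buffers to.
 */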
static inline int __attribute__((hot))
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
		    struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *m;
	void *mb = NULL;

	if (rte_dpaa2_mbuf_alloc_bulk(
		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
		DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
		return -1;
	}
	m = (struct rte_mbuf *)mb;
	memcpy((char *)m->buf_addr + mbuf->data_off,
	       (void *)((char *)mbuf->buf_addr + mbuf->data_off),
		mbuf->pkt_len);

	/* Copy required fields */
	m->data_off = mbuf->data_off;
	m->ol_flags = mbuf->ol_flags;
	m->packet_type = mbuf->packet_type;
	m->tx_offload = mbuf->tx_offload;

	DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);

	DPAA2_PMD_DP_DEBUG(
		"mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
		" meta: %d, off: %d, len: %d\n",
		(void *)mbuf,
		mbuf->buf_addr,
		DPAA2_GET_FD_ADDR(fd),
		DPAA2_GET_FD_BPID(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_OFFSET(fd),
		DPAA2_GET_FD_LEN(fd));

	return 0;
}
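
/* Rx with prefetch: two DQRR storages are used in ping-pong fashion. While
 * frames from the previously issued pull are being processed, the next
 * volatile dequeue command is already issued on the other storage.
 */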
/* This function assumes that the caller will keep the same value of nb_pkts
 * across calls per queue. If that is not the case, better use the
 * non-prefetch version of the rx call.
 * It will return the packets as requested in the previous call, without
 * honoring the current nb_pkts or bufs space.
 */
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to receive frames for a given device and VQ*/
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, pull_size;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd, *next_fd;
	struct qbman_pull_desc pulldesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
#if defined(RTE_LIBRTE_IEEE1588)
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
#endif

	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
		ret = dpaa2_affine_qbman_ethrx_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}

	if (unlikely(!rte_dpaa2_bpid_info &&
		     rte_eal_process_type() == RTE_PROC_SECONDARY))
		rte_dpaa2_bpid_info = dpaa2_q->bp_array;

	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(
			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
						   " QBMAN is busy (1)\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare next pull descriptor. This will give space for the
	 * prefetching done on DQRR entries
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/* Check if the previous issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd = qbman_result_DQ_fd(dq_storage);

		if (dpaa2_svr_family != SVR_LX2160A) {
			next_fd = qbman_result_DQ_fd(dq_storage + 1);
			/* Prefetch Annotation address for the parse results */
			rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(
				      next_fd) + DPAA2_FD_PTA_SIZE + 16));
		}

		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
			bufs[num_rx] = eth_sg_fd_to_mbuf(fd,
							 eth_data->port_id);
		else
			bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
#if defined(RTE_LIBRTE_IEEE1588)
		priv->rx_timestamp = bufs[num_rx]->timestamp;
#endif

		if (eth_data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_VLAN_STRIP)
			rte_vlan_strip(bufs[num_rx]);

		dq_storage++;
		num_rx++;
	} while (pending);

	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
	}
	/* issue a volatile dequeue command for next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
					   "QBMAN is busy (2)\n");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);

	dpaa2_q->rx_pkts += num_rx;

	return num_rx;
}
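
/* Event dequeue callback (parallel queues): fills in the rte_event from the
 * Rx queue's event template, converts the FD to an mbuf and consumes the
 * DQRR entry immediately.
 */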
void __attribute__((hot))
dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

	qbman_swp_dqrr_consume(swp, dq);
}
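
/* Event dequeue callback (atomic queues): the DQRR entry is held rather
 * than consumed here; its index is stashed in mbuf->seqn so the enqueue
 * path can consume it via DCA later.
 */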
void __attribute__((hot))
dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
			       const struct qbman_fd *fd,
			       const struct qbman_result *dq,
			       struct dpaa2_queue *rxq,
			       struct rte_event *ev)
{
	uint8_t dqrr_index;

	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

	dqrr_index = qbman_get_dqrr_idx(dq);
	ev->mbuf->seqn = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}
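
/* Event dequeue callback (ordered queues): encodes the ODP id and sequence
 * number into mbuf->seqn so ordering can be restored on enqueue.
 */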
void __attribute__((hot))
dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
				const struct qbman_fd *fd,
				const struct qbman_result *dq,
				struct dpaa2_queue *rxq,
				struct rte_event *ev)
{
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
		DPAA2_FD_PTA_SIZE + 16));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_ETHDEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

	ev->mbuf->seqn = DPAA2_ENQUEUE_FLAG_ORP;
	ev->mbuf->seqn |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
	ev->mbuf->seqn |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;

	qbman_swp_dqrr_consume(swp, dq);
}
uint16_t
dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to receive frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd, *next_fd;
	struct qbman_pull_desc pulldesc;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	do {
		dq_storage = dpaa2_q->q_storage->dq_storage[0];
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);

		if (next_pull > dpaa2_dqrr_size) {
			qbman_pull_desc_set_numframes(&pulldesc,
				dpaa2_dqrr_size);
			next_pull -= dpaa2_dqrr_size;
		} else {
			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
			next_pull = 0;
		}

		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG(
					"VDQ command is not issued.QBMAN is busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}

		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
		/* Check if the previous issued command is completed. */
		while (!qbman_check_command_complete(dq_storage))
			;

		num_pulled = 0;
		pending = 1;
		do {
			/* Loop until the dq_storage is updated with
			 * new token by QBMAN
			 */
			while (!qbman_check_new_result(dq_storage))
				;
			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
			/* Check whether Last Pull command is Expired and
			 * setting Condition for Loop termination
			 */
			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
				pending = 0;
				/* Check for valid frame. */
				status = qbman_result_DQ_flags(dq_storage);
				if (unlikely((status &
					QBMAN_DQ_STAT_VALIDFRAME) == 0))
					continue;
			}
			fd = qbman_result_DQ_fd(dq_storage);

			next_fd = qbman_result_DQ_fd(dq_storage + 1);
			/* Prefetch Annotation address for the parse results */
			rte_prefetch0(
				(void *)(size_t)(DPAA2_GET_FD_ADDR(next_fd)
					+ DPAA2_FD_PTA_SIZE + 16));

			if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
				bufs[num_rx] = eth_sg_fd_to_mbuf(fd,
							eth_data->port_id);
			else
				bufs[num_rx] = eth_fd_to_mbuf(fd,
							eth_data->port_id);

			if (eth_data->dev_conf.rxmode.offloads &
					DEV_RX_OFFLOAD_VLAN_STRIP) {
				rte_vlan_strip(bufs[num_rx]);
			}

			dq_storage++;
			num_rx++;
			num_pulled++;
		} while (pending);

	/* Last VDQ provided all packets and more packets are requested */
	} while (next_pull && num_pulled == dpaa2_dqrr_size);

	dpaa2_q->rx_pkts += num_rx;

	return num_rx;
}
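
/* Drain the Tx confirmation queue: pulls confirmation frames and releases
 * their buffers back to the hardware buffer pool via QBMAN; with IEEE1588
 * enabled it also records the Tx timestamp from the annotation.
 */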
uint16_t dpaa2_dev_tx_conf(void *queue)
{
	/* Function to receive tx confirmation frames for a given device
	 * and VQ
	 */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_tx_conf = 0, num_pulled;
	uint8_t pending, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd, *next_fd;
	struct qbman_pull_desc pulldesc;
	struct qbman_release_desc releasedesc;
	uint32_t bpid;
	uint64_t buf;
#if defined(RTE_LIBRTE_IEEE1588)
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_annot_hdr *annotation;
#endif

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	do {
		dq_storage = dpaa2_q->q_storage->dq_storage[0];
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);

		qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);

		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
						   "QBMAN is busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}

		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
		/* Check if the previous issued command is completed. */
		while (!qbman_check_command_complete(dq_storage))
			;

		num_pulled = 0;
		pending = 1;
		do {
			/* Loop until the dq_storage is updated with
			 * new token by QBMAN
			 */
			while (!qbman_check_new_result(dq_storage))
				;
			rte_prefetch0((void *)((size_t)(dq_storage + 2)));
			/* Check whether Last Pull command is Expired and
			 * setting Condition for Loop termination
			 */
			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
				pending = 0;
				/* Check for valid frame. */
				status = qbman_result_DQ_flags(dq_storage);
				if (unlikely((status &
					QBMAN_DQ_STAT_VALIDFRAME) == 0))
					continue;
			}
			fd = qbman_result_DQ_fd(dq_storage);

			next_fd = qbman_result_DQ_fd(dq_storage + 1);
			/* Prefetch Annotation address for the parse results */
			rte_prefetch0((void *)(size_t)
				(DPAA2_GET_FD_ADDR(next_fd) +
				 DPAA2_FD_PTA_SIZE + 16));

			bpid = DPAA2_GET_FD_BPID(fd);

			/* Create a release descriptor required for releasing
			 * buffers into QBMAN
			 */
			qbman_release_desc_clear(&releasedesc);
			qbman_release_desc_set_bpid(&releasedesc, bpid);

			buf = DPAA2_GET_FD_ADDR(fd);
			/* feed them to bman */
			do {
				ret = qbman_swp_release(swp, &releasedesc,
							&buf, 1);
			} while (ret == -EBUSY);

			dq_storage++;
			num_tx_conf++;
			num_pulled++;
#if defined(RTE_LIBRTE_IEEE1588)
			annotation = (struct dpaa2_annot_hdr *)((size_t)
				DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
				DPAA2_FD_PTA_SIZE);
			priv->tx_timestamp = annotation->word2;
#endif
		} while (pending);

	/* Last VDQ provided all packets and more packets are requested */
	} while (num_pulled == dpaa2_dqrr_size);

	dpaa2_q->rx_pkts += num_tx_conf;

	return num_tx_conf;
}
/* Configure the egress frame annotation for timestamp update */
static void enable_tx_tstamp(struct qbman_fd *fd)
{
	struct dpaa2_faead *fd_faead;

	/* Set frame annotation status field as valid */
	(fd)->simple.frc |= DPAA2_FD_FRC_FASV;

	/* Set frame annotation egress action descriptor as valid */
	(fd)->simple.frc |= DPAA2_FD_FRC_FAEADV;

	/* Set Annotation Length as 128B */
	(fd)->simple.ctrl |= DPAA2_FD_CTRL_ASAL;

	/* enable update of confirmation frame annotation */
	fd_faead = (struct dpaa2_faead *)((size_t)
			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
			DPAA2_FD_PTA_SIZE + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET);
	fd_faead->ctrl = DPAA2_ANNOT_FAEAD_A2V | DPAA2_ANNOT_FAEAD_UPDV |
				DPAA2_ANNOT_FAEAD_UPD;
}
/*
 * Callback to handle sending packets through WRIOP based interface
 */
uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to given device and VQ*/
	uint32_t loop, retry_count;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	uint32_t frames_to_send;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint16_t bpid;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	uint32_t flags[MAX_TX_RING_SLOTS] = {0};

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
			   eth_data, dpaa2_q->fqid);

#ifdef RTE_LIBRTE_IEEE1588
	/* The IEEE1588 driver needs a pointer to the tx confirmation queue
	 * corresponding to the last transmitted packet, for reading
	 * the timestamp
	 */
	priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
	dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
#endif

	/*Prepare enqueue descriptor*/
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_fq(&eqdesc, dpaa2_q->fqid);

	/*Clear the unused FD fields before sending*/
	while (nb_pkts) {
		/*Check if the queue is congested*/
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto skip_tx;
		}

		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_pkts;

		for (loop = 0; loop < frames_to_send; loop++) {
			if ((*bufs)->seqn) {
				uint8_t dqrr_index = (*bufs)->seqn - 1;

				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
						dqrr_index;
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
				(*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN;
			}

			if (likely(RTE_MBUF_DIRECT(*bufs))) {
				mp = (*bufs)->pool;
				/* Check the basic scenario and set
				 * the FD appropriately here itself.
				 */
				if (likely(mp && mp->ops_index ==
				    priv->bp_list->dpaa2_ops_index &&
				    (*bufs)->nb_segs == 1 &&
				    rte_mbuf_refcnt_read((*bufs)) == 1)) {
					if (unlikely(((*bufs)->ol_flags
						& PKT_TX_VLAN_PKT) ||
						(eth_data->dev_conf.txmode.offloads
						& DEV_TX_OFFLOAD_VLAN_INSERT))) {
						ret = rte_vlan_insert(bufs);
						if (ret)
							goto send_n_return;
					}
					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
						&fd_arr[loop],
						mempool_to_bpid(mp));
					bufs++;
#ifdef RTE_LIBRTE_IEEE1588
					enable_tx_tstamp(&fd_arr[loop]);
#endif
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}
			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp || !priv->bp_list)) {
				DPAA2_PMD_ERR("Err: No buffer pool attached");
				goto send_n_return;
			}

			if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
				(eth_data->dev_conf.txmode.offloads
				& DEV_TX_OFFLOAD_VLAN_INSERT))) {
				int ret = rte_vlan_insert(bufs);
				if (ret)
					goto send_n_return;
			}

			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				bpid = priv->bp_list->buf_pool.bpid;

				if (unlikely((*bufs)->nb_segs > 1)) {
					DPAA2_PMD_ERR("S/G support not added"
						" for non hw offload buffer");
					goto send_n_return;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					goto send_n_return;
				}
				/* free the original packet */
				rte_pktmbuf_free(*bufs);
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							&fd_arr[loop], bpid))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop], bpid);
				}
			}
#ifdef RTE_LIBRTE_IEEE1588
			enable_tx_tstamp(&fd_arr[loop]);
#endif
			bufs++;
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
					&fd_arr[loop], &flags[loop],
					frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					nb_pkts -= loop;
					goto send_n_return;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		nb_pkts -= loop;
	}
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;

send_n_return:
	/* send any already prepared fd */
	if (loop) {
		unsigned int i = 0;

		retry_count = 0;
		while (i < loop) {
			ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
					&fd_arr[i], &flags[i], loop - i);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
					break;
			} else {
				i += ret;
				retry_count = 0;
			}
		}
		num_tx += i;
	}
skip_tx:
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;
}
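
/* Callback invoked when an enqueue response is consumed: recovers the mbuf
 * behind the FD stored in the response and frees it.
 */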
static void
dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci)
{
	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
	struct qbman_fd *fd;
	struct rte_mbuf *m;

	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);

	/* Setting port id does not matter as we are to free the mbuf */
	m = eth_fd_to_mbuf(fd, 0);
	rte_pktmbuf_free(m);
}
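
/* Fill an enqueue descriptor for an event-sourced packet: either restore
 * the order (ORP) using the sequence number carried in mbuf->seqn, or
 * issue a DCA consume for the held DQRR entry.
 */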
static void
dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
			     struct rte_mbuf *m,
			     struct qbman_eq_desc *eqdesc)
{
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
	struct eqresp_metadata *eqresp_meta;
	uint16_t orpid, seqnum;
	uint8_t dq_idx;

	qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid);

	if (m->seqn & DPAA2_ENQUEUE_FLAG_ORP) {
		orpid = (m->seqn & DPAA2_EQCR_OPRID_MASK) >>
			DPAA2_EQCR_OPRID_SHIFT;
		seqnum = (m->seqn & DPAA2_EQCR_SEQNUM_MASK) >>
			DPAA2_EQCR_SEQNUM_SHIFT;

		if (!priv->en_loose_ordered) {
			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
			qbman_eq_desc_set_response(eqdesc, (uint64_t)
				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
				dpio_dev->eqresp_pi]), 1);
			qbman_eq_desc_set_token(eqdesc, 1);

			eqresp_meta = &dpio_dev->eqresp_meta[
				dpio_dev->eqresp_pi];
			eqresp_meta->dpaa2_q = dpaa2_q;
			eqresp_meta->mp = m->pool;

			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
				dpio_dev->eqresp_pi++ :
				(dpio_dev->eqresp_pi = 0);
		} else {
			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
		}
	} else {
		dq_idx = m->seqn - 1;
		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
		DPAA2_PER_LCORE_DQRR_SIZE--;
		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
	}
	m->seqn = DPAA2_INVALID_MBUF_SEQN;
}
/* Callback to handle sending ordered packets through WRIOP based interface */
uint16_t
dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to given device and VQ*/
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	struct qbman_swp *swp;
	uint32_t frames_to_send, num_free_eq_desc;
	uint32_t loop, retry_count;
	int32_t ret;
	uint16_t num_tx = 0;
	uint16_t bpid;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
			   eth_data, dpaa2_q->fqid);

	/* This would also handle normal and atomic queues as any type
	 * of packet can be enqueued when ordered queues are being used.
	 */
	while (nb_pkts) {
		/*Check if the queue is congested*/
		retry_count = 0;
		while (qbman_result_SCN_state(dpaa2_q->cscn)) {
			retry_count++;
			/* Retry for some time before giving up */
			if (retry_count > CONG_RETRY_COUNT)
				goto skip_tx;
		}

		frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_pkts;

		if (!priv->en_loose_ordered) {
			if ((*bufs)->seqn & DPAA2_ENQUEUE_FLAG_ORP) {
				num_free_eq_desc = dpaa2_free_eq_descriptors();
				if (num_free_eq_desc < frames_to_send)
					frames_to_send = num_free_eq_desc;
			}
		}

		for (loop = 0; loop < frames_to_send; loop++) {
			/*Prepare enqueue descriptor*/
			qbman_eq_desc_clear(&eqdesc[loop]);

			if ((*bufs)->seqn) {
				/* Use only queue 0 for Tx in case of atomic/
				 * ordered packets as packets can get unordered
				 * when being transmitted out from the interface
				 */
				dpaa2_set_enqueue_descriptor(order_sendq,
							     (*bufs),
							     &eqdesc[loop]);
			} else {
				qbman_eq_desc_set_no_orp(&eqdesc[loop],
							 DPAA2_EQ_RESP_ERR_FQ);
				qbman_eq_desc_set_fq(&eqdesc[loop],
						     dpaa2_q->fqid);
			}

			if (likely(RTE_MBUF_DIRECT(*bufs))) {
				mp = (*bufs)->pool;
				/* Check the basic scenario and set
				 * the FD appropriately here itself.
				 */
				if (likely(mp && mp->ops_index ==
					priv->bp_list->dpaa2_ops_index &&
					(*bufs)->nb_segs == 1 &&
					rte_mbuf_refcnt_read((*bufs)) == 1)) {
					if (unlikely((*bufs)->ol_flags
						& PKT_TX_VLAN_PKT)) {
						ret = rte_vlan_insert(bufs);
						if (ret)
							goto send_n_return;
					}
					DPAA2_MBUF_TO_CONTIG_FD((*bufs),
						&fd_arr[loop],
						mempool_to_bpid(mp));
					bufs++;
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}
			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp || !priv->bp_list)) {
				DPAA2_PMD_ERR("Err: No buffer pool attached");
				goto send_n_return;
			}

			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				DPAA2_PMD_WARN("Non DPAA2 buffer pool");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				bpid = priv->bp_list->buf_pool.bpid;

				if (unlikely((*bufs)->nb_segs > 1)) {
					DPAA2_PMD_ERR(
						"S/G not supp for non hw offload buffer");
					goto send_n_return;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					goto send_n_return;
				}
				/* free the original packet */
				rte_pktmbuf_free(*bufs);
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							      &fd_arr[loop],
							      bpid))
						goto send_n_return;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop], bpid);
				}
			}
			bufs++;
		}

		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
					num_tx += loop;
					nb_pkts -= loop;
					goto send_n_return;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}

		num_tx += loop;
		nb_pkts -= loop;
	}
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;

send_n_return:
	/* send any already prepared fd */
	if (loop) {
		unsigned int i = 0;

		retry_count = 0;
		while (i < loop) {
			ret = qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[i], loop - i);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
					break;
			} else {
				i += ret;
				retry_count = 0;
			}
		}
		num_tx += i;
	}
skip_tx:
	dpaa2_q->tx_pkts += num_tx;
	return num_tx;
}
/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param queue   Generic pointer to TX queue structure.
 * @param[in] bufs   Packets to transmit.
 * @param nb_pkts   Number of packets in array.
 * @return   Number of packets successfully transmitted (<= nb_pkts).
 */
uint16_t
dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	(void)queue;
	(void)bufs;
	(void)nb_pkts;
	return 0;
}
#if defined(RTE_TOOLCHAIN_GCC)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#elif defined(RTE_TOOLCHAIN_CLANG)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
#endif
/* This function loops back all the received packets.*/
uint16_t
dpaa2_dev_loopback_rx(void *queue,
		      struct rte_mbuf **bufs __rte_unused,
		      uint16_t nb_pkts)
{
	/* Function to receive frames for a given device and VQ*/
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0, num_tx = 0, pull_size;
	uint8_t pending, status;
	struct qbman_swp *swp;
	struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
	struct qbman_pull_desc pulldesc;
	struct qbman_eq_desc eqdesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
	struct dpaa2_dev_priv *priv = eth_data->dev_private;
	struct dpaa2_queue *tx_q = priv->tx_vq[0];
	/* todo - currently we are using 1st TX queue only for loopback*/

	if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
		ret = dpaa2_affine_qbman_ethrx_swp();
		if (ret) {
			DPAA2_PMD_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
	pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
			while (!qbman_check_command_complete(
			       get_swp_active_dqs(
			       DPAA2_PER_LCORE_ETHRX_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_PMD_DP_DEBUG(
					"VDQ command not issued.QBMAN busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare next pull descriptor. This will give space for the
	 * prefetching done on DQRR entries
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/*Prepare enqueue descriptor*/
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);

	/* Check if the previous issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage);

		dq_storage++;
		num_rx++;
	} while (pending);

	while (num_tx < num_rx) {
		num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
				&fd[num_tx], 0, num_rx - num_tx);
	}

	if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
		while (!qbman_check_command_complete(
		       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
	}
	/* issue a volatile dequeue command for next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
					   "QBMAN is busy (2)\n");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);

	dpaa2_q->rx_pkts += num_rx;
	dpaa2_q->tx_pkts += num_tx;

	return 0;
}
#if defined(RTE_TOOLCHAIN_GCC)
#pragma GCC diagnostic pop
#elif defined(RTE_TOOLCHAIN_CLANG)
#pragma clang diagnostic pop
#endif