/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2017,2019 NXP
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_net.h>
#include <rte_eventdev.h>

#include "dpaa_ethdev.h"
#include "dpaa_rxtx.h"
#include <rte_dpaa_bus.h>
#include <dpaa_mempool.h>

#define DPAA_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) \
    do { \
        (_fd)->cmd = 0; \
        (_fd)->opaque_addr = 0; \
        (_fd)->opaque = QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT; \
        (_fd)->opaque |= ((_mbuf)->data_off) << DPAA_FD_OFFSET_SHIFT; \
        (_fd)->opaque |= (_mbuf)->pkt_len; \
        (_fd)->addr = (_mbuf)->buf_iova; \
        (_fd)->bpid = _bpid; \
    } while (0)

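/* Debug-only helper: dump the frame descriptor fields and the raw payload
 * bytes of a received frame. Compiled in only when
 * RTE_LIBRTE_DPAA_DEBUG_DRIVER is defined; otherwise it is a no-op macro.
 */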
#if (defined RTE_LIBRTE_DPAA_DEBUG_DRIVER)
static void dpaa_display_frame(const struct qm_fd *fd)
{
    int ii;
    char *ptr;

    printf("%s::bpid %x addr %08x%08x, format %d off %d, len %d stat %x\n",
           __func__, fd->bpid, fd->addr_hi, fd->addr_lo, fd->format,
           fd->offset, fd->length20, fd->status);

    ptr = (char *)rte_dpaa_mem_ptov(fd->addr);
    ptr += fd->offset;
    printf("%02x ", *ptr);
    for (ii = 1; ii < fd->length20; ii++) {
        ptr++;
        printf("%02x ", *ptr);
    }
    printf("\n");
}
#else
#define dpaa_display_frame(a)
#endif

static inline void dpaa_slow_parsing(struct rte_mbuf *m __rte_unused,
                                     uint64_t prs __rte_unused)
{
    DPAA_DP_LOG(DEBUG, "Slow parsing");
    /*TBD:XXX: to be implemented*/
}

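/* Translate the FMan parse results found in the frame annotation area into
 * the mbuf packet_type, ol_flags, RSS hash and L2/L3 length fields.
 */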
static inline void dpaa_eth_packet_info(struct rte_mbuf *m, void *fd_virt_addr)
{
    struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr);
    uint64_t prs = *((uintptr_t *)(&annot->parse)) & DPAA_PARSE_MASK;

    DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot);

    switch (prs) {
    case DPAA_PKT_TYPE_IPV4:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV4;
        break;
    case DPAA_PKT_TYPE_IPV6:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV6;
        break;
    case DPAA_PKT_TYPE_ETHER:
        m->packet_type = RTE_PTYPE_L2_ETHER;
        break;
    case DPAA_PKT_TYPE_IPV4_FRAG:
    case DPAA_PKT_TYPE_IPV4_FRAG_UDP:
    case DPAA_PKT_TYPE_IPV4_FRAG_TCP:
    case DPAA_PKT_TYPE_IPV4_FRAG_SCTP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG;
        break;
    case DPAA_PKT_TYPE_IPV6_FRAG:
    case DPAA_PKT_TYPE_IPV6_FRAG_UDP:
    case DPAA_PKT_TYPE_IPV6_FRAG_TCP:
    case DPAA_PKT_TYPE_IPV6_FRAG_SCTP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG;
        break;
    case DPAA_PKT_TYPE_IPV4_EXT:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV4_EXT;
        break;
    case DPAA_PKT_TYPE_IPV6_EXT:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV6_EXT;
        break;
    case DPAA_PKT_TYPE_IPV4_TCP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
        break;
    case DPAA_PKT_TYPE_IPV6_TCP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
        break;
    case DPAA_PKT_TYPE_IPV4_UDP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
        break;
    case DPAA_PKT_TYPE_IPV6_UDP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
        break;
    case DPAA_PKT_TYPE_IPV4_EXT_UDP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP;
        break;
    case DPAA_PKT_TYPE_IPV6_EXT_UDP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP;
        break;
    case DPAA_PKT_TYPE_IPV4_EXT_TCP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP;
        break;
    case DPAA_PKT_TYPE_IPV6_EXT_TCP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP;
        break;
    case DPAA_PKT_TYPE_IPV4_SCTP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
        break;
    case DPAA_PKT_TYPE_IPV6_SCTP:
        m->packet_type = RTE_PTYPE_L2_ETHER |
            RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
        break;
    case DPAA_PKT_TYPE_NONE:
        m->packet_type = 0;
        break;
    /* More switch cases can be added */
    default:
        dpaa_slow_parsing(m, prs);
    }

    m->tx_offload = annot->parse.ip_off[0];
    m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0])
        << DPAA_PKT_L3_LEN_SHIFT;

    /* Set the hash values */
    m->hash.rss = (uint32_t)(annot->hash);
    /* All packets with Bad checksum are dropped by interface (and
     * corresponding notification issued to RX error queues).
     */
    m->ol_flags = PKT_RX_RSS_HASH | PKT_RX_IP_CKSUM_GOOD;

    /* Check if Vlan is present */
    if (prs & DPAA_PARSE_VLAN_MASK)
        m->ol_flags |= PKT_RX_VLAN;
    /* Packet received without stripping the vlan */
}

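/* Software checksum fallback: compute the IPv4 header checksum and the
 * TCP/UDP checksum on the CPU when hardware offload cannot be used.
 */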
static inline void dpaa_checksum(struct rte_mbuf *mbuf)
{
    struct rte_ether_hdr *eth_hdr =
        rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
    char *l3_hdr = (char *)eth_hdr + mbuf->l2_len;
    struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
    struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)l3_hdr;

    DPAA_DP_LOG(DEBUG, "Calculating checksum for mbuf: %p", mbuf);

    if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
        ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
        RTE_PTYPE_L3_IPV4_EXT)) {
        ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
        ipv4_hdr->hdr_checksum = 0;
        ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
    } else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
               RTE_PTYPE_L3_IPV6) ||
               ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
               RTE_PTYPE_L3_IPV6_EXT))
        ipv6_hdr = (struct ipv6_hdr *)l3_hdr;

    if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
        struct tcp_hdr *tcp_hdr = (struct tcp_hdr *)(l3_hdr +
                                  mbuf->l3_len);

        tcp_hdr->cksum = 0;
        if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPv4))
            tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
                                                   tcp_hdr);
        else /* assume ethertype == RTE_ETHER_TYPE_IPv6 */
            tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
                                                   tcp_hdr);
    } else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) ==
               RTE_PTYPE_L4_UDP) {
        struct udp_hdr *udp_hdr = (struct udp_hdr *)(l3_hdr +
                                  mbuf->l3_len);

        udp_hdr->dgram_cksum = 0;
        if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPv4))
            udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
                                                         udp_hdr);
        else /* assume ethertype == RTE_ETHER_TYPE_IPv6 */
            udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
                                                         udp_hdr);
    }
}

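/* Fill in the Tx parse results in front of the packet and set the FD command
 * bits so that FMan computes the L3 (and TCP/UDP L4) checksums in hardware.
 */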
static inline void dpaa_checksum_offload(struct rte_mbuf *mbuf,
                                         struct qm_fd *fd, char *prs_buf)
{
    struct dpaa_eth_parse_results_t *prs;

    DPAA_DP_LOG(DEBUG, " Offloading checksum for mbuf: %p", mbuf);

    prs = GET_TX_PRS(prs_buf);
    prs->l3r = 0;
    prs->l4r = 0;
    if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
        ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
        RTE_PTYPE_L3_IPV4_EXT))
        prs->l3r = DPAA_L3_PARSE_RESULT_IPV4;
    else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
             RTE_PTYPE_L3_IPV6) ||
             ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
             RTE_PTYPE_L3_IPV6_EXT))
        prs->l3r = DPAA_L3_PARSE_RESULT_IPV6;

    if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
        prs->l4r = DPAA_L4_PARSE_RESULT_TCP;
    else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
        prs->l4r = DPAA_L4_PARSE_RESULT_UDP;

    prs->ip_off[0] = mbuf->l2_len;
    prs->l4_off = mbuf->l3_len + mbuf->l2_len;
    /* Enable L3 (and L4, if TCP or UDP) HW checksum */
    fd->cmd = DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC;
}

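/* Checksum handling for contiguous (non-SG) frames: use hardware offload when
 * the headroom can hold the parse results, otherwise compute it in software.
 */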
static inline void
dpaa_unsegmented_checksum(struct rte_mbuf *mbuf, struct qm_fd *fd_arr)
{
    if (!mbuf->packet_type) {
        struct rte_net_hdr_lens hdr_lens;

        mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
                RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
                | RTE_PTYPE_L4_MASK);
        mbuf->l2_len = hdr_lens.l2_len;
        mbuf->l3_len = hdr_lens.l3_len;
    }

    if (mbuf->data_off < (DEFAULT_TX_ICEOF +
                          sizeof(struct dpaa_eth_parse_results_t))) {
        DPAA_DP_LOG(DEBUG, "Checksum offload Err: "
                    "Not enough Headroom "
                    "space for correct Checksum offload."
                    "So Calculating checksum in Software.");
        dpaa_checksum(mbuf);
    } else {
        dpaa_checksum_offload(mbuf, fd_arr, mbuf->buf_addr);
    }
}

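/* Convert a received scatter/gather frame descriptor into a chained mbuf,
 * one segment per SG table entry; the buffer holding the SG table itself is
 * freed before returning.
 */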
struct rte_mbuf *
dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
{
    struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
    struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
    struct qm_sg_entry *sgt, *sg_temp;
    void *vaddr, *sg_vaddr;
    int i = 0;
    uint8_t fd_offset = fd->offset;

    vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
    if (!vaddr) {
        DPAA_PMD_ERR("unable to convert physical address");
        return NULL;
    }
    sgt = vaddr + fd_offset;
    sg_temp = &sgt[i++];
    hw_sg_to_cpu(sg_temp);
    temp = (struct rte_mbuf *)((char *)vaddr - bp_info->meta_data_size);
    sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_sg_entry_get64(sg_temp));

    first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
                                    bp_info->meta_data_size);
    first_seg->data_off = sg_temp->offset;
    first_seg->data_len = sg_temp->length;
    first_seg->pkt_len = sg_temp->length;
    rte_mbuf_refcnt_set(first_seg, 1);

    first_seg->port = ifid;
    first_seg->nb_segs = 1;
    first_seg->ol_flags = 0;
    prev_seg = first_seg;
    while (i < DPAA_SGT_MAX_ENTRIES) {
        sg_temp = &sgt[i++];
        hw_sg_to_cpu(sg_temp);
        sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
                                     qm_sg_entry_get64(sg_temp));
        cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
                                      bp_info->meta_data_size);
        cur_seg->data_off = sg_temp->offset;
        cur_seg->data_len = sg_temp->length;
        first_seg->pkt_len += sg_temp->length;
        first_seg->nb_segs += 1;
        rte_mbuf_refcnt_set(cur_seg, 1);
        prev_seg->next = cur_seg;
        if (sg_temp->final) {
            cur_seg->next = NULL;
            break;
        }
        prev_seg = cur_seg;
    }
    DPAA_DP_LOG(DEBUG, "Received an SG frame len =%d, num_sg =%d",
                first_seg->pkt_len, first_seg->nb_segs);

    dpaa_eth_packet_info(first_seg, vaddr);
    rte_pktmbuf_free_seg(temp);

    return first_seg;
}

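/* Convert a received contiguous frame descriptor into an mbuf; SG frames are
 * handed off to dpaa_eth_sg_to_mbuf().
 */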
static inline struct rte_mbuf *
dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
{
    struct rte_mbuf *mbuf;
    struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
    void *ptr;
    uint8_t format =
        (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
    uint16_t offset;
    uint32_t length;

    if (unlikely(format == qm_fd_sg))
        return dpaa_eth_sg_to_mbuf(fd, ifid);

    offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
    length = fd->opaque & DPAA_FD_LENGTH_MASK;

    DPAA_DP_LOG(DEBUG, " FD--->MBUF off %d len = %d", offset, length);

    /* Ignoring case when format != qm_fd_contig */
    dpaa_display_frame(fd);
    ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));

    mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
    /* Prefetch the Parse results and packet data to L1 */
    rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));

    mbuf->data_off = offset;
    mbuf->data_len = length;
    mbuf->pkt_len = length;
    mbuf->port = ifid;
    mbuf->nb_segs = 1;
    mbuf->ol_flags = 0;
    rte_mbuf_refcnt_set(mbuf, 1);
    dpaa_eth_packet_info(mbuf, mbuf->buf_addr);

    return mbuf;
}

/* Specific for LS1043: the prepare/prefetch stage is not used on this
 * platform, so the next frame's annotation is prefetched inline below.
 */
void
dpaa_rx_cb_no_prefetch(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
                       void **bufs, int num_bufs)
{
    struct rte_mbuf *mbuf;
    struct dpaa_bp_info *bp_info;
    const struct qm_fd *fd;
    void *ptr;
    struct dpaa_if *dpaa_intf;
    uint16_t offset, i;
    uint32_t length;
    uint8_t format;

    bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[0]->fd.bpid);
    ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[0]->fd));
    rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
    bufs[0] = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);

    for (i = 0; i < num_bufs; i++) {
        if (i < num_bufs - 1) {
            bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[i + 1]->fd.bpid);
            ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[i + 1]->fd));
            rte_prefetch0((void *)((uint8_t *)ptr +
                          DEFAULT_RX_ICEOF));
            bufs[i + 1] = (struct rte_mbuf *)((char *)ptr -
                          bp_info->meta_data_size);
        }

        fd = &dqrr[i]->fd;
        dpaa_intf = fq[0]->dpaa_intf;

        format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
                 DPAA_FD_FORMAT_SHIFT;
        if (unlikely(format == qm_fd_sg)) {
            bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid);
            continue;
        }

        offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >>
                 DPAA_FD_OFFSET_SHIFT;
        length = fd->opaque & DPAA_FD_LENGTH_MASK;

        mbuf = bufs[i];
        mbuf->data_off = offset;
        mbuf->data_len = length;
        mbuf->pkt_len = length;
        mbuf->port = dpaa_intf->ifid;
        mbuf->nb_segs = 1;
        mbuf->ol_flags = 0;
        rte_mbuf_refcnt_set(mbuf, 1);
        dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
    }
}

void
dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
           void **bufs, int num_bufs)
{
    struct rte_mbuf *mbuf;
    const struct qm_fd *fd;
    struct dpaa_if *dpaa_intf;
    uint16_t offset, i;
    uint32_t length;
    uint8_t format;

    for (i = 0; i < num_bufs; i++) {
        fd = &dqrr[i]->fd;
        dpaa_intf = fq[0]->dpaa_intf;

        format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
                 DPAA_FD_FORMAT_SHIFT;
        if (unlikely(format == qm_fd_sg)) {
            bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid);
            continue;
        }

        offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >>
                 DPAA_FD_OFFSET_SHIFT;
        length = fd->opaque & DPAA_FD_LENGTH_MASK;

        mbuf = bufs[i];
        mbuf->data_off = offset;
        mbuf->data_len = length;
        mbuf->pkt_len = length;
        mbuf->port = dpaa_intf->ifid;
        mbuf->nb_segs = 1;
        mbuf->ol_flags = 0;
        rte_mbuf_refcnt_set(mbuf, 1);
        dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
    }
}

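/* Prepare callback run per DQRR entry before the Rx burst is processed:
 * convert the FD address and prefetch the annotation area (see the LS1046
 * note below).
 */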
void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs)
{
    struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(dq->fd.bpid);
    void *ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dq->fd));

    /* In case of LS1046, annotation stashing is disabled due to L2 cache
     * being bottleneck in case of multicore scenario for this platform.
     * So we prefetch the annotation beforehand, so that it is available
     * in cache when accessed.
     */
    rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));

    *bufs = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
}

/* Poll the portal directly for frames on a statically affined queue */
static uint16_t
dpaa_eth_queue_portal_rx(struct qman_fq *fq,
                         struct rte_mbuf **bufs,
                         uint16_t nb_bufs)
{
    int ret;

    if (unlikely(fq->qp == NULL)) {
        ret = rte_dpaa_portal_fq_init((void *)0, fq);
        if (ret) {
            DPAA_PMD_ERR("Failure in affining portal %d", ret);
            return 0;
        }
    }

    return qman_portal_poll_rx(nb_bufs, (void **)bufs, fq->qp);
}

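/* DQRR callback used for parallel event queues: build the mbuf, fill the
 * rte_event from the FQ's event configuration and let the entry be consumed
 * immediately.
 */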
enum qman_cb_dqrr_result
dpaa_rx_cb_parallel(void *event,
                    struct qman_portal *qm __always_unused,
                    struct qman_fq *fq,
                    const struct qm_dqrr_entry *dqrr,
                    void **bufs)
{
    u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
    struct rte_mbuf *mbuf;
    struct rte_event *ev = (struct rte_event *)event;

    mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid);
    ev->event_ptr = (void *)mbuf;
    ev->flow_id = fq->ev.flow_id;
    ev->sub_event_type = fq->ev.sub_event_type;
    ev->event_type = RTE_EVENT_TYPE_ETHDEV;
    ev->op = RTE_EVENT_OP_NEW;
    ev->sched_type = fq->ev.sched_type;
    ev->queue_id = fq->ev.queue_id;
    ev->priority = fq->ev.priority;
    ev->impl_opaque = (uint8_t)DPAA_INVALID_MBUF_SEQN;
    mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
    *bufs = mbuf;

    return qman_cb_dqrr_consume;
}

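/* DQRR callback used for atomic event queues: besides building the event,
 * the DQRR entry is held (deferred) and tracked per lcore so that it can
 * later be acknowledged via DCA when the mbuf is transmitted or released.
 */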
enum qman_cb_dqrr_result
dpaa_rx_cb_atomic(void *event,
                  struct qman_portal *qm __always_unused,
                  struct qman_fq *fq,
                  const struct qm_dqrr_entry *dqrr,
                  void **bufs)
{
    u8 index;
    u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
    struct rte_mbuf *mbuf;
    struct rte_event *ev = (struct rte_event *)event;

    mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid);
    ev->event_ptr = (void *)mbuf;
    ev->flow_id = fq->ev.flow_id;
    ev->sub_event_type = fq->ev.sub_event_type;
    ev->event_type = RTE_EVENT_TYPE_ETHDEV;
    ev->op = RTE_EVENT_OP_NEW;
    ev->sched_type = fq->ev.sched_type;
    ev->queue_id = fq->ev.queue_id;
    ev->priority = fq->ev.priority;

    /* Save active dqrr entries */
    index = DQRR_PTR2IDX(dqrr);
    DPAA_PER_LCORE_DQRR_SIZE++;
    DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
    DPAA_PER_LCORE_DQRR_MBUF(index) = mbuf;
    ev->impl_opaque = index + 1;
    mbuf->seqn = (uint32_t)index + 1;
    *bufs = mbuf;

    return qman_cb_dqrr_defer;
}

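/* Rx burst function. Static (dedicated portal) queues are polled directly;
 * otherwise a volatile dequeue command is issued for up to nb_bufs frames.
 */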
uint16_t dpaa_eth_queue_rx(void *q,
                           struct rte_mbuf **bufs,
                           uint16_t nb_bufs)
{
    struct qman_fq *fq = q;
    struct qm_dqrr_entry *dq;
    uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
    int num_rx_bufs, ret;
    uint32_t vdqcr_flags = 0;

    if (unlikely(rte_dpaa_bpid_info == NULL &&
                 rte_eal_process_type() == RTE_PROC_SECONDARY))
        rte_dpaa_bpid_info = fq->bp_array;

    if (likely(fq->is_static))
        return dpaa_eth_queue_portal_rx(fq, bufs, nb_bufs);

    if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
        ret = rte_dpaa_portal_init((void *)0);
        if (ret) {
            DPAA_PMD_ERR("Failure in affining portal");
            return 0;
        }
    }

    /* Until request for four buffers, we provide exact number of buffers.
     * Otherwise we do not set the QM_VDQCR_EXACT flag.
     * Not setting QM_VDQCR_EXACT flag can provide two more buffers than
     * requested, so we request two less in this case.
     */
    if (nb_bufs < 4) {
        vdqcr_flags = QM_VDQCR_EXACT;
        num_rx_bufs = nb_bufs;
    } else {
        num_rx_bufs = nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
            (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_bufs - 2);
    }
    ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
    if (ret)
        return 0;

    do {
        dq = qman_dequeue(fq);
        if (!dq)
            continue;
        bufs[num_rx++] = dpaa_eth_fd_to_mbuf(&dq->fd, ifid);
        qman_dqrr_consume(fq, dq);
    } while (fq->flags & QMAN_FQ_STATE_VDQCR);

    return num_rx;
}

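/* Acquire a single buffer from the BMAN pool and return a pointer to the
 * corresponding mbuf area (NULL on failure).
 */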
static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info)
{
    int ret;
    size_t buf = 0;
    struct bm_buffer bufs;

    ret = bman_acquire(bp_info->bp, &bufs, 1, 0);
    if (ret <= 0) {
        DPAA_PMD_WARN("Failed to allocate buffers %d", ret);
        return (void *)buf;
    }

    DPAA_DP_LOG(DEBUG, "got buffer 0x%" PRIx64 " from pool %d",
                (uint64_t)bufs.addr, bufs.bpid);

    buf = (size_t)DPAA_MEMPOOL_PTOV(bp_info, bufs.addr)
          - bp_info->meta_data_size;

    return (void *)buf;
}

/* Copy an mbuf from an external pool into a newly acquired DPAA buffer */
static struct rte_mbuf *dpaa_get_dmable_mbuf(struct rte_mbuf *mbuf,
                                             struct dpaa_if *dpaa_intf)
{
    struct rte_mbuf *dpaa_mbuf;

    /* allocate pktbuffer on bpid for dpaa port */
    dpaa_mbuf = dpaa_get_pktbuf(dpaa_intf->bp_info);
    if (!dpaa_mbuf)
        return NULL;

    memcpy((uint8_t *)(dpaa_mbuf->buf_addr) + RTE_PKTMBUF_HEADROOM, (void *)
           ((uint8_t *)(mbuf->buf_addr) + mbuf->data_off), mbuf->pkt_len);

    /* Copy only the required fields */
    dpaa_mbuf->data_off = RTE_PKTMBUF_HEADROOM;
    dpaa_mbuf->pkt_len = mbuf->pkt_len;
    dpaa_mbuf->ol_flags = mbuf->ol_flags;
    dpaa_mbuf->packet_type = mbuf->packet_type;
    dpaa_mbuf->tx_offload = mbuf->tx_offload;
    rte_pktmbuf_free(mbuf);
    return dpaa_mbuf;
}

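/* Build a scatter/gather frame descriptor for a multi-segment mbuf. A buffer
 * from the DPAA pool holds the SG table; a segment's bpid is set to an
 * invalid value (0xff) whenever hardware must not free that buffer.
 */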
static uint16_t
dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
                       struct qm_fd *fd,
                       uint32_t bpid)
{
    struct rte_mbuf *cur_seg = mbuf, *prev_seg = NULL;
    struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(bpid);
    struct rte_mbuf *temp, *mi;
    struct qm_sg_entry *sg_temp, *sgt;
    int i = 0;

    DPAA_DP_LOG(DEBUG, "Creating SG FD to transmit");

    temp = rte_pktmbuf_alloc(bp_info->mp);
    if (!temp) {
        DPAA_PMD_ERR("Failure in allocation of mbuf");
        return -1;
    }
    if (temp->buf_len < ((mbuf->nb_segs * sizeof(struct qm_sg_entry))
                         + temp->data_off)) {
        DPAA_PMD_ERR("Insufficient space in mbuf for SG entries");
        return -1;
    }

    fd->cmd = 0;
    fd->opaque_addr = 0;

    if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
        if (!mbuf->packet_type) {
            struct rte_net_hdr_lens hdr_lens;

            mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
                    RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
                    | RTE_PTYPE_L4_MASK);
            mbuf->l2_len = hdr_lens.l2_len;
            mbuf->l3_len = hdr_lens.l3_len;
        }
        if (temp->data_off < DEFAULT_TX_ICEOF
            + sizeof(struct dpaa_eth_parse_results_t))
            temp->data_off = DEFAULT_TX_ICEOF
                + sizeof(struct dpaa_eth_parse_results_t);
        dcbz_64(temp->buf_addr);
        dpaa_checksum_offload(mbuf, fd, temp->buf_addr);
    }

    sgt = temp->buf_addr + temp->data_off;
    fd->format = QM_FD_SG;
    fd->addr = temp->buf_iova;
    fd->offset = temp->data_off;
    fd->bpid = bpid;
    fd->length20 = mbuf->pkt_len;

    while (i < DPAA_SGT_MAX_ENTRIES) {
        sg_temp = &sgt[i++];
        sg_temp->addr = cur_seg->buf_iova;
        sg_temp->offset = cur_seg->data_off;
        sg_temp->length = cur_seg->data_len;
        if (RTE_MBUF_DIRECT(cur_seg)) {
            if (rte_mbuf_refcnt_read(cur_seg) > 1) {
                /* If refcnt > 1, invalid bpid is set to ensure
                 * buffer is not freed by HW.
                 */
                sg_temp->bpid = 0xff;
                rte_mbuf_refcnt_update(cur_seg, -1);
            } else {
                sg_temp->bpid =
                    DPAA_MEMPOOL_TO_BPID(cur_seg->pool);
            }
            cur_seg = cur_seg->next;
        } else {
            /* Get owner MBUF from indirect buffer */
            mi = rte_mbuf_from_indirect(cur_seg);
            if (rte_mbuf_refcnt_read(mi) > 1) {
                /* If refcnt > 1, invalid bpid is set to ensure
                 * owner buffer is not freed by HW.
                 */
                sg_temp->bpid = 0xff;
            } else {
                sg_temp->bpid = DPAA_MEMPOOL_TO_BPID(mi->pool);
                rte_mbuf_refcnt_update(mi, 1);
            }
            prev_seg = cur_seg;
            cur_seg = cur_seg->next;
            prev_seg->next = NULL;
            rte_pktmbuf_free(prev_seg);
        }
        if (cur_seg == NULL) {
            sg_temp->final = 1;
            cpu_to_hw_sg(sg_temp);
            break;
        }
        cpu_to_hw_sg(sg_temp);
    }
    return 0;
}

/* Handle mbufs which are not segmented (non SG) */
static inline void
tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
                            struct dpaa_bp_info *bp_info,
                            struct qm_fd *fd_arr)
{
    struct rte_mbuf *mi = NULL;

    if (RTE_MBUF_DIRECT(mbuf)) {
        if (rte_mbuf_refcnt_read(mbuf) > 1) {
            /* In case of direct mbuf and mbuf being cloned,
             * BMAN should _not_ release buffer.
             */
            DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
            /* Buffer should be released by EAL */
            rte_mbuf_refcnt_update(mbuf, -1);
        } else {
            /* In case of direct mbuf and no cloning, mbuf can be
             * released by BMAN.
             */
            DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
        }
    } else {
        /* This is data-containing core mbuf: 'mi' */
        mi = rte_mbuf_from_indirect(mbuf);
        if (rte_mbuf_refcnt_read(mi) > 1) {
            /* In case of indirect mbuf, and mbuf being cloned,
             * BMAN should _not_ release it and let EAL release
             * it through pktmbuf_free below.
             */
            DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
        } else {
            /* In case of indirect mbuf, and no cloning, core mbuf
             * should be released by BMAN.
             * Increase refcnt of core mbuf so that when
             * pktmbuf_free is called and mbuf is released, EAL
             * doesn't try to release core mbuf which would have
             * been released by BMAN.
             */
            rte_mbuf_refcnt_update(mi, 1);
            DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
        }
        rte_pktmbuf_free(mbuf);
    }

    if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK)
        dpaa_unsegmented_checksum(mbuf, fd_arr);
}

/* Handle all mbufs on dpaa BMAN managed pool */
static inline uint16_t
tx_on_dpaa_pool(struct rte_mbuf *mbuf,
                struct dpaa_bp_info *bp_info,
                struct qm_fd *fd_arr)
{
    DPAA_DP_LOG(DEBUG, "BMAN offloaded buffer, mbuf: %p", mbuf);

    if (mbuf->nb_segs == 1) {
        /* Case for non-segmented buffers */
        tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr);
    } else if (mbuf->nb_segs > 1 &&
               mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) {
        if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, bp_info->bpid)) {
            DPAA_PMD_DEBUG("Unable to create Scatter Gather FD");
            return 1;
        }
    } else {
        DPAA_PMD_DEBUG("Number of Segments not supported");
        return 1;
    }

    return 0;
}

/* Handle all mbufs on an external pool (non-dpaa) */
static inline uint16_t
tx_on_external_pool(struct qman_fq *txq, struct rte_mbuf *mbuf,
                    struct qm_fd *fd_arr)
{
    struct dpaa_if *dpaa_intf = txq->dpaa_intf;
    struct rte_mbuf *dmable_mbuf;

    DPAA_DP_LOG(DEBUG, "Non-BMAN offloaded buffer."
                "Allocating an offloaded buffer");
    dmable_mbuf = dpaa_get_dmable_mbuf(mbuf, dpaa_intf);
    if (!dmable_mbuf) {
        DPAA_DP_LOG(DEBUG, "no dpaa buffers.");
        return 1;
    }

    DPAA_MBUF_TO_CONTIG_FD(dmable_mbuf, fd_arr, dpaa_intf->bp_info->bpid);
    if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK)
        dpaa_unsegmented_checksum(mbuf, fd_arr);

    return 0;
}

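/* Tx burst function: build one FD per mbuf (contiguous fast path, SG frame,
 * or copy through a DPAA buffer for external pools) and enqueue them in
 * bursts of up to DPAA_TX_BURST_SIZE frames.
 */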
uint16_t
dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
    struct rte_mbuf *mbuf, *mi = NULL;
    struct rte_mempool *mp;
    struct dpaa_bp_info *bp_info;
    struct qm_fd fd_arr[DPAA_TX_BURST_SIZE];
    uint32_t frames_to_send, loop, sent = 0;
    uint16_t state;
    int ret;
    uint32_t seqn, index, flags[DPAA_TX_BURST_SIZE] = {0};

    if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
        ret = rte_dpaa_portal_init((void *)0);
        if (ret) {
            DPAA_PMD_ERR("Failure in affining portal");
            return 0;
        }
    }

    DPAA_DP_LOG(DEBUG, "Transmitting %d buffers on queue: %p", nb_bufs, q);

    while (nb_bufs) {
        frames_to_send = (nb_bufs > DPAA_TX_BURST_SIZE) ?
            DPAA_TX_BURST_SIZE : nb_bufs;
        for (loop = 0; loop < frames_to_send; loop++) {
            mbuf = *(bufs++);
            seqn = mbuf->seqn;
            if (seqn != DPAA_INVALID_MBUF_SEQN) {
                index = seqn - 1;
                if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
                    flags[loop] =
                        ((index & QM_EQCR_DCA_IDXMASK) << 8);
                    flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
                    DPAA_PER_LCORE_DQRR_SIZE--;
                    DPAA_PER_LCORE_DQRR_HELD &=
                        ~(1 << index);
                }
            }

            if (likely(RTE_MBUF_DIRECT(mbuf))) {
                mp = mbuf->pool;
                bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
                if (likely(mp->ops_index ==
                           bp_info->dpaa_ops_index &&
                           mbuf->nb_segs == 1 &&
                           rte_mbuf_refcnt_read(mbuf) == 1)) {
                    DPAA_MBUF_TO_CONTIG_FD(mbuf,
                        &fd_arr[loop], bp_info->bpid);
                    if (mbuf->ol_flags &
                        DPAA_TX_CKSUM_OFFLOAD_MASK)
                        dpaa_unsegmented_checksum(mbuf,
                            &fd_arr[loop]);
                    continue;
                }
            } else {
                mi = rte_mbuf_from_indirect(mbuf);
                mp = mi->pool;
            }

            bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
            if (likely(mp->ops_index == bp_info->dpaa_ops_index)) {
                state = tx_on_dpaa_pool(mbuf, bp_info,
                                        &fd_arr[loop]);
                if (unlikely(state)) {
                    /* Set frames_to_send & nb_bufs so
                     * that packets are transmitted till
                     * previous frame.
                     */
                    frames_to_send = loop;
                    nb_bufs = loop;
                    goto send_pkts;
                }
            } else {
                state = tx_on_external_pool(q, mbuf,
                                            &fd_arr[loop]);
                if (unlikely(state)) {
                    /* Set frames_to_send & nb_bufs so
                     * that packets are transmitted till
                     * previous frame.
                     */
                    frames_to_send = loop;
                    nb_bufs = loop;
                    goto send_pkts;
                }
            }
        }

send_pkts:
        loop = 0;
        while (loop < frames_to_send) {
            loop += qman_enqueue_multi(q, &fd_arr[loop],
                                       &flags[loop],
                                       frames_to_send - loop);
        }
        nb_bufs -= frames_to_send;
        sent += frames_to_send;
    }

    DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", sent, q);

    return sent;
}

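/* Tx handler that drops every packet it is given; see the comment below on
 * why the mbufs are not freed here.
 */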
uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused,
                              struct rte_mbuf **bufs __rte_unused,
                              uint16_t nb_bufs __rte_unused)
{
    DPAA_DP_LOG(DEBUG, "Drop all packets");

    /* Drop all incoming packets. No need to free packets here
     * because the rte_eth f/w frees up the packets through tx_buffer
     * callback in case this function returns count less than nb_bufs.
     */
    return 0;
}