/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
 */

/* System headers */
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_net.h>

#include "dpaa_ethdev.h"
#include "dpaa_rxtx.h"
#include <rte_dpaa_bus.h>
#include <dpaa_mempool.h>

#include <fsl_qman.h>
#include <fsl_bman.h>
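
/*
 * Convert a contiguous mbuf into a QMan frame descriptor: the FD's opaque
 * word packs the frame format, the data offset within the buffer and the
 * frame length, while addr/bpid carry the buffer's IOVA and the backing
 * BMAN pool id.
 */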
#define DPAA_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) \
	do { \
		(_fd)->cmd = 0; \
		(_fd)->opaque_addr = 0; \
		(_fd)->opaque = QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT; \
		(_fd)->opaque |= ((_mbuf)->data_off) << DPAA_FD_OFFSET_SHIFT; \
		(_fd)->opaque |= (_mbuf)->pkt_len; \
		(_fd)->addr = (_mbuf)->buf_iova; \
		(_fd)->bpid = _bpid; \
	} while (0)
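
/* Debug helper: hex-dump a frame descriptor and its payload. Compiled in
 * only when RTE_LIBRTE_DPAA_DEBUG_DRIVER is defined; otherwise a no-op.
 */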
#if (defined RTE_LIBRTE_DPAA_DEBUG_DRIVER)
void dpaa_display_frame(const struct qm_fd *fd)
{
	int ii;
	char *ptr;

	printf("%s::bpid %x addr %08x%08x, format %d off %d, len %d stat %x\n",
	       __func__, fd->bpid, fd->addr_hi, fd->addr_lo, fd->format,
	       fd->offset, fd->length20, fd->status);
	ptr = (char *)rte_dpaa_mem_ptov(fd->addr);
	ptr += fd->offset;
	printf("%02x ", *ptr);
	for (ii = 1; ii < fd->length20; ii++) {
		ptr++;
		printf("%02x ", *ptr);
	}
	printf("\n");
}
#else
#define dpaa_display_frame(a)
#endif

static inline void dpaa_slow_parsing(struct rte_mbuf *m __rte_unused,
				     uint64_t prs __rte_unused)
{
	DPAA_DP_LOG(DEBUG, "Slow parsing");
	/* TBD:XXX: to be implemented */
}
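
/*
 * Translate the FMan parse results stashed in the frame annotation area
 * into mbuf metadata: packet_type, L2/L3 header lengths (via tx_offload),
 * RSS hash and RX offload flags.
 */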
static inline void dpaa_eth_packet_info(struct rte_mbuf *m,
					uint64_t fd_virt_addr)
{
	struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr);
	uint64_t prs = *((uint64_t *)(&annot->parse)) & DPAA_PARSE_MASK;

	DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot);

	switch (prs) {
	case DPAA_PKT_TYPE_NONE:
		m->packet_type = 0;
		break;
	case DPAA_PKT_TYPE_ETHER:
		m->packet_type = RTE_PTYPE_L2_ETHER;
		break;
	case DPAA_PKT_TYPE_IPV4:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4;
		break;
	case DPAA_PKT_TYPE_IPV6:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6;
		break;
	case DPAA_PKT_TYPE_IPV4_FRAG:
	case DPAA_PKT_TYPE_IPV4_FRAG_UDP:
	case DPAA_PKT_TYPE_IPV4_FRAG_TCP:
	case DPAA_PKT_TYPE_IPV4_FRAG_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG;
		break;
	case DPAA_PKT_TYPE_IPV6_FRAG:
	case DPAA_PKT_TYPE_IPV6_FRAG_UDP:
	case DPAA_PKT_TYPE_IPV6_FRAG_TCP:
	case DPAA_PKT_TYPE_IPV6_FRAG_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG;
		break;
	case DPAA_PKT_TYPE_IPV4_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT;
		break;
	case DPAA_PKT_TYPE_IPV6_EXT:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT;
		break;
	case DPAA_PKT_TYPE_IPV4_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV6_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV4_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV6_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV4_EXT_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV6_EXT_UDP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP;
		break;
	case DPAA_PKT_TYPE_IPV4_EXT_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV6_EXT_TCP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP;
		break;
	case DPAA_PKT_TYPE_IPV4_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
		break;
	case DPAA_PKT_TYPE_IPV6_SCTP:
		m->packet_type = RTE_PTYPE_L2_ETHER |
			RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
		break;
	/* More switch cases can be added */
	default:
		dpaa_slow_parsing(m, prs);
	}

	m->tx_offload = annot->parse.ip_off[0];
	m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0])
		<< DPAA_PKT_L3_LEN_SHIFT;

	/* Set the hash values */
	m->hash.rss = (uint32_t)(rte_be_to_cpu_64(annot->hash));
	m->ol_flags = PKT_RX_RSS_HASH;
	/* All packets with a bad checksum are dropped by the interface (and
	 * a corresponding notification is issued to the RX error queues).
	 */
	m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;

	/* Check if VLAN is present */
	if (prs & DPAA_PARSE_VLAN_MASK)
		m->ol_flags |= PKT_RX_VLAN;
	/* Packet is received without stripping the VLAN */
}
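
/*
 * Software checksum fallback: recompute the IPv4 header checksum and the
 * TCP/UDP checksum on the CPU when the frame cannot be offloaded to FMan.
 */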
static inline void dpaa_checksum(struct rte_mbuf *mbuf)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
	char *l3_hdr = (char *)eth_hdr + mbuf->l2_len;
	struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
	struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)l3_hdr;

	DPAA_DP_LOG(DEBUG, "Calculating checksum for mbuf: %p", mbuf);

	if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
	    ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
	    RTE_PTYPE_L3_IPV4_EXT)) {
		ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
	} else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
		   RTE_PTYPE_L3_IPV6) ||
		   ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
		   RTE_PTYPE_L3_IPV6_EXT))
		ipv6_hdr = (struct ipv6_hdr *)l3_hdr;

	if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
		struct tcp_hdr *tcp_hdr = (struct tcp_hdr *)(l3_hdr +
					  mbuf->l3_len);
		tcp_hdr->cksum = 0;
		if (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))
			tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
							       tcp_hdr);
		else /* assume ethertype == ETHER_TYPE_IPv6 */
			tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
							       tcp_hdr);
	} else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) ==
		   RTE_PTYPE_L4_UDP) {
		struct udp_hdr *udp_hdr = (struct udp_hdr *)(l3_hdr +
					  mbuf->l3_len);
		udp_hdr->dgram_cksum = 0;
		if (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))
			udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
								     udp_hdr);
		else /* assume ethertype == ETHER_TYPE_IPv6 */
			udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
								     udp_hdr);
	}
}
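
/*
 * Hardware checksum offload: fill in the TX parse results so that FMan can
 * locate the L3/L4 headers and compute the checksums on transmit.
 */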
static inline void dpaa_checksum_offload(struct rte_mbuf *mbuf,
					 struct qm_fd *fd, char *prs_buf)
{
	struct dpaa_eth_parse_results_t *prs;

	DPAA_DP_LOG(DEBUG, " Offloading checksum for mbuf: %p", mbuf);

	prs = GET_TX_PRS(prs_buf);
	prs->l3r = 0;
	prs->l4r = 0;
	if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
	    ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
	    RTE_PTYPE_L3_IPV4_EXT))
		prs->l3r = DPAA_L3_PARSE_RESULT_IPV4;
	else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
		   RTE_PTYPE_L3_IPV6) ||
		 ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
		   RTE_PTYPE_L3_IPV6_EXT))
		prs->l3r = DPAA_L3_PARSE_RESULT_IPV6;

	if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
		prs->l4r = DPAA_L4_PARSE_RESULT_TCP;
	else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
		prs->l4r = DPAA_L4_PARSE_RESULT_UDP;

	prs->ip_off[0] = mbuf->l2_len;
	prs->l4_off = mbuf->l3_len + mbuf->l2_len;
	/* Enable L3 (and L4, if TCP or UDP) HW checksum */
	fd->cmd = DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC;
}
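
/*
 * Checksum handling for non-segmented frames: classify the packet if the
 * application did not, then either program the HW offload or fall back to
 * the software path when there is not enough headroom for parse results.
 */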
static inline void
dpaa_unsegmented_checksum(struct rte_mbuf *mbuf, struct qm_fd *fd_arr)
{
	if (!mbuf->packet_type) {
		struct rte_net_hdr_lens hdr_lens;

		mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
				RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
				| RTE_PTYPE_L4_MASK);
		mbuf->l2_len = hdr_lens.l2_len;
		mbuf->l3_len = hdr_lens.l3_len;
	}
	if (mbuf->data_off < (DEFAULT_TX_ICEOF +
			      sizeof(struct dpaa_eth_parse_results_t))) {
		DPAA_DP_LOG(DEBUG, "Checksum offload Err: "
			    "not enough headroom for correct checksum offload;"
			    " calculating checksum in software.");
		dpaa_checksum(mbuf);
	} else {
		dpaa_checksum_offload(mbuf, fd_arr, mbuf->buf_addr);
	}
}
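
/*
 * Convert a scatter/gather FD into a chained mbuf: walk the HW SG table,
 * recover the mbuf hidden in each buffer's metadata area, link the
 * segments, and release the mbuf that carried the SG table itself.
 */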
struct rte_mbuf *
dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid)
{
	struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
	struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
	struct qm_sg_entry *sgt, *sg_temp;
	void *vaddr, *sg_vaddr;
	int i = 0;
	uint8_t fd_offset = fd->offset;

	DPAA_DP_LOG(DEBUG, "Received an SG frame");

	vaddr = rte_dpaa_mem_ptov(qm_fd_addr(fd));
	if (!vaddr) {
		DPAA_PMD_ERR("unable to convert physical address");
		return NULL;
	}
	sgt = vaddr + fd_offset;
	sg_temp = &sgt[i++];
	hw_sg_to_cpu(sg_temp);
	temp = (struct rte_mbuf *)((char *)vaddr - bp_info->meta_data_size);
	sg_vaddr = rte_dpaa_mem_ptov(qm_sg_entry_get64(sg_temp));

	first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
						bp_info->meta_data_size);
	first_seg->data_off = sg_temp->offset;
	first_seg->data_len = sg_temp->length;
	first_seg->pkt_len = sg_temp->length;
	rte_mbuf_refcnt_set(first_seg, 1);

	first_seg->port = ifid;
	first_seg->nb_segs = 1;
	first_seg->ol_flags = 0;
	prev_seg = first_seg;
	while (i < DPAA_SGT_MAX_ENTRIES) {
		sg_temp = &sgt[i++];
		hw_sg_to_cpu(sg_temp);
		sg_vaddr = rte_dpaa_mem_ptov(qm_sg_entry_get64(sg_temp));
		cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
						bp_info->meta_data_size);
		cur_seg->data_off = sg_temp->offset;
		cur_seg->data_len = sg_temp->length;
		first_seg->pkt_len += sg_temp->length;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(cur_seg, 1);
		prev_seg->next = cur_seg;
		if (sg_temp->final) {
			cur_seg->next = NULL;
			break;
		}
		prev_seg = cur_seg;
	}

	dpaa_eth_packet_info(first_seg, (uint64_t)vaddr);
	rte_pktmbuf_free_seg(temp);
	return first_seg;
}
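
/*
 * Convert a single received FD into an mbuf. SG frames are handed off to
 * dpaa_eth_sg_to_mbuf(); contiguous frames are translated in place, since
 * the mbuf lives in the buffer's metadata area just before the payload.
 */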
static inline struct rte_mbuf *dpaa_eth_fd_to_mbuf(struct qm_fd *fd,
						   uint32_t ifid)
{
	struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
	struct rte_mbuf *mbuf;
	void *ptr;
	uint8_t format =
		(fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
	uint16_t offset =
		(fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
	uint32_t length = fd->opaque & DPAA_FD_LENGTH_MASK;

	DPAA_DP_LOG(DEBUG, " FD--->MBUF");

	if (unlikely(format == qm_fd_sg))
		return dpaa_eth_sg_to_mbuf(fd, ifid);

	/* Ignoring case when format != qm_fd_contig */
	dpaa_display_frame(fd);
	ptr = rte_dpaa_mem_ptov(fd->addr);
	/* Ignoring case when ptr would be NULL. That is only possible in case
	 * of a corrupted packet.
	 */

	mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
	/* Prefetch the parse results and packet data to L1 */
	rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
	rte_prefetch0((void *)((uint8_t *)ptr + offset));

	mbuf->data_off = offset;
	mbuf->data_len = length;
	mbuf->pkt_len = length;

	mbuf->port = ifid;
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->next = NULL;
	rte_mbuf_refcnt_set(mbuf, 1);
	dpaa_eth_packet_info(mbuf, (uint64_t)mbuf->buf_addr);
	return mbuf;
}
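
/*
 * RX burst handler. The calling thread is affined to a QMan portal before
 * dequeuing; frames are then pulled with a volatile dequeue command (VDQCR)
 * and converted to mbufs.
 */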
uint16_t dpaa_eth_queue_rx(void *q,
			   struct rte_mbuf **bufs,
			   uint16_t nb_bufs)
{
	struct qman_fq *fq = q;
	struct qm_dqrr_entry *dq;
	uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
	int ret;

	ret = rte_dpaa_portal_init((void *)0);
	if (ret) {
		DPAA_PMD_ERR("Failure in affining portal");
		return 0;
	}
	ret = qman_set_vdq(fq, (nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
			   DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_bufs);
	if (ret)
		return 0;
	do {
		dq = qman_dequeue(fq);
		if (!dq)
			continue;
		bufs[num_rx++] = dpaa_eth_fd_to_mbuf(&dq->fd, ifid);
		qman_dqrr_consume(fq, dq);
	} while (fq->flags & QMAN_FQ_STATE_VDQCR);

	return num_rx;
}
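
/* Acquire one buffer from the interface's BMAN pool and return a pointer
 * to its metadata area (the start of the hidden mbuf).
 */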
static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info)
{
	int ret;
	uint64_t buf = 0;
	struct bm_buffer bufs;

	ret = bman_acquire(bp_info->bp, &bufs, 1, 0);
	if (ret <= 0) {
		DPAA_PMD_WARN("Failed to allocate buffers %d", ret);
		return NULL;
	}
	DPAA_DP_LOG(DEBUG, "got buffer 0x%lx from pool %d",
		    (uint64_t)bufs.addr, bufs.bpid);
	buf = (uint64_t)rte_dpaa_mem_ptov(bufs.addr) - bp_info->meta_data_size;
	return (void *)buf;
}
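
/* Copy an mbuf that lives in a non-DPAA pool into a freshly acquired
 * BMAN-backed mbuf so that the hardware can DMA it and later free it.
 */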
static struct rte_mbuf *dpaa_get_dmable_mbuf(struct rte_mbuf *mbuf,
					     struct dpaa_if *dpaa_intf)
{
	struct rte_mbuf *dpaa_mbuf;

	/* allocate pktbuffer on bpid for dpaa port */
	dpaa_mbuf = dpaa_get_pktbuf(dpaa_intf->bp_info);
	if (!dpaa_mbuf)
		return NULL;

	memcpy((uint8_t *)(dpaa_mbuf->buf_addr) + mbuf->data_off, (void *)
		((uint8_t *)(mbuf->buf_addr) + mbuf->data_off), mbuf->pkt_len);

	/* Copy only the required fields */
	dpaa_mbuf->data_off = mbuf->data_off;
	dpaa_mbuf->pkt_len = mbuf->pkt_len;
	dpaa_mbuf->ol_flags = mbuf->ol_flags;
	dpaa_mbuf->packet_type = mbuf->packet_type;
	dpaa_mbuf->tx_offload = mbuf->tx_offload;
	rte_pktmbuf_free(mbuf);
	return dpaa_mbuf;
}
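
/*
 * Build a scatter/gather FD for a multi-segment mbuf: a separate buffer is
 * allocated to hold the SG table (and, when checksum offload is requested,
 * the TX parse results), and each segment becomes one SG entry whose bpid
 * tells BMAN whether it may free the underlying buffer after transmit.
 */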
static int
dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		       struct qm_fd *fd, uint32_t bpid)
{
	struct rte_mbuf *cur_seg = mbuf, *prev_seg = NULL;
	struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(bpid);
	struct rte_mbuf *temp, *mi;
	struct qm_sg_entry *sg_temp, *sgt;
	int i = 0;

	DPAA_DP_LOG(DEBUG, "Creating SG FD to transmit");

	temp = rte_pktmbuf_alloc(bp_info->mp);
	if (!temp) {
		DPAA_PMD_ERR("Failure in allocation of mbuf");
		return -1;
	}
	if (temp->buf_len < ((mbuf->nb_segs * sizeof(struct qm_sg_entry))
				+ temp->data_off)) {
		DPAA_PMD_ERR("Insufficient space in mbuf for SG entries");
		rte_pktmbuf_free(temp);
		return -1;
	}

	fd->cmd = 0;
	fd->opaque_addr = 0;

	if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
		if (!mbuf->packet_type) {
			struct rte_net_hdr_lens hdr_lens;

			mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
					RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
					| RTE_PTYPE_L4_MASK);
			mbuf->l2_len = hdr_lens.l2_len;
			mbuf->l3_len = hdr_lens.l3_len;
		}
		if (temp->data_off < DEFAULT_TX_ICEOF
			+ sizeof(struct dpaa_eth_parse_results_t))
			temp->data_off = DEFAULT_TX_ICEOF
				+ sizeof(struct dpaa_eth_parse_results_t);
		dcbz_64(temp->buf_addr);
		dpaa_checksum_offload(mbuf, fd, temp->buf_addr);
	}

	sgt = temp->buf_addr + temp->data_off;
	fd->format = QM_FD_SG;
	fd->addr = temp->buf_iova;
	fd->offset = temp->data_off;
	fd->bpid = bpid;
	fd->length20 = mbuf->pkt_len;

	while (i < DPAA_SGT_MAX_ENTRIES) {
		sg_temp = &sgt[i++];
		sg_temp->opaque = 0;
		sg_temp->val = 0;
		sg_temp->addr = cur_seg->buf_iova;
		sg_temp->offset = cur_seg->data_off;
		sg_temp->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			if (rte_mbuf_refcnt_read(cur_seg) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * buffer is not freed by HW.
				 */
				sg_temp->bpid = 0xff;
				rte_mbuf_refcnt_update(cur_seg, -1);
			} else {
				sg_temp->bpid =
					DPAA_MEMPOOL_TO_BPID(cur_seg->pool);
			}
			cur_seg = cur_seg->next;
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * owner buffer is not freed by HW.
				 */
				sg_temp->bpid = 0xff;
			} else {
				sg_temp->bpid = DPAA_MEMPOOL_TO_BPID(mi->pool);
				rte_mbuf_refcnt_update(mi, 1);
			}
			prev_seg = cur_seg;
			cur_seg = cur_seg->next;
			prev_seg->next = NULL;
			rte_pktmbuf_free(prev_seg);
		}
		if (cur_seg == NULL) {
			sg_temp->final = 1;
			cpu_to_hw_sg(sg_temp);
			break;
		}
		cpu_to_hw_sg(sg_temp);
	}
	return 0;
}

/* Handle mbufs which are not segmented (non SG) */
static inline void
tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
			    struct dpaa_bp_info *bp_info,
			    struct qm_fd *fd_arr)
{
	struct rte_mbuf *mi = NULL;

	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			/* In case of direct mbuf and mbuf being cloned,
			 * BMAN should _not_ release buffer.
			 */
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
			/* Buffer should be released by EAL */
			rte_mbuf_refcnt_update(mbuf, -1);
		} else {
			/* In case of direct mbuf and no cloning, mbuf can be
			 * released by BMAN.
			 */
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
		}
	} else {
		/* This is data-containing core mbuf: 'mi' */
		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1) {
			/* In case of indirect mbuf, and mbuf being cloned,
			 * BMAN should _not_ release it and let EAL release
			 * it through pktmbuf_free below.
			 */
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
		} else {
			/* In case of indirect mbuf, and no cloning, core mbuf
			 * should be released by BMAN.
			 * Increase refcnt of core mbuf so that when
			 * pktmbuf_free is called and mbuf is released, EAL
			 * doesn't try to release core mbuf which would have
			 * been released by BMAN.
			 */
			rte_mbuf_refcnt_update(mi, 1);
			DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
		}
		rte_pktmbuf_free(mbuf);
	}

	if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK)
		dpaa_unsegmented_checksum(mbuf, fd_arr);
}

/* Handle all mbufs on dpaa BMAN managed pool */
static inline uint16_t
tx_on_dpaa_pool(struct rte_mbuf *mbuf,
		struct dpaa_bp_info *bp_info,
		struct qm_fd *fd_arr)
{
	DPAA_DP_LOG(DEBUG, "BMAN offloaded buffer, mbuf: %p", mbuf);

	if (mbuf->nb_segs == 1) {
		/* Case for non-segmented buffers */
		tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr);
	} else if (mbuf->nb_segs > 1 &&
		   mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) {
		if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, bp_info->bpid)) {
			DPAA_PMD_DEBUG("Unable to create Scatter Gather FD");
			return 1;
		}
	} else {
		DPAA_PMD_DEBUG("Number of Segments not supported");
		return 1;
	}
	return 0;
}

/* Handle all mbufs on an external pool (non-dpaa) */
static inline uint16_t
tx_on_external_pool(struct qman_fq *txq, struct rte_mbuf *mbuf,
		    struct qm_fd *fd_arr)
{
	struct dpaa_if *dpaa_intf = txq->dpaa_intf;
	struct rte_mbuf *dmable_mbuf;

	DPAA_DP_LOG(DEBUG, "Non-BMAN offloaded buffer. "
		    "Allocating an offloaded buffer");
	dmable_mbuf = dpaa_get_dmable_mbuf(mbuf, dpaa_intf);
	if (!dmable_mbuf) {
		DPAA_DP_LOG(DEBUG, "no dpaa buffers.");
		return 1;
	}

	DPAA_MBUF_TO_CONTIG_FD(dmable_mbuf, fd_arr, dpaa_intf->bp_info->bpid);
	return 0;
}
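
/*
 * TX burst handler. Frames are batched in groups of up to
 * DPAA_TX_BURST_SIZE; the common case (a direct, single-segment mbuf with
 * refcnt == 1 from a DPAA pool) is converted inline, while everything else
 * goes through the slower pool-specific paths before the batch is pushed
 * to QMan with qman_enqueue_multi().
 */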
uint16_t
dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	struct rte_mbuf *mbuf, *mi = NULL;
	struct rte_mempool *mp;
	struct dpaa_bp_info *bp_info;
	struct qm_fd fd_arr[DPAA_TX_BURST_SIZE];
	uint32_t frames_to_send, loop, sent = 0;
	uint16_t state;
	int ret;

	ret = rte_dpaa_portal_init((void *)0);
	if (ret) {
		DPAA_PMD_ERR("Failure in affining portal");
		return 0;
	}

	DPAA_DP_LOG(DEBUG, "Transmitting %d buffers on queue: %p", nb_bufs, q);

	while (nb_bufs) {
		frames_to_send = (nb_bufs > DPAA_TX_BURST_SIZE) ?
				DPAA_TX_BURST_SIZE : nb_bufs;
		for (loop = 0; loop < frames_to_send; loop++) {
			mbuf = *(bufs++);
			if (likely(RTE_MBUF_DIRECT(mbuf))) {
				mp = mbuf->pool;
				bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
				if (likely(mp->ops_index ==
						bp_info->dpaa_ops_index &&
					mbuf->nb_segs == 1 &&
					rte_mbuf_refcnt_read(mbuf) == 1)) {
					DPAA_MBUF_TO_CONTIG_FD(mbuf,
						&fd_arr[loop], bp_info->bpid);
					if (mbuf->ol_flags &
						DPAA_TX_CKSUM_OFFLOAD_MASK)
						dpaa_unsegmented_checksum(mbuf,
							&fd_arr[loop]);
					continue;
				}
			} else {
				mi = rte_mbuf_from_indirect(mbuf);
				mp = mi->pool;
			}
			bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
			if (likely(mp->ops_index == bp_info->dpaa_ops_index)) {
				state = tx_on_dpaa_pool(mbuf, bp_info,
							&fd_arr[loop]);
				if (unlikely(state)) {
					/* Set frames_to_send & nb_bufs so
					 * that packets are transmitted till
					 * previous frame.
					 */
					frames_to_send = loop;
					nb_bufs = loop;
					goto send_pkts;
				}
			} else {
				state = tx_on_external_pool(q, mbuf,
							    &fd_arr[loop]);
				if (unlikely(state)) {
					/* Set frames_to_send & nb_bufs so
					 * that packets are transmitted till
					 * previous frame.
					 */
					frames_to_send = loop;
					nb_bufs = loop;
					goto send_pkts;
				}
			}
		}

send_pkts:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi(q, &fd_arr[loop],
						   frames_to_send - loop);
		}
		nb_bufs -= frames_to_send;
		sent += frames_to_send;
	}

	DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", sent, q);
	return sent;
}

uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused,
			      struct rte_mbuf **bufs __rte_unused,
			      uint16_t nb_bufs __rte_unused)
{
	DPAA_DP_LOG(DEBUG, "Drop all packets");

	/* Drop all incoming packets. No need to free packets here
	 * because the rte_eth f/w frees up the packets through the tx_buffer
	 * callback in case this function returns a count less than nb_bufs.
	 */
	return 0;
}
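
/*
 * Usage sketch (illustrative only, not part of this driver): once the PMD
 * wires dpaa_eth_queue_rx/dpaa_eth_queue_tx into the ethdev burst hooks,
 * an application drives them through the generic rte_ethdev API. The port
 * id, queue id and burst size below are assumptions for the example.
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *	uint16_t tx = rte_eth_tx_burst(port_id, 0, pkts, nb);
 *	while (tx < nb)
 *		rte_pktmbuf_free(pkts[tx++]);
 */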