/*-
 *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_config.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>

#include "dpaa_ethdev.h"
#include "dpaa_rxtx.h"
#include <rte_dpaa_bus.h>
#include <dpaa_mempool.h>
#define DPAA_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) \
        do { \
                (_fd)->cmd = 0; \
                (_fd)->opaque_addr = 0; \
                (_fd)->opaque = QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT; \
                (_fd)->opaque |= ((_mbuf)->data_off) << DPAA_FD_OFFSET_SHIFT; \
                (_fd)->opaque |= (_mbuf)->pkt_len; \
                (_fd)->addr = (_mbuf)->buf_physaddr; \
                (_fd)->bpid = _bpid; \
        } while (0)
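
/*
 * The contiguous FD built above packs format, buffer offset and frame length
 * into the single 'opaque' word (format in the top bits, then the data
 * offset, then the 20-bit length), while 'addr' carries the physical address
 * of the mbuf data buffer and 'bpid' names the BMan pool the buffer belongs
 * to (0xff meaning the buffer is not hardware managed and must not be freed
 * by hardware).
 */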
#if (defined RTE_LIBRTE_DPAA_DEBUG_DRIVER)
void dpaa_display_frame(const struct qm_fd *fd)
{
        int ii;
        char *ptr;

        printf("%s::bpid %x addr %08x%08x, format %d off %d, len %d stat %x\n",
               __func__, fd->bpid, fd->addr_hi, fd->addr_lo, fd->format,
               fd->offset, fd->length20, fd->status);

        ptr = (char *)rte_dpaa_mem_ptov(fd->addr);
        ptr += fd->offset;
        printf("%02x ", *ptr);
        for (ii = 1; ii < fd->length20; ii++) {
                ptr++;
                printf("%02x ", *ptr);
        }
        printf("\n");
}
#else
#define dpaa_display_frame(a)
#endif
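
/*
 * Note: dpaa_display_frame() hex-dumps a frame descriptor and its payload for
 * debugging. Unless RTE_LIBRTE_DPAA_DEBUG_DRIVER is defined it compiles away
 * to an empty macro, so the fast path pays no cost.
 */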
static inline void dpaa_slow_parsing(struct rte_mbuf *m __rte_unused,
                                     uint64_t prs __rte_unused)
{
        DPAA_DP_LOG(DEBUG, "Slow parsing");
        /* TBD: to be implemented */
}
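
/*
 * dpaa_eth_packet_info() translates the FMan parse results stored in the
 * frame annotation area (in front of the packet data) into mbuf metadata:
 * packet_type, the RSS hash, checksum/VLAN offload flags, and the L2/L3
 * lengths packed into tx_offload.
 */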
static inline void dpaa_eth_packet_info(struct rte_mbuf *m,
                                        uint64_t fd_virt_addr)
{
        struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr);
        uint64_t prs = *((uint64_t *)(&annot->parse)) & DPAA_PARSE_MASK;

        DPAA_DP_LOG(DEBUG, "Parsing mbuf: %p with annotations: %p", m, annot);

        switch (prs) {
        case DPAA_PKT_TYPE_NONE:
                m->packet_type = 0;
                break;
        case DPAA_PKT_TYPE_ETHER:
                m->packet_type = RTE_PTYPE_L2_ETHER;
                break;
        case DPAA_PKT_TYPE_IPV4:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4;
                break;
        case DPAA_PKT_TYPE_IPV6:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6;
                break;
        case DPAA_PKT_TYPE_IPV4_FRAG:
        case DPAA_PKT_TYPE_IPV4_FRAG_UDP:
        case DPAA_PKT_TYPE_IPV4_FRAG_TCP:
        case DPAA_PKT_TYPE_IPV4_FRAG_SCTP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG;
                break;
        case DPAA_PKT_TYPE_IPV6_FRAG:
        case DPAA_PKT_TYPE_IPV6_FRAG_UDP:
        case DPAA_PKT_TYPE_IPV6_FRAG_TCP:
        case DPAA_PKT_TYPE_IPV6_FRAG_SCTP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG;
                break;
        case DPAA_PKT_TYPE_IPV4_EXT:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4_EXT;
                break;
        case DPAA_PKT_TYPE_IPV6_EXT:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6_EXT;
                break;
        case DPAA_PKT_TYPE_IPV4_TCP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
                break;
        case DPAA_PKT_TYPE_IPV6_TCP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
                break;
        case DPAA_PKT_TYPE_IPV4_UDP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
                break;
        case DPAA_PKT_TYPE_IPV6_UDP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
                break;
        case DPAA_PKT_TYPE_IPV4_EXT_UDP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP;
                break;
        case DPAA_PKT_TYPE_IPV6_EXT_UDP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP;
                break;
        case DPAA_PKT_TYPE_IPV4_EXT_TCP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP;
                break;
        case DPAA_PKT_TYPE_IPV6_EXT_TCP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP;
                break;
        case DPAA_PKT_TYPE_IPV4_SCTP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
                break;
        case DPAA_PKT_TYPE_IPV6_SCTP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
                break;
        default:
                /* More switch cases can be added */
                dpaa_slow_parsing(m, prs);
                break;
        }

        m->tx_offload = annot->parse.ip_off[0];
        m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0])
                << DPAA_PKT_L3_LEN_SHIFT;

        /* Set the hash values */
        m->hash.rss = (uint32_t)(rte_be_to_cpu_64(annot->hash));
        m->ol_flags = PKT_RX_RSS_HASH;
        /* All packets with a bad checksum are dropped by the interface (and a
         * corresponding notification is issued to the RX error queues).
         */
        m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;

        /* Check if VLAN is present */
        if (prs & DPAA_PARSE_VLAN_MASK)
                m->ol_flags |= PKT_RX_VLAN_PKT;
        /* Packet is received without stripping the VLAN tag */
}
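
/*
 * Software checksum fallback for TX: computes the IPv4 header checksum and
 * the TCP/UDP checksum in place. It is used when the mbuf does not have
 * enough headroom to hold the hardware parse results needed for checksum
 * offload (see tx_on_dpaa_pool_unsegmented() below).
 */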
static inline void dpaa_checksum(struct rte_mbuf *mbuf)
{
        struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
        char *l3_hdr = (char *)eth_hdr + mbuf->l2_len;
        struct ipv4_hdr *ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
        struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)l3_hdr;

        DPAA_DP_LOG(DEBUG, "Calculating checksum for mbuf: %p", mbuf);

        if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
            ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
             RTE_PTYPE_L3_IPV4_EXT)) {
                ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
                ipv4_hdr->hdr_checksum = 0;
                ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
        } else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
                    RTE_PTYPE_L3_IPV6) ||
                   ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
                    RTE_PTYPE_L3_IPV6_EXT))
                ipv6_hdr = (struct ipv6_hdr *)l3_hdr;

        if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
                struct tcp_hdr *tcp_hdr = (struct tcp_hdr *)(l3_hdr +
                                                             mbuf->l3_len);
                tcp_hdr->cksum = 0;
                if (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))
                        tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
                                                               tcp_hdr);
                else /* assume ethertype == ETHER_TYPE_IPv6 */
                        tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
                                                               tcp_hdr);
        } else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) ==
                   RTE_PTYPE_L4_UDP) {
                struct udp_hdr *udp_hdr = (struct udp_hdr *)(l3_hdr +
                                                             mbuf->l3_len);
                udp_hdr->dgram_cksum = 0;
                if (eth_hdr->ether_type == htons(ETHER_TYPE_IPv4))
                        udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
                                                                     udp_hdr);
                else /* assume ethertype == ETHER_TYPE_IPv6 */
                        udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
                                                                     udp_hdr);
        }
}
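
/*
 * Hardware checksum offload: instead of computing checksums on the CPU, the
 * relevant L3/L4 parse results are written into the buffer area that FMan
 * reads on transmit ('prs_buf'), and the FD command word is set so that the
 * hardware recalculates the checksums while sending the frame.
 */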
static inline void dpaa_checksum_offload(struct rte_mbuf *mbuf,
                                         struct qm_fd *fd, char *prs_buf)
{
        struct dpaa_eth_parse_results_t *prs;

        DPAA_DP_LOG(DEBUG, "Offloading checksum for mbuf: %p", mbuf);

        prs = GET_TX_PRS(prs_buf);

        if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
            ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
             RTE_PTYPE_L3_IPV4_EXT))
                prs->l3r = DPAA_L3_PARSE_RESULT_IPV4;
        else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
                  RTE_PTYPE_L3_IPV6) ||
                 ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
                  RTE_PTYPE_L3_IPV6_EXT))
                prs->l3r = DPAA_L3_PARSE_RESULT_IPV6;

        if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
                prs->l4r = DPAA_L4_PARSE_RESULT_TCP;
        else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
                prs->l4r = DPAA_L4_PARSE_RESULT_UDP;

        prs->ip_off[0] = mbuf->l2_len;
        prs->l4_off = mbuf->l3_len + mbuf->l2_len;
        /* Enable L3 (and L4, if TCP or UDP) HW checksum */
        fd->cmd = DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC;
}
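
/*
 * RX path for scatter/gather frames: every SG entry points at a buffer that
 * was originally carved out of an mbuf, so walking the SG table and stepping
 * back by the pool's meta_data_size recovers the mbuf headers and lets the
 * segments be chained into a single multi-segment mbuf. The buffer holding
 * the SG table itself ('temp') is released once the chain is built.
 */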
struct rte_mbuf *
dpaa_eth_sg_to_mbuf(struct qm_fd *fd, uint32_t ifid)
{
        struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
        struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
        struct qm_sg_entry *sgt, *sg_temp;
        void *vaddr, *sg_vaddr;
        int i = 0;
        uint8_t fd_offset = fd->offset;

        DPAA_DP_LOG(DEBUG, "Received an SG frame");

        vaddr = rte_dpaa_mem_ptov(qm_fd_addr(fd));
        if (!vaddr) {
                DPAA_PMD_ERR("unable to convert physical address");
                return NULL;
        }
        sgt = vaddr + fd_offset;
        sg_temp = &sgt[i++];
        hw_sg_to_cpu(sg_temp);
        temp = (struct rte_mbuf *)((char *)vaddr - bp_info->meta_data_size);
        sg_vaddr = rte_dpaa_mem_ptov(qm_sg_entry_get64(sg_temp));

        first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
                                        bp_info->meta_data_size);
        first_seg->data_off = sg_temp->offset;
        first_seg->data_len = sg_temp->length;
        first_seg->pkt_len = sg_temp->length;
        rte_mbuf_refcnt_set(first_seg, 1);

        first_seg->port = ifid;
        first_seg->nb_segs = 1;
        first_seg->ol_flags = 0;
        prev_seg = first_seg;
        while (i < DPAA_SGT_MAX_ENTRIES) {
                sg_temp = &sgt[i++];
                hw_sg_to_cpu(sg_temp);
                sg_vaddr = rte_dpaa_mem_ptov(qm_sg_entry_get64(sg_temp));
                cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
                                              bp_info->meta_data_size);
                cur_seg->data_off = sg_temp->offset;
                cur_seg->data_len = sg_temp->length;
                first_seg->pkt_len += sg_temp->length;
                first_seg->nb_segs += 1;
                rte_mbuf_refcnt_set(cur_seg, 1);
                prev_seg->next = cur_seg;
                if (sg_temp->final) {
                        cur_seg->next = NULL;
                        break;
                }
                prev_seg = cur_seg;
        }

        dpaa_eth_packet_info(first_seg, (uint64_t)vaddr);
        rte_pktmbuf_free_seg(temp);

        return first_seg;
}
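
/*
 * RX path for contiguous frames: format, data offset and length are unpacked
 * from the FD 'opaque' word (the same layout DPAA_MBUF_TO_CONTIG_FD writes on
 * TX), the buffer address is converted back to a virtual address, and the
 * owning mbuf is found immediately in front of the data buffer.
 */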
static inline struct rte_mbuf *dpaa_eth_fd_to_mbuf(struct qm_fd *fd,
                                                   uint32_t ifid)
{
        struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
        struct rte_mbuf *mbuf;
        void *ptr;
        uint8_t format =
                (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
        uint16_t offset =
                (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
        uint32_t length = fd->opaque & DPAA_FD_LENGTH_MASK;

        DPAA_DP_LOG(DEBUG, "FD--->MBUF");

        if (unlikely(format == qm_fd_sg))
                return dpaa_eth_sg_to_mbuf(fd, ifid);

        /* Ignoring case when format != qm_fd_contig */
        dpaa_display_frame(fd);
        ptr = rte_dpaa_mem_ptov(fd->addr);
        /* Ignoring case when ptr would be NULL. That is only possible in case
         * of a corrupted packet.
         */

        mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
        /* Prefetch the parse results and packet data to L1 */
        rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
        rte_prefetch0((void *)((uint8_t *)ptr + offset));

        mbuf->data_off = offset;
        mbuf->data_len = length;
        mbuf->pkt_len = length;

        mbuf->port = ifid;
        mbuf->nb_segs = 1;
        mbuf->ol_flags = 0;
        mbuf->next = NULL;
        rte_mbuf_refcnt_set(mbuf, 1);
        dpaa_eth_packet_info(mbuf, (uint64_t)mbuf->buf_addr);

        return mbuf;
}
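
/*
 * RX burst handler: the calling lcore is affined to a QMan portal (a no-op
 * after the first call on that lcore), a volatile dequeue command for at most
 * DPAA_MAX_DEQUEUE_NUM_FRAMES frames is issued on the frame queue, and each
 * dequeued FD is converted to an mbuf until the dequeue command completes.
 */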
uint16_t dpaa_eth_queue_rx(void *q,
                           struct rte_mbuf **bufs,
                           uint16_t nb_bufs)
{
        struct qman_fq *fq = q;
        struct qm_dqrr_entry *dq;
        uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
        int ret;

        ret = rte_dpaa_portal_init((void *)0);
        if (ret) {
                DPAA_PMD_ERR("Failure in affining portal");
                return 0;
        }

        ret = qman_set_vdq(fq, (nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
                           DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_bufs);
        if (ret)
                return 0;

        do {
                dq = qman_dequeue(fq);
                if (!dq)
                        continue;
                bufs[num_rx++] = dpaa_eth_fd_to_mbuf(&dq->fd, ifid);
                qman_dqrr_consume(fq, dq);
        } while (fq->flags & QMAN_FQ_STATE_VDQCR);

        return num_rx;
}
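
/*
 * Acquire a single buffer directly from the BMan pool backing this interface
 * and return the virtual address of the mbuf header that sits in front of it
 * (i.e. the raw buffer address minus the pool's meta_data_size).
 */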
static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info)
{
        int ret;
        uint64_t buf = 0;
        struct bm_buffer bufs;

        ret = bman_acquire(bp_info->bp, &bufs, 1, 0);
        if (ret <= 0) {
                DPAA_PMD_WARN("Failed to allocate buffers %d", ret);
                return NULL;
        }

        DPAA_DP_LOG(DEBUG, "got buffer 0x%lx from pool %d",
                    (uint64_t)bufs.addr, bufs.bpid);

        buf = (uint64_t)rte_dpaa_mem_ptov(bufs.addr) - bp_info->meta_data_size;

        return (void *)buf;
}
static struct rte_mbuf *dpaa_get_dmable_mbuf(struct rte_mbuf *mbuf,
                                             struct dpaa_if *dpaa_intf)
{
        struct rte_mbuf *dpaa_mbuf;

        /* Allocate a buffer from the dpaa port's own pool so HW can DMA it */
        dpaa_mbuf = dpaa_get_pktbuf(dpaa_intf->bp_info);
        if (!dpaa_mbuf)
                return NULL;

        memcpy((uint8_t *)(dpaa_mbuf->buf_addr) + mbuf->data_off, (void *)
               ((uint8_t *)(mbuf->buf_addr) + mbuf->data_off), mbuf->pkt_len);

        /* Copy only the required fields */
        dpaa_mbuf->data_off = mbuf->data_off;
        dpaa_mbuf->pkt_len = mbuf->pkt_len;
        dpaa_mbuf->ol_flags = mbuf->ol_flags;
        dpaa_mbuf->packet_type = mbuf->packet_type;
        dpaa_mbuf->tx_offload = mbuf->tx_offload;
        rte_pktmbuf_free(mbuf);
        return dpaa_mbuf;
}
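
/*
 * TX path for multi-segment mbufs: a scatter/gather table is built inside a
 * freshly allocated mbuf from the dpaa pool and the FD is pointed at it. For
 * each segment the bpid recorded in the SG entry decides ownership: the real
 * pool bpid lets hardware free the buffer after transmission, while 0xff
 * marks buffers (clones, or their owners) that must stay with the EAL.
 */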
static uint16_t
dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
                       struct qm_fd *fd,
                       uint32_t bpid)
{
        struct rte_mbuf *cur_seg = mbuf, *prev_seg = NULL;
        struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(bpid);
        struct rte_mbuf *temp, *mi;
        struct qm_sg_entry *sg_temp, *sgt;
        int i = 0;

        DPAA_DP_LOG(DEBUG, "Creating SG FD to transmit");

        temp = rte_pktmbuf_alloc(bp_info->mp);
        if (!temp) {
                DPAA_PMD_ERR("Failure in allocation of mbuf");
                return -1;
        }
        if (temp->buf_len < ((mbuf->nb_segs * sizeof(struct qm_sg_entry))
                             + temp->data_off)) {
                DPAA_PMD_ERR("Insufficient space in mbuf for SG entries");
                rte_pktmbuf_free(temp);
                return -1;
        }

        if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
                if (temp->data_off < DEFAULT_TX_ICEOF
                    + sizeof(struct dpaa_eth_parse_results_t))
                        temp->data_off = DEFAULT_TX_ICEOF
                                + sizeof(struct dpaa_eth_parse_results_t);
                dcbz_64(temp->buf_addr);
                dpaa_checksum_offload(mbuf, fd, temp->buf_addr);
        }

        sgt = temp->buf_addr + temp->data_off;
        fd->format = QM_FD_SG;
        fd->addr = temp->buf_physaddr;
        fd->offset = temp->data_off;
        fd->bpid = bpid;
        fd->length20 = mbuf->pkt_len;

        while (i < DPAA_SGT_MAX_ENTRIES) {
                sg_temp = &sgt[i++];
                sg_temp->addr = cur_seg->buf_physaddr;
                sg_temp->offset = cur_seg->data_off;
                sg_temp->length = cur_seg->data_len;
                if (RTE_MBUF_DIRECT(cur_seg)) {
                        if (rte_mbuf_refcnt_read(cur_seg) > 1) {
                                /* If refcnt > 1, invalid bpid is set to ensure
                                 * buffer is not freed by HW.
                                 */
                                sg_temp->bpid = 0xff;
                                rte_mbuf_refcnt_update(cur_seg, -1);
                        } else {
                                sg_temp->bpid =
                                        DPAA_MEMPOOL_TO_BPID(cur_seg->pool);
                        }
                        cur_seg = cur_seg->next;
                } else {
                        /* Get owner mbuf from the indirect buffer */
                        mi = rte_mbuf_from_indirect(cur_seg);
                        if (rte_mbuf_refcnt_read(mi) > 1) {
                                /* If refcnt > 1, invalid bpid is set to ensure
                                 * owner buffer is not freed by HW.
                                 */
                                sg_temp->bpid = 0xff;
                        } else {
                                sg_temp->bpid = DPAA_MEMPOOL_TO_BPID(mi->pool);
                                rte_mbuf_refcnt_update(mi, 1);
                        }
                        prev_seg = cur_seg;
                        cur_seg = cur_seg->next;
                        prev_seg->next = NULL;
                        rte_pktmbuf_free(prev_seg);
                }
                if (cur_seg == NULL) {
                        sg_temp->final = 1;
                        cpu_to_hw_sg(sg_temp);
                        break;
                }
                cpu_to_hw_sg(sg_temp);
        }
        return 0;
}
/* Handle mbufs which are not segmented (non SG) */
static inline void
tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
                            struct dpaa_bp_info *bp_info,
                            struct qm_fd *fd_arr)
{
        struct rte_mbuf *mi = NULL;

        if (RTE_MBUF_DIRECT(mbuf)) {
                if (rte_mbuf_refcnt_read(mbuf) > 1) {
                        /* In case of direct mbuf and mbuf being cloned,
                         * BMAN should _not_ release buffer.
                         */
                        DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
                        /* Buffer should be released by EAL */
                        rte_mbuf_refcnt_update(mbuf, -1);
                } else {
                        /* In case of direct mbuf and no cloning, mbuf can be
                         * released by BMAN.
                         */
                        DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
                }
        } else {
                /* This is the data-containing core mbuf: 'mi' */
                mi = rte_mbuf_from_indirect(mbuf);
                if (rte_mbuf_refcnt_read(mi) > 1) {
                        /* In case of indirect mbuf, and mbuf being cloned,
                         * BMAN should _not_ release it and let EAL release
                         * it through pktmbuf_free below.
                         */
                        DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
                } else {
                        /* In case of indirect mbuf, and no cloning, core mbuf
                         * should be released by BMAN.
                         * Increase refcnt of core mbuf so that when
                         * pktmbuf_free is called and mbuf is released, EAL
                         * doesn't try to release core mbuf which would have
                         * been released by BMAN.
                         */
                        rte_mbuf_refcnt_update(mi, 1);
                        DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
                }
                rte_pktmbuf_free(mbuf);
        }

        if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
                if (mbuf->data_off < (DEFAULT_TX_ICEOF +
                    sizeof(struct dpaa_eth_parse_results_t))) {
                        DPAA_DP_LOG(DEBUG, "Checksum offload error: "
                                    "not enough headroom for checksum offload; "
                                    "calculating checksum in software");
                        dpaa_checksum(mbuf);
                } else {
                        dpaa_checksum_offload(mbuf, fd_arr, mbuf->buf_addr);
                }
        }
}
/* Handle all mbufs on dpaa BMAN managed pool */
static inline uint16_t
tx_on_dpaa_pool(struct rte_mbuf *mbuf,
                struct dpaa_bp_info *bp_info,
                struct qm_fd *fd_arr)
{
        DPAA_DP_LOG(DEBUG, "BMAN offloaded buffer, mbuf: %p", mbuf);

        if (mbuf->nb_segs == 1) {
                /* Case for non-segmented buffers */
                tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr);
        } else if (mbuf->nb_segs > 1 &&
                   mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) {
                if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, bp_info->bpid)) {
                        DPAA_PMD_DEBUG("Unable to create Scatter Gather FD");
                        return 1;
                }
        } else {
                DPAA_PMD_DEBUG("Number of Segments not supported");
                return 1;
        }

        return 0;
}
/* Handle all mbufs on an external pool (non-dpaa) */
static inline uint16_t
tx_on_external_pool(struct qman_fq *txq, struct rte_mbuf *mbuf,
                    struct qm_fd *fd_arr)
{
        struct dpaa_if *dpaa_intf = txq->dpaa_intf;
        struct rte_mbuf *dmable_mbuf;

        DPAA_DP_LOG(DEBUG, "Non-BMAN offloaded buffer. "
                    "Allocating an offloaded buffer");
        dmable_mbuf = dpaa_get_dmable_mbuf(mbuf, dpaa_intf);
        if (!dmable_mbuf) {
                DPAA_DP_LOG(DEBUG, "no dpaa buffers.");
                return 1;
        }

        /* Build the FD from the DMA-able copy; the original mbuf has already
         * been freed by dpaa_get_dmable_mbuf().
         */
        DPAA_MBUF_TO_CONTIG_FD(dmable_mbuf, fd_arr, dpaa_intf->bp_info->bpid);

        return 0;
}
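
/*
 * TX burst handler: frames are prepared in batches of at most
 * MAX_TX_RING_SLOTS FDs. Buffers coming from a dpaa (BMan backed) mempool are
 * handed to hardware directly, anything else is first copied into a DMA-able
 * buffer, and the whole batch is then pushed with qman_enqueue_multi(), which
 * is retried until every prepared FD has been accepted.
 */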
uint16_t
dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct rte_mbuf *mbuf, *mi = NULL;
        struct rte_mempool *mp;
        struct dpaa_bp_info *bp_info;
        struct qm_fd fd_arr[MAX_TX_RING_SLOTS];
        uint32_t frames_to_send, loop, i = 0;
        uint16_t state;
        int ret;

        ret = rte_dpaa_portal_init((void *)0);
        if (ret) {
                DPAA_PMD_ERR("Failure in affining portal");
                return 0;
        }

        DPAA_DP_LOG(DEBUG, "Transmitting %d buffers on queue: %p", nb_bufs, q);

        while (nb_bufs) {
                frames_to_send = (nb_bufs >> 3) ? MAX_TX_RING_SLOTS : nb_bufs;
                for (loop = 0; loop < frames_to_send; loop++, i++) {
                        mbuf = bufs[i];
                        if (RTE_MBUF_DIRECT(mbuf)) {
                                mp = mbuf->pool;
                        } else {
                                mi = rte_mbuf_from_indirect(mbuf);
                                mp = mi->pool;
                        }

                        bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
                        if (likely(mp->ops_index == bp_info->dpaa_ops_index)) {
                                state = tx_on_dpaa_pool(mbuf, bp_info,
                                                        &fd_arr[loop]);
                                if (unlikely(state)) {
                                        /* Set frames_to_send & nb_bufs so
                                         * that packets are transmitted till
                                         * previous frame.
                                         */
                                        frames_to_send = loop;
                                        nb_bufs = loop;
                                        goto send_pkts;
                                }
                        } else {
                                state = tx_on_external_pool(q, mbuf,
                                                            &fd_arr[loop]);
                                if (unlikely(state)) {
                                        /* Set frames_to_send & nb_bufs so
                                         * that packets are transmitted till
                                         * previous frame.
                                         */
                                        frames_to_send = loop;
                                        nb_bufs = loop;
                                        goto send_pkts;
                                }
                        }
                }

send_pkts:
                loop = 0;
                while (loop < frames_to_send) {
                        loop += qman_enqueue_multi(q, &fd_arr[loop],
                                                   frames_to_send - loop);
                }
                nb_bufs -= frames_to_send;
        }

        DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", i, q);

        return i;
}
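
/*
 * Fallback TX handler that simply drops everything; the driver likely plugs
 * this in as the tx_pkt_burst callback while the port is stopped, so nothing
 * is queued to hardware. Returning 0 tells the caller no packets were
 * consumed.
 */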
uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused,
                              struct rte_mbuf **bufs __rte_unused,
                              uint16_t nb_bufs __rte_unused)
{
        DPAA_DP_LOG(DEBUG, "Drop all packets");

        /* Drop all incoming packets. No need to free packets here
         * because the rte_eth f/w frees up the packets through the tx_buffer
         * callback in case this function returns a count less than nb_bufs.
         */
        return 0;
}