net/dpaa: update process specific device info
[dpdk.git] / drivers / net / dpaa / dpaa_rxtx.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017,2019 NXP
 *
 */

/* System headers */
#include <inttypes.h>
#include <unistd.h>
#include <stdio.h>
#include <limits.h>
#include <sched.h>
#include <pthread.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_net.h>
#include <rte_eventdev.h>

#include "dpaa_ethdev.h"
#include "dpaa_rxtx.h"
#include <rte_dpaa_bus.h>
#include <dpaa_mempool.h>

#include <qman.h>
#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <dpaa_of.h>
#include <netcfg.h>

#define DPAA_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) \
        do { \
                (_fd)->cmd = 0; \
                (_fd)->opaque_addr = 0; \
                (_fd)->opaque = QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT; \
                (_fd)->opaque |= ((_mbuf)->data_off) << DPAA_FD_OFFSET_SHIFT; \
                (_fd)->opaque |= (_mbuf)->pkt_len; \
                (_fd)->addr = (_mbuf)->buf_iova; \
                (_fd)->bpid = _bpid; \
        } while (0)
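
/*
 * Note: the macro above packs the FD's format, data offset and length into
 * the single 32-bit 'opaque' word, roughly:
 *
 *   opaque = (QM_FD_CONTIG << DPAA_FD_FORMAT_SHIFT)
 *          | (data_off << DPAA_FD_OFFSET_SHIFT)
 *          | pkt_len;
 *
 * dpaa_eth_fd_to_mbuf() below applies the matching masks and shifts to undo
 * this packing on the receive side.
 */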

#if (defined RTE_LIBRTE_DPAA_DEBUG_DRIVER)
static void dpaa_display_frame(const struct qm_fd *fd)
{
        int ii;
        char *ptr;

        printf("%s::bpid %x addr %08x%08x, format %d off %d, len %d stat %x\n",
               __func__, fd->bpid, fd->addr_hi, fd->addr_lo, fd->format,
               fd->offset, fd->length20, fd->status);

        ptr = (char *)rte_dpaa_mem_ptov(fd->addr);
        ptr += fd->offset;
        /* Dump each payload byte exactly once, 16 bytes per line */
        for (ii = 0; ii < fd->length20; ii++) {
                printf("%02x ", *ptr++);
                if (((ii + 1) % 16) == 0)
                        printf("\n");
        }
        printf("\n");
}
#else
#define dpaa_display_frame(a)
#endif

static inline void dpaa_slow_parsing(struct rte_mbuf *m __rte_unused,
                                     uint64_t prs __rte_unused)
{
        DPAA_DP_LOG(DEBUG, "Slow parsing");
        /* TBD:XXX: to be implemented */
}

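/*
 * Translate the FMan parse results, written by hardware into the
 * annotation area at the start of the frame buffer, into rte_mbuf
 * metadata: packet_type, RSS hash, offload flags, and the L2/L3 lengths
 * packed into tx_offload. Unrecognized parse results fall through to
 * dpaa_slow_parsing().
 */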
static inline void dpaa_eth_packet_info(struct rte_mbuf *m, void *fd_virt_addr)
{
        struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr);
        uint64_t prs = *((uintptr_t *)(&annot->parse)) & DPAA_PARSE_MASK;

        DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot);

        switch (prs) {
        case DPAA_PKT_TYPE_IPV4:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4;
                break;
        case DPAA_PKT_TYPE_IPV6:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6;
                break;
        case DPAA_PKT_TYPE_ETHER:
                m->packet_type = RTE_PTYPE_L2_ETHER;
                break;
        case DPAA_PKT_TYPE_IPV4_FRAG:
        case DPAA_PKT_TYPE_IPV4_FRAG_UDP:
        case DPAA_PKT_TYPE_IPV4_FRAG_TCP:
        case DPAA_PKT_TYPE_IPV4_FRAG_SCTP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG;
                break;
        case DPAA_PKT_TYPE_IPV6_FRAG:
        case DPAA_PKT_TYPE_IPV6_FRAG_UDP:
        case DPAA_PKT_TYPE_IPV6_FRAG_TCP:
        case DPAA_PKT_TYPE_IPV6_FRAG_SCTP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG;
                break;
        case DPAA_PKT_TYPE_IPV4_EXT:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4_EXT;
                break;
        case DPAA_PKT_TYPE_IPV6_EXT:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6_EXT;
                break;
        case DPAA_PKT_TYPE_IPV4_TCP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
                break;
        case DPAA_PKT_TYPE_IPV6_TCP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
                break;
        case DPAA_PKT_TYPE_IPV4_UDP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
                break;
        case DPAA_PKT_TYPE_IPV6_UDP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
                break;
        case DPAA_PKT_TYPE_IPV4_EXT_UDP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP;
                break;
        case DPAA_PKT_TYPE_IPV6_EXT_UDP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP;
                break;
        case DPAA_PKT_TYPE_IPV4_EXT_TCP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP;
                break;
        case DPAA_PKT_TYPE_IPV6_EXT_TCP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP;
                break;
        case DPAA_PKT_TYPE_IPV4_SCTP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
                break;
        case DPAA_PKT_TYPE_IPV6_SCTP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
                break;
        case DPAA_PKT_TYPE_NONE:
                m->packet_type = 0;
                break;
        /* More switch cases can be added */
        default:
                dpaa_slow_parsing(m, prs);
        }

        m->tx_offload = annot->parse.ip_off[0];
        m->tx_offload |= (annot->parse.l4_off - annot->parse.ip_off[0])
                                        << DPAA_PKT_L3_LEN_SHIFT;

        /* Set the hash values */
        m->hash.rss = (uint32_t)(annot->hash);
        /* All packets with bad checksums are dropped by the interface (and
         * a corresponding notification is issued to the RX error queues).
         */
        m->ol_flags = PKT_RX_RSS_HASH | PKT_RX_IP_CKSUM_GOOD;

        /* Check if VLAN is present */
        if (prs & DPAA_PARSE_VLAN_MASK)
                m->ol_flags |= PKT_RX_VLAN;
        /* The packet is received without the VLAN tag being stripped */
}

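/*
 * Software checksum fallback: computes the IPv4 header checksum and the
 * TCP/UDP checksum (with pseudo-header) on the CPU and writes them into
 * the packet. Used when the headroom is too small for the hardware
 * offload path (see dpaa_unsegmented_checksum() below).
 */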
static inline void dpaa_checksum(struct rte_mbuf *mbuf)
{
        struct rte_ether_hdr *eth_hdr =
                rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
        char *l3_hdr = (char *)eth_hdr + mbuf->l2_len;
        struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
        struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;

        DPAA_DP_LOG(DEBUG, "Calculating checksum for mbuf: %p", mbuf);

        if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
            ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
            RTE_PTYPE_L3_IPV4_EXT)) {
                ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
                ipv4_hdr->hdr_checksum = 0;
                ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
        } else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
                   RTE_PTYPE_L3_IPV6) ||
                   ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
                   RTE_PTYPE_L3_IPV6_EXT))
                ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;

        if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
                struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)(l3_hdr +
                                          mbuf->l3_len);
                tcp_hdr->cksum = 0;
                if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPV4))
                        tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
                                                               tcp_hdr);
                else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
                        tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
                                                               tcp_hdr);
        } else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) ==
                   RTE_PTYPE_L4_UDP) {
                struct rte_udp_hdr *udp_hdr = (struct rte_udp_hdr *)(l3_hdr +
                                                             mbuf->l3_len);
                udp_hdr->dgram_cksum = 0;
                if (eth_hdr->ether_type == htons(RTE_ETHER_TYPE_IPV4))
                        udp_hdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4_hdr,
                                                                     udp_hdr);
                else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
                        udp_hdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6_hdr,
                                                                     udp_hdr);
        }
}

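/*
 * Hardware checksum offload: rather than touching the payload, describe
 * the frame to FMan by filling a dpaa_eth_parse_results_t in the buffer
 * headroom (where the L3/L4 headers start and which protocols they are)
 * and set the FD command bits so the hardware inserts the checksums on
 * the way out.
 */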
static inline void dpaa_checksum_offload(struct rte_mbuf *mbuf,
                                         struct qm_fd *fd, char *prs_buf)
{
        struct dpaa_eth_parse_results_t *prs;

        DPAA_DP_LOG(DEBUG, " Offloading checksum for mbuf: %p", mbuf);

        prs = GET_TX_PRS(prs_buf);
        prs->l3r = 0;
        prs->l4r = 0;
        if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) ||
           ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
           RTE_PTYPE_L3_IPV4_EXT))
                prs->l3r = DPAA_L3_PARSE_RESULT_IPV4;
        else if (((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
                   RTE_PTYPE_L3_IPV6) ||
                 ((mbuf->packet_type & RTE_PTYPE_L3_MASK) ==
                RTE_PTYPE_L3_IPV6_EXT))
                prs->l3r = DPAA_L3_PARSE_RESULT_IPV6;

        if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
                prs->l4r = DPAA_L4_PARSE_RESULT_TCP;
        else if ((mbuf->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP)
                prs->l4r = DPAA_L4_PARSE_RESULT_UDP;

        prs->ip_off[0] = mbuf->l2_len;
        prs->l4_off = mbuf->l3_len + mbuf->l2_len;
        /* Enable L3 (and L4, if TCP or UDP) HW checksum */
        fd->cmd = DPAA_FD_CMD_RPD | DPAA_FD_CMD_DTC;
}

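/*
 * For a contiguous (single-segment) frame, choose between the two paths
 * above: use hardware offload when the headroom can hold the parse
 * results at DEFAULT_TX_ICEOF, otherwise fall back to software.
 */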
static inline void
dpaa_unsegmented_checksum(struct rte_mbuf *mbuf, struct qm_fd *fd_arr)
{
        if (!mbuf->packet_type) {
                struct rte_net_hdr_lens hdr_lens;

                mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
                                RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
                                | RTE_PTYPE_L4_MASK);
                mbuf->l2_len = hdr_lens.l2_len;
                mbuf->l3_len = hdr_lens.l3_len;
        }
        if (mbuf->data_off < (DEFAULT_TX_ICEOF +
            sizeof(struct dpaa_eth_parse_results_t))) {
                DPAA_DP_LOG(DEBUG, "Checksum offload error: "
                        "not enough headroom for checksum offload; "
                        "calculating the checksum in software instead.");
                dpaa_checksum(mbuf);
        } else {
                dpaa_checksum_offload(mbuf, fd_arr, mbuf->buf_addr);
        }
}

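/*
 * Convert a scatter/gather FD into an mbuf chain. The FD points at a
 * buffer containing the SG table; each table entry points at one data
 * buffer. Since every DPAA buffer comes from an rte_mempool, the mbuf
 * header for a buffer sits meta_data_size bytes before the buffer
 * address handed back by hardware, which is why the conversions below
 * are plain pointer arithmetic, e.g.:
 *
 *   mbuf = (struct rte_mbuf *)((char *)vaddr - bp_info->meta_data_size);
 *
 * The buffer holding the SG table itself ('temp') is released once the
 * chain is assembled.
 */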
struct rte_mbuf *
dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
{
        struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
        struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
        struct qm_sg_entry *sgt, *sg_temp;
        void *vaddr, *sg_vaddr;
        int i = 0;
        uint16_t fd_offset = fd->offset;

        vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
        if (!vaddr) {
                DPAA_PMD_ERR("unable to convert physical address");
                return NULL;
        }
        sgt = vaddr + fd_offset;
        sg_temp = &sgt[i++];
        hw_sg_to_cpu(sg_temp);
        temp = (struct rte_mbuf *)((char *)vaddr - bp_info->meta_data_size);
        sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_sg_entry_get64(sg_temp));

        first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
                                                bp_info->meta_data_size);
        first_seg->data_off = sg_temp->offset;
        first_seg->data_len = sg_temp->length;
        first_seg->pkt_len = sg_temp->length;
        rte_mbuf_refcnt_set(first_seg, 1);

        first_seg->port = ifid;
        first_seg->nb_segs = 1;
        first_seg->ol_flags = 0;
        prev_seg = first_seg;
        while (i < DPAA_SGT_MAX_ENTRIES) {
                sg_temp = &sgt[i++];
                hw_sg_to_cpu(sg_temp);
                sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
                                             qm_sg_entry_get64(sg_temp));
                cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
                                                      bp_info->meta_data_size);
                cur_seg->data_off = sg_temp->offset;
                cur_seg->data_len = sg_temp->length;
                first_seg->pkt_len += sg_temp->length;
                first_seg->nb_segs += 1;
                rte_mbuf_refcnt_set(cur_seg, 1);
                prev_seg->next = cur_seg;
                if (sg_temp->final) {
                        cur_seg->next = NULL;
                        break;
                }
                prev_seg = cur_seg;
        }
        DPAA_DP_LOG(DEBUG, "Received an SG frame len =%d, num_sg =%d",
                        first_seg->pkt_len, first_seg->nb_segs);

        dpaa_eth_packet_info(first_seg, vaddr);
        rte_pktmbuf_free_seg(temp);

        return first_seg;
}

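/*
 * Convert a single FD into an mbuf: contiguous frames are handled
 * inline, SG frames are delegated to dpaa_eth_sg_to_mbuf(). Offset and
 * length are unpacked from the 'opaque' word described at the top of
 * this file.
 */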
static inline struct rte_mbuf *
dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
{
        struct rte_mbuf *mbuf;
        struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
        void *ptr;
        uint8_t format =
                (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
        uint16_t offset;
        uint32_t length;

        if (unlikely(format == qm_fd_sg))
                return dpaa_eth_sg_to_mbuf(fd, ifid);

        offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >> DPAA_FD_OFFSET_SHIFT;
        length = fd->opaque & DPAA_FD_LENGTH_MASK;

        DPAA_DP_LOG(DEBUG, " FD--->MBUF off %d len = %d", offset, length);

        /* Ignoring case when format != qm_fd_contig */
        dpaa_display_frame(fd);
        ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));

        mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
        /* Prefetch the Parse results and packet data to L1 */
        rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));

        mbuf->data_off = offset;
        mbuf->data_len = length;
        mbuf->pkt_len = length;

        mbuf->port = ifid;
        mbuf->nb_segs = 1;
        mbuf->ol_flags = 0;
        mbuf->next = NULL;
        rte_mbuf_refcnt_set(mbuf, 1);
        dpaa_eth_packet_info(mbuf, mbuf->buf_addr);

        return mbuf;
}

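/*
 * Release every buffer referenced by an FD back to its mempool without
 * building a packet, e.g. for frames returned to software on the TX
 * error (enqueue rejection) path.
 */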
uint16_t
dpaa_free_mbuf(const struct qm_fd *fd)
{
        struct rte_mbuf *mbuf;
        struct dpaa_bp_info *bp_info;
        uint8_t format;
        void *ptr;

        bp_info = DPAA_BPID_TO_POOL_INFO(fd->bpid);
        format = (fd->opaque & DPAA_FD_FORMAT_MASK) >> DPAA_FD_FORMAT_SHIFT;
        if (unlikely(format == qm_fd_sg)) {
                struct rte_mbuf *first_seg, *prev_seg, *cur_seg, *temp;
                struct qm_sg_entry *sgt, *sg_temp;
                void *vaddr, *sg_vaddr;
                int i = 0;
                uint16_t fd_offset = fd->offset;

                vaddr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
                if (!vaddr) {
                        DPAA_PMD_ERR("unable to convert physical address");
                        return -1;
                }
                sgt = vaddr + fd_offset;
                sg_temp = &sgt[i++];
                hw_sg_to_cpu(sg_temp);
                temp = (struct rte_mbuf *)
                        ((char *)vaddr - bp_info->meta_data_size);
                sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
                                                qm_sg_entry_get64(sg_temp));

                first_seg = (struct rte_mbuf *)((char *)sg_vaddr -
                                                bp_info->meta_data_size);
                first_seg->nb_segs = 1;
                prev_seg = first_seg;
                while (i < DPAA_SGT_MAX_ENTRIES) {
                        sg_temp = &sgt[i++];
                        hw_sg_to_cpu(sg_temp);
                        sg_vaddr = DPAA_MEMPOOL_PTOV(bp_info,
                                                qm_sg_entry_get64(sg_temp));
                        cur_seg = (struct rte_mbuf *)((char *)sg_vaddr -
                                                      bp_info->meta_data_size);
                        first_seg->nb_segs += 1;
                        prev_seg->next = cur_seg;
                        if (sg_temp->final) {
                                cur_seg->next = NULL;
                                break;
                        }
                        prev_seg = cur_seg;
                }

                rte_pktmbuf_free_seg(temp);
                rte_pktmbuf_free_seg(first_seg);
                return 0;
        }

        ptr = DPAA_MEMPOOL_PTOV(bp_info, qm_fd_addr(fd));
        mbuf = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);

        rte_pktmbuf_free(mbuf);

        return 0;
}

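/*
 * Burst RX callbacks used by the static (portal-polling) dequeue path.
 * The "no_prefetch" variant below issues a software prefetch of frame
 * i+1's annotation area while frame i is being converted, hiding the
 * cache miss on platforms without hardware annotation stashing.
 */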
/* Specific for LS1043 */
void
dpaa_rx_cb_no_prefetch(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
           void **bufs, int num_bufs)
{
        struct rte_mbuf *mbuf;
        struct dpaa_bp_info *bp_info;
        const struct qm_fd *fd;
        void *ptr;
        struct dpaa_if *dpaa_intf;
        uint16_t offset, i;
        uint32_t length;
        uint8_t format;

        bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[0]->fd.bpid);
        ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[0]->fd));
        rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
        bufs[0] = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);

        for (i = 0; i < num_bufs; i++) {
                if (i < num_bufs - 1) {
                        bp_info = DPAA_BPID_TO_POOL_INFO(dqrr[i + 1]->fd.bpid);
                        ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dqrr[i + 1]->fd));
                        rte_prefetch0((void *)((uint8_t *)ptr +
                                        DEFAULT_RX_ICEOF));
                        bufs[i + 1] = (struct rte_mbuf *)((char *)ptr -
                                        bp_info->meta_data_size);
                }

                fd = &dqrr[i]->fd;
                dpaa_intf = fq[0]->dpaa_intf;

                format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
                                DPAA_FD_FORMAT_SHIFT;
                if (unlikely(format == qm_fd_sg)) {
                        bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid);
                        continue;
                }

                offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >>
                                DPAA_FD_OFFSET_SHIFT;
                length = fd->opaque & DPAA_FD_LENGTH_MASK;

                mbuf = bufs[i];
                mbuf->data_off = offset;
                mbuf->data_len = length;
                mbuf->pkt_len = length;
                mbuf->port = dpaa_intf->ifid;

                mbuf->nb_segs = 1;
                mbuf->ol_flags = 0;
                mbuf->next = NULL;
                rte_mbuf_refcnt_set(mbuf, 1);
                dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
        }
}

void
dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
           void **bufs, int num_bufs)
{
        struct rte_mbuf *mbuf;
        const struct qm_fd *fd;
        struct dpaa_if *dpaa_intf;
        uint16_t offset, i;
        uint32_t length;
        uint8_t format;

        for (i = 0; i < num_bufs; i++) {
                fd = &dqrr[i]->fd;
                dpaa_intf = fq[0]->dpaa_intf;

                format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
                                DPAA_FD_FORMAT_SHIFT;
                if (unlikely(format == qm_fd_sg)) {
                        bufs[i] = dpaa_eth_sg_to_mbuf(fd, dpaa_intf->ifid);
                        continue;
                }

                offset = (fd->opaque & DPAA_FD_OFFSET_MASK) >>
                                DPAA_FD_OFFSET_SHIFT;
                length = fd->opaque & DPAA_FD_LENGTH_MASK;

                mbuf = bufs[i];
                mbuf->data_off = offset;
                mbuf->data_len = length;
                mbuf->pkt_len = length;
                mbuf->port = dpaa_intf->ifid;

                mbuf->nb_segs = 1;
                mbuf->ol_flags = 0;
                mbuf->next = NULL;
                rte_mbuf_refcnt_set(mbuf, 1);
                dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
        }
}

void dpaa_rx_cb_prepare(struct qm_dqrr_entry *dq, void **bufs)
{
        struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(dq->fd.bpid);
        void *ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dq->fd));

        /* On LS1046, annotation stashing is disabled because the L2 cache
         * becomes a bottleneck in multi-core scenarios on this platform.
         * So we prefetch the annotation beforehand, so that it is already
         * in cache when accessed.
         */
        rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));

        *bufs = (struct rte_mbuf *)((char *)ptr - bp_info->meta_data_size);
}

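/*
 * RX through a dedicated queue portal, used when the FQ is "static"
 * (bound to a channel polled directly by this lcore's portal). The
 * portal binding is initialized lazily on the first burst.
 */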
static uint16_t
dpaa_eth_queue_portal_rx(struct qman_fq *fq,
                         struct rte_mbuf **bufs,
                         uint16_t nb_bufs)
{
        int ret;

        if (unlikely(!fq->qp_initialized)) {
                ret = rte_dpaa_portal_fq_init((void *)0, fq);
                if (ret) {
                        DPAA_PMD_ERR("Failure in affining portal %d", ret);
                        return 0;
                }
                fq->qp_initialized = 1;
        }

        return qman_portal_poll_rx(nb_bufs, (void **)bufs, fq->qp);
}

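/*
 * Eventdev RX callbacks. The parallel variant consumes the DQRR entry
 * immediately. The atomic variant defers consumption: it records the
 * DQRR index in the per-lcore held mask and in mbuf->seqn, so the entry
 * can be consumed via DCA when the mbuf is later transmitted (see the
 * DCA handling in dpaa_eth_queue_tx()), preserving atomic-flow ordering.
 */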
enum qman_cb_dqrr_result
dpaa_rx_cb_parallel(void *event,
                    struct qman_portal *qm __always_unused,
                    struct qman_fq *fq,
                    const struct qm_dqrr_entry *dqrr,
                    void **bufs)
{
        u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
        struct rte_mbuf *mbuf;
        struct rte_event *ev = (struct rte_event *)event;

        mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid);
        ev->event_ptr = (void *)mbuf;
        ev->flow_id = fq->ev.flow_id;
        ev->sub_event_type = fq->ev.sub_event_type;
        ev->event_type = RTE_EVENT_TYPE_ETHDEV;
        ev->op = RTE_EVENT_OP_NEW;
        ev->sched_type = fq->ev.sched_type;
        ev->queue_id = fq->ev.queue_id;
        ev->priority = fq->ev.priority;
        ev->impl_opaque = (uint8_t)DPAA_INVALID_MBUF_SEQN;
        mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
        *bufs = mbuf;

        return qman_cb_dqrr_consume;
}

enum qman_cb_dqrr_result
dpaa_rx_cb_atomic(void *event,
                  struct qman_portal *qm __always_unused,
                  struct qman_fq *fq,
                  const struct qm_dqrr_entry *dqrr,
                  void **bufs)
{
        u8 index;
        u32 ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
        struct rte_mbuf *mbuf;
        struct rte_event *ev = (struct rte_event *)event;

        mbuf = dpaa_eth_fd_to_mbuf(&dqrr->fd, ifid);
        ev->event_ptr = (void *)mbuf;
        ev->flow_id = fq->ev.flow_id;
        ev->sub_event_type = fq->ev.sub_event_type;
        ev->event_type = RTE_EVENT_TYPE_ETHDEV;
        ev->op = RTE_EVENT_OP_NEW;
        ev->sched_type = fq->ev.sched_type;
        ev->queue_id = fq->ev.queue_id;
        ev->priority = fq->ev.priority;

        /* Save active dqrr entries */
        index = DQRR_PTR2IDX(dqrr);
        DPAA_PER_LCORE_DQRR_SIZE++;
        DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
        DPAA_PER_LCORE_DQRR_MBUF(index) = mbuf;
        ev->impl_opaque = index + 1;
        mbuf->seqn = (uint32_t)index + 1;
        *bufs = mbuf;

        return qman_cb_dqrr_defer;
}

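/*
 * Default RX burst handler. Static queues go through the portal path
 * above; otherwise a QMan volatile dequeue command (VDQCR) is issued
 * for up to num_rx_bufs frames and the ring is drained until the
 * command completes.
 */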
uint16_t dpaa_eth_queue_rx(void *q,
                           struct rte_mbuf **bufs,
                           uint16_t nb_bufs)
{
        struct qman_fq *fq = q;
        struct qm_dqrr_entry *dq;
        uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
        int num_rx_bufs, ret;
        uint32_t vdqcr_flags = 0;

        if (unlikely(rte_dpaa_bpid_info == NULL &&
                                rte_eal_process_type() == RTE_PROC_SECONDARY))
                rte_dpaa_bpid_info = fq->bp_array;

        if (likely(fq->is_static))
                return dpaa_eth_queue_portal_rx(fq, bufs, nb_bufs);

        if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
                ret = rte_dpaa_portal_init((void *)0);
                if (ret) {
                        DPAA_PMD_ERR("Failure in affining portal");
                        return 0;
                }
        }

        /* For requests of fewer than four buffers, we set the QM_VDQCR_EXACT
         * flag and ask for the exact number. Otherwise we leave the flag
         * unset; without QM_VDQCR_EXACT the hardware may return up to two
         * more buffers than requested, so we request two fewer in that case.
         */
        if (nb_bufs < 4) {
                vdqcr_flags = QM_VDQCR_EXACT;
                num_rx_bufs = nb_bufs;
        } else {
                num_rx_bufs = nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
                        (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_bufs - 2);
        }
        ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
        if (ret)
                return 0;

        do {
                dq = qman_dequeue(fq);
                if (!dq)
                        continue;
                bufs[num_rx++] = dpaa_eth_fd_to_mbuf(&dq->fd, ifid);
                qman_dqrr_consume(fq, dq);
        } while (fq->flags & QMAN_FQ_STATE_VDQCR);

        return num_rx;
}

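/*
 * Build a scatter/gather FD for a multi-segment mbuf. A buffer ('temp')
 * is taken from the DPAA pool to hold the SG table; the FD points at
 * that table and each entry at one segment. Release ownership is encoded
 * per entry via the bpid: a real bpid lets BMan free the segment after
 * transmission, while the invalid bpid 0xff keeps cloned or shared
 * buffers out of hardware's hands so software frees them instead.
 */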
int
dpaa_eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
                struct qm_fd *fd,
                uint32_t bpid)
{
        struct rte_mbuf *cur_seg = mbuf, *prev_seg = NULL;
        struct dpaa_bp_info *bp_info = DPAA_BPID_TO_POOL_INFO(bpid);
        struct rte_mbuf *temp, *mi;
        struct qm_sg_entry *sg_temp, *sgt;
        int i = 0;

        DPAA_DP_LOG(DEBUG, "Creating SG FD to transmit");

        temp = rte_pktmbuf_alloc(bp_info->mp);
        if (!temp) {
                DPAA_PMD_ERR("Failure in allocation of mbuf");
                return -1;
        }
        if (temp->buf_len < ((mbuf->nb_segs * sizeof(struct qm_sg_entry))
                                + temp->data_off)) {
                DPAA_PMD_ERR("Insufficient space in mbuf for SG entries");
                /* Return the SG-table buffer allocated above to its pool */
                rte_pktmbuf_free(temp);
                return -1;
        }

        fd->cmd = 0;
        fd->opaque_addr = 0;

        if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK) {
                if (!mbuf->packet_type) {
                        struct rte_net_hdr_lens hdr_lens;

                        mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
                                        RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
                                        | RTE_PTYPE_L4_MASK);
                        mbuf->l2_len = hdr_lens.l2_len;
                        mbuf->l3_len = hdr_lens.l3_len;
                }
                if (temp->data_off < DEFAULT_TX_ICEOF
                        + sizeof(struct dpaa_eth_parse_results_t))
                        temp->data_off = DEFAULT_TX_ICEOF
                                + sizeof(struct dpaa_eth_parse_results_t);
                dcbz_64(temp->buf_addr);
                dpaa_checksum_offload(mbuf, fd, temp->buf_addr);
        }

        sgt = temp->buf_addr + temp->data_off;
        fd->format = QM_FD_SG;
        fd->addr = temp->buf_iova;
        fd->offset = temp->data_off;
        fd->bpid = bpid;
        fd->length20 = mbuf->pkt_len;

        while (i < DPAA_SGT_MAX_ENTRIES) {
                sg_temp = &sgt[i++];
                sg_temp->opaque = 0;
                sg_temp->val = 0;
                sg_temp->addr = cur_seg->buf_iova;
                sg_temp->offset = cur_seg->data_off;
                sg_temp->length = cur_seg->data_len;
                if (RTE_MBUF_DIRECT(cur_seg)) {
                        if (rte_mbuf_refcnt_read(cur_seg) > 1) {
                                /* If refcnt > 1, invalid bpid is set to ensure
                                 * buffer is not freed by HW.
                                 */
                                sg_temp->bpid = 0xff;
                                rte_mbuf_refcnt_update(cur_seg, -1);
                        } else {
                                sg_temp->bpid =
                                        DPAA_MEMPOOL_TO_BPID(cur_seg->pool);
                        }
                        cur_seg = cur_seg->next;
                } else {
                        /* Get owner MBUF from indirect buffer */
                        mi = rte_mbuf_from_indirect(cur_seg);
                        if (rte_mbuf_refcnt_read(mi) > 1) {
                                /* If refcnt > 1, invalid bpid is set to ensure
                                 * owner buffer is not freed by HW.
                                 */
                                sg_temp->bpid = 0xff;
                        } else {
                                sg_temp->bpid = DPAA_MEMPOOL_TO_BPID(mi->pool);
                                rte_mbuf_refcnt_update(mi, 1);
                        }
                        prev_seg = cur_seg;
                        cur_seg = cur_seg->next;
                        prev_seg->next = NULL;
                        rte_pktmbuf_free(prev_seg);
                }
                if (cur_seg == NULL) {
                        sg_temp->final = 1;
                        cpu_to_hw_sg(sg_temp);
                        break;
                }
                cpu_to_hw_sg(sg_temp);
        }
        return 0;
}

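/*
 * TX ownership rules used below, in short: each transmitted buffer is
 * freed exactly once, either by hardware (BMan, when the FD carries the
 * pool's real bpid) or by software (when the FD carries the invalid
 * bpid 0xff). Which side frees depends on whether the mbuf is direct or
 * indirect and on its reference count.
 */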
/* Handle mbufs which are not segmented (non SG) */
static inline void
tx_on_dpaa_pool_unsegmented(struct rte_mbuf *mbuf,
                            struct dpaa_bp_info *bp_info,
                            struct qm_fd *fd_arr)
{
        struct rte_mbuf *mi = NULL;

        if (RTE_MBUF_DIRECT(mbuf)) {
                if (rte_mbuf_refcnt_read(mbuf) > 1) {
                        /* In case of direct mbuf and mbuf being cloned,
                         * BMAN should _not_ release buffer.
                         */
                        DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
                        /* Buffer should be released by EAL */
                        rte_mbuf_refcnt_update(mbuf, -1);
                } else {
                        /* In case of direct mbuf and no cloning, mbuf can be
                         * released by BMAN.
                         */
                        DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
                }
        } else {
                /* This is data-containing core mbuf: 'mi' */
                mi = rte_mbuf_from_indirect(mbuf);
                if (rte_mbuf_refcnt_read(mi) > 1) {
                        /* In case of indirect mbuf, and mbuf being cloned,
                         * BMAN should _not_ release it and let EAL release
                         * it through pktmbuf_free below.
                         */
                        DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, 0xff);
                } else {
                        /* In case of indirect mbuf, and no cloning, core mbuf
                         * should be released by BMAN.
                         * Increase refcnt of core mbuf so that when
                         * pktmbuf_free is called and mbuf is released, EAL
                         * doesn't try to release core mbuf which would have
                         * been released by BMAN.
                         */
                        rte_mbuf_refcnt_update(mi, 1);
                        DPAA_MBUF_TO_CONTIG_FD(mbuf, fd_arr, bp_info->bpid);
                }
        }

        if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK)
                dpaa_unsegmented_checksum(mbuf, fd_arr);

        /* Free the indirect mbuf only after its flags and offload fields
         * have been consumed above, to avoid a use-after-free.
         */
        if (mi)
                rte_pktmbuf_free(mbuf);
}

/* Handle all mbufs on dpaa BMAN managed pool */
static inline uint16_t
tx_on_dpaa_pool(struct rte_mbuf *mbuf,
                struct dpaa_bp_info *bp_info,
                struct qm_fd *fd_arr)
{
        DPAA_DP_LOG(DEBUG, "BMAN offloaded buffer, mbuf: %p", mbuf);

        if (mbuf->nb_segs == 1) {
                /* Case for non-segmented buffers */
                tx_on_dpaa_pool_unsegmented(mbuf, bp_info, fd_arr);
        } else if (mbuf->nb_segs > 1 &&
                   mbuf->nb_segs <= DPAA_SGT_MAX_ENTRIES) {
                if (dpaa_eth_mbuf_to_sg_fd(mbuf, fd_arr, bp_info->bpid)) {
                        DPAA_PMD_DEBUG("Unable to create Scatter Gather FD");
                        return 1;
                }
        } else {
                DPAA_PMD_DEBUG("Number of Segments not supported");
                return 1;
        }

        return 0;
}

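/*
 * Copy a packet whose segments cannot be handed to hardware as-is (a
 * non-DPAA pool, or a buffer hitting the alignment errata) into fresh
 * buffers from the interface's BMan-backed pool. The copy walks the
 * source and destination chains in lock-step, with offset1 tracking the
 * position in the current source segment and offset2 in the current
 * destination segment.
 */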
/* Handle all mbufs on an external pool (non-dpaa) */
static inline struct rte_mbuf *
reallocate_mbuf(struct qman_fq *txq, struct rte_mbuf *mbuf)
{
        struct dpaa_if *dpaa_intf = txq->dpaa_intf;
        struct dpaa_bp_info *bp_info = dpaa_intf->bp_info;
        struct rte_mbuf *new_mbufs[DPAA_SGT_MAX_ENTRIES + 1] = {0};
        struct rte_mbuf *temp_mbuf;
        int num_new_segs, mbuf_greater, ret, extra_seg = 0, i = 0;
        uint64_t mbufs_size, bytes_to_copy, offset1 = 0, offset2 = 0;
        char *data;

        DPAA_DP_LOG(DEBUG, "Reallocating transmit buffer");

        mbufs_size = bp_info->size -
                bp_info->meta_data_size - RTE_PKTMBUF_HEADROOM;
        extra_seg = !!(mbuf->pkt_len % mbufs_size);
        num_new_segs = (mbuf->pkt_len / mbufs_size) + extra_seg;

        ret = rte_pktmbuf_alloc_bulk(bp_info->mp, new_mbufs, num_new_segs);
        if (ret != 0) {
                DPAA_DP_LOG(DEBUG, "Allocation for new buffers failed");
                return NULL;
        }

        temp_mbuf = mbuf;

        while (temp_mbuf) {
                /* If mbuf data is less than new mbuf remaining memory */
                if ((temp_mbuf->data_len - offset1) < (mbufs_size - offset2)) {
                        bytes_to_copy = temp_mbuf->data_len - offset1;
                        mbuf_greater = -1;
                /* If mbuf data is greater than new mbuf remaining memory */
                } else if ((temp_mbuf->data_len - offset1) >
                           (mbufs_size - offset2)) {
                        bytes_to_copy = mbufs_size - offset2;
                        mbuf_greater = 1;
                /* If mbuf data is equal to new mbuf remaining memory */
                } else {
                        bytes_to_copy = temp_mbuf->data_len - offset1;
                        mbuf_greater = 0;
                }

                /* Copy the data from the current source segment */
                data = rte_pktmbuf_append(new_mbufs[0], bytes_to_copy);

                rte_memcpy((uint8_t *)data, rte_pktmbuf_mtod_offset(temp_mbuf,
                           void *, offset1), bytes_to_copy);

                /* Set new offsets and the temp buffers */
                if (mbuf_greater == -1) {
                        offset1 = 0;
                        offset2 += bytes_to_copy;
                        temp_mbuf = temp_mbuf->next;
                } else if (mbuf_greater == 1) {
                        offset2 = 0;
                        offset1 += bytes_to_copy;
                        new_mbufs[i]->next = new_mbufs[i + 1];
                        new_mbufs[0]->nb_segs++;
                        i++;
                } else {
                        offset1 = 0;
                        offset2 = 0;
                        temp_mbuf = temp_mbuf->next;
                        new_mbufs[i]->next = new_mbufs[i + 1];
                        if (new_mbufs[i + 1])
                                new_mbufs[0]->nb_segs++;
                        i++;
                }
        }

        /* Copy other required fields */
        new_mbufs[0]->ol_flags = mbuf->ol_flags;
        new_mbufs[0]->packet_type = mbuf->packet_type;
        new_mbufs[0]->tx_offload = mbuf->tx_offload;

        rte_pktmbuf_free(mbuf);

        return new_mbufs[0];
}

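/*
 * TX burst handler. The fast path (direct, single-segment, refcnt 1,
 * DPAA-pool, aligned mbuf) builds a contiguous FD in place; everything
 * else is routed through reallocation and/or the SG path. FDs are
 * accumulated in fd_arr and pushed to QMan in bursts of up to
 * DPAA_TX_BURST_SIZE frames via qman_enqueue_multi().
 */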
uint16_t
dpaa_eth_queue_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct rte_mbuf *mbuf, *mi = NULL;
        struct rte_mempool *mp;
        struct dpaa_bp_info *bp_info;
        struct qm_fd fd_arr[DPAA_TX_BURST_SIZE];
        uint32_t frames_to_send, loop, sent = 0;
        uint16_t state;
        int ret, realloc_mbuf = 0;
        uint32_t seqn, index, flags[DPAA_TX_BURST_SIZE] = {0};

        if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
                ret = rte_dpaa_portal_init((void *)0);
                if (ret) {
                        DPAA_PMD_ERR("Failure in affining portal");
                        return 0;
                }
        }

        DPAA_DP_LOG(DEBUG, "Transmitting %d buffers on queue: %p", nb_bufs, q);

        while (nb_bufs) {
                frames_to_send = (nb_bufs > DPAA_TX_BURST_SIZE) ?
                                DPAA_TX_BURST_SIZE : nb_bufs;
                for (loop = 0; loop < frames_to_send; loop++) {
                        mbuf = *(bufs++);
                        /* If the data offset is not a multiple of 16, FMAN
                         * can stall because of an errata, so reallocate the
                         * buffer in that case (the 0x7F mask below enforces
                         * the stricter 128-byte alignment).
                         */
                        if (dpaa_svr_family == SVR_LS1043A_FAMILY &&
                                        (mbuf->data_off & 0x7F) != 0x0)
                                realloc_mbuf = 1;
                        seqn = mbuf->seqn;
                        if (seqn != DPAA_INVALID_MBUF_SEQN) {
                                index = seqn - 1;
                                if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
                                        flags[loop] =
                                           ((index & QM_EQCR_DCA_IDXMASK) << 8);
                                        flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
                                        DPAA_PER_LCORE_DQRR_SIZE--;
                                        DPAA_PER_LCORE_DQRR_HELD &=
                                                                ~(1 << index);
                                }
                        }

                        if (likely(RTE_MBUF_DIRECT(mbuf))) {
                                mp = mbuf->pool;
                                bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
                                if (likely(mp->ops_index ==
                                                bp_info->dpaa_ops_index &&
                                        mbuf->nb_segs == 1 &&
                                        realloc_mbuf == 0 &&
                                        rte_mbuf_refcnt_read(mbuf) == 1)) {
                                        DPAA_MBUF_TO_CONTIG_FD(mbuf,
                                                &fd_arr[loop], bp_info->bpid);
                                        if (mbuf->ol_flags &
                                                DPAA_TX_CKSUM_OFFLOAD_MASK)
                                                dpaa_unsegmented_checksum(mbuf,
                                                        &fd_arr[loop]);
                                        continue;
                                }
                        } else {
                                mi = rte_mbuf_from_indirect(mbuf);
                                mp = mi->pool;
                        }

                        bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
                        if (unlikely(mp->ops_index != bp_info->dpaa_ops_index ||
                                     realloc_mbuf == 1)) {
                                struct rte_mbuf *temp_mbuf;

                                temp_mbuf = reallocate_mbuf(q, mbuf);
                                if (!temp_mbuf) {
                                        /* Set frames_to_send & nb_bufs so
                                         * that packets are transmitted till
                                         * previous frame.
                                         */
                                        frames_to_send = loop;
                                        nb_bufs = loop;
                                        goto send_pkts;
                                }
                                mbuf = temp_mbuf;
                                realloc_mbuf = 0;
                        }

                        state = tx_on_dpaa_pool(mbuf, bp_info,
                                                &fd_arr[loop]);
                        if (unlikely(state)) {
                                /* Set frames_to_send & nb_bufs so
                                 * that packets are transmitted till
                                 * previous frame.
                                 */
                                frames_to_send = loop;
                                nb_bufs = loop;
                                goto send_pkts;
                        }
                }

send_pkts:
                loop = 0;
                while (loop < frames_to_send) {
                        loop += qman_enqueue_multi(q, &fd_arr[loop],
                                                   &flags[loop],
                                                   frames_to_send - loop);
                }
                nb_bufs -= frames_to_send;
                sent += frames_to_send;
        }

        DPAA_DP_LOG(DEBUG, "Transmitted %d buffers on queue: %p", sent, q);

        return sent;
}

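/*
 * "Slow" TX variant for configurations where enqueue rejections (ERNs)
 * can occur: poll for and free any rejected frames first, then hand off
 * to the regular burst handler.
 */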
uint16_t
dpaa_eth_queue_tx_slow(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        qman_ern_poll_free();

        return dpaa_eth_queue_tx(q, bufs, nb_bufs);
}

uint16_t dpaa_eth_tx_drop_all(void *q __rte_unused,
                              struct rte_mbuf **bufs __rte_unused,
                              uint16_t nb_bufs __rte_unused)
{
        DPAA_DP_LOG(DEBUG, "Drop all packets");

        /* Drop all incoming packets. No need to free packets here
         * because the rte_eth framework frees them through the tx_buffer
         * callback when this function returns a count less than nb_bufs.
         */
        return 0;
}