drivers/net/dpaa2/dpaa2_rxtx.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016-2021 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
#include <rte_hexdump.h>

#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_pmd_logs.h"
#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"

static inline uint32_t __rte_hot
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
                        struct dpaa2_annot_hdr *annotation);

static void enable_tx_tstamp(struct qbman_fd *fd) __rte_unused;

static inline rte_mbuf_timestamp_t *
dpaa2_timestamp_dynfield(struct rte_mbuf *mbuf)
{
        return RTE_MBUF_DYNFIELD(mbuf,
                dpaa2_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
}
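
/* Build a contiguous (single-buffer) frame descriptor from a
 * single-segment mbuf: the address, length, buffer pool id and data
 * offset are taken from the mbuf, and the FRC/CTRL/FLC fields are
 * cleared so no stale state is carried into the enqueue.
 */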
#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
        DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
        DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
        DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
        DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
        DPAA2_SET_FD_FRC(_fd, 0);               \
        DPAA2_RESET_FD_CTRL(_fd);               \
        DPAA2_RESET_FD_FLC(_fd);                \
} while (0)
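
/* Fast Rx parse for LX2160A: the WRIOP parser summary is carried in the
 * frame descriptor's FRC field, so the packet type can usually be set
 * from a single switch without touching the annotation area. Unknown
 * summaries fall back to dpaa2_dev_rx_parse_slow().
 */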
static inline void __rte_hot
dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
                       void *hw_annot_addr)
{
        uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);
        struct dpaa2_annot_hdr *annotation =
                        (struct dpaa2_annot_hdr *)hw_annot_addr;

        m->packet_type = RTE_PTYPE_UNKNOWN;
        switch (frc) {
        case DPAA2_PKT_TYPE_ETHER:
                m->packet_type = RTE_PTYPE_L2_ETHER;
                break;
        case DPAA2_PKT_TYPE_IPV4:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4;
                break;
        case DPAA2_PKT_TYPE_IPV6:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6;
                break;
        case DPAA2_PKT_TYPE_IPV4_EXT:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4_EXT;
                break;
        case DPAA2_PKT_TYPE_IPV6_EXT:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6_EXT;
                break;
        case DPAA2_PKT_TYPE_IPV4_TCP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
                break;
        case DPAA2_PKT_TYPE_IPV6_TCP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
                break;
        case DPAA2_PKT_TYPE_IPV4_UDP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
                break;
        case DPAA2_PKT_TYPE_IPV6_UDP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
                break;
        case DPAA2_PKT_TYPE_IPV4_SCTP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
                break;
        case DPAA2_PKT_TYPE_IPV6_SCTP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
                break;
        case DPAA2_PKT_TYPE_IPV4_ICMP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
                break;
        case DPAA2_PKT_TYPE_IPV6_ICMP:
                m->packet_type = RTE_PTYPE_L2_ETHER |
                        RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
                break;
        default:
                m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
        }
        m->hash.rss = fd->simple.flc_hi;
        m->ol_flags |= PKT_RX_RSS_HASH;

        if (dpaa2_enable_ts[m->port]) {
                *dpaa2_timestamp_dynfield(m) = annotation->word2;
                m->ol_flags |= dpaa2_timestamp_rx_dynflag;
                DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "",
                                *dpaa2_timestamp_dynfield(m));
        }

        DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
                "ol_flags =0x%" PRIx64 "",
                frc, m->packet_type, m->ol_flags);
}
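
/* Slow Rx parse path: walks the hardware annotation words to derive the
 * packet type, VLAN TCI, checksum status and fragmentation state when
 * the fast FRC/annotation summary is not sufficient.
 */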
static inline uint32_t __rte_hot
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
                        struct dpaa2_annot_hdr *annotation)
{
        uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
        uint16_t *vlan_tci;

        DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
                        "(4)=0x%" PRIx64 "\t",
                        annotation->word3, annotation->word4);

#if defined(RTE_LIBRTE_IEEE1588)
        if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP))
                mbuf->ol_flags |= PKT_RX_IEEE1588_PTP;
#endif

        if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
                vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
                        (VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
                mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
                mbuf->ol_flags |= PKT_RX_VLAN;
                pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
        } else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
                vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
                        (VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
                mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
                mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ;
                pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
        }

        if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
                pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
                goto parse_done;
        } else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
                pkt_type |= RTE_PTYPE_L2_ETHER;
        } else {
                goto parse_done;
        }

        if (BIT_ISSET_AT_POS(annotation->word3, L2_MPLS_1_PRESENT |
                                L2_MPLS_N_PRESENT))
                pkt_type |= RTE_PTYPE_L2_ETHER_MPLS;

        if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
                             L3_IPV4_N_PRESENT)) {
                pkt_type |= RTE_PTYPE_L3_IPV4;
                if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
                        L3_IP_N_OPT_PRESENT))
                        pkt_type |= RTE_PTYPE_L3_IPV4_EXT;

        } else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
                  L3_IPV6_N_PRESENT)) {
                pkt_type |= RTE_PTYPE_L3_IPV6;
                if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
                    L3_IP_N_OPT_PRESENT))
                        pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
        } else {
                goto parse_done;
        }

        if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
                mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
        else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
                mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

        if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
            L3_IP_1_MORE_FRAGMENT |
            L3_IP_N_FIRST_FRAGMENT |
            L3_IP_N_MORE_FRAGMENT)) {
                pkt_type |= RTE_PTYPE_L4_FRAG;
                goto parse_done;
        } else {
                pkt_type |= RTE_PTYPE_L4_NONFRAG;
        }

        if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
                pkt_type |= RTE_PTYPE_L4_UDP;

        else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
                pkt_type |= RTE_PTYPE_L4_TCP;

        else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
                pkt_type |= RTE_PTYPE_L4_SCTP;

        else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
                pkt_type |= RTE_PTYPE_L4_ICMP;

        else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
                pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
        return pkt_type;
}
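
/* Default Rx parse path (non-LX2 platforms): records checksum status
 * and timestamp first, then tries to classify the frame from the common
 * annotation word4 patterns before falling back to the slow parse.
 */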
static inline uint32_t __rte_hot
dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
{
        struct dpaa2_annot_hdr *annotation =
                        (struct dpaa2_annot_hdr *)hw_annot_addr;

        DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
                           annotation->word4);

        if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
                mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
        else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
                mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;

        if (dpaa2_enable_ts[mbuf->port]) {
                *dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
                mbuf->ol_flags |= dpaa2_timestamp_rx_dynflag;
                DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "",
                                *dpaa2_timestamp_dynfield(mbuf));
        }

        /* Check detailed parsing requirement */
        if (annotation->word3 & 0x7FFFFC3FFFF)
                return dpaa2_dev_rx_parse_slow(mbuf, annotation);

        /* Return some common types from parse processing */
        switch (annotation->word4) {
        case DPAA2_L3_IPv4:
                return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
        case DPAA2_L3_IPv6:
                return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
        case DPAA2_L3_IPv4_TCP:
                return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
                                RTE_PTYPE_L4_TCP;
        case DPAA2_L3_IPv4_UDP:
                return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
                                RTE_PTYPE_L4_UDP;
        case DPAA2_L3_IPv6_TCP:
                return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
                                RTE_PTYPE_L4_TCP;
        case DPAA2_L3_IPv6_UDP:
                return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
                                RTE_PTYPE_L4_UDP;
        default:
                break;
        }

        return dpaa2_dev_rx_parse_slow(mbuf, annotation);
}
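
/* Convert a scatter/gather frame descriptor into a chained mbuf. Each
 * S/G entry is turned back into the inline mbuf that fronts its buffer;
 * the first entry carries the packet-level metadata and parse results.
 * The FD's own buffer, which held the S/G table, is released at the end.
 */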
static inline struct rte_mbuf *__rte_hot
eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
                  int port_id)
{
        struct qbman_sge *sgt, *sge;
        size_t sg_addr, fd_addr;
        int i = 0;
        void *hw_annot_addr;
        struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

        fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
        hw_annot_addr = (void *)(fd_addr + DPAA2_FD_PTA_SIZE);

        /* Get Scatter gather table address */
        sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));

        sge = &sgt[i++];
        sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

        /* First Scatter gather entry */
        first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
        /* Prepare all the metadata for first segment */
        first_seg->buf_addr = (uint8_t *)sg_addr;
        first_seg->ol_flags = 0;
        first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
        first_seg->data_len = sge->length & 0x1FFFF;
        first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
        first_seg->nb_segs = 1;
        first_seg->next = NULL;
        first_seg->port = port_id;
        if (dpaa2_svr_family == SVR_LX2160A)
                dpaa2_dev_rx_parse_new(first_seg, fd, hw_annot_addr);
        else
                first_seg->packet_type =
                        dpaa2_dev_rx_parse(first_seg, hw_annot_addr);

        rte_mbuf_refcnt_set(first_seg, 1);
        cur_seg = first_seg;
        while (!DPAA2_SG_IS_FINAL(sge)) {
                sge = &sgt[i++];
                sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
                                DPAA2_GET_FLE_ADDR(sge));
                next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
                        rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
                next_seg->buf_addr = (uint8_t *)sg_addr;
                next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
                next_seg->data_len = sge->length & 0x1FFFF;
                first_seg->nb_segs += 1;
                rte_mbuf_refcnt_set(next_seg, 1);
                cur_seg->next = next_seg;
                next_seg->next = NULL;
                cur_seg = next_seg;
        }
        temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
        rte_mbuf_refcnt_set(temp, 1);
        rte_pktmbuf_free_seg(temp);

        return (void *)first_seg;
}
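
/* Convert a contiguous frame descriptor into the inline mbuf that
 * precedes its data buffer, restoring the fields a previous transmit
 * may have modified, then run the parse step for the platform.
 */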
static inline struct rte_mbuf *__rte_hot
eth_fd_to_mbuf(const struct qbman_fd *fd,
               int port_id)
{
        void *v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
        void *hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
        struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
                     rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

        /* Some of the fields need to be repopulated,
         * as they may have changed during the last transmission.
         */
        mbuf->nb_segs = 1;
        mbuf->ol_flags = 0;
        mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
        mbuf->data_len = DPAA2_GET_FD_LEN(fd);
        mbuf->pkt_len = mbuf->data_len;
        mbuf->port = port_id;
        mbuf->next = NULL;
        rte_mbuf_refcnt_set(mbuf, 1);

        /* Parse the packet.
         * Parse results for LX2 are in the FRC field of the FD.
         * For other DPAA2 platforms, parse results follow the
         * private SW annotation area.
         */
        if (dpaa2_svr_family == SVR_LX2160A)
                dpaa2_dev_rx_parse_new(mbuf, fd, hw_annot_addr);
        else
                mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, hw_annot_addr);

        DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
                "fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
                mbuf, mbuf->buf_addr, mbuf->data_off,
                DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
                DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

        return mbuf;
}
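
/* Build a scatter/gather frame descriptor from a multi-segment mbuf.
 * The S/G table lives either inside the first buffer's headroom (when
 * there is room and the mbuf is direct) or in a freshly allocated
 * buffer from 'mp'. Entries whose underlying buffers must survive the
 * hardware-side free (refcnt > 1, external or indirect buffers) get
 * the invalid-BPID (IVP) bit so QBMAN does not release them.
 */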
static int __rte_noinline __rte_hot
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
                  struct qbman_fd *fd,
                  struct rte_mempool *mp, uint16_t bpid)
{
        struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
        struct qbman_sge *sgt, *sge = NULL;
        int i, offset = 0;

#ifdef RTE_LIBRTE_IEEE1588
        /* annotation area for timestamp in first buffer */
        offset = 0x64;
#endif
        if (RTE_MBUF_DIRECT(mbuf) &&
                (mbuf->data_off > (mbuf->nb_segs * sizeof(struct qbman_sge)
                + offset))) {
                temp = mbuf;
                if (rte_mbuf_refcnt_read(temp) > 1) {
                        /* If refcnt > 1, invalid bpid is set to ensure
                         * buffer is not freed by HW
                         */
                        fd->simple.bpid_offset = 0;
                        DPAA2_SET_FD_IVP(fd);
                        rte_mbuf_refcnt_update(temp, -1);
                } else {
                        DPAA2_SET_ONLY_FD_BPID(fd, bpid);
                }
                DPAA2_SET_FD_OFFSET(fd, offset);
        } else {
                temp = rte_pktmbuf_alloc(mp);
                if (temp == NULL) {
                        DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
                        return -ENOMEM;
                }
                DPAA2_SET_ONLY_FD_BPID(fd, bpid);
                DPAA2_SET_FD_OFFSET(fd, temp->data_off);
        }
        DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
        DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
        DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
        DPAA2_RESET_FD_FRC(fd);
        DPAA2_RESET_FD_CTRL(fd);
        DPAA2_RESET_FD_FLC(fd);
        /* Set up the scatter/gather table and its entries */
        sgt = (struct qbman_sge *)(
                        (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
                        + DPAA2_GET_FD_OFFSET(fd));

        for (i = 0; i < mbuf->nb_segs; i++) {
                sge = &sgt[i];
                /* Reset the buffer pool id and offset fields */
                sge->fin_bpid_offset = 0;
                DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
                DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
                sge->length = cur_seg->data_len;
                if (RTE_MBUF_DIRECT(cur_seg)) {
                        /* If the inline SGT shares the segment's buffer,
                         * set the FLE format as Frame Data Section.
                         */
                        if (temp == cur_seg) {
                                DPAA2_SG_SET_FORMAT(sge, qbman_fd_list);
                                DPAA2_SET_FLE_IVP(sge);
                        } else {
                                if (rte_mbuf_refcnt_read(cur_seg) > 1) {
                                        /* If refcnt > 1, invalid bpid is set
                                         * to ensure buffer is not freed by HW
                                         */
                                        DPAA2_SET_FLE_IVP(sge);
                                        rte_mbuf_refcnt_update(cur_seg, -1);
                                } else {
                                        DPAA2_SET_FLE_BPID(sge,
                                                mempool_to_bpid(cur_seg->pool));
                                }
                        }
                        cur_seg = cur_seg->next;
                } else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
                        DPAA2_SET_FLE_IVP(sge);
                        cur_seg = cur_seg->next;
                } else {
                        /* Get owner MBUF from indirect buffer */
                        mi = rte_mbuf_from_indirect(cur_seg);
                        if (rte_mbuf_refcnt_read(mi) > 1) {
                                /* If refcnt > 1, invalid bpid is set to ensure
                                 * owner buffer is not freed by HW
                                 */
                                DPAA2_SET_FLE_IVP(sge);
                        } else {
                                DPAA2_SET_FLE_BPID(sge,
                                                   mempool_to_bpid(mi->pool));
                                rte_mbuf_refcnt_update(mi, 1);
                        }
                        prev_seg = cur_seg;
                        cur_seg = cur_seg->next;
                        prev_seg->next = NULL;
                        rte_pktmbuf_free(prev_seg);
                }
        }
        DPAA2_SG_SET_FINAL(sge, true);
        return 0;
}

static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
               struct qbman_fd *fd, uint16_t bpid) __rte_unused;
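
/* Build a contiguous frame descriptor from a single-segment mbuf,
 * adjusting reference counts so the hardware-side buffer release does
 * not free memory that is still in use: IVP is set for shared,
 * external or still-referenced buffers.
 */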
static void __rte_noinline __rte_hot
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
               struct qbman_fd *fd, uint16_t bpid)
{
        DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);

        DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
                "fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
                mbuf, mbuf->buf_addr, mbuf->data_off,
                DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
                DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
        if (RTE_MBUF_DIRECT(mbuf)) {
                if (rte_mbuf_refcnt_read(mbuf) > 1) {
                        DPAA2_SET_FD_IVP(fd);
                        rte_mbuf_refcnt_update(mbuf, -1);
                }
        } else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
                DPAA2_SET_FD_IVP(fd);
        } else {
                struct rte_mbuf *mi;

                mi = rte_mbuf_from_indirect(mbuf);
                if (rte_mbuf_refcnt_read(mi) > 1)
                        DPAA2_SET_FD_IVP(fd);
                else
                        rte_mbuf_refcnt_update(mi, 1);
                rte_pktmbuf_free(mbuf);
        }
}
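
/* Copy-based Tx fallback for mbufs that do not come from a DPAA2
 * hardware-backed pool: a buffer is taken from the interface's own
 * pool, the packet data and relevant fields are copied into it, and a
 * contiguous FD is built from the copy.
 */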
static inline int __rte_hot
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
                    struct qbman_fd *fd, uint16_t bpid)
{
        struct rte_mbuf *m;
        void *mb = NULL;

        if (rte_dpaa2_mbuf_alloc_bulk(
                rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
                DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
                return -1;
        }
        m = (struct rte_mbuf *)mb;
        memcpy((char *)m->buf_addr + mbuf->data_off,
               (void *)((char *)mbuf->buf_addr + mbuf->data_off),
                mbuf->pkt_len);

        /* Copy required fields */
        m->data_off = mbuf->data_off;
        m->ol_flags = mbuf->ol_flags;
        m->packet_type = mbuf->packet_type;
        m->tx_offload = mbuf->tx_offload;

        DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);

        DPAA2_PMD_DP_DEBUG(
                "mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
                " meta: %d, off: %d, len: %d\n",
                (void *)mbuf,
                mbuf->buf_addr,
                DPAA2_GET_FD_ADDR(fd),
                DPAA2_GET_FD_BPID(fd),
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
                DPAA2_GET_FD_OFFSET(fd),
                DPAA2_GET_FD_LEN(fd));

        return 0;
}
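
/* Drain the Rx error queue: pull frames from the error FQ, log the
 * frame annotation status and hexdump the faulty packet. Only invoked
 * when dpaa2_enable_err_queue is set.
 */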
static void
dump_err_pkts(struct dpaa2_queue *dpaa2_q)
{
        /* Function receives frames for a given device and VQ */
        struct qbman_result *dq_storage;
        uint32_t fqid = dpaa2_q->fqid;
        int ret, num_rx = 0, num_pulled;
        uint8_t pending, status;
        struct qbman_swp *swp;
        const struct qbman_fd *fd;
        struct qbman_pull_desc pulldesc;
        struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
        uint32_t lcore_id = rte_lcore_id();
        void *v_addr, *hw_annot_addr;
        struct dpaa2_fas *fas;

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
                        DPAA2_PMD_ERR("Failed to allocate IO portal, tid: %d\n",
                                rte_gettid());
                        return;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;

        dq_storage = dpaa2_q->q_storage[lcore_id].dq_storage[0];
        qbman_pull_desc_clear(&pulldesc);
        qbman_pull_desc_set_fq(&pulldesc, fqid);
        qbman_pull_desc_set_storage(&pulldesc, dq_storage,
                        (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
        qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);

        while (1) {
                if (qbman_swp_pull(swp, &pulldesc)) {
                        DPAA2_PMD_DP_DEBUG("VDQ command is not issued. QBMAN is busy\n");
                        /* Portal was busy, try again */
                        continue;
                }
                break;
        }

        /* Check if the previously issued command is completed. */
        while (!qbman_check_command_complete(dq_storage))
                ;

        num_pulled = 0;
        pending = 1;
        do {
                /* Loop until dq_storage is updated
                 * with a new token by QBMAN.
                 */
                while (!qbman_check_new_result(dq_storage))
                        ;

                /* Check whether the last pull command has expired,
                 * setting the condition for loop termination.
                 */
                if (qbman_result_DQ_is_pull_complete(dq_storage)) {
                        pending = 0;
                        /* Check for valid frame. */
                        status = qbman_result_DQ_flags(dq_storage);
                        if (unlikely((status &
                                QBMAN_DQ_STAT_VALIDFRAME) == 0))
                                continue;
                }
                fd = qbman_result_DQ_fd(dq_storage);
                v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
                hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
                fas = hw_annot_addr;

                DPAA2_PMD_ERR("\n\n[%d] error packet on port[%d]:"
                        " fd_off: %d, fd_err: %x, fas_status: %x",
                        rte_lcore_id(), eth_data->port_id,
                        DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ERR(fd),
                        fas->status);
                rte_hexdump(stderr, "Error packet", v_addr,
                        DPAA2_GET_FD_OFFSET(fd) + DPAA2_GET_FD_LEN(fd));

                dq_storage++;
                num_rx++;
                num_pulled++;
        } while (pending);

        dpaa2_q->err_pkts += num_rx;
}
/* This function assumes that the caller will keep the same value of
 * nb_pkts across calls for a given queue; if that is not the case, it
 * is better to use the non-prefetch version of the Rx call.
 * It returns as many packets as were requested in the previous call,
 * without honoring the current nb_pkts or the available bufs space.
 */
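/*
 * Usage sketch (hypothetical, for illustration only): an application
 * polls this burst handler indirectly via rte_eth_rx_burst() with a
 * constant burst size per queue, e.g.:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t n = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *
 * rte_eth_rx_burst() dispatches to dpaa2_dev_prefetch_rx() when this
 * PMD selects it as the active Rx burst function.
 */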
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        /* Function receives frames for a given device and VQ */
        struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
        struct qbman_result *dq_storage, *dq_storage1 = NULL;
        uint32_t fqid = dpaa2_q->fqid;
        int ret, num_rx = 0, pull_size;
        uint8_t pending, status;
        struct qbman_swp *swp;
        const struct qbman_fd *fd;
        struct qbman_pull_desc pulldesc;
        struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
        struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
        struct dpaa2_dev_priv *priv = eth_data->dev_private;

        if (unlikely(dpaa2_enable_err_queue))
                dump_err_pkts(priv->rx_err_vq);

        if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
                ret = dpaa2_affine_qbman_ethrx_swp();
                if (ret) {
                        DPAA2_PMD_ERR("Failure in affining portal");
                        return 0;
                }
        }

        if (unlikely(!rte_dpaa2_bpid_info &&
                     rte_eal_process_type() == RTE_PROC_SECONDARY))
                rte_dpaa2_bpid_info = dpaa2_q->bp_array;

        swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
        pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
        if (unlikely(!q_storage->active_dqs)) {
                q_storage->toggle = 0;
                dq_storage = q_storage->dq_storage[q_storage->toggle];
                q_storage->last_num_pkts = pull_size;
                qbman_pull_desc_clear(&pulldesc);
                qbman_pull_desc_set_numframes(&pulldesc,
                                              q_storage->last_num_pkts);
                qbman_pull_desc_set_fq(&pulldesc, fqid);
                qbman_pull_desc_set_storage(&pulldesc, dq_storage,
                        (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
                if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
                        while (!qbman_check_command_complete(
                               get_swp_active_dqs(
                               DPAA2_PER_LCORE_ETHRX_DPIO->index)))
                                ;
                        clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
                }
                while (1) {
                        if (qbman_swp_pull(swp, &pulldesc)) {
                                DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
                                                  " QBMAN is busy (1)\n");
                                /* Portal was busy, try again */
                                continue;
                        }
                        break;
                }
                q_storage->active_dqs = dq_storage;
                q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
                set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
                                   dq_storage);
        }

        dq_storage = q_storage->active_dqs;
        rte_prefetch0((void *)(size_t)(dq_storage));
        rte_prefetch0((void *)(size_t)(dq_storage + 1));

        /* Prepare the next pull descriptor. This gives room for the
         * prefetching done on the DQRR entries.
         */
        q_storage->toggle ^= 1;
        dq_storage1 = q_storage->dq_storage[q_storage->toggle];
        qbman_pull_desc_clear(&pulldesc);
        qbman_pull_desc_set_numframes(&pulldesc, pull_size);
        qbman_pull_desc_set_fq(&pulldesc, fqid);
        qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
                (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

        /* Check if the previously issued command is completed.
         * The SWP also appears to be shared between the Ethernet
         * driver and the SEC driver.
         */
        while (!qbman_check_command_complete(dq_storage))
                ;
        if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
                clear_swp_active_dqs(q_storage->active_dpio_id);

        pending = 1;

        do {
                /* Loop until dq_storage is updated
                 * with a new token by QBMAN.
                 */
                while (!qbman_check_new_result(dq_storage))
                        ;
                rte_prefetch0((void *)((size_t)(dq_storage + 2)));
                /* Check whether the last pull command has expired,
                 * setting the condition for loop termination.
                 */
                if (qbman_result_DQ_is_pull_complete(dq_storage)) {
                        pending = 0;
                        /* Check for valid frame. */
                        status = qbman_result_DQ_flags(dq_storage);
                        if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
                                continue;
                }
                fd = qbman_result_DQ_fd(dq_storage);

#ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
                if (dpaa2_svr_family != SVR_LX2160A) {
                        const struct qbman_fd *next_fd =
                                qbman_result_DQ_fd(dq_storage + 1);
                        /* Prefetch Annotation address for the parse results */
                        rte_prefetch0(DPAA2_IOVA_TO_VADDR((DPAA2_GET_FD_ADDR(
                                next_fd) + DPAA2_FD_PTA_SIZE + 16)));
                }
#endif

                if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
                        bufs[num_rx] = eth_sg_fd_to_mbuf(fd, eth_data->port_id);
                else
                        bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
#if defined(RTE_LIBRTE_IEEE1588)
                priv->rx_timestamp = *dpaa2_timestamp_dynfield(bufs[num_rx]);
#endif

                if (eth_data->dev_conf.rxmode.offloads &
                                RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        rte_vlan_strip(bufs[num_rx]);

                dq_storage++;
                num_rx++;
        } while (pending);

        if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
                while (!qbman_check_command_complete(
                       get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
                        ;
                clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
        }
        /* Issue a volatile dequeue command for the next pull */
        while (1) {
                if (qbman_swp_pull(swp, &pulldesc)) {
                        DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
                                          " QBMAN is busy (2)\n");
                        continue;
                }
                break;
        }
        q_storage->active_dqs = dq_storage1;
        q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
        set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);

        dpaa2_q->rx_pkts += num_rx;

        return num_rx;
}
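
/* Eventdev Rx callbacks: the following three handlers convert a
 * dequeued FD into an mbuf and fill the rte_event from the queue's
 * stored event template, for parallel, atomic and ordered scheduling
 * respectively. Atomic and ordered flows additionally record DQRR/ORP
 * context in the mbuf's seqn so the Tx path can complete the hardware
 * handshake.
 */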
void __rte_hot
dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
                                 const struct qbman_fd *fd,
                                 const struct qbman_result *dq,
                                 struct dpaa2_queue *rxq,
                                 struct rte_event *ev)
{
        rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
                DPAA2_FD_PTA_SIZE + 16));

        ev->flow_id = rxq->ev.flow_id;
        ev->sub_event_type = rxq->ev.sub_event_type;
        ev->event_type = RTE_EVENT_TYPE_ETHDEV;
        ev->op = RTE_EVENT_OP_NEW;
        ev->sched_type = rxq->ev.sched_type;
        ev->queue_id = rxq->ev.queue_id;
        ev->priority = rxq->ev.priority;

        ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

        qbman_swp_dqrr_consume(swp, dq);
}

void __rte_hot
dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused,
                               const struct qbman_fd *fd,
                               const struct qbman_result *dq,
                               struct dpaa2_queue *rxq,
                               struct rte_event *ev)
{
        uint8_t dqrr_index;

        rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
                DPAA2_FD_PTA_SIZE + 16));

        ev->flow_id = rxq->ev.flow_id;
        ev->sub_event_type = rxq->ev.sub_event_type;
        ev->event_type = RTE_EVENT_TYPE_ETHDEV;
        ev->op = RTE_EVENT_OP_NEW;
        ev->sched_type = rxq->ev.sched_type;
        ev->queue_id = rxq->ev.queue_id;
        ev->priority = rxq->ev.priority;

        ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

        dqrr_index = qbman_get_dqrr_idx(dq);
        *dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
        DPAA2_PER_LCORE_DQRR_SIZE++;
        DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
        DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}

void __rte_hot
dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
                                const struct qbman_fd *fd,
                                const struct qbman_result *dq,
                                struct dpaa2_queue *rxq,
                                struct rte_event *ev)
{
        rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
                DPAA2_FD_PTA_SIZE + 16));

        ev->flow_id = rxq->ev.flow_id;
        ev->sub_event_type = rxq->ev.sub_event_type;
        ev->event_type = RTE_EVENT_TYPE_ETHDEV;
        ev->op = RTE_EVENT_OP_NEW;
        ev->sched_type = rxq->ev.sched_type;
        ev->queue_id = rxq->ev.queue_id;
        ev->priority = rxq->ev.priority;

        ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);

        *dpaa2_seqn(ev->mbuf) = DPAA2_ENQUEUE_FLAG_ORP;
        *dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
        *dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;

        qbman_swp_dqrr_consume(swp, dq);
}
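
/* Non-prefetch Rx burst: issues volatile dequeue commands until either
 * nb_pkts frames have been pulled or the queue runs dry, converting
 * each FD (contiguous or scatter/gather) into an mbuf.
 */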
uint16_t
dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        /* Function receives frames for a given device and VQ */
        struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
        struct qbman_result *dq_storage;
        uint32_t fqid = dpaa2_q->fqid;
        int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
        uint8_t pending, status;
        struct qbman_swp *swp;
        const struct qbman_fd *fd;
        struct qbman_pull_desc pulldesc;
        struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
        struct dpaa2_dev_priv *priv = eth_data->dev_private;

        if (unlikely(dpaa2_enable_err_queue))
                dump_err_pkts(priv->rx_err_vq);

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
                        DPAA2_PMD_ERR(
                                "Failed to allocate IO portal, tid: %d\n",
                                rte_gettid());
                        return 0;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;

        do {
                dq_storage = dpaa2_q->q_storage->dq_storage[0];
                qbman_pull_desc_clear(&pulldesc);
                qbman_pull_desc_set_fq(&pulldesc, fqid);
                qbman_pull_desc_set_storage(&pulldesc, dq_storage,
                                (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);

                if (next_pull > dpaa2_dqrr_size) {
                        qbman_pull_desc_set_numframes(&pulldesc,
                                dpaa2_dqrr_size);
                        next_pull -= dpaa2_dqrr_size;
                } else {
                        qbman_pull_desc_set_numframes(&pulldesc, next_pull);
                        next_pull = 0;
                }

                while (1) {
                        if (qbman_swp_pull(swp, &pulldesc)) {
                                DPAA2_PMD_DP_DEBUG(
                                        "VDQ command is not issued. QBMAN is busy\n");
                                /* Portal was busy, try again */
                                continue;
                        }
                        break;
                }

                rte_prefetch0((void *)((size_t)(dq_storage + 1)));
                /* Check if the previously issued command is completed. */
                while (!qbman_check_command_complete(dq_storage))
                        ;

                num_pulled = 0;
                pending = 1;
                do {
                        /* Loop until dq_storage is updated
                         * with a new token by QBMAN.
                         */
                        while (!qbman_check_new_result(dq_storage))
                                ;
                        rte_prefetch0((void *)((size_t)(dq_storage + 2)));
                        /* Check whether the last pull command has expired,
                         * setting the condition for loop termination.
                         */
                        if (qbman_result_DQ_is_pull_complete(dq_storage)) {
                                pending = 0;
                                /* Check for valid frame. */
                                status = qbman_result_DQ_flags(dq_storage);
                                if (unlikely((status &
                                        QBMAN_DQ_STAT_VALIDFRAME) == 0))
                                        continue;
                        }
                        fd = qbman_result_DQ_fd(dq_storage);

#ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
                        if (dpaa2_svr_family != SVR_LX2160A) {
                                const struct qbman_fd *next_fd =
                                        qbman_result_DQ_fd(dq_storage + 1);

                                /* Prefetch Annotation address for the parse
                                 * results.
                                 */
                                rte_prefetch0((DPAA2_IOVA_TO_VADDR(
                                        DPAA2_GET_FD_ADDR(next_fd) +
                                        DPAA2_FD_PTA_SIZE + 16)));
                        }
#endif

                        if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
                                bufs[num_rx] = eth_sg_fd_to_mbuf(fd,
                                                        eth_data->port_id);
                        else
                                bufs[num_rx] = eth_fd_to_mbuf(fd,
                                                        eth_data->port_id);

                        if (eth_data->dev_conf.rxmode.offloads &
                                        RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                                rte_vlan_strip(bufs[num_rx]);

                        dq_storage++;
                        num_rx++;
                        num_pulled++;
                } while (pending);
        /* Last VDQ provided all packets and more packets are requested */
        } while (next_pull && num_pulled == dpaa2_dqrr_size);

        dpaa2_q->rx_pkts += num_rx;

        return num_rx;
}
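
/* Poll the Tx confirmation queue: pulled FDs correspond to frames the
 * hardware has finished transmitting, so their buffers are released
 * back to BMAN here (and, with IEEE1588 enabled, the Tx timestamp is
 * read from the frame annotation).
 */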
uint16_t dpaa2_dev_tx_conf(void *queue)
{
        /* Function receives TX confirmation frames for a given device and VQ */
        struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
        struct qbman_result *dq_storage;
        uint32_t fqid = dpaa2_q->fqid;
        int ret, num_tx_conf = 0, num_pulled;
        uint8_t pending, status;
        struct qbman_swp *swp;
        const struct qbman_fd *fd, *next_fd;
        struct qbman_pull_desc pulldesc;
        struct qbman_release_desc releasedesc;
        uint32_t bpid;
        uint64_t buf;
#if defined(RTE_LIBRTE_IEEE1588)
        struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
        struct dpaa2_dev_priv *priv = eth_data->dev_private;
        struct dpaa2_annot_hdr *annotation;
#endif

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
                        DPAA2_PMD_ERR(
                                "Failed to allocate IO portal, tid: %d\n",
                                rte_gettid());
                        return 0;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;

        do {
                dq_storage = dpaa2_q->q_storage->dq_storage[0];
                qbman_pull_desc_clear(&pulldesc);
                qbman_pull_desc_set_fq(&pulldesc, fqid);
                qbman_pull_desc_set_storage(&pulldesc, dq_storage,
                                (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);

                qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);

                while (1) {
                        if (qbman_swp_pull(swp, &pulldesc)) {
                                DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
                                                   " QBMAN is busy\n");
                                /* Portal was busy, try again */
                                continue;
                        }
                        break;
                }

                rte_prefetch0((void *)((size_t)(dq_storage + 1)));
                /* Check if the previously issued command is completed. */
                while (!qbman_check_command_complete(dq_storage))
                        ;

                num_pulled = 0;
                pending = 1;
                do {
                        /* Loop until dq_storage is updated
                         * with a new token by QBMAN.
                         */
                        while (!qbman_check_new_result(dq_storage))
                                ;
                        rte_prefetch0((void *)((size_t)(dq_storage + 2)));
                        /* Check whether the last pull command has expired,
                         * setting the condition for loop termination.
                         */
                        if (qbman_result_DQ_is_pull_complete(dq_storage)) {
                                pending = 0;
                                /* Check for valid frame. */
                                status = qbman_result_DQ_flags(dq_storage);
                                if (unlikely((status &
                                        QBMAN_DQ_STAT_VALIDFRAME) == 0))
                                        continue;
                        }
                        fd = qbman_result_DQ_fd(dq_storage);

                        next_fd = qbman_result_DQ_fd(dq_storage + 1);
                        /* Prefetch Annotation address for the parse results */
                        rte_prefetch0((void *)(size_t)
                                (DPAA2_GET_FD_ADDR(next_fd) +
                                 DPAA2_FD_PTA_SIZE + 16));

                        bpid = DPAA2_GET_FD_BPID(fd);

                        /* Create a release descriptor required for releasing
                         * buffers into QBMAN
                         */
                        qbman_release_desc_clear(&releasedesc);
                        qbman_release_desc_set_bpid(&releasedesc, bpid);

                        buf = DPAA2_GET_FD_ADDR(fd);
                        /* feed them to bman */
                        do {
                                ret = qbman_swp_release(swp, &releasedesc,
                                                        &buf, 1);
                        } while (ret == -EBUSY);

                        dq_storage++;
                        num_tx_conf++;
                        num_pulled++;
#if defined(RTE_LIBRTE_IEEE1588)
                        annotation = (struct dpaa2_annot_hdr *)((size_t)
                                DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
                                DPAA2_FD_PTA_SIZE);
                        priv->tx_timestamp = annotation->word2;
#endif
                } while (pending);

        /* Last VDQ provided all packets and more packets are requested */
        } while (num_pulled == dpaa2_dqrr_size);

        dpaa2_q->rx_pkts += num_tx_conf;

        return num_tx_conf;
}
/* Configure the egress frame annotation for timestamp update */
static void enable_tx_tstamp(struct qbman_fd *fd)
{
        struct dpaa2_faead *fd_faead;

        /* Set frame annotation status field as valid */
        (fd)->simple.frc |= DPAA2_FD_FRC_FASV;

        /* Set frame annotation egress action descriptor as valid */
        (fd)->simple.frc |= DPAA2_FD_FRC_FAEADV;

        /* Set Annotation Length as 128B */
        (fd)->simple.ctrl |= DPAA2_FD_CTRL_ASAL;

        /* enable update of confirmation frame annotation */
        fd_faead = (struct dpaa2_faead *)((size_t)
                        DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
                        DPAA2_FD_PTA_SIZE + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET);
        fd_faead->ctrl = DPAA2_ANNOT_FAEAD_A2V | DPAA2_ANNOT_FAEAD_UPDV |
                                DPAA2_ANNOT_FAEAD_UPD;
}
1144
1145 /*
1146  * Callback to handle sending packets through WRIOP based interface
1147  */
1148 uint16_t
1149 dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1150 {
1151         /* Function to transmit the frames to given device and VQ*/
1152         uint32_t loop, retry_count;
1153         int32_t ret;
1154         struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1155         struct rte_mbuf *mi;
1156         uint32_t frames_to_send;
1157         struct rte_mempool *mp;
1158         struct qbman_eq_desc eqdesc;
1159         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1160         struct qbman_swp *swp;
1161         uint16_t num_tx = 0;
1162         uint16_t bpid;
1163         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1164         struct dpaa2_dev_priv *priv = eth_data->dev_private;
1165         uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1166         struct rte_mbuf **orig_bufs = bufs;
1167
1168         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1169                 ret = dpaa2_affine_qbman_swp();
1170                 if (ret) {
1171                         DPAA2_PMD_ERR(
1172                                 "Failed to allocate IO portal, tid: %d\n",
1173                                 rte_gettid());
1174                         return 0;
1175                 }
1176         }
1177         swp = DPAA2_PER_LCORE_PORTAL;
1178
1179         DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
1180                         eth_data, dpaa2_q->fqid);
1181
1182 #ifdef RTE_LIBRTE_IEEE1588
1183         /* IEEE1588 driver need pointer to tx confirmation queue
1184          * corresponding to last packet transmitted for reading
1185          * the timestamp
1186          */
1187         priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
1188         dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
1189 #endif
1190
1191         /*Prepare enqueue descriptor*/
1192         qbman_eq_desc_clear(&eqdesc);
1193         qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1194         qbman_eq_desc_set_fq(&eqdesc, dpaa2_q->fqid);
1195
1196         /*Clear the unused FD fields before sending*/
1197         while (nb_pkts) {
1198                 /*Check if the queue is congested*/
1199                 retry_count = 0;
1200                 while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1201                         retry_count++;
1202                         /* Retry for some time before giving up */
1203                         if (retry_count > CONG_RETRY_COUNT)
1204                                 goto skip_tx;
1205                 }
1206
1207                 frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1208                         dpaa2_eqcr_size : nb_pkts;
1209
1210                 for (loop = 0; loop < frames_to_send; loop++) {
1211                         if (*dpaa2_seqn(*bufs)) {
1212                                 uint8_t dqrr_index = *dpaa2_seqn(*bufs) - 1;
1213
1214                                 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
1215                                                 dqrr_index;
1216                                 DPAA2_PER_LCORE_DQRR_SIZE--;
1217                                 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1218                                 *dpaa2_seqn(*bufs) = DPAA2_INVALID_MBUF_SEQN;
1219                         }
1220
1221                         if (likely(RTE_MBUF_DIRECT(*bufs))) {
1222                                 mp = (*bufs)->pool;
1223                                 /* Check the basic scenario and set
1224                                  * the FD appropriately here itself.
1225                                  */
1226                                 if (likely(mp && mp->ops_index ==
1227                                     priv->bp_list->dpaa2_ops_index &&
1228                                     (*bufs)->nb_segs == 1 &&
1229                                     rte_mbuf_refcnt_read((*bufs)) == 1)) {
1230                                         if (unlikely(((*bufs)->ol_flags
1231                                                 & PKT_TX_VLAN_PKT) ||
1232                                                 (eth_data->dev_conf.txmode.offloads
1233                                                 & RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
1234                                                 ret = rte_vlan_insert(bufs);
1235                                                 if (ret)
1236                                                         goto send_n_return;
1237                                         }
1238                                         DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1239                                         &fd_arr[loop], mempool_to_bpid(mp));
1240                                         bufs++;
1241 #ifdef RTE_LIBRTE_IEEE1588
1242                                         enable_tx_tstamp(&fd_arr[loop]);
1243 #endif
1244                                         continue;
1245                                 }
1246                         } else {
1247                                 mi = rte_mbuf_from_indirect(*bufs);
1248                                 mp = mi->pool;
1249                         }
1250
1251                         if (unlikely(RTE_MBUF_HAS_EXTBUF(*bufs))) {
1252                                 if (unlikely((*bufs)->nb_segs > 1)) {
1253                                         if (eth_mbuf_to_sg_fd(*bufs,
1254                                                               &fd_arr[loop],
1255                                                               mp, 0))
1256                                                 goto send_n_return;
1257                                 } else {
1258                                         eth_mbuf_to_fd(*bufs,
1259                                                        &fd_arr[loop], 0);
1260                                 }
1261                                 bufs++;
1262 #ifdef RTE_LIBRTE_IEEE1588
1263                                 enable_tx_tstamp(&fd_arr[loop]);
1264 #endif
1265                                 continue;
1266                         }
1267
1268                         /* Not a frame allocated from a hw_pkt pool */
1269                         if (unlikely(!mp || !priv->bp_list)) {
1270                                 DPAA2_PMD_ERR("Err: No buffer pool attached");
1271                                 goto send_n_return;
1272                         }
1273
1274                         if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
1275                                 (eth_data->dev_conf.txmode.offloads
1276                                 & RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
1277                                 int ret = rte_vlan_insert(bufs);
1278                                 if (ret)
1279                                         goto send_n_return;
1280                         }
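                        /* Frames from a non-DPAA2 mempool cannot be handed to
                         * hardware in place: copy them into a buffer taken
                         * from the default DPAA2 pool of this interface and
                         * free the original mbuf.
                         */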
1281                         if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1282                                 DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1283                                 /* The allocation should be from the default
1284                                  * buffer pool attached to this interface
1285                                  */
1286                                 bpid = priv->bp_list->buf_pool.bpid;
1287
1288                                 if (unlikely((*bufs)->nb_segs > 1)) {
1289                                         DPAA2_PMD_ERR("S/G support not added"
1290                                                 " for non hw offload buffer");
1291                                         goto send_n_return;
1292                                 }
1293                                 if (eth_copy_mbuf_to_fd(*bufs,
1294                                                         &fd_arr[loop], bpid)) {
1295                                         goto send_n_return;
1296                                 }
1297                                 /* free the original packet */
1298                                 rte_pktmbuf_free(*bufs);
1299                         } else {
1300                                 bpid = mempool_to_bpid(mp);
1301                                 if (unlikely((*bufs)->nb_segs > 1)) {
1302                                         if (eth_mbuf_to_sg_fd(*bufs,
1303                                                         &fd_arr[loop],
1304                                                         mp, bpid))
1305                                                 goto send_n_return;
1306                                 } else {
1307                                         eth_mbuf_to_fd(*bufs,
1308                                                        &fd_arr[loop], bpid);
1309                                 }
1310                         }
1311 #ifdef RTE_LIBRTE_IEEE1588
1312                         enable_tx_tstamp(&fd_arr[loop]);
1313 #endif
1314                         bufs++;
1315                 }
1316
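                /* qbman_swp_enqueue_multiple() may accept only part of the
                 * batch (or none, returning a negative value) when the EQCR
                 * is busy; retry the remainder and give up via send_n_return
                 * once the retry budget is exhausted.
                 */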
1317                 loop = 0;
1318                 retry_count = 0;
1319                 while (loop < frames_to_send) {
1320                         ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1321                                         &fd_arr[loop], &flags[loop],
1322                                         frames_to_send - loop);
1323                         if (unlikely(ret < 0)) {
1324                                 retry_count++;
1325                                 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1326                                         num_tx += loop;
1327                                         nb_pkts -= loop;
1328                                         goto send_n_return;
1329                                 }
1330                         } else {
1331                                 loop += ret;
1332                                 retry_count = 0;
1333                         }
1334                 }
1335
1336                 num_tx += loop;
1337                 nb_pkts -= loop;
1338         }
1339         dpaa2_q->tx_pkts += num_tx;
1340
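        /* Hardware could not free the mbufs carrying external buffers, so
         * software releases them here once transmission is done.
         */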
1341         loop = 0;
1342         while (loop < num_tx) {
1343                 if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
1344                         rte_pktmbuf_free(*orig_bufs);
1345                 orig_bufs++;
1346                 loop++;
1347         }
1348
1349         return num_tx;
1350
1351 send_n_return:
1352         /* send any already prepared fd */
1353         if (loop) {
1354                 unsigned int i = 0;
1355
1356                 retry_count = 0;
1357                 while (i < loop) {
1358                         ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1359                                                          &fd_arr[i],
1360                                                          &flags[i],
1361                                                          loop - i);
1362                         if (unlikely(ret < 0)) {
1363                                 retry_count++;
1364                                 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1365                                         break;
1366                         } else {
1367                                 i += ret;
1368                                 retry_count = 0;
1369                         }
1370                 }
1371                 num_tx += i;
1372         }
1373 skip_tx:
1374         dpaa2_q->tx_pkts += num_tx;
1375
1376         loop = 0;
1377         while (loop < num_tx) {
1378                 if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
1379                         rte_pktmbuf_free(*orig_bufs);
1380                 orig_bufs++;
1381                 loop++;
1382         }
1383
1384         return num_tx;
1385 }
1386
1387 void
1388 dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci)
1389 {
1390         struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1391         struct qbman_fd *fd;
1392         struct rte_mbuf *m;
1393
1394         fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
1395
1396         /* The port id passed here does not matter, as the mbuf is only freed */
1397         m = eth_fd_to_mbuf(fd, 0);
1398         rte_pktmbuf_free(m);
1399 }
1400
1401 static void
1402 dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
1403                              struct rte_mbuf *m,
1404                              struct qbman_eq_desc *eqdesc)
1405 {
1406         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1407         struct dpaa2_dev_priv *priv = eth_data->dev_private;
1408         struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1409         struct eqresp_metadata *eqresp_meta;
1410         uint16_t orpid, seqnum;
1411         uint8_t dq_idx;
1412
1413         qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid);
1414
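        /* For ORP (order restoration point) frames, the ORP id and the
         * sequence number were stashed in the seqn dynfield at dequeue
         * time; recover them so QBMAN can restore the original frame
         * order on enqueue.
         */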
1415         if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
1416                 orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
1417                         DPAA2_EQCR_OPRID_SHIFT;
1418                 seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
1419                         DPAA2_EQCR_SEQNUM_SHIFT;
1420
1421                 if (!priv->en_loose_ordered) {
1422                         qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
1423                         qbman_eq_desc_set_response(eqdesc, (uint64_t)
1424                                 DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
1425                                 dpio_dev->eqresp_pi]), 1);
1426                         qbman_eq_desc_set_token(eqdesc, 1);
1427
1428                         eqresp_meta = &dpio_dev->eqresp_meta[
1429                                 dpio_dev->eqresp_pi];
1430                         eqresp_meta->dpaa2_q = dpaa2_q;
1431                         eqresp_meta->mp = m->pool;
1432
1433                         dpio_dev->eqresp_pi =
1434                                 (dpio_dev->eqresp_pi + 1) %
1435                                 MAX_EQ_RESP_ENTRIES;
1436                 } else {
1437                         qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
1438                 }
1439         } else {
1440                 dq_idx = *dpaa2_seqn(m) - 1;
1441                 qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
1442                 DPAA2_PER_LCORE_DQRR_SIZE--;
1443                 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
1444         }
1445         *dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
1446 }
1447
1448 /* Callback to handle sending ordered packets through a WRIOP-based interface */
1449 uint16_t
1450 dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1451 {
1452         /* Function to transmit the frames to the given device and VQ */
1453         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1454         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1455         struct dpaa2_dev_priv *priv = eth_data->dev_private;
1456         struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
1457         struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1458         struct rte_mbuf *mi;
1459         struct rte_mempool *mp;
1460         struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1461         struct qbman_swp *swp;
1462         uint32_t frames_to_send, num_free_eq_desc;
1463         uint32_t loop, retry_count;
1464         int32_t ret;
1465         uint16_t num_tx = 0;
1466         uint16_t bpid;
1467
1468         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1469                 ret = dpaa2_affine_qbman_swp();
1470                 if (ret) {
1471                         DPAA2_PMD_ERR(
1472                                 "Failed to allocate IO portal, tid: %d\n",
1473                                 rte_gettid());
1474                         return 0;
1475                 }
1476         }
1477         swp = DPAA2_PER_LCORE_PORTAL;
1478
1479         DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
1480                            eth_data, dpaa2_q->fqid);
1481
1482         /* This also handles normal and atomic queues, since any type
1483          * of packet can be enqueued when ordered queues are in use.
1484          */
1485         while (nb_pkts) {
1486                 /*Check if the queue is congested*/
1487                 retry_count = 0;
1488                 while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1489                         retry_count++;
1490                         /* Retry for some time before giving up */
1491                         if (retry_count > CONG_RETRY_COUNT)
1492                                 goto skip_tx;
1493                 }
1494
1495                 frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1496                         dpaa2_eqcr_size : nb_pkts;
1497
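                /* Under strict ordering, each ORP frame consumes one enqueue
                 * response entry, so shrink the batch to the number of free
                 * enqueue descriptors.
                 */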
1498                 if (!priv->en_loose_ordered) {
1499                         if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
1500                                 num_free_eq_desc = dpaa2_free_eq_descriptors();
1501                                 if (num_free_eq_desc < frames_to_send)
1502                                         frames_to_send = num_free_eq_desc;
1503                         }
1504                 }
1505
1506                 for (loop = 0; loop < frames_to_send; loop++) {
1507                         /*Prepare enqueue descriptor*/
1508                         qbman_eq_desc_clear(&eqdesc[loop]);
1509
1510                         if (*dpaa2_seqn(*bufs)) {
1511                                 /* Use only queue 0 for Tx in case of atomic/
1512                                  * ordered packets, as packets can get reordered
1513                                  * when being transmitted out from the interface
1514                                  */
1515                                 dpaa2_set_enqueue_descriptor(order_sendq,
1516                                                              (*bufs),
1517                                                              &eqdesc[loop]);
1518                         } else {
1519                                 qbman_eq_desc_set_no_orp(&eqdesc[loop],
1520                                                          DPAA2_EQ_RESP_ERR_FQ);
1521                                 qbman_eq_desc_set_fq(&eqdesc[loop],
1522                                                      dpaa2_q->fqid);
1523                         }
1524
1525                         if (likely(RTE_MBUF_DIRECT(*bufs))) {
1526                                 mp = (*bufs)->pool;
1527                                 /* Check for the common case and set up
1528                                  * the FD inline right here.
1529                                  */
1530                                 if (likely(mp && mp->ops_index ==
1531                                     priv->bp_list->dpaa2_ops_index &&
1532                                     (*bufs)->nb_segs == 1 &&
1533                                     rte_mbuf_refcnt_read((*bufs)) == 1)) {
1534                                         if (unlikely((*bufs)->ol_flags
1535                                                 & PKT_TX_VLAN_PKT)) {
1536                                                 ret = rte_vlan_insert(bufs);
1537                                                 if (ret)
1538                                                         goto send_n_return;
1539                                         }
1540                                         DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1541                                                 &fd_arr[loop],
1542                                                 mempool_to_bpid(mp));
1543                                         bufs++;
1544                                         continue;
1545                                 }
1546                         } else {
1547                                 mi = rte_mbuf_from_indirect(*bufs);
1548                                 mp = mi->pool;
1549                         }
1550                         /* Not a frame allocated from a hw_pkt pool */
1551                         if (unlikely(!mp || !priv->bp_list)) {
1552                                 DPAA2_PMD_ERR("Err: No buffer pool attached");
1553                                 goto send_n_return;
1554                         }
1555
1556                         if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1557                                 DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1558                                 /* The allocation should be from the default
1559                                  * buffer pool attached to this interface
1560                                  */
1561                                 bpid = priv->bp_list->buf_pool.bpid;
1562
1563                                 if (unlikely((*bufs)->nb_segs > 1)) {
1564                                         DPAA2_PMD_ERR(
1565                                                 "S/G not supported for non hw offload buffer");
1566                                         goto send_n_return;
1567                                 }
1568                                 if (eth_copy_mbuf_to_fd(*bufs,
1569                                                         &fd_arr[loop], bpid)) {
1570                                         goto send_n_return;
1571                                 }
1572                                 /* free the original packet */
1573                                 rte_pktmbuf_free(*bufs);
1574                         } else {
1575                                 bpid = mempool_to_bpid(mp);
1576                                 if (unlikely((*bufs)->nb_segs > 1)) {
1577                                         if (eth_mbuf_to_sg_fd(*bufs,
1578                                                               &fd_arr[loop],
1579                                                               mp,
1580                                                               bpid))
1581                                                 goto send_n_return;
1582                                 } else {
1583                                         eth_mbuf_to_fd(*bufs,
1584                                                        &fd_arr[loop], bpid);
1585                                 }
1586                         }
1587                         bufs++;
1588                 }
1589
1590                 loop = 0;
1591                 retry_count = 0;
1592                 while (loop < frames_to_send) {
1593                         ret = qbman_swp_enqueue_multiple_desc(swp,
1594                                         &eqdesc[loop], &fd_arr[loop],
1595                                         frames_to_send - loop);
1596                         if (unlikely(ret < 0)) {
1597                                 retry_count++;
1598                                 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1599                                         num_tx += loop;
1600                                         nb_pkts -= loop;
1601                                         goto send_n_return;
1602                                 }
1603                         } else {
1604                                 loop += ret;
1605                                 retry_count = 0;
1606                         }
1607                 }
1608
1609                 num_tx += loop;
1610                 nb_pkts -= loop;
1611         }
1612         dpaa2_q->tx_pkts += num_tx;
1613         return num_tx;
1614
1615 send_n_return:
1616         /* send any already prepared fd */
1617         if (loop) {
1618                 unsigned int i = 0;
1619
1620                 retry_count = 0;
1621                 while (i < loop) {
1622                         ret = qbman_swp_enqueue_multiple_desc(swp,
1623                                        &eqdesc[i], &fd_arr[i], loop - i);
1624                         if (unlikely(ret < 0)) {
1625                                 retry_count++;
1626                                 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1627                                         break;
1628                         } else {
1629                                 i += ret;
1630                                 retry_count = 0;
1631                         }
1632                 }
1633                 num_tx += i;
1634         }
1635 skip_tx:
1636         dpaa2_q->tx_pkts += num_tx;
1637         return num_tx;
1638 }
1639
1640 /**
1641  * Dummy DPDK callback for TX.
1642  *
1643  * This function is used to temporarily replace the real callback during
1644  * unsafe control operations on the queue, or in case of error.
1645  *
1646  * @param dpdk_txq
1647  *   Generic pointer to TX queue structure.
1648  * @param[in] pkts
1649  *   Packets to transmit.
1650  * @param pkts_n
1651  *   Number of packets in array.
1652  *
1653  * @return
1654  *   Number of packets successfully transmitted (<= pkts_n).
1655  */
1656 uint16_t
1657 dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1658 {
1659         (void)queue;
1660         (void)bufs;
1661         (void)nb_pkts;
1662         return 0;
1663 }
1664
1665 #if defined(RTE_TOOLCHAIN_GCC)
1666 #pragma GCC diagnostic push
1667 #pragma GCC diagnostic ignored "-Wcast-qual"
1668 #elif defined(RTE_TOOLCHAIN_CLANG)
1669 #pragma clang diagnostic push
1670 #pragma clang diagnostic ignored "-Wcast-qual"
1671 #endif
1672
1673 /* This function loops back all the received packets. */
1674 uint16_t
1675 dpaa2_dev_loopback_rx(void *queue,
1676                       struct rte_mbuf **bufs __rte_unused,
1677                       uint16_t nb_pkts)
1678 {
1679         /* Function to receive frames for a given device and VQ */
1680         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1681         struct qbman_result *dq_storage, *dq_storage1 = NULL;
1682         uint32_t fqid = dpaa2_q->fqid;
1683         int ret, num_rx = 0, num_tx = 0, pull_size;
1684         uint8_t pending, status;
1685         struct qbman_swp *swp;
1686         struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
1687         struct qbman_pull_desc pulldesc;
1688         struct qbman_eq_desc eqdesc;
1689         struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
1690         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1691         struct dpaa2_dev_priv *priv = eth_data->dev_private;
1692         struct dpaa2_queue *tx_q = priv->tx_vq[0];
1693         /* todo - currently we are using 1st TX queue only for loopback*/
1694         /* TODO: currently only the 1st TX queue is used for loopback */
1695         if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
1696                 ret = dpaa2_affine_qbman_ethrx_swp();
1697                 if (ret) {
1698                         DPAA2_PMD_ERR("Failure in affining portal");
1699                         return 0;
1700                 }
1701         }
1702         swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
1703         pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
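        /* First run on this queue: prime the double-buffered dq_storage
         * scheme by issuing the initial volatile dequeue (pull) command.
         */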
1704         if (unlikely(!q_storage->active_dqs)) {
1705                 q_storage->toggle = 0;
1706                 dq_storage = q_storage->dq_storage[q_storage->toggle];
1707                 q_storage->last_num_pkts = pull_size;
1708                 qbman_pull_desc_clear(&pulldesc);
1709                 qbman_pull_desc_set_numframes(&pulldesc,
1710                                               q_storage->last_num_pkts);
1711                 qbman_pull_desc_set_fq(&pulldesc, fqid);
1712                 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1713                         (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
1714                 if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1715                         while (!qbman_check_command_complete(
1716                                get_swp_active_dqs(
1717                                DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1718                                 ;
1719                         clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1720                 }
1721                 while (1) {
1722                         if (qbman_swp_pull(swp, &pulldesc)) {
1723                                 DPAA2_PMD_DP_DEBUG(
1724                                         "VDQ command not issued. QBMAN busy\n");
1725                                 /* Portal was busy, try again */
1726                                 continue;
1727                         }
1728                         break;
1729                 }
1730                 q_storage->active_dqs = dq_storage;
1731                 q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
1732                 set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
1733                                    dq_storage);
1734         }
1735
1736         dq_storage = q_storage->active_dqs;
1737         rte_prefetch0((void *)(size_t)(dq_storage));
1738         rte_prefetch0((void *)(size_t)(dq_storage + 1));
1739
1740         /* Prepare the next pull descriptor. This leaves room for the
1741          * prefetching done on the DQRR entries
1742          */
1743         q_storage->toggle ^= 1;
1744         dq_storage1 = q_storage->dq_storage[q_storage->toggle];
1745         qbman_pull_desc_clear(&pulldesc);
1746         qbman_pull_desc_set_numframes(&pulldesc, pull_size);
1747         qbman_pull_desc_set_fq(&pulldesc, fqid);
1748         qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
1749                 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
1750
1751         /*Prepare enqueue descriptor*/
1752         qbman_eq_desc_clear(&eqdesc);
1753         qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1754         qbman_eq_desc_set_response(&eqdesc, 0, 0);
1755         qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);
1756
1757         /* Check if the previously issued command has completed.
1758          * The SWP also appears to be shared between the Ethernet
1759          * driver and the SEC driver.
1760          */
1761         while (!qbman_check_command_complete(dq_storage))
1762                 ;
1763         if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
1764                 clear_swp_active_dqs(q_storage->active_dpio_id);
1765
1766         pending = 1;
1767
1768         do {
1769                 /* Loop until dq_storage is updated with a
1770                  * new token by QBMAN
1771                  */
1772                 while (!qbman_check_new_result(dq_storage))
1773                         ;
1774                 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
1775                 /* Check whether the last pull command has expired, and
1776                  * set the loop termination condition accordingly
1777                  */
1778                 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1779                         pending = 0;
1780                         /* Check for valid frame. */
1781                         status = qbman_result_DQ_flags(dq_storage);
1782                         if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
1783                                 continue;
1784                 }
1785                 fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage);
1786
1787                 dq_storage++;
1788                 num_rx++;
1789         } while (pending);
1790
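        /* Loop the received frames straight back out: keep enqueuing the
         * dequeued FDs on the Tx queue until all have been accepted.
         */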
1791         while (num_tx < num_rx) {
1792                 num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
1793                                 &fd[num_tx], 0, num_rx - num_tx);
1794         }
1795
1796         if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1797                 while (!qbman_check_command_complete(
1798                        get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1799                         ;
1800                 clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1801         }
1802         /* issue a volatile dequeue command for next pull */
1803         while (1) {
1804                 if (qbman_swp_pull(swp, &pulldesc)) {
1805                         DPAA2_PMD_DP_DEBUG("VDQ command is not issued. "
1806                                            "QBMAN is busy (2)\n");
1807                         continue;
1808                 }
1809                 break;
1810         }
1811         q_storage->active_dqs = dq_storage1;
1812         q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
1813         set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
1814
1815         dpaa2_q->rx_pkts += num_rx;
1816         dpaa2_q->tx_pkts += num_tx;
1817
1818         return 0;
1819 }
1820 #if defined(RTE_TOOLCHAIN_GCC)
1821 #pragma GCC diagnostic pop
1822 #elif defined(RTE_TOOLCHAIN_CLANG)
1823 #pragma clang diagnostic pop
1824 #endif