drivers/net/dpaa2/dpaa2_rxtx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2020 NXP
5  *
6  */
7
8 #include <time.h>
9 #include <net/if.h>
10
11 #include <rte_mbuf.h>
12 #include <ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
16 #include <rte_dev.h>
17
18 #include <rte_fslmc.h>
19 #include <fslmc_vfio.h>
20 #include <dpaa2_hw_pvt.h>
21 #include <dpaa2_hw_dpio.h>
22 #include <dpaa2_hw_mempool.h>
23
24 #include "dpaa2_pmd_logs.h"
25 #include "dpaa2_ethdev.h"
26 #include "base/dpaa2_hw_dpni_annot.h"
27
28 static inline uint32_t __rte_hot
29 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
30                         struct dpaa2_annot_hdr *annotation);
31
32 static void enable_tx_tstamp(struct qbman_fd *fd) __rte_unused;
33
34 static inline rte_mbuf_timestamp_t *
35 dpaa2_timestamp_dynfield(struct rte_mbuf *mbuf)
36 {
37         return RTE_MBUF_DYNFIELD(mbuf,
38                 dpaa2_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
39 }
40
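/*
 * Illustrative sketch, not part of the driver: how the Rx timestamp dynamic
 * field read by dpaa2_timestamp_dynfield() is typically registered and
 * consumed. Assumes <rte_mbuf_dyn.h>; the function and variable names are
 * hypothetical stand-ins, the driver performs its own registration elsewhere.
 */
static inline int
example_read_rx_timestamp(const struct rte_mbuf *m, rte_mbuf_timestamp_t *ts)
{
        static int ts_offset = -1;
        static uint64_t ts_flag;

        /* Register the dynamic field and flag once. */
        if (ts_offset < 0 &&
            rte_mbuf_dyn_rx_timestamp_register(&ts_offset, &ts_flag) < 0)
                return -1;

        /* The flag tells whether the PMD filled the field for this mbuf. */
        if (!(m->ol_flags & ts_flag))
                return -1;

        *ts = *RTE_MBUF_DYNFIELD(m, ts_offset, rte_mbuf_timestamp_t *);
        return 0;
}
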
41 #define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
42         DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
43         DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
44         DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
45         DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
46         DPAA2_SET_FD_FRC(_fd, 0);               \
47         DPAA2_RESET_FD_CTRL(_fd);               \
48         DPAA2_RESET_FD_FLC(_fd);                \
49 } while (0)
50
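/*
 * Illustrative sketch, not part of the driver: typical use of
 * DPAA2_MBUF_TO_CONTIG_FD on the Tx fast path, mirroring dpaa2_dev_tx()
 * below. The helper name is hypothetical; it assumes a single-segment,
 * direct mbuf allocated from a DPAA2-backed mempool.
 */
static inline void
example_build_contig_fd(struct rte_mbuf *m, struct qbman_fd *fd)
{
        uint16_t bpid = mempool_to_bpid(m->pool);

        /* Fills address, length, bpid and offset; resets FRC, CTRL and FLC. */
        DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);
}
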
51 static inline void __rte_hot
52 dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
53                        void *hw_annot_addr)
54 {
55         uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);
56         struct dpaa2_annot_hdr *annotation =
57                         (struct dpaa2_annot_hdr *)hw_annot_addr;
58
59         m->packet_type = RTE_PTYPE_UNKNOWN;
60         switch (frc) {
61         case DPAA2_PKT_TYPE_ETHER:
62                 m->packet_type = RTE_PTYPE_L2_ETHER;
63                 break;
64         case DPAA2_PKT_TYPE_IPV4:
65                 m->packet_type = RTE_PTYPE_L2_ETHER |
66                         RTE_PTYPE_L3_IPV4;
67                 break;
68         case DPAA2_PKT_TYPE_IPV6:
69                 m->packet_type = RTE_PTYPE_L2_ETHER |
70                         RTE_PTYPE_L3_IPV6;
71                 break;
72         case DPAA2_PKT_TYPE_IPV4_EXT:
73                 m->packet_type = RTE_PTYPE_L2_ETHER |
74                         RTE_PTYPE_L3_IPV4_EXT;
75                 break;
76         case DPAA2_PKT_TYPE_IPV6_EXT:
77                 m->packet_type = RTE_PTYPE_L2_ETHER |
78                         RTE_PTYPE_L3_IPV6_EXT;
79                 break;
80         case DPAA2_PKT_TYPE_IPV4_TCP:
81                 m->packet_type = RTE_PTYPE_L2_ETHER |
82                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
83                 break;
84         case DPAA2_PKT_TYPE_IPV6_TCP:
85                 m->packet_type = RTE_PTYPE_L2_ETHER |
86                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
87                 break;
88         case DPAA2_PKT_TYPE_IPV4_UDP:
89                 m->packet_type = RTE_PTYPE_L2_ETHER |
90                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
91                 break;
92         case DPAA2_PKT_TYPE_IPV6_UDP:
93                 m->packet_type = RTE_PTYPE_L2_ETHER |
94                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
95                 break;
96         case DPAA2_PKT_TYPE_IPV4_SCTP:
97                 m->packet_type = RTE_PTYPE_L2_ETHER |
98                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
99                 break;
100         case DPAA2_PKT_TYPE_IPV6_SCTP:
101                 m->packet_type = RTE_PTYPE_L2_ETHER |
102                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
103                 break;
104         case DPAA2_PKT_TYPE_IPV4_ICMP:
105                 m->packet_type = RTE_PTYPE_L2_ETHER |
106                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
107                 break;
108         case DPAA2_PKT_TYPE_IPV6_ICMP:
109                 m->packet_type = RTE_PTYPE_L2_ETHER |
110                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
111                 break;
112         default:
113                 m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
114         }
115         m->hash.rss = fd->simple.flc_hi;
116         m->ol_flags |= PKT_RX_RSS_HASH;
117
118         if (dpaa2_enable_ts[m->port]) {
119                 *dpaa2_timestamp_dynfield(m) = annotation->word2;
120                 m->ol_flags |= dpaa2_timestamp_rx_dynflag;
121                 DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "",
122                                 *dpaa2_timestamp_dynfield(m));
123         }
124
125         DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
126                 "ol_flags =0x%" PRIx64 "",
127                 frc, m->packet_type, m->ol_flags);
128 }
129
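/*
 * Illustrative sketch, not part of the driver: how an application can
 * consume the packet type and RSS hash that dpaa2_dev_rx_parse_new() fills
 * in. RTE_ETH_IS_IPV4_HDR() comes from <rte_mbuf_ptype.h>; the function
 * name is hypothetical.
 */
static inline uint32_t
example_use_parse_result(const struct rte_mbuf *m)
{
        /* The RSS hash is only valid when the PMD set PKT_RX_RSS_HASH. */
        if ((m->ol_flags & PKT_RX_RSS_HASH) &&
            RTE_ETH_IS_IPV4_HDR(m->packet_type))
                return m->hash.rss;     /* e.g. key for an IPv4 flow table */

        return 0;
}
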
130 static inline uint32_t __rte_hot
131 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
132                         struct dpaa2_annot_hdr *annotation)
133 {
134         uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
135         uint16_t *vlan_tci;
136
137         DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
138                         "(4)=0x%" PRIx64 "\t",
139                         annotation->word3, annotation->word4);
140
141 #if defined(RTE_LIBRTE_IEEE1588)
142         if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP))
143                 mbuf->ol_flags |= PKT_RX_IEEE1588_PTP;
144 #endif
145
146         if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
147                 vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
148                         (VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
149                 mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
150                 mbuf->ol_flags |= PKT_RX_VLAN;
151                 pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
152         } else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
153                 vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
154                         (VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
155                 mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
156                 mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ;
157                 pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
158         }
159
160         if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
161                 pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
162                 goto parse_done;
163         } else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
164                 pkt_type |= RTE_PTYPE_L2_ETHER;
165         } else {
166                 goto parse_done;
167         }
168
169         if (BIT_ISSET_AT_POS(annotation->word3, L2_MPLS_1_PRESENT |
170                                 L2_MPLS_N_PRESENT))
171                 pkt_type |= RTE_PTYPE_L2_ETHER_MPLS;
172
173         if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
174                              L3_IPV4_N_PRESENT)) {
175                 pkt_type |= RTE_PTYPE_L3_IPV4;
176                 if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
177                         L3_IP_N_OPT_PRESENT))
178                         pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
179
180         } else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
181                   L3_IPV6_N_PRESENT)) {
182                 pkt_type |= RTE_PTYPE_L3_IPV6;
183                 if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
184                     L3_IP_N_OPT_PRESENT))
185                         pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
186         } else {
187                 goto parse_done;
188         }
189
190         if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
191                 mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
192         else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
193                 mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
194
195         if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
196             L3_IP_1_MORE_FRAGMENT |
197             L3_IP_N_FIRST_FRAGMENT |
198             L3_IP_N_MORE_FRAGMENT)) {
199                 pkt_type |= RTE_PTYPE_L4_FRAG;
200                 goto parse_done;
201         } else {
202                 pkt_type |= RTE_PTYPE_L4_NONFRAG;
203         }
204
205         if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
206                 pkt_type |= RTE_PTYPE_L4_UDP;
207
208         else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
209                 pkt_type |= RTE_PTYPE_L4_TCP;
210
211         else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
212                 pkt_type |= RTE_PTYPE_L4_SCTP;
213
214         else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
215                 pkt_type |= RTE_PTYPE_L4_ICMP;
216
217         else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
218                 pkt_type |= RTE_PTYPE_UNKNOWN;
219
220 parse_done:
221         return pkt_type;
222 }
223
224 static inline uint32_t __rte_hot
225 dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
226 {
227         struct dpaa2_annot_hdr *annotation =
228                         (struct dpaa2_annot_hdr *)hw_annot_addr;
229
230         DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
231                            annotation->word4);
232
233         if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
234                 mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
235         else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
236                 mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
237
238         if (dpaa2_enable_ts[mbuf->port]) {
239                 *dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
240                 mbuf->ol_flags |= dpaa2_timestamp_rx_dynflag;
241                 DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "",
242                                 *dpaa2_timestamp_dynfield(mbuf));
243         }
244
245         /* Check detailed parsing requirement */
246         if (annotation->word3 & 0x7FFFFC3FFFF)
247                 return dpaa2_dev_rx_parse_slow(mbuf, annotation);
248
249         /* Return some common types from parse processing */
250         switch (annotation->word4) {
251         case DPAA2_L3_IPv4:
252                 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
253         case DPAA2_L3_IPv6:
254                 return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
255         case DPAA2_L3_IPv4_TCP:
256                 return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
257                                 RTE_PTYPE_L4_TCP;
258         case DPAA2_L3_IPv4_UDP:
259                 return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
260                                 RTE_PTYPE_L4_UDP;
261         case DPAA2_L3_IPv6_TCP:
262                 return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
263                                 RTE_PTYPE_L4_TCP;
264         case DPAA2_L3_IPv6_UDP:
265                 return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
266                                 RTE_PTYPE_L4_UDP;
267         default:
268                 break;
269         }
270
271         return dpaa2_dev_rx_parse_slow(mbuf, annotation);
272 }
273
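/*
 * Illustrative sketch, not part of the driver: an application-side check of
 * the checksum error flags that dpaa2_dev_rx_parse()/_slow() derive from the
 * FAS bits. The function name is hypothetical.
 */
static inline int
example_csum_ok(const struct rte_mbuf *m)
{
        /* Hardware flagged an IP or L4 checksum error on this frame. */
        if (m->ol_flags & (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))
                return 0;
        return 1;
}
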
274 static inline struct rte_mbuf *__rte_hot
275 eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
276                   int port_id)
277 {
278         struct qbman_sge *sgt, *sge;
279         size_t sg_addr, fd_addr;
280         int i = 0;
281         void *hw_annot_addr;
282         struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;
283
284         fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
285         hw_annot_addr = (void *)(fd_addr + DPAA2_FD_PTA_SIZE);
286
287         /* Get Scatter gather table address */
288         sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));
289
290         sge = &sgt[i++];
291         sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));
292
293         /* First Scatter gather entry */
294         first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
295                 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
296         /* Prepare all the metadata for first segment */
297         first_seg->buf_addr = (uint8_t *)sg_addr;
298         first_seg->ol_flags = 0;
299         first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
300         first_seg->data_len = sge->length  & 0x1FFFF;
301         first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
302         first_seg->nb_segs = 1;
303         first_seg->next = NULL;
304         first_seg->port = port_id;
305         if (dpaa2_svr_family == SVR_LX2160A)
306                 dpaa2_dev_rx_parse_new(first_seg, fd, hw_annot_addr);
307         else
308                 first_seg->packet_type =
309                         dpaa2_dev_rx_parse(first_seg, hw_annot_addr);
310
311         rte_mbuf_refcnt_set(first_seg, 1);
312         cur_seg = first_seg;
313         while (!DPAA2_SG_IS_FINAL(sge)) {
314                 sge = &sgt[i++];
315                 sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
316                                 DPAA2_GET_FLE_ADDR(sge));
317                 next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
318                         rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
319                 next_seg->buf_addr  = (uint8_t *)sg_addr;
320                 next_seg->data_off  = DPAA2_GET_FLE_OFFSET(sge);
321                 next_seg->data_len  = sge->length  & 0x1FFFF;
322                 first_seg->nb_segs += 1;
323                 rte_mbuf_refcnt_set(next_seg, 1);
324                 cur_seg->next = next_seg;
325                 next_seg->next = NULL;
326                 cur_seg = next_seg;
327         }
328         temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
329                 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
330         rte_mbuf_refcnt_set(temp, 1);
331         rte_pktmbuf_free_seg(temp);
332
333         return (void *)first_seg;
334 }
335
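/*
 * Illustrative sketch, not part of the driver: walking the segment chain
 * that eth_sg_fd_to_mbuf() builds. pkt_len of the first segment covers the
 * whole frame while data_len is per segment. The function name is
 * hypothetical.
 */
static inline uint32_t
example_walk_sg_chain(const struct rte_mbuf *first_seg)
{
        const struct rte_mbuf *seg;
        uint32_t total = 0;

        for (seg = first_seg; seg != NULL; seg = seg->next)
                total += seg->data_len;

        /* On a well-formed chain this equals first_seg->pkt_len. */
        return total;
}
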
336 static inline struct rte_mbuf *__rte_hot
337 eth_fd_to_mbuf(const struct qbman_fd *fd,
338                int port_id)
339 {
340         void *v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
341         void *hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
342         struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
343                      rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
344
345         /* need to repopulate some of the fields,
346          * as they may have changed in the last transmission
347          */
348         mbuf->nb_segs = 1;
349         mbuf->ol_flags = 0;
350         mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
351         mbuf->data_len = DPAA2_GET_FD_LEN(fd);
352         mbuf->pkt_len = mbuf->data_len;
353         mbuf->port = port_id;
354         mbuf->next = NULL;
355         rte_mbuf_refcnt_set(mbuf, 1);
356
357         /* Parse the packet:
358          * for LX2, the parse results are in the FRC field of the FD.
359          * For other DPAA2 platforms, the parse results follow the
360          * private SW annotation area.
361          */
362
363         if (dpaa2_svr_family == SVR_LX2160A)
364                 dpaa2_dev_rx_parse_new(mbuf, fd, hw_annot_addr);
365         else
366                 mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, hw_annot_addr);
367
368         DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
369                 "fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
370                 mbuf, mbuf->buf_addr, mbuf->data_off,
371                 DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
372                 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
373                 DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
374
375         return mbuf;
376 }
377
378 static int __rte_noinline __rte_hot
379 eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
380                   struct qbman_fd *fd, uint16_t bpid)
381 {
382         struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
383         struct qbman_sge *sgt, *sge = NULL;
384         int i;
385
386         temp = rte_pktmbuf_alloc(mbuf->pool);
387         if (temp == NULL) {
388                 DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
389                 return -ENOMEM;
390         }
391
392         DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
393         DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
394         DPAA2_SET_ONLY_FD_BPID(fd, bpid);
395         DPAA2_SET_FD_OFFSET(fd, temp->data_off);
396         DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
397         DPAA2_RESET_FD_FRC(fd);
398         DPAA2_RESET_FD_CTRL(fd);
399         /*Set Scatter gather table and Scatter gather entries*/
400         sgt = (struct qbman_sge *)(
401                         (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
402                         + DPAA2_GET_FD_OFFSET(fd));
403
404         for (i = 0; i < mbuf->nb_segs; i++) {
405                 sge = &sgt[i];
406                 /*Resetting the buffer pool id and offset field*/
407                 sge->fin_bpid_offset = 0;
408                 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
409                 DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
410                 sge->length = cur_seg->data_len;
411                 if (RTE_MBUF_DIRECT(cur_seg)) {
412                         if (rte_mbuf_refcnt_read(cur_seg) > 1) {
413                                 /* If refcnt > 1, invalid bpid is set to ensure
414                                  * buffer is not freed by HW
415                                  */
416                                 DPAA2_SET_FLE_IVP(sge);
417                                 rte_mbuf_refcnt_update(cur_seg, -1);
418                         } else
419                                 DPAA2_SET_FLE_BPID(sge,
420                                                 mempool_to_bpid(cur_seg->pool));
421                         cur_seg = cur_seg->next;
422                 } else {
423                         /* Get owner MBUF from indirect buffer */
424                         mi = rte_mbuf_from_indirect(cur_seg);
425                         if (rte_mbuf_refcnt_read(mi) > 1) {
426                                 /* If refcnt > 1, invalid bpid is set to ensure
427                                  * owner buffer is not freed by HW
428                                  */
429                                 DPAA2_SET_FLE_IVP(sge);
430                         } else {
431                                 DPAA2_SET_FLE_BPID(sge,
432                                                    mempool_to_bpid(mi->pool));
433                                 rte_mbuf_refcnt_update(mi, 1);
434                         }
435                         prev_seg = cur_seg;
436                         cur_seg = cur_seg->next;
437                         prev_seg->next = NULL;
438                         rte_pktmbuf_free(prev_seg);
439                 }
440         }
441         DPAA2_SG_SET_FINAL(sge, true);
442         return 0;
443 }
444
445 static void
446 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
447                struct qbman_fd *fd, uint16_t bpid) __rte_unused;
448
449 static void __rte_noinline __rte_hot
450 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
451                struct qbman_fd *fd, uint16_t bpid)
452 {
453         DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
454
455         DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
456                 "fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
457                 mbuf, mbuf->buf_addr, mbuf->data_off,
458                 DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
459                 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
460                 DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
461         if (RTE_MBUF_DIRECT(mbuf)) {
462                 if (rte_mbuf_refcnt_read(mbuf) > 1) {
463                         DPAA2_SET_FD_IVP(fd);
464                         rte_mbuf_refcnt_update(mbuf, -1);
465                 }
466         } else {
467                 struct rte_mbuf *mi;
468
469                 mi = rte_mbuf_from_indirect(mbuf);
470                 if (rte_mbuf_refcnt_read(mi) > 1)
471                         DPAA2_SET_FD_IVP(fd);
472                 else
473                         rte_mbuf_refcnt_update(mi, 1);
474                 rte_pktmbuf_free(mbuf);
475         }
476 }
477
478 static inline int __rte_hot
479 eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
480                     struct qbman_fd *fd, uint16_t bpid)
481 {
482         struct rte_mbuf *m;
483         void *mb = NULL;
484
485         if (rte_dpaa2_mbuf_alloc_bulk(
486                 rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
487                 DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
488                 return -1;
489         }
490         m = (struct rte_mbuf *)mb;
491         memcpy((char *)m->buf_addr + mbuf->data_off,
492                (void *)((char *)mbuf->buf_addr + mbuf->data_off),
493                 mbuf->pkt_len);
494
495         /* Copy required fields */
496         m->data_off = mbuf->data_off;
497         m->ol_flags = mbuf->ol_flags;
498         m->packet_type = mbuf->packet_type;
499         m->tx_offload = mbuf->tx_offload;
500
501         DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);
502
503         DPAA2_PMD_DP_DEBUG(
504                 "mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
505                 " meta: %d, off: %d, len: %d\n",
506                 (void *)mbuf,
507                 mbuf->buf_addr,
508                 DPAA2_GET_FD_ADDR(fd),
509                 DPAA2_GET_FD_BPID(fd),
510                 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
511                 DPAA2_GET_FD_OFFSET(fd),
512                 DPAA2_GET_FD_LEN(fd));
513
514         return 0;
515 }
516
517 /* This function assumes that the caller keeps the same nb_pkts value
518  * across calls for a given queue; if that is not the case, better use the
519  * non-prefetch version of the Rx call.
520  * It will return the packets as requested in the previous call, without
521  * honoring the current nb_pkts or bufs space.
522  */
523 uint16_t
524 dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
525 {
526         /* Function to receive frames for a given device and VQ */
527         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
528         struct qbman_result *dq_storage, *dq_storage1 = NULL;
529         uint32_t fqid = dpaa2_q->fqid;
530         int ret, num_rx = 0, pull_size;
531         uint8_t pending, status;
532         struct qbman_swp *swp;
533         const struct qbman_fd *fd;
534         struct qbman_pull_desc pulldesc;
535         struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
536         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
537 #if defined(RTE_LIBRTE_IEEE1588)
538         struct dpaa2_dev_priv *priv = eth_data->dev_private;
539 #endif
540
541         if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
542                 ret = dpaa2_affine_qbman_ethrx_swp();
543                 if (ret) {
544                         DPAA2_PMD_ERR("Failure in affining portal");
545                         return 0;
546                 }
547         }
548
549         if (unlikely(!rte_dpaa2_bpid_info &&
550                      rte_eal_process_type() == RTE_PROC_SECONDARY))
551                 rte_dpaa2_bpid_info = dpaa2_q->bp_array;
552
553         swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
554         pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
555         if (unlikely(!q_storage->active_dqs)) {
556                 q_storage->toggle = 0;
557                 dq_storage = q_storage->dq_storage[q_storage->toggle];
558                 q_storage->last_num_pkts = pull_size;
559                 qbman_pull_desc_clear(&pulldesc);
560                 qbman_pull_desc_set_numframes(&pulldesc,
561                                               q_storage->last_num_pkts);
562                 qbman_pull_desc_set_fq(&pulldesc, fqid);
563                 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
564                         (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
565                 if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
566                         while (!qbman_check_command_complete(
567                                get_swp_active_dqs(
568                                DPAA2_PER_LCORE_ETHRX_DPIO->index)))
569                                 ;
570                         clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
571                 }
572                 while (1) {
573                         if (qbman_swp_pull(swp, &pulldesc)) {
574                                 DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
575                                                   " QBMAN is busy (1)\n");
576                                 /* Portal was busy, try again */
577                                 continue;
578                         }
579                         break;
580                 }
581                 q_storage->active_dqs = dq_storage;
582                 q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
583                 set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
584                                    dq_storage);
585         }
586
587         dq_storage = q_storage->active_dqs;
588         rte_prefetch0((void *)(size_t)(dq_storage));
589         rte_prefetch0((void *)(size_t)(dq_storage + 1));
590
591         /* Prepare the next pull descriptor. This gives space for the
592          * prefetching done on DQRR entries.
593          */
594         q_storage->toggle ^= 1;
595         dq_storage1 = q_storage->dq_storage[q_storage->toggle];
596         qbman_pull_desc_clear(&pulldesc);
597         qbman_pull_desc_set_numframes(&pulldesc, pull_size);
598         qbman_pull_desc_set_fq(&pulldesc, fqid);
599         qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
600                 (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
601
602         /* Check if the previously issued command has completed.
603          * The SWP also seems to be shared between the Ethernet driver
604          * and the SEC driver.
605          */
606         while (!qbman_check_command_complete(dq_storage))
607                 ;
608         if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
609                 clear_swp_active_dqs(q_storage->active_dpio_id);
610
611         pending = 1;
612
613         do {
614                 /* Loop until the dq_storage is updated with
615                  * new token by QBMAN
616                  */
617                 while (!qbman_check_new_result(dq_storage))
618                         ;
619                 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
620                 /* Check whether the last pull command has expired and
621                  * set the condition for loop termination.
622                  */
623                 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
624                         pending = 0;
625                         /* Check for valid frame. */
626                         status = qbman_result_DQ_flags(dq_storage);
627                         if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
628                                 continue;
629                 }
630                 fd = qbman_result_DQ_fd(dq_storage);
631
632 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
633                 if (dpaa2_svr_family != SVR_LX2160A) {
634                         const struct qbman_fd *next_fd =
635                                 qbman_result_DQ_fd(dq_storage + 1);
636                         /* Prefetch Annotation address for the parse results */
637                         rte_prefetch0(DPAA2_IOVA_TO_VADDR((DPAA2_GET_FD_ADDR(
638                                 next_fd) + DPAA2_FD_PTA_SIZE + 16)));
639                 }
640 #endif
641
642                 if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
643                         bufs[num_rx] = eth_sg_fd_to_mbuf(fd, eth_data->port_id);
644                 else
645                         bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
646 #if defined(RTE_LIBRTE_IEEE1588)
647                 priv->rx_timestamp = *dpaa2_timestamp_dynfield(bufs[num_rx]);
648 #endif
649
650                 if (eth_data->dev_conf.rxmode.offloads &
651                                 DEV_RX_OFFLOAD_VLAN_STRIP)
652                         rte_vlan_strip(bufs[num_rx]);
653
654                 dq_storage++;
655                 num_rx++;
656         } while (pending);
657
658         if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
659                 while (!qbman_check_command_complete(
660                        get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
661                         ;
662                 clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
663         }
664         /* issue a volatile dequeue command for next pull */
665         while (1) {
666                 if (qbman_swp_pull(swp, &pulldesc)) {
667                         DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
668                                           "QBMAN is busy (2)\n");
669                         continue;
670                 }
671                 break;
672         }
673         q_storage->active_dqs = dq_storage1;
674         q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
675         set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
676
677         dpaa2_q->rx_pkts += num_rx;
678
679         return num_rx;
680 }
681
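/*
 * Illustrative sketch, not part of the driver: an application Rx loop that
 * honors the constraint documented above dpaa2_dev_prefetch_rx() by using
 * the same burst size for a given queue on every call. The port/queue ids
 * and function name are hypothetical.
 */
static inline void
example_const_burst_rx_loop(uint16_t port_id, uint16_t queue_id)
{
        struct rte_mbuf *pkts[32];      /* keep this size constant per queue */
        uint16_t nb_rx, i;

        nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, RTE_DIM(pkts));
        for (i = 0; i < nb_rx; i++)
                rte_pktmbuf_free(pkts[i]);      /* application work goes here */
}
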
682 void __rte_hot
683 dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
684                                  const struct qbman_fd *fd,
685                                  const struct qbman_result *dq,
686                                  struct dpaa2_queue *rxq,
687                                  struct rte_event *ev)
688 {
689         rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
690                 DPAA2_FD_PTA_SIZE + 16));
691
692         ev->flow_id = rxq->ev.flow_id;
693         ev->sub_event_type = rxq->ev.sub_event_type;
694         ev->event_type = RTE_EVENT_TYPE_ETHDEV;
695         ev->op = RTE_EVENT_OP_NEW;
696         ev->sched_type = rxq->ev.sched_type;
697         ev->queue_id = rxq->ev.queue_id;
698         ev->priority = rxq->ev.priority;
699
700         ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
701
702         qbman_swp_dqrr_consume(swp, dq);
703 }
704
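/*
 * Illustrative sketch, not part of the driver: the events populated by the
 * callbacks above and below reach the application through the eventdev API.
 * Assumes <rte_eventdev.h>; the device/port ids and function name are
 * hypothetical.
 */
static inline void
example_eventdev_rx(uint8_t evdev_id, uint8_t ev_port_id)
{
        struct rte_event ev[8];
        uint16_t nb_ev, i;

        nb_ev = rte_event_dequeue_burst(evdev_id, ev_port_id, ev,
                                        RTE_DIM(ev), 0);
        for (i = 0; i < nb_ev; i++) {
                /* ev[i].mbuf was filled by eth_fd_to_mbuf() in the callback */
                rte_pktmbuf_free(ev[i].mbuf);
        }
}
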
705 void __rte_hot
706 dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused,
707                                const struct qbman_fd *fd,
708                                const struct qbman_result *dq,
709                                struct dpaa2_queue *rxq,
710                                struct rte_event *ev)
711 {
712         uint8_t dqrr_index;
713
714         rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
715                 DPAA2_FD_PTA_SIZE + 16));
716
717         ev->flow_id = rxq->ev.flow_id;
718         ev->sub_event_type = rxq->ev.sub_event_type;
719         ev->event_type = RTE_EVENT_TYPE_ETHDEV;
720         ev->op = RTE_EVENT_OP_NEW;
721         ev->sched_type = rxq->ev.sched_type;
722         ev->queue_id = rxq->ev.queue_id;
723         ev->priority = rxq->ev.priority;
724
725         ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
726
727         dqrr_index = qbman_get_dqrr_idx(dq);
728         *dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
729         DPAA2_PER_LCORE_DQRR_SIZE++;
730         DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
731         DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
732 }
733
734 void __rte_hot
735 dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
736                                 const struct qbman_fd *fd,
737                                 const struct qbman_result *dq,
738                                 struct dpaa2_queue *rxq,
739                                 struct rte_event *ev)
740 {
741         rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
742                 DPAA2_FD_PTA_SIZE + 16));
743
744         ev->flow_id = rxq->ev.flow_id;
745         ev->sub_event_type = rxq->ev.sub_event_type;
746         ev->event_type = RTE_EVENT_TYPE_ETHDEV;
747         ev->op = RTE_EVENT_OP_NEW;
748         ev->sched_type = rxq->ev.sched_type;
749         ev->queue_id = rxq->ev.queue_id;
750         ev->priority = rxq->ev.priority;
751
752         ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
753
754         *dpaa2_seqn(ev->mbuf) = DPAA2_ENQUEUE_FLAG_ORP;
755         *dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
756         *dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;
757
758         qbman_swp_dqrr_consume(swp, dq);
759 }
760
761 uint16_t
762 dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
763 {
764         /* Function to receive frames for a given device and VQ */
765         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
766         struct qbman_result *dq_storage;
767         uint32_t fqid = dpaa2_q->fqid;
768         int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
769         uint8_t pending, status;
770         struct qbman_swp *swp;
771         const struct qbman_fd *fd;
772         struct qbman_pull_desc pulldesc;
773         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
774
775         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
776                 ret = dpaa2_affine_qbman_swp();
777                 if (ret) {
778                         DPAA2_PMD_ERR(
779                                 "Failed to allocate IO portal, tid: %d\n",
780                                 rte_gettid());
781                         return 0;
782                 }
783         }
784         swp = DPAA2_PER_LCORE_PORTAL;
785
786         do {
787                 dq_storage = dpaa2_q->q_storage->dq_storage[0];
788                 qbman_pull_desc_clear(&pulldesc);
789                 qbman_pull_desc_set_fq(&pulldesc, fqid);
790                 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
791                                 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
792
793                 if (next_pull > dpaa2_dqrr_size) {
794                         qbman_pull_desc_set_numframes(&pulldesc,
795                                 dpaa2_dqrr_size);
796                         next_pull -= dpaa2_dqrr_size;
797                 } else {
798                         qbman_pull_desc_set_numframes(&pulldesc, next_pull);
799                         next_pull = 0;
800                 }
801
802                 while (1) {
803                         if (qbman_swp_pull(swp, &pulldesc)) {
804                                 DPAA2_PMD_DP_DEBUG(
805                                         "VDQ command is not issued.QBMAN is busy\n");
806                                 /* Portal was busy, try again */
807                                 continue;
808                         }
809                         break;
810                 }
811
812                 rte_prefetch0((void *)((size_t)(dq_storage + 1)));
813                 /* Check if the previously issued command has completed. */
814                 while (!qbman_check_command_complete(dq_storage))
815                         ;
816
817                 num_pulled = 0;
818                 pending = 1;
819                 do {
820                         /* Loop until the dq_storage is updated with
821                          * new token by QBMAN
822                          */
823                         while (!qbman_check_new_result(dq_storage))
824                                 ;
825                         rte_prefetch0((void *)((size_t)(dq_storage + 2)));
826                         /* Check whether the last pull command has expired
827                          * and set the condition for loop termination.
828                          */
829                         if (qbman_result_DQ_is_pull_complete(dq_storage)) {
830                                 pending = 0;
831                                 /* Check for valid frame. */
832                                 status = qbman_result_DQ_flags(dq_storage);
833                                 if (unlikely((status &
834                                         QBMAN_DQ_STAT_VALIDFRAME) == 0))
835                                         continue;
836                         }
837                         fd = qbman_result_DQ_fd(dq_storage);
838
839 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
840                         if (dpaa2_svr_family != SVR_LX2160A) {
841                                 const struct qbman_fd *next_fd =
842                                         qbman_result_DQ_fd(dq_storage + 1);
843
844                                 /* Prefetch Annotation address for the parse
845                                  * results.
846                                  */
847                                 rte_prefetch0((DPAA2_IOVA_TO_VADDR(
848                                         DPAA2_GET_FD_ADDR(next_fd) +
849                                         DPAA2_FD_PTA_SIZE + 16)));
850                         }
851 #endif
852
853                         if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
854                                 bufs[num_rx] = eth_sg_fd_to_mbuf(fd,
855                                                         eth_data->port_id);
856                         else
857                                 bufs[num_rx] = eth_fd_to_mbuf(fd,
858                                                         eth_data->port_id);
859
860                         if (eth_data->dev_conf.rxmode.offloads &
861                                         DEV_RX_OFFLOAD_VLAN_STRIP) {
862                                 rte_vlan_strip(bufs[num_rx]);
863                         }
864
865                         dq_storage++;
866                         num_rx++;
867                         num_pulled++;
868                 } while (pending);
869         /* Last VDQ provided all packets and more packets are requested */
870         } while (next_pull && num_pulled == dpaa2_dqrr_size);
871
872         dpaa2_q->rx_pkts += num_rx;
873
874         return num_rx;
875 }
876
877 uint16_t dpaa2_dev_tx_conf(void *queue)
878 {
879         /* Function to receive Tx confirmations for a given device and VQ */
880         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
881         struct qbman_result *dq_storage;
882         uint32_t fqid = dpaa2_q->fqid;
883         int ret, num_tx_conf = 0, num_pulled;
884         uint8_t pending, status;
885         struct qbman_swp *swp;
886         const struct qbman_fd *fd, *next_fd;
887         struct qbman_pull_desc pulldesc;
888         struct qbman_release_desc releasedesc;
889         uint32_t bpid;
890         uint64_t buf;
891 #if defined(RTE_LIBRTE_IEEE1588)
892         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
893         struct dpaa2_dev_priv *priv = eth_data->dev_private;
894         struct dpaa2_annot_hdr *annotation;
895 #endif
896
897         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
898                 ret = dpaa2_affine_qbman_swp();
899                 if (ret) {
900                         DPAA2_PMD_ERR(
901                                 "Failed to allocate IO portal, tid: %d\n",
902                                 rte_gettid());
903                         return 0;
904                 }
905         }
906         swp = DPAA2_PER_LCORE_PORTAL;
907
908         do {
909                 dq_storage = dpaa2_q->q_storage->dq_storage[0];
910                 qbman_pull_desc_clear(&pulldesc);
911                 qbman_pull_desc_set_fq(&pulldesc, fqid);
912                 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
913                                 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
914
915                 qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
916
917                 while (1) {
918                         if (qbman_swp_pull(swp, &pulldesc)) {
919                                 DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
920                                                    "QBMAN is busy\n");
921                                 /* Portal was busy, try again */
922                                 continue;
923                         }
924                         break;
925                 }
926
927                 rte_prefetch0((void *)((size_t)(dq_storage + 1)));
928                 /* Check if the previously issued command has completed. */
929                 while (!qbman_check_command_complete(dq_storage))
930                         ;
931
932                 num_pulled = 0;
933                 pending = 1;
934                 do {
935                         /* Loop until the dq_storage is updated with
936                          * new token by QBMAN
937                          */
938                         while (!qbman_check_new_result(dq_storage))
939                                 ;
940                         rte_prefetch0((void *)((size_t)(dq_storage + 2)));
941                         /* Check whether the last pull command has expired
942                          * and set the condition for loop termination.
943                          */
944                         if (qbman_result_DQ_is_pull_complete(dq_storage)) {
945                                 pending = 0;
946                                 /* Check for valid frame. */
947                                 status = qbman_result_DQ_flags(dq_storage);
948                                 if (unlikely((status &
949                                         QBMAN_DQ_STAT_VALIDFRAME) == 0))
950                                         continue;
951                         }
952                         fd = qbman_result_DQ_fd(dq_storage);
953
954                         next_fd = qbman_result_DQ_fd(dq_storage + 1);
955                         /* Prefetch Annotation address for the parse results */
956                         rte_prefetch0((void *)(size_t)
957                                 (DPAA2_GET_FD_ADDR(next_fd) +
958                                  DPAA2_FD_PTA_SIZE + 16));
959
960                         bpid = DPAA2_GET_FD_BPID(fd);
961
962                         /* Create a release descriptor required for releasing
963                          * buffers into QBMAN
964                          */
965                         qbman_release_desc_clear(&releasedesc);
966                         qbman_release_desc_set_bpid(&releasedesc, bpid);
967
968                         buf = DPAA2_GET_FD_ADDR(fd);
969                         /* feed them to bman */
970                         do {
971                                 ret = qbman_swp_release(swp, &releasedesc,
972                                                         &buf, 1);
973                         } while (ret == -EBUSY);
974
975                         dq_storage++;
976                         num_tx_conf++;
977                         num_pulled++;
978 #if defined(RTE_LIBRTE_IEEE1588)
979                         annotation = (struct dpaa2_annot_hdr *)((size_t)
980                                 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
981                                 DPAA2_FD_PTA_SIZE);
982                         priv->tx_timestamp = annotation->word2;
983 #endif
984                 } while (pending);
985
986         /* Last VDQ provided all packets and more packets are requested */
987         } while (num_pulled == dpaa2_dqrr_size);
988
989         dpaa2_q->rx_pkts += num_tx_conf;
990
991         return num_tx_conf;
992 }
993
994 /* Configure the egress frame annotation for timestamp update */
995 static void enable_tx_tstamp(struct qbman_fd *fd)
996 {
997         struct dpaa2_faead *fd_faead;
998
999         /* Set frame annotation status field as valid */
1000         (fd)->simple.frc |= DPAA2_FD_FRC_FASV;
1001
1002         /* Set frame annotation egress action descriptor as valid */
1003         (fd)->simple.frc |= DPAA2_FD_FRC_FAEADV;
1004
1005         /* Set Annotation Length as 128B */
1006         (fd)->simple.ctrl |= DPAA2_FD_CTRL_ASAL;
1007
1008         /* enable update of confirmation frame annotation */
1009         fd_faead = (struct dpaa2_faead *)((size_t)
1010                         DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
1011                         DPAA2_FD_PTA_SIZE + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET);
1012         fd_faead->ctrl = DPAA2_ANNOT_FAEAD_A2V | DPAA2_ANNOT_FAEAD_UPDV |
1013                                 DPAA2_ANNOT_FAEAD_UPD;
1014 }
1015
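/*
 * Illustrative sketch, not part of the driver: with RTE_LIBRTE_IEEE1588
 * enabled, an application can read back the Tx timestamp captured through
 * the egress annotation configured above via the ethdev timesync API. The
 * port id and function name are hypothetical.
 */
static inline int
example_read_tx_timestamp(uint16_t port_id, struct timespec *ts)
{
        /* Returns 0 when a timestamp is available, a negative value otherwise. */
        return rte_eth_timesync_read_tx_timestamp(port_id, ts);
}
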
1016 /*
1017  * Callback to handle sending packets through WRIOP based interface
1018  */
1019 uint16_t
1020 dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1021 {
1022         /* Function to transmit frames to the given device and VQ */
1023         uint32_t loop, retry_count;
1024         int32_t ret;
1025         struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1026         struct rte_mbuf *mi;
1027         uint32_t frames_to_send;
1028         struct rte_mempool *mp;
1029         struct qbman_eq_desc eqdesc;
1030         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1031         struct qbman_swp *swp;
1032         uint16_t num_tx = 0;
1033         uint16_t bpid;
1034         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1035         struct dpaa2_dev_priv *priv = eth_data->dev_private;
1036         uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1037
1038         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1039                 ret = dpaa2_affine_qbman_swp();
1040                 if (ret) {
1041                         DPAA2_PMD_ERR(
1042                                 "Failed to allocate IO portal, tid: %d\n",
1043                                 rte_gettid());
1044                         return 0;
1045                 }
1046         }
1047         swp = DPAA2_PER_LCORE_PORTAL;
1048
1049         DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
1050                         eth_data, dpaa2_q->fqid);
1051
1052 #ifdef RTE_LIBRTE_IEEE1588
1053         /* The IEEE 1588 driver needs a pointer to the Tx confirmation
1054          * queue corresponding to the last transmitted packet in order
1055          * to read the timestamp.
1056          */
1057         priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
1058         dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
1059 #endif
1060
1061         /*Prepare enqueue descriptor*/
1062         qbman_eq_desc_clear(&eqdesc);
1063         qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1064         qbman_eq_desc_set_fq(&eqdesc, dpaa2_q->fqid);
1065
1066         /*Clear the unused FD fields before sending*/
1067         while (nb_pkts) {
1068                 /*Check if the queue is congested*/
1069                 retry_count = 0;
1070                 while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1071                         retry_count++;
1072                         /* Retry for some time before giving up */
1073                         if (retry_count > CONG_RETRY_COUNT)
1074                                 goto skip_tx;
1075                 }
1076
1077                 frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1078                         dpaa2_eqcr_size : nb_pkts;
1079
1080                 for (loop = 0; loop < frames_to_send; loop++) {
1081                         if (*dpaa2_seqn(*bufs)) {
1082                                 uint8_t dqrr_index = *dpaa2_seqn(*bufs) - 1;
1083
1084                                 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
1085                                                 dqrr_index;
1086                                 DPAA2_PER_LCORE_DQRR_SIZE--;
1087                                 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1088                                 *dpaa2_seqn(*bufs) = DPAA2_INVALID_MBUF_SEQN;
1089                         }
1090
1091                         if (likely(RTE_MBUF_DIRECT(*bufs))) {
1092                                 mp = (*bufs)->pool;
1093                                 /* Check the basic scenario and set
1094                                  * the FD appropriately here itself.
1095                                  */
1096                                 if (likely(mp && mp->ops_index ==
1097                                     priv->bp_list->dpaa2_ops_index &&
1098                                     (*bufs)->nb_segs == 1 &&
1099                                     rte_mbuf_refcnt_read((*bufs)) == 1)) {
1100                                         if (unlikely(((*bufs)->ol_flags
1101                                                 & PKT_TX_VLAN_PKT) ||
1102                                                 (eth_data->dev_conf.txmode.offloads
1103                                                 & DEV_TX_OFFLOAD_VLAN_INSERT))) {
1104                                                 ret = rte_vlan_insert(bufs);
1105                                                 if (ret)
1106                                                         goto send_n_return;
1107                                         }
1108                                         DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1109                                         &fd_arr[loop], mempool_to_bpid(mp));
1110                                         bufs++;
1111 #ifdef RTE_LIBRTE_IEEE1588
1112                                         enable_tx_tstamp(&fd_arr[loop]);
1113 #endif
1114                                         continue;
1115                                 }
1116                         } else {
1117                                 mi = rte_mbuf_from_indirect(*bufs);
1118                                 mp = mi->pool;
1119                         }
1120                         /* Not a hw_pkt pool allocated frame */
1121                         if (unlikely(!mp || !priv->bp_list)) {
1122                                 DPAA2_PMD_ERR("Err: No buffer pool attached");
1123                                 goto send_n_return;
1124                         }
1125
1126                         if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
1127                                 (eth_data->dev_conf.txmode.offloads
1128                                 & DEV_TX_OFFLOAD_VLAN_INSERT))) {
1129                                 int ret = rte_vlan_insert(bufs);
1130                                 if (ret)
1131                                         goto send_n_return;
1132                         }
1133                         if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1134                                 DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1135                                 /* alloc should be from the default buffer pool
1136                                  * attached to this interface
1137                                  */
1138                                 bpid = priv->bp_list->buf_pool.bpid;
1139
1140                                 if (unlikely((*bufs)->nb_segs > 1)) {
1141                                         DPAA2_PMD_ERR("S/G support not added"
1142                                                 " for non hw offload buffer");
1143                                         goto send_n_return;
1144                                 }
1145                                 if (eth_copy_mbuf_to_fd(*bufs,
1146                                                         &fd_arr[loop], bpid)) {
1147                                         goto send_n_return;
1148                                 }
1149                                 /* free the original packet */
1150                                 rte_pktmbuf_free(*bufs);
1151                         } else {
1152                                 bpid = mempool_to_bpid(mp);
1153                                 if (unlikely((*bufs)->nb_segs > 1)) {
1154                                         if (eth_mbuf_to_sg_fd(*bufs,
1155                                                         &fd_arr[loop], bpid))
1156                                                 goto send_n_return;
1157                                 } else {
1158                                         eth_mbuf_to_fd(*bufs,
1159                                                        &fd_arr[loop], bpid);
1160                                 }
1161                         }
1162 #ifdef RTE_LIBRTE_IEEE1588
1163                         enable_tx_tstamp(&fd_arr[loop]);
1164 #endif
1165                         bufs++;
1166                 }
1167
1168                 loop = 0;
1169                 retry_count = 0;
1170                 while (loop < frames_to_send) {
1171                         ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1172                                         &fd_arr[loop], &flags[loop],
1173                                         frames_to_send - loop);
1174                         if (unlikely(ret < 0)) {
1175                                 retry_count++;
1176                                 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1177                                         num_tx += loop;
1178                                         nb_pkts -= loop;
1179                                         goto send_n_return;
1180                                 }
1181                         } else {
1182                                 loop += ret;
1183                                 retry_count = 0;
1184                         }
1185                 }
1186
1187                 num_tx += loop;
1188                 nb_pkts -= loop;
1189         }
1190         dpaa2_q->tx_pkts += num_tx;
1191         return num_tx;
1192
1193 send_n_return:
1194         /* send any already prepared fd */
1195         if (loop) {
1196                 unsigned int i = 0;
1197
1198                 retry_count = 0;
1199                 while (i < loop) {
1200                         ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1201                                                          &fd_arr[i],
1202                                                          &flags[i],
1203                                                          loop - i);
1204                         if (unlikely(ret < 0)) {
1205                                 retry_count++;
1206                                 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1207                                         break;
1208                         } else {
1209                                 i += ret;
1210                                 retry_count = 0;
1211                         }
1212                 }
1213                 num_tx += i;
1214         }
1215 skip_tx:
1216         dpaa2_q->tx_pkts += num_tx;
1217         return num_tx;
1218 }
1219
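/*
 * Illustrative sketch, not part of the driver: dpaa2_dev_tx() may accept
 * fewer frames than requested (for example under congestion), so a caller
 * typically retries or frees the remainder. The port/queue ids and function
 * name are hypothetical.
 */
static inline uint16_t
example_tx_with_retry(uint16_t port_id, uint16_t queue_id,
                      struct rte_mbuf **pkts, uint16_t nb_pkts)
{
        uint16_t sent = 0;
        int retries = 3;

        while (sent < nb_pkts && retries-- > 0)
                sent += rte_eth_tx_burst(port_id, queue_id,
                                         &pkts[sent], nb_pkts - sent);

        /* Frames still unsent remain owned by the application. */
        return sent;
}
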
1220 void
1221 dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci)
1222 {
1223         struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1224         struct qbman_fd *fd;
1225         struct rte_mbuf *m;
1226
1227         fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
1228
1229         /* Setting the port id does not matter as we are going to free the mbuf */
1230         m = eth_fd_to_mbuf(fd, 0);
1231         rte_pktmbuf_free(m);
1232 }
1233
1234 static void
1235 dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
1236                              struct rte_mbuf *m,
1237                              struct qbman_eq_desc *eqdesc)
1238 {
1239         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1240         struct dpaa2_dev_priv *priv = eth_data->dev_private;
1241         struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1242         struct eqresp_metadata *eqresp_meta;
1243         uint16_t orpid, seqnum;
1244         uint8_t dq_idx;
1245
1246         qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid);
1247
1248         if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
1249                 orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
1250                         DPAA2_EQCR_OPRID_SHIFT;
1251                 seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
1252                         DPAA2_EQCR_SEQNUM_SHIFT;
1253
1254                 if (!priv->en_loose_ordered) {
1255                         qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
1256                         qbman_eq_desc_set_response(eqdesc, (uint64_t)
1257                                 DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
1258                                 dpio_dev->eqresp_pi]), 1);
1259                         qbman_eq_desc_set_token(eqdesc, 1);
1260
1261                         eqresp_meta = &dpio_dev->eqresp_meta[
1262                                 dpio_dev->eqresp_pi];
1263                         eqresp_meta->dpaa2_q = dpaa2_q;
1264                         eqresp_meta->mp = m->pool;
1265
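                        /* Advance the enqueue response producer index,
                         * wrapping at MAX_EQ_RESP_ENTRIES.
                         */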
1266                         dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
1267                                 dpio_dev->eqresp_pi++ :
1268                                 (dpio_dev->eqresp_pi = 0);
1269                 } else {
1270                         qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
1271                 }
1272         } else {
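                /* Atomic context: acknowledge the held DQRR entry via DCA */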
1273                 dq_idx = *dpaa2_seqn(m) - 1;
1274                 qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
1275                 DPAA2_PER_LCORE_DQRR_SIZE--;
1276                 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
1277         }
1278         *dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
1279 }
1280
1281 /* Callback to handle sending ordered packets through WRIOP based interface */
1282 uint16_t
1283 dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1284 {
1285         /* Function to transmit the frames to the given device and VQ */
1286         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1287         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1288         struct dpaa2_dev_priv *priv = eth_data->dev_private;
1289         struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
1290         struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1291         struct rte_mbuf *mi;
1292         struct rte_mempool *mp;
1293         struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1294         struct qbman_swp *swp;
1295         uint32_t frames_to_send, num_free_eq_desc;
1296         uint32_t loop, retry_count;
1297         int32_t ret;
1298         uint16_t num_tx = 0;
1299         uint16_t bpid;
1300
1301         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1302                 ret = dpaa2_affine_qbman_swp();
1303                 if (ret) {
1304                         DPAA2_PMD_ERR(
1305                                 "Failed to allocate IO portal, tid: %d\n",
1306                                 rte_gettid());
1307                         return 0;
1308                 }
1309         }
1310         swp = DPAA2_PER_LCORE_PORTAL;
1311
1312         DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
1313                            eth_data, dpaa2_q->fqid);
1314
1315         /* This would also handle normal and atomic queues as any type
1316          * of packet can be enqueued when ordered queues are being used.
1317          */
1318         while (nb_pkts) {
1319                 /*Check if the queue is congested*/
1320                 retry_count = 0;
1321                 while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1322                         retry_count++;
1323                         /* Retry for some time before giving up */
1324                         if (retry_count > CONG_RETRY_COUNT)
1325                                 goto skip_tx;
1326                 }
1327
1328                 frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1329                         dpaa2_eqcr_size : nb_pkts;
1330
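                /* With strict ordering, each ORP enqueue consumes an enqueue
                 * response entry; cap the burst to the descriptors free now.
                 */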
1331                 if (!priv->en_loose_ordered) {
1332                         if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
1333                                 num_free_eq_desc = dpaa2_free_eq_descriptors();
1334                                 if (num_free_eq_desc < frames_to_send)
1335                                         frames_to_send = num_free_eq_desc;
1336                         }
1337                 }
1338
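                /* Build one FD and one enqueue descriptor per packet */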
1339                 for (loop = 0; loop < frames_to_send; loop++) {
1340                         /*Prepare enqueue descriptor*/
1341                         qbman_eq_desc_clear(&eqdesc[loop]);
1342
1343                         if (*dpaa2_seqn(*bufs)) {
1344                                 /* Use only queue 0 for Tx in case of atomic/
1345                                  * ordered packets as packets can get reordered
1346                                  * when being transmitted out from the interface
1347                                  */
1348                                 dpaa2_set_enqueue_descriptor(order_sendq,
1349                                                              (*bufs),
1350                                                              &eqdesc[loop]);
1351                         } else {
1352                                 qbman_eq_desc_set_no_orp(&eqdesc[loop],
1353                                                          DPAA2_EQ_RESP_ERR_FQ);
1354                                 qbman_eq_desc_set_fq(&eqdesc[loop],
1355                                                      dpaa2_q->fqid);
1356                         }
1357
1358                         if (likely(RTE_MBUF_DIRECT(*bufs))) {
1359                                 mp = (*bufs)->pool;
1360                                 /* Check for the basic scenario and set
1361                                  * the FD directly here.
1362                                  */
1363                                 if (likely(mp && mp->ops_index ==
1364                                     priv->bp_list->dpaa2_ops_index &&
1365                                     (*bufs)->nb_segs == 1 &&
1366                                     rte_mbuf_refcnt_read((*bufs)) == 1)) {
1367                                         if (unlikely((*bufs)->ol_flags
1368                                                 & PKT_TX_VLAN_PKT)) {
1369                                           ret = rte_vlan_insert(bufs);
1370                                           if (ret)
1371                                                 goto send_n_return;
1372                                         }
1373                                         DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1374                                                 &fd_arr[loop],
1375                                                 mempool_to_bpid(mp));
1376                                         bufs++;
1377                                         continue;
1378                                 }
1379                         } else {
1380                                 mi = rte_mbuf_from_indirect(*bufs);
1381                                 mp = mi->pool;
1382                         }
1383                         /* Not a hw_pkt pool allocated frame */
1384                         if (unlikely(!mp || !priv->bp_list)) {
1385                                 DPAA2_PMD_ERR("Err: No buffer pool attached");
1386                                 goto send_n_return;
1387                         }
1388
1389                         if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1390                                 DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1391                                 /* alloc should be from the default buffer pool
1392                                  * attached to this interface
1393                                  */
1394                                 bpid = priv->bp_list->buf_pool.bpid;
1395
1396                                 if (unlikely((*bufs)->nb_segs > 1)) {
1397                                         DPAA2_PMD_ERR(
1398                                                 "S/G not supp for non hw offload buffer");
1399                                         goto send_n_return;
1400                                 }
1401                                 if (eth_copy_mbuf_to_fd(*bufs,
1402                                                         &fd_arr[loop], bpid)) {
1403                                         goto send_n_return;
1404                                 }
1405                                 /* free the original packet */
1406                                 rte_pktmbuf_free(*bufs);
1407                         } else {
1408                                 bpid = mempool_to_bpid(mp);
1409                                 if (unlikely((*bufs)->nb_segs > 1)) {
1410                                         if (eth_mbuf_to_sg_fd(*bufs,
1411                                                               &fd_arr[loop],
1412                                                               bpid))
1413                                                 goto send_n_return;
1414                                 } else {
1415                                         eth_mbuf_to_fd(*bufs,
1416                                                        &fd_arr[loop], bpid);
1417                                 }
1418                         }
1419                         bufs++;
1420                 }
1421
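                /* Enqueue the prepared FDs with their per-frame descriptors,
                 * retrying a bounded number of times if the portal is busy.
                 */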
1422                 loop = 0;
1423                 retry_count = 0;
1424                 while (loop < frames_to_send) {
1425                         ret = qbman_swp_enqueue_multiple_desc(swp,
1426                                         &eqdesc[loop], &fd_arr[loop],
1427                                         frames_to_send - loop);
1428                         if (unlikely(ret < 0)) {
1429                                 retry_count++;
1430                                 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1431                                         num_tx += loop;
1432                                         nb_pkts -= loop;
1433                                         goto send_n_return;
1434                                 }
1435                         } else {
1436                                 loop += ret;
1437                                 retry_count = 0;
1438                         }
1439                 }
1440
1441                 num_tx += loop;
1442                 nb_pkts -= loop;
1443         }
1444         dpaa2_q->tx_pkts += num_tx;
1445         return num_tx;
1446
1447 send_n_return:
1448         /* send any already prepared fd */
1449         if (loop) {
1450                 unsigned int i = 0;
1451
1452                 retry_count = 0;
1453                 while (i < loop) {
1454                         ret = qbman_swp_enqueue_multiple_desc(swp,
1455                                        &eqdesc[i], &fd_arr[i], loop - i);
1456                         if (unlikely(ret < 0)) {
1457                                 retry_count++;
1458                                 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1459                                         break;
1460                         } else {
1461                                 i += ret;
1462                                 retry_count = 0;
1463                         }
1464                 }
1465                 num_tx += i;
1466         }
1467 skip_tx:
1468         dpaa2_q->tx_pkts += num_tx;
1469         return num_tx;
1470 }
1471
1472 /**
1473  * Dummy DPDK callback for TX.
1474  *
1475  * This function is used to temporarily replace the real callback during
1476  * unsafe control operations on the queue, or in case of error.
1477  *
1478  * @param dpdk_txq
1479  *   Generic pointer to TX queue structure.
1480  * @param[in] pkts
1481  *   Packets to transmit.
1482  * @param pkts_n
1483  *   Number of packets in array.
1484  *
1485  * @return
1486  *   Number of packets successfully transmitted (<= pkts_n).
1487  */
1488 uint16_t
1489 dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1490 {
1491         (void)queue;
1492         (void)bufs;
1493         (void)nb_pkts;
1494         return 0;
1495 }
1496
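/* The loopback path below casts away const from the FDs returned by
 * qbman_result_DQ_fd(); suppress -Wcast-qual around it.
 */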
1497 #if defined(RTE_TOOLCHAIN_GCC)
1498 #pragma GCC diagnostic push
1499 #pragma GCC diagnostic ignored "-Wcast-qual"
1500 #elif defined(RTE_TOOLCHAIN_CLANG)
1501 #pragma clang diagnostic push
1502 #pragma clang diagnostic ignored "-Wcast-qual"
1503 #endif
1504
1505 /* This function loopbacks all the received packets.*/
1506 uint16_t
1507 dpaa2_dev_loopback_rx(void *queue,
1508                       struct rte_mbuf **bufs __rte_unused,
1509                       uint16_t nb_pkts)
1510 {
1511         /* Function to receive frames for a given device and VQ */
1512         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1513         struct qbman_result *dq_storage, *dq_storage1 = NULL;
1514         uint32_t fqid = dpaa2_q->fqid;
1515         int ret, num_rx = 0, num_tx = 0, pull_size;
1516         uint8_t pending, status;
1517         struct qbman_swp *swp;
1518         struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
1519         struct qbman_pull_desc pulldesc;
1520         struct qbman_eq_desc eqdesc;
1521         struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
1522         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1523         struct dpaa2_dev_priv *priv = eth_data->dev_private;
1524         struct dpaa2_queue *tx_q = priv->tx_vq[0];
1525         /* TODO - currently we are using the 1st TX queue only for loopback */
1526
1527         if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
1528                 ret = dpaa2_affine_qbman_ethrx_swp();
1529                 if (ret) {
1530                         DPAA2_PMD_ERR("Failure in affining portal");
1531                         return 0;
1532                 }
1533         }
1534         swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
1535         pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
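        /* No pull request is outstanding on this queue yet: issue the
         * initial volatile dequeue (pull) command.
         */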
1536         if (unlikely(!q_storage->active_dqs)) {
1537                 q_storage->toggle = 0;
1538                 dq_storage = q_storage->dq_storage[q_storage->toggle];
1539                 q_storage->last_num_pkts = pull_size;
1540                 qbman_pull_desc_clear(&pulldesc);
1541                 qbman_pull_desc_set_numframes(&pulldesc,
1542                                               q_storage->last_num_pkts);
1543                 qbman_pull_desc_set_fq(&pulldesc, fqid);
1544                 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1545                         (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
1546                 if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1547                         while (!qbman_check_command_complete(
1548                                get_swp_active_dqs(
1549                                DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1550                                 ;
1551                         clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1552                 }
1553                 while (1) {
1554                         if (qbman_swp_pull(swp, &pulldesc)) {
1555                                 DPAA2_PMD_DP_DEBUG(
1556                                         "VDQ command not issued. QBMAN busy\n");
1557                                 /* Portal was busy, try again */
1558                                 continue;
1559                         }
1560                         break;
1561                 }
1562                 q_storage->active_dqs = dq_storage;
1563                 q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
1564                 set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
1565                                    dq_storage);
1566         }
1567
1568         dq_storage = q_storage->active_dqs;
1569         rte_prefetch0((void *)(size_t)(dq_storage));
1570         rte_prefetch0((void *)(size_t)(dq_storage + 1));
1571
1572         /* Prepare next pull descriptor. This will give space for the
1573          * prefetching done on DQRR entries
1574          */
1575         q_storage->toggle ^= 1;
1576         dq_storage1 = q_storage->dq_storage[q_storage->toggle];
1577         qbman_pull_desc_clear(&pulldesc);
1578         qbman_pull_desc_set_numframes(&pulldesc, pull_size);
1579         qbman_pull_desc_set_fq(&pulldesc, fqid);
1580         qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
1581                 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
1582
1583         /*Prepare enqueue descriptor*/
1584         qbman_eq_desc_clear(&eqdesc);
1585         qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1586         qbman_eq_desc_set_response(&eqdesc, 0, 0);
1587         qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);
1588
1589         /* Check if the previously issued command has completed.
1590          * Also seems like the SWP is shared between the Ethernet Driver
1591          * and the SEC driver.
1592          */
1593         while (!qbman_check_command_complete(dq_storage))
1594                 ;
1595         if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
1596                 clear_swp_active_dqs(q_storage->active_dpio_id);
1597
1598         pending = 1;
1599
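        /* Collect the dequeued FDs until the pull command is complete */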
1600         do {
1601                 /* Loop until the dq_storage is updated with
1602                  * a new token by QBMAN
1603                  */
1604                 while (!qbman_check_new_result(dq_storage))
1605                         ;
1606                 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
1607                 /* Check whether the last pull command has expired and
1608                  * set the condition for loop termination
1609                  */
1610                 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1611                         pending = 0;
1612                         /* Check for valid frame. */
1613                         status = qbman_result_DQ_flags(dq_storage);
1614                         if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
1615                                 continue;
1616                 }
1617                 fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage);
1618
1619                 dq_storage++;
1620                 num_rx++;
1621         } while (pending);
1622
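        /* Loop the received frames straight back out on the Tx queue */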
1623         while (num_tx < num_rx) {
1624                 num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
1625                                 &fd[num_tx], 0, num_rx - num_tx);
1626         }
1627
1628         if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1629                 while (!qbman_check_command_complete(
1630                        get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1631                         ;
1632                 clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1633         }
1634         /* issue a volatile dequeue command for next pull */
1635         while (1) {
1636                 if (qbman_swp_pull(swp, &pulldesc)) {
1637                         DPAA2_PMD_DP_DEBUG("VDQ command is not issued. "
1638                                           "QBMAN is busy (2)\n");
1639                         continue;
1640                 }
1641                 break;
1642         }
1643         q_storage->active_dqs = dq_storage1;
1644         q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
1645         set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
1646
1647         dpaa2_q->rx_pkts += num_rx;
1648         dpaa2_q->tx_pkts += num_tx;
1649
1650         return 0;
1651 }
1652 #if defined(RTE_TOOLCHAIN_GCC)
1653 #pragma GCC diagnostic pop
1654 #elif defined(RTE_TOOLCHAIN_CLANG)
1655 #pragma clang diagnostic pop
1656 #endif