net/dpaa2: support optional Tx confirmation
drivers/net/dpaa2/dpaa2_rxtx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2021 NXP
5  *
6  */
7
8 #include <time.h>
9 #include <net/if.h>
10
11 #include <rte_mbuf.h>
12 #include <ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
16 #include <rte_dev.h>
17
18 #include <rte_fslmc.h>
19 #include <fslmc_vfio.h>
20 #include <dpaa2_hw_pvt.h>
21 #include <dpaa2_hw_dpio.h>
22 #include <dpaa2_hw_mempool.h>
23
24 #include "dpaa2_pmd_logs.h"
25 #include "dpaa2_ethdev.h"
26 #include "base/dpaa2_hw_dpni_annot.h"
27
28 static inline uint32_t __rte_hot
29 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
30                         struct dpaa2_annot_hdr *annotation);
31
32 static void enable_tx_tstamp(struct qbman_fd *fd) __rte_unused;
33
34 static inline rte_mbuf_timestamp_t *
35 dpaa2_timestamp_dynfield(struct rte_mbuf *mbuf)
36 {
37         return RTE_MBUF_DYNFIELD(mbuf,
38                 dpaa2_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
39 }
40
41 #define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
42         DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
43         DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
44         DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
45         DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
46         DPAA2_SET_FD_FRC(_fd, 0);               \
47         DPAA2_RESET_FD_CTRL(_fd);               \
48         DPAA2_RESET_FD_FLC(_fd);                \
49 } while (0)
50
51 static inline void __rte_hot
52 dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
53                        void *hw_annot_addr)
54 {
55         uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);
56         struct dpaa2_annot_hdr *annotation =
57                         (struct dpaa2_annot_hdr *)hw_annot_addr;
58
59         m->packet_type = RTE_PTYPE_UNKNOWN;
60         switch (frc) {
61         case DPAA2_PKT_TYPE_ETHER:
62                 m->packet_type = RTE_PTYPE_L2_ETHER;
63                 break;
64         case DPAA2_PKT_TYPE_IPV4:
65                 m->packet_type = RTE_PTYPE_L2_ETHER |
66                         RTE_PTYPE_L3_IPV4;
67                 break;
68         case DPAA2_PKT_TYPE_IPV6:
69                 m->packet_type = RTE_PTYPE_L2_ETHER |
70                         RTE_PTYPE_L3_IPV6;
71                 break;
72         case DPAA2_PKT_TYPE_IPV4_EXT:
73                 m->packet_type = RTE_PTYPE_L2_ETHER |
74                         RTE_PTYPE_L3_IPV4_EXT;
75                 break;
76         case DPAA2_PKT_TYPE_IPV6_EXT:
77                 m->packet_type = RTE_PTYPE_L2_ETHER |
78                         RTE_PTYPE_L3_IPV6_EXT;
79                 break;
80         case DPAA2_PKT_TYPE_IPV4_TCP:
81                 m->packet_type = RTE_PTYPE_L2_ETHER |
82                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
83                 break;
84         case DPAA2_PKT_TYPE_IPV6_TCP:
85                 m->packet_type = RTE_PTYPE_L2_ETHER |
86                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
87                 break;
88         case DPAA2_PKT_TYPE_IPV4_UDP:
89                 m->packet_type = RTE_PTYPE_L2_ETHER |
90                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
91                 break;
92         case DPAA2_PKT_TYPE_IPV6_UDP:
93                 m->packet_type = RTE_PTYPE_L2_ETHER |
94                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
95                 break;
96         case DPAA2_PKT_TYPE_IPV4_SCTP:
97                 m->packet_type = RTE_PTYPE_L2_ETHER |
98                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
99                 break;
100         case DPAA2_PKT_TYPE_IPV6_SCTP:
101                 m->packet_type = RTE_PTYPE_L2_ETHER |
102                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
103                 break;
104         case DPAA2_PKT_TYPE_IPV4_ICMP:
105                 m->packet_type = RTE_PTYPE_L2_ETHER |
106                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
107                 break;
108         case DPAA2_PKT_TYPE_IPV6_ICMP:
109                 m->packet_type = RTE_PTYPE_L2_ETHER |
110                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
111                 break;
112         default:
113                 m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
114         }
115         m->hash.rss = fd->simple.flc_hi;
116         m->ol_flags |= PKT_RX_RSS_HASH;
117
118         if (dpaa2_enable_ts[m->port]) {
119                 *dpaa2_timestamp_dynfield(m) = annotation->word2;
120                 m->ol_flags |= dpaa2_timestamp_rx_dynflag;
121                 DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "",
122                                 *dpaa2_timestamp_dynfield(m));
123         }
124
125         DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
126                 "ol_flags =0x%" PRIx64 "",
127                 frc, m->packet_type, m->ol_flags);
128 }
129
130 static inline uint32_t __rte_hot
131 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
132                         struct dpaa2_annot_hdr *annotation)
133 {
134         uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
135         uint16_t *vlan_tci;
136
137         DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
138                         "(4)=0x%" PRIx64 "\t",
139                         annotation->word3, annotation->word4);
140
141 #if defined(RTE_LIBRTE_IEEE1588)
142         if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP))
143                 mbuf->ol_flags |= PKT_RX_IEEE1588_PTP;
144 #endif
145
146         if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
147                 vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
148                         (VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
149                 mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
150                 mbuf->ol_flags |= PKT_RX_VLAN;
151                 pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
152         } else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
153                 vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
154                         (VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
155                 mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
156                 mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ;
157                 pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
158         }
159
160         if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
161                 pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
162                 goto parse_done;
163         } else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
164                 pkt_type |= RTE_PTYPE_L2_ETHER;
165         } else {
166                 goto parse_done;
167         }
168
169         if (BIT_ISSET_AT_POS(annotation->word3, L2_MPLS_1_PRESENT |
170                                 L2_MPLS_N_PRESENT))
171                 pkt_type |= RTE_PTYPE_L2_ETHER_MPLS;
172
173         if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
174                              L3_IPV4_N_PRESENT)) {
175                 pkt_type |= RTE_PTYPE_L3_IPV4;
176                 if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
177                         L3_IP_N_OPT_PRESENT))
178                         pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
179
180         } else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
181                   L3_IPV6_N_PRESENT)) {
182                 pkt_type |= RTE_PTYPE_L3_IPV6;
183                 if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
184                     L3_IP_N_OPT_PRESENT))
185                         pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
186         } else {
187                 goto parse_done;
188         }
189
190         if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
191                 mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
192         else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
193                 mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
194
195         if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
196             L3_IP_1_MORE_FRAGMENT |
197             L3_IP_N_FIRST_FRAGMENT |
198             L3_IP_N_MORE_FRAGMENT)) {
199                 pkt_type |= RTE_PTYPE_L4_FRAG;
200                 goto parse_done;
201         } else {
202                 pkt_type |= RTE_PTYPE_L4_NONFRAG;
203         }
204
205         if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
206                 pkt_type |= RTE_PTYPE_L4_UDP;
207
208         else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
209                 pkt_type |= RTE_PTYPE_L4_TCP;
210
211         else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
212                 pkt_type |= RTE_PTYPE_L4_SCTP;
213
214         else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
215                 pkt_type |= RTE_PTYPE_L4_ICMP;
216
217         else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
218                 pkt_type |= RTE_PTYPE_UNKNOWN;
219
220 parse_done:
221         return pkt_type;
222 }
223
224 static inline uint32_t __rte_hot
225 dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
226 {
227         struct dpaa2_annot_hdr *annotation =
228                         (struct dpaa2_annot_hdr *)hw_annot_addr;
229
230         DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
231                            annotation->word4);
232
233         if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
234                 mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
235         else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
236                 mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
237
238         if (dpaa2_enable_ts[mbuf->port]) {
239                 *dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
240                 mbuf->ol_flags |= dpaa2_timestamp_rx_dynflag;
241                 DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "",
242                                 *dpaa2_timestamp_dynfield(mbuf));
243         }
244
245         /* Check detailed parsing requirement */
246         if (annotation->word3 & 0x7FFFFC3FFFF)
247                 return dpaa2_dev_rx_parse_slow(mbuf, annotation);
248
249         /* Return some common types from parse processing */
250         switch (annotation->word4) {
251         case DPAA2_L3_IPv4:
252                 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
253         case DPAA2_L3_IPv6:
254                 return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
255         case DPAA2_L3_IPv4_TCP:
256                 return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
257                                 RTE_PTYPE_L4_TCP;
258         case DPAA2_L3_IPv4_UDP:
259                 return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
260                                 RTE_PTYPE_L4_UDP;
261         case DPAA2_L3_IPv6_TCP:
262                 return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
263                                 RTE_PTYPE_L4_TCP;
264         case DPAA2_L3_IPv6_UDP:
265                 return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
266                                 RTE_PTYPE_L4_UDP;
267         default:
268                 break;
269         }
270
271         return dpaa2_dev_rx_parse_slow(mbuf, annotation);
272 }
273
274 static inline struct rte_mbuf *__rte_hot
275 eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
276                   int port_id)
277 {
278         struct qbman_sge *sgt, *sge;
279         size_t sg_addr, fd_addr;
280         int i = 0;
281         void *hw_annot_addr;
282         struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;
283
284         fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
285         hw_annot_addr = (void *)(fd_addr + DPAA2_FD_PTA_SIZE);
286
287         /* Get Scatter gather table address */
288         sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));
289
290         sge = &sgt[i++];
291         sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));
292
293         /* First Scatter gather entry */
294         first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
295                 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
296         /* Prepare all the metadata for first segment */
297         first_seg->buf_addr = (uint8_t *)sg_addr;
298         first_seg->ol_flags = 0;
299         first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
300         first_seg->data_len = sge->length  & 0x1FFFF;
301         first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
302         first_seg->nb_segs = 1;
303         first_seg->next = NULL;
304         first_seg->port = port_id;
305         if (dpaa2_svr_family == SVR_LX2160A)
306                 dpaa2_dev_rx_parse_new(first_seg, fd, hw_annot_addr);
307         else
308                 first_seg->packet_type =
309                         dpaa2_dev_rx_parse(first_seg, hw_annot_addr);
310
311         rte_mbuf_refcnt_set(first_seg, 1);
312         cur_seg = first_seg;
313         while (!DPAA2_SG_IS_FINAL(sge)) {
314                 sge = &sgt[i++];
315                 sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
316                                 DPAA2_GET_FLE_ADDR(sge));
317                 next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
318                         rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
319                 next_seg->buf_addr  = (uint8_t *)sg_addr;
320                 next_seg->data_off  = DPAA2_GET_FLE_OFFSET(sge);
321                 next_seg->data_len  = sge->length  & 0x1FFFF;
322                 first_seg->nb_segs += 1;
323                 rte_mbuf_refcnt_set(next_seg, 1);
324                 cur_seg->next = next_seg;
325                 next_seg->next = NULL;
326                 cur_seg = next_seg;
327         }
328         temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
329                 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
330         rte_mbuf_refcnt_set(temp, 1);
331         rte_pktmbuf_free_seg(temp);
332
333         return (void *)first_seg;
334 }
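/* Illustrative layout for a two-segment frame handled above (a sketch, not
 * driver documentation): the FD buffer holds the SGT at the FD offset and
 * each SGE points to one segment buffer.
 *
 *   FD.addr ----> [ annotation | SGT[0], SGT[1](FINAL) ]
 *   SGT[0].addr -> seg0 buffer -> first_seg (pkt_len = FD length)
 *   SGT[1].addr -> seg1 buffer -> first_seg->next
 *
 * The mbuf rebuilt over the FD buffer ("temp") is freed back to its pool
 * once the chain has been assembled.
 */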
335
336 static inline struct rte_mbuf *__rte_hot
337 eth_fd_to_mbuf(const struct qbman_fd *fd,
338                int port_id)
339 {
340         void *v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
341         void *hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
342         struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
343                      rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
344
345         /* Need to repopulate some of the fields,
346          * as they may have changed during the last transmission.
347          */
348         mbuf->nb_segs = 1;
349         mbuf->ol_flags = 0;
350         mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
351         mbuf->data_len = DPAA2_GET_FD_LEN(fd);
352         mbuf->pkt_len = mbuf->data_len;
353         mbuf->port = port_id;
354         mbuf->next = NULL;
355         rte_mbuf_refcnt_set(mbuf, 1);
356
357         /* Parse the packet */
358         /* Parse results for LX2 are in the FRC field of the FD.
359          * For other DPAA2 platforms, parse results follow
360          * the private SW annotation area.
361          */
362
363         if (dpaa2_svr_family == SVR_LX2160A)
364                 dpaa2_dev_rx_parse_new(mbuf, fd, hw_annot_addr);
365         else
366                 mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, hw_annot_addr);
367
368         DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
369                 "fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
370                 mbuf, mbuf->buf_addr, mbuf->data_off,
371                 DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
372                 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
373                 DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
374
375         return mbuf;
376 }
377
378 static int __rte_noinline __rte_hot
379 eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
380                   struct qbman_fd *fd,
381                   struct rte_mempool *mp, uint16_t bpid)
382 {
383         struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
384         struct qbman_sge *sgt, *sge = NULL;
385         int i, offset = 0;
386
387 #ifdef RTE_LIBRTE_IEEE1588
388         /* annotation area for timestamp in first buffer */
389         offset = 0x64;
390 #endif
391         if (RTE_MBUF_DIRECT(mbuf) &&
392                 (mbuf->data_off > (mbuf->nb_segs * sizeof(struct qbman_sge)
393                 + offset))) {
394                 temp = mbuf;
395                 if (rte_mbuf_refcnt_read(temp) > 1) {
396                         /* If refcnt > 1, invalid bpid is set to ensure
397                          * buffer is not freed by HW
398                          */
399                         fd->simple.bpid_offset = 0;
400                         DPAA2_SET_FD_IVP(fd);
401                         rte_mbuf_refcnt_update(temp, -1);
402                 } else {
403                         DPAA2_SET_ONLY_FD_BPID(fd, bpid);
404                 }
405                 DPAA2_SET_FD_OFFSET(fd, offset);
406         } else {
407                 temp = rte_pktmbuf_alloc(mp);
408                 if (temp == NULL) {
409                         DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
410                         return -ENOMEM;
411                 }
412                 DPAA2_SET_ONLY_FD_BPID(fd, bpid);
413                 DPAA2_SET_FD_OFFSET(fd, temp->data_off);
414         }
415         DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
416         DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
417         DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
418         DPAA2_RESET_FD_FRC(fd);
419         DPAA2_RESET_FD_CTRL(fd);
420         DPAA2_RESET_FD_FLC(fd);
421         /*Set Scatter gather table and Scatter gather entries*/
422         sgt = (struct qbman_sge *)(
423                         (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
424                         + DPAA2_GET_FD_OFFSET(fd));
425
426         for (i = 0; i < mbuf->nb_segs; i++) {
427                 sge = &sgt[i];
428                 /*Resetting the buffer pool id and offset field*/
429                 sge->fin_bpid_offset = 0;
430                 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
431                 DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
432                 sge->length = cur_seg->data_len;
433                 if (RTE_MBUF_DIRECT(cur_seg)) {
434                         /* If we are using an inline SGT in the same buffer,
435                          * set the FLE FMT as Frame Data Section.
436                          */
437                         if (temp == cur_seg) {
438                                 DPAA2_SG_SET_FORMAT(sge, qbman_fd_list);
439                                 DPAA2_SET_FLE_IVP(sge);
440                         } else {
441                                 if (rte_mbuf_refcnt_read(cur_seg) > 1) {
442                                 /* If refcnt > 1, invalid bpid is set to ensure
443                                  * buffer is not freed by HW
444                                  */
445                                         DPAA2_SET_FLE_IVP(sge);
446                                         rte_mbuf_refcnt_update(cur_seg, -1);
447                                 } else {
448                                         DPAA2_SET_FLE_BPID(sge,
449                                                 mempool_to_bpid(cur_seg->pool));
450                                 }
451                         }
452                         cur_seg = cur_seg->next;
453                 } else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
454                         DPAA2_SET_FLE_IVP(sge);
455                         cur_seg = cur_seg->next;
456                 } else {
457                         /* Get owner MBUF from indirect buffer */
458                         mi = rte_mbuf_from_indirect(cur_seg);
459                         if (rte_mbuf_refcnt_read(mi) > 1) {
460                                 /* If refcnt > 1, invalid bpid is set to ensure
461                                  * owner buffer is not freed by HW
462                                  */
463                                 DPAA2_SET_FLE_IVP(sge);
464                         } else {
465                                 DPAA2_SET_FLE_BPID(sge,
466                                                    mempool_to_bpid(mi->pool));
467                                 rte_mbuf_refcnt_update(mi, 1);
468                         }
469                         prev_seg = cur_seg;
470                         cur_seg = cur_seg->next;
471                         prev_seg->next = NULL;
472                         rte_pktmbuf_free(prev_seg);
473                 }
474         }
475         DPAA2_SG_SET_FINAL(sge, true);
476         return 0;
477 }
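/* Illustrative result of the conversion above for a 3-segment direct mbuf
 * (a sketch under stated assumptions, not driver documentation):
 *
 *   FD.addr   -> buffer holding the SGT: either the first segment's headroom
 *                (when there is room above the SGEs) or a buffer freshly
 *                allocated from 'mp'
 *   SGT[0..2] -> one entry per segment, with the BPID set, or IVP when the
 *                segment must not be freed by hardware (refcnt > 1, external
 *                buffer)
 *   SGT[2]    -> FINAL bit set
 */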
478
479 static void
480 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
481                struct qbman_fd *fd, uint16_t bpid) __rte_unused;
482
483 static void __rte_noinline __rte_hot
484 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
485                struct qbman_fd *fd, uint16_t bpid)
486 {
487         DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
488
489         DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
490                 "fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
491                 mbuf, mbuf->buf_addr, mbuf->data_off,
492                 DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
493                 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
494                 DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
495         if (RTE_MBUF_DIRECT(mbuf)) {
496                 if (rte_mbuf_refcnt_read(mbuf) > 1) {
497                         DPAA2_SET_FD_IVP(fd);
498                         rte_mbuf_refcnt_update(mbuf, -1);
499                 }
500         } else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
501                 DPAA2_SET_FD_IVP(fd);
502         } else {
503                 struct rte_mbuf *mi;
504
505                 mi = rte_mbuf_from_indirect(mbuf);
506                 if (rte_mbuf_refcnt_read(mi) > 1)
507                         DPAA2_SET_FD_IVP(fd);
508                 else
509                         rte_mbuf_refcnt_update(mi, 1);
510                 rte_pktmbuf_free(mbuf);
511         }
512 }
513
514 static inline int __rte_hot
515 eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
516                     struct qbman_fd *fd, uint16_t bpid)
517 {
518         struct rte_mbuf *m;
519         void *mb = NULL;
520
521         if (rte_dpaa2_mbuf_alloc_bulk(
522                 rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
523                 DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
524                 return -1;
525         }
526         m = (struct rte_mbuf *)mb;
527         memcpy((char *)m->buf_addr + mbuf->data_off,
528                (void *)((char *)mbuf->buf_addr + mbuf->data_off),
529                 mbuf->pkt_len);
530
531         /* Copy required fields */
532         m->data_off = mbuf->data_off;
533         m->ol_flags = mbuf->ol_flags;
534         m->packet_type = mbuf->packet_type;
535         m->tx_offload = mbuf->tx_offload;
536
537         DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);
538
539         DPAA2_PMD_DP_DEBUG(
540                 "mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
541                 " meta: %d, off: %d, len: %d\n",
542                 (void *)mbuf,
543                 mbuf->buf_addr,
544                 DPAA2_GET_FD_ADDR(fd),
545                 DPAA2_GET_FD_BPID(fd),
546                 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
547                 DPAA2_GET_FD_OFFSET(fd),
548                 DPAA2_GET_FD_LEN(fd));
549
550         return 0;
551 }
552
553 /* This function assumes that the caller keeps the same value for nb_pkts
554  * across calls per queue; if that is not the case, use the non-prefetch
555  * version of the Rx call instead.
556  * It returns the packets requested in the previous call without honoring
557  * the current nb_pkts or bufs space.
558  */
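/* Usage sketch (illustrative only, not part of the driver): an application
 * reaches this handler through rte_eth_rx_burst() and should keep the burst
 * size constant per queue. port_id, queue_id and the burst of 32 below are
 * assumptions of the sketch:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t i, n;
 *
 *	for (;;) {
 *		n = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *		for (i = 0; i < n; i++)
 *			rte_pktmbuf_free(pkts[i]);
 *	}
 */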
559 uint16_t
560 dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
561 {
562         /* Function receives frames for a given device and VQ */
563         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
564         struct qbman_result *dq_storage, *dq_storage1 = NULL;
565         uint32_t fqid = dpaa2_q->fqid;
566         int ret, num_rx = 0, pull_size;
567         uint8_t pending, status;
568         struct qbman_swp *swp;
569         const struct qbman_fd *fd;
570         struct qbman_pull_desc pulldesc;
571         struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
572         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
573 #if defined(RTE_LIBRTE_IEEE1588)
574         struct dpaa2_dev_priv *priv = eth_data->dev_private;
575 #endif
576
577         if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
578                 ret = dpaa2_affine_qbman_ethrx_swp();
579                 if (ret) {
580                         DPAA2_PMD_ERR("Failure in affining portal");
581                         return 0;
582                 }
583         }
584
585         if (unlikely(!rte_dpaa2_bpid_info &&
586                      rte_eal_process_type() == RTE_PROC_SECONDARY))
587                 rte_dpaa2_bpid_info = dpaa2_q->bp_array;
588
589         swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
590         pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
591         if (unlikely(!q_storage->active_dqs)) {
592                 q_storage->toggle = 0;
593                 dq_storage = q_storage->dq_storage[q_storage->toggle];
594                 q_storage->last_num_pkts = pull_size;
595                 qbman_pull_desc_clear(&pulldesc);
596                 qbman_pull_desc_set_numframes(&pulldesc,
597                                               q_storage->last_num_pkts);
598                 qbman_pull_desc_set_fq(&pulldesc, fqid);
599                 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
600                         (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
601                 if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
602                         while (!qbman_check_command_complete(
603                                get_swp_active_dqs(
604                                DPAA2_PER_LCORE_ETHRX_DPIO->index)))
605                                 ;
606                         clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
607                 }
608                 while (1) {
609                         if (qbman_swp_pull(swp, &pulldesc)) {
610                                 DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
611                                                   " QBMAN is busy (1)\n");
612                                 /* Portal was busy, try again */
613                                 continue;
614                         }
615                         break;
616                 }
617                 q_storage->active_dqs = dq_storage;
618                 q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
619                 set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
620                                    dq_storage);
621         }
622
623         dq_storage = q_storage->active_dqs;
624         rte_prefetch0((void *)(size_t)(dq_storage));
625         rte_prefetch0((void *)(size_t)(dq_storage + 1));
626
627         /* Prepare the next pull descriptor. This gives space for the
628          * prefetching done on DQRR entries.
629          */
630         q_storage->toggle ^= 1;
631         dq_storage1 = q_storage->dq_storage[q_storage->toggle];
632         qbman_pull_desc_clear(&pulldesc);
633         qbman_pull_desc_set_numframes(&pulldesc, pull_size);
634         qbman_pull_desc_set_fq(&pulldesc, fqid);
635         qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
636                 (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
637
638         /* Check if the previously issued command has completed.
639          * The SWP also appears to be shared between the Ethernet driver
640          * and the SEC driver.
641          */
642         while (!qbman_check_command_complete(dq_storage))
643                 ;
644         if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
645                 clear_swp_active_dqs(q_storage->active_dpio_id);
646
647         pending = 1;
648
649         do {
650                 /* Loop until the dq_storage is updated with
651                  * new token by QBMAN
652                  */
653                 while (!qbman_check_new_result(dq_storage))
654                         ;
655                 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
656                 /* Check whether the last pull command has expired and
657                  * set the condition for loop termination.
658                  */
659                 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
660                         pending = 0;
661                         /* Check for valid frame. */
662                         status = qbman_result_DQ_flags(dq_storage);
663                         if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
664                                 continue;
665                 }
666                 fd = qbman_result_DQ_fd(dq_storage);
667
668 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
669                 if (dpaa2_svr_family != SVR_LX2160A) {
670                         const struct qbman_fd *next_fd =
671                                 qbman_result_DQ_fd(dq_storage + 1);
672                         /* Prefetch Annotation address for the parse results */
673                         rte_prefetch0(DPAA2_IOVA_TO_VADDR((DPAA2_GET_FD_ADDR(
674                                 next_fd) + DPAA2_FD_PTA_SIZE + 16)));
675                 }
676 #endif
677
678                 if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
679                         bufs[num_rx] = eth_sg_fd_to_mbuf(fd, eth_data->port_id);
680                 else
681                         bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
682 #if defined(RTE_LIBRTE_IEEE1588)
683                 priv->rx_timestamp = *dpaa2_timestamp_dynfield(bufs[num_rx]);
684 #endif
685
686                 if (eth_data->dev_conf.rxmode.offloads &
687                                 DEV_RX_OFFLOAD_VLAN_STRIP)
688                         rte_vlan_strip(bufs[num_rx]);
689
690                 dq_storage++;
691                 num_rx++;
692         } while (pending);
693
694         if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
695                 while (!qbman_check_command_complete(
696                        get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
697                         ;
698                 clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
699         }
700         /* issue a volatile dequeue command for next pull */
701         while (1) {
702                 if (qbman_swp_pull(swp, &pulldesc)) {
703                         DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
704                                           " QBMAN is busy (2)\n");
705                         continue;
706                 }
707                 break;
708         }
709         q_storage->active_dqs = dq_storage1;
710         q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
711         set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
712
713         dpaa2_q->rx_pkts += num_rx;
714
715         return num_rx;
716 }
717
718 void __rte_hot
719 dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
720                                  const struct qbman_fd *fd,
721                                  const struct qbman_result *dq,
722                                  struct dpaa2_queue *rxq,
723                                  struct rte_event *ev)
724 {
725         rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
726                 DPAA2_FD_PTA_SIZE + 16));
727
728         ev->flow_id = rxq->ev.flow_id;
729         ev->sub_event_type = rxq->ev.sub_event_type;
730         ev->event_type = RTE_EVENT_TYPE_ETHDEV;
731         ev->op = RTE_EVENT_OP_NEW;
732         ev->sched_type = rxq->ev.sched_type;
733         ev->queue_id = rxq->ev.queue_id;
734         ev->priority = rxq->ev.priority;
735
736         ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
737
738         qbman_swp_dqrr_consume(swp, dq);
739 }
740
741 void __rte_hot
742 dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused,
743                                const struct qbman_fd *fd,
744                                const struct qbman_result *dq,
745                                struct dpaa2_queue *rxq,
746                                struct rte_event *ev)
747 {
748         uint8_t dqrr_index;
749
750         rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
751                 DPAA2_FD_PTA_SIZE + 16));
752
753         ev->flow_id = rxq->ev.flow_id;
754         ev->sub_event_type = rxq->ev.sub_event_type;
755         ev->event_type = RTE_EVENT_TYPE_ETHDEV;
756         ev->op = RTE_EVENT_OP_NEW;
757         ev->sched_type = rxq->ev.sched_type;
758         ev->queue_id = rxq->ev.queue_id;
759         ev->priority = rxq->ev.priority;
760
761         ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
762
763         dqrr_index = qbman_get_dqrr_idx(dq);
764         *dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
765         DPAA2_PER_LCORE_DQRR_SIZE++;
766         DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
767         DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
768 }
769
770 void __rte_hot
771 dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
772                                 const struct qbman_fd *fd,
773                                 const struct qbman_result *dq,
774                                 struct dpaa2_queue *rxq,
775                                 struct rte_event *ev)
776 {
777         rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
778                 DPAA2_FD_PTA_SIZE + 16));
779
780         ev->flow_id = rxq->ev.flow_id;
781         ev->sub_event_type = rxq->ev.sub_event_type;
782         ev->event_type = RTE_EVENT_TYPE_ETHDEV;
783         ev->op = RTE_EVENT_OP_NEW;
784         ev->sched_type = rxq->ev.sched_type;
785         ev->queue_id = rxq->ev.queue_id;
786         ev->priority = rxq->ev.priority;
787
788         ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
789
790         *dpaa2_seqn(ev->mbuf) = DPAA2_ENQUEUE_FLAG_ORP;
791         *dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
792         *dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;
793
794         qbman_swp_dqrr_consume(swp, dq);
795 }
796
797 uint16_t
798 dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
799 {
800         /* Function receives frames for a given device and VQ */
801         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
802         struct qbman_result *dq_storage;
803         uint32_t fqid = dpaa2_q->fqid;
804         int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
805         uint8_t pending, status;
806         struct qbman_swp *swp;
807         const struct qbman_fd *fd;
808         struct qbman_pull_desc pulldesc;
809         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
810
811         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
812                 ret = dpaa2_affine_qbman_swp();
813                 if (ret) {
814                         DPAA2_PMD_ERR(
815                                 "Failed to allocate IO portal, tid: %d\n",
816                                 rte_gettid());
817                         return 0;
818                 }
819         }
820         swp = DPAA2_PER_LCORE_PORTAL;
821
822         do {
823                 dq_storage = dpaa2_q->q_storage->dq_storage[0];
824                 qbman_pull_desc_clear(&pulldesc);
825                 qbman_pull_desc_set_fq(&pulldesc, fqid);
826                 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
827                                 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
828
829                 if (next_pull > dpaa2_dqrr_size) {
830                         qbman_pull_desc_set_numframes(&pulldesc,
831                                 dpaa2_dqrr_size);
832                         next_pull -= dpaa2_dqrr_size;
833                 } else {
834                         qbman_pull_desc_set_numframes(&pulldesc, next_pull);
835                         next_pull = 0;
836                 }
837
838                 while (1) {
839                         if (qbman_swp_pull(swp, &pulldesc)) {
840                                 DPAA2_PMD_DP_DEBUG(
841                                         "VDQ command is not issued. QBMAN is busy\n");
842                                 /* Portal was busy, try again */
843                                 continue;
844                         }
845                         break;
846                 }
847
848                 rte_prefetch0((void *)((size_t)(dq_storage + 1)));
849                 /* Check if the previously issued command has completed. */
850                 while (!qbman_check_command_complete(dq_storage))
851                         ;
852
853                 num_pulled = 0;
854                 pending = 1;
855                 do {
856                         /* Loop until the dq_storage is updated with
857                          * new token by QBMAN
858                          */
859                         while (!qbman_check_new_result(dq_storage))
860                                 ;
861                         rte_prefetch0((void *)((size_t)(dq_storage + 2)));
862                         /* Check whether the last pull command has expired and
863                          * set the condition for loop termination.
864                          */
865                         if (qbman_result_DQ_is_pull_complete(dq_storage)) {
866                                 pending = 0;
867                                 /* Check for valid frame. */
868                                 status = qbman_result_DQ_flags(dq_storage);
869                                 if (unlikely((status &
870                                         QBMAN_DQ_STAT_VALIDFRAME) == 0))
871                                         continue;
872                         }
873                         fd = qbman_result_DQ_fd(dq_storage);
874
875 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
876                         if (dpaa2_svr_family != SVR_LX2160A) {
877                                 const struct qbman_fd *next_fd =
878                                         qbman_result_DQ_fd(dq_storage + 1);
879
880                                 /* Prefetch Annotation address for the parse
881                                  * results.
882                                  */
883                                 rte_prefetch0((DPAA2_IOVA_TO_VADDR(
884                                         DPAA2_GET_FD_ADDR(next_fd) +
885                                         DPAA2_FD_PTA_SIZE + 16)));
886                         }
887 #endif
888
889                         if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
890                                 bufs[num_rx] = eth_sg_fd_to_mbuf(fd,
891                                                         eth_data->port_id);
892                         else
893                                 bufs[num_rx] = eth_fd_to_mbuf(fd,
894                                                         eth_data->port_id);
895
896                         if (eth_data->dev_conf.rxmode.offloads &
897                                         DEV_RX_OFFLOAD_VLAN_STRIP) {
898                                 rte_vlan_strip(bufs[num_rx]);
899                         }
900
901                         dq_storage++;
902                         num_rx++;
903                         num_pulled++;
904                 } while (pending);
905         /* Last VDQ provided all packets and more packets are requested */
906         } while (next_pull && num_pulled == dpaa2_dqrr_size);
907
908         dpaa2_q->rx_pkts += num_rx;
909
910         return num_rx;
911 }
912
913 uint16_t dpaa2_dev_tx_conf(void *queue)
914 {
915         /* Function to receive Tx confirmation frames for a given device and VQ */
916         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
917         struct qbman_result *dq_storage;
918         uint32_t fqid = dpaa2_q->fqid;
919         int ret, num_tx_conf = 0, num_pulled;
920         uint8_t pending, status;
921         struct qbman_swp *swp;
922         const struct qbman_fd *fd, *next_fd;
923         struct qbman_pull_desc pulldesc;
924         struct qbman_release_desc releasedesc;
925         uint32_t bpid;
926         uint64_t buf;
927 #if defined(RTE_LIBRTE_IEEE1588)
928         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
929         struct dpaa2_dev_priv *priv = eth_data->dev_private;
930         struct dpaa2_annot_hdr *annotation;
931 #endif
932
933         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
934                 ret = dpaa2_affine_qbman_swp();
935                 if (ret) {
936                         DPAA2_PMD_ERR(
937                                 "Failed to allocate IO portal, tid: %d\n",
938                                 rte_gettid());
939                         return 0;
940                 }
941         }
942         swp = DPAA2_PER_LCORE_PORTAL;
943
944         do {
945                 dq_storage = dpaa2_q->q_storage->dq_storage[0];
946                 qbman_pull_desc_clear(&pulldesc);
947                 qbman_pull_desc_set_fq(&pulldesc, fqid);
948                 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
949                                 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
950
951                 qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
952
953                 while (1) {
954                         if (qbman_swp_pull(swp, &pulldesc)) {
955                                 DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
956                                                    " QBMAN is busy\n");
957                                 /* Portal was busy, try again */
958                                 continue;
959                         }
960                         break;
961                 }
962
963                 rte_prefetch0((void *)((size_t)(dq_storage + 1)));
964                 /* Check if the previously issued command has completed. */
965                 while (!qbman_check_command_complete(dq_storage))
966                         ;
967
968                 num_pulled = 0;
969                 pending = 1;
970                 do {
971                         /* Loop until the dq_storage is updated with
972                          * new token by QBMAN
973                          */
974                         while (!qbman_check_new_result(dq_storage))
975                                 ;
976                         rte_prefetch0((void *)((size_t)(dq_storage + 2)));
977                         /* Check whether the last pull command has expired and
978                          * set the condition for loop termination.
979                          */
980                         if (qbman_result_DQ_is_pull_complete(dq_storage)) {
981                                 pending = 0;
982                                 /* Check for valid frame. */
983                                 status = qbman_result_DQ_flags(dq_storage);
984                                 if (unlikely((status &
985                                         QBMAN_DQ_STAT_VALIDFRAME) == 0))
986                                         continue;
987                         }
988                         fd = qbman_result_DQ_fd(dq_storage);
989
990                         next_fd = qbman_result_DQ_fd(dq_storage + 1);
991                         /* Prefetch Annotation address for the parse results */
992                         rte_prefetch0((void *)(size_t)
993                                 (DPAA2_GET_FD_ADDR(next_fd) +
994                                  DPAA2_FD_PTA_SIZE + 16));
995
996                         bpid = DPAA2_GET_FD_BPID(fd);
997
998                         /* Create a release descriptor required for releasing
999                          * buffers into QBMAN
1000                          */
1001                         qbman_release_desc_clear(&releasedesc);
1002                         qbman_release_desc_set_bpid(&releasedesc, bpid);
1003
1004                         buf = DPAA2_GET_FD_ADDR(fd);
1005                         /* feed them to bman */
1006                         do {
1007                                 ret = qbman_swp_release(swp, &releasedesc,
1008                                                         &buf, 1);
1009                         } while (ret == -EBUSY);
1010
1011                         dq_storage++;
1012                         num_tx_conf++;
1013                         num_pulled++;
1014 #if defined(RTE_LIBRTE_IEEE1588)
1015                         annotation = (struct dpaa2_annot_hdr *)((size_t)
1016                                 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
1017                                 DPAA2_FD_PTA_SIZE);
1018                         priv->tx_timestamp = annotation->word2;
1019 #endif
1020                 } while (pending);
1021
1022         /* Last VDQ provided all packets and more packets are requested */
1023         } while (num_pulled == dpaa2_dqrr_size);
1024
1025         dpaa2_q->rx_pkts += num_tx_conf;
1026
1027         return num_tx_conf;
1028 }
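/* Usage sketch (illustrative only): with optional Tx confirmation enabled,
 * the transmit path can drain the confirmation queue before reusing buffers.
 * DPAA2_TX_CONF_ENABLE and the 'flags' field are assumptions of this sketch:
 *
 *	if (priv->flags & DPAA2_TX_CONF_ENABLE)
 *		dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
 */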
1029
1030 /* Configure the egress frame annotation for timestamp update */
1031 static void enable_tx_tstamp(struct qbman_fd *fd)
1032 {
1033         struct dpaa2_faead *fd_faead;
1034
1035         /* Set frame annotation status field as valid */
1036         (fd)->simple.frc |= DPAA2_FD_FRC_FASV;
1037
1038         /* Set frame annotation egress action descriptor as valid */
1039         (fd)->simple.frc |= DPAA2_FD_FRC_FAEADV;
1040
1041         /* Set Annotation Length as 128B */
1042         (fd)->simple.ctrl |= DPAA2_FD_CTRL_ASAL;
1043
1044         /* enable update of confirmation frame annotation */
1045         fd_faead = (struct dpaa2_faead *)((size_t)
1046                         DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
1047                         DPAA2_FD_PTA_SIZE + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET);
1048         fd_faead->ctrl = DPAA2_ANNOT_FAEAD_A2V | DPAA2_ANNOT_FAEAD_UPDV |
1049                                 DPAA2_ANNOT_FAEAD_UPD;
1050 }
1051
1052 /*
1053  * Callback to handle sending packets through WRIOP based interface
1054  */
1055 uint16_t
1056 dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1057 {
1058         /* Function to transmit the frames to the given device and VQ */
1059         uint32_t loop, retry_count;
1060         int32_t ret;
1061         struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1062         struct rte_mbuf *mi;
1063         uint32_t frames_to_send;
1064         struct rte_mempool *mp;
1065         struct qbman_eq_desc eqdesc;
1066         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1067         struct qbman_swp *swp;
1068         uint16_t num_tx = 0;
1069         uint16_t bpid;
1070         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1071         struct dpaa2_dev_priv *priv = eth_data->dev_private;
1072         uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1073         struct rte_mbuf **orig_bufs = bufs;
1074
1075         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1076                 ret = dpaa2_affine_qbman_swp();
1077                 if (ret) {
1078                         DPAA2_PMD_ERR(
1079                                 "Failed to allocate IO portal, tid: %d\n",
1080                                 rte_gettid());
1081                         return 0;
1082                 }
1083         }
1084         swp = DPAA2_PER_LCORE_PORTAL;
1085
1086         DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
1087                         eth_data, dpaa2_q->fqid);
1088
1089 #ifdef RTE_LIBRTE_IEEE1588
1090         /* The IEEE1588 driver needs a pointer to the Tx confirmation queue
1091          * corresponding to the last packet transmitted, for reading
1092          * the timestamp.
1093          */
1094         priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
1095         dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
1096 #endif
1097
1098         /*Prepare enqueue descriptor*/
1099         qbman_eq_desc_clear(&eqdesc);
1100         qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1101         qbman_eq_desc_set_fq(&eqdesc, dpaa2_q->fqid);
1102
1103         /*Clear the unused FD fields before sending*/
1104         while (nb_pkts) {
1105                 /*Check if the queue is congested*/
1106                 retry_count = 0;
1107                 while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1108                         retry_count++;
1109                         /* Retry for some time before giving up */
1110                         if (retry_count > CONG_RETRY_COUNT)
1111                                 goto skip_tx;
1112                 }
1113
1114                 frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1115                         dpaa2_eqcr_size : nb_pkts;
1116
1117                 for (loop = 0; loop < frames_to_send; loop++) {
1118                         if (*dpaa2_seqn(*bufs)) {
1119                                 uint8_t dqrr_index = *dpaa2_seqn(*bufs) - 1;
1120
1121                                 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
1122                                                 dqrr_index;
1123                                 DPAA2_PER_LCORE_DQRR_SIZE--;
1124                                 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1125                                 *dpaa2_seqn(*bufs) = DPAA2_INVALID_MBUF_SEQN;
1126                         }
1127
1128                         if (likely(RTE_MBUF_DIRECT(*bufs))) {
1129                                 mp = (*bufs)->pool;
1130                                 /* Check the basic scenario and set
1131                                  * the FD appropriately here itself.
1132                                  */
1133                                 if (likely(mp && mp->ops_index ==
1134                                     priv->bp_list->dpaa2_ops_index &&
1135                                     (*bufs)->nb_segs == 1 &&
1136                                     rte_mbuf_refcnt_read((*bufs)) == 1)) {
1137                                         if (unlikely(((*bufs)->ol_flags
1138                                                 & PKT_TX_VLAN_PKT) ||
1139                                                 (eth_data->dev_conf.txmode.offloads
1140                                                 & DEV_TX_OFFLOAD_VLAN_INSERT))) {
1141                                                 ret = rte_vlan_insert(bufs);
1142                                                 if (ret)
1143                                                         goto send_n_return;
1144                                         }
1145                                         DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1146                                         &fd_arr[loop], mempool_to_bpid(mp));
1147                                         bufs++;
1148 #ifdef RTE_LIBRTE_IEEE1588
1149                                         enable_tx_tstamp(&fd_arr[loop]);
1150 #endif
1151                                         continue;
1152                                 }
1153                         } else {
1154                                 mi = rte_mbuf_from_indirect(*bufs);
1155                                 mp = mi->pool;
1156                         }
1157
1158                         if (unlikely(RTE_MBUF_HAS_EXTBUF(*bufs))) {
1159                                 if (unlikely((*bufs)->nb_segs > 1)) {
1160                                         if (eth_mbuf_to_sg_fd(*bufs,
1161                                                               &fd_arr[loop],
1162                                                               mp, 0))
1163                                                 goto send_n_return;
1164                                 } else {
1165                                         eth_mbuf_to_fd(*bufs,
1166                                                        &fd_arr[loop], 0);
1167                                 }
1168                                 bufs++;
1169 #ifdef RTE_LIBRTE_IEEE1588
1170                                 enable_tx_tstamp(&fd_arr[loop]);
1171 #endif
1172                                 continue;
1173                         }
1174
1175                         /* Not a hw_pkt pool allocated frame */
1176                         if (unlikely(!mp || !priv->bp_list)) {
1177                                 DPAA2_PMD_ERR("Err: No buffer pool attached");
1178                                 goto send_n_return;
1179                         }
1180
1181                         if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
1182                                 (eth_data->dev_conf.txmode.offloads
1183                                 & DEV_TX_OFFLOAD_VLAN_INSERT))) {
1184                                 int ret = rte_vlan_insert(bufs);
1185                                 if (ret)
1186                                         goto send_n_return;
1187                         }
1188                         if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1189                                 DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1190                                 /* alloc should be from the default buffer pool
1191                                  * attached to this interface
1192                                  */
1193                                 bpid = priv->bp_list->buf_pool.bpid;
1194
1195                                 if (unlikely((*bufs)->nb_segs > 1)) {
1196                                         DPAA2_PMD_ERR("S/G support not added"
1197                                                 " for non hw offload buffer");
1198                                         goto send_n_return;
1199                                 }
1200                                 if (eth_copy_mbuf_to_fd(*bufs,
1201                                                         &fd_arr[loop], bpid)) {
1202                                         goto send_n_return;
1203                                 }
1204                                 /* free the original packet */
1205                                 rte_pktmbuf_free(*bufs);
1206                         } else {
1207                                 bpid = mempool_to_bpid(mp);
1208                                 if (unlikely((*bufs)->nb_segs > 1)) {
1209                                         if (eth_mbuf_to_sg_fd(*bufs,
1210                                                         &fd_arr[loop],
1211                                                         mp, bpid))
1212                                                 goto send_n_return;
1213                                 } else {
1214                                         eth_mbuf_to_fd(*bufs,
1215                                                        &fd_arr[loop], bpid);
1216                                 }
1217                         }
1218 #ifdef RTE_LIBRTE_IEEE1588
1219                         enable_tx_tstamp(&fd_arr[loop]);
1220 #endif
1221                         bufs++;
1222                 }
1223
1224                 loop = 0;
1225                 retry_count = 0;
1226                 while (loop < frames_to_send) {
1227                         ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1228                                         &fd_arr[loop], &flags[loop],
1229                                         frames_to_send - loop);
1230                         if (unlikely(ret < 0)) {
1231                                 retry_count++;
1232                                 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1233                                         num_tx += loop;
1234                                         nb_pkts -= loop;
1235                                         goto send_n_return;
1236                                 }
1237                         } else {
1238                                 loop += ret;
1239                                 retry_count = 0;
1240                         }
1241                 }
1242
1243                 num_tx += loop;
1244                 nb_pkts -= loop;
1245         }
1246         dpaa2_q->tx_pkts += num_tx;
1247
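        /* Free any original mbufs that carry external buffers; these are left
         * for the driver to release after the frames have been enqueued.
         */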
1248         loop = 0;
1249         while (loop < num_tx) {
1250                 if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
1251                         rte_pktmbuf_free(*orig_bufs);
1252                 orig_bufs++;
1253                 loop++;
1254         }
1255
1256         return num_tx;
1257
1258 send_n_return:
1259         /* send any already prepared fd */
1260         if (loop) {
1261                 unsigned int i = 0;
1262
1263                 retry_count = 0;
1264                 while (i < loop) {
1265                         ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1266                                                          &fd_arr[i],
1267                                                          &flags[i],
1268                                                          loop - i);
1269                         if (unlikely(ret < 0)) {
1270                                 retry_count++;
1271                                 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1272                                         break;
1273                         } else {
1274                                 i += ret;
1275                                 retry_count = 0;
1276                         }
1277                 }
1278                 num_tx += i;
1279         }
1280 skip_tx:
1281         dpaa2_q->tx_pkts += num_tx;
1282
1283         loop = 0;
1284         while (loop < num_tx) {
1285                 if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
1286                         rte_pktmbuf_free(*orig_bufs);
1287                 orig_bufs++;
1288                 loop++;
1289         }
1290
1291         return num_tx;
1292 }
1293
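/* Free the mbuf referenced by a completed enqueue-response entry. Enqueue
 * responses are requested when strict ordering is enabled.
 */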
1294 void
1295 dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci)
1296 {
1297         struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1298         struct qbman_fd *fd;
1299         struct rte_mbuf *m;
1300
1301         fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
1302
1303         /* The port id does not matter here as we are only freeing the mbuf */
1304         m = eth_fd_to_mbuf(fd, 0);
1305         rte_pktmbuf_free(m);
1306 }
1307
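/* Prepare the enqueue descriptor for an atomic/ordered mbuf: restore the ORP
 * sequence number for ordered traffic (requesting an enqueue response when
 * strict ordering is enabled), or consume the held DQRR entry for atomic
 * traffic.
 */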
1308 static void
1309 dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
1310                              struct rte_mbuf *m,
1311                              struct qbman_eq_desc *eqdesc)
1312 {
1313         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1314         struct dpaa2_dev_priv *priv = eth_data->dev_private;
1315         struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1316         struct eqresp_metadata *eqresp_meta;
1317         uint16_t orpid, seqnum;
1318         uint8_t dq_idx;
1319
1320         qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid);
1321
1322         if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
1323                 orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
1324                         DPAA2_EQCR_OPRID_SHIFT;
1325                 seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
1326                         DPAA2_EQCR_SEQNUM_SHIFT;
1327
1328                 if (!priv->en_loose_ordered) {
1329                         qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
1330                         qbman_eq_desc_set_response(eqdesc, (uint64_t)
1331                                 DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
1332                                 dpio_dev->eqresp_pi]), 1);
1333                         qbman_eq_desc_set_token(eqdesc, 1);
1334
1335                         eqresp_meta = &dpio_dev->eqresp_meta[
1336                                 dpio_dev->eqresp_pi];
1337                         eqresp_meta->dpaa2_q = dpaa2_q;
1338                         eqresp_meta->mp = m->pool;
1339
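                        /* Advance the enqueue-response producer index, wrapping
                         * at MAX_EQ_RESP_ENTRIES.
                         */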
1340                         if (++dpio_dev->eqresp_pi >= MAX_EQ_RESP_ENTRIES)
1341                                 dpio_dev->eqresp_pi = 0;
1343                 } else {
1344                         qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
1345                 }
1346         } else {
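                /* Atomic context: the stored sequence number is the DQRR index
                 * plus one. Tie this enqueue to that DQRR entry via DCA so it
                 * is consumed on enqueue, and drop it from the per-lcore held
                 * set.
                 */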
1347                 dq_idx = *dpaa2_seqn(m) - 1;
1348                 qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
1349                 DPAA2_PER_LCORE_DQRR_SIZE--;
1350                 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
1351         }
1352         *dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
1353 }
1354
1355 /* Callback to handle sending ordered packets through WRIOP based interface */
1356 uint16_t
1357 dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1358 {
1359         /* Function to transmit frames to the given device and VQ */
1360         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1361         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1362         struct dpaa2_dev_priv *priv = eth_data->dev_private;
1363         struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
1364         struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1365         struct rte_mbuf *mi;
1366         struct rte_mempool *mp;
1367         struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1368         struct qbman_swp *swp;
1369         uint32_t frames_to_send, num_free_eq_desc;
1370         uint32_t loop, retry_count;
1371         int32_t ret;
1372         uint16_t num_tx = 0;
1373         uint16_t bpid;
1374
1375         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1376                 ret = dpaa2_affine_qbman_swp();
1377                 if (ret) {
1378                         DPAA2_PMD_ERR(
1379                                 "Failed to allocate IO portal, tid: %d\n",
1380                                 rte_gettid());
1381                         return 0;
1382                 }
1383         }
1384         swp = DPAA2_PER_LCORE_PORTAL;
1385
1386         DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
1387                            eth_data, dpaa2_q->fqid);
1388
1389         /* This would also handle normal and atomic queues as any type
1390          * of packet can be enqueued when ordered queues are being used.
1391          */
1392         while (nb_pkts) {
1393                 /*Check if the queue is congested*/
1394                 retry_count = 0;
1395                 while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1396                         retry_count++;
1397                         /* Retry for some time before giving up */
1398                         if (retry_count > CONG_RETRY_COUNT)
1399                                 goto skip_tx;
1400                 }
1401
1402                 frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1403                         dpaa2_eqcr_size : nb_pkts;
1404
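                /* With strict ordering, each ORP enqueue consumes an
                 * enqueue-response entry; clamp the burst to the number of
                 * free entries.
                 */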
1405                 if (!priv->en_loose_ordered) {
1406                         if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
1407                                 num_free_eq_desc = dpaa2_free_eq_descriptors();
1408                                 if (num_free_eq_desc < frames_to_send)
1409                                         frames_to_send = num_free_eq_desc;
1410                         }
1411                 }
1412
1413                 for (loop = 0; loop < frames_to_send; loop++) {
1414                         /*Prepare enqueue descriptor*/
1415                         qbman_eq_desc_clear(&eqdesc[loop]);
1416
1417                         if (*dpaa2_seqn(*bufs)) {
1418                                 /* Use only queue 0 for Tx in case of atomic/
1419                                  * ordered packets, as packets can get reordered
1420                                  * when being transmitted out of the interface
1421                                  */
1422                                 dpaa2_set_enqueue_descriptor(order_sendq,
1423                                                              (*bufs),
1424                                                              &eqdesc[loop]);
1425                         } else {
1426                                 qbman_eq_desc_set_no_orp(&eqdesc[loop],
1427                                                          DPAA2_EQ_RESP_ERR_FQ);
1428                                 qbman_eq_desc_set_fq(&eqdesc[loop],
1429                                                      dpaa2_q->fqid);
1430                         }
1431
1432                         if (likely(RTE_MBUF_DIRECT(*bufs))) {
1433                                 mp = (*bufs)->pool;
1434                                 /* Check the basic scenario and set
1435                                  * the FD appropriately here itself.
1436                                  */
1437                                 if (likely(mp && mp->ops_index ==
1438                                     priv->bp_list->dpaa2_ops_index &&
1439                                     (*bufs)->nb_segs == 1 &&
1440                                     rte_mbuf_refcnt_read((*bufs)) == 1)) {
1441                                         if (unlikely((*bufs)->ol_flags
1442                                                 & PKT_TX_VLAN_PKT)) {
1443                                                 ret = rte_vlan_insert(bufs);
1444                                                 if (ret)
1445                                                         goto send_n_return;
1446                                         }
1447                                         DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1448                                                 &fd_arr[loop],
1449                                                 mempool_to_bpid(mp));
1450                                         bufs++;
1451                                         continue;
1452                                 }
1453                         } else {
1454                                 mi = rte_mbuf_from_indirect(*bufs);
1455                                 mp = mi->pool;
1456                         }
1457                         /* Not a hw_pkt pool allocated frame */
1458                         if (unlikely(!mp || !priv->bp_list)) {
1459                                 DPAA2_PMD_ERR("Err: No buffer pool attached");
1460                                 goto send_n_return;
1461                         }
1462
1463                         if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1464                                 DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1465                                 /* alloc should be from the default buffer pool
1466                                  * attached to this interface
1467                                  */
1468                                 bpid = priv->bp_list->buf_pool.bpid;
1469
1470                                 if (unlikely((*bufs)->nb_segs > 1)) {
1471                                         DPAA2_PMD_ERR(
1472                                                 "S/G not supp for non hw offload buffer");
1473                                         goto send_n_return;
1474                                 }
1475                                 if (eth_copy_mbuf_to_fd(*bufs,
1476                                                         &fd_arr[loop], bpid)) {
1477                                         goto send_n_return;
1478                                 }
1479                                 /* free the original packet */
1480                                 rte_pktmbuf_free(*bufs);
1481                         } else {
1482                                 bpid = mempool_to_bpid(mp);
1483                                 if (unlikely((*bufs)->nb_segs > 1)) {
1484                                         if (eth_mbuf_to_sg_fd(*bufs,
1485                                                               &fd_arr[loop],
1486                                                               mp,
1487                                                               bpid))
1488                                                 goto send_n_return;
1489                                 } else {
1490                                         eth_mbuf_to_fd(*bufs,
1491                                                        &fd_arr[loop], bpid);
1492                                 }
1493                         }
1494                         bufs++;
1495                 }
1496
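                /* Enqueue with one descriptor per frame so each FD keeps its
                 * own ordering/atomic context; retry a bounded number of times
                 * on failure before bailing out with a partial send.
                 */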
1497                 loop = 0;
1498                 retry_count = 0;
1499                 while (loop < frames_to_send) {
1500                         ret = qbman_swp_enqueue_multiple_desc(swp,
1501                                         &eqdesc[loop], &fd_arr[loop],
1502                                         frames_to_send - loop);
1503                         if (unlikely(ret < 0)) {
1504                                 retry_count++;
1505                                 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1506                                         num_tx += loop;
1507                                         nb_pkts -= loop;
1508                                         goto send_n_return;
1509                                 }
1510                         } else {
1511                                 loop += ret;
1512                                 retry_count = 0;
1513                         }
1514                 }
1515
1516                 num_tx += loop;
1517                 nb_pkts -= loop;
1518         }
1519         dpaa2_q->tx_pkts += num_tx;
1520         return num_tx;
1521
1522 send_n_return:
1523         /* send any already prepared fd */
1524         if (loop) {
1525                 unsigned int i = 0;
1526
1527                 retry_count = 0;
1528                 while (i < loop) {
1529                         ret = qbman_swp_enqueue_multiple_desc(swp,
1530                                        &eqdesc[i], &fd_arr[i], loop - i);
1531                         if (unlikely(ret < 0)) {
1532                                 retry_count++;
1533                                 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1534                                         break;
1535                         } else {
1536                                 i += ret;
1537                                 retry_count = 0;
1538                         }
1539                 }
1540                 num_tx += i;
1541         }
1542 skip_tx:
1543         dpaa2_q->tx_pkts += num_tx;
1544         return num_tx;
1545 }
1546
1547 /**
1548  * Dummy DPDK callback for TX.
1549  *
1550  * This function is used to temporarily replace the real callback during
1551  * unsafe control operations on the queue, or in case of error.
1552  *
1553  * @param queue
1554  *   Generic pointer to TX queue structure.
1555  * @param[in] bufs
1556  *   Packets to transmit.
1557  * @param nb_pkts
1558  *   Number of packets in array.
1559  *
1560  * @return
1561  *   Number of packets successfully transmitted (<= nb_pkts); always 0 here.
1562  */
1563 uint16_t
1564 dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1565 {
1566         (void)queue;
1567         (void)bufs;
1568         (void)nb_pkts;
1569         return 0;
1570 }
1571
1572 #if defined(RTE_TOOLCHAIN_GCC)
1573 #pragma GCC diagnostic push
1574 #pragma GCC diagnostic ignored "-Wcast-qual"
1575 #elif defined(RTE_TOOLCHAIN_CLANG)
1576 #pragma clang diagnostic push
1577 #pragma clang diagnostic ignored "-Wcast-qual"
1578 #endif
1579
1580 /* This function loops back all the received packets. */
1581 uint16_t
1582 dpaa2_dev_loopback_rx(void *queue,
1583                       struct rte_mbuf **bufs __rte_unused,
1584                       uint16_t nb_pkts)
1585 {
1586         /* Function to receive frames for a given device and VQ */
1587         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1588         struct qbman_result *dq_storage, *dq_storage1 = NULL;
1589         uint32_t fqid = dpaa2_q->fqid;
1590         int ret, num_rx = 0, num_tx = 0, pull_size;
1591         uint8_t pending, status;
1592         struct qbman_swp *swp;
1593         struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
1594         struct qbman_pull_desc pulldesc;
1595         struct qbman_eq_desc eqdesc;
1596         struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
1597         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1598         struct dpaa2_dev_priv *priv = eth_data->dev_private;
1599         struct dpaa2_queue *tx_q = priv->tx_vq[0];
1600         /* TODO: currently only the 1st Tx queue is used for loopback */
1601
1602         if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
1603                 ret = dpaa2_affine_qbman_ethrx_swp();
1604                 if (ret) {
1605                         DPAA2_PMD_ERR("Failure in affining portal");
1606                         return 0;
1607                 }
1608         }
1609         swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
1610         pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
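        /* No dequeue is in flight for this queue yet (first use on this
         * lcore), so issue an initial volatile dequeue command before polling.
         */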
1611         if (unlikely(!q_storage->active_dqs)) {
1612                 q_storage->toggle = 0;
1613                 dq_storage = q_storage->dq_storage[q_storage->toggle];
1614                 q_storage->last_num_pkts = pull_size;
1615                 qbman_pull_desc_clear(&pulldesc);
1616                 qbman_pull_desc_set_numframes(&pulldesc,
1617                                               q_storage->last_num_pkts);
1618                 qbman_pull_desc_set_fq(&pulldesc, fqid);
1619                 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1620                         (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
1621                 if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1622                         while (!qbman_check_command_complete(
1623                                get_swp_active_dqs(
1624                                DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1625                                 ;
1626                         clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1627                 }
1628                 while (1) {
1629                         if (qbman_swp_pull(swp, &pulldesc)) {
1630                                 DPAA2_PMD_DP_DEBUG(
1631                                         "VDQ command not issued. QBMAN busy\n");
1632                                 /* Portal was busy, try again */
1633                                 continue;
1634                         }
1635                         break;
1636                 }
1637                 q_storage->active_dqs = dq_storage;
1638                 q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
1639                 set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
1640                                    dq_storage);
1641         }
1642
1643         dq_storage = q_storage->active_dqs;
1644         rte_prefetch0((void *)(size_t)(dq_storage));
1645         rte_prefetch0((void *)(size_t)(dq_storage + 1));
1646
1647         /* Prepare the next pull descriptor. This gives room for the
1648          * prefetching done on DQRR entries.
1649          */
1650         q_storage->toggle ^= 1;
1651         dq_storage1 = q_storage->dq_storage[q_storage->toggle];
1652         qbman_pull_desc_clear(&pulldesc);
1653         qbman_pull_desc_set_numframes(&pulldesc, pull_size);
1654         qbman_pull_desc_set_fq(&pulldesc, fqid);
1655         qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
1656                 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
1657
1658         /*Prepare enqueue descriptor*/
1659         qbman_eq_desc_clear(&eqdesc);
1660         qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1661         qbman_eq_desc_set_response(&eqdesc, 0, 0);
1662         qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);
1663
1664         /* Check if the previously issued command has completed.
1665          * The SWP also appears to be shared between the Ethernet driver
1666          * and the SEC driver.
1667          */
1668         while (!qbman_check_command_complete(dq_storage))
1669                 ;
1670         if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
1671                 clear_swp_active_dqs(q_storage->active_dpio_id);
1672
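        /* Walk the dequeue results as QBMAN delivers them, collecting the FDs
         * until the pull command reports completion.
         */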
1673         pending = 1;
1674
1675         do {
1676                 /* Loop until the dq_storage is updated with
1677                  * new token by QBMAN
1678                  */
1679                 while (!qbman_check_new_result(dq_storage))
1680                         ;
1681                 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
1682                 /* Check whether the last pull command has completed and
1683                  * set the condition for loop termination
1684                  */
1685                 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1686                         pending = 0;
1687                         /* Check for valid frame. */
1688                         status = qbman_result_DQ_flags(dq_storage);
1689                         if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
1690                                 continue;
1691                 }
1692                 fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage);
1693
1694                 dq_storage++;
1695                 num_rx++;
1696         } while (pending);
1697
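        /* Re-enqueue every dequeued FD unchanged on the Tx queue, looping
         * until QBMAN has accepted all of them.
         */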
1698         while (num_tx < num_rx) {
1699                 num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
1700                                 &fd[num_tx], 0, num_rx - num_tx);
1701         }
1702
1703         if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1704                 while (!qbman_check_command_complete(
1705                        get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1706                         ;
1707                 clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1708         }
1709         /* issue a volatile dequeue command for next pull */
1710         while (1) {
1711                 if (qbman_swp_pull(swp, &pulldesc)) {
1712                         DPAA2_PMD_DP_DEBUG("VDQ command is not issued. "
1713                                           "QBMAN is busy (2)\n");
1714                         continue;
1715                 }
1716                 break;
1717         }
1718         q_storage->active_dqs = dq_storage1;
1719         q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
1720         set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
1721
1722         dpaa2_q->rx_pkts += num_rx;
1723         dpaa2_q->tx_pkts += num_tx;
1724
1725         return 0;
1726 }
1727 #if defined(RTE_TOOLCHAIN_GCC)
1728 #pragma GCC diagnostic pop
1729 #elif defined(RTE_TOOLCHAIN_CLANG)
1730 #pragma clang diagnostic pop
1731 #endif