net/dpaa2: support ESP in packet type parsing
[dpdk.git] / drivers / net / dpaa2 / dpaa2_rxtx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2016-2021 NXP
5  *
6  */
7
8 #include <time.h>
9 #include <net/if.h>
10
11 #include <rte_mbuf.h>
12 #include <ethdev_driver.h>
13 #include <rte_malloc.h>
14 #include <rte_memcpy.h>
15 #include <rte_string_fns.h>
16 #include <rte_dev.h>
17 #include <rte_hexdump.h>
18
19 #include <rte_fslmc.h>
20 #include <fslmc_vfio.h>
21 #include <dpaa2_hw_pvt.h>
22 #include <dpaa2_hw_dpio.h>
23 #include <dpaa2_hw_mempool.h>
24
25 #include "dpaa2_pmd_logs.h"
26 #include "dpaa2_ethdev.h"
27 #include "base/dpaa2_hw_dpni_annot.h"
28
29 static inline uint32_t __rte_hot
30 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
31                         struct dpaa2_annot_hdr *annotation);
32
33 static void enable_tx_tstamp(struct qbman_fd *fd) __rte_unused;
34
35 static inline rte_mbuf_timestamp_t *
36 dpaa2_timestamp_dynfield(struct rte_mbuf *mbuf)
37 {
38         return RTE_MBUF_DYNFIELD(mbuf,
39                 dpaa2_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
40 }
41
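/* Populate a contiguous-frame FD from a single-segment mbuf: the buffer
 * address, length, buffer pool id and data offset are taken from the mbuf,
 * while the FRC, CTRL and FLC fields of the FD are cleared.
 */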
42 #define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid)  do { \
43         DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
44         DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
45         DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \
46         DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \
47         DPAA2_SET_FD_FRC(_fd, 0);               \
48         DPAA2_RESET_FD_CTRL(_fd);               \
49         DPAA2_RESET_FD_FLC(_fd);                \
50 } while (0)
51
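/* Rx parsing for LX2160A: the hardware parse summary is delivered in the FRC
 * field of the FD, so most packet types can be derived from a simple lookup;
 * anything not covered falls back to the annotation-based slow parser.
 */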
52 static inline void __rte_hot
53 dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
54                        void *hw_annot_addr)
55 {
56         uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd);
57         struct dpaa2_annot_hdr *annotation =
58                         (struct dpaa2_annot_hdr *)hw_annot_addr;
59
60         m->packet_type = RTE_PTYPE_UNKNOWN;
61         switch (frc) {
62         case DPAA2_PKT_TYPE_ETHER:
63                 m->packet_type = RTE_PTYPE_L2_ETHER;
64                 break;
65         case DPAA2_PKT_TYPE_IPV4:
66                 m->packet_type = RTE_PTYPE_L2_ETHER |
67                         RTE_PTYPE_L3_IPV4;
68                 break;
69         case DPAA2_PKT_TYPE_IPV6:
70                 m->packet_type = RTE_PTYPE_L2_ETHER |
71                         RTE_PTYPE_L3_IPV6;
72                 break;
73         case DPAA2_PKT_TYPE_IPV4_EXT:
74                 m->packet_type = RTE_PTYPE_L2_ETHER |
75                         RTE_PTYPE_L3_IPV4_EXT;
76                 break;
77         case DPAA2_PKT_TYPE_IPV6_EXT:
78                 m->packet_type = RTE_PTYPE_L2_ETHER |
79                         RTE_PTYPE_L3_IPV6_EXT;
80                 break;
81         case DPAA2_PKT_TYPE_IPV4_TCP:
82                 m->packet_type = RTE_PTYPE_L2_ETHER |
83                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP;
84                 break;
85         case DPAA2_PKT_TYPE_IPV6_TCP:
86                 m->packet_type = RTE_PTYPE_L2_ETHER |
87                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
88                 break;
89         case DPAA2_PKT_TYPE_IPV4_UDP:
90                 m->packet_type = RTE_PTYPE_L2_ETHER |
91                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP;
92                 break;
93         case DPAA2_PKT_TYPE_IPV6_UDP:
94                 m->packet_type = RTE_PTYPE_L2_ETHER |
95                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP;
96                 break;
97         case DPAA2_PKT_TYPE_IPV4_SCTP:
98                 m->packet_type = RTE_PTYPE_L2_ETHER |
99                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP;
100                 break;
101         case DPAA2_PKT_TYPE_IPV6_SCTP:
102                 m->packet_type = RTE_PTYPE_L2_ETHER |
103                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP;
104                 break;
105         case DPAA2_PKT_TYPE_IPV4_ICMP:
106                 m->packet_type = RTE_PTYPE_L2_ETHER |
107                         RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP;
108                 break;
109         case DPAA2_PKT_TYPE_IPV6_ICMP:
110                 m->packet_type = RTE_PTYPE_L2_ETHER |
111                         RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP;
112                 break;
113         default:
114                 m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
115         }
116         m->hash.rss = fd->simple.flc_hi;
117         m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
118
119         if (dpaa2_enable_ts[m->port]) {
120                 *dpaa2_timestamp_dynfield(m) = annotation->word2;
121                 m->ol_flags |= dpaa2_timestamp_rx_dynflag;
122                 DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "",
123                                 *dpaa2_timestamp_dynfield(m));
124         }
125
126         DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
127                 "ol_flags =0x%" PRIx64 "",
128                 frc, m->packet_type, m->ol_flags);
129 }
130
131 static inline uint32_t __rte_hot
132 dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
133                         struct dpaa2_annot_hdr *annotation)
134 {
135         uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
136         uint16_t *vlan_tci;
137
138         DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t"
139                         "(4)=0x%" PRIx64 "\t",
140                         annotation->word3, annotation->word4);
141
142 #if defined(RTE_LIBRTE_IEEE1588)
143         if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP)) {
144                 mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
145                 mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;
146         }
147 #endif
148
149         if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
150                 vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
151                         (VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
152                 mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
153                 mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
154                 pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
155         } else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
156                 vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
157                         (VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
158                 mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
159                 mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_QINQ;
160                 pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
161         }
162
163         if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
164                 pkt_type |= RTE_PTYPE_L2_ETHER_ARP;
165                 goto parse_done;
166         } else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
167                 pkt_type |= RTE_PTYPE_L2_ETHER;
168         } else {
169                 goto parse_done;
170         }
171
172         if (BIT_ISSET_AT_POS(annotation->word3, L2_MPLS_1_PRESENT |
173                                 L2_MPLS_N_PRESENT))
174                 pkt_type |= RTE_PTYPE_L2_ETHER_MPLS;
175
176         if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
177                              L3_IPV4_N_PRESENT)) {
178                 pkt_type |= RTE_PTYPE_L3_IPV4;
179                 if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
180                         L3_IP_N_OPT_PRESENT))
181                         pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
182                 if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_IPSEC_ESP_PRESENT |
183                                         L3_PROTO_ESP_PRESENT))
184                         pkt_type |= RTE_PTYPE_TUNNEL_ESP;
185
186         } else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
187                   L3_IPV6_N_PRESENT)) {
188                 pkt_type |= RTE_PTYPE_L3_IPV6;
189                 if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
190                     L3_IP_N_OPT_PRESENT))
191                         pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
192                 if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_IPSEC_ESP_PRESENT |
193                                         L3_PROTO_ESP_PRESENT))
194                         pkt_type |= RTE_PTYPE_TUNNEL_ESP;
195         } else {
196                 goto parse_done;
197         }
198
199         if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
200                 mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
201         else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
202                 mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
203
204         if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
205             L3_IP_1_MORE_FRAGMENT |
206             L3_IP_N_FIRST_FRAGMENT |
207             L3_IP_N_MORE_FRAGMENT)) {
208                 pkt_type |= RTE_PTYPE_L4_FRAG;
209                 goto parse_done;
210         } else {
211                 pkt_type |= RTE_PTYPE_L4_NONFRAG;
212         }
213
214         if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
215                 pkt_type |= RTE_PTYPE_L4_UDP;
216
217         else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
218                 pkt_type |= RTE_PTYPE_L4_TCP;
219
220         else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
221                 pkt_type |= RTE_PTYPE_L4_SCTP;
222
223         else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
224                 pkt_type |= RTE_PTYPE_L4_ICMP;
225
226         else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
227                 pkt_type |= RTE_PTYPE_UNKNOWN;
228
229 parse_done:
230         return pkt_type;
231 }
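/* Illustrative note (not part of this driver): with the ESP handling above,
 * an application can recognise IPsec ESP traffic from the reported packet
 * type, for example:
 *
 *     if ((m->packet_type & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
 *             handle_esp(m);   // handle_esp() is a hypothetical handler
 */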
232
233 static inline uint32_t __rte_hot
234 dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
235 {
236         struct dpaa2_annot_hdr *annotation =
237                         (struct dpaa2_annot_hdr *)hw_annot_addr;
238
239         DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
240                            annotation->word4);
241
242         if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
243                 mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
244         else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
245                 mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
246
247         if (dpaa2_enable_ts[mbuf->port]) {
248                 *dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
249                 mbuf->ol_flags |= dpaa2_timestamp_rx_dynflag;
250                 DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "",
251                                 *dpaa2_timestamp_dynfield(mbuf));
252         }
253
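        /* Hedged note (added for clarity, not in the original source): the
         * constant below appears to mask the word3 parse-result bits (VLAN,
         * MPLS, ARP and similar L2 indications) that the fast-path switch on
         * word4 cannot classify; if any of them is set, use the slow parser.
         */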
254         /* Check detailed parsing requirement */
255         if (annotation->word3 & 0x7FFFFC3FFFF)
256                 return dpaa2_dev_rx_parse_slow(mbuf, annotation);
257
258         /* Return some common types from parse processing */
259         switch (annotation->word4) {
260         case DPAA2_L3_IPv4:
261                 return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
262         case DPAA2_L3_IPv6:
263                 return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
264         case DPAA2_L3_IPv4_TCP:
265                 return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
266                                 RTE_PTYPE_L4_TCP;
267         case DPAA2_L3_IPv4_UDP:
268                 return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
269                                 RTE_PTYPE_L4_UDP;
270         case DPAA2_L3_IPv6_TCP:
271                 return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
272                                 RTE_PTYPE_L4_TCP;
273         case DPAA2_L3_IPv6_UDP:
274                 return  RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
275                                 RTE_PTYPE_L4_UDP;
276         default:
277                 break;
278         }
279
280         return dpaa2_dev_rx_parse_slow(mbuf, annotation);
281 }
282
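/* Convert a scatter/gather FD into a chained mbuf: the first SGE becomes the
 * head segment, the remaining SGEs are linked as further segments, and the
 * buffer that carried the SG table itself is freed at the end.
 */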
283 static inline struct rte_mbuf *__rte_hot
284 eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
285                   int port_id)
286 {
287         struct qbman_sge *sgt, *sge;
288         size_t sg_addr, fd_addr;
289         int i = 0;
290         void *hw_annot_addr;
291         struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;
292
293         fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
294         hw_annot_addr = (void *)(fd_addr + DPAA2_FD_PTA_SIZE);
295
296         /* Get Scatter gather table address */
297         sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));
298
299         sge = &sgt[i++];
300         sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));
301
302         /* First Scatter gather entry */
303         first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
304                 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
305         /* Prepare all the metadata for first segment */
306         first_seg->buf_addr = (uint8_t *)sg_addr;
307         first_seg->ol_flags = 0;
308         first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
309         first_seg->data_len = sge->length  & 0x1FFFF;
310         first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
311         first_seg->nb_segs = 1;
312         first_seg->next = NULL;
313         first_seg->port = port_id;
314         if (dpaa2_svr_family == SVR_LX2160A)
315                 dpaa2_dev_rx_parse_new(first_seg, fd, hw_annot_addr);
316         else
317                 first_seg->packet_type =
318                         dpaa2_dev_rx_parse(first_seg, hw_annot_addr);
319
320         rte_mbuf_refcnt_set(first_seg, 1);
321 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
322         rte_mempool_check_cookies(rte_mempool_from_obj((void *)first_seg),
323                         (void **)&first_seg, 1, 1);
324 #endif
325         cur_seg = first_seg;
326         while (!DPAA2_SG_IS_FINAL(sge)) {
327                 sge = &sgt[i++];
328                 sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
329                                 DPAA2_GET_FLE_ADDR(sge));
330                 next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
331                         rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
332                 next_seg->buf_addr  = (uint8_t *)sg_addr;
333                 next_seg->data_off  = DPAA2_GET_FLE_OFFSET(sge);
334                 next_seg->data_len  = sge->length  & 0x1FFFF;
335                 first_seg->nb_segs += 1;
336                 rte_mbuf_refcnt_set(next_seg, 1);
337 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
338                 rte_mempool_check_cookies(rte_mempool_from_obj((void *)next_seg),
339                                 (void **)&next_seg, 1, 1);
340 #endif
341                 cur_seg->next = next_seg;
342                 next_seg->next = NULL;
343                 cur_seg = next_seg;
344         }
345         temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
346                 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
347         rte_mbuf_refcnt_set(temp, 1);
348 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
349         rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
350                         (void **)&temp, 1, 1);
351 #endif
352         rte_pktmbuf_free_seg(temp);
353
354         return (void *)first_seg;
355 }
356
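/* Convert a contiguous FD into a single-segment mbuf inlined in the same
 * hardware buffer; length, offset and packet type are refreshed from the FD
 * and the hardware parse results.
 */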
357 static inline struct rte_mbuf *__rte_hot
358 eth_fd_to_mbuf(const struct qbman_fd *fd,
359                int port_id)
360 {
361         void *v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
362         void *hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
363         struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
364                      rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
365
366         /* need to repopulate some of the fields,
367          * as they may have changed in the last transmission
368          */
369         mbuf->nb_segs = 1;
370         mbuf->ol_flags = 0;
371         mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
372         mbuf->data_len = DPAA2_GET_FD_LEN(fd);
373         mbuf->pkt_len = mbuf->data_len;
374         mbuf->port = port_id;
375         mbuf->next = NULL;
376         rte_mbuf_refcnt_set(mbuf, 1);
377 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
378         rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf),
379                         (void **)&mbuf, 1, 1);
380 #endif
381
382         /* Parse the packet.
383          * For LX2, the parse results are in the FRC field of the FD.
384          * For other DPAA2 platforms, the parse results follow the
385          * private software annotation area.
386          */
387
388         if (dpaa2_svr_family == SVR_LX2160A)
389                 dpaa2_dev_rx_parse_new(mbuf, fd, hw_annot_addr);
390         else
391                 mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, hw_annot_addr);
392
393         DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
394                 "fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
395                 mbuf, mbuf->buf_addr, mbuf->data_off,
396                 DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
397                 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
398                 DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
399
400         return mbuf;
401 }
402
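/* Build a scatter/gather FD from a (possibly multi-segment) mbuf. The SG
 * table is placed in the headroom of the first segment when it is a direct
 * mbuf with enough headroom; otherwise a separate buffer is allocated from
 * the given mempool to hold it.
 */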
403 static int __rte_noinline __rte_hot
404 eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
405                   struct qbman_fd *fd,
406                   struct rte_mempool *mp, uint16_t bpid)
407 {
408         struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
409         struct qbman_sge *sgt, *sge = NULL;
410         int i, offset = 0;
411
412 #ifdef RTE_LIBRTE_IEEE1588
413         /* annotation area for timestamp in first buffer */
414         offset = 0x64;
415 #endif
416         if (RTE_MBUF_DIRECT(mbuf) &&
417                 (mbuf->data_off > (mbuf->nb_segs * sizeof(struct qbman_sge)
418                 + offset))) {
419                 temp = mbuf;
420                 if (rte_mbuf_refcnt_read(temp) > 1) {
421                         /* If refcnt > 1, invalid bpid is set to ensure
422                          * buffer is not freed by HW
423                          */
424                         fd->simple.bpid_offset = 0;
425                         DPAA2_SET_FD_IVP(fd);
426                         rte_mbuf_refcnt_update(temp, -1);
427                 } else {
428                         DPAA2_SET_ONLY_FD_BPID(fd, bpid);
429 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
430                         rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
431                                         (void **)&temp, 1, 0);
432 #endif
433                 }
434                 DPAA2_SET_FD_OFFSET(fd, offset);
435         } else {
436                 temp = rte_pktmbuf_alloc(mp);
437                 if (temp == NULL) {
438                         DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
439                         return -ENOMEM;
440                 }
441                 DPAA2_SET_ONLY_FD_BPID(fd, bpid);
442                 DPAA2_SET_FD_OFFSET(fd, temp->data_off);
443 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
444                 rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
445                         (void **)&temp, 1, 0);
446 #endif
447         }
448         DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
449         DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
450         DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
451         DPAA2_RESET_FD_FRC(fd);
452         DPAA2_RESET_FD_CTRL(fd);
453         DPAA2_RESET_FD_FLC(fd);
454         /* Set scatter-gather table and scatter-gather entries */
455         sgt = (struct qbman_sge *)(
456                         (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
457                         + DPAA2_GET_FD_OFFSET(fd));
458
459         for (i = 0; i < mbuf->nb_segs; i++) {
460                 sge = &sgt[i];
461                 /* Resetting the buffer pool id and offset field */
462                 sge->fin_bpid_offset = 0;
463                 DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
464                 DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
465                 sge->length = cur_seg->data_len;
466                 if (RTE_MBUF_DIRECT(cur_seg)) {
467                         /* If we are using an inline SGT in the same buffer,
468                          * set the FLE FMT as Frame Data Section.
469                          */
470                         if (temp == cur_seg) {
471                                 DPAA2_SG_SET_FORMAT(sge, qbman_fd_list);
472                                 DPAA2_SET_FLE_IVP(sge);
473                         } else {
474                                 if (rte_mbuf_refcnt_read(cur_seg) > 1) {
475                                 /* If refcnt > 1, invalid bpid is set to ensure
476                                  * buffer is not freed by HW
477                                  */
478                                         DPAA2_SET_FLE_IVP(sge);
479                                         rte_mbuf_refcnt_update(cur_seg, -1);
480                                 } else {
481                                         DPAA2_SET_FLE_BPID(sge,
482                                                 mempool_to_bpid(cur_seg->pool));
483 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
484                                 rte_mempool_check_cookies(rte_mempool_from_obj((void *)cur_seg),
485                                         (void **)&cur_seg, 1, 0);
486 #endif
487                                 }
488                         }
489                         cur_seg = cur_seg->next;
490                 } else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
491                         DPAA2_SET_FLE_IVP(sge);
492                         cur_seg = cur_seg->next;
493                 } else {
494                         /* Get owner MBUF from indirect buffer */
495                         mi = rte_mbuf_from_indirect(cur_seg);
496                         if (rte_mbuf_refcnt_read(mi) > 1) {
497                                 /* If refcnt > 1, invalid bpid is set to ensure
498                                  * owner buffer is not freed by HW
499                                  */
500                                 DPAA2_SET_FLE_IVP(sge);
501                         } else {
502                                 DPAA2_SET_FLE_BPID(sge,
503                                                    mempool_to_bpid(mi->pool));
504                                 rte_mbuf_refcnt_update(mi, 1);
505                         }
506                         prev_seg = cur_seg;
507                         cur_seg = cur_seg->next;
508                         prev_seg->next = NULL;
509                         rte_pktmbuf_free(prev_seg);
510                 }
511         }
512         DPAA2_SG_SET_FINAL(sge, true);
513         return 0;
514 }
515
516 static void
517 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
518                struct qbman_fd *fd, uint16_t bpid) __rte_unused;
519
520 static void __rte_noinline __rte_hot
521 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
522                struct qbman_fd *fd, uint16_t bpid)
523 {
524         DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
525
526         DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
527                 "fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
528                 mbuf, mbuf->buf_addr, mbuf->data_off,
529                 DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
530                 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
531                 DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
532         if (RTE_MBUF_DIRECT(mbuf)) {
533                 if (rte_mbuf_refcnt_read(mbuf) > 1) {
534                         DPAA2_SET_FD_IVP(fd);
535                         rte_mbuf_refcnt_update(mbuf, -1);
536                 }
537 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
538                 else
539                         rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf),
540                                 (void **)&mbuf, 1, 0);
541 #endif
542         } else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
543                 DPAA2_SET_FD_IVP(fd);
544         } else {
545                 struct rte_mbuf *mi;
546
547                 mi = rte_mbuf_from_indirect(mbuf);
548                 if (rte_mbuf_refcnt_read(mi) > 1)
549                         DPAA2_SET_FD_IVP(fd);
550                 else
551                         rte_mbuf_refcnt_update(mi, 1);
552                 rte_pktmbuf_free(mbuf);
553         }
554 }
555
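/* Copy-based Tx fallback: allocate a buffer from the DPAA2 buffer pool given
 * by bpid, copy the packet data and the relevant mbuf fields into it, and
 * build a contiguous FD from the copy.
 */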
556 static inline int __rte_hot
557 eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
558                     struct qbman_fd *fd, uint16_t bpid)
559 {
560         struct rte_mbuf *m;
561         void *mb = NULL;
562
563         if (rte_dpaa2_mbuf_alloc_bulk(
564                 rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
565                 DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
566                 return -1;
567         }
568         m = (struct rte_mbuf *)mb;
569         memcpy((char *)m->buf_addr + mbuf->data_off,
570                (void *)((char *)mbuf->buf_addr + mbuf->data_off),
571                 mbuf->pkt_len);
572
573         /* Copy required fields */
574         m->data_off = mbuf->data_off;
575         m->ol_flags = mbuf->ol_flags;
576         m->packet_type = mbuf->packet_type;
577         m->tx_offload = mbuf->tx_offload;
578
579         DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);
580
581 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
582         rte_mempool_check_cookies(rte_mempool_from_obj((void *)m),
583                 (void **)&m, 1, 0);
584 #endif
585         DPAA2_PMD_DP_DEBUG(
586                 "mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
587                 " meta: %d, off: %d, len: %d\n",
588                 (void *)mbuf,
589                 mbuf->buf_addr,
590                 DPAA2_GET_FD_ADDR(fd),
591                 DPAA2_GET_FD_BPID(fd),
592                 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
593                 DPAA2_GET_FD_OFFSET(fd),
594                 DPAA2_GET_FD_LEN(fd));
595
596         return 0;
597 }
598
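/* Drain the Rx error queue of a device: pull the pending frames with a
 * volatile dequeue, log the FD error bits and the frame annotation status,
 * and hex-dump each faulty packet.
 */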
599 static void
600 dump_err_pkts(struct dpaa2_queue *dpaa2_q)
601 {
602         /* Function to receive and dump error frames for a given device and VQ */
603         struct qbman_result *dq_storage;
604         uint32_t fqid = dpaa2_q->fqid;
605         int ret, num_rx = 0, num_pulled;
606         uint8_t pending, status;
607         struct qbman_swp *swp;
608         const struct qbman_fd *fd;
609         struct qbman_pull_desc pulldesc;
610         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
611         uint32_t lcore_id = rte_lcore_id();
612         void *v_addr, *hw_annot_addr;
613         struct dpaa2_fas *fas;
614
615         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
616                 ret = dpaa2_affine_qbman_swp();
617                 if (ret) {
618                         DPAA2_PMD_ERR("Failed to allocate IO portal, tid: %d\n",
619                                 rte_gettid());
620                         return;
621                 }
622         }
623         swp = DPAA2_PER_LCORE_PORTAL;
624
625         dq_storage = dpaa2_q->q_storage[lcore_id].dq_storage[0];
626         qbman_pull_desc_clear(&pulldesc);
627         qbman_pull_desc_set_fq(&pulldesc, fqid);
628         qbman_pull_desc_set_storage(&pulldesc, dq_storage,
629                         (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
630         qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
631
632         while (1) {
633                 if (qbman_swp_pull(swp, &pulldesc)) {
634                         DPAA2_PMD_DP_DEBUG("VDQ command is not issued. QBMAN is busy\n");
635                         /* Portal was busy, try again */
636                         continue;
637                 }
638                 break;
639         }
640
641         /* Check if the previously issued command is completed. */
642         while (!qbman_check_command_complete(dq_storage))
643                 ;
644
645         num_pulled = 0;
646         pending = 1;
647         do {
648                 /* Loop until the dq_storage is updated with
649                  * new token by QBMAN
650                  */
651                 while (!qbman_check_new_result(dq_storage))
652                         ;
653
654                 /* Check whether the last pull command has expired and,
655                  * if so, set the condition for loop termination.
656                  */
657                 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
658                         pending = 0;
659                         /* Check for valid frame. */
660                         status = qbman_result_DQ_flags(dq_storage);
661                         if (unlikely((status &
662                                 QBMAN_DQ_STAT_VALIDFRAME) == 0))
663                                 continue;
664                 }
665                 fd = qbman_result_DQ_fd(dq_storage);
666                 v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
667                 hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
668                 fas = hw_annot_addr;
669
670                 DPAA2_PMD_ERR("\n\n[%d] error packet on port[%d]:"
671                         " fd_off: %d, fd_err: %x, fas_status: %x",
672                         rte_lcore_id(), eth_data->port_id,
673                         DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ERR(fd),
674                         fas->status);
675                 rte_hexdump(stderr, "Error packet", v_addr,
676                         DPAA2_GET_FD_OFFSET(fd) + DPAA2_GET_FD_LEN(fd));
677
678                 dq_storage++;
679                 num_rx++;
680                 num_pulled++;
681         } while (pending);
682
683         dpaa2_q->err_pkts += num_rx;
684 }
685
686 /* This function assumes that the caller keeps the same value of nb_pkts
687  * across calls for a given queue; if that is not the case, it is better to
688  * use the non-prefetch version of the Rx call.
689  * It will return the number of packets requested in the previous call,
690  * without honoring the current nb_pkts or the bufs space.
691  */
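/* Two DQ result storages are used in a ping-pong fashion: while the frames
 * from the previously issued pull are being processed, the next volatile
 * dequeue command is already issued into the other storage.
 */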
692 uint16_t
693 dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
694 {
695         /* Function to receive frames for a given device and VQ */
696         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
697         struct qbman_result *dq_storage, *dq_storage1 = NULL;
698         uint32_t fqid = dpaa2_q->fqid;
699         int ret, num_rx = 0, pull_size;
700         uint8_t pending, status;
701         struct qbman_swp *swp;
702         const struct qbman_fd *fd;
703         struct qbman_pull_desc pulldesc;
704         struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
705         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
706         struct dpaa2_dev_priv *priv = eth_data->dev_private;
707
708         if (unlikely(dpaa2_enable_err_queue))
709                 dump_err_pkts(priv->rx_err_vq);
710
711         if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
712                 ret = dpaa2_affine_qbman_ethrx_swp();
713                 if (ret) {
714                         DPAA2_PMD_ERR("Failure in affining portal");
715                         return 0;
716                 }
717         }
718
719         if (unlikely(!rte_dpaa2_bpid_info &&
720                      rte_eal_process_type() == RTE_PROC_SECONDARY))
721                 rte_dpaa2_bpid_info = dpaa2_q->bp_array;
722
723         swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
724         pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
725         if (unlikely(!q_storage->active_dqs)) {
726                 q_storage->toggle = 0;
727                 dq_storage = q_storage->dq_storage[q_storage->toggle];
728                 q_storage->last_num_pkts = pull_size;
729                 qbman_pull_desc_clear(&pulldesc);
730                 qbman_pull_desc_set_numframes(&pulldesc,
731                                               q_storage->last_num_pkts);
732                 qbman_pull_desc_set_fq(&pulldesc, fqid);
733                 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
734                         (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
735                 if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
736                         while (!qbman_check_command_complete(
737                                get_swp_active_dqs(
738                                DPAA2_PER_LCORE_ETHRX_DPIO->index)))
739                                 ;
740                         clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
741                 }
742                 while (1) {
743                         if (qbman_swp_pull(swp, &pulldesc)) {
744                                 DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
745                                                   " QBMAN is busy (1)\n");
746                                 /* Portal was busy, try again */
747                                 continue;
748                         }
749                         break;
750                 }
751                 q_storage->active_dqs = dq_storage;
752                 q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
753                 set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
754                                    dq_storage);
755         }
756
757         dq_storage = q_storage->active_dqs;
758         rte_prefetch0((void *)(size_t)(dq_storage));
759         rte_prefetch0((void *)(size_t)(dq_storage + 1));
760
761         /* Prepare next pull descriptor. This will give space for the
762          * prefetching done on DQRR entries
763          */
764         q_storage->toggle ^= 1;
765         dq_storage1 = q_storage->dq_storage[q_storage->toggle];
766         qbman_pull_desc_clear(&pulldesc);
767         qbman_pull_desc_set_numframes(&pulldesc, pull_size);
768         qbman_pull_desc_set_fq(&pulldesc, fqid);
769         qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
770                 (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
771
772         /* Check if the previously issued command is completed.
773          * The SWP also appears to be shared between the Ethernet driver
774          * and the SEC driver.
775          */
776         while (!qbman_check_command_complete(dq_storage))
777                 ;
778         if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
779                 clear_swp_active_dqs(q_storage->active_dpio_id);
780
781         pending = 1;
782
783         do {
784                 /* Loop until the dq_storage is updated with
785                  * new token by QBMAN
786                  */
787                 while (!qbman_check_new_result(dq_storage))
788                         ;
789                 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
790                 /* Check whether the last pull command has expired and,
791                  * if so, set the condition for loop termination.
792                  */
793                 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
794                         pending = 0;
795                         /* Check for valid frame. */
796                         status = qbman_result_DQ_flags(dq_storage);
797                         if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
798                                 continue;
799                 }
800                 fd = qbman_result_DQ_fd(dq_storage);
801
802 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
803                 if (dpaa2_svr_family != SVR_LX2160A) {
804                         const struct qbman_fd *next_fd =
805                                 qbman_result_DQ_fd(dq_storage + 1);
806                         /* Prefetch Annotation address for the parse results */
807                         rte_prefetch0(DPAA2_IOVA_TO_VADDR((DPAA2_GET_FD_ADDR(
808                                 next_fd) + DPAA2_FD_PTA_SIZE + 16)));
809                 }
810 #endif
811
812                 if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
813                         bufs[num_rx] = eth_sg_fd_to_mbuf(fd, eth_data->port_id);
814                 else
815                         bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
816 #if defined(RTE_LIBRTE_IEEE1588)
817                 if (bufs[num_rx]->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST) {
818                         priv->rx_timestamp =
819                                 *dpaa2_timestamp_dynfield(bufs[num_rx]);
820                 }
821 #endif
822
823                 if (eth_data->dev_conf.rxmode.offloads &
824                                 RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
825                         rte_vlan_strip(bufs[num_rx]);
826
827                 dq_storage++;
828                 num_rx++;
829         } while (pending);
830
831         if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
832                 while (!qbman_check_command_complete(
833                        get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
834                         ;
835                 clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
836         }
837         /* issue a volatile dequeue command for next pull */
838         while (1) {
839                 if (qbman_swp_pull(swp, &pulldesc)) {
840                         DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
841                                           " QBMAN is busy (2)\n");
842                         continue;
843                 }
844                 break;
845         }
846         q_storage->active_dqs = dq_storage1;
847         q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
848         set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
849
850         dpaa2_q->rx_pkts += num_rx;
851
852         return num_rx;
853 }
854
855 void __rte_hot
856 dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
857                                  const struct qbman_fd *fd,
858                                  const struct qbman_result *dq,
859                                  struct dpaa2_queue *rxq,
860                                  struct rte_event *ev)
861 {
862         rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
863                 DPAA2_FD_PTA_SIZE + 16));
864
865         ev->flow_id = rxq->ev.flow_id;
866         ev->sub_event_type = rxq->ev.sub_event_type;
867         ev->event_type = RTE_EVENT_TYPE_ETHDEV;
868         ev->op = RTE_EVENT_OP_NEW;
869         ev->sched_type = rxq->ev.sched_type;
870         ev->queue_id = rxq->ev.queue_id;
871         ev->priority = rxq->ev.priority;
872
873         ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
874
875         qbman_swp_dqrr_consume(swp, dq);
876 }
877
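/* Atomic scheduling: the DQRR entry is not consumed here; its index is stored
 * in the mbuf sequence number and in the per-lcore DQRR bookkeeping so that
 * the entry can be consumed via DCA when the mbuf is later transmitted or
 * released.
 */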
878 void __rte_hot
879 dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused,
880                                const struct qbman_fd *fd,
881                                const struct qbman_result *dq,
882                                struct dpaa2_queue *rxq,
883                                struct rte_event *ev)
884 {
885         uint8_t dqrr_index;
886
887         rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
888                 DPAA2_FD_PTA_SIZE + 16));
889
890         ev->flow_id = rxq->ev.flow_id;
891         ev->sub_event_type = rxq->ev.sub_event_type;
892         ev->event_type = RTE_EVENT_TYPE_ETHDEV;
893         ev->op = RTE_EVENT_OP_NEW;
894         ev->sched_type = rxq->ev.sched_type;
895         ev->queue_id = rxq->ev.queue_id;
896         ev->priority = rxq->ev.priority;
897
898         ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
899
900         dqrr_index = qbman_get_dqrr_idx(dq);
901         *dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
902         DPAA2_PER_LCORE_DQRR_SIZE++;
903         DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
904         DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
905 }
906
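/* Ordered scheduling: the ODP id and the sequence number from the dequeue
 * result are encoded into the mbuf sequence number so that frame order can be
 * restored (ORP) when the mbuf is enqueued for transmission.
 */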
907 void __rte_hot
908 dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
909                                 const struct qbman_fd *fd,
910                                 const struct qbman_result *dq,
911                                 struct dpaa2_queue *rxq,
912                                 struct rte_event *ev)
913 {
914         rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
915                 DPAA2_FD_PTA_SIZE + 16));
916
917         ev->flow_id = rxq->ev.flow_id;
918         ev->sub_event_type = rxq->ev.sub_event_type;
919         ev->event_type = RTE_EVENT_TYPE_ETHDEV;
920         ev->op = RTE_EVENT_OP_NEW;
921         ev->sched_type = rxq->ev.sched_type;
922         ev->queue_id = rxq->ev.queue_id;
923         ev->priority = rxq->ev.priority;
924
925         ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
926
927         *dpaa2_seqn(ev->mbuf) = DPAA2_ENQUEUE_FLAG_ORP;
928         *dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
929         *dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;
930
931         qbman_swp_dqrr_consume(swp, dq);
932 }
933
934 uint16_t
935 dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
936 {
937         /* Function to receive frames for a given device and VQ */
938         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
939         struct qbman_result *dq_storage;
940         uint32_t fqid = dpaa2_q->fqid;
941         int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
942         uint8_t pending, status;
943         struct qbman_swp *swp;
944         const struct qbman_fd *fd;
945         struct qbman_pull_desc pulldesc;
946         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
947         struct dpaa2_dev_priv *priv = eth_data->dev_private;
948
949         if (unlikely(dpaa2_enable_err_queue))
950                 dump_err_pkts(priv->rx_err_vq);
951
952         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
953                 ret = dpaa2_affine_qbman_swp();
954                 if (ret) {
955                         DPAA2_PMD_ERR(
956                                 "Failed to allocate IO portal, tid: %d\n",
957                                 rte_gettid());
958                         return 0;
959                 }
960         }
961         swp = DPAA2_PER_LCORE_PORTAL;
962
963         do {
964                 dq_storage = dpaa2_q->q_storage->dq_storage[0];
965                 qbman_pull_desc_clear(&pulldesc);
966                 qbman_pull_desc_set_fq(&pulldesc, fqid);
967                 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
968                                 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
969
970                 if (next_pull > dpaa2_dqrr_size) {
971                         qbman_pull_desc_set_numframes(&pulldesc,
972                                 dpaa2_dqrr_size);
973                         next_pull -= dpaa2_dqrr_size;
974                 } else {
975                         qbman_pull_desc_set_numframes(&pulldesc, next_pull);
976                         next_pull = 0;
977                 }
978
979                 while (1) {
980                         if (qbman_swp_pull(swp, &pulldesc)) {
981                                 DPAA2_PMD_DP_DEBUG(
982                                         "VDQ command is not issued. QBMAN is busy\n");
983                                 /* Portal was busy, try again */
984                                 continue;
985                         }
986                         break;
987                 }
988
989                 rte_prefetch0((void *)((size_t)(dq_storage + 1)));
990                 /* Check if the previously issued command is completed. */
991                 while (!qbman_check_command_complete(dq_storage))
992                         ;
993
994                 num_pulled = 0;
995                 pending = 1;
996                 do {
997                         /* Loop until the dq_storage is updated with
998                          * new token by QBMAN
999                          */
1000                         while (!qbman_check_new_result(dq_storage))
1001                                 ;
1002                         rte_prefetch0((void *)((size_t)(dq_storage + 2)));
1003                         /* Check whether the last pull command has expired and,
1004                          * if so, set the condition for loop termination.
1005                          */
1006                         if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1007                                 pending = 0;
1008                                 /* Check for valid frame. */
1009                                 status = qbman_result_DQ_flags(dq_storage);
1010                                 if (unlikely((status &
1011                                         QBMAN_DQ_STAT_VALIDFRAME) == 0))
1012                                         continue;
1013                         }
1014                         fd = qbman_result_DQ_fd(dq_storage);
1015
1016 #ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
1017                         if (dpaa2_svr_family != SVR_LX2160A) {
1018                                 const struct qbman_fd *next_fd =
1019                                         qbman_result_DQ_fd(dq_storage + 1);
1020
1021                                 /* Prefetch Annotation address for the parse
1022                                  * results.
1023                                  */
1024                                 rte_prefetch0((DPAA2_IOVA_TO_VADDR(
1025                                         DPAA2_GET_FD_ADDR(next_fd) +
1026                                         DPAA2_FD_PTA_SIZE + 16)));
1027                         }
1028 #endif
1029
1030                         if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
1031                                 bufs[num_rx] = eth_sg_fd_to_mbuf(fd,
1032                                                         eth_data->port_id);
1033                         else
1034                                 bufs[num_rx] = eth_fd_to_mbuf(fd,
1035                                                         eth_data->port_id);
1036
1037 #if defined(RTE_LIBRTE_IEEE1588)
1038                         if (bufs[num_rx]->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST) {
1039                                 priv->rx_timestamp =
1040                                         *dpaa2_timestamp_dynfield(bufs[num_rx]);
1041                         }
1042 #endif
1043
1044                         if (eth_data->dev_conf.rxmode.offloads &
1045                                         RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
1046                                 rte_vlan_strip(bufs[num_rx]);
1047                         }
1048
1049                         dq_storage++;
1050                         num_rx++;
1051                         num_pulled++;
1052                 } while (pending);
1053         /* Last VDQ provided all packets and more packets are requested */
1054         } while (next_pull && num_pulled == dpaa2_dqrr_size);
1055
1056         dpaa2_q->rx_pkts += num_rx;
1057
1058         return num_rx;
1059 }
1060
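/* Process the Tx confirmation queue: pull the confirmed FDs, release their
 * buffers back to the hardware buffer pool and, when IEEE1588 is enabled,
 * record the Tx timestamp from the frame annotation.
 */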
1061 uint16_t dpaa2_dev_tx_conf(void *queue)
1062 {
1063         /* Function to process Tx confirmation frames for a given device and VQ */
1064         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1065         struct qbman_result *dq_storage;
1066         uint32_t fqid = dpaa2_q->fqid;
1067         int ret, num_tx_conf = 0, num_pulled;
1068         uint8_t pending, status;
1069         struct qbman_swp *swp;
1070         const struct qbman_fd *fd, *next_fd;
1071         struct qbman_pull_desc pulldesc;
1072         struct qbman_release_desc releasedesc;
1073         uint32_t bpid;
1074         uint64_t buf;
1075 #if defined(RTE_LIBRTE_IEEE1588)
1076         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1077         struct dpaa2_dev_priv *priv = eth_data->dev_private;
1078         struct dpaa2_annot_hdr *annotation;
1079         void *v_addr;
1080         struct rte_mbuf *mbuf;
1081 #endif
1082
1083         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1084                 ret = dpaa2_affine_qbman_swp();
1085                 if (ret) {
1086                         DPAA2_PMD_ERR(
1087                                 "Failed to allocate IO portal, tid: %d\n",
1088                                 rte_gettid());
1089                         return 0;
1090                 }
1091         }
1092         swp = DPAA2_PER_LCORE_PORTAL;
1093
1094         do {
1095                 dq_storage = dpaa2_q->q_storage->dq_storage[0];
1096                 qbman_pull_desc_clear(&pulldesc);
1097                 qbman_pull_desc_set_fq(&pulldesc, fqid);
1098                 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1099                                 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
1100
1101                 qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
1102
1103                 while (1) {
1104                         if (qbman_swp_pull(swp, &pulldesc)) {
1105                                 DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
1106                                                    " QBMAN is busy\n");
1107                                 /* Portal was busy, try again */
1108                                 continue;
1109                         }
1110                         break;
1111                 }
1112
1113                 rte_prefetch0((void *)((size_t)(dq_storage + 1)));
1114                 /* Check if the previously issued command is completed. */
1115                 while (!qbman_check_command_complete(dq_storage))
1116                         ;
1117
1118                 num_pulled = 0;
1119                 pending = 1;
1120                 do {
1121                         /* Loop until the dq_storage is updated with
1122                          * new token by QBMAN
1123                          */
1124                         while (!qbman_check_new_result(dq_storage))
1125                                 ;
1126                         rte_prefetch0((void *)((size_t)(dq_storage + 2)));
1127                         /* Check whether the last pull command has expired and,
1128                          * if so, set the condition for loop termination.
1129                          */
1130                         if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1131                                 pending = 0;
1132                                 /* Check for valid frame. */
1133                                 status = qbman_result_DQ_flags(dq_storage);
1134                                 if (unlikely((status &
1135                                         QBMAN_DQ_STAT_VALIDFRAME) == 0))
1136                                         continue;
1137                         }
1138                         fd = qbman_result_DQ_fd(dq_storage);
1139
1140                         next_fd = qbman_result_DQ_fd(dq_storage + 1);
1141                         /* Prefetch Annotation address for the parse results */
1142                         rte_prefetch0((void *)(size_t)
1143                                 (DPAA2_GET_FD_ADDR(next_fd) +
1144                                  DPAA2_FD_PTA_SIZE + 16));
1145
1146                         bpid = DPAA2_GET_FD_BPID(fd);
1147
1148                         /* Create a release descriptor required for releasing
1149                          * buffers into QBMAN
1150                          */
1151                         qbman_release_desc_clear(&releasedesc);
1152                         qbman_release_desc_set_bpid(&releasedesc, bpid);
1153
1154                         buf = DPAA2_GET_FD_ADDR(fd);
1155                         /* feed them to bman */
1156                         do {
1157                                 ret = qbman_swp_release(swp, &releasedesc,
1158                                                         &buf, 1);
1159                         } while (ret == -EBUSY);
1160
1161                         dq_storage++;
1162                         num_tx_conf++;
1163                         num_pulled++;
1164 #if defined(RTE_LIBRTE_IEEE1588)
1165                         v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
1166                         mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
1167                                 rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
1168
1169                         if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
1170                                 annotation = (struct dpaa2_annot_hdr *)((size_t)
1171                                         DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
1172                                         DPAA2_FD_PTA_SIZE);
1173                                 priv->tx_timestamp = annotation->word2;
1174                         }
1175 #endif
1176                 } while (pending);
1177
1178         /* Last VDQ provided all packets and more packets are requested */
1179         } while (num_pulled == dpaa2_dqrr_size);
1180
1181         dpaa2_q->rx_pkts += num_tx_conf;
1182
1183         return num_tx_conf;
1184 }
1185
1186 /* Configure the egress frame annotation for timestamp update */
1187 static void enable_tx_tstamp(struct qbman_fd *fd)
1188 {
1189         struct dpaa2_faead *fd_faead;
1190
1191         /* Set frame annotation status field as valid */
1192         (fd)->simple.frc |= DPAA2_FD_FRC_FASV;
1193
1194         /* Set frame annotation egress action descriptor as valid */
1195         (fd)->simple.frc |= DPAA2_FD_FRC_FAEADV;
1196
1197         /* Set Annotation Length as 128B */
1198         (fd)->simple.ctrl |= DPAA2_FD_CTRL_ASAL;
1199
1200         /* enable update of confirmation frame annotation */
1201         fd_faead = (struct dpaa2_faead *)((size_t)
1202                         DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
1203                         DPAA2_FD_PTA_SIZE + DPAA2_FD_HW_ANNOT_FAEAD_OFFSET);
1204         fd_faead->ctrl = DPAA2_ANNOT_FAEAD_A2V | DPAA2_ANNOT_FAEAD_UPDV |
1205                                 DPAA2_ANNOT_FAEAD_UPD;
1206 }
1207
1208 /*
1209  * Callback to handle sending packets through WRIOP based interface
1210  */
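/* The queue's congestion state notification (CSCN) is polled before each
 * burst; if congestion persists for more than CONG_RETRY_COUNT iterations,
 * the remaining packets are not enqueued (skip_tx).
 */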
1211 uint16_t
1212 dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1213 {
1214         /* Function to transmit the frames to a given device and VQ */
1215         uint32_t loop, retry_count;
1216         int32_t ret;
1217         struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1218         struct rte_mbuf *mi;
1219         uint32_t frames_to_send;
1220         struct rte_mempool *mp;
1221         struct qbman_eq_desc eqdesc;
1222         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1223         struct qbman_swp *swp;
1224         uint16_t num_tx = 0;
1225         uint16_t bpid;
1226         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1227         struct dpaa2_dev_priv *priv = eth_data->dev_private;
1228         uint32_t flags[MAX_TX_RING_SLOTS] = {0};
1229         struct rte_mbuf **orig_bufs = bufs;
1230
1231         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1232                 ret = dpaa2_affine_qbman_swp();
1233                 if (ret) {
1234                         DPAA2_PMD_ERR(
1235                                 "Failed to allocate IO portal, tid: %d\n",
1236                                 rte_gettid());
1237                         return 0;
1238                 }
1239         }
1240         swp = DPAA2_PER_LCORE_PORTAL;
1241
1242         DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
1243                         eth_data, dpaa2_q->fqid);
1244
1245 #ifdef RTE_LIBRTE_IEEE1588
1246         /* The IEEE1588 driver needs a pointer to the Tx confirmation queue
1247          * corresponding to the last transmitted packet in order to read
1248          * the timestamp.
1249          */
1250         if ((*bufs)->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
1251                 priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
1252                 dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
1253                 priv->tx_timestamp = 0;
1254         }
1255 #endif
1256
1257         /* Prepare enqueue descriptor */
1258         qbman_eq_desc_clear(&eqdesc);
1259         qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1260         qbman_eq_desc_set_fq(&eqdesc, dpaa2_q->fqid);
1261
1262         /* Clear the unused FD fields before sending */
1263         while (nb_pkts) {
1264                 /* Check if the queue is congested */
1265                 retry_count = 0;
1266                 while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1267                         retry_count++;
1268                         /* Retry for some time before giving up */
1269                         if (retry_count > CONG_RETRY_COUNT)
1270                                 goto skip_tx;
1271                 }
1272
1273                 frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1274                         dpaa2_eqcr_size : nb_pkts;
1275
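                /* Build one FD per mbuf. Roughly: a direct, single-segment
                 * mbuf from a DPAA2-backed pool is mapped in place as a
                 * contiguous FD (the hardware later returns the buffer to
                 * its bpid); multi-segment mbufs become S/G FDs; mbufs from
                 * a non-DPAA2 pool are copied into a buffer from the default
                 * pool; external-buffer mbufs are freed by software after
                 * the enqueue. If dpaa2_seqn() holds a DQRR index, the
                 * enqueue also carries a DCA flag so QBMAN consumes the
                 * held DQRR entry.
                 */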
1276                 for (loop = 0; loop < frames_to_send; loop++) {
1277                         if (*dpaa2_seqn(*bufs)) {
1278                                 uint8_t dqrr_index = *dpaa2_seqn(*bufs) - 1;
1279
1280                                 flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
1281                                                 dqrr_index;
1282                                 DPAA2_PER_LCORE_DQRR_SIZE--;
1283                                 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
1284                                 *dpaa2_seqn(*bufs) = DPAA2_INVALID_MBUF_SEQN;
1285                         }
1286
1287                         if (likely(RTE_MBUF_DIRECT(*bufs))) {
1288                                 mp = (*bufs)->pool;
1289                                 /* Check for the common fast path and
1290                                  * set up the FD directly here.
1291                                  */
1292                                 if (likely(mp && mp->ops_index ==
1293                                     priv->bp_list->dpaa2_ops_index &&
1294                                     (*bufs)->nb_segs == 1 &&
1295                                     rte_mbuf_refcnt_read((*bufs)) == 1)) {
1296                                         if (unlikely(((*bufs)->ol_flags
1297                                                 & RTE_MBUF_F_TX_VLAN) ||
1298                                                 (eth_data->dev_conf.txmode.offloads
1299                                                 & RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
1300                                                 ret = rte_vlan_insert(bufs);
1301                                                 if (ret)
1302                                                         goto send_n_return;
1303                                         }
1304                                         DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1305                                         &fd_arr[loop], mempool_to_bpid(mp));
1306 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
1307                                         rte_mempool_check_cookies
1308                                                 (rte_mempool_from_obj((void *)*bufs),
1309                                                 (void **)bufs, 1, 0);
1310 #endif
1311                                         bufs++;
1312 #ifdef RTE_LIBRTE_IEEE1588
1313                                         enable_tx_tstamp(&fd_arr[loop]);
1314 #endif
1315                                         continue;
1316                                 }
1317                         } else {
1318                                 mi = rte_mbuf_from_indirect(*bufs);
1319                                 mp = mi->pool;
1320                         }
1321
1322                         if (unlikely(RTE_MBUF_HAS_EXTBUF(*bufs))) {
1323                                 if (unlikely((*bufs)->nb_segs > 1)) {
1324                                         if (eth_mbuf_to_sg_fd(*bufs,
1325                                                               &fd_arr[loop],
1326                                                               mp, 0))
1327                                                 goto send_n_return;
1328                                 } else {
1329                                         eth_mbuf_to_fd(*bufs,
1330                                                        &fd_arr[loop], 0);
1331                                 }
1332                                 bufs++;
1333 #ifdef RTE_LIBRTE_IEEE1588
1334                                 enable_tx_tstamp(&fd_arr[loop]);
1335 #endif
1336                                 continue;
1337                         }
1338
1339                         /* Not a hw_pkt pool allocated frame */
1340                         if (unlikely(!mp || !priv->bp_list)) {
1341                                 DPAA2_PMD_ERR("Err: No buffer pool attached");
1342                                 goto send_n_return;
1343                         }
1344
1345                         if (unlikely(((*bufs)->ol_flags & RTE_MBUF_F_TX_VLAN) ||
1346                                 (eth_data->dev_conf.txmode.offloads
1347                                 & RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
1348                                 int ret = rte_vlan_insert(bufs);
1349                                 if (ret)
1350                                         goto send_n_return;
1351                         }
1352                         if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1353                                 DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1354                                 /* alloc should be from the default buffer pool
1355                                  * attached to this interface
1356                                  */
1357                                 bpid = priv->bp_list->buf_pool.bpid;
1358
1359                                 if (unlikely((*bufs)->nb_segs > 1)) {
1360                                         DPAA2_PMD_ERR("S/G support not added"
1361                                                 " for non hw offload buffer");
1362                                         goto send_n_return;
1363                                 }
1364                                 if (eth_copy_mbuf_to_fd(*bufs,
1365                                                         &fd_arr[loop], bpid)) {
1366                                         goto send_n_return;
1367                                 }
1368                                 /* free the original packet */
1369                                 rte_pktmbuf_free(*bufs);
1370                         } else {
1371                                 bpid = mempool_to_bpid(mp);
1372                                 if (unlikely((*bufs)->nb_segs > 1)) {
1373                                         if (eth_mbuf_to_sg_fd(*bufs,
1374                                                         &fd_arr[loop],
1375                                                         mp, bpid))
1376                                                 goto send_n_return;
1377                                 } else {
1378                                         eth_mbuf_to_fd(*bufs,
1379                                                        &fd_arr[loop], bpid);
1380                                 }
1381                         }
1382 #ifdef RTE_LIBRTE_IEEE1588
1383                         enable_tx_tstamp(&fd_arr[loop]);
1384 #endif
1385                         bufs++;
1386                 }
1387
1388                 loop = 0;
1389                 retry_count = 0;
1390                 while (loop < frames_to_send) {
1391                         ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1392                                         &fd_arr[loop], &flags[loop],
1393                                         frames_to_send - loop);
1394                         if (unlikely(ret < 0)) {
1395                                 retry_count++;
1396                                 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1397                                         num_tx += loop;
1398                                         nb_pkts -= loop;
1399                                         goto send_n_return;
1400                                 }
1401                         } else {
1402                                 loop += ret;
1403                                 retry_count = 0;
1404                         }
1405                 }
1406
1407                 num_tx += loop;
1408                 nb_pkts -= loop;
1409         }
1410         dpaa2_q->tx_pkts += num_tx;
1411
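        /* Frames built from external-buffer mbufs were not handed to a
         * hardware buffer pool, so release those mbufs in software now
         * that their FDs have been enqueued.
         */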
1412         loop = 0;
1413         while (loop < num_tx) {
1414                 if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
1415                         rte_pktmbuf_free(*orig_bufs);
1416                 orig_bufs++;
1417                 loop++;
1418         }
1419
1420         return num_tx;
1421
1422 send_n_return:
1423         /* send any already prepared fd */
1424         if (loop) {
1425                 unsigned int i = 0;
1426
1427                 retry_count = 0;
1428                 while (i < loop) {
1429                         ret = qbman_swp_enqueue_multiple(swp, &eqdesc,
1430                                                          &fd_arr[i],
1431                                                          &flags[i],
1432                                                          loop - i);
1433                         if (unlikely(ret < 0)) {
1434                                 retry_count++;
1435                                 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1436                                         break;
1437                         } else {
1438                                 i += ret;
1439                                 retry_count = 0;
1440                         }
1441                 }
1442                 num_tx += i;
1443         }
1444 skip_tx:
1445         dpaa2_q->tx_pkts += num_tx;
1446
1447         loop = 0;
1448         while (loop < num_tx) {
1449                 if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
1450                         rte_pktmbuf_free(*orig_bufs);
1451                 orig_bufs++;
1452                 loop++;
1453         }
1454
1455         return num_tx;
1456 }
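
/*
 * Illustrative sketch (not part of the driver): applications normally reach
 * dpaa2_dev_tx() through the generic burst API. Something along these lines,
 * with port_id, queue_id, pkts[] and nb_pkts being placeholders, retries a
 * partial send:
 *
 *     uint16_t sent = 0;
 *
 *     while (sent < nb_pkts)
 *         sent += rte_eth_tx_burst(port_id, queue_id,
 *                                  &pkts[sent], nb_pkts - sent);
 *
 * A short return here typically means the EQCR was busy or the Tx queue was
 * congested; mbufs that were not sent remain owned by the caller.
 */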
1457
1458 void
1459 dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci,
1460                           __rte_unused struct dpaa2_queue *dpaa2_q)
1461 {
1462         struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1463         struct qbman_fd *fd;
1464         struct rte_mbuf *m;
1465
1466         fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
1467
1468         /* The port id does not matter here as the mbuf is freed right away */
1469         m = eth_fd_to_mbuf(fd, 0);
1470         rte_pktmbuf_free(m);
1471 }
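
/*
 * dpaa2_dev_free_eqresp_buf() appears to serve the slow path where an
 * enqueue response (used by the order-restoration enqueues prepared in
 * dpaa2_set_enqueue_descriptor() below) reports that a frame was not
 * accepted: the FD is turned back into an mbuf and released so the buffer
 * is not leaked.
 */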
1472
1473 static void
1474 dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
1475                              struct rte_mbuf *m,
1476                              struct qbman_eq_desc *eqdesc)
1477 {
1478         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1479         struct dpaa2_dev_priv *priv = eth_data->dev_private;
1480         struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
1481         struct eqresp_metadata *eqresp_meta;
1482         uint16_t orpid, seqnum;
1483         uint8_t dq_idx;
1484
1485         qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid);
1486
1487         if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
1488                 orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
1489                         DPAA2_EQCR_OPRID_SHIFT;
1490                 seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
1491                         DPAA2_EQCR_SEQNUM_SHIFT;
1492
1493                 if (!priv->en_loose_ordered) {
1494                         qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
1495                         qbman_eq_desc_set_response(eqdesc, (uint64_t)
1496                                 DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
1497                                 dpio_dev->eqresp_pi]), 1);
1498                         qbman_eq_desc_set_token(eqdesc, 1);
1499
1500                         eqresp_meta = &dpio_dev->eqresp_meta[
1501                                 dpio_dev->eqresp_pi];
1502                         eqresp_meta->dpaa2_q = dpaa2_q;
1503                         eqresp_meta->mp = m->pool;
1504
1505                         dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
1506                                 dpio_dev->eqresp_pi++ :
1507                                 (dpio_dev->eqresp_pi = 0);
1508                 } else {
1509                         qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
1510                 }
1511         } else {
1512                 dq_idx = *dpaa2_seqn(m) - 1;
1513                 qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
1514                 DPAA2_PER_LCORE_DQRR_SIZE--;
1515                 DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
1516         }
1517         *dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
1518 }
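
/*
 * The per-mbuf dpaa2_seqn() word drives the descriptor setup above: for
 * atomic queues it carries a DQRR index + 1 (consumed via DCA), while for
 * ordered queues it carries DPAA2_ENQUEUE_FLAG_ORP plus the ORP id and
 * sequence number. As a sketch of the inverse of the decode above (the
 * producer side lives in the Rx/event path, so this is illustrative only):
 *
 *     *dpaa2_seqn(m) = DPAA2_ENQUEUE_FLAG_ORP |
 *             (orpid << DPAA2_EQCR_OPRID_SHIFT) |
 *             (seqnum << DPAA2_EQCR_SEQNUM_SHIFT);
 */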
1519
1520 uint16_t
1521 dpaa2_dev_tx_multi_txq_ordered(void **queue,
1522                 struct rte_mbuf **bufs, uint16_t nb_pkts)
1523 {
1524         /* Function to transmit frames to their respective Tx queues. */
1525         uint32_t loop, retry_count;
1526         int32_t ret;
1527         struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1528         uint32_t frames_to_send;
1529         struct rte_mempool *mp;
1530         struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1531         struct dpaa2_queue *dpaa2_q[MAX_TX_RING_SLOTS];
1532         struct qbman_swp *swp;
1533         uint16_t bpid;
1534         struct rte_mbuf *mi;
1535         struct rte_eth_dev_data *eth_data;
1536         struct dpaa2_dev_priv *priv;
1537         struct dpaa2_queue *order_sendq;
1538
1539         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1540                 ret = dpaa2_affine_qbman_swp();
1541                 if (ret) {
1542                         DPAA2_PMD_ERR(
1543                                 "Failed to allocate IO portal, tid: %d\n",
1544                                 rte_gettid());
1545                         return 0;
1546                 }
1547         }
1548         swp = DPAA2_PER_LCORE_PORTAL;
1549
1550         for (loop = 0; loop < nb_pkts; loop++) {
1551                 dpaa2_q[loop] = (struct dpaa2_queue *)queue[loop];
1552                 eth_data = dpaa2_q[loop]->eth_data;
1553                 priv = eth_data->dev_private;
1554                 qbman_eq_desc_clear(&eqdesc[loop]);
1555                 if (*dpaa2_seqn(*bufs) && priv->en_ordered) {
1556                         order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
1557                         dpaa2_set_enqueue_descriptor(order_sendq,
1558                                                              (*bufs),
1559                                                              &eqdesc[loop]);
1560                 } else {
1561                         qbman_eq_desc_set_no_orp(&eqdesc[loop],
1562                                                          DPAA2_EQ_RESP_ERR_FQ);
1563                         qbman_eq_desc_set_fq(&eqdesc[loop],
1564                                                      dpaa2_q[loop]->fqid);
1565                 }
1566
1567                 retry_count = 0;
1568                 while (qbman_result_SCN_state(dpaa2_q[loop]->cscn)) {
1569                         retry_count++;
1570                         /* Retry for some time before giving up */
1571                         if (retry_count > CONG_RETRY_COUNT)
1572                                 goto send_frames;
1573                 }
1574
1575                 if (likely(RTE_MBUF_DIRECT(*bufs))) {
1576                         mp = (*bufs)->pool;
1577                         /* Check for the common fast path and
1578                          * set up the FD directly here.
1579                          */
1580                         if (likely(mp && mp->ops_index ==
1581                                 priv->bp_list->dpaa2_ops_index &&
1582                                 (*bufs)->nb_segs == 1 &&
1583                                 rte_mbuf_refcnt_read((*bufs)) == 1)) {
1584                                 if (unlikely((*bufs)->ol_flags
1585                                         & RTE_MBUF_F_TX_VLAN)) {
1586                                         ret = rte_vlan_insert(bufs);
1587                                         if (ret)
1588                                                 goto send_frames;
1589                                 }
1590                                 DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1591                                         &fd_arr[loop],
1592                                         mempool_to_bpid(mp));
1593                                 bufs++;
1594                                 dpaa2_q[loop]++;
1595                                 continue;
1596                         }
1597                 } else {
1598                         mi = rte_mbuf_from_indirect(*bufs);
1599                         mp = mi->pool;
1600                 }
1601                 /* Not a hw_pkt pool allocated frame */
1602                 if (unlikely(!mp || !priv->bp_list)) {
1603                         DPAA2_PMD_ERR("Err: No buffer pool attached");
1604                         goto send_frames;
1605                 }
1606
1607                 if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1608                         DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1609                         /* alloc should be from the default buffer pool
1610                          * attached to this interface
1611                          */
1612                         bpid = priv->bp_list->buf_pool.bpid;
1613
1614                         if (unlikely((*bufs)->nb_segs > 1)) {
1615                                 DPAA2_PMD_ERR(
1616                                         "S/G not supp for non hw offload buffer");
1617                                 goto send_frames;
1618                         }
1619                         if (eth_copy_mbuf_to_fd(*bufs,
1620                                                 &fd_arr[loop], bpid)) {
1621                                 goto send_frames;
1622                         }
1623                         /* free the original packet */
1624                         rte_pktmbuf_free(*bufs);
1625                 } else {
1626                         bpid = mempool_to_bpid(mp);
1627                         if (unlikely((*bufs)->nb_segs > 1)) {
1628                                 if (eth_mbuf_to_sg_fd(*bufs,
1629                                                       &fd_arr[loop],
1630                                                       mp,
1631                                                       bpid))
1632                                         goto send_frames;
1633                         } else {
1634                                 eth_mbuf_to_fd(*bufs,
1635                                                &fd_arr[loop], bpid);
1636                         }
1637                 }
1638
1639                 bufs++;
1640                 dpaa2_q[loop]++;
1641         }
1642
1643 send_frames:
1644         frames_to_send = loop;
1645         loop = 0;
        retry_count = 0;
1646         while (loop < frames_to_send) {
1647                 ret = qbman_swp_enqueue_multiple_desc(swp, &eqdesc[loop],
1648                                 &fd_arr[loop],
1649                                 frames_to_send - loop);
1650                 if (likely(ret > 0)) {
1651                         loop += ret;
1652                 } else {
1653                         retry_count++;
1654                         if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1655                                 break;
1656                 }
1657         }
1658
1659         return loop;
1660 }
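
/*
 * Unlike dpaa2_dev_tx(), the multi-queue variant above receives one Tx
 * queue pointer per mbuf (typically from an eventdev/Tx-adapter style
 * caller) and therefore prepares one enqueue descriptor per frame, so a
 * single qbman_swp_enqueue_multiple_desc() call can target different FQs.
 */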
1661
1662 /* Callback to handle sending ordered packets through WRIOP based interface */
1663 uint16_t
1664 dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
1665 {
1666         /* Function to transmit the frames to the given device and VQ */
1667         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1668         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1669         struct dpaa2_dev_priv *priv = eth_data->dev_private;
1670         struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
1671         struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
1672         struct rte_mbuf *mi;
1673         struct rte_mempool *mp;
1674         struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
1675         struct qbman_swp *swp;
1676         uint32_t frames_to_send, num_free_eq_desc;
1677         uint32_t loop, retry_count;
1678         int32_t ret;
1679         uint16_t num_tx = 0;
1680         uint16_t bpid;
1681
1682         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
1683                 ret = dpaa2_affine_qbman_swp();
1684                 if (ret) {
1685                         DPAA2_PMD_ERR(
1686                                 "Failed to allocate IO portal, tid: %d\n",
1687                                 rte_gettid());
1688                         return 0;
1689                 }
1690         }
1691         swp = DPAA2_PER_LCORE_PORTAL;
1692
1693         DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n",
1694                            eth_data, dpaa2_q->fqid);
1695
1696         /* This path also handles normal and atomic queues, since any
1697          * type of packet can be enqueued when ordered queues are in use.
1698          */
1699         while (nb_pkts) {
1700                 /*Check if the queue is congested*/
1701                 retry_count = 0;
1702                 while (qbman_result_SCN_state(dpaa2_q->cscn)) {
1703                         retry_count++;
1704                         /* Retry for some time before giving up */
1705                         if (retry_count > CONG_RETRY_COUNT)
1706                                 goto skip_tx;
1707                 }
1708
1709                 frames_to_send = (nb_pkts > dpaa2_eqcr_size) ?
1710                         dpaa2_eqcr_size : nb_pkts;
1711
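                /* With strict ordering, each ORP enqueue below also
                 * consumes an enqueue-response slot until the hardware
                 * writes its response, so clamp the burst to the number
                 * of free response descriptors.
                 */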
1712                 if (!priv->en_loose_ordered) {
1713                         if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
1714                                 num_free_eq_desc = dpaa2_free_eq_descriptors();
1715                                 if (num_free_eq_desc < frames_to_send)
1716                                         frames_to_send = num_free_eq_desc;
1717                         }
1718                 }
1719
1720                 for (loop = 0; loop < frames_to_send; loop++) {
1721                         /*Prepare enqueue descriptor*/
1722                         qbman_eq_desc_clear(&eqdesc[loop]);
1723
1724                         if (*dpaa2_seqn(*bufs)) {
1725                                 /* Use only Tx queue 0 for atomic/ordered
1726                                  * packets, as they could otherwise be
1727                                  * reordered on the way out of the interface.
1728                                  */
1729                                 dpaa2_set_enqueue_descriptor(order_sendq,
1730                                                              (*bufs),
1731                                                              &eqdesc[loop]);
1732                         } else {
1733                                 qbman_eq_desc_set_no_orp(&eqdesc[loop],
1734                                                          DPAA2_EQ_RESP_ERR_FQ);
1735                                 qbman_eq_desc_set_fq(&eqdesc[loop],
1736                                                      dpaa2_q->fqid);
1737                         }
1738
1739                         if (likely(RTE_MBUF_DIRECT(*bufs))) {
1740                                 mp = (*bufs)->pool;
1741                                 /* Check for the common fast path and
1742                                  * set up the FD directly here.
1743                                  */
1744                                 if (likely(mp && mp->ops_index ==
1745                                     priv->bp_list->dpaa2_ops_index &&
1746                                     (*bufs)->nb_segs == 1 &&
1747                                     rte_mbuf_refcnt_read((*bufs)) == 1)) {
1748                                         if (unlikely((*bufs)->ol_flags
1749                                                 & RTE_MBUF_F_TX_VLAN)) {
1750                                                 ret = rte_vlan_insert(bufs);
1751                                                 if (ret)
1752                                                         goto send_n_return;
1753                                         }
1754                                         DPAA2_MBUF_TO_CONTIG_FD((*bufs),
1755                                                 &fd_arr[loop],
1756                                                 mempool_to_bpid(mp));
1757                                         bufs++;
1758                                         continue;
1759                                 }
1760                         } else {
1761                                 mi = rte_mbuf_from_indirect(*bufs);
1762                                 mp = mi->pool;
1763                         }
1764                         /* Not a hw_pkt pool allocated frame */
1765                         if (unlikely(!mp || !priv->bp_list)) {
1766                                 DPAA2_PMD_ERR("Err: No buffer pool attached");
1767                                 goto send_n_return;
1768                         }
1769
1770                         if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
1771                                 DPAA2_PMD_WARN("Non DPAA2 buffer pool");
1772                                 /* alloc should be from the default buffer pool
1773                                  * attached to this interface
1774                                  */
1775                                 bpid = priv->bp_list->buf_pool.bpid;
1776
1777                                 if (unlikely((*bufs)->nb_segs > 1)) {
1778                                         DPAA2_PMD_ERR(
1779                                                 "S/G not supp for non hw offload buffer");
1780                                         goto send_n_return;
1781                                 }
1782                                 if (eth_copy_mbuf_to_fd(*bufs,
1783                                                         &fd_arr[loop], bpid)) {
1784                                         goto send_n_return;
1785                                 }
1786                                 /* free the original packet */
1787                                 rte_pktmbuf_free(*bufs);
1788                         } else {
1789                                 bpid = mempool_to_bpid(mp);
1790                                 if (unlikely((*bufs)->nb_segs > 1)) {
1791                                         if (eth_mbuf_to_sg_fd(*bufs,
1792                                                               &fd_arr[loop],
1793                                                               mp,
1794                                                               bpid))
1795                                                 goto send_n_return;
1796                                 } else {
1797                                         eth_mbuf_to_fd(*bufs,
1798                                                        &fd_arr[loop], bpid);
1799                                 }
1800                         }
1801                         bufs++;
1802                 }
1803
1804                 loop = 0;
1805                 retry_count = 0;
1806                 while (loop < frames_to_send) {
1807                         ret = qbman_swp_enqueue_multiple_desc(swp,
1808                                         &eqdesc[loop], &fd_arr[loop],
1809                                         frames_to_send - loop);
1810                         if (unlikely(ret < 0)) {
1811                                 retry_count++;
1812                                 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
1813                                         num_tx += loop;
1814                                         nb_pkts -= loop;
1815                                         goto send_n_return;
1816                                 }
1817                         } else {
1818                                 loop += ret;
1819                                 retry_count = 0;
1820                         }
1821                 }
1822
1823                 num_tx += loop;
1824                 nb_pkts -= loop;
1825         }
1826         dpaa2_q->tx_pkts += num_tx;
1827         return num_tx;
1828
1829 send_n_return:
1830         /* send any already prepared fd */
1831         if (loop) {
1832                 unsigned int i = 0;
1833
1834                 retry_count = 0;
1835                 while (i < loop) {
1836                         ret = qbman_swp_enqueue_multiple_desc(swp,
1837                                        &eqdesc[i], &fd_arr[i], loop - i);
1838                         if (unlikely(ret < 0)) {
1839                                 retry_count++;
1840                                 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
1841                                         break;
1842                         } else {
1843                                 i += ret;
1844                                 retry_count = 0;
1845                         }
1846                 }
1847                 num_tx += i;
1848         }
1849 skip_tx:
1850         dpaa2_q->tx_pkts += num_tx;
1851         return num_tx;
1852 }
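
/*
 * Summary of the ordered Tx path above: mbufs that carry scheduler state in
 * dpaa2_seqn() are enqueued on Tx queue 0, with an ORP descriptor (ordered)
 * or a DQRR consume (atomic), so that the hardware can restore the original
 * Rx order before transmission; mbufs without such state use plain enqueues
 * on the caller's queue.
 */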
1853
1854 #if defined(RTE_TOOLCHAIN_GCC)
1855 #pragma GCC diagnostic push
1856 #pragma GCC diagnostic ignored "-Wcast-qual"
1857 #elif defined(RTE_TOOLCHAIN_CLANG)
1858 #pragma clang diagnostic push
1859 #pragma clang diagnostic ignored "-Wcast-qual"
1860 #endif
1861
1862 /* This function loopbacks all the received packets.*/
1863 uint16_t
1864 dpaa2_dev_loopback_rx(void *queue,
1865                       struct rte_mbuf **bufs __rte_unused,
1866                       uint16_t nb_pkts)
1867 {
1868         /* Function to receive frames for a given device and VQ */
1869         struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
1870         struct qbman_result *dq_storage, *dq_storage1 = NULL;
1871         uint32_t fqid = dpaa2_q->fqid;
1872         int ret, num_rx = 0, num_tx = 0, pull_size;
1873         uint8_t pending, status;
1874         struct qbman_swp *swp;
1875         struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE];
1876         struct qbman_pull_desc pulldesc;
1877         struct qbman_eq_desc eqdesc;
1878         struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
1879         struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
1880         struct dpaa2_dev_priv *priv = eth_data->dev_private;
1881         struct dpaa2_queue *tx_q = priv->tx_vq[0];
1882         /* TODO: currently only the 1st Tx queue is used for loopback */
1883
1884         if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
1885                 ret = dpaa2_affine_qbman_ethrx_swp();
1886                 if (ret) {
1887                         DPAA2_PMD_ERR("Failure in affining portal");
1888                         return 0;
1889                 }
1890         }
1891         swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
1892         pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts;
1893         if (unlikely(!q_storage->active_dqs)) {
1894                 q_storage->toggle = 0;
1895                 dq_storage = q_storage->dq_storage[q_storage->toggle];
1896                 q_storage->last_num_pkts = pull_size;
1897                 qbman_pull_desc_clear(&pulldesc);
1898                 qbman_pull_desc_set_numframes(&pulldesc,
1899                                               q_storage->last_num_pkts);
1900                 qbman_pull_desc_set_fq(&pulldesc, fqid);
1901                 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
1902                         (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
1903                 if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1904                         while (!qbman_check_command_complete(
1905                                get_swp_active_dqs(
1906                                DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1907                                 ;
1908                         clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1909                 }
1910                 while (1) {
1911                         if (qbman_swp_pull(swp, &pulldesc)) {
1912                                 DPAA2_PMD_DP_DEBUG(
1913                                         "VDQ command not issued. QBMAN busy\n");
1914                                 /* Portal was busy, try again */
1915                                 continue;
1916                         }
1917                         break;
1918                 }
1919                 q_storage->active_dqs = dq_storage;
1920                 q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
1921                 set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
1922                                    dq_storage);
1923         }
1924
1925         dq_storage = q_storage->active_dqs;
1926         rte_prefetch0((void *)(size_t)(dq_storage));
1927         rte_prefetch0((void *)(size_t)(dq_storage + 1));
1928
1929         /* Prepare next pull descriptor. This will give space for the
1930          * prefetching done on DQRR entries
1931          */
1932         q_storage->toggle ^= 1;
1933         dq_storage1 = q_storage->dq_storage[q_storage->toggle];
1934         qbman_pull_desc_clear(&pulldesc);
1935         qbman_pull_desc_set_numframes(&pulldesc, pull_size);
1936         qbman_pull_desc_set_fq(&pulldesc, fqid);
1937         qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
1938                 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
1939
1940         /*Prepare enqueue descriptor*/
1941         qbman_eq_desc_clear(&eqdesc);
1942         qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
1943         qbman_eq_desc_set_response(&eqdesc, 0, 0);
1944         qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid);
1945
1946         /* Check if the previously issued command has completed.
1947          * Note that the SWP may be shared between the Ethernet
1948          * and SEC drivers.
1949          */
1950         while (!qbman_check_command_complete(dq_storage))
1951                 ;
1952         if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
1953                 clear_swp_active_dqs(q_storage->active_dpio_id);
1954
1955         pending = 1;
1956
1957         do {
1958                 /* Loop until the dq_storage is updated with
1959                  * new token by QBMAN
1960                  */
1961                 while (!qbman_check_new_result(dq_storage))
1962                         ;
1963                 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
1964                 /* Check whether the last pull command has expired
1965                  * and, if so, set the loop termination condition.
1966                  */
1967                 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
1968                         pending = 0;
1969                         /* Check for valid frame. */
1970                         status = qbman_result_DQ_flags(dq_storage);
1971                         if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
1972                                 continue;
1973                 }
1974                 fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage);
1975
1976                 dq_storage++;
1977                 num_rx++;
1978         } while (pending);
1979
1980         while (num_tx < num_rx) {
1981                 num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc,
1982                                 &fd[num_tx], 0, num_rx - num_tx);
1983         }
1984
1985         if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
1986                 while (!qbman_check_command_complete(
1987                        get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
1988                         ;
1989                 clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
1990         }
1991         /* issue a volatile dequeue command for next pull */
1992         while (1) {
1993                 if (qbman_swp_pull(swp, &pulldesc)) {
1994                         DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
1995                                           " QBMAN is busy (2)\n");
1996                         continue;
1997                 }
1998                 break;
1999         }
2000         q_storage->active_dqs = dq_storage1;
2001         q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
2002         set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
2003
2004         dpaa2_q->rx_pkts += num_rx;
2005         dpaa2_q->tx_pkts += num_tx;
2006
2007         return 0;
2008 }
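
/*
 * The loopback handler above pulls a burst from the Rx queue and re-enqueues
 * the same FDs on Tx queue 0 without building mbufs for the caller, which is
 * why it returns 0. The two dq_storage buffers are used in a ping-pong
 * fashion (q_storage->toggle) so the next volatile dequeue can be issued
 * before the current burst has been fully processed.
 */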
2009 #if defined(RTE_TOOLCHAIN_GCC)
2010 #pragma GCC diagnostic pop
2011 #elif defined(RTE_TOOLCHAIN_CLANG)
2012 #pragma clang diagnostic pop
2013 #endif