/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_dev.h>

#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"

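/* Parse the hardware frame annotation and derive the mbuf packet type.
 * Walks the L2/L3/L4 presence bits in annotation words 3 and 4 and
 * returns the matching RTE_PTYPE_* mask (RTE_PTYPE_UNKNOWN if the frame
 * cannot be classified).
 */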
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse(uint64_t hw_annot_addr)
{
        uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
        struct dpaa2_annot_hdr *annotation =
                        (struct dpaa2_annot_hdr *)hw_annot_addr;

        PMD_RX_LOG(DEBUG, "annotation = 0x%lx   ", annotation->word4);

        if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
                pkt_type = RTE_PTYPE_L2_ETHER_ARP;
                goto parse_done;
        } else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
                pkt_type = RTE_PTYPE_L2_ETHER;
        } else {
                goto parse_done;
        }

        if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
                             L3_IPV4_N_PRESENT)) {
                pkt_type |= RTE_PTYPE_L3_IPV4;
                if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
                        L3_IP_N_OPT_PRESENT))
                        pkt_type |= RTE_PTYPE_L3_IPV4_EXT;

        } else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
                  L3_IPV6_N_PRESENT)) {
                pkt_type |= RTE_PTYPE_L3_IPV6;
                if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
                    L3_IP_N_OPT_PRESENT))
                        pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
        } else {
                goto parse_done;
        }

        if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
            L3_IP_1_MORE_FRAGMENT |
            L3_IP_N_FIRST_FRAGMENT |
            L3_IP_N_MORE_FRAGMENT)) {
                pkt_type |= RTE_PTYPE_L4_FRAG;
                goto parse_done;
        } else {
                pkt_type |= RTE_PTYPE_L4_NONFRAG;
        }

        if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
                pkt_type |= RTE_PTYPE_L4_UDP;

        else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
                pkt_type |= RTE_PTYPE_L4_TCP;

        else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
                pkt_type |= RTE_PTYPE_L4_SCTP;

        else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
                pkt_type |= RTE_PTYPE_L4_ICMP;

        else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
                pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
        return pkt_type;
}

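/* Translate frame annotation status bits into mbuf offload flags:
 * VLAN presence and L3/L4 checksum errors reported by hardware.
 */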
static inline void __attribute__((hot))
dpaa2_dev_rx_offload(uint64_t hw_annot_addr, struct rte_mbuf *mbuf)
{
        struct dpaa2_annot_hdr *annotation =
                (struct dpaa2_annot_hdr *)hw_annot_addr;

        if (BIT_ISSET_AT_POS(annotation->word3,
                             L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
                mbuf->ol_flags |= PKT_RX_VLAN;

        if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
                mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;

        if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
                mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}

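/* Convert a scatter/gather frame descriptor into an mbuf chain.
 * The first SGE becomes the head segment and carries the packet type
 * and offload flags parsed from the annotation area; the remaining
 * SGEs are linked as further segments. The buffer holding the S/G
 * table itself is released back to its pool before returning.
 */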
static inline struct rte_mbuf *__attribute__((hot))
eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
{
        struct qbman_sge *sgt, *sge;
        dma_addr_t sg_addr;
        int i = 0;
        uint64_t fd_addr;
        struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

        fd_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

        /* Get Scatter gather table address */
        sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));

        sge = &sgt[i++];
        sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

        /* First Scatter gather entry */
        first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
        /* Prepare all the metadata for first segment */
        first_seg->buf_addr = (uint8_t *)sg_addr;
        first_seg->ol_flags = 0;
        first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
        first_seg->data_len = sge->length & 0x1FFFF;
        first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
        first_seg->nb_segs = 1;
        first_seg->next = NULL;

        first_seg->packet_type = dpaa2_dev_rx_parse(
                         (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
                         + DPAA2_FD_PTA_SIZE);
        dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
                        DPAA2_GET_FD_ADDR(fd)) +
                        DPAA2_FD_PTA_SIZE, first_seg);
        rte_mbuf_refcnt_set(first_seg, 1);
        cur_seg = first_seg;
        while (!DPAA2_SG_IS_FINAL(sge)) {
                sge = &sgt[i++];
                sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(
                                DPAA2_GET_FLE_ADDR(sge));
                next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
                        rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
                next_seg->buf_addr  = (uint8_t *)sg_addr;
                next_seg->data_off  = DPAA2_GET_FLE_OFFSET(sge);
                next_seg->data_len  = sge->length & 0x1FFFF;
                first_seg->nb_segs += 1;
                rte_mbuf_refcnt_set(next_seg, 1);
                cur_seg->next = next_seg;
                next_seg->next = NULL;
                cur_seg = next_seg;
        }
        temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
        rte_mbuf_refcnt_set(temp, 1);
        rte_pktmbuf_free_seg(temp);

        return (void *)first_seg;
}

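/* Convert a simple (single-buffer) frame descriptor into an mbuf.
 * The mbuf header lives inline in the buffer's metadata area; its
 * length, offset, packet type and offload flags are repopulated from
 * the FD and the hardware annotation.
 */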
static inline struct rte_mbuf *__attribute__((hot))
eth_fd_to_mbuf(const struct qbman_fd *fd)
{
        struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
                DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
                     rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

        /* need to repopulate some of the fields,
         * as they may have changed in the last transmission
         */
        mbuf->nb_segs = 1;
        mbuf->ol_flags = 0;
        mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
        mbuf->data_len = DPAA2_GET_FD_LEN(fd);
        mbuf->pkt_len = mbuf->data_len;

        /* Parse the packet */
        /* parse results are after the private - sw annotation area */
        mbuf->packet_type = dpaa2_dev_rx_parse(
                        (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
                         + DPAA2_FD_PTA_SIZE);

        dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
                             DPAA2_GET_FD_ADDR(fd)) +
                             DPAA2_FD_PTA_SIZE, mbuf);

        mbuf->next = NULL;
        rte_mbuf_refcnt_set(mbuf, 1);

        PMD_RX_LOG(DEBUG, "to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d, "
                "fd_off=%d fd =%lx, meta = %d  bpid =%d, len=%d\n",
                mbuf, mbuf->buf_addr, mbuf->data_off,
                DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
                DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

        return mbuf;
}

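/* Build a scatter/gather frame descriptor from a multi-segment mbuf.
 * A buffer is taken from the mbuf's pool to hold the S/G table and
 * each segment is described by one SGE. Buffers that must not be
 * freed by hardware (direct mbufs with refcnt > 1, or indirect mbufs
 * whose owner is still referenced) get an invalid BPID on their SGE.
 */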
static int __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
                  struct qbman_fd *fd, uint16_t bpid)
{
        struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
        struct qbman_sge *sgt, *sge = NULL;
        int i;

        /* First prepare the FD to be transmitted */
        /* Resetting the buffer pool id and offset field */
        fd->simple.bpid_offset = 0;

        temp = rte_pktmbuf_alloc(mbuf->pool);
        if (temp == NULL) {
                PMD_TX_LOG(ERR, "No memory to allocate S/G table");
                return -ENOMEM;
        }

        DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
        DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
        DPAA2_SET_FD_OFFSET(fd, temp->data_off);
        DPAA2_SET_FD_BPID(fd, bpid);
        DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
        DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
        /* Set Scatter gather table and Scatter gather entries */
        sgt = (struct qbman_sge *)(
                        (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
                        + DPAA2_GET_FD_OFFSET(fd));

        for (i = 0; i < mbuf->nb_segs; i++) {
                sge = &sgt[i];
                /* Resetting the buffer pool id and offset field */
                sge->fin_bpid_offset = 0;
                DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
                DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
                sge->length = cur_seg->data_len;
                if (RTE_MBUF_DIRECT(cur_seg)) {
                        if (rte_mbuf_refcnt_read(cur_seg) > 1) {
                                /* If refcnt > 1, invalid bpid is set to ensure
                                 * buffer is not freed by HW
                                 */
                                DPAA2_SET_FLE_IVP(sge);
                                rte_mbuf_refcnt_update(cur_seg, -1);
                        } else
                                DPAA2_SET_FLE_BPID(sge,
                                                mempool_to_bpid(cur_seg->pool));
                        cur_seg = cur_seg->next;
                } else {
                        /* Get owner MBUF from indirect buffer */
                        mi = rte_mbuf_from_indirect(cur_seg);
                        if (rte_mbuf_refcnt_read(mi) > 1) {
                                /* If refcnt > 1, invalid bpid is set to ensure
                                 * owner buffer is not freed by HW
                                 */
                                DPAA2_SET_FLE_IVP(sge);
                        } else {
                                DPAA2_SET_FLE_BPID(sge,
                                                   mempool_to_bpid(mi->pool));
                                rte_mbuf_refcnt_update(mi, 1);
                        }
                        prev_seg = cur_seg;
                        cur_seg = cur_seg->next;
                        prev_seg->next = NULL;
                        rte_pktmbuf_free(prev_seg);
                }
        }
        DPAA2_SG_SET_FINAL(sge, true);
        return 0;
}

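/* Build a simple frame descriptor from a single-segment mbuf.
 * Direct mbufs with refcnt > 1, and indirect mbufs whose owner is
 * still referenced, get an invalid BPID so hardware does not free
 * the underlying buffer.
 */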
static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
               struct qbman_fd *fd, uint16_t bpid) __attribute__((unused));

static void __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
               struct qbman_fd *fd, uint16_t bpid)
{
        /* Resetting the buffer pool id and offset field */
        fd->simple.bpid_offset = 0;

        DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
        DPAA2_SET_FD_LEN(fd, mbuf->data_len);
        DPAA2_SET_FD_BPID(fd, bpid);
        DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
        DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);

        PMD_TX_LOG(DEBUG, "mbuf =%p, mbuf->buf_addr =%p, off = %d, "
                "fd_off=%d fd =%lx, meta = %d  bpid =%d, len=%d\n",
                mbuf, mbuf->buf_addr, mbuf->data_off,
                DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
                DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
        if (RTE_MBUF_DIRECT(mbuf)) {
                if (rte_mbuf_refcnt_read(mbuf) > 1) {
                        DPAA2_SET_FD_IVP(fd);
                        rte_mbuf_refcnt_update(mbuf, -1);
                }
        } else {
                struct rte_mbuf *mi;

                mi = rte_mbuf_from_indirect(mbuf);
                if (rte_mbuf_refcnt_read(mi) > 1)
                        DPAA2_SET_FD_IVP(fd);
                else
                        rte_mbuf_refcnt_update(mi, 1);
                rte_pktmbuf_free(mbuf);
        }
}

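/* Fall-back Tx path for mbufs not backed by a DPAA2 hardware mempool:
 * allocate a buffer from the hardware pool, copy the packet data and
 * the relevant mbuf fields into it, and build the FD from the copy.
 */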
static inline int __attribute__((hot))
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
                    struct qbman_fd *fd, uint16_t bpid)
{
        struct rte_mbuf *m;
        void *mb = NULL;

        if (rte_dpaa2_mbuf_alloc_bulk(
                rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
                PMD_TX_LOG(WARNING, "Unable to allocate DPAA2 buffer");
                return -1;
        }
        m = (struct rte_mbuf *)mb;
        memcpy((char *)m->buf_addr + mbuf->data_off,
               (void *)((char *)mbuf->buf_addr + mbuf->data_off),
                mbuf->pkt_len);

        /* Copy required fields */
        m->data_off = mbuf->data_off;
        m->ol_flags = mbuf->ol_flags;
        m->packet_type = mbuf->packet_type;
        m->tx_offload = mbuf->tx_offload;

        /* Resetting the buffer pool id and offset field */
        fd->simple.bpid_offset = 0;

        DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(m));
        DPAA2_SET_FD_LEN(fd, mbuf->data_len);
        DPAA2_SET_FD_BPID(fd, bpid);
        DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
        DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);

        PMD_TX_LOG(DEBUG, " mbuf %p BMAN buf addr %p",
                   (void *)mbuf, mbuf->buf_addr);

        PMD_TX_LOG(DEBUG, " fdaddr =%lx bpid =%d meta =%d off =%d, len =%d",
                   DPAA2_GET_FD_ADDR(fd),
                DPAA2_GET_FD_BPID(fd),
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
                DPAA2_GET_FD_OFFSET(fd),
                DPAA2_GET_FD_LEN(fd));

        return 0;
}

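/* Rx burst callback (prefetch mode): a volatile dequeue (pull) command
 * is kept outstanding on the queue's DQ storage, so results are
 * typically already available when this function runs. Dequeued FDs
 * are converted to mbufs and a fresh pull command is issued for the
 * next call.
 */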
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        /* Function receives frames for a given device and VQ */
        struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
        struct qbman_result *dq_storage;
        uint32_t fqid = dpaa2_q->fqid;
        int ret, num_rx = 0;
        uint8_t is_last = 0, status;
        struct qbman_swp *swp;
        const struct qbman_fd *fd[DPAA2_DQRR_RING_SIZE];
        struct qbman_pull_desc pulldesc;
        struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
        struct rte_eth_dev *dev = dpaa2_q->dev;

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
                        RTE_LOG(ERR, PMD, "Failure in affining portal\n");
                        return 0;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;
        if (!q_storage->active_dqs) {
                q_storage->toggle = 0;
                dq_storage = q_storage->dq_storage[q_storage->toggle];
                qbman_pull_desc_clear(&pulldesc);
                qbman_pull_desc_set_numframes(&pulldesc,
                                              (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
                                               DPAA2_DQRR_RING_SIZE : nb_pkts);
                qbman_pull_desc_set_fq(&pulldesc, fqid);
                qbman_pull_desc_set_storage(&pulldesc, dq_storage,
                        (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
                if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
                        while (!qbman_check_command_complete(
                               get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
                                ;
                        clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
                }
                while (1) {
                        if (qbman_swp_pull(swp, &pulldesc)) {
                                PMD_RX_LOG(WARNING, "VDQ command is not issued."
                                           " QBMAN is busy\n");
                                /* Portal was busy, try again */
                                continue;
                        }
                        break;
                }
                q_storage->active_dqs = dq_storage;
                q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
                set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);
        }
        dq_storage = q_storage->active_dqs;
        /* Check if the previously issued command is completed.
         * Also seems like the SWP is shared between the Ethernet Driver
         * and the SEC driver.
         */
        while (!qbman_check_command_complete(dq_storage))
                ;
        if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
                clear_swp_active_dqs(q_storage->active_dpio_id);
        while (!is_last) {
                /* Loop until the dq_storage is updated with
                 * new token by QBMAN
                 */
                while (!qbman_check_new_result(dq_storage))
                        ;
                rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));
                /* Check whether the last pull command has expired and
                 * set the condition for loop termination
                 */
                if (qbman_result_DQ_is_pull_complete(dq_storage)) {
                        is_last = 1;
                        /* Check for valid frame. */
                        status = (uint8_t)qbman_result_DQ_flags(dq_storage);
                        if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
                                continue;
                }
                fd[num_rx] = qbman_result_DQ_fd(dq_storage);

                /* Prefetch Annotation address for the parse results */
                rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(fd[num_rx])
                                + DPAA2_FD_PTA_SIZE + 16));

                if (unlikely(DPAA2_FD_GET_FORMAT(fd[num_rx]) == qbman_fd_sg))
                        bufs[num_rx] = eth_sg_fd_to_mbuf(fd[num_rx]);
                else
                        bufs[num_rx] = eth_fd_to_mbuf(fd[num_rx]);
                bufs[num_rx]->port = dev->data->port_id;

                if (dev->data->dev_conf.rxmode.hw_vlan_strip)
                        rte_vlan_strip(bufs[num_rx]);

                dq_storage++;
                num_rx++;
        }

        if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
                while (!qbman_check_command_complete(
                       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
                        ;
                clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
        }
        q_storage->toggle ^= 1;
        dq_storage = q_storage->dq_storage[q_storage->toggle];
        qbman_pull_desc_clear(&pulldesc);
        qbman_pull_desc_set_numframes(&pulldesc, DPAA2_DQRR_RING_SIZE);
        qbman_pull_desc_set_fq(&pulldesc, fqid);
        qbman_pull_desc_set_storage(&pulldesc, dq_storage,
                        (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
        /* Issue a volatile dequeue command. */
        while (1) {
                if (qbman_swp_pull(swp, &pulldesc)) {
                        PMD_RX_LOG(WARNING, "VDQ command is not issued."
                                   " QBMAN is busy\n");
                        continue;
                }
                break;
        }
        q_storage->active_dqs = dq_storage;
        q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
        set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);

        dpaa2_q->rx_pkts += num_rx;

        /* Return the total number of packets received to DPAA2 app */
        return num_rx;
}

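/* Eventdev Rx callback: convert the dequeued FD into an mbuf, fill the
 * rte_event from the queue's configured event template and consume the
 * DQRR entry.
 */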
void __attribute__((hot))
dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
                                 const struct qbman_fd *fd,
                                 const struct qbman_result *dq,
                                 struct dpaa2_queue *rxq,
                                 struct rte_event *ev)
{
        ev->mbuf = eth_fd_to_mbuf(fd);

        ev->flow_id = rxq->ev.flow_id;
        ev->sub_event_type = rxq->ev.sub_event_type;
        ev->event_type = RTE_EVENT_TYPE_ETHDEV;
        ev->op = RTE_EVENT_OP_NEW;
        ev->sched_type = rxq->ev.sched_type;
        ev->queue_id = rxq->ev.queue_id;
        ev->priority = rxq->ev.priority;

        qbman_swp_dqrr_consume(swp, dq);
}

/*
 * Callback to handle sending packets through WRIOP based interface
 */
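/* Frames are enqueued in batches of up to MAX_TX_RING_SLOTS. The
 * queue's congestion state notification (CSCN) is polled first and the
 * burst is abandoned after CONG_RETRY_COUNT retries. mbufs that do not
 * come from a DPAA2-backed mempool are copied into hardware-pool
 * buffers via eth_copy_mbuf_to_fd().
 */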
uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        /* Function to transmit the frames to the given device and VQ */
        uint32_t loop, retry_count;
        int32_t ret;
        struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
        struct rte_mbuf *mi;
        uint32_t frames_to_send;
        struct rte_mempool *mp;
        struct qbman_eq_desc eqdesc;
        struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
        struct qbman_swp *swp;
        uint16_t num_tx = 0;
        uint16_t bpid;
        struct rte_eth_dev *dev = dpaa2_q->dev;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
                        RTE_LOG(ERR, PMD, "Failure in affining portal\n");
                        return 0;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;

        PMD_TX_LOG(DEBUG, "===> dev =%p, fqid =%d", dev, dpaa2_q->fqid);

        /* Prepare enqueue descriptor */
        qbman_eq_desc_clear(&eqdesc);
        qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
        qbman_eq_desc_set_response(&eqdesc, 0, 0);
        qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
                             dpaa2_q->flow_id, dpaa2_q->tc_index);

        /* Clear the unused FD fields before sending */
        while (nb_pkts) {
                /* Check if the queue is congested */
                retry_count = 0;
                while (qbman_result_SCN_state(dpaa2_q->cscn)) {
                        retry_count++;
                        /* Retry for some time before giving up */
                        if (retry_count > CONG_RETRY_COUNT)
                                goto skip_tx;
                }

                frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts;

                for (loop = 0; loop < frames_to_send; loop++) {
                        fd_arr[loop].simple.frc = 0;
                        DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
                        DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL);
                        if (RTE_MBUF_DIRECT(*bufs)) {
                                mp = (*bufs)->pool;
                        } else {
                                mi = rte_mbuf_from_indirect(*bufs);
                                mp = mi->pool;
                        }
                        /* Not a hw_pkt pool allocated frame */
                        if (unlikely(!mp || !priv->bp_list)) {
                                PMD_TX_LOG(ERR, "err: no bpool attached");
                                goto send_n_return;
                        }

                        if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
                                PMD_TX_LOG(ERR, "non hw offload buffer");
                                /* alloc should be from the default buffer pool
                                 * attached to this interface
                                 */
                                bpid = priv->bp_list->buf_pool.bpid;

                                if (unlikely((*bufs)->nb_segs > 1)) {
                                        PMD_TX_LOG(ERR, "S/G support not added"
                                                " for non hw offload buffer");
                                        goto send_n_return;
                                }
                                if (eth_copy_mbuf_to_fd(*bufs,
                                                        &fd_arr[loop], bpid)) {
                                        goto send_n_return;
                                }
                                /* free the original packet */
                                rte_pktmbuf_free(*bufs);
                        } else {
                                bpid = mempool_to_bpid(mp);
                                if (unlikely((*bufs)->nb_segs > 1)) {
                                        if (eth_mbuf_to_sg_fd(*bufs,
                                                        &fd_arr[loop], bpid))
                                                goto send_n_return;
                                } else {
                                        eth_mbuf_to_fd(*bufs,
                                                       &fd_arr[loop], bpid);
                                }
                        }
                        bufs++;
                }
                loop = 0;
                while (loop < frames_to_send) {
                        loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
                                        &fd_arr[loop], frames_to_send - loop);
                }

                num_tx += frames_to_send;
                dpaa2_q->tx_pkts += frames_to_send;
                nb_pkts -= frames_to_send;
        }
        return num_tx;

send_n_return:
        /* send any already prepared fd */
        if (loop) {
                unsigned int i = 0;

                while (i < loop) {
                        i += qbman_swp_enqueue_multiple(swp, &eqdesc,
                                                        &fd_arr[i], loop - i);
                }
                num_tx += loop;
                dpaa2_q->tx_pkts += loop;
        }
skip_tx:
        return num_tx;
}

/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        (void)queue;
        (void)bufs;
        (void)nb_pkts;
        return 0;
}