dma/dpaa2: support DMA operations
[dpdk.git] drivers/dma/dpaa2/dpaa2_qdma.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2022 NXP
3  */
4
5 #include <rte_eal.h>
6 #include <rte_fslmc.h>
7 #include <rte_dmadev.h>
8 #include <rte_dmadev_pmd.h>
9 #include <rte_kvargs.h>
10
11 #include <mc/fsl_dpdmai.h>
12
13 #include "rte_pmd_dpaa2_qdma.h"
14 #include "dpaa2_qdma.h"
15 #include "dpaa2_qdma_logs.h"
16
17 #define DPAA2_QDMA_PREFETCH "prefetch"
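/*
 * Hedged note (illustrative): "prefetch" is a device argument parsed by
 * dpaa2_qdma_get_devargs() and, when set to "1", selects the prefetch-enabled
 * dequeue path in dpaa2_qdma_vchan_setup().  A devargs string along the lines
 * of "fslmc:dpdmai.<n>,prefetch=1" is assumed here purely as an example; the
 * exact device name depends on the DPRC configuration.
 */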
18
19 /* Dynamic log type identifier */
20 int dpaa2_qdma_logtype;
21
22 uint32_t dpaa2_coherent_no_alloc_cache;
23 uint32_t dpaa2_coherent_alloc_cache;
24
25 static inline int
26 qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
27                      uint32_t len, struct qbman_fd *fd,
28                      struct rte_dpaa2_qdma_rbp *rbp, int ser)
29 {
30         fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
31         fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));
32
33         fd->simple_pci.len_sl = len;
34
35         fd->simple_pci.bmt = 1;
36         fd->simple_pci.fmt = 3;
37         fd->simple_pci.sl = 1;
38         fd->simple_pci.ser = ser;
39
40         fd->simple_pci.sportid = rbp->sportid;  /* source port ID (e.g. PCIe 3) */
41         fd->simple_pci.srbp = rbp->srbp;
42         if (rbp->srbp)
43                 fd->simple_pci.rdttype = 0;
44         else
45                 fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;
46
47         /* destination is PCIe memory */
48         fd->simple_pci.dportid = rbp->dportid;  /* destination port ID (e.g. PCIe 3) */
49         fd->simple_pci.drbp = rbp->drbp;
50         if (rbp->drbp)
51                 fd->simple_pci.wrttype = 0;
52         else
53                 fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;
54
55         fd->simple_pci.daddr_lo = lower_32_bits((uint64_t) (dest));
56         fd->simple_pci.daddr_hi = upper_32_bits((uint64_t) (dest));
57
58         return 0;
59 }
60
61 static inline int
62 qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
63                      uint32_t len, struct qbman_fd *fd, int ser)
64 {
65         fd->simple_ddr.saddr_lo = lower_32_bits((uint64_t) (src));
66         fd->simple_ddr.saddr_hi = upper_32_bits((uint64_t) (src));
67
68         fd->simple_ddr.len = len;
69
70         fd->simple_ddr.bmt = 1;
71         fd->simple_ddr.fmt = 3;
72         fd->simple_ddr.sl = 1;
73         fd->simple_ddr.ser = ser;
74         /**
75          * src: if RBP=0, {NS,RDTTYPE[3:0]} = 0_1011
76          * Coherent copy of cacheable memory,
77          * lookup in downstream cache, no allocate
78          * on miss
79          */
80         fd->simple_ddr.rns = 0;
81         fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
82         /**
83          * dest: if RBP=0, {NS,WRTTYPE[3:0]} = 0_0111
84          * Coherent write of cacheable memory,
85          * lookup in downstream cache, no allocate on miss
86          */
87         fd->simple_ddr.wns = 0;
88         fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;
89
90         fd->simple_ddr.daddr_lo = lower_32_bits((uint64_t) (dest));
91         fd->simple_ddr.daddr_hi = upper_32_bits((uint64_t) (dest));
92
93         return 0;
94 }
95
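/*
 * Descriptive note (added for clarity): the compound frame uses three FLEs.
 * fle[0] points at the pair of source/destination descriptors (SDDs), fle[1]
 * describes the source buffer and fle[2] the destination buffer; the FINAL
 * bit is set on the last entry.  Addresses are taken as-is when the job is
 * flagged as physical, otherwise they are converted with DPAA2_VADDR_TO_IOVA.
 */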
96 static void
97 dpaa2_qdma_populate_fle(struct qbman_fle *fle,
98                         uint64_t fle_iova,
99                         struct rte_dpaa2_qdma_rbp *rbp,
100                         uint64_t src, uint64_t dest,
101                         size_t len, uint32_t flags, uint32_t fmt)
102 {
103         struct qdma_sdd *sdd;
104         uint64_t sdd_iova;
105
106         sdd = (struct qdma_sdd *)
107                         ((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET +
108                         QDMA_FLE_SDD_OFFSET);
109         sdd_iova = fle_iova - QDMA_FLE_FLE_OFFSET + QDMA_FLE_SDD_OFFSET;
110
111         /* first frame list to source descriptor */
112         DPAA2_SET_FLE_ADDR(fle, sdd_iova);
113         DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));
114
115         /* source and destination descriptor */
116         if (rbp && rbp->enable) {
117                 /* source */
118                 sdd->read_cmd.portid = rbp->sportid;
119                 sdd->rbpcmd_simple.pfid = rbp->spfid;
120                 sdd->rbpcmd_simple.vfid = rbp->svfid;
121
122                 if (rbp->srbp) {
123                         sdd->read_cmd.rbp = rbp->srbp;
124                         sdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW;
125                 } else {
126                         sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
127                 }
128                 sdd++;
129                 /* destination */
130                 sdd->write_cmd.portid = rbp->dportid;
131                 sdd->rbpcmd_simple.pfid = rbp->dpfid;
132                 sdd->rbpcmd_simple.vfid = rbp->dvfid;
133
134                 if (rbp->drbp) {
135                         sdd->write_cmd.rbp = rbp->drbp;
136                         sdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW;
137                 } else {
138                         sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
139                 }
140
141         } else {
142                 sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
143                 sdd++;
144                 sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
145         }
146         fle++;
147         /* source frame list to source buffer */
148         if (flags & RTE_DPAA2_QDMA_JOB_SRC_PHY) {
149                 DPAA2_SET_FLE_ADDR(fle, src);
150 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
151                 DPAA2_SET_FLE_BMT(fle);
152 #endif
153         } else {
154                 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
155         }
156         fle->word4.fmt = fmt;
157         DPAA2_SET_FLE_LEN(fle, len);
158
159         fle++;
160         /* destination frame list to destination buffer */
161         if (flags & RTE_DPAA2_QDMA_JOB_DEST_PHY) {
162 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
163                 DPAA2_SET_FLE_BMT(fle);
164 #endif
165                 DPAA2_SET_FLE_ADDR(fle, dest);
166         } else {
167                 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
168         }
169         fle->word4.fmt = fmt;
170         DPAA2_SET_FLE_LEN(fle, len);
171
172         /* Final bit: 1, for last frame list */
173         DPAA2_SET_FLE_FIN(fle);
174 }
175
176 static inline int
177 dpdmai_dev_set_fd_us(struct qdma_virt_queue *qdma_vq,
178                      struct qbman_fd *fd,
179                      struct rte_dpaa2_qdma_job **job,
180                      uint16_t nb_jobs)
181 {
182         struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
183         struct rte_dpaa2_qdma_job **ppjob;
184         size_t iova;
185         int ret = 0, loop;
186         int ser = (qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE) ?
187                                 0 : 1;
188
189         for (loop = 0; loop < nb_jobs; loop++) {
190                 if (job[loop]->src & QDMA_RBP_UPPER_ADDRESS_MASK)
191                         iova = (size_t)job[loop]->dest;
192                 else
193                         iova = (size_t)job[loop]->src;
194
195                 /* Set the metadata */
196                 job[loop]->vq_id = qdma_vq->vq_id;
197                 ppjob = (struct rte_dpaa2_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
198                 *ppjob = job[loop];
199
200                 if ((rbp->drbp == 1) || (rbp->srbp == 1))
201                         ret = qdma_populate_fd_pci((phys_addr_t)job[loop]->src,
202                                         (phys_addr_t)job[loop]->dest,
203                                         job[loop]->len, &fd[loop], rbp, ser);
204                 else
205                         ret = qdma_populate_fd_ddr((phys_addr_t)job[loop]->src,
206                                         (phys_addr_t)job[loop]->dest,
207                                         job[loop]->len, &fd[loop], ser);
208         }
209
210         return ret;
211 }
212
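/*
 * Descriptive note (added for clarity): fill the source and destination
 * scatter-gather tables for one SG-format frame descriptor.  The caller lays
 * the destination table out DPAA2_QDMA_MAX_SG_NB entries after the source
 * table; the F (final) bit is set on the last entry of each table.  Returns
 * the total number of bytes described by the jobs.
 */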
213 static uint32_t
214 qdma_populate_sg_entry(struct rte_dpaa2_qdma_job **jobs,
215                        struct qdma_sg_entry *src_sge,
216                        struct qdma_sg_entry *dst_sge,
217                        uint16_t nb_jobs)
218 {
219         uint16_t i;
220         uint32_t total_len = 0;
221         uint64_t iova;
222
223         for (i = 0; i < nb_jobs; i++) {
224                 /* source SG */
225                 if (likely(jobs[i]->flags & RTE_DPAA2_QDMA_JOB_SRC_PHY)) {
226                         src_sge->addr_lo = (uint32_t)jobs[i]->src;
227                         src_sge->addr_hi = (jobs[i]->src >> 32);
228                 } else {
229                         iova = DPAA2_VADDR_TO_IOVA(jobs[i]->src);
230                         src_sge->addr_lo = (uint32_t)iova;
231                         src_sge->addr_hi = iova >> 32;
232                 }
233                 src_sge->data_len.data_len_sl0 = jobs[i]->len;
234                 src_sge->ctrl.sl = QDMA_SG_SL_LONG;
235                 src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
236 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
237                 src_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE;
238 #else
239                 src_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
240 #endif
241                 /* destination SG */
242                 if (likely(jobs[i]->flags & RTE_DPAA2_QDMA_JOB_DEST_PHY)) {
243                         dst_sge->addr_lo = (uint32_t)jobs[i]->dest;
244                         dst_sge->addr_hi = (jobs[i]->dest >> 32);
245                 } else {
246                         iova = DPAA2_VADDR_TO_IOVA(jobs[i]->dest);
247                         dst_sge->addr_lo = (uint32_t)iova;
248                         dst_sge->addr_hi = iova >> 32;
249                 }
250                 dst_sge->data_len.data_len_sl0 = jobs[i]->len;
251                 dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
252                 dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
253 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
254                 dst_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE;
255 #else
256                 dst_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
257 #endif
258                 total_len += jobs[i]->len;
259
260                 if (i == (nb_jobs - 1)) {
261                         src_sge->ctrl.f = QDMA_SG_F;
262                         dst_sge->ctrl.f = QDMA_SG_F;
263                 } else {
264                         src_sge->ctrl.f = 0;
265                         dst_sge->ctrl.f = 0;
266                 }
267                 src_sge++;
268                 dst_sge++;
269         }
270
271         return total_len;
272 }
273
274 static inline int
275 dpdmai_dev_set_multi_fd_lf_no_rsp(struct qdma_virt_queue *qdma_vq,
276                                   struct qbman_fd *fd,
277                                   struct rte_dpaa2_qdma_job **job,
278                                   uint16_t nb_jobs)
279 {
280         struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
281         struct rte_dpaa2_qdma_job **ppjob;
282         uint16_t i;
283         void *elem;
284         struct qbman_fle *fle;
285         uint64_t elem_iova, fle_iova;
286
287         for (i = 0; i < nb_jobs; i++) {
288                 elem = job[i]->usr_elem;
289 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
290                 elem_iova = rte_mempool_virt2iova(elem);
291 #else
292                 elem_iova = DPAA2_VADDR_TO_IOVA(elem);
293 #endif
294
295                 ppjob = (struct rte_dpaa2_qdma_job **)
296                         ((uintptr_t)(uint64_t)elem +
297                          QDMA_FLE_SINGLE_JOB_OFFSET);
298                 *ppjob = job[i];
299
300                 job[i]->vq_id = qdma_vq->vq_id;
301
302                 fle = (struct qbman_fle *)
303                         ((uintptr_t)(uint64_t)elem + QDMA_FLE_FLE_OFFSET);
304                 fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
305
306                 DPAA2_SET_FD_ADDR(&fd[i], fle_iova);
307                 DPAA2_SET_FD_COMPOUND_FMT(&fd[i]);
308
309                 memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
310                                 DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
311
312                 dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
313                         job[i]->src, job[i]->dest, job[i]->len,
314                         job[i]->flags, QBMAN_FLE_WORD4_FMT_SBF);
315         }
316
317         return 0;
318 }
319
320 static inline int
321 dpdmai_dev_set_multi_fd_lf(struct qdma_virt_queue *qdma_vq,
322                            struct qbman_fd *fd,
323                            struct rte_dpaa2_qdma_job **job,
324                            uint16_t nb_jobs)
325 {
326         struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
327         struct rte_dpaa2_qdma_job **ppjob;
328         uint16_t i;
329         int ret;
330         void *elem[DPAA2_QDMA_MAX_DESC];
331         struct qbman_fle *fle;
332         uint64_t elem_iova, fle_iova;
333
334         ret = rte_mempool_get_bulk(qdma_vq->fle_pool, elem, nb_jobs);
335         if (ret) {
336                 DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
337                 return ret;
338         }
339
340         for (i = 0; i < nb_jobs; i++) {
341 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
342                 elem_iova = rte_mempool_virt2iova(elem[i]);
343 #else
344                 elem_iova = DPAA2_VADDR_TO_IOVA(elem[i]);
345 #endif
346
347                 ppjob = (struct rte_dpaa2_qdma_job **)
348                         ((uintptr_t)(uint64_t)elem[i] +
349                          QDMA_FLE_SINGLE_JOB_OFFSET);
350                 *ppjob = job[i];
351
352                 job[i]->vq_id = qdma_vq->vq_id;
353
354                 fle = (struct qbman_fle *)
355                         ((uintptr_t)(uint64_t)elem[i] + QDMA_FLE_FLE_OFFSET);
356                 fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
357
358                 DPAA2_SET_FD_ADDR(&fd[i], fle_iova);
359                 DPAA2_SET_FD_COMPOUND_FMT(&fd[i]);
360                 DPAA2_SET_FD_FRC(&fd[i], QDMA_SER_CTX);
361
362                 memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
363                         DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
364
365                 dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
366                                 job[i]->src, job[i]->dest, job[i]->len,
367                                 job[i]->flags, QBMAN_FLE_WORD4_FMT_SBF);
368         }
369
370         return 0;
371 }
372
373 static inline int
374 dpdmai_dev_set_sg_fd_lf(struct qdma_virt_queue *qdma_vq,
375                         struct qbman_fd *fd,
376                         struct rte_dpaa2_qdma_job **job,
377                         uint16_t nb_jobs)
378 {
379         struct rte_dpaa2_qdma_rbp *rbp = &qdma_vq->rbp;
380         struct rte_dpaa2_qdma_job **ppjob;
381         void *elem;
382         struct qbman_fle *fle;
383         uint64_t elem_iova, fle_iova, src, dst;
384         int ret = 0, i;
385         struct qdma_sg_entry *src_sge, *dst_sge;
386         uint32_t len, fmt, flags;
387
388         /*
389          * Get an FLE/SDD from FLE pool.
390          * Note: IO metadata is before the FLE and SDD memory.
391          */
392         if (qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE) {
393                 elem = job[0]->usr_elem;
394         } else {
395                 ret = rte_mempool_get(qdma_vq->fle_pool, &elem);
396                 if (ret) {
397                         DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
398                         return ret;
399                 }
400         }
401
402 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
403         elem_iova = rte_mempool_virt2iova(elem);
404 #else
405         elem_iova = DPAA2_VADDR_TO_IOVA(elem);
406 #endif
407
408         /* Set the metadata */
409         /* Save job context. */
410         *((uint16_t *)
411         ((uintptr_t)(uint64_t)elem + QDMA_FLE_JOB_NB_OFFSET)) = nb_jobs;
412         ppjob = (struct rte_dpaa2_qdma_job **)
413                 ((uintptr_t)(uint64_t)elem + QDMA_FLE_SG_JOBS_OFFSET);
414         for (i = 0; i < nb_jobs; i++)
415                 ppjob[i] = job[i];
416
417         ppjob[0]->vq_id = qdma_vq->vq_id;
418
419         fle = (struct qbman_fle *)
420                 ((uintptr_t)(uint64_t)elem + QDMA_FLE_FLE_OFFSET);
421         fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
422
423         DPAA2_SET_FD_ADDR(fd, fle_iova);
424         DPAA2_SET_FD_COMPOUND_FMT(fd);
425         if (!(qdma_vq->flags & DPAA2_QDMA_VQ_NO_RESPONSE))
426                 DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
427
428         /* Populate FLE */
429         if (likely(nb_jobs > 1)) {
430                 src_sge = (struct qdma_sg_entry *)
431                         ((uintptr_t)(uint64_t)elem + QDMA_FLE_SG_ENTRY_OFFSET);
432                 dst_sge = src_sge + DPAA2_QDMA_MAX_SG_NB;
433                 src = elem_iova + QDMA_FLE_SG_ENTRY_OFFSET;
434                 dst = src +
435                         DPAA2_QDMA_MAX_SG_NB * sizeof(struct qdma_sg_entry);
436                 len = qdma_populate_sg_entry(job, src_sge, dst_sge, nb_jobs);
437                 fmt = QBMAN_FLE_WORD4_FMT_SGE;
438                 flags = RTE_DPAA2_QDMA_JOB_SRC_PHY | RTE_DPAA2_QDMA_JOB_DEST_PHY;
439         } else {
440                 src = job[0]->src;
441                 dst = job[0]->dest;
442                 len = job[0]->len;
443                 fmt = QBMAN_FLE_WORD4_FMT_SBF;
444                 flags = job[0]->flags;
445         }
446
447         memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
448                         DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
449
450         dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
451                                         src, dst, len, flags, fmt);
452
453         return 0;
454 }
455
456 static inline uint16_t
457 dpdmai_dev_get_job_us(struct qdma_virt_queue *qdma_vq __rte_unused,
458                       const struct qbman_fd *fd,
459                       struct rte_dpaa2_qdma_job **job, uint16_t *nb_jobs)
460 {
461         uint16_t vqid;
462         size_t iova;
463         struct rte_dpaa2_qdma_job **ppjob;
464
465         if (fd->simple_pci.saddr_hi & (QDMA_RBP_UPPER_ADDRESS_MASK >> 32))
466                 iova = (size_t)(((uint64_t)fd->simple_pci.daddr_hi) << 32
467                                 | (uint64_t)fd->simple_pci.daddr_lo);
468         else
469                 iova = (size_t)(((uint64_t)fd->simple_pci.saddr_hi) << 32
470                                 | (uint64_t)fd->simple_pci.saddr_lo);
471
472         ppjob = (struct rte_dpaa2_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
473         *job = (struct rte_dpaa2_qdma_job *)*ppjob;
474         (*job)->status = (fd->simple_pci.acc_err << 8) |
475                                         (fd->simple_pci.error);
476         vqid = (*job)->vq_id;
477         *nb_jobs = 1;
478
479         return vqid;
480 }
481
482 static inline uint16_t
483 dpdmai_dev_get_single_job_lf(struct qdma_virt_queue *qdma_vq,
484                              const struct qbman_fd *fd,
485                              struct rte_dpaa2_qdma_job **job,
486                              uint16_t *nb_jobs)
487 {
488         struct qbman_fle *fle;
489         struct rte_dpaa2_qdma_job **ppjob = NULL;
490         uint16_t status;
491
492         /*
493          * Fetch metadata from the FLE. The job and vq_id were
494          * stored in the metadata during the enqueue operation.
495          */
496         fle = (struct qbman_fle *)
497                         DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
498
499         *nb_jobs = 1;
500         ppjob = (struct rte_dpaa2_qdma_job **)((uintptr_t)(uint64_t)fle -
501                         QDMA_FLE_FLE_OFFSET + QDMA_FLE_SINGLE_JOB_OFFSET);
502
503         status = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF);
504
505         *job = *ppjob;
506         (*job)->status = status;
507
508         /* Free FLE to the pool */
509         rte_mempool_put(qdma_vq->fle_pool,
510                         (void *)
511                         ((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET));
512
513         return (*job)->vq_id;
514 }
515
516 static inline uint16_t
517 dpdmai_dev_get_sg_job_lf(struct qdma_virt_queue *qdma_vq,
518                          const struct qbman_fd *fd,
519                          struct rte_dpaa2_qdma_job **job,
520                          uint16_t *nb_jobs)
521 {
522         struct qbman_fle *fle;
523         struct rte_dpaa2_qdma_job **ppjob = NULL;
524         uint16_t i, status;
525
526         /*
527          * Fetch metadata from the FLE. The job and vq_id were
528          * stored in the metadata during the enqueue operation.
529          */
530         fle = (struct qbman_fle *)
531                         DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
532         *nb_jobs = *((uint16_t *)((uintptr_t)(uint64_t)fle -
533                                 QDMA_FLE_FLE_OFFSET + QDMA_FLE_JOB_NB_OFFSET));
534         ppjob = (struct rte_dpaa2_qdma_job **)((uintptr_t)(uint64_t)fle -
535                                 QDMA_FLE_FLE_OFFSET + QDMA_FLE_SG_JOBS_OFFSET);
536         status = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF);
537
538         for (i = 0; i < (*nb_jobs); i++) {
539                 job[i] = ppjob[i];
540                 job[i]->status = status;
541         }
542
543         /* Free FLE to the pool */
544         rte_mempool_put(qdma_vq->fle_pool,
545                         (void *)
546                         ((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET));
547
548         return job[0]->vq_id;
549 }
550
551 /* Function to receive completed QDMA jobs for a given device and queue */
552 static int
553 dpdmai_dev_dequeue_multijob_prefetch(struct qdma_virt_queue *qdma_vq,
554                                      uint16_t *vq_id,
555                                      struct rte_dpaa2_qdma_job **job,
556                                      uint16_t nb_jobs)
557 {
558         struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
559         struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
560         struct qbman_result *dq_storage, *dq_storage1 = NULL;
561         struct qbman_pull_desc pulldesc;
562         struct qbman_swp *swp;
563         struct queue_storage_info_t *q_storage;
564         uint8_t status, pending;
565         uint8_t num_rx = 0;
566         const struct qbman_fd *fd;
567         uint16_t vqid, num_rx_ret;
568         uint16_t rx_fqid = rxq->fqid;
569         int ret, pull_size;
570
571         if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
572                 /* Make sure there is enough space to hold the jobs. */
573                 if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
574                         return -EINVAL;
575                 nb_jobs = 1;
576         }
577
578         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
579                 ret = dpaa2_affine_qbman_swp();
580                 if (ret) {
581                         DPAA2_QDMA_ERR(
582                                 "Failed to allocate IO portal, tid: %d\n",
583                                 rte_gettid());
584                         return 0;
585                 }
586         }
587         swp = DPAA2_PER_LCORE_PORTAL;
588
589         pull_size = (nb_jobs > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_jobs;
590         q_storage = rxq->q_storage;
591
592         if (unlikely(!q_storage->active_dqs)) {
593                 q_storage->toggle = 0;
594                 dq_storage = q_storage->dq_storage[q_storage->toggle];
595                 q_storage->last_num_pkts = pull_size;
596                 qbman_pull_desc_clear(&pulldesc);
597                 qbman_pull_desc_set_numframes(&pulldesc,
598                                               q_storage->last_num_pkts);
599                 qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
600                 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
601                                 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
602                 if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
603                         while (!qbman_check_command_complete(
604                                 get_swp_active_dqs(
605                                 DPAA2_PER_LCORE_DPIO->index)))
606                                 ;
607                         clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
608                 }
609                 while (1) {
610                         if (qbman_swp_pull(swp, &pulldesc)) {
611                                 DPAA2_QDMA_DP_WARN(
612                                         "VDQ command not issued. QBMAN busy\n");
613                                 /* Portal was busy, try again */
614                                 continue;
615                         }
616                         break;
617                 }
618                 q_storage->active_dqs = dq_storage;
619                 q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
620                 set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index,
621                                    dq_storage);
622         }
623
624         dq_storage = q_storage->active_dqs;
625         rte_prefetch0((void *)(size_t)(dq_storage));
626         rte_prefetch0((void *)(size_t)(dq_storage + 1));
627
628         /* Prepare next pull descriptor. This will give space for the
629          * prefetching done on DQRR entries
630          */
631         q_storage->toggle ^= 1;
632         dq_storage1 = q_storage->dq_storage[q_storage->toggle];
633         qbman_pull_desc_clear(&pulldesc);
634         qbman_pull_desc_set_numframes(&pulldesc, pull_size);
635         qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
636         qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
637                 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
638
639         /* Check if the previously issued command is completed.
640          * The SWP also seems to be shared between the Ethernet driver
641          * and the SEC driver.
642          */
643         while (!qbman_check_command_complete(dq_storage))
644                 ;
645         if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
646                 clear_swp_active_dqs(q_storage->active_dpio_id);
647
648         pending = 1;
649
650         do {
651                 /* Loop until the dq_storage is updated with
652                  * new token by QBMAN
653                  */
654                 while (!qbman_check_new_result(dq_storage))
655                         ;
656                 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
657                 /* Check whether the last pull command has expired and,
658                  * if so, set the condition for loop termination
659                  */
660                 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
661                         pending = 0;
662                         /* Check for valid frame. */
663                         status = qbman_result_DQ_flags(dq_storage);
664                         if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
665                                 continue;
666                 }
667                 fd = qbman_result_DQ_fd(dq_storage);
668
669                 vqid = qdma_vq->get_job(qdma_vq, fd, &job[num_rx],
670                                                                 &num_rx_ret);
671                 if (vq_id)
672                         vq_id[num_rx] = vqid;
673
674                 dq_storage++;
675                 num_rx += num_rx_ret;
676         } while (pending);
677
678         if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
679                 while (!qbman_check_command_complete(
680                         get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
681                         ;
682                 clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
683         }
684         /* issue a volatile dequeue command for next pull */
685         while (1) {
686                 if (qbman_swp_pull(swp, &pulldesc)) {
687                         DPAA2_QDMA_DP_WARN(
688                                 "VDQ command is not issued. QBMAN is busy (2)\n");
689                         continue;
690                 }
691                 break;
692         }
693
694         q_storage->active_dqs = dq_storage1;
695         q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
696         set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
697
698         return num_rx;
699 }
700
701 static int
702 dpdmai_dev_dequeue_multijob_no_prefetch(struct qdma_virt_queue *qdma_vq,
703                                         uint16_t *vq_id,
704                                         struct rte_dpaa2_qdma_job **job,
705                                         uint16_t nb_jobs)
706 {
707         struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
708         struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
709         struct qbman_result *dq_storage;
710         struct qbman_pull_desc pulldesc;
711         struct qbman_swp *swp;
712         uint8_t status, pending;
713         uint8_t num_rx = 0;
714         const struct qbman_fd *fd;
715         uint16_t vqid, num_rx_ret;
716         uint16_t rx_fqid = rxq->fqid;
717         int ret, next_pull, num_pulled = 0;
718
719         if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
720                 /* Make sure there is enough space to hold the jobs. */
721                 if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
722                         return -EINVAL;
723                 nb_jobs = 1;
724         }
725
726         next_pull = nb_jobs;
727
728         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
729                 ret = dpaa2_affine_qbman_swp();
730                 if (ret) {
731                         DPAA2_QDMA_ERR(
732                                 "Failed to allocate IO portal, tid: %d\n",
733                                 rte_gettid());
734                         return 0;
735                 }
736         }
737         swp = DPAA2_PER_LCORE_PORTAL;
738
739         rxq = &(dpdmai_dev->rx_queue[0]);
740
741         do {
742                 dq_storage = rxq->q_storage->dq_storage[0];
743                 /* Prepare dequeue descriptor */
744                 qbman_pull_desc_clear(&pulldesc);
745                 qbman_pull_desc_set_fq(&pulldesc, rx_fqid);
746                 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
747                         (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
748
749                 if (next_pull > dpaa2_dqrr_size) {
750                         qbman_pull_desc_set_numframes(&pulldesc,
751                                         dpaa2_dqrr_size);
752                         next_pull -= dpaa2_dqrr_size;
753                 } else {
754                         qbman_pull_desc_set_numframes(&pulldesc, next_pull);
755                         next_pull = 0;
756                 }
757
758                 while (1) {
759                         if (qbman_swp_pull(swp, &pulldesc)) {
760                                 DPAA2_QDMA_DP_WARN(
761                                         "VDQ command not issued. QBMAN busy");
762                                 /* Portal was busy, try again */
763                                 continue;
764                         }
765                         break;
766                 }
767
768                 rte_prefetch0((void *)((size_t)(dq_storage + 1)));
769                 /* Check if the previously issued command is completed. */
770                 while (!qbman_check_command_complete(dq_storage))
771                         ;
772
773                 num_pulled = 0;
774                 pending = 1;
775
776                 do {
777                         /* Loop until dq_storage is updated
778                          * with new token by QBMAN
779                          */
780                         while (!qbman_check_new_result(dq_storage))
781                                 ;
782                         rte_prefetch0((void *)((size_t)(dq_storage + 2)));
783
784                         if (qbman_result_DQ_is_pull_complete(dq_storage)) {
785                                 pending = 0;
786                                 /* Check for valid frame. */
787                                 status = qbman_result_DQ_flags(dq_storage);
788                                 if (unlikely((status &
789                                         QBMAN_DQ_STAT_VALIDFRAME) == 0))
790                                         continue;
791                         }
792                         fd = qbman_result_DQ_fd(dq_storage);
793
794                         vqid = qdma_vq->get_job(qdma_vq, fd,
795                                                 &job[num_rx], &num_rx_ret);
796                         if (vq_id)
797                                 vq_id[num_rx] = vqid;
798
799                         dq_storage++;
800                         num_rx += num_rx_ret;
801                         num_pulled++;
802
803                 } while (pending);
804         /* Last VDQ provided all packets and more packets are requested */
805         } while (next_pull && num_pulled == dpaa2_dqrr_size);
806
807         return num_rx;
808 }
809
810 static int
811 dpdmai_dev_submit_multi(struct qdma_virt_queue *qdma_vq,
812                         struct rte_dpaa2_qdma_job **job,
813                         uint16_t nb_jobs)
814 {
815         struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_vq->dpdmai_dev;
816         uint16_t txq_id = dpdmai_dev->tx_queue[0].fqid;
817         struct qbman_fd fd[DPAA2_QDMA_MAX_DESC];
818         struct qbman_eq_desc eqdesc;
819         struct qbman_swp *swp;
820         uint32_t num_to_send = 0;
821         uint16_t num_tx = 0;
822         uint32_t enqueue_loop, loop;
823         int ret;
824
825         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
826                 ret = dpaa2_affine_qbman_swp();
827                 if (ret) {
828                         DPAA2_QDMA_ERR(
829                                 "Failed to allocate IO portal, tid: %d\n",
830                                 rte_gettid());
831                         return 0;
832                 }
833         }
834         swp = DPAA2_PER_LCORE_PORTAL;
835
836         /* Prepare enqueue descriptor */
837         qbman_eq_desc_clear(&eqdesc);
838         qbman_eq_desc_set_fq(&eqdesc, txq_id);
839         qbman_eq_desc_set_no_orp(&eqdesc, 0);
840         qbman_eq_desc_set_response(&eqdesc, 0, 0);
841
842         if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
843                 uint16_t fd_nb;
844                 uint16_t sg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB ?
845                                                 DPAA2_QDMA_MAX_SG_NB : nb_jobs;
846                 uint16_t job_idx = 0;
847                 uint16_t fd_sg_nb[8];
848                 uint16_t nb_jobs_ret = 0;
849
850                 if (nb_jobs % DPAA2_QDMA_MAX_SG_NB)
851                         fd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB + 1;
852                 else
853                         fd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB;
854
855                 memset(&fd[0], 0, sizeof(struct qbman_fd) * fd_nb);
856
857                 for (loop = 0; loop < fd_nb; loop++) {
858                         ret = qdma_vq->set_fd(qdma_vq, &fd[loop], &job[job_idx],
859                                               sg_entry_nb);
860                         if (unlikely(ret < 0))
861                                 return 0;
862                         fd_sg_nb[loop] = sg_entry_nb;
863                         nb_jobs -= sg_entry_nb;
864                         job_idx += sg_entry_nb;
865                         sg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB ?
866                                                 DPAA2_QDMA_MAX_SG_NB : nb_jobs;
867                 }
868
869                 /* Enqueue the FDs to QBMAN */
870                 enqueue_loop = 0;
871
872                 while (enqueue_loop < fd_nb) {
873                         ret = qbman_swp_enqueue_multiple(swp,
874                                         &eqdesc, &fd[enqueue_loop],
875                                         NULL, fd_nb - enqueue_loop);
876                         if (likely(ret >= 0)) {
877                                 for (loop = 0; loop < (uint32_t)ret; loop++)
878                                         nb_jobs_ret +=
879                                                 fd_sg_nb[enqueue_loop + loop];
880                                 enqueue_loop += ret;
881                         }
882                 }
883
884                 return nb_jobs_ret;
885         }
886
887         memset(fd, 0, nb_jobs * sizeof(struct qbman_fd));
888
889         while (nb_jobs > 0) {
890                 num_to_send = (nb_jobs > dpaa2_eqcr_size) ?
891                         dpaa2_eqcr_size : nb_jobs;
892
893                 ret = qdma_vq->set_fd(qdma_vq, &fd[num_tx],
894                                                 &job[num_tx], num_to_send);
895                 if (unlikely(ret < 0))
896                         break;
897
898                 /* Enqueue the FDs to QBMAN */
899                 enqueue_loop = 0;
900                 loop = num_to_send;
901
902                 while (enqueue_loop < loop) {
903                         ret = qbman_swp_enqueue_multiple(swp,
904                                                 &eqdesc,
905                                                 &fd[num_tx + enqueue_loop],
906                                                 NULL,
907                                                 loop - enqueue_loop);
908                         if (likely(ret >= 0))
909                                 enqueue_loop += ret;
910                 }
911                 num_tx += num_to_send;
912                 nb_jobs -= loop;
913         }
914
915         qdma_vq->num_enqueues += num_tx;
916
917         return num_tx;
918 }
919
920 static inline int
921 dpaa2_qdma_submit(void *dev_private, uint16_t vchan)
922 {
923         struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
924         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
925         struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
926
927         dpdmai_dev_submit_multi(qdma_vq, qdma_vq->job_list,
928                                 qdma_vq->num_valid_jobs);
929
930         qdma_vq->num_valid_jobs = 0;
931
932         return 0;
933 }
934
935 static int
936 dpaa2_qdma_enqueue(void *dev_private, uint16_t vchan,
937                    rte_iova_t src, rte_iova_t dst,
938                    uint32_t length, uint64_t flags)
939 {
940         struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
941         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
942         struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
943         struct rte_dpaa2_qdma_job *job;
944         int idx, ret;
945
946         idx = (uint16_t)(qdma_vq->num_enqueues + qdma_vq->num_valid_jobs);
947
948         ret = rte_mempool_get(qdma_vq->job_pool, (void **)&job);
949         if (ret) {
950                 DPAA2_QDMA_DP_DEBUG("Memory alloc failed for job");
951                 return -ENOSPC;
952         }
953
954         job->src = src;
955         job->dest = dst;
956         job->len = length;
957         job->flags = flags;
958         job->status = 0;
959         job->vq_id = vchan;
960
961         qdma_vq->job_list[qdma_vq->num_valid_jobs] = job;
962         qdma_vq->num_valid_jobs++;
963
964         if (flags & RTE_DMA_OP_FLAG_SUBMIT)
965                 dpaa2_qdma_submit(dev_private, vchan);
966
967         return idx;
968 }
969
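/*
 * Hedged usage sketch (not part of the driver): dpaa2_qdma_enqueue() above is
 * the hook behind the generic rte_dma_copy() fast path.  A minimal application
 * fragment using this PMD through the standard dmadev API could look roughly
 * like the following; dev_id, vchan and the IOVAs are placeholders.
 *
 *     uint16_t last_idx;
 *     bool has_error = false;
 *
 *     if (rte_dma_copy(dev_id, vchan, src_iova, dst_iova, len,
 *                      RTE_DMA_OP_FLAG_SUBMIT) < 0)
 *         return -1;                              // enqueue failed
 *     while (rte_dma_completed(dev_id, vchan, 1, &last_idx, &has_error) == 0)
 *         ;                                       // poll for completion
 */
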
970 int
971 rte_dpaa2_qdma_copy_multi(int16_t dev_id, uint16_t vchan,
972                           struct rte_dpaa2_qdma_job **jobs,
973                           uint16_t nb_cpls)
974 {
975         struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
976         struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
977         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
978         struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
979
980         return dpdmai_dev_submit_multi(qdma_vq, jobs, nb_cpls);
981 }
982
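/*
 * Hedged usage sketch (illustrative, not part of the driver): the PMD-private
 * multi-job API takes an array of rte_dpaa2_qdma_job descriptors instead of a
 * single src/dst pair.  dev_id, vchan and the IOVAs below are placeholders.
 *
 *     struct rte_dpaa2_qdma_job job = {
 *         .src = src_iova,
 *         .dest = dst_iova,
 *         .len = len,
 *         .flags = RTE_DPAA2_QDMA_JOB_SRC_PHY | RTE_DPAA2_QDMA_JOB_DEST_PHY,
 *     };
 *     struct rte_dpaa2_qdma_job *jobs[1] = { &job };
 *     struct rte_dpaa2_qdma_job *done[1];
 *
 *     if (rte_dpaa2_qdma_copy_multi(dev_id, vchan, jobs, 1) != 1)
 *         return -1;          // enqueue failed
 *     while (rte_dpaa2_qdma_completed_multi(dev_id, vchan, done, 1) == 0)
 *         ;                   // poll; done[0]->status holds completion status
 */
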
983 static uint16_t
984 dpaa2_qdma_dequeue_multi(struct qdma_device *qdma_dev,
985                          struct qdma_virt_queue *qdma_vq,
986                          struct rte_dpaa2_qdma_job **jobs,
987                          uint16_t nb_jobs)
988 {
989         struct qdma_virt_queue *temp_qdma_vq;
990         int ring_count;
991         int ret = 0, i;
992
993         if (qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) {
994                 /* Make sure there is enough space to hold the jobs. */
995                 if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
996                         return -EINVAL;
997         }
998
999         /* Only dequeue when there are pending jobs on VQ */
1000         if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
1001                 return 0;
1002
1003         if (!(qdma_vq->flags & DPAA2_QDMA_VQ_FD_SG_FORMAT) &&
1004                 qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))
1005                 nb_jobs = RTE_MIN((qdma_vq->num_enqueues -
1006                                 qdma_vq->num_dequeues), nb_jobs);
1007
1008         if (qdma_vq->exclusive_hw_queue) {
1009                 /* In case of exclusive queue directly fetch from HW queue */
1010                 ret = qdma_vq->dequeue_job(qdma_vq, NULL, jobs, nb_jobs);
1011                 if (ret < 0) {
1012                         DPAA2_QDMA_ERR(
1013                                 "Dequeue from DPDMAI device failed: %d", ret);
1014                         return ret;
1015                 }
1016         } else {
1017                 uint16_t temp_vq_id[DPAA2_QDMA_MAX_DESC];
1018
1019                 /* Get the QDMA completed jobs from the software ring.
1020                  * In case they are not available on the ring, poke the HW
1021                  * to fetch completed jobs from the corresponding HW queues
1022                  */
1023                 ring_count = rte_ring_count(qdma_vq->status_ring);
1024                 if (ring_count < nb_jobs) {
1025                         ret = qdma_vq->dequeue_job(qdma_vq,
1026                                         temp_vq_id, jobs, nb_jobs);
1027                         for (i = 0; i < ret; i++) {
1028                                 temp_qdma_vq = &qdma_dev->vqs[temp_vq_id[i]];
1029                                 rte_ring_enqueue(temp_qdma_vq->status_ring,
1030                                         (void *)(jobs[i]));
1031                         }
1032                         ring_count = rte_ring_count(
1033                                         qdma_vq->status_ring);
1034                 }
1035
1036                 if (ring_count) {
1037                         /* Dequeue job from the software ring
1038                          * to provide to the user
1039                          */
1040                         ret = rte_ring_dequeue_bulk(qdma_vq->status_ring,
1041                                                     (void **)jobs,
1042                                                     ring_count, NULL);
1043                 }
1044         }
1045
1046         qdma_vq->num_dequeues += ret;
1047         return ret;
1048 }
1049
1050 static uint16_t
1051 dpaa2_qdma_dequeue_status(void *dev_private, uint16_t vchan,
1052                           const uint16_t nb_cpls,
1053                           uint16_t *last_idx,
1054                           enum rte_dma_status_code *st)
1055 {
1056         struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
1057         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1058         struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
1059         struct rte_dpaa2_qdma_job *jobs[DPAA2_QDMA_MAX_DESC];
1060         int ret, i;
1061
1062         ret = dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq, jobs, nb_cpls);
1063
1064         for (i = 0; i < ret; i++)
1065                 st[i] = jobs[i]->status;
1066
1067         rte_mempool_put_bulk(qdma_vq->job_pool, (void **)jobs, ret);
1068
1069         if (last_idx != NULL)
1070                 *last_idx = (uint16_t)(qdma_vq->num_dequeues - 1);
1071
1072         return ret;
1073 }
1074
1075 static uint16_t
1076 dpaa2_qdma_dequeue(void *dev_private,
1077                    uint16_t vchan, const uint16_t nb_cpls,
1078                    uint16_t *last_idx, bool *has_error)
1079 {
1080         struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
1081         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1082         struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
1083         struct rte_dpaa2_qdma_job *jobs[DPAA2_QDMA_MAX_DESC];
1084         int ret;
1085
1086         RTE_SET_USED(has_error);
1087
1088         ret = dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq,
1089                                 jobs, nb_cpls);
1090
1091         rte_mempool_put_bulk(qdma_vq->job_pool, (void **)jobs, ret);
1092
1093         if (last_idx != NULL)
1094                 *last_idx = (uint16_t)(qdma_vq->num_dequeues - 1);
1095
1096         return ret;
1097 }
1098
1099 uint16_t
1100 rte_dpaa2_qdma_completed_multi(int16_t dev_id, uint16_t vchan,
1101                                struct rte_dpaa2_qdma_job **jobs,
1102                                uint16_t nb_cpls)
1103 {
1104         struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
1105         struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
1106         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1107         struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
1108
1109         return dpaa2_qdma_dequeue_multi(qdma_dev, qdma_vq, jobs, nb_cpls);
1110 }
1111
1112 static int
1113 dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
1114                     struct rte_dma_info *dev_info,
1115                     uint32_t info_sz)
1116 {
1117         RTE_SET_USED(dev);
1118         RTE_SET_USED(info_sz);
1119
1120         dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
1121                              RTE_DMA_CAPA_MEM_TO_DEV |
1122                              RTE_DMA_CAPA_DEV_TO_DEV |
1123                              RTE_DMA_CAPA_DEV_TO_MEM |
1124                              RTE_DMA_CAPA_SILENT |
1125                              RTE_DMA_CAPA_OPS_COPY;
1126         dev_info->max_vchans = DPAA2_QDMA_MAX_VHANS;
1127         dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
1128         dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
1129
1130         return 0;
1131 }
1132
1133 static int
1134 dpaa2_qdma_configure(struct rte_dma_dev *dev,
1135                      const struct rte_dma_conf *dev_conf,
1136                      uint32_t conf_sz)
1137 {
1138         char name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
1139         struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
1140         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1141
1142         DPAA2_QDMA_FUNC_TRACE();
1143
1144         RTE_SET_USED(conf_sz);
1145
1146         /* In case QDMA device is not in stopped state, return -EBUSY */
1147         if (qdma_dev->state == 1) {
1148                 DPAA2_QDMA_ERR(
1149                         "Device is in running state. Stop before config.");
1150                 return -EBUSY;
1151         }
1152
1153         /* Allocate Virtual Queues */
1154         snprintf(name, sizeof(name), "qdma_%d_vq", dev->data->dev_id);
1155         qdma_dev->vqs = rte_malloc(name,
1156                         (sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans),
1157                         RTE_CACHE_LINE_SIZE);
1158         if (!qdma_dev->vqs) {
1159                 DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
1160                 return -ENOMEM;
1161         }
1162         qdma_dev->num_vqs = dev_conf->nb_vchans;
1163
1164         return 0;
1165 }
1166
1167 static int
1168 check_devargs_handler(__rte_unused const char *key,
1169                       const char *value,
1170                       __rte_unused void *opaque)
1171 {
1172         if (strcmp(value, "1"))
1173                 return -1;
1174
1175         return 0;
1176 }
1177
1178 static int
1179 dpaa2_qdma_get_devargs(struct rte_devargs *devargs, const char *key)
1180 {
1181         struct rte_kvargs *kvlist;
1182
1183         if (!devargs)
1184                 return 0;
1185
1186         kvlist = rte_kvargs_parse(devargs->args, NULL);
1187         if (!kvlist)
1188                 return 0;
1189
1190         if (!rte_kvargs_count(kvlist, key)) {
1191                 rte_kvargs_free(kvlist);
1192                 return 0;
1193         }
1194
1195         if (rte_kvargs_process(kvlist, key,
1196                                check_devargs_handler, NULL) < 0) {
1197                 rte_kvargs_free(kvlist);
1198                 return 0;
1199         }
1200         rte_kvargs_free(kvlist);
1201
1202         return 1;
1203 }
1204
1205 /* Enable FD in Ultra Short format */
1206 void
1207 rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan)
1208 {
1209         struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
1210         struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
1211         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1212
1213         qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SHORT_FORMAT;
1214 }
1215
1216 /* Enable internal SG processing */
1217 void
1218 rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan)
1219 {
1220         struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
1221         struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
1222         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1223
1224         qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SG_FORMAT;
1225 }
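
/*
 * Hedged usage note (illustrative): these PMD-private helpers only set flags
 * on the virtual queue, so they are expected to be called after
 * rte_dma_configure() (which allocates the vqs array) and before
 * rte_dma_vchan_setup() (which reads the flags), e.g.:
 *
 *     rte_dma_configure(dev_id, &dma_conf);
 *     rte_dpaa2_qdma_vchan_internal_sg_enable(dev_id, vchan);
 *     rte_dma_vchan_setup(dev_id, vchan, &vchan_conf);
 */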
1226
1227 /* Enable RBP */
1228 void
1229 rte_dpaa2_qdma_vchan_rbp_enable(int16_t dev_id, uint16_t vchan,
1230                                 struct rte_dpaa2_qdma_rbp *rbp_config)
1231 {
1232         struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
1233         struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
1234         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1235
1236         memcpy(&qdma_dev->vqs[vchan].rbp, rbp_config,
1237                         sizeof(struct rte_dpaa2_qdma_rbp));
1238 }
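
/*
 * Hedged usage sketch (illustrative): for memory-to-PCIe copies the RBP
 * configuration is supplied per vchan before rte_dma_vchan_setup().  The
 * field values below are placeholders, not a recommended setting.
 *
 *     struct rte_dpaa2_qdma_rbp rbp = {
 *         .enable = 1,
 *         .drbp = 1,          // route the destination via PCIe (RBP)
 *         .dportid = pcie_port_id,
 *         .dpfid = pf_id,
 *         .dvfid = vf_id,
 *     };
 *     rte_dpaa2_qdma_vchan_rbp_enable(dev_id, vchan, &rbp);
 */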
1239
1240 static int
1241 dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
1242                        const struct rte_dma_vchan_conf *conf,
1243                        uint32_t conf_sz)
1244 {
1245         struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
1246         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1247         uint32_t pool_size;
1248         char ring_name[32];
1249         char pool_name[64];
1250         int fd_long_format = 1;
1251         int sg_enable = 0;
1252
1253         DPAA2_QDMA_FUNC_TRACE();
1254
1255         RTE_SET_USED(conf_sz);
1256
1257         if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SG_FORMAT)
1258                 sg_enable = 1;
1259
1260         if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SHORT_FORMAT)
1261                 fd_long_format = 0;
1262
1263         if (dev->data->dev_conf.enable_silent)
1264                 qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_NO_RESPONSE;
1265
1266         if (sg_enable) {
1267                 if (qdma_dev->num_vqs != 1) {
1268                         DPAA2_QDMA_ERR(
1269                                 "qDMA SG format only supports physical queue!");
1270                         return -ENODEV;
1271                 }
1272                 if (!fd_long_format) {
1273                         DPAA2_QDMA_ERR(
1274                                 "qDMA SG format only supports long FD format!");
1275                         return -ENODEV;
1276                 }
1277                 pool_size = QDMA_FLE_SG_POOL_SIZE;
1278         } else {
1279                 pool_size = QDMA_FLE_SINGLE_POOL_SIZE;
1280         }
1281
1282         if (qdma_dev->num_vqs == 1)
1283                 qdma_dev->vqs[vchan].exclusive_hw_queue = 1;
1284         else {
1285                 /* Allocate a Ring for Virtual Queue in VQ mode */
1286                 snprintf(ring_name, sizeof(ring_name), "status ring %d %d",
1287                          dev->data->dev_id, vchan);
1288                 qdma_dev->vqs[vchan].status_ring = rte_ring_create(ring_name,
1289                         conf->nb_desc, rte_socket_id(), 0);
1290                 if (!qdma_dev->vqs[vchan].status_ring) {
1291                         DPAA2_QDMA_ERR("Status ring creation failed for vq");
1292                         return rte_errno;
1293                 }
1294         }
1295
1296         snprintf(pool_name, sizeof(pool_name),
1297                 "qdma_fle_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
1298         qdma_dev->vqs[vchan].fle_pool = rte_mempool_create(pool_name,
1299                         conf->nb_desc, pool_size,
1300                         QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
1301                         NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
1302         if (!qdma_dev->vqs[vchan].fle_pool) {
1303                 DPAA2_QDMA_ERR("qdma_fle_pool create failed");
1304                 return -ENOMEM;
1305         }
1306
1307         snprintf(pool_name, sizeof(pool_name),
1308                 "qdma_job_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
1309         qdma_dev->vqs[vchan].job_pool = rte_mempool_create(pool_name,
1310                         conf->nb_desc, pool_size,
1311                         QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
1312                         NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
1313         if (!qdma_dev->vqs[vchan].job_pool) {
1314                 DPAA2_QDMA_ERR("qdma_job_pool create failed");
1315                 return -ENOMEM;
1316         }
1317
1318         if (fd_long_format) {
1319                 if (sg_enable) {
1320                         qdma_dev->vqs[vchan].set_fd = dpdmai_dev_set_sg_fd_lf;
1321                         qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_sg_job_lf;
1322                 } else {
1323                         if (dev->data->dev_conf.enable_silent)
1324                                 qdma_dev->vqs[vchan].set_fd =
1325                                         dpdmai_dev_set_multi_fd_lf_no_rsp;
1326                         else
1327                                 qdma_dev->vqs[vchan].set_fd =
1328                                         dpdmai_dev_set_multi_fd_lf;
1329                         qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_single_job_lf;
1330                 }
1331         } else {
1332                 qdma_dev->vqs[vchan].set_fd = dpdmai_dev_set_fd_us;
1333                 qdma_dev->vqs[vchan].get_job = dpdmai_dev_get_job_us;
1334         }
1335
1336         if (dpaa2_qdma_get_devargs(dev->device->devargs,
1337                         DPAA2_QDMA_PREFETCH)) {
1338                 /* Prefetch mode requested via devargs. */
1339                 qdma_dev->vqs[vchan].dequeue_job =
1340                                 dpdmai_dev_dequeue_multijob_prefetch;
1341                 DPAA2_QDMA_INFO("Prefetch RX Mode enabled");
1342         } else {
1343                 qdma_dev->vqs[vchan].dequeue_job =
1344                         dpdmai_dev_dequeue_multijob_no_prefetch;
1345         }
1346
1347         qdma_dev->vqs[vchan].dpdmai_dev = dpdmai_dev;
1348         qdma_dev->vqs[vchan].nb_desc = conf->nb_desc;
1349         qdma_dev->vqs[vchan].enqueue_job = dpdmai_dev_submit_multi;
1350
1351         return 0;
1352 }
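
/*
 * Hedged setup sketch (not part of the driver): the usual dmadev bring-up
 * sequence that ends in this vchan_setup hook.  All values are placeholders.
 *
 *     struct rte_dma_conf dma_conf = { .nb_vchans = 1 };
 *     struct rte_dma_vchan_conf vconf = {
 *         .direction = RTE_DMA_DIR_MEM_TO_MEM,
 *         .nb_desc = 256,
 *     };
 *
 *     rte_dma_configure(dev_id, &dma_conf);
 *     rte_dma_vchan_setup(dev_id, 0, &vconf);
 *     rte_dma_start(dev_id);
 */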
1353
1354 static int
1355 dpaa2_qdma_start(struct rte_dma_dev *dev)
1356 {
1357         struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
1358         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1359
1360         DPAA2_QDMA_FUNC_TRACE();
1361
1362         qdma_dev->state = 1;
1363
1364         return 0;
1365 }
1366
1367 static int
1368 dpaa2_qdma_stop(struct rte_dma_dev *dev)
1369 {
1370         struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
1371         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1372
1373         DPAA2_QDMA_FUNC_TRACE();
1374
1375         qdma_dev->state = 0;
1376
1377         return 0;
1378 }
1379
1380 static int
1381 dpaa2_qdma_reset(struct rte_dma_dev *dev)
1382 {
1383         struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
1384         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1385         int i;
1386
1387         DPAA2_QDMA_FUNC_TRACE();
1388
1389         /* In case QDMA device is not in stopped state, return -EBUSY */
1390         if (qdma_dev->state == 1) {
1391                 DPAA2_QDMA_ERR(
1392                         "Device is in running state. Stop before reset.");
1393                 return -EBUSY;
1394         }
1395
1396         /* In case there are pending jobs on any VQ, return -EBUSY */
1397         for (i = 0; i < qdma_dev->num_vqs; i++) {
1398                 if (qdma_dev->vqs[i].in_use && (qdma_dev->vqs[i].num_enqueues !=
1399                     qdma_dev->vqs[i].num_dequeues)) {
1400                         DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
1401                         return -EBUSY;
1402                 }
1403         }
1404
1405         /* Reset and free virtual queues */
1406         for (i = 0; i < qdma_dev->num_vqs; i++) {
1407                 if (qdma_dev->vqs[i].status_ring)
1408                         rte_ring_free(qdma_dev->vqs[i].status_ring);
                if (qdma_dev->vqs[i].job_pool)
                        rte_mempool_free(qdma_dev->vqs[i].job_pool);
1409         }
1410         if (qdma_dev->vqs)
1411                 rte_free(qdma_dev->vqs);
1412         qdma_dev->vqs = NULL;
1413
1414         /* Reset QDMA device structure */
1415         qdma_dev->num_vqs = 0;
1416
1417         return 0;
1418 }
1419
1420 static int
1421 dpaa2_qdma_close(struct rte_dma_dev *dev)
1422 {
1423         DPAA2_QDMA_FUNC_TRACE();
1424
1425         return dpaa2_qdma_reset(dev);
1428 }
1429
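/*
 * Report how many more jobs the virtual queue can accept: the descriptor
 * count requested at vchan_setup() time minus the jobs currently held by the
 * queue.
 */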
1430 static uint16_t
1431 dpaa2_qdma_burst_capacity(const void *dev_private, uint16_t vchan)
1432 {
1433         const struct dpaa2_dpdmai_dev *dpdmai_dev = dev_private;
1434         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1435         struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vchan];
1436
1437         return qdma_vq->nb_desc - qdma_vq->num_valid_jobs;
1438 }
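
/*
 * A minimal data-path sketch (illustrative only): the capacity reported above
 * pairs with the copy/submit/completed fast-path handlers wired up in
 * dpaa2_qdma_probe().  dev_id, src, dst and len are placeholders supplied by
 * the caller.
 *
 *	uint16_t nb_done;
 *	bool has_error = false;
 *
 *	if (rte_dma_burst_capacity(dev_id, 0) > 0) {
 *		rte_dma_copy(dev_id, 0, src, dst, len, RTE_DMA_OP_FLAG_SUBMIT);
 *		nb_done = rte_dma_completed(dev_id, 0, 1, NULL, &has_error);
 *	}
 */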
1439
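/*
 * Control-path callbacks only; the fast-path copy/submit/completed handlers
 * are installed on dev->fp_obj in dpaa2_qdma_probe().
 */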
1440 static struct rte_dma_dev_ops dpaa2_qdma_ops = {
1441         .dev_info_get     = dpaa2_qdma_info_get,
1442         .dev_configure    = dpaa2_qdma_configure,
1443         .dev_start        = dpaa2_qdma_start,
1444         .dev_stop         = dpaa2_qdma_stop,
1445         .dev_close        = dpaa2_qdma_close,
1446         .vchan_setup      = dpaa2_qdma_vchan_setup,
1447 };
1448
1449 static int
1450 dpaa2_dpdmai_dev_uninit(struct rte_dma_dev *dev)
1451 {
1452         struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
1453         int ret;
1454
1455         DPAA2_QDMA_FUNC_TRACE();
1456
1457         ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1458                              dpdmai_dev->token);
1459         if (ret)
1460                 DPAA2_QDMA_ERR("dpdmai disable failed");
1461
1462         /* Free the DQ storage allocated for the Rx queue */
1463         struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
1464
1465         if (rxq->q_storage) {
1466                 dpaa2_free_dq_storage(rxq->q_storage);
1467                 rte_free(rxq->q_storage);
1468         }
1469
1470         /* Close the device at the underlying layer */
1471         ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
1472         if (ret)
1473                 DPAA2_QDMA_ERR("Failure closing dpdmai device");
1474
1475         return 0;
1476 }
1477
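/*
 * Bring up one DPDMAI object: open it through the MC portal, read its
 * attributes, configure queue 0 for Rx and allocate its DQ storage, cache the
 * Rx/Tx FQIDs, enable the object, select the cache-coherency transaction
 * types for the running SoC and finally reset the QDMA software state.
 */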
1478 static int
1479 dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
1480 {
1481         struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
1482         struct dpdmai_rx_queue_cfg rx_queue_cfg;
1483         struct dpdmai_attr attr;
1484         struct dpdmai_rx_queue_attr rx_attr;
1485         struct dpdmai_tx_queue_attr tx_attr;
1486         struct dpaa2_queue *rxq;
1487         int ret;
1488
1489         DPAA2_QDMA_FUNC_TRACE();
1490
1491         /* Open DPDMAI device */
1492         dpdmai_dev->dpdmai_id = dpdmai_id;
1493         dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
1494         dpdmai_dev->qdma_dev = rte_malloc(NULL, sizeof(struct qdma_device),
1495                                           RTE_CACHE_LINE_SIZE);
        if (!dpdmai_dev->qdma_dev) {
                DPAA2_QDMA_ERR("qdma_device allocation failed");
                return -ENOMEM;
        }
1496         ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1497                           dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
1498         if (ret) {
1499                 DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
1500                 return ret;
1501         }
1502
1503         /* Get DPDMAI attributes */
1504         ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1505                                     dpdmai_dev->token, &attr);
1506         if (ret) {
1507                 DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
1508                                ret);
1509                 goto init_err;
1510         }
1511         dpdmai_dev->num_queues = attr.num_of_queues;
1512
1513         /* Set up Rx Queue */
1514         memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
1515         ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
1516                                   CMD_PRI_LOW,
1517                                   dpdmai_dev->token,
1518                                   0, 0, &rx_queue_cfg);
1519         if (ret) {
1520                 DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
1521                                ret);
1522                 goto init_err;
1523         }
1524
1525         /* Allocate DQ storage for the DPDMAI Rx queues */
1526         rxq = &(dpdmai_dev->rx_queue[0]);
1527         rxq->q_storage = rte_malloc("dq_storage",
1528                                     sizeof(struct queue_storage_info_t),
1529                                     RTE_CACHE_LINE_SIZE);
1530         if (!rxq->q_storage) {
1531                 DPAA2_QDMA_ERR("q_storage allocation failed");
1532                 ret = -ENOMEM;
1533                 goto init_err;
1534         }
1535
1536         memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
1537         ret = dpaa2_alloc_dq_storage(rxq->q_storage);
1538         if (ret) {
1539                 DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
1540                 goto init_err;
1541         }
1542
1543         /* Get the Rx and Tx queue FQIDs */
1544         ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1545                                   dpdmai_dev->token, 0, 0, &rx_attr);
1546         if (ret) {
1547                 DPAA2_QDMA_ERR("Reading Rx queue failed with err: %d",
1548                                ret);
1549                 goto init_err;
1550         }
1551         dpdmai_dev->rx_queue[0].fqid = rx_attr.fqid;
1552
1553         ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1554                                   dpdmai_dev->token, 0, 0, &tx_attr);
1555         if (ret) {
1556                 DPAA2_QDMA_ERR("Reading Tx queue failed with err: %d",
1557                                ret);
1558                 goto init_err;
1559         }
1560         dpdmai_dev->tx_queue[0].fqid = tx_attr.fqid;
1561
1562         /* Enable the device */
1563         ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1564                             dpdmai_dev->token);
1565         if (ret) {
1566                 DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
1567                 goto init_err;
1568         }
1569
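        /*
         * Select the read/write transaction cache attributes once, based on
         * the SoC family; the enqueue path uses these when it builds frame
         * descriptors.
         */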
1570         if (!dpaa2_coherent_no_alloc_cache) {
1571                 if (dpaa2_svr_family == SVR_LX2160A) {
1572                         dpaa2_coherent_no_alloc_cache =
1573                                 DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
1574                         dpaa2_coherent_alloc_cache =
1575                                 DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
1576                 } else {
1577                         dpaa2_coherent_no_alloc_cache =
1578                                 DPAA2_COHERENT_NO_ALLOCATE_CACHE;
1579                         dpaa2_coherent_alloc_cache =
1580                                 DPAA2_COHERENT_ALLOCATE_CACHE;
1581                 }
1582         }
1583
1584         DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
1585
1586         /* Reset the QDMA device */
1587         ret = dpaa2_qdma_reset(dev);
1588         if (ret) {
1589                 DPAA2_QDMA_ERR("Resetting QDMA failed");
1590                 goto init_err;
1591         }
1592
1593         return 0;
1594 init_err:
1595         dpaa2_dpdmai_dev_uninit(dev);
1596         return ret;
1597 }
1598
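/*
 * Probe one DPDMAI object found on the fslmc bus: allocate the dmadev, wire
 * up the control-path ops and fast-path handlers, initialize the underlying
 * DPDMAI object and mark the device ready.
 */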
1599 static int
1600 dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
1601                  struct rte_dpaa2_device *dpaa2_dev)
1602 {
1603         struct rte_dma_dev *dmadev;
1604         int ret;
1605
1606         DPAA2_QDMA_FUNC_TRACE();
1607
1608         RTE_SET_USED(dpaa2_drv);
1609
1610         dmadev = rte_dma_pmd_allocate(dpaa2_dev->device.name,
1611                                       rte_socket_id(),
1612                                       sizeof(struct dpaa2_dpdmai_dev));
1613         if (!dmadev) {
1614                 DPAA2_QDMA_ERR("Unable to allocate DMA device");
1615                 return -EINVAL;
1616         }
1617
1618         dpaa2_dev->dmadev = dmadev;
1619         dmadev->dev_ops = &dpaa2_qdma_ops;
1620         dmadev->device = &dpaa2_dev->device;
1621         dmadev->fp_obj->dev_private = dmadev->data->dev_private;
1622         dmadev->fp_obj->copy = dpaa2_qdma_enqueue;
1623         dmadev->fp_obj->submit = dpaa2_qdma_submit;
1624         dmadev->fp_obj->completed = dpaa2_qdma_dequeue;
1625         dmadev->fp_obj->completed_status = dpaa2_qdma_dequeue_status;
1626         dmadev->fp_obj->burst_capacity = dpaa2_qdma_burst_capacity;
1627
1628         /* Invoke PMD device initialization function */
1629         ret = dpaa2_dpdmai_dev_init(dmadev, dpaa2_dev->object_id);
1630         if (ret) {
1631                 rte_dma_pmd_release(dpaa2_dev->device.name);
1632                 return ret;
1633         }
1634
1635         dmadev->state = RTE_DMA_DEV_READY;
1636         return 0;
1637 }
1638
1639 static int
1640 dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
1641 {
1642         struct rte_dma_dev *dmadev = dpaa2_dev->dmadev;
1643         int ret;
1644
1645         DPAA2_QDMA_FUNC_TRACE();
1646
1647         dpaa2_dpdmai_dev_uninit(dmadev);
1648
1649         ret = rte_dma_pmd_release(dpaa2_dev->device.name);
1650         if (ret)
1651                 DPAA2_QDMA_ERR("Device cleanup failed");
1652
1653         return 0;
1654 }
1655
1656 static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd;
1657
1658 static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
1659         .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
1660         .drv_type = DPAA2_QDMA,
1661         .probe = dpaa2_qdma_probe,
1662         .remove = dpaa2_qdma_remove,
1663 };
1664
1665 RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
1666 RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
1667         "prefetch=<int> ");
1668 RTE_LOG_REGISTER_DEFAULT(dpaa2_qdma_logtype, INFO);