1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2019 NXP
3  */
4
5 #include <string.h>
6
7 #include <rte_eal.h>
8 #include <rte_fslmc.h>
9 #include <rte_atomic.h>
10 #include <rte_lcore.h>
11 #include <rte_rawdev.h>
12 #include <rte_rawdev_pmd.h>
13 #include <rte_malloc.h>
14 #include <rte_ring.h>
15 #include <rte_mempool.h>
16 #include <rte_prefetch.h>
17 #include <rte_kvargs.h>
18
19 #include <mc/fsl_dpdmai.h>
20 #include <portal/dpaa2_hw_pvt.h>
21 #include <portal/dpaa2_hw_dpio.h>
22
23 #include "rte_pmd_dpaa2_qdma.h"
24 #include "dpaa2_qdma.h"
25 #include "dpaa2_qdma_logs.h"
26
27 #define DPAA2_QDMA_NO_PREFETCH "no_prefetch"
28
29 /* Dynamic log type identifier */
30 int dpaa2_qdma_logtype;
31
32 uint32_t dpaa2_coherent_no_alloc_cache;
33 uint32_t dpaa2_coherent_alloc_cache;
34
35 /* QDMA device */
36 static struct qdma_device qdma_dev;
37
38 /* QDMA H/W queues list */
39 TAILQ_HEAD(qdma_hw_queue_list, qdma_hw_queue);
40 static struct qdma_hw_queue_list qdma_queue_list
41         = TAILQ_HEAD_INITIALIZER(qdma_queue_list);
42
43 /* QDMA Virtual Queues */
44 static struct qdma_virt_queue *qdma_vqs;
45
46 /* QDMA per core data */
47 static struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];
48
49 typedef int (dpdmai_dev_dequeue_multijob_t)(struct dpaa2_dpdmai_dev *dpdmai_dev,
50                                             uint16_t rxq_id,
51                                             uint16_t *vq_id,
52                                             struct rte_qdma_job **job,
53                                             uint16_t nb_jobs);
54
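/*
 * Dequeue routine used by rte_qdma_vq_dequeue_multi(). It is selected once at
 * device init time: the prefetch variant by default, or the no-prefetch
 * variant when the "no_prefetch" devarg is set (see dpaa2_dpdmai_dev_init()).
 */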
55 dpdmai_dev_dequeue_multijob_t *dpdmai_dev_dequeue_multijob;
56
57 static struct qdma_hw_queue *
58 alloc_hw_queue(uint32_t lcore_id)
59 {
60         struct qdma_hw_queue *queue = NULL;
61
62         DPAA2_QDMA_FUNC_TRACE();
63
64         /* Get a free queue from the list */
65         TAILQ_FOREACH(queue, &qdma_queue_list, next) {
66                 if (queue->num_users == 0) {
67                         queue->lcore_id = lcore_id;
68                         queue->num_users++;
69                         break;
70                 }
71         }
72
73         return queue;
74 }
75
76 static void
77 free_hw_queue(struct qdma_hw_queue *queue)
78 {
79         DPAA2_QDMA_FUNC_TRACE();
80
81         queue->num_users--;
82 }
83
84
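/*
 * Pick a H/W queue for the given lcore: allocate a dedicated queue while the
 * core holds fewer than max_hw_queues_per_core, otherwise reuse the least
 * loaded of the queues already bound to this core. Sharing is tracked through
 * the per-queue num_users reference count.
 */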
85 static struct qdma_hw_queue *
86 get_hw_queue(uint32_t lcore_id)
87 {
88         struct qdma_per_core_info *core_info;
89         struct qdma_hw_queue *queue, *temp;
90         uint32_t least_num_users;
91         int num_hw_queues, i;
92
93         DPAA2_QDMA_FUNC_TRACE();
94
95         core_info = &qdma_core_info[lcore_id];
96         num_hw_queues = core_info->num_hw_queues;
97
98         /*
99          * Allocate a HW queue if there are fewer queues
 100          * than the maximum per-core queues configured
101          */
102         if (num_hw_queues < qdma_dev.max_hw_queues_per_core) {
103                 queue = alloc_hw_queue(lcore_id);
104                 if (queue) {
105                         core_info->hw_queues[num_hw_queues] = queue;
106                         core_info->num_hw_queues++;
107                         return queue;
108                 }
109         }
110
111         queue = core_info->hw_queues[0];
112         /* In case there is no queue associated with the core return NULL */
113         if (!queue)
114                 return NULL;
115
116         /* Fetch the least loaded H/W queue */
117         least_num_users = core_info->hw_queues[0]->num_users;
118         for (i = 0; i < num_hw_queues; i++) {
119                 temp = core_info->hw_queues[i];
120                 if (temp->num_users < least_num_users) {
 121                         least_num_users = temp->num_users;
 122                         queue = temp;
 123                 }
 124         }
123
124         if (queue)
125                 queue->num_users++;
126
127         return queue;
128 }
129
130 static void
131 put_hw_queue(struct qdma_hw_queue *queue)
132 {
133         struct qdma_per_core_info *core_info;
134         int lcore_id, num_hw_queues, i;
135
136         DPAA2_QDMA_FUNC_TRACE();
137
138         /*
139          * If this is the last user of the queue free it.
140          * Also remove it from QDMA core info.
141          */
142         if (queue->num_users == 1) {
143                 free_hw_queue(queue);
144
145                 /* Remove the physical queue from core info */
146                 lcore_id = queue->lcore_id;
147                 core_info = &qdma_core_info[lcore_id];
148                 num_hw_queues = core_info->num_hw_queues;
149                 for (i = 0; i < num_hw_queues; i++) {
150                         if (queue == core_info->hw_queues[i])
151                                 break;
152                 }
153                 for (; i < num_hw_queues - 1; i++)
154                         core_info->hw_queues[i] = core_info->hw_queues[i + 1];
155                 core_info->hw_queues[i] = NULL;
156         } else {
157                 queue->num_users--;
158         }
159 }
160
161 int
162 rte_qdma_init(void)
163 {
164         DPAA2_QDMA_FUNC_TRACE();
165
166         rte_spinlock_init(&qdma_dev.lock);
167
168         return 0;
169 }
170
171 void
172 rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr)
173 {
174         DPAA2_QDMA_FUNC_TRACE();
175
176         qdma_attr->num_hw_queues = qdma_dev.num_hw_queues;
177 }
178
179 int
180 rte_qdma_reset(void)
181 {
182         struct qdma_hw_queue *queue;
183         int i;
184
185         DPAA2_QDMA_FUNC_TRACE();
186
187         /* In case QDMA device is not in stopped state, return -EBUSY */
188         if (qdma_dev.state == 1) {
189                 DPAA2_QDMA_ERR(
190                         "Device is in running state. Stop before reset.");
191                 return -EBUSY;
192         }
193
194         /* In case there are pending jobs on any VQ, return -EBUSY */
195         for (i = 0; i < qdma_dev.max_vqs; i++) {
196                 if (qdma_vqs[i].in_use && (qdma_vqs[i].num_enqueues !=
 197                     qdma_vqs[i].num_dequeues)) {
 198                         DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
 199                         return -EBUSY;
 200                 }
 201         }
201
202         /* Reset HW queues */
203         TAILQ_FOREACH(queue, &qdma_queue_list, next)
204                 queue->num_users = 0;
205
206         /* Reset and free virtual queues */
207         for (i = 0; i < qdma_dev.max_vqs; i++) {
208                 if (qdma_vqs[i].status_ring)
209                         rte_ring_free(qdma_vqs[i].status_ring);
210         }
211         if (qdma_vqs)
212                 rte_free(qdma_vqs);
213         qdma_vqs = NULL;
214
215         /* Reset per core info */
216         memset(&qdma_core_info, 0,
217                 sizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);
218
219         /* Free the FLE pool */
220         if (qdma_dev.fle_pool)
221                 rte_mempool_free(qdma_dev.fle_pool);
222
223         /* Reset QDMA device structure */
224         qdma_dev.mode = RTE_QDMA_MODE_HW;
225         qdma_dev.max_hw_queues_per_core = 0;
226         qdma_dev.fle_pool = NULL;
227         qdma_dev.fle_pool_count = 0;
228         qdma_dev.max_vqs = 0;
229
230         return 0;
231 }
232
233 int
234 rte_qdma_configure(struct rte_qdma_config *qdma_config)
235 {
236         int ret;
237         char fle_pool_name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
238
239         DPAA2_QDMA_FUNC_TRACE();
240
241         /* In case QDMA device is not in stopped state, return -EBUSY */
242         if (qdma_dev.state == 1) {
243                 DPAA2_QDMA_ERR(
244                         "Device is in running state. Stop before config.");
245                 return -EBUSY;
246         }
247
248         /* Reset the QDMA device */
249         ret = rte_qdma_reset();
250         if (ret) {
251                 DPAA2_QDMA_ERR("Resetting QDMA failed");
252                 return ret;
253         }
254
255         /* Set mode */
256         qdma_dev.mode = qdma_config->mode;
257
258         /* Set max HW queue per core */
259         if (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {
260                 DPAA2_QDMA_ERR("H/W queues per core is more than: %d",
261                                MAX_HW_QUEUE_PER_CORE);
262                 return -EINVAL;
263         }
264         qdma_dev.max_hw_queues_per_core =
265                 qdma_config->max_hw_queues_per_core;
266
267         /* Allocate Virtual Queues */
268         qdma_vqs = rte_malloc("qdma_virtual_queues",
269                         (sizeof(struct qdma_virt_queue) * qdma_config->max_vqs),
270                         RTE_CACHE_LINE_SIZE);
271         if (!qdma_vqs) {
272                 DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
273                 return -ENOMEM;
274         }
275         qdma_dev.max_vqs = qdma_config->max_vqs;
276
277         /* Allocate FLE pool; just append PID so that in case of
278          * multiprocess, the pools don't collide.
279          */
280         snprintf(fle_pool_name, sizeof(fle_pool_name), "qdma_fle_pool%u",
281                  getpid());
282         qdma_dev.fle_pool = rte_mempool_create(fle_pool_name,
283                         qdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,
284                         QDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,
285                         NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
286         if (!qdma_dev.fle_pool) {
287                 DPAA2_QDMA_ERR("qdma_fle_pool create failed");
288                 rte_free(qdma_vqs);
289                 qdma_vqs = NULL;
290                 return -ENOMEM;
291         }
292         qdma_dev.fle_pool_count = qdma_config->fle_pool_count;
293
294         return 0;
295 }
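/*
 * A minimal configuration sketch, kept here for reference (illustrative only:
 * the values are placeholders and error handling is trimmed, not a mandated
 * sequence):
 *
 *	struct rte_qdma_config cfg = {
 *		.max_hw_queues_per_core = 2,
 *		.fle_pool_count = 4096,
 *		.mode = RTE_QDMA_MODE_HW,
 *		.max_vqs = 32,
 *	};
 *
 *	rte_qdma_init();
 *	if (rte_qdma_configure(&cfg) < 0)
 *		return -1;
 *	rte_qdma_start();
 */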
296
297 int
298 rte_qdma_start(void)
299 {
300         DPAA2_QDMA_FUNC_TRACE();
301
302         qdma_dev.state = 1;
303
304         return 0;
305 }
306
307 int
308 rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)
309 {
310         char ring_name[32];
311         int i;
312
313         DPAA2_QDMA_FUNC_TRACE();
314
315         rte_spinlock_lock(&qdma_dev.lock);
316
317         /* Get a free Virtual Queue */
318         for (i = 0; i < qdma_dev.max_vqs; i++) {
319                 if (qdma_vqs[i].in_use == 0)
320                         break;
321         }
322
323         /* Return in case no VQ is free */
324         if (i == qdma_dev.max_vqs) {
325                 rte_spinlock_unlock(&qdma_dev.lock);
326                 DPAA2_QDMA_ERR("No free virtual queue available");
327                 return -ENODEV;
328         }
329
330         if (qdma_dev.mode == RTE_QDMA_MODE_HW ||
331                         (flags & RTE_QDMA_VQ_EXCLUSIVE_PQ)) {
332                 /* Allocate HW queue for a VQ */
333                 qdma_vqs[i].hw_queue = alloc_hw_queue(lcore_id);
334                 qdma_vqs[i].exclusive_hw_queue = 1;
335         } else {
336                 /* Allocate a Ring for Virtual Queue in VQ mode */
337                 snprintf(ring_name, sizeof(ring_name), "status ring %d", i);
338                 qdma_vqs[i].status_ring = rte_ring_create(ring_name,
339                         qdma_dev.fle_pool_count, rte_socket_id(), 0);
340                 if (!qdma_vqs[i].status_ring) {
341                         DPAA2_QDMA_ERR("Status ring creation failed for vq");
342                         rte_spinlock_unlock(&qdma_dev.lock);
343                         return rte_errno;
344                 }
345
346                 /* Get a HW queue (shared) for a VQ */
347                 qdma_vqs[i].hw_queue = get_hw_queue(lcore_id);
348                 qdma_vqs[i].exclusive_hw_queue = 0;
349         }
350
351         if (qdma_vqs[i].hw_queue == NULL) {
352                 DPAA2_QDMA_ERR("No H/W queue available for VQ");
353                 if (qdma_vqs[i].status_ring)
354                         rte_ring_free(qdma_vqs[i].status_ring);
355                 qdma_vqs[i].status_ring = NULL;
356                 rte_spinlock_unlock(&qdma_dev.lock);
357                 return -ENODEV;
358         }
359
360         qdma_vqs[i].in_use = 1;
361         qdma_vqs[i].lcore_id = lcore_id;
362         memset(&qdma_vqs[i].rbp, 0, sizeof(struct rte_qdma_rbp));
363         rte_spinlock_unlock(&qdma_dev.lock);
364
365         return i;
366 }
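/*
 * Note: with RTE_QDMA_MODE_HW, or when RTE_QDMA_VQ_EXCLUSIVE_PQ is passed in
 * "flags", the virtual queue gets a dedicated H/W queue; otherwise it shares
 * a H/W queue with other VQs of the lcore and completions are demultiplexed
 * through the per-VQ status ring. The returned value is the VQ id to pass to
 * the enqueue/dequeue calls, which must run on "lcore_id".
 */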
367
368 /* Create a virtual queue for route-by-port (RBP) */
369 int
370 rte_qdma_vq_create_rbp(uint32_t lcore_id, uint32_t flags,
371                         struct rte_qdma_rbp *rbp)
372 {
373         int i;
374
375         i = rte_qdma_vq_create(lcore_id, flags);
 376         if (i < 0)
 377                 return i;
 378
377         memcpy(&qdma_vqs[i].rbp, rbp, sizeof(struct rte_qdma_rbp));
378
379         return i;
380 }
381
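/*
 * Lay out one compound frame for a copy job: fle[0] points to the pair of
 * source/destination descriptors (SDDs) stored right after the frame list,
 * fle[1] describes the source buffer and fle[2] the destination buffer, with
 * the FIN bit terminating the list. When route-by-port is enabled, the SDDs
 * also carry the port/PF/VF identifiers taken from "rbp".
 */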
382 static void
383 dpaa2_qdma_populate_fle(struct qbman_fle *fle,
384                         struct rte_qdma_rbp *rbp,
385                         uint64_t src, uint64_t dest,
386                         size_t len, uint32_t flags)
387 {
388         struct qdma_sdd *sdd;
389
390         sdd = (struct qdma_sdd *)((uint8_t *)(fle) +
391                 (DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle)));
392
393         /* first frame list to source descriptor */
394         DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sdd));
395         DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));
396
397         /* source and destination descriptor */
398         if (rbp && rbp->enable) {
399                 /* source */
400                 sdd->read_cmd.portid = rbp->sportid;
401                 sdd->rbpcmd_simple.pfid = rbp->spfid;
402                 sdd->rbpcmd_simple.vfid = rbp->svfid;
403
404                 if (rbp->srbp) {
405                         sdd->read_cmd.rbp = rbp->srbp;
406                         sdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW;
407                 } else {
408                         sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
409                 }
410                 sdd++;
411                 /* destination */
412                 sdd->write_cmd.portid = rbp->dportid;
413                 sdd->rbpcmd_simple.pfid = rbp->dpfid;
414                 sdd->rbpcmd_simple.vfid = rbp->dvfid;
415
416                 if (rbp->drbp) {
417                         sdd->write_cmd.rbp = rbp->drbp;
418                         sdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW;
419                 } else {
420                         sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
421                 }
422
423         } else {
424                 sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
425                 sdd++;
426                 sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
427         }
428         fle++;
429         /* source frame list to source buffer */
430         if (flags & RTE_QDMA_JOB_SRC_PHY) {
431                 DPAA2_SET_FLE_ADDR(fle, src);
432                 DPAA2_SET_FLE_BMT(fle);
433         } else {
434                 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
435         }
436         DPAA2_SET_FLE_LEN(fle, len);
437
438         fle++;
439         /* destination frame list to destination buffer */
440         if (flags & RTE_QDMA_JOB_DEST_PHY) {
441                 DPAA2_SET_FLE_BMT(fle);
442                 DPAA2_SET_FLE_ADDR(fle, dest);
443         } else {
444                 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
445         }
446         DPAA2_SET_FLE_LEN(fle, len);
447
448         /* Final bit: 1, for last frame list */
449         DPAA2_SET_FLE_FIN(fle);
450 }
451
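/*
 * Build the frame descriptor for one job. Each element taken from the FLE
 * pool is laid out as:
 *
 *	struct qdma_io_meta | DPAA2_QDMA_MAX_FLE x struct qbman_fle | SDDs
 *
 * The metadata in front records the job pointer and the VQ id so that both
 * can be recovered on completion in dpdmai_dev_get_job().
 */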
452 static inline int dpdmai_dev_set_fd(struct qbman_fd *fd,
 453                                     struct rte_qdma_job *job,
 454                                     struct rte_qdma_rbp *rbp,
 455                                     uint16_t vq_id)
456 {
457         struct qdma_io_meta *io_meta;
458         struct qbman_fle *fle;
459         int ret = 0;
460         /*
461          * Get an FLE/SDD from FLE pool.
462          * Note: IO metadata is before the FLE and SDD memory.
463          */
464         ret = rte_mempool_get(qdma_dev.fle_pool, (void **)(&io_meta));
465         if (ret) {
466                 DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
467                 return ret;
468         }
469
470         /* Set the metadata */
471         io_meta->cnxt = (size_t)job;
472         io_meta->id = vq_id;
473
474         fle = (struct qbman_fle *)(io_meta + 1);
475
476         DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
477         DPAA2_SET_FD_COMPOUND_FMT(fd);
478         DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
479
480         /* Populate FLE */
481         memset(fle, 0, QDMA_FLE_POOL_SIZE);
482         dpaa2_qdma_populate_fle(fle, rbp, job->src, job->dest,
483                                 job->len, job->flags);
484
485         return 0;
486 }
487
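/*
 * Push up to "nb_jobs" jobs to the DPDMAI Tx queue. Jobs are converted to
 * frame descriptors and enqueued through the QBMAN portal in batches of at
 * most dpaa2_eqcr_size; a busy enqueue ring is retried up to
 * DPAA2_MAX_TX_RETRY_COUNT times. Returns the number of jobs actually handed
 * to hardware.
 */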
488 static int
489 dpdmai_dev_enqueue_multi(struct dpaa2_dpdmai_dev *dpdmai_dev,
490                         uint16_t txq_id,
491                         uint16_t vq_id,
492                         struct rte_qdma_rbp *rbp,
493                         struct rte_qdma_job **job,
494                         uint16_t nb_jobs)
495 {
496         struct qbman_fd fd[RTE_QDMA_BURST_NB_MAX];
497         struct dpaa2_queue *txq;
498         struct qbman_eq_desc eqdesc;
499         struct qbman_swp *swp;
500         int ret;
501         uint32_t num_to_send = 0;
502         uint16_t num_tx = 0;
503
504         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
505                 ret = dpaa2_affine_qbman_swp();
506                 if (ret) {
507                         DPAA2_QDMA_ERR("Failure in affining portal");
508                         return 0;
509                 }
510         }
511         swp = DPAA2_PER_LCORE_PORTAL;
512
513         txq = &(dpdmai_dev->tx_queue[txq_id]);
514
515         /* Prepare enqueue descriptor */
516         qbman_eq_desc_clear(&eqdesc);
517         qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
518         qbman_eq_desc_set_no_orp(&eqdesc, 0);
519         qbman_eq_desc_set_response(&eqdesc, 0, 0);
520
521         memset(fd, 0, RTE_QDMA_BURST_NB_MAX * sizeof(struct qbman_fd));
522
523         while (nb_jobs > 0) {
524                 uint32_t loop;
525
526                 num_to_send = (nb_jobs > dpaa2_eqcr_size) ?
527                         dpaa2_eqcr_size : nb_jobs;
528
529                 for (loop = 0; loop < num_to_send; loop++) {
530                         ret = dpdmai_dev_set_fd(&fd[loop],
531                                                 job[num_tx], rbp, vq_id);
532                         if (ret < 0) {
533                                 /* Set nb_jobs to loop, so outer while loop
534                                  * breaks out.
535                                  */
536                                 nb_jobs = loop;
537                                 break;
538                         }
539
540                         num_tx++;
541                 }
542
543                 /* Enqueue the packet to the QBMAN */
544                 uint32_t enqueue_loop = 0, retry_count = 0;
545                 while (enqueue_loop < loop) {
546                         ret = qbman_swp_enqueue_multiple(swp,
547                                                 &eqdesc,
548                                                 &fd[enqueue_loop],
549                                                 NULL,
550                                                 loop - enqueue_loop);
551                         if (unlikely(ret < 0)) {
552                                 retry_count++;
553                                 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
554                                         return num_tx - (loop - enqueue_loop);
555                         } else {
556                                 enqueue_loop += ret;
557                                 retry_count = 0;
558                         }
559                 }
560                 nb_jobs -= loop;
561         }
562         return num_tx;
563 }
564
565 int
566 rte_qdma_vq_enqueue_multi(uint16_t vq_id,
567                           struct rte_qdma_job **job,
568                           uint16_t nb_jobs)
569 {
570         struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
571         struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
572         struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
573         int ret;
574
575         /* Return error in case of wrong lcore_id */
576         if (rte_lcore_id() != qdma_vq->lcore_id) {
577                 DPAA2_QDMA_ERR("QDMA enqueue for vqid %d on wrong core",
578                                 vq_id);
579                 return -EINVAL;
580         }
581
582         ret = dpdmai_dev_enqueue_multi(dpdmai_dev,
583                                  qdma_pq->queue_id,
584                                  vq_id,
585                                  &qdma_vq->rbp,
586                                  job,
587                                  nb_jobs);
588         if (ret < 0) {
589                 DPAA2_QDMA_ERR("DPDMAI device enqueue failed: %d", ret);
590                 return ret;
591         }
592
593         qdma_vq->num_enqueues += ret;
594
595         return ret;
596 }
597
598 int
599 rte_qdma_vq_enqueue(uint16_t vq_id,
600                     struct rte_qdma_job *job)
601 {
602         return rte_qdma_vq_enqueue_multi(vq_id, &job, 1);
603 }
604
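/*
 * Recover the job and its owning VQ id from a completed frame descriptor:
 * the job status is composed of the FD error bits (upper byte) and the low
 * byte of the frame result/context field, and the FLE pool element is then
 * returned to the pool.
 */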
605 static inline uint16_t dpdmai_dev_get_job(const struct qbman_fd *fd,
606                                         struct rte_qdma_job **job)
607 {
608         struct qbman_fle *fle;
609         struct qdma_io_meta *io_meta;
610         uint16_t vqid;
611         /*
612          * Fetch metadata from FLE. job and vq_id were set
613          * in metadata in the enqueue operation.
614          */
615         fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
616         io_meta = (struct qdma_io_meta *)(fle) - 1;
617
618         *job = (struct rte_qdma_job *)(size_t)io_meta->cnxt;
619         (*job)->status = (DPAA2_GET_FD_ERR(fd) << 8) |
620                          (DPAA2_GET_FD_FRC(fd) & 0xFF);
621
622         vqid = io_meta->id;
623
624         /* Free FLE to the pool */
625         rte_mempool_put(qdma_dev.fle_pool, io_meta);
626
627         return vqid;
628 }
629
630 /* Function to receive a QDMA job for a given device and queue */
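/*
 * Prefetch variant: the two DQ storages of the queue are used in a ping-pong
 * (toggle) fashion and the volatile dequeue for the next burst is issued
 * before returning, so results are typically already available when the next
 * call arrives. The no-prefetch variant below issues one pull per call
 * instead.
 */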
631 static int
632 dpdmai_dev_dequeue_multijob_prefetch(
633                         struct dpaa2_dpdmai_dev *dpdmai_dev,
634                         uint16_t rxq_id,
635                         uint16_t *vq_id,
636                         struct rte_qdma_job **job,
637                         uint16_t nb_jobs)
638 {
639         struct dpaa2_queue *rxq;
640         struct qbman_result *dq_storage, *dq_storage1 = NULL;
641         struct qbman_pull_desc pulldesc;
642         struct qbman_swp *swp;
643         struct queue_storage_info_t *q_storage;
644         uint32_t fqid;
645         uint8_t status, pending;
646         uint8_t num_rx = 0;
647         const struct qbman_fd *fd;
648         uint16_t vqid;
649         int ret, pull_size;
650
651         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
652                 ret = dpaa2_affine_qbman_swp();
653                 if (ret) {
654                         DPAA2_QDMA_ERR("Failure in affining portal");
655                         return 0;
656                 }
657         }
658         swp = DPAA2_PER_LCORE_PORTAL;
659
660         pull_size = (nb_jobs > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_jobs;
661         rxq = &(dpdmai_dev->rx_queue[rxq_id]);
662         fqid = rxq->fqid;
663         q_storage = rxq->q_storage;
664
665         if (unlikely(!q_storage->active_dqs)) {
666                 q_storage->toggle = 0;
667                 dq_storage = q_storage->dq_storage[q_storage->toggle];
668                 q_storage->last_num_pkts = pull_size;
669                 qbman_pull_desc_clear(&pulldesc);
670                 qbman_pull_desc_set_numframes(&pulldesc,
671                                               q_storage->last_num_pkts);
672                 qbman_pull_desc_set_fq(&pulldesc, fqid);
673                 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
674                                 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
675                 if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
676                         while (!qbman_check_command_complete(
677                                get_swp_active_dqs(
678                                DPAA2_PER_LCORE_DPIO->index)))
679                                 ;
680                         clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
681                 }
682                 while (1) {
683                         if (qbman_swp_pull(swp, &pulldesc)) {
684                                 DPAA2_QDMA_DP_WARN(
685                                         "VDQ command not issued. QBMAN busy\n");
 686                                 /* Portal was busy, try again */
687                                 continue;
688                         }
689                         break;
690                 }
691                 q_storage->active_dqs = dq_storage;
692                 q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
693                 set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index,
694                                    dq_storage);
695         }
696
697         dq_storage = q_storage->active_dqs;
698         rte_prefetch0((void *)(size_t)(dq_storage));
699         rte_prefetch0((void *)(size_t)(dq_storage + 1));
700
701         /* Prepare next pull descriptor. This will give space for the
702          * prefetching done on DQRR entries
703          */
704         q_storage->toggle ^= 1;
705         dq_storage1 = q_storage->dq_storage[q_storage->toggle];
706         qbman_pull_desc_clear(&pulldesc);
707         qbman_pull_desc_set_numframes(&pulldesc, pull_size);
708         qbman_pull_desc_set_fq(&pulldesc, fqid);
709         qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
710                 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
711
712         /* Check if the previous issued command is completed.
713          * Also seems like the SWP is shared between the Ethernet Driver
714          * and the SEC driver.
715          */
716         while (!qbman_check_command_complete(dq_storage))
717                 ;
718         if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
719                 clear_swp_active_dqs(q_storage->active_dpio_id);
720
721         pending = 1;
722
723         do {
724                 /* Loop until the dq_storage is updated with
725                  * new token by QBMAN
726                  */
727                 while (!qbman_check_new_result(dq_storage))
728                         ;
729                 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
730                 /* Check whether Last Pull command is Expired and
731                  * setting Condition for Loop termination
732                  */
733                 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
734                         pending = 0;
735                         /* Check for valid frame. */
736                         status = qbman_result_DQ_flags(dq_storage);
737                         if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
738                                 continue;
739                 }
740                 fd = qbman_result_DQ_fd(dq_storage);
741
742                 vqid = dpdmai_dev_get_job(fd, &job[num_rx]);
743                 if (vq_id)
744                         vq_id[num_rx] = vqid;
745
746                 dq_storage++;
747                 num_rx++;
748         } while (pending);
749
750         if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
751                 while (!qbman_check_command_complete(
752                        get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
753                         ;
754                 clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
755         }
756         /* issue a volatile dequeue command for next pull */
757         while (1) {
758                 if (qbman_swp_pull(swp, &pulldesc)) {
759                         DPAA2_QDMA_DP_WARN("VDQ command is not issued. "
 760                                           "QBMAN is busy (2)\n");
761                         continue;
762                 }
763                 break;
764         }
765
766         q_storage->active_dqs = dq_storage1;
767         q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
768         set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
769
770         return num_rx;
771 }
772
773 static int
774 dpdmai_dev_dequeue_multijob_no_prefetch(
775                 struct dpaa2_dpdmai_dev *dpdmai_dev,
776                 uint16_t rxq_id,
777                 uint16_t *vq_id,
778                 struct rte_qdma_job **job,
779                 uint16_t nb_jobs)
780 {
781         struct dpaa2_queue *rxq;
782         struct qbman_result *dq_storage;
783         struct qbman_pull_desc pulldesc;
784         struct qbman_swp *swp;
785         uint32_t fqid;
786         uint8_t status, pending;
787         uint8_t num_rx = 0;
788         const struct qbman_fd *fd;
789         uint16_t vqid;
790         int ret, next_pull = nb_jobs, num_pulled = 0;
791
792         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
793                 ret = dpaa2_affine_qbman_swp();
794                 if (ret) {
795                         DPAA2_QDMA_ERR("Failure in affining portal");
796                         return 0;
797                 }
798         }
799         swp = DPAA2_PER_LCORE_PORTAL;
800
801         rxq = &(dpdmai_dev->rx_queue[rxq_id]);
802         fqid = rxq->fqid;
803
804         do {
805                 dq_storage = rxq->q_storage->dq_storage[0];
806                 /* Prepare dequeue descriptor */
807                 qbman_pull_desc_clear(&pulldesc);
808                 qbman_pull_desc_set_fq(&pulldesc, fqid);
809                 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
810                         (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
811
812                 if (next_pull > dpaa2_dqrr_size) {
813                         qbman_pull_desc_set_numframes(&pulldesc,
814                                         dpaa2_dqrr_size);
815                         next_pull -= dpaa2_dqrr_size;
816                 } else {
817                         qbman_pull_desc_set_numframes(&pulldesc, next_pull);
818                         next_pull = 0;
819                 }
820
821                 while (1) {
822                         if (qbman_swp_pull(swp, &pulldesc)) {
823                                 DPAA2_QDMA_DP_WARN("VDQ command not issued. QBMAN busy");
824                                 /* Portal was busy, try again */
825                                 continue;
826                         }
827                         break;
828                 }
829
830                 rte_prefetch0((void *)((size_t)(dq_storage + 1)));
831                 /* Check if the previous issued command is completed. */
832                 while (!qbman_check_command_complete(dq_storage))
833                         ;
834
835                 num_pulled = 0;
836                 pending = 1;
837
838                 do {
839                         /* Loop until dq_storage is updated
840                          * with new token by QBMAN
841                          */
842                         while (!qbman_check_new_result(dq_storage))
843                                 ;
844                         rte_prefetch0((void *)((size_t)(dq_storage + 2)));
845
846                         if (qbman_result_DQ_is_pull_complete(dq_storage)) {
847                                 pending = 0;
848                                 /* Check for valid frame. */
849                                 status = qbman_result_DQ_flags(dq_storage);
850                                 if (unlikely((status &
851                                         QBMAN_DQ_STAT_VALIDFRAME) == 0))
852                                         continue;
853                         }
854                         fd = qbman_result_DQ_fd(dq_storage);
855
856                         vqid = dpdmai_dev_get_job(fd, &job[num_rx]);
857                         if (vq_id)
858                                 vq_id[num_rx] = vqid;
859
860                         dq_storage++;
861                         num_rx++;
862                         num_pulled++;
863
864                 } while (pending);
865         /* Last VDQ provided all packets and more packets are requested */
866         } while (next_pull && num_pulled == dpaa2_dqrr_size);
867
868         return num_rx;
869 }
870
871 int
872 rte_qdma_vq_dequeue_multi(uint16_t vq_id,
873                           struct rte_qdma_job **job,
874                           uint16_t nb_jobs)
875 {
876         struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
877         struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
878         struct qdma_virt_queue *temp_qdma_vq;
879         struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
880         int ring_count, ret = 0, i;
881
882         /* Return error in case of wrong lcore_id */
883         if (rte_lcore_id() != (unsigned int)(qdma_vq->lcore_id)) {
884                 DPAA2_QDMA_WARN("QDMA dequeue for vqid %d on wrong core",
885                                 vq_id);
886                 return -EINVAL;
887         }
888
889         /* Only dequeue when there are pending jobs on VQ */
890         if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
891                 return 0;
892
893         if (qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))
894                 nb_jobs = (qdma_vq->num_enqueues -  qdma_vq->num_dequeues);
895
896         if (qdma_vq->exclusive_hw_queue) {
897                 /* In case of exclusive queue directly fetch from HW queue */
898                 ret = dpdmai_dev_dequeue_multijob(dpdmai_dev, qdma_pq->queue_id,
899                                          NULL, job, nb_jobs);
900                 if (ret < 0) {
901                         DPAA2_QDMA_ERR(
902                                 "Dequeue from DPDMAI device failed: %d", ret);
903                         return ret;
904                 }
905                 qdma_vq->num_dequeues += ret;
906         } else {
907                 uint16_t temp_vq_id[RTE_QDMA_BURST_NB_MAX];
908                 /*
909                  * Get the QDMA completed jobs from the software ring.
910                  * In case they are not available on the ring poke the HW
911                  * to fetch completed jobs from corresponding HW queues
912                  */
913                 ring_count = rte_ring_count(qdma_vq->status_ring);
914                 if (ring_count < nb_jobs) {
915                         /* TODO - How to have right budget */
916                         ret = dpdmai_dev_dequeue_multijob(dpdmai_dev,
917                                         qdma_pq->queue_id,
918                                         temp_vq_id, job, nb_jobs);
919                         for (i = 0; i < ret; i++) {
920                                 temp_qdma_vq = &qdma_vqs[temp_vq_id[i]];
921                                 rte_ring_enqueue(temp_qdma_vq->status_ring,
922                                         (void *)(job[i]));
923                         }
924                         ring_count = rte_ring_count(
925                                         qdma_vq->status_ring);
926                 }
927
928                 if (ring_count) {
929                         /* Dequeue job from the software ring
930                          * to provide to the user
931                          */
932                         ret = rte_ring_dequeue_bulk(qdma_vq->status_ring,
933                                         (void **)job, ring_count, NULL);
934                         if (ret)
935                                 qdma_vq->num_dequeues += ret;
936                 }
937         }
938
939         return ret;
940 }
941
942 struct rte_qdma_job *
943 rte_qdma_vq_dequeue(uint16_t vq_id)
944 {
945         int ret;
946         struct rte_qdma_job *job = NULL;
947
948         ret = rte_qdma_vq_dequeue_multi(vq_id, &job, 1);
949         if (ret < 0)
950                 DPAA2_QDMA_DP_WARN("DPDMAI device dequeue failed: %d", ret);
951
952         return job;
953 }
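/*
 * A minimal enqueue/poll sketch (illustrative only: vq_id, addresses and
 * length are placeholders and error handling is omitted):
 *
 *	struct rte_qdma_job job = {
 *		.src = src_iova,
 *		.dest = dst_iova,
 *		.len = length,
 *		.flags = RTE_QDMA_JOB_SRC_PHY | RTE_QDMA_JOB_DEST_PHY,
 *	};
 *	struct rte_qdma_job *done;
 *
 *	rte_qdma_vq_enqueue(vq_id, &job);
 *	do {
 *		done = rte_qdma_vq_dequeue(vq_id);
 *	} while (!done);
 *
 * After completion, done->status carries the FD error/result bits filled in
 * by dpdmai_dev_get_job().
 */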
954
955 void
956 rte_qdma_vq_stats(uint16_t vq_id,
957                   struct rte_qdma_vq_stats *vq_status)
958 {
959         struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
960
961         if (qdma_vq->in_use) {
962                 vq_status->exclusive_hw_queue = qdma_vq->exclusive_hw_queue;
963                 vq_status->lcore_id = qdma_vq->lcore_id;
964                 vq_status->num_enqueues = qdma_vq->num_enqueues;
965                 vq_status->num_dequeues = qdma_vq->num_dequeues;
966                 vq_status->num_pending_jobs = vq_status->num_enqueues -
967                                 vq_status->num_dequeues;
968         }
969 }
970
971 int
972 rte_qdma_vq_destroy(uint16_t vq_id)
973 {
974         struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
975
976         DPAA2_QDMA_FUNC_TRACE();
977
978         /* In case there are pending jobs on any VQ, return -EBUSY */
979         if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
980                 return -EBUSY;
981
982         rte_spinlock_lock(&qdma_dev.lock);
983
984         if (qdma_vq->exclusive_hw_queue)
985                 free_hw_queue(qdma_vq->hw_queue);
986         else {
987                 if (qdma_vq->status_ring)
 988                         rte_ring_free(qdma_vq->status_ring);
989
990                 put_hw_queue(qdma_vq->hw_queue);
991         }
992
993         memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));
994
995         rte_spinlock_unlock(&qdma_dev.lock);
996
997         return 0;
998 }
999
1000 int
1001 rte_qdma_vq_destroy_rbp(uint16_t vq_id)
1002 {
1003         struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
1004
1005         DPAA2_QDMA_FUNC_TRACE();
1006
1007         /* In case there are pending jobs on any VQ, return -EBUSY */
1008         if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
1009                 return -EBUSY;
1010
1011         rte_spinlock_lock(&qdma_dev.lock);
1012
1013         if (qdma_vq->exclusive_hw_queue) {
1014                 free_hw_queue(qdma_vq->hw_queue);
1015         } else {
1016                 if (qdma_vq->status_ring)
 1017                         rte_ring_free(qdma_vq->status_ring);
1018
1019                 put_hw_queue(qdma_vq->hw_queue);
1020         }
1021
1022         memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));
1023
1024         rte_spinlock_unlock(&qdma_dev.lock);
1025
1026         return 0;
1027 }
1028
1029 void
1030 rte_qdma_stop(void)
1031 {
1032         DPAA2_QDMA_FUNC_TRACE();
1033
1034         qdma_dev.state = 0;
1035 }
1036
1037 void
1038 rte_qdma_destroy(void)
1039 {
1040         DPAA2_QDMA_FUNC_TRACE();
1041
1042         rte_qdma_reset();
1043 }
1044
1045 static const struct rte_rawdev_ops dpaa2_qdma_ops;
1046
1047 static int
1048 add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
1049 {
1050         struct qdma_hw_queue *queue;
1051         int i;
1052
1053         DPAA2_QDMA_FUNC_TRACE();
1054
1055         for (i = 0; i < dpdmai_dev->num_queues; i++) {
1056                 queue = rte_zmalloc(NULL, sizeof(struct qdma_hw_queue), 0);
1057                 if (!queue) {
1058                         DPAA2_QDMA_ERR(
1059                                 "Memory allocation failed for QDMA queue");
1060                         return -ENOMEM;
1061                 }
1062
1063                 queue->dpdmai_dev = dpdmai_dev;
1064                 queue->queue_id = i;
1065
1066                 TAILQ_INSERT_TAIL(&qdma_queue_list, queue, next);
1067                 qdma_dev.num_hw_queues++;
1068         }
1069
1070         return 0;
1071 }
1072
1073 static void
1074 remove_hw_queues_from_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
1075 {
1076         struct qdma_hw_queue *queue = NULL;
1077         struct qdma_hw_queue *tqueue = NULL;
1078
1079         DPAA2_QDMA_FUNC_TRACE();
1080
1081         TAILQ_FOREACH_SAFE(queue, &qdma_queue_list, next, tqueue) {
1082                 if (queue->dpdmai_dev == dpdmai_dev) {
1083                         TAILQ_REMOVE(&qdma_queue_list, queue, next);
1084                         rte_free(queue);
1085                         queue = NULL;
1086                 }
1087         }
1088 }
1089
1090 static int
1091 dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)
1092 {
1093         struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
1094         int ret, i;
1095
1096         DPAA2_QDMA_FUNC_TRACE();
1097
1098         /* Remove HW queues from global list */
1099         remove_hw_queues_from_list(dpdmai_dev);
1100
1101         ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1102                              dpdmai_dev->token);
1103         if (ret)
1104                 DPAA2_QDMA_ERR("dpdmai disable failed");
1105
1106         /* Free up the DQRR storage for Rx */
1107         for (i = 0; i < dpdmai_dev->num_queues; i++) {
1108                 struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[i]);
1109
1110                 if (rxq->q_storage) {
1111                         dpaa2_free_dq_storage(rxq->q_storage);
1112                         rte_free(rxq->q_storage);
1113                 }
1114         }
1115
1116         /* Close the device at underlying layer */
1117         ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
1118         if (ret)
1119                 DPAA2_QDMA_ERR("Failure closing dpdmai device");
1120
1121         return 0;
1122 }
1123
1124 static int
1125 check_devargs_handler(__rte_unused const char *key, const char *value,
1126                       __rte_unused void *opaque)
1127 {
1128         if (strcmp(value, "1"))
1129                 return -1;
1130
1131         return 0;
1132 }
1133
1134 static int
1135 dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
1136 {
1137         struct rte_kvargs *kvlist;
1138
1139         if (!devargs)
1140                 return 0;
1141
1142         kvlist = rte_kvargs_parse(devargs->args, NULL);
1143         if (!kvlist)
1144                 return 0;
1145
1146         if (!rte_kvargs_count(kvlist, key)) {
1147                 rte_kvargs_free(kvlist);
1148                 return 0;
1149         }
1150
1151         if (rte_kvargs_process(kvlist, key,
1152                                check_devargs_handler, NULL) < 0) {
1153                 rte_kvargs_free(kvlist);
1154                 return 0;
1155         }
1156         rte_kvargs_free(kvlist);
1157
1158         return 1;
1159 }
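/*
 * The "no_prefetch" devarg is given on the fslmc device, e.g. something like
 * "fslmc:dpdmai.1,no_prefetch=1" on the EAL command line (the object name is
 * only an example); when set to 1 it selects the no-prefetch Rx path in
 * dpaa2_dpdmai_dev_init() below.
 */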
1160
1161 static int
1162 dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
1163 {
1164         struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
1165         struct dpdmai_rx_queue_cfg rx_queue_cfg;
1166         struct dpdmai_attr attr;
1167         struct dpdmai_rx_queue_attr rx_attr;
1168         struct dpdmai_tx_queue_attr tx_attr;
1169         int ret, i;
1170
1171         DPAA2_QDMA_FUNC_TRACE();
1172
1173         /* Open DPDMAI device */
1174         dpdmai_dev->dpdmai_id = dpdmai_id;
1175         dpdmai_dev->dpdmai.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
1176         ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1177                           dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
1178         if (ret) {
1179                 DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
1180                 return ret;
1181         }
1182
1183         /* Get DPDMAI attributes */
1184         ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1185                                     dpdmai_dev->token, &attr);
1186         if (ret) {
1187                 DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
1188                                ret);
1189                 goto init_err;
1190         }
1191         dpdmai_dev->num_queues = attr.num_of_queues;
1192
1193         /* Set up Rx Queues */
1194         for (i = 0; i < dpdmai_dev->num_queues; i++) {
1195                 struct dpaa2_queue *rxq;
1196
1197                 memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
1198                 ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
1199                                           CMD_PRI_LOW,
1200                                           dpdmai_dev->token,
1201                                           i, 0, &rx_queue_cfg);
1202                 if (ret) {
1203                         DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
1204                                        ret);
1205                         goto init_err;
1206                 }
1207
1208                 /* Allocate DQ storage for the DPDMAI Rx queues */
1209                 rxq = &(dpdmai_dev->rx_queue[i]);
1210                 rxq->q_storage = rte_malloc("dq_storage",
1211                                             sizeof(struct queue_storage_info_t),
1212                                             RTE_CACHE_LINE_SIZE);
1213                 if (!rxq->q_storage) {
1214                         DPAA2_QDMA_ERR("q_storage allocation failed");
1215                         ret = -ENOMEM;
1216                         goto init_err;
1217                 }
1218
1219                 memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
1220                 ret = dpaa2_alloc_dq_storage(rxq->q_storage);
1221                 if (ret) {
1222                         DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
1223                         goto init_err;
1224                 }
1225         }
1226
1227         /* Get Rx and Tx queues FQID's */
1228         for (i = 0; i < dpdmai_dev->num_queues; i++) {
1229                 ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1230                                           dpdmai_dev->token, i, 0, &rx_attr);
1231                 if (ret) {
1232                         DPAA2_QDMA_ERR("Reading device failed with err: %d",
1233                                        ret);
1234                         goto init_err;
1235                 }
1236                 dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
1237
1238                 ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1239                                           dpdmai_dev->token, i, 0, &tx_attr);
1240                 if (ret) {
1241                         DPAA2_QDMA_ERR("Reading device failed with err: %d",
1242                                        ret);
1243                         goto init_err;
1244                 }
1245                 dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
1246         }
1247
1248         /* Enable the device */
1249         ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1250                             dpdmai_dev->token);
1251         if (ret) {
1252                 DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
1253                 goto init_err;
1254         }
1255
1256         /* Add the HW queue to the global list */
1257         ret = add_hw_queues_to_list(dpdmai_dev);
1258         if (ret) {
1259                 DPAA2_QDMA_ERR("Adding H/W queue to list failed");
1260                 goto init_err;
1261         }
1262
1263         if (dpaa2_get_devargs(rawdev->device->devargs,
1264                 DPAA2_QDMA_NO_PREFETCH)) {
1265                 /* If no prefetch is configured. */
1266                 dpdmai_dev_dequeue_multijob =
1267                                 dpdmai_dev_dequeue_multijob_no_prefetch;
1268                 DPAA2_QDMA_INFO("No Prefetch RX Mode enabled");
1269         } else {
1270                 dpdmai_dev_dequeue_multijob =
1271                         dpdmai_dev_dequeue_multijob_prefetch;
1272         }
1273
1274         if (!dpaa2_coherent_no_alloc_cache) {
1275                 if (dpaa2_svr_family == SVR_LX2160A) {
1276                         dpaa2_coherent_no_alloc_cache =
1277                                 DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
1278                         dpaa2_coherent_alloc_cache =
1279                                 DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
1280                 } else {
1281                         dpaa2_coherent_no_alloc_cache =
1282                                 DPAA2_COHERENT_NO_ALLOCATE_CACHE;
1283                         dpaa2_coherent_alloc_cache =
1284                                 DPAA2_COHERENT_ALLOCATE_CACHE;
1285                 }
1286         }
1287
1288         DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
1289
1290         return 0;
1291 init_err:
1292         dpaa2_dpdmai_dev_uninit(rawdev);
1293         return ret;
1294 }
1295
1296 static int
1297 rte_dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
1298                      struct rte_dpaa2_device *dpaa2_dev)
1299 {
1300         struct rte_rawdev *rawdev;
1301         int ret;
1302
1303         DPAA2_QDMA_FUNC_TRACE();
1304
1305         rawdev = rte_rawdev_pmd_allocate(dpaa2_dev->device.name,
1306                         sizeof(struct dpaa2_dpdmai_dev),
1307                         rte_socket_id());
1308         if (!rawdev) {
1309                 DPAA2_QDMA_ERR("Unable to allocate rawdevice");
1310                 return -EINVAL;
1311         }
1312
1313         dpaa2_dev->rawdev = rawdev;
1314         rawdev->dev_ops = &dpaa2_qdma_ops;
1315         rawdev->device = &dpaa2_dev->device;
1316         rawdev->driver_name = dpaa2_drv->driver.name;
1317
1318         /* Invoke PMD device initialization function */
1319         ret = dpaa2_dpdmai_dev_init(rawdev, dpaa2_dev->object_id);
1320         if (ret) {
1321                 rte_rawdev_pmd_release(rawdev);
1322                 return ret;
1323         }
1324
1325         return 0;
1326 }
1327
1328 static int
1329 rte_dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
1330 {
1331         struct rte_rawdev *rawdev = dpaa2_dev->rawdev;
1332         int ret;
1333
1334         DPAA2_QDMA_FUNC_TRACE();
1335
1336         dpaa2_dpdmai_dev_uninit(rawdev);
1337
1338         ret = rte_rawdev_pmd_release(rawdev);
1339         if (ret)
1340                 DPAA2_QDMA_ERR("Device cleanup failed");
1341
1342         return 0;
1343 }
1344
1345 static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
1346         .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
1347         .drv_type = DPAA2_QDMA,
1348         .probe = rte_dpaa2_qdma_probe,
1349         .remove = rte_dpaa2_qdma_remove,
1350 };
1351
1352 RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
1353 RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
1354         "no_prefetch=<int> ");
1355
1356 RTE_INIT(dpaa2_qdma_init_log)
1357 {
1358         dpaa2_qdma_logtype = rte_log_register("pmd.raw.dpaa2.qdma");
1359         if (dpaa2_qdma_logtype >= 0)
1360                 rte_log_set_level(dpaa2_qdma_logtype, RTE_LOG_INFO);
1361 }