raw/dpaa2_qdma: support burst mode
[dpdk.git] / drivers / raw / dpaa2_qdma / dpaa2_qdma.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2019 NXP
3  */
4
5 #include <string.h>
#include <unistd.h>	/* getpid() used when naming the FLE pool */
6
7 #include <rte_eal.h>
8 #include <rte_fslmc.h>
9 #include <rte_atomic.h>
10 #include <rte_lcore.h>
11 #include <rte_rawdev.h>
12 #include <rte_rawdev_pmd.h>
13 #include <rte_malloc.h>
14 #include <rte_ring.h>
15 #include <rte_mempool.h>
16 #include <rte_prefetch.h>
17
18 #include <mc/fsl_dpdmai.h>
19 #include <portal/dpaa2_hw_pvt.h>
20 #include <portal/dpaa2_hw_dpio.h>
21
22 #include "dpaa2_qdma.h"
23 #include "dpaa2_qdma_logs.h"
24 #include "rte_pmd_dpaa2_qdma.h"
25
26 /* Dynamic log type identifier */
27 int dpaa2_qdma_logtype;
28
29 /* QDMA device */
30 static struct qdma_device qdma_dev;
31
32 /* QDMA H/W queues list */
33 TAILQ_HEAD(qdma_hw_queue_list, qdma_hw_queue);
34 static struct qdma_hw_queue_list qdma_queue_list
35         = TAILQ_HEAD_INITIALIZER(qdma_queue_list);
36
37 /* QDMA Virtual Queues */
38 static struct qdma_virt_queue *qdma_vqs;
39
40 /* QDMA per core data */
41 static struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];
42
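/*
 * Grab the first unused HW queue on the global list, bind it to the given
 * lcore and take a user reference on it. Returns NULL when every HW queue
 * is already in use.
 */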
43 static struct qdma_hw_queue *
44 alloc_hw_queue(uint32_t lcore_id)
45 {
46         struct qdma_hw_queue *queue = NULL;
47
48         DPAA2_QDMA_FUNC_TRACE();
49
50         /* Get a free queue from the list */
51         TAILQ_FOREACH(queue, &qdma_queue_list, next) {
52                 if (queue->num_users == 0) {
53                         queue->lcore_id = lcore_id;
54                         queue->num_users++;
55                         break;
56                 }
57         }
58
59         return queue;
60 }
61
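/* Drop one user reference from a HW queue; the queue stays on the global list. */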
62 static void
63 free_hw_queue(struct qdma_hw_queue *queue)
64 {
65         DPAA2_QDMA_FUNC_TRACE();
66
67         queue->num_users--;
68 }
69
70
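/*
 * Get a shared HW queue for the given lcore: allocate a fresh HW queue while
 * the core is below its per-core limit, otherwise pick the least loaded of
 * the queues already bound to this core.
 */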
71 static struct qdma_hw_queue *
72 get_hw_queue(uint32_t lcore_id)
73 {
74         struct qdma_per_core_info *core_info;
75         struct qdma_hw_queue *queue, *temp;
76         uint32_t least_num_users;
77         int num_hw_queues, i;
78
79         DPAA2_QDMA_FUNC_TRACE();
80
81         core_info = &qdma_core_info[lcore_id];
82         num_hw_queues = core_info->num_hw_queues;
83
84         /*
85          * Allocate a HW queue if this core is below its configured
86          * per-core maximum of HW queues
87          */
88         if (num_hw_queues < qdma_dev.max_hw_queues_per_core) {
89                 queue = alloc_hw_queue(lcore_id);
90                 if (queue) {
91                         core_info->hw_queues[num_hw_queues] = queue;
92                         core_info->num_hw_queues++;
93                         return queue;
94                 }
95         }
96
97         queue = core_info->hw_queues[0];
98         /* In case there is no queue associated with the core return NULL */
99         if (!queue)
100                 return NULL;
101
102         /* Fetch the least loaded H/W queue */
103         least_num_users = core_info->hw_queues[0]->num_users;
104         for (i = 0; i < num_hw_queues; i++) {
105                 temp = core_info->hw_queues[i];
106                 if (temp->num_users < least_num_users) {
                            least_num_users = temp->num_users;
107                         queue = temp;
                    }
108         }
109
110         if (queue)
111                 queue->num_users++;
112
113         return queue;
114 }
115
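/*
 * Release a shared HW queue reference taken via get_hw_queue(). The last
 * user frees the HW queue and removes it from the per-core bookkeeping.
 */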
116 static void
117 put_hw_queue(struct qdma_hw_queue *queue)
118 {
119         struct qdma_per_core_info *core_info;
120         int lcore_id, num_hw_queues, i;
121
122         DPAA2_QDMA_FUNC_TRACE();
123
124         /*
125          * If this is the last user of the queue free it.
126          * Also remove it from QDMA core info.
127          */
128         if (queue->num_users == 1) {
129                 free_hw_queue(queue);
130
131                 /* Remove the physical queue from core info */
132                 lcore_id = queue->lcore_id;
133                 core_info = &qdma_core_info[lcore_id];
134                 num_hw_queues = core_info->num_hw_queues;
135                 for (i = 0; i < num_hw_queues; i++) {
136                         if (queue == core_info->hw_queues[i])
137                                 break;
138                 }
139                 for (; i < num_hw_queues - 1; i++)
140                         core_info->hw_queues[i] = core_info->hw_queues[i + 1];
141                 core_info->hw_queues[i] = NULL;
                    core_info->num_hw_queues--;
142         } else {
143                 queue->num_users--;
144         }
145 }
146
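/*
 * A minimal usage sketch of the API implemented below, for orientation only.
 * Struct layouts and the remaining flags live in rte_pmd_dpaa2_qdma.h;
 * src_iova/dst_iova/length are placeholders and error handling is omitted:
 *
 *	struct rte_qdma_config cfg = {
 *		.max_hw_queues_per_core = 1,
 *		.mode = RTE_QDMA_MODE_HW,
 *		.fle_pool_count = 4096,
 *		.max_vqs = 8,
 *	};
 *	struct rte_qdma_job job = {
 *		.src = src_iova, .dest = dst_iova, .len = length,
 *		.flags = RTE_QDMA_JOB_SRC_PHY | RTE_QDMA_JOB_DEST_PHY,
 *	};
 *	struct rte_qdma_job *done;
 *	int vq;
 *
 *	rte_qdma_init();
 *	rte_qdma_configure(&cfg);
 *	rte_qdma_start();
 *	vq = rte_qdma_vq_create(rte_lcore_id(), 0);
 *	rte_qdma_vq_enqueue(vq, &job);
 *	while ((done = rte_qdma_vq_dequeue(vq)) == NULL)
 *		;
 *	rte_qdma_vq_destroy(vq);
 *	rte_qdma_stop();
 *	rte_qdma_destroy();
 */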
147 int
148 rte_qdma_init(void)
149 {
150         DPAA2_QDMA_FUNC_TRACE();
151
152         rte_spinlock_init(&qdma_dev.lock);
153
154         return 0;
155 }
156
157 void
158 rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr)
159 {
160         DPAA2_QDMA_FUNC_TRACE();
161
162         qdma_attr->num_hw_queues = qdma_dev.num_hw_queues;
163 }
164
165 int
166 rte_qdma_reset(void)
167 {
168         struct qdma_hw_queue *queue;
169         int i;
170
171         DPAA2_QDMA_FUNC_TRACE();
172
173         /* In case QDMA device is not in stopped state, return -EBUSY */
174         if (qdma_dev.state == 1) {
175                 DPAA2_QDMA_ERR(
176                         "Device is in running state. Stop before reset.");
177                 return -EBUSY;
178         }
179
180         /* In case there are pending jobs on any VQ, return -EBUSY */
181         for (i = 0; i < qdma_dev.max_vqs; i++) {
182                 if (qdma_vqs[i].in_use && (qdma_vqs[i].num_enqueues !=
183                     qdma_vqs[i].num_dequeues)) {
184                         DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
185                         return -EBUSY;
                        }
186         }
187
188         /* Reset HW queues */
189         TAILQ_FOREACH(queue, &qdma_queue_list, next)
190                 queue->num_users = 0;
191
192         /* Reset and free virtual queues */
193         for (i = 0; i < qdma_dev.max_vqs; i++) {
194                 if (qdma_vqs[i].status_ring)
195                         rte_ring_free(qdma_vqs[i].status_ring);
196         }
197         if (qdma_vqs)
198                 rte_free(qdma_vqs);
199         qdma_vqs = NULL;
200
201         /* Reset per core info */
202         memset(&qdma_core_info, 0,
203                 sizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);
204
205         /* Free the FLE pool */
206         if (qdma_dev.fle_pool)
207                 rte_mempool_free(qdma_dev.fle_pool);
208
209         /* Reset QDMA device structure */
210         qdma_dev.mode = RTE_QDMA_MODE_HW;
211         qdma_dev.max_hw_queues_per_core = 0;
212         qdma_dev.fle_pool = NULL;
213         qdma_dev.fle_pool_count = 0;
214         qdma_dev.max_vqs = 0;
215
216         return 0;
217 }
218
219 int
220 rte_qdma_configure(struct rte_qdma_config *qdma_config)
221 {
222         int ret;
223         char fle_pool_name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
224
225         DPAA2_QDMA_FUNC_TRACE();
226
227         /* In case QDMA device is not in stopped state, return -EBUSY */
228         if (qdma_dev.state == 1) {
229                 DPAA2_QDMA_ERR(
230                         "Device is in running state. Stop before config.");
231                 return -1;
232         }
233
234         /* Reset the QDMA device */
235         ret = rte_qdma_reset();
236         if (ret) {
237                 DPAA2_QDMA_ERR("Resetting QDMA failed");
238                 return ret;
239         }
240
241         /* Set mode */
242         qdma_dev.mode = qdma_config->mode;
243
244         /* Set max HW queue per core */
245         if (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {
246                 DPAA2_QDMA_ERR("H/W queues per core is more than: %d",
247                                MAX_HW_QUEUE_PER_CORE);
248                 return -EINVAL;
249         }
250         qdma_dev.max_hw_queues_per_core =
251                 qdma_config->max_hw_queues_per_core;
252
253         /* Allocate Virtual Queues */
254         qdma_vqs = rte_malloc("qdma_virtual_queues",
255                         (sizeof(struct qdma_virt_queue) * qdma_config->max_vqs),
256                         RTE_CACHE_LINE_SIZE);
257         if (!qdma_vqs) {
258                 DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
259                 return -ENOMEM;
260         }
261         qdma_dev.max_vqs = qdma_config->max_vqs;
262
263         /* Allocate FLE pool; just append PID so that in case of
264  * multiprocess, the pools don't collide.
265          */
266         snprintf(fle_pool_name, sizeof(fle_pool_name), "qdma_fle_pool%u",
267                  getpid());
268         qdma_dev.fle_pool = rte_mempool_create(fle_pool_name,
269                         qdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,
270                         QDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,
271                         NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
272         if (!qdma_dev.fle_pool) {
273                 DPAA2_QDMA_ERR("qdma_fle_pool create failed");
274                 rte_free(qdma_vqs);
275                 qdma_vqs = NULL;
276                 return -ENOMEM;
277         }
278         qdma_dev.fle_pool_count = qdma_config->fle_pool_count;
279
280         return 0;
281 }
282
283 int
284 rte_qdma_start(void)
285 {
286         DPAA2_QDMA_FUNC_TRACE();
287
288         qdma_dev.state = 1;
289
290         return 0;
291 }
292
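/*
 * Create a virtual queue on the given lcore. In HW mode, or when
 * RTE_QDMA_VQ_EXCLUSIVE_PQ is requested, the VQ gets a dedicated HW queue;
 * otherwise it shares a HW queue with other VQs on the core and uses a
 * software status ring to hold completions pulled on its behalf.
 * Returns the VQ id on success or a negative errno value on failure.
 */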
293 int
294 rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)
295 {
296         char ring_name[32];
297         int i;
298
299         DPAA2_QDMA_FUNC_TRACE();
300
301         rte_spinlock_lock(&qdma_dev.lock);
302
303         /* Get a free Virtual Queue */
304         for (i = 0; i < qdma_dev.max_vqs; i++) {
305                 if (qdma_vqs[i].in_use == 0)
306                         break;
307         }
308
309         /* Return in case no VQ is free */
310         if (i == qdma_dev.max_vqs) {
311                 rte_spinlock_unlock(&qdma_dev.lock);
312                 DPAA2_QDMA_ERR("No free virtual queue available");
313                 return -ENODEV;
314         }
315
316         if (qdma_dev.mode == RTE_QDMA_MODE_HW ||
317                         (flags & RTE_QDMA_VQ_EXCLUSIVE_PQ)) {
318                 /* Allocate HW queue for a VQ */
319                 qdma_vqs[i].hw_queue = alloc_hw_queue(lcore_id);
320                 qdma_vqs[i].exclusive_hw_queue = 1;
321         } else {
322                 /* Allocate a Ring for Virtual Queue in VQ mode */
323                 snprintf(ring_name, sizeof(ring_name), "status ring %d", i);
324                 qdma_vqs[i].status_ring = rte_ring_create(ring_name,
325                         qdma_dev.fle_pool_count, rte_socket_id(), 0);
326                 if (!qdma_vqs[i].status_ring) {
327                         DPAA2_QDMA_ERR("Status ring creation failed for vq");
328                         rte_spinlock_unlock(&qdma_dev.lock);
329                         return -rte_errno;
330                 }
331
332                 /* Get a HW queue (shared) for a VQ */
333                 qdma_vqs[i].hw_queue = get_hw_queue(lcore_id);
334                 qdma_vqs[i].exclusive_hw_queue = 0;
335         }
336
337         if (qdma_vqs[i].hw_queue == NULL) {
338                 DPAA2_QDMA_ERR("No H/W queue available for VQ");
339                 if (qdma_vqs[i].status_ring)
340                         rte_ring_free(qdma_vqs[i].status_ring);
341                 qdma_vqs[i].status_ring = NULL;
342                 rte_spinlock_unlock(&qdma_dev.lock);
343                 return -ENODEV;
344         }
345
346         qdma_vqs[i].in_use = 1;
347         qdma_vqs[i].lcore_id = lcore_id;
348
349         rte_spinlock_unlock(&qdma_dev.lock);
350
351         return i;
352 }
353
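/*
 * Fill in the frame list for one job: fle[0] points at the pair of
 * source/destination descriptors (SDDs) placed after the FLE entries in the
 * same pool object, fle[1] describes the source buffer and fle[2] the
 * destination buffer. Addresses are used as-is when the corresponding
 * RTE_QDMA_JOB_*_PHY flag is set, otherwise they are translated from
 * virtual addresses.
 */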
354 static void
355 dpaa2_qdma_populate_fle(struct qbman_fle *fle,
356                         uint64_t src, uint64_t dest,
357                         size_t len, uint32_t flags)
358 {
359         struct qdma_sdd *sdd;
360
361         DPAA2_QDMA_FUNC_TRACE();
362
363         sdd = (struct qdma_sdd *)((uint8_t *)(fle) +
364                 (DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle)));
365
366         /* first frame list to source descriptor */
367         DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sdd));
368         DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));
369
370         /* source and destination descriptor */
371         DPAA2_SET_SDD_RD_COHERENT(sdd); /* source descriptor CMD */
372         sdd++;
373         DPAA2_SET_SDD_WR_COHERENT(sdd); /* dest descriptor CMD */
374
375         fle++;
376         /* source frame list to source buffer */
377         if (flags & RTE_QDMA_JOB_SRC_PHY) {
378                 DPAA2_SET_FLE_ADDR(fle, src);
379                 DPAA2_SET_FLE_BMT(fle);
380         } else {
381                 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
382         }
383         DPAA2_SET_FLE_LEN(fle, len);
384
385         fle++;
386         /* destination frame list to destination buffer */
387         if (flags & RTE_QDMA_JOB_DEST_PHY) {
388                 DPAA2_SET_FLE_BMT(fle);
389                 DPAA2_SET_FLE_ADDR(fle, dest);
390         } else {
391                 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
392         }
393         DPAA2_SET_FLE_LEN(fle, len);
394
395         /* Final bit: 1, for last frame list */
396         DPAA2_SET_FLE_FIN(fle);
397 }
398
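/*
 * Enqueue a burst of jobs on a virtual queue. Jobs are submitted in chunks
 * of at most dpaa2_eqcr_size: for each job an FLE/SDD context is taken from
 * the FLE pool, the job pointer and VQ id are stored in the metadata
 * preceding it, and a compound frame descriptor pointing at the FLE list is
 * pushed to QBMAN. Returns the number of jobs handed to hardware, or a
 * negative value on error.
 */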
399 int
400 rte_qdma_vq_enqueue_multi(uint16_t vq_id,
401                           struct rte_qdma_job **job,
402                           uint16_t nb_jobs)
403 {
404         struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
405         struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
406         struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
407         struct qdma_io_meta *io_meta;
408         struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
409         struct dpaa2_queue *txq;
410         struct qbman_fle *fle;
411         struct qbman_eq_desc eqdesc;
412         struct qbman_swp *swp;
413         int ret;
414         uint32_t num_to_send = 0;
415         uint16_t num_tx = 0;
416         uint16_t num_txed = 0;
417
418         /* Return error in case of wrong lcore_id */
419         if (rte_lcore_id() != qdma_vq->lcore_id) {
420                 DPAA2_QDMA_ERR("QDMA enqueue for vqid %d on wrong core",
421                                 vq_id);
422                 return -1;
423         }
424
425         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
426                 ret = dpaa2_affine_qbman_swp();
427                 if (ret) {
428                         DPAA2_QDMA_ERR("Failure in affining portal");
429                         return 0;
430                 }
431         }
432         swp = DPAA2_PER_LCORE_PORTAL;
433
434         txq = &(dpdmai_dev->tx_queue[qdma_pq->queue_id]);
435
436         /* Prepare enqueue descriptor */
437         qbman_eq_desc_clear(&eqdesc);
438         qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
439         qbman_eq_desc_set_no_orp(&eqdesc, 0);
440         qbman_eq_desc_set_response(&eqdesc, 0, 0);
441
442         while (nb_jobs > 0) {
443                 uint32_t loop;
444
445                 num_to_send = (nb_jobs > dpaa2_eqcr_size) ?
446                         dpaa2_eqcr_size : nb_jobs;
447
448                 for (loop = 0; loop < num_to_send; loop++) {
449                         /*
450                          * Get an FLE/SDD from FLE pool.
451                          * Note: IO metadata is before the FLE and SDD memory.
452                          */
453                         ret = rte_mempool_get(qdma_dev.fle_pool,
454                                         (void **)(&io_meta));
455                         if (ret) {
456                                 DPAA2_QDMA_DP_WARN("Memory alloc failed for FLE");
457                                 return ret;
458                         }
459
460                         /* Set the metadata */
461                         io_meta->cnxt = (size_t)job[num_tx];
462                         io_meta->id = vq_id;
463
464                         fle = (struct qbman_fle *)(io_meta + 1);
465
466                         /* populate Frame descriptor */
467                         memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
468                         DPAA2_SET_FD_ADDR(&fd_arr[loop],
469                                         DPAA2_VADDR_TO_IOVA(fle));
470                         DPAA2_SET_FD_COMPOUND_FMT(&fd_arr[loop]);
471                         DPAA2_SET_FD_FRC(&fd_arr[loop], QDMA_SER_CTX);
472
473                         /* Populate FLE */
474                         memset(fle, 0, QDMA_FLE_POOL_SIZE);
475                         dpaa2_qdma_populate_fle(fle, job[num_tx]->src,
476                                                 job[num_tx]->dest,
477                                                 job[num_tx]->len,
478                                                 job[num_tx]->flags);
479
480                         num_tx++;
481                 }
482
483                 /* Enqueue the frame descriptors to QBMAN */
484                 uint32_t enqueue_loop = 0;
485                 while (enqueue_loop < num_to_send) {
486                         enqueue_loop += qbman_swp_enqueue_multiple(swp,
487                                                 &eqdesc,
488                                                 &fd_arr[enqueue_loop],
489                                                 NULL,
490                                                 num_to_send - enqueue_loop);
491                 }
492
493                 num_txed += num_to_send;
494                 nb_jobs -= num_to_send;
495         }
496         qdma_vq->num_enqueues += num_txed;
497         return num_txed;
498 }
499
500 int
501 rte_qdma_vq_enqueue(uint16_t vq_id,
502                     struct rte_qdma_job *job)
503 {
504         int ret;
505
506         ret = rte_qdma_vq_enqueue_multi(vq_id, &job, 1);
507         if (ret < 0) {
508                 DPAA2_QDMA_ERR("DPDMAI device enqueue failed: %d", ret);
509                 return ret;
510         }
511
512         return 1;
513 }
514
515 /* Pull completed QDMA jobs for a given DPDMAI device and Rx queue */
516 static int
517 dpdmai_dev_dequeue_multijob(struct dpaa2_dpdmai_dev *dpdmai_dev,
518                    uint16_t rxq_id,
519                    uint16_t *vq_id,
520                    struct rte_qdma_job **job,
521                    uint16_t nb_jobs)
522 {
523         struct qdma_io_meta *io_meta;
524         struct dpaa2_queue *rxq;
525         struct qbman_result *dq_storage;
526         struct qbman_pull_desc pulldesc;
527         const struct qbman_fd *fd;
528         struct qbman_swp *swp;
529         struct qbman_fle *fle;
530         uint32_t fqid;
531         uint8_t status;
532         int ret;
533
534         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
535                 ret = dpaa2_affine_qbman_swp();
536                 if (ret) {
537                         DPAA2_QDMA_ERR("Failure in affining portal");
538                         return 0;
539                 }
540         }
541         swp = DPAA2_PER_LCORE_PORTAL;
542         rxq = &(dpdmai_dev->rx_queue[rxq_id]);
543         dq_storage = rxq->q_storage->dq_storage[0];
544         fqid = rxq->fqid;
545
546         /* Prepare dequeue descriptor */
547         qbman_pull_desc_clear(&pulldesc);
548         qbman_pull_desc_set_fq(&pulldesc, fqid);
549         qbman_pull_desc_set_storage(&pulldesc, dq_storage,
550                 (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
551         if (nb_jobs > dpaa2_dqrr_size)
552                 qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
553         else
554                 qbman_pull_desc_set_numframes(&pulldesc, nb_jobs);
555
556         while (1) {
557                 if (qbman_swp_pull(swp, &pulldesc)) {
558                         DPAA2_QDMA_DP_WARN("VDQ command not issued. QBMAN busy");
559                         continue;
560                 }
561                 break;
562         }
563
564         rte_prefetch0((void *)((size_t)(dq_storage + 1)));
565         /* Check if the previous issued command is completed. */
566         while (!qbman_check_command_complete(dq_storage))
567                 ;
568
569         int num_pulled = 0;
570         int pending = 1;
571         do {
572                 /* Loop until the dq_storage is updated with
573                  * new token by QBMAN
574                  */
575                 while (!qbman_check_new_result(dq_storage))
576                         ;
577
578                 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
579                  * Check whether the last pull command has completed and
580                  * set the condition for loop termination
581                  */
582                 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
583                         pending = 0;
584                         /* Check for valid frame. */
585                         status = qbman_result_DQ_flags(dq_storage);
586                         if (unlikely((status &
587                                 QBMAN_DQ_STAT_VALIDFRAME) == 0))
588                                 continue;
589                 }
590                 fd = qbman_result_DQ_fd(dq_storage);
591
592                 /*
593                  * Fetch metadata from FLE. job and vq_id were set
594                  * in metadata in the enqueue operation.
595                  */
596                 fle = (struct qbman_fle *)
597                                 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
598                 io_meta = (struct qdma_io_meta *)(fle) - 1;
599                 if (vq_id)
600                         vq_id[num_pulled] = io_meta->id;
601
602                 job[num_pulled] = (struct rte_qdma_job *)(size_t)io_meta->cnxt;
603                 job[num_pulled]->status = DPAA2_GET_FD_ERR(fd);
604
605                 /* Free FLE to the pool */
606                 rte_mempool_put(qdma_dev.fle_pool, io_meta);
607
608                 dq_storage++;
609                 num_pulled++;
610         } while (pending && (num_pulled <= dpaa2_dqrr_size));
611
612         return num_pulled;
613 }
614
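/*
 * Dequeue up to nb_jobs completed jobs from a virtual queue. A VQ with an
 * exclusive HW queue pulls completions straight from hardware; a shared VQ
 * first drains hardware into the per-VQ status rings (completions may belong
 * to other VQs sharing the HW queue) and then hands back entries from its
 * own ring.
 */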
615 int
616 rte_qdma_vq_dequeue_multi(uint16_t vq_id,
617                           struct rte_qdma_job **job,
618                           uint16_t nb_jobs)
619 {
620         struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
621         struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
622         struct qdma_virt_queue *temp_qdma_vq;
623         struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
624         int ring_count, ret = 0, i;
625
626         /* Return error in case of wrong lcore_id */
627         if (rte_lcore_id() != (unsigned int)(qdma_vq->lcore_id)) {
628                 DPAA2_QDMA_WARN("QDMA dequeue for vqid %d on wrong core",
629                                 vq_id);
630                 return -1;
631         }
632
633         /* Only dequeue when there are pending jobs on VQ */
634         if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
635                 return 0;
636
637         if (qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))
638                 nb_jobs = (qdma_vq->num_enqueues - qdma_vq->num_dequeues);
639
640         if (qdma_vq->exclusive_hw_queue) {
641                 /* In case of exclusive queue directly fetch from HW queue */
642                 ret = dpdmai_dev_dequeue_multijob(dpdmai_dev, qdma_pq->queue_id,
643                                          NULL, job, nb_jobs);
644                 if (ret < 0) {
645                         DPAA2_QDMA_ERR(
646                                 "Dequeue from DPDMAI device failed: %d", ret);
647                         return ret;
648                 }
649                 qdma_vq->num_dequeues += ret;
650         } else {
651                 uint16_t temp_vq_id[RTE_QDMA_BURST_NB_MAX];
652                 /*
653                  * Get the QDMA completed jobs from the software ring.
654                  * If not enough are available on the ring, poke the HW to
655                  * fetch completed jobs from the corresponding HW queue
656                  */
657                 ring_count = rte_ring_count(qdma_vq->status_ring);
658                 if (ring_count < nb_jobs) {
659                         /* TODO - How to have right budget */
660                         ret = dpdmai_dev_dequeue_multijob(dpdmai_dev,
661                                         qdma_pq->queue_id,
662                                         temp_vq_id, job, nb_jobs);
663                         for (i = 0; i < ret; i++) {
664                                 temp_qdma_vq = &qdma_vqs[temp_vq_id[i]];
665                                 rte_ring_enqueue(temp_qdma_vq->status_ring,
666                                         (void *)(job[i]));
667                                 ring_count = rte_ring_count(
668                                         qdma_vq->status_ring);
669                         }
670                 }
671
672                 if (ring_count) {
                            /* Don't return more completions than requested */
                            if (ring_count > nb_jobs)
                                    ring_count = nb_jobs;
673                         /* Dequeue job from the software ring
674                          * to provide to the user
675                          */
676                         ret = rte_ring_dequeue_bulk(qdma_vq->status_ring,
677                                         (void **)job, ring_count, NULL);
678                         if (ret)
679                                 qdma_vq->num_dequeues += ret;
680                 }
681         }
682
683         return ret;
684 }
685
686 struct rte_qdma_job *
687 rte_qdma_vq_dequeue(uint16_t vq_id)
688 {
689         int ret;
690         struct rte_qdma_job *job = NULL;
691
692         ret = rte_qdma_vq_dequeue_multi(vq_id, &job, 1);
693         if (ret < 0)
694                 DPAA2_QDMA_DP_WARN("DPDMAI device dequeue failed: %d", ret);
695
696         return job;
697 }
698
699 void
700 rte_qdma_vq_stats(uint16_t vq_id,
701                   struct rte_qdma_vq_stats *vq_status)
702 {
703         struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
704
705         DPAA2_QDMA_FUNC_TRACE();
706
707         if (qdma_vq->in_use) {
708                 vq_status->exclusive_hw_queue = qdma_vq->exclusive_hw_queue;
709                 vq_status->lcore_id = qdma_vq->lcore_id;
710                 vq_status->num_enqueues = qdma_vq->num_enqueues;
711                 vq_status->num_dequeues = qdma_vq->num_dequeues;
712                 vq_status->num_pending_jobs = vq_status->num_enqueues -
713                                 vq_status->num_dequeues;
714         }
715 }
716
717 int
718 rte_qdma_vq_destroy(uint16_t vq_id)
719 {
720         struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
721
722         DPAA2_QDMA_FUNC_TRACE();
723
724         /* In case there are pending jobs on any VQ, return -EBUSY */
725         if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
726                 return -EBUSY;
727
728         rte_spinlock_lock(&qdma_dev.lock);
729
730         if (qdma_vq->exclusive_hw_queue)
731                 free_hw_queue(qdma_vq->hw_queue);
732         else {
733                 if (qdma_vq->status_ring)
734                         rte_ring_free(qdma_vq->status_ring);
735
736                 put_hw_queue(qdma_vq->hw_queue);
737         }
738
739         memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));
740
741         rte_spinlock_unlock(&qdma_dev.lock);
742
743         return 0;
744 }
745
746 void
747 rte_qdma_stop(void)
748 {
749         DPAA2_QDMA_FUNC_TRACE();
750
751         qdma_dev.state = 0;
752 }
753
754 void
755 rte_qdma_destroy(void)
756 {
757         DPAA2_QDMA_FUNC_TRACE();
758
759         rte_qdma_reset();
760 }
761
762 static const struct rte_rawdev_ops dpaa2_qdma_ops;
763
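/*
 * Create a qdma_hw_queue entry for every queue of the DPDMAI object and
 * append it to the global HW queue list.
 */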
764 static int
765 add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
766 {
767         struct qdma_hw_queue *queue;
768         int i;
769
770         DPAA2_QDMA_FUNC_TRACE();
771
772         for (i = 0; i < dpdmai_dev->num_queues; i++) {
773                 queue = rte_zmalloc(NULL, sizeof(struct qdma_hw_queue), 0);
774                 if (!queue) {
775                         DPAA2_QDMA_ERR(
776                                 "Memory allocation failed for QDMA queue");
777                         return -ENOMEM;
778                 }
779
780                 queue->dpdmai_dev = dpdmai_dev;
781                 queue->queue_id = i;
782
783                 TAILQ_INSERT_TAIL(&qdma_queue_list, queue, next);
784                 qdma_dev.num_hw_queues++;
785         }
786
787         return 0;
788 }
789
790 static void
791 remove_hw_queues_from_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
792 {
793         struct qdma_hw_queue *queue = NULL;
794         struct qdma_hw_queue *tqueue = NULL;
795
796         DPAA2_QDMA_FUNC_TRACE();
797
798         TAILQ_FOREACH_SAFE(queue, &qdma_queue_list, next, tqueue) {
799                 if (queue->dpdmai_dev == dpdmai_dev) {
800                         TAILQ_REMOVE(&qdma_queue_list, queue, next);
801                         rte_free(queue);
802                         queue = NULL;
803                 }
804         }
805 }
806
807 static int
808 dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)
809 {
810         struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
811         int ret, i;
812
813         DPAA2_QDMA_FUNC_TRACE();
814
815         /* Remove HW queues from global list */
816         remove_hw_queues_from_list(dpdmai_dev);
817
818         ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
819                              dpdmai_dev->token);
820         if (ret)
821                 DPAA2_QDMA_ERR("dpdmai disable failed");
822
823         /* Free the DQRR storage used by the Rx queues */
824         for (i = 0; i < dpdmai_dev->num_queues; i++) {
825                 struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[i]);
826
827                 if (rxq->q_storage) {
828                         dpaa2_free_dq_storage(rxq->q_storage);
829                         rte_free(rxq->q_storage);
830                 }
831         }
832
833         /* Close the device at the underlying layer */
834         ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
835         if (ret)
836                 DPAA2_QDMA_ERR("Failure closing dpdmai device");
837
838         return 0;
839 }
840
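/*
 * Initialise a DPDMAI object: open it through the MC, configure its Rx
 * queues and allocate their DQRR storage, cache the Rx/Tx FQIDs, enable the
 * object and publish its queues on the global HW queue list.
 */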
841 static int
842 dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
843 {
844         struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
845         struct dpdmai_rx_queue_cfg rx_queue_cfg;
846         struct dpdmai_attr attr;
847         struct dpdmai_rx_queue_attr rx_attr;
848         struct dpdmai_tx_queue_attr tx_attr;
849         int ret, i;
850
851         DPAA2_QDMA_FUNC_TRACE();
852
853         /* Open DPDMAI device */
854         dpdmai_dev->dpdmai_id = dpdmai_id;
855         dpdmai_dev->dpdmai.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
856         ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
857                           dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
858         if (ret) {
859                 DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
860                 return ret;
861         }
862
863         /* Get DPDMAI attributes */
864         ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
865                                     dpdmai_dev->token, &attr);
866         if (ret) {
867                 DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
868                                ret);
869                 goto init_err;
870         }
871         dpdmai_dev->num_queues = attr.num_of_queues;
872
873         /* Set up Rx Queues */
874         for (i = 0; i < dpdmai_dev->num_queues; i++) {
875                 struct dpaa2_queue *rxq;
876
877                 memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
878                 ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
879                                           CMD_PRI_LOW,
880                                           dpdmai_dev->token,
881                                           i, 0, &rx_queue_cfg);
882                 if (ret) {
883                         DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
884                                        ret);
885                         goto init_err;
886                 }
887
888                 /* Allocate DQ storage for the DPDMAI Rx queues */
889                 rxq = &(dpdmai_dev->rx_queue[i]);
890                 rxq->q_storage = rte_malloc("dq_storage",
891                                             sizeof(struct queue_storage_info_t),
892                                             RTE_CACHE_LINE_SIZE);
893                 if (!rxq->q_storage) {
894                         DPAA2_QDMA_ERR("q_storage allocation failed");
895                         ret = -ENOMEM;
896                         goto init_err;
897                 }
898
899                 memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
900                 ret = dpaa2_alloc_dq_storage(rxq->q_storage);
901                 if (ret) {
902                         DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
903                         goto init_err;
904                 }
905         }
906
907         /* Get the Rx and Tx queue FQIDs */
908         for (i = 0; i < dpdmai_dev->num_queues; i++) {
909                 ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
910                                           dpdmai_dev->token, i, 0, &rx_attr);
911                 if (ret) {
912                         DPAA2_QDMA_ERR("Reading device failed with err: %d",
913                                        ret);
914                         goto init_err;
915                 }
916                 dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
917
918                 ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
919                                           dpdmai_dev->token, i, 0, &tx_attr);
920                 if (ret) {
921                         DPAA2_QDMA_ERR("Reading device failed with err: %d",
922                                        ret);
923                         goto init_err;
924                 }
925                 dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
926         }
927
928         /* Enable the device */
929         ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
930                             dpdmai_dev->token);
931         if (ret) {
932                 DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
933                 goto init_err;
934         }
935
936         /* Add the HW queue to the global list */
937         ret = add_hw_queues_to_list(dpdmai_dev);
938         if (ret) {
939                 DPAA2_QDMA_ERR("Adding H/W queue to list failed");
940                 goto init_err;
941         }
942         DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
943
944         return 0;
945 init_err:
946         dpaa2_dpdmai_dev_uninit(rawdev);
947         return ret;
948 }
949
950 static int
951 rte_dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
952                      struct rte_dpaa2_device *dpaa2_dev)
953 {
954         struct rte_rawdev *rawdev;
955         int ret;
956
957         DPAA2_QDMA_FUNC_TRACE();
958
959         rawdev = rte_rawdev_pmd_allocate(dpaa2_dev->device.name,
960                         sizeof(struct dpaa2_dpdmai_dev),
961                         rte_socket_id());
962         if (!rawdev) {
963                 DPAA2_QDMA_ERR("Unable to allocate rawdevice");
964                 return -EINVAL;
965         }
966
967         dpaa2_dev->rawdev = rawdev;
968         rawdev->dev_ops = &dpaa2_qdma_ops;
969         rawdev->device = &dpaa2_dev->device;
970         rawdev->driver_name = dpaa2_drv->driver.name;
971
972         /* Invoke PMD device initialization function */
973         ret = dpaa2_dpdmai_dev_init(rawdev, dpaa2_dev->object_id);
974         if (ret) {
975                 rte_rawdev_pmd_release(rawdev);
976                 return ret;
977         }
978
979         return 0;
980 }
981
982 static int
983 rte_dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
984 {
985         struct rte_rawdev *rawdev = dpaa2_dev->rawdev;
986         int ret;
987
988         DPAA2_QDMA_FUNC_TRACE();
989
990         dpaa2_dpdmai_dev_uninit(rawdev);
991
992         ret = rte_rawdev_pmd_release(rawdev);
993         if (ret)
994                 DPAA2_QDMA_ERR("Device cleanup failed");
995
996         return 0;
997 }
998
999 static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
1000         .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
1001         .drv_type = DPAA2_QDMA,
1002         .probe = rte_dpaa2_qdma_probe,
1003         .remove = rte_dpaa2_qdma_remove,
1004 };
1005
1006 RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
1007
1008 RTE_INIT(dpaa2_qdma_init_log)
1009 {
1010         dpaa2_qdma_logtype = rte_log_register("pmd.raw.dpaa2.qdma");
1011         if (dpaa2_qdma_logtype >= 0)
1012                 rte_log_set_level(dpaa2_qdma_logtype, RTE_LOG_INFO);
1013 }