drivers/raw/dpaa2_qdma/dpaa2_qdma.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2019 NXP
3  */
4
5 #include <string.h>
6
7 #include <rte_eal.h>
8 #include <rte_fslmc.h>
9 #include <rte_atomic.h>
10 #include <rte_lcore.h>
11 #include <rte_rawdev.h>
12 #include <rte_rawdev_pmd.h>
13 #include <rte_malloc.h>
14 #include <rte_ring.h>
15 #include <rte_mempool.h>
16 #include <rte_prefetch.h>
17 #include <rte_kvargs.h>
18
19 #include <mc/fsl_dpdmai.h>
20 #include <portal/dpaa2_hw_pvt.h>
21 #include <portal/dpaa2_hw_dpio.h>
22
23 #include "rte_pmd_dpaa2_qdma.h"
24 #include "dpaa2_qdma.h"
25 #include "dpaa2_qdma_logs.h"
26
27 #define DPAA2_QDMA_NO_PREFETCH "no_prefetch"
28
29 /* Dynamic log type identifier */
30 int dpaa2_qdma_logtype;
31
32 uint32_t dpaa2_coherent_no_alloc_cache;
33 uint32_t dpaa2_coherent_alloc_cache;
34
35 /* QDMA device */
36 static struct qdma_device qdma_dev;
37
38 /* QDMA H/W queues list */
39 TAILQ_HEAD(qdma_hw_queue_list, qdma_hw_queue);
40 static struct qdma_hw_queue_list qdma_queue_list
41         = TAILQ_HEAD_INITIALIZER(qdma_queue_list);
42
43 /* QDMA Virtual Queues */
44 static struct qdma_virt_queue *qdma_vqs;
45
46 /* QDMA per core data */
47 static struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];
48
49 typedef int (dpdmai_dev_dequeue_multijob_t)(struct dpaa2_dpdmai_dev *dpdmai_dev,
50                                             uint16_t rxq_id,
51                                             uint16_t *vq_id,
52                                             struct rte_qdma_job **job,
53                                             uint16_t nb_jobs);
54
55 dpdmai_dev_dequeue_multijob_t *dpdmai_dev_dequeue_multijob;
56
57 typedef uint16_t (dpdmai_dev_get_job_t)(const struct qbman_fd *fd,
58                                         struct rte_qdma_job **job);
59 typedef int (dpdmai_dev_set_fd_t)(struct qbman_fd *fd,
60                                   struct rte_qdma_job *job,
61                                   struct rte_qdma_rbp *rbp,
62                                   uint16_t vq_id);
63 dpdmai_dev_get_job_t *dpdmai_dev_get_job;
64 dpdmai_dev_set_fd_t *dpdmai_dev_set_fd;
65
66 static inline int
67 qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
68                         uint32_t len, struct qbman_fd *fd,
69                         struct rte_qdma_rbp *rbp)
70 {
71         fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
72         fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));
73
74         fd->simple_pci.len_sl = len;
75
76         fd->simple_pci.bmt = 1;
77         fd->simple_pci.fmt = 3;
78         fd->simple_pci.sl = 1;
79         fd->simple_pci.ser = 1;
80
81         fd->simple_pci.sportid = rbp->sportid;  /*pcie 3 */
82         fd->simple_pci.srbp = rbp->srbp;
83         if (rbp->srbp)
84                 fd->simple_pci.rdttype = 0;
85         else
86                 fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;
87
88         /*dest is pcie memory */
89         fd->simple_pci.dportid = rbp->dportid;  /*pcie 3 */
90         fd->simple_pci.drbp = rbp->drbp;
91         if (rbp->drbp)
92                 fd->simple_pci.wrttype = 0;
93         else
94                 fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;
95
96         fd->simple_pci.daddr_lo = lower_32_bits((uint64_t) (dest));
97         fd->simple_pci.daddr_hi = upper_32_bits((uint64_t) (dest));
98
99         return 0;
100 }
101
102 static inline int
103 qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
104                         uint32_t len, struct qbman_fd *fd)
105 {
106         fd->simple_ddr.saddr_lo = lower_32_bits((uint64_t) (src));
107         fd->simple_ddr.saddr_hi = upper_32_bits((uint64_t) (src));
108
109         fd->simple_ddr.len = len;
110
111         fd->simple_ddr.bmt = 1;
112         fd->simple_ddr.fmt = 3;
113         fd->simple_ddr.sl = 1;
114         fd->simple_ddr.ser = 1;
115         /**
116          * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011
117          * Coherent copy of cacheable memory,
118          * lookup in downstream cache, no allocate
119          * on miss
120          */
121         fd->simple_ddr.rns = 0;
122         fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
123         /**
124          * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111
125          * Coherent write of cacheable memory,
126          * lookup in downstream cache, no allocate on miss
127          */
128         fd->simple_ddr.wns = 0;
129         fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;
130
131         fd->simple_ddr.daddr_lo = lower_32_bits((uint64_t) (dest));
132         fd->simple_ddr.daddr_hi = upper_32_bits((uint64_t) (dest));
133
134         return 0;
135 }
136
137 static void
138 dpaa2_qdma_populate_fle(struct qbman_fle *fle,
139                         struct rte_qdma_rbp *rbp,
140                         uint64_t src, uint64_t dest,
141                         size_t len, uint32_t flags)
142 {
143         struct qdma_sdd *sdd;
144
145         sdd = (struct qdma_sdd *)((uint8_t *)(fle) +
146                 (DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle)));
147
148         /* first frame list to source descriptor */
149         DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sdd));
150         DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));
151
152         /* source and destination descriptor */
153         if (rbp && rbp->enable) {
154                 /* source */
155                 sdd->read_cmd.portid = rbp->sportid;
156                 sdd->rbpcmd_simple.pfid = rbp->spfid;
157                 sdd->rbpcmd_simple.vfid = rbp->svfid;
158
159                 if (rbp->srbp) {
160                         sdd->read_cmd.rbp = rbp->srbp;
161                         sdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW;
162                 } else {
163                         sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
164                 }
165                 sdd++;
166                 /* destination */
167                 sdd->write_cmd.portid = rbp->dportid;
168                 sdd->rbpcmd_simple.pfid = rbp->dpfid;
169                 sdd->rbpcmd_simple.vfid = rbp->dvfid;
170
171                 if (rbp->drbp) {
172                         sdd->write_cmd.rbp = rbp->drbp;
173                         sdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW;
174                 } else {
175                         sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
176                 }
177
178         } else {
179                 sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
180                 sdd++;
181                 sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
182         }
183         fle++;
184         /* source frame list to source buffer */
185         if (flags & RTE_QDMA_JOB_SRC_PHY) {
186                 DPAA2_SET_FLE_ADDR(fle, src);
187                 DPAA2_SET_FLE_BMT(fle);
188         } else {
189                 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
190         }
191         DPAA2_SET_FLE_LEN(fle, len);
192
193         fle++;
194         /* destination frame list to destination buffer */
195         if (flags & RTE_QDMA_JOB_DEST_PHY) {
196                 DPAA2_SET_FLE_BMT(fle);
197                 DPAA2_SET_FLE_ADDR(fle, dest);
198         } else {
199                 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
200         }
201         DPAA2_SET_FLE_LEN(fle, len);
202
203         /* Final bit: 1, for last frame list */
204         DPAA2_SET_FLE_FIN(fle);
205 }
206
207 static inline int dpdmai_dev_set_fd_us(struct qbman_fd *fd,
208                                         struct rte_qdma_job *job,
209                                         struct rte_qdma_rbp *rbp,
210                                         uint16_t vq_id)
211 {
212         struct rte_qdma_job **ppjob;
213         size_t iova;
214         int ret = 0;
215
216         if (job->src & QDMA_RBP_UPPER_ADDRESS_MASK)
217                 iova = (size_t)job->dest;
218         else
219                 iova = (size_t)job->src;
220
221         /* Set the metadata */
222         job->vq_id = vq_id;
223         ppjob = (struct rte_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
224         *ppjob = job;
225
226         if ((rbp->drbp == 1) || (rbp->srbp == 1))
227                 ret = qdma_populate_fd_pci((phys_addr_t) job->src,
228                                            (phys_addr_t) job->dest,
229                                            job->len, fd, rbp);
230         else
231                 ret = qdma_populate_fd_ddr((phys_addr_t) job->src,
232                                            (phys_addr_t) job->dest,
233                                            job->len, fd);
234         return ret;
235 }
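
/*
 * Note on the ultrashort (US) format used above: there is no per-job FLE.
 * dpdmai_dev_set_fd_us() stashes the rte_qdma_job pointer in the memory
 * immediately preceding the data buffer (the source buffer, or the
 * destination buffer when the source is a PCIe address), and
 * dpdmai_dev_get_job_us() reads it back from there on completion. Callers
 * using this format are therefore expected to leave one pointer worth of
 * writable headroom in front of those buffers.
 */
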
236 static inline int dpdmai_dev_set_fd_lf(struct qbman_fd *fd,
237                                         struct rte_qdma_job *job,
238                                         struct rte_qdma_rbp *rbp,
239                                         uint16_t vq_id)
240 {
241         struct rte_qdma_job **ppjob;
242         struct qbman_fle *fle;
243         int ret = 0;
244         /*
245          * Get an FLE/SDD from FLE pool.
246          * Note: IO metadata is before the FLE and SDD memory.
247          */
248         ret = rte_mempool_get(qdma_dev.fle_pool, (void **)(&ppjob));
249         if (ret) {
250                 DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
251                 return ret;
252         }
253
254         /* Set the metadata */
255         job->vq_id = vq_id;
256         *ppjob = job;
257
258         fle = (struct qbman_fle *)(ppjob + 1);
259
260         DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
261         DPAA2_SET_FD_COMPOUND_FMT(fd);
262         DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
263
264         /* Populate FLE */
265         memset(fle, 0, QDMA_FLE_POOL_SIZE);
266         dpaa2_qdma_populate_fle(fle, rbp, job->src, job->dest,
267                                 job->len, job->flags);
268
269         return 0;
270 }
271
272 static inline uint16_t dpdmai_dev_get_job_us(const struct qbman_fd *fd,
273                                         struct rte_qdma_job **job)
274 {
275         uint16_t vqid;
276         size_t iova;
277         struct rte_qdma_job **ppjob;
278
279         if (fd->simple_pci.saddr_hi & (QDMA_RBP_UPPER_ADDRESS_MASK >> 32))
280                 iova = (size_t) (((uint64_t)fd->simple_pci.daddr_hi) << 32
281                                 | (uint64_t)fd->simple_pci.daddr_lo);
282         else
283                 iova = (size_t)(((uint64_t)fd->simple_pci.saddr_hi) << 32
284                                 | (uint64_t)fd->simple_pci.saddr_lo);
285
286         ppjob = (struct rte_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
287         *job = (struct rte_qdma_job *)*ppjob;
288         (*job)->status = (fd->simple_pci.acc_err << 8) | (fd->simple_pci.error);
289         vqid = (*job)->vq_id;
290
291         return vqid;
292 }
293
294 static inline uint16_t dpdmai_dev_get_job_lf(const struct qbman_fd *fd,
295                                         struct rte_qdma_job **job)
296 {
297         struct rte_qdma_job **ppjob;
298         uint16_t vqid;
299         /*
300          * Fetch metadata from FLE. job and vq_id were set
301          * in metadata in the enqueue operation.
302          */
303         ppjob = (struct rte_qdma_job **)
304                         DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
305         ppjob -= 1;
306
307         *job = (struct rte_qdma_job *)*ppjob;
308         (*job)->status = (DPAA2_GET_FD_ERR(fd) << 8) |
309                          (DPAA2_GET_FD_FRC(fd) & 0xFF);
310         vqid = (*job)->vq_id;
311
312         /* Free FLE to the pool */
313         rte_mempool_put(qdma_dev.fle_pool, (void *)ppjob);
314
315         return vqid;
316 }
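
/*
 * Layout of one element of the FLE pool used by the long-format (LF) path
 * above (dpdmai_dev_set_fd_lf() / dpdmai_dev_get_job_lf()):
 *
 *   +---------------------------+  <- mempool object start (ppjob)
 *   | struct rte_qdma_job *     |     per-job metadata, written at enqueue
 *   +---------------------------+  <- FD address points here (fle)
 *   | DPAA2_QDMA_MAX_FLE FLEs   |     FLE[0] -> SDDs, FLE[1] src, FLE[2] dest
 *   +---------------------------+
 *   | source/destination SDDs   |     filled by dpaa2_qdma_populate_fle()
 *   +---------------------------+
 *
 * On dequeue the job pointer is recovered by stepping one pointer back from
 * the FD address, after which the element is returned to the pool.
 */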
317
318 static struct qdma_hw_queue *
319 alloc_hw_queue(uint32_t lcore_id)
320 {
321         struct qdma_hw_queue *queue = NULL;
322
323         DPAA2_QDMA_FUNC_TRACE();
324
325         /* Get a free queue from the list */
326         TAILQ_FOREACH(queue, &qdma_queue_list, next) {
327                 if (queue->num_users == 0) {
328                         queue->lcore_id = lcore_id;
329                         queue->num_users++;
330                         break;
331                 }
332         }
333
334         return queue;
335 }
336
337 static void
338 free_hw_queue(struct qdma_hw_queue *queue)
339 {
340         DPAA2_QDMA_FUNC_TRACE();
341
342         queue->num_users--;
343 }
344
345
346 static struct qdma_hw_queue *
347 get_hw_queue(uint32_t lcore_id)
348 {
349         struct qdma_per_core_info *core_info;
350         struct qdma_hw_queue *queue, *temp;
351         uint32_t least_num_users;
352         int num_hw_queues, i;
353
354         DPAA2_QDMA_FUNC_TRACE();
355
356         core_info = &qdma_core_info[lcore_id];
357         num_hw_queues = core_info->num_hw_queues;
358
359         /*
360          * Allocate a HW queue if there are less queues
361          * than maximum per core queues configured
362          */
363         if (num_hw_queues < qdma_dev.max_hw_queues_per_core) {
364                 queue = alloc_hw_queue(lcore_id);
365                 if (queue) {
366                         core_info->hw_queues[num_hw_queues] = queue;
367                         core_info->num_hw_queues++;
368                         return queue;
369                 }
370         }
371
372         queue = core_info->hw_queues[0];
373         /* In case there is no queue associated with the core return NULL */
374         if (!queue)
375                 return NULL;
376
377         /* Fetch the least loaded H/W queue */
378         least_num_users = core_info->hw_queues[0]->num_users;
379         for (i = 0; i < num_hw_queues; i++) {
380                 temp = core_info->hw_queues[i];
381                 if (temp->num_users < least_num_users) {
382                         queue = temp;
383                         least_num_users = temp->num_users;
384                 }
385         }
384
385         if (queue)
386                 queue->num_users++;
387
388         return queue;
389 }
390
391 static void
392 put_hw_queue(struct qdma_hw_queue *queue)
393 {
394         struct qdma_per_core_info *core_info;
395         int lcore_id, num_hw_queues, i;
396
397         DPAA2_QDMA_FUNC_TRACE();
398
399         /*
400          * If this is the last user of the queue free it.
401          * Also remove it from QDMA core info.
402          */
403         if (queue->num_users == 1) {
404                 free_hw_queue(queue);
405
406                 /* Remove the physical queue from core info */
407                 lcore_id = queue->lcore_id;
408                 core_info = &qdma_core_info[lcore_id];
409                 num_hw_queues = core_info->num_hw_queues;
410                 for (i = 0; i < num_hw_queues; i++) {
411                         if (queue == core_info->hw_queues[i])
412                                 break;
413                 }
414                 for (; i < num_hw_queues - 1; i++)
415                         core_info->hw_queues[i] = core_info->hw_queues[i + 1];
416                 core_info->hw_queues[i] = NULL;
417         } else {
418                 queue->num_users--;
419         }
420 }
421
422 int
423 rte_qdma_init(void)
424 {
425         DPAA2_QDMA_FUNC_TRACE();
426
427         rte_spinlock_init(&qdma_dev.lock);
428
429         return 0;
430 }
431
432 void
433 rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr)
434 {
435         DPAA2_QDMA_FUNC_TRACE();
436
437         qdma_attr->num_hw_queues = qdma_dev.num_hw_queues;
438 }
439
440 int
441 rte_qdma_reset(void)
442 {
443         struct qdma_hw_queue *queue;
444         int i;
445
446         DPAA2_QDMA_FUNC_TRACE();
447
448         /* In case QDMA device is not in stopped state, return -EBUSY */
449         if (qdma_dev.state == 1) {
450                 DPAA2_QDMA_ERR(
451                         "Device is in running state. Stop before reset.");
452                 return -EBUSY;
453         }
454
455         /* In case there are pending jobs on any VQ, return -EBUSY */
456         for (i = 0; i < qdma_dev.max_vqs; i++)
457                 if (qdma_vqs[i].in_use && (qdma_vqs[i].num_enqueues !=
458                     qdma_vqs[i].num_dequeues)) {
459                         DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
460                         return -EBUSY;
461                 }
462
463         /* Reset HW queues */
464         TAILQ_FOREACH(queue, &qdma_queue_list, next)
465                 queue->num_users = 0;
466
467         /* Reset and free virtual queues */
468         for (i = 0; i < qdma_dev.max_vqs; i++) {
469                 if (qdma_vqs[i].status_ring)
470                         rte_ring_free(qdma_vqs[i].status_ring);
471         }
472         if (qdma_vqs)
473                 rte_free(qdma_vqs);
474         qdma_vqs = NULL;
475
476         /* Reset per core info */
477         memset(&qdma_core_info, 0,
478                 sizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);
479
480         /* Free the FLE pool */
481         if (qdma_dev.fle_pool)
482                 rte_mempool_free(qdma_dev.fle_pool);
483
484         /* Reset QDMA device structure */
485         qdma_dev.mode = RTE_QDMA_MODE_HW;
486         qdma_dev.max_hw_queues_per_core = 0;
487         qdma_dev.fle_pool = NULL;
488         qdma_dev.fle_pool_count = 0;
489         qdma_dev.max_vqs = 0;
490
491         return 0;
492 }
493
494 int
495 rte_qdma_configure(struct rte_qdma_config *qdma_config)
496 {
497         int ret;
498         char fle_pool_name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
499
500         DPAA2_QDMA_FUNC_TRACE();
501
502         /* In case QDMA device is not in stopped state, return -EBUSY */
503         if (qdma_dev.state == 1) {
504                 DPAA2_QDMA_ERR(
505                         "Device is in running state. Stop before config.");
506                 return -EBUSY;
507         }
508
509         /* Reset the QDMA device */
510         ret = rte_qdma_reset();
511         if (ret) {
512                 DPAA2_QDMA_ERR("Resetting QDMA failed");
513                 return ret;
514         }
515
516         /* Set mode */
517         qdma_dev.mode = qdma_config->mode;
518
519         /* Set max HW queue per core */
520         if (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {
521                 DPAA2_QDMA_ERR("H/W queues per core is more than: %d",
522                                MAX_HW_QUEUE_PER_CORE);
523                 return -EINVAL;
524         }
525         qdma_dev.max_hw_queues_per_core =
526                 qdma_config->max_hw_queues_per_core;
527
528         /* Allocate Virtual Queues */
529         qdma_vqs = rte_malloc("qdma_virtual_queues",
530                         (sizeof(struct qdma_virt_queue) * qdma_config->max_vqs),
531                         RTE_CACHE_LINE_SIZE);
532         if (!qdma_vqs) {
533                 DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
534                 return -ENOMEM;
535         }
536         qdma_dev.max_vqs = qdma_config->max_vqs;
537
538         /* Allocate FLE pool; just append PID so that in case of
539          * multiprocess, the pools don't collide.
540          */
541         snprintf(fle_pool_name, sizeof(fle_pool_name), "qdma_fle_pool%u",
542                  getpid());
543         qdma_dev.fle_pool = rte_mempool_create(fle_pool_name,
544                         qdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,
545                         QDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,
546                         NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
547         if (!qdma_dev.fle_pool) {
548                 DPAA2_QDMA_ERR("qdma_fle_pool create failed");
549                 rte_free(qdma_vqs);
550                 qdma_vqs = NULL;
551                 return -ENOMEM;
552         }
553         qdma_dev.fle_pool_count = qdma_config->fle_pool_count;
554
555         if (qdma_config->format == RTE_QDMA_ULTRASHORT_FORMAT) {
556                 dpdmai_dev_get_job = dpdmai_dev_get_job_us;
557                 dpdmai_dev_set_fd = dpdmai_dev_set_fd_us;
558         } else {
559                 dpdmai_dev_get_job = dpdmai_dev_get_job_lf;
560                 dpdmai_dev_set_fd = dpdmai_dev_set_fd_lf;
561         }
562         return 0;
563 }
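
/*
 * Minimal usage sketch for the control path (illustrative only, not part of
 * the driver; the numeric values are placeholders, not recommendations):
 *
 *	struct rte_qdma_config cfg = {
 *		.max_hw_queues_per_core = 1,
 *		.max_vqs = 16,
 *		.fle_pool_count = 4096,
 *		.mode = RTE_QDMA_MODE_HW,
 *		.format = RTE_QDMA_ULTRASHORT_FORMAT,
 *	};
 *
 *	rte_qdma_init();
 *	if (rte_qdma_configure(&cfg) == 0)
 *		rte_qdma_start();
 */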
564
565 int
566 rte_qdma_start(void)
567 {
568         DPAA2_QDMA_FUNC_TRACE();
569
570         qdma_dev.state = 1;
571
572         return 0;
573 }
574
575 int
576 rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)
577 {
578         char ring_name[32];
579         int i;
580
581         DPAA2_QDMA_FUNC_TRACE();
582
583         rte_spinlock_lock(&qdma_dev.lock);
584
585         /* Get a free Virtual Queue */
586         for (i = 0; i < qdma_dev.max_vqs; i++) {
587                 if (qdma_vqs[i].in_use == 0)
588                         break;
589         }
590
591         /* Return in case no VQ is free */
592         if (i == qdma_dev.max_vqs) {
593                 rte_spinlock_unlock(&qdma_dev.lock);
594                 DPAA2_QDMA_ERR("No free virtual queue available");
595                 return -ENODEV;
596         }
597
598         if (qdma_dev.mode == RTE_QDMA_MODE_HW ||
599                         (flags & RTE_QDMA_VQ_EXCLUSIVE_PQ)) {
600                 /* Allocate HW queue for a VQ */
601                 qdma_vqs[i].hw_queue = alloc_hw_queue(lcore_id);
602                 qdma_vqs[i].exclusive_hw_queue = 1;
603         } else {
604                 /* Allocate a Ring for Virtual Queue in VQ mode */
605                 snprintf(ring_name, sizeof(ring_name), "status ring %d", i);
606                 qdma_vqs[i].status_ring = rte_ring_create(ring_name,
607                         qdma_dev.fle_pool_count, rte_socket_id(), 0);
608                 if (!qdma_vqs[i].status_ring) {
609                         DPAA2_QDMA_ERR("Status ring creation failed for vq");
610                         rte_spinlock_unlock(&qdma_dev.lock);
611                         return -rte_errno;
612                 }
613
614                 /* Get a HW queue (shared) for a VQ */
615                 qdma_vqs[i].hw_queue = get_hw_queue(lcore_id);
616                 qdma_vqs[i].exclusive_hw_queue = 0;
617         }
618
619         if (qdma_vqs[i].hw_queue == NULL) {
620                 DPAA2_QDMA_ERR("No H/W queue available for VQ");
621                 if (qdma_vqs[i].status_ring)
622                         rte_ring_free(qdma_vqs[i].status_ring);
623                 qdma_vqs[i].status_ring = NULL;
624                 rte_spinlock_unlock(&qdma_dev.lock);
625                 return -ENODEV;
626         }
627
628         qdma_vqs[i].in_use = 1;
629         qdma_vqs[i].lcore_id = lcore_id;
630         memset(&qdma_vqs[i].rbp, 0, sizeof(struct rte_qdma_rbp));
631         rte_spinlock_unlock(&qdma_dev.lock);
632
633         return i;
634 }
635
636 /*create vq for route-by-port*/
637 int
638 rte_qdma_vq_create_rbp(uint32_t lcore_id, uint32_t flags,
639                         struct rte_qdma_rbp *rbp)
640 {
641         int i;
642
643         i = rte_qdma_vq_create(lcore_id, flags);
644         if (i < 0)
645                 return i;
646         memcpy(&qdma_vqs[i].rbp, rbp, sizeof(struct rte_qdma_rbp));
647         return i;
648 }
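
/*
 * Virtual queue creation sketch (illustrative only). The rte_qdma_rbp
 * values are placeholders that show which fields this driver consumes;
 * real port/PF/VF identifiers depend on the target PCIe endpoint:
 *
 *	int vq_id = rte_qdma_vq_create(rte_lcore_id(),
 *				       RTE_QDMA_VQ_EXCLUSIVE_PQ);
 *
 *	struct rte_qdma_rbp rbp = {
 *		.enable = 1,
 *		.drbp = 1,	// route-by-port towards the destination
 *		.dportid = 1,	// placeholder PCIe port id
 *	};
 *	int rbp_vq_id = rte_qdma_vq_create_rbp(rte_lcore_id(), 0, &rbp);
 */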
649
650 static int
651 dpdmai_dev_enqueue_multi(struct dpaa2_dpdmai_dev *dpdmai_dev,
652                         uint16_t txq_id,
653                         uint16_t vq_id,
654                         struct rte_qdma_rbp *rbp,
655                         struct rte_qdma_job **job,
656                         uint16_t nb_jobs)
657 {
658         struct qbman_fd fd[RTE_QDMA_BURST_NB_MAX];
659         struct dpaa2_queue *txq;
660         struct qbman_eq_desc eqdesc;
661         struct qbman_swp *swp;
662         int ret;
663         uint32_t num_to_send = 0;
664         uint16_t num_tx = 0;
665
666         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
667                 ret = dpaa2_affine_qbman_swp();
668                 if (ret) {
669                         DPAA2_QDMA_ERR("Failure in affining portal");
670                         return 0;
671                 }
672         }
673         swp = DPAA2_PER_LCORE_PORTAL;
674
675         txq = &(dpdmai_dev->tx_queue[txq_id]);
676
677         /* Prepare enqueue descriptor */
678         qbman_eq_desc_clear(&eqdesc);
679         qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
680         qbman_eq_desc_set_no_orp(&eqdesc, 0);
681         qbman_eq_desc_set_response(&eqdesc, 0, 0);
682
683         memset(fd, 0, RTE_QDMA_BURST_NB_MAX * sizeof(struct qbman_fd));
684
685         while (nb_jobs > 0) {
686                 uint32_t loop;
687
688                 num_to_send = (nb_jobs > dpaa2_eqcr_size) ?
689                         dpaa2_eqcr_size : nb_jobs;
690
691                 for (loop = 0; loop < num_to_send; loop++) {
692                         ret = dpdmai_dev_set_fd(&fd[loop],
693                                                 job[num_tx], rbp, vq_id);
694                         if (ret < 0) {
695                                 /* Set nb_jobs to loop, so outer while loop
696                                  * breaks out.
697                                  */
698                                 nb_jobs = loop;
699                                 break;
700                         }
701
702                         num_tx++;
703                 }
704
705                 /* Enqueue the packet to the QBMAN */
706                 uint32_t enqueue_loop = 0, retry_count = 0;
707                 while (enqueue_loop < loop) {
708                         ret = qbman_swp_enqueue_multiple(swp,
709                                                 &eqdesc,
710                                                 &fd[enqueue_loop],
711                                                 NULL,
712                                                 loop - enqueue_loop);
713                         if (unlikely(ret < 0)) {
714                                 retry_count++;
715                                 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
716                                         return num_tx - (loop - enqueue_loop);
717                         } else {
718                                 enqueue_loop += ret;
719                                 retry_count = 0;
720                         }
721                 }
722                 nb_jobs -= loop;
723         }
724         return num_tx;
725 }
726
727 int
728 rte_qdma_vq_enqueue_multi(uint16_t vq_id,
729                           struct rte_qdma_job **job,
730                           uint16_t nb_jobs)
731 {
732         struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
733         struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
734         struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
735         int ret;
736
737         /* Return error in case of wrong lcore_id */
738         if (rte_lcore_id() != qdma_vq->lcore_id) {
739                 DPAA2_QDMA_ERR("QDMA enqueue for vqid %d on wrong core",
740                                 vq_id);
741                 return -EINVAL;
742         }
743
744         ret = dpdmai_dev_enqueue_multi(dpdmai_dev,
745                                  qdma_pq->queue_id,
746                                  vq_id,
747                                  &qdma_vq->rbp,
748                                  job,
749                                  nb_jobs);
750         if (ret < 0) {
751                 DPAA2_QDMA_ERR("DPDMAI device enqueue failed: %d", ret);
752                 return ret;
753         }
754
755         qdma_vq->num_enqueues += ret;
756
757         return ret;
758 }
759
760 int
761 rte_qdma_vq_enqueue(uint16_t vq_id,
762                     struct rte_qdma_job *job)
763 {
764         return rte_qdma_vq_enqueue_multi(vq_id, &job, 1);
765 }
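
/*
 * Data-path enqueue sketch (illustrative only; addresses are placeholders).
 * In the long format, RTE_QDMA_JOB_SRC_PHY / RTE_QDMA_JOB_DEST_PHY mark
 * src/dest as physical (IOVA) addresses; without them the virtual addresses
 * are translated in dpaa2_qdma_populate_fle():
 *
 *	struct rte_qdma_job job = {
 *		.src = src_iova,	// placeholder source address
 *		.dest = dst_iova,	// placeholder destination address
 *		.len = 4096,
 *		.flags = RTE_QDMA_JOB_SRC_PHY | RTE_QDMA_JOB_DEST_PHY,
 *	};
 *	struct rte_qdma_job *jobs[] = { &job };
 *
 *	if (rte_qdma_vq_enqueue_multi(vq_id, jobs, 1) < 0)
 *		// handle enqueue failure
 */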
766
767 /* Function to receive a QDMA job for a given device and queue */
768 static int
769 dpdmai_dev_dequeue_multijob_prefetch(
770                         struct dpaa2_dpdmai_dev *dpdmai_dev,
771                         uint16_t rxq_id,
772                         uint16_t *vq_id,
773                         struct rte_qdma_job **job,
774                         uint16_t nb_jobs)
775 {
776         struct dpaa2_queue *rxq;
777         struct qbman_result *dq_storage, *dq_storage1 = NULL;
778         struct qbman_pull_desc pulldesc;
779         struct qbman_swp *swp;
780         struct queue_storage_info_t *q_storage;
781         uint32_t fqid;
782         uint8_t status, pending;
783         uint8_t num_rx = 0;
784         const struct qbman_fd *fd;
785         uint16_t vqid;
786         int ret, pull_size;
787
788         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
789                 ret = dpaa2_affine_qbman_swp();
790                 if (ret) {
791                         DPAA2_QDMA_ERR("Failure in affining portal");
792                         return 0;
793                 }
794         }
795         swp = DPAA2_PER_LCORE_PORTAL;
796
797         pull_size = (nb_jobs > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_jobs;
798         rxq = &(dpdmai_dev->rx_queue[rxq_id]);
799         fqid = rxq->fqid;
800         q_storage = rxq->q_storage;
801
802         if (unlikely(!q_storage->active_dqs)) {
803                 q_storage->toggle = 0;
804                 dq_storage = q_storage->dq_storage[q_storage->toggle];
805                 q_storage->last_num_pkts = pull_size;
806                 qbman_pull_desc_clear(&pulldesc);
807                 qbman_pull_desc_set_numframes(&pulldesc,
808                                               q_storage->last_num_pkts);
809                 qbman_pull_desc_set_fq(&pulldesc, fqid);
810                 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
811                                 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
812                 if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
813                         while (!qbman_check_command_complete(
814                                get_swp_active_dqs(
815                                DPAA2_PER_LCORE_DPIO->index)))
816                                 ;
817                         clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
818                 }
819                 while (1) {
820                         if (qbman_swp_pull(swp, &pulldesc)) {
821                                 DPAA2_QDMA_DP_WARN(
822                                         "VDQ command not issued. QBMAN busy\n");
823                                 /* Portal was busy, try again */
824                                 continue;
825                         }
826                         break;
827                 }
828                 q_storage->active_dqs = dq_storage;
829                 q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
830                 set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index,
831                                    dq_storage);
832         }
833
834         dq_storage = q_storage->active_dqs;
835         rte_prefetch0((void *)(size_t)(dq_storage));
836         rte_prefetch0((void *)(size_t)(dq_storage + 1));
837
838         /* Prepare next pull descriptor. This gives room for the
839          * prefetching done on DQRR entries
840          */
841         q_storage->toggle ^= 1;
842         dq_storage1 = q_storage->dq_storage[q_storage->toggle];
843         qbman_pull_desc_clear(&pulldesc);
844         qbman_pull_desc_set_numframes(&pulldesc, pull_size);
845         qbman_pull_desc_set_fq(&pulldesc, fqid);
846         qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
847                 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
848
849         /* Check if the previously issued command is completed.
850          * The SWP also appears to be shared between the Ethernet
851          * and the SEC drivers.
852          */
853         while (!qbman_check_command_complete(dq_storage))
854                 ;
855         if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
856                 clear_swp_active_dqs(q_storage->active_dpio_id);
857
858         pending = 1;
859
860         do {
861                 /* Loop until the dq_storage is updated with
862                  * new token by QBMAN
863                  */
864                 while (!qbman_check_new_result(dq_storage))
865                         ;
866                 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
867                 /* Check whether Last Pull command is Expired and
868                  * setting Condition for Loop termination
869                  */
870                 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
871                         pending = 0;
872                         /* Check for valid frame. */
873                         status = qbman_result_DQ_flags(dq_storage);
874                         if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
875                                 continue;
876                 }
877                 fd = qbman_result_DQ_fd(dq_storage);
878
879                 vqid = dpdmai_dev_get_job(fd, &job[num_rx]);
880                 if (vq_id)
881                         vq_id[num_rx] = vqid;
882
883                 dq_storage++;
884                 num_rx++;
885         } while (pending);
886
887         if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
888                 while (!qbman_check_command_complete(
889                        get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
890                         ;
891                 clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
892         }
893         /* issue a volatile dequeue command for next pull */
894         while (1) {
895                 if (qbman_swp_pull(swp, &pulldesc)) {
896                         DPAA2_QDMA_DP_WARN("VDQ command is not issued. "
897                                            "QBMAN is busy (2)\n");
898                         continue;
899                 }
900                 break;
901         }
902
903         q_storage->active_dqs = dq_storage1;
904         q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
905         set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
906
907         return num_rx;
908 }
909
910 static int
911 dpdmai_dev_dequeue_multijob_no_prefetch(
912                 struct dpaa2_dpdmai_dev *dpdmai_dev,
913                 uint16_t rxq_id,
914                 uint16_t *vq_id,
915                 struct rte_qdma_job **job,
916                 uint16_t nb_jobs)
917 {
918         struct dpaa2_queue *rxq;
919         struct qbman_result *dq_storage;
920         struct qbman_pull_desc pulldesc;
921         struct qbman_swp *swp;
922         uint32_t fqid;
923         uint8_t status, pending;
924         uint8_t num_rx = 0;
925         const struct qbman_fd *fd;
926         uint16_t vqid;
927         int ret, next_pull = nb_jobs, num_pulled = 0;
928
929         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
930                 ret = dpaa2_affine_qbman_swp();
931                 if (ret) {
932                         DPAA2_QDMA_ERR("Failure in affining portal");
933                         return 0;
934                 }
935         }
936         swp = DPAA2_PER_LCORE_PORTAL;
937
938         rxq = &(dpdmai_dev->rx_queue[rxq_id]);
939         fqid = rxq->fqid;
940
941         do {
942                 dq_storage = rxq->q_storage->dq_storage[0];
943                 /* Prepare dequeue descriptor */
944                 qbman_pull_desc_clear(&pulldesc);
945                 qbman_pull_desc_set_fq(&pulldesc, fqid);
946                 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
947                         (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
948
949                 if (next_pull > dpaa2_dqrr_size) {
950                         qbman_pull_desc_set_numframes(&pulldesc,
951                                         dpaa2_dqrr_size);
952                         next_pull -= dpaa2_dqrr_size;
953                 } else {
954                         qbman_pull_desc_set_numframes(&pulldesc, next_pull);
955                         next_pull = 0;
956                 }
957
958                 while (1) {
959                         if (qbman_swp_pull(swp, &pulldesc)) {
960                                 DPAA2_QDMA_DP_WARN("VDQ command not issued. QBMAN busy");
961                                 /* Portal was busy, try again */
962                                 continue;
963                         }
964                         break;
965                 }
966
967                 rte_prefetch0((void *)((size_t)(dq_storage + 1)));
968                 /* Check if the previous issued command is completed. */
969                 while (!qbman_check_command_complete(dq_storage))
970                         ;
971
972                 num_pulled = 0;
973                 pending = 1;
974
975                 do {
976                         /* Loop until dq_storage is updated
977                          * with new token by QBMAN
978                          */
979                         while (!qbman_check_new_result(dq_storage))
980                                 ;
981                         rte_prefetch0((void *)((size_t)(dq_storage + 2)));
982
983                         if (qbman_result_DQ_is_pull_complete(dq_storage)) {
984                                 pending = 0;
985                                 /* Check for valid frame. */
986                                 status = qbman_result_DQ_flags(dq_storage);
987                                 if (unlikely((status &
988                                         QBMAN_DQ_STAT_VALIDFRAME) == 0))
989                                         continue;
990                         }
991                         fd = qbman_result_DQ_fd(dq_storage);
992
993                         vqid = dpdmai_dev_get_job(fd, &job[num_rx]);
994                         if (vq_id)
995                                 vq_id[num_rx] = vqid;
996
997                         dq_storage++;
998                         num_rx++;
999                         num_pulled++;
1000
1001                 } while (pending);
1002         /* Last VDQ provided all packets and more packets are requested */
1003         } while (next_pull && num_pulled == dpaa2_dqrr_size);
1004
1005         return num_rx;
1006 }
1007
1008 int
1009 rte_qdma_vq_dequeue_multi(uint16_t vq_id,
1010                           struct rte_qdma_job **job,
1011                           uint16_t nb_jobs)
1012 {
1013         struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
1014         struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
1015         struct qdma_virt_queue *temp_qdma_vq;
1016         struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
1017         int ring_count, ret = 0, i;
1018
1019         /* Return error in case of wrong lcore_id */
1020         if (rte_lcore_id() != (unsigned int)(qdma_vq->lcore_id)) {
1021                 DPAA2_QDMA_WARN("QDMA dequeue for vqid %d on wrong core",
1022                                 vq_id);
1023                 return -EINVAL;
1024         }
1025
1026         /* Only dequeue when there are pending jobs on VQ */
1027         if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
1028                 return 0;
1029
1030         if (qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))
1031                 nb_jobs = (qdma_vq->num_enqueues -  qdma_vq->num_dequeues);
1032
1033         if (qdma_vq->exclusive_hw_queue) {
1034                 /* In case of exclusive queue directly fetch from HW queue */
1035                 ret = dpdmai_dev_dequeue_multijob(dpdmai_dev, qdma_pq->queue_id,
1036                                          NULL, job, nb_jobs);
1037                 if (ret < 0) {
1038                         DPAA2_QDMA_ERR(
1039                                 "Dequeue from DPDMAI device failed: %d", ret);
1040                         return ret;
1041                 }
1042                 qdma_vq->num_dequeues += ret;
1043         } else {
1044                 uint16_t temp_vq_id[RTE_QDMA_BURST_NB_MAX];
1045                 /*
1046                  * Get the QDMA completed jobs from the software ring.
1047                  * In case they are not available on the ring poke the HW
1048                  * to fetch completed jobs from corresponding HW queues
1049                  */
1050                 ring_count = rte_ring_count(qdma_vq->status_ring);
1051                 if (ring_count < nb_jobs) {
1052                         /* TODO - How to have right budget */
1053                         ret = dpdmai_dev_dequeue_multijob(dpdmai_dev,
1054                                         qdma_pq->queue_id,
1055                                         temp_vq_id, job, nb_jobs);
1056                         for (i = 0; i < ret; i++) {
1057                                 temp_qdma_vq = &qdma_vqs[temp_vq_id[i]];
1058                                 rte_ring_enqueue(temp_qdma_vq->status_ring,
1059                                         (void *)(job[i]));
1060                         }
1061                         ring_count = rte_ring_count(
1062                                         qdma_vq->status_ring);
1063                 }
1064
1065                 if (ring_count) {
1066                         /* Dequeue job from the software ring
1067                          * to provide to the user
1068                          */
1069                         ret = rte_ring_dequeue_bulk(qdma_vq->status_ring,
1070                                         (void **)job, ring_count, NULL);
1071                         if (ret)
1072                                 qdma_vq->num_dequeues += ret;
1073                 }
1074         }
1075
1076         return ret;
1077 }
1078
1079 struct rte_qdma_job *
1080 rte_qdma_vq_dequeue(uint16_t vq_id)
1081 {
1082         int ret;
1083         struct rte_qdma_job *job = NULL;
1084
1085         ret = rte_qdma_vq_dequeue_multi(vq_id, &job, 1);
1086         if (ret < 0)
1087                 DPAA2_QDMA_DP_WARN("DPDMAI device dequeue failed: %d", ret);
1088
1089         return job;
1090 }
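
/*
 * Completion polling sketch (illustrative only). Dequeue must be called
 * from the lcore that created the virtual queue; a non-zero job->status
 * reports the error/status bits taken from the returned frame descriptor:
 *
 *	struct rte_qdma_job *done[RTE_QDMA_BURST_NB_MAX];
 *	int i, n;
 *
 *	n = rte_qdma_vq_dequeue_multi(vq_id, done, RTE_QDMA_BURST_NB_MAX);
 *	for (i = 0; i < n; i++)
 *		if (done[i]->status)
 *			// handle transfer error for done[i]
 */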
1091
1092 void
1093 rte_qdma_vq_stats(uint16_t vq_id,
1094                   struct rte_qdma_vq_stats *vq_status)
1095 {
1096         struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
1097
1098         if (qdma_vq->in_use) {
1099                 vq_status->exclusive_hw_queue = qdma_vq->exclusive_hw_queue;
1100                 vq_status->lcore_id = qdma_vq->lcore_id;
1101                 vq_status->num_enqueues = qdma_vq->num_enqueues;
1102                 vq_status->num_dequeues = qdma_vq->num_dequeues;
1103                 vq_status->num_pending_jobs = vq_status->num_enqueues -
1104                                 vq_status->num_dequeues;
1105         }
1106 }
1107
1108 int
1109 rte_qdma_vq_destroy(uint16_t vq_id)
1110 {
1111         struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
1112
1113         DPAA2_QDMA_FUNC_TRACE();
1114
1115         /* In case there are pending jobs on any VQ, return -EBUSY */
1116         if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
1117                 return -EBUSY;
1118
1119         rte_spinlock_lock(&qdma_dev.lock);
1120
1121         if (qdma_vq->exclusive_hw_queue)
1122                 free_hw_queue(qdma_vq->hw_queue);
1123         else {
1124                 if (qdma_vq->status_ring)
1125                         rte_ring_free(qdma_vq->status_ring);
1126
1127                 put_hw_queue(qdma_vq->hw_queue);
1128         }
1129
1130         memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));
1131
1132         rte_spinlock_unlock(&qdma_dev.lock);
1133
1134         return 0;
1135 }
1136
1137 int
1138 rte_qdma_vq_destroy_rbp(uint16_t vq_id)
1139 {
1140         struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
1141
1142         DPAA2_QDMA_FUNC_TRACE();
1143
1144         /* In case there are pending jobs on any VQ, return -EBUSY */
1145         if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
1146                 return -EBUSY;
1147
1148         rte_spinlock_lock(&qdma_dev.lock);
1149
1150         if (qdma_vq->exclusive_hw_queue) {
1151                 free_hw_queue(qdma_vq->hw_queue);
1152         } else {
1153                 if (qdma_vq->status_ring)
1154                         rte_ring_free(qdma_vq->status_ring);
1155
1156                 put_hw_queue(qdma_vq->hw_queue);
1157         }
1158
1159         memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));
1160
1161         rte_spinlock_unlock(&qdma_dev.lock);
1162
1163         return 0;
1164 }
1165
1166 void
1167 rte_qdma_stop(void)
1168 {
1169         DPAA2_QDMA_FUNC_TRACE();
1170
1171         qdma_dev.state = 0;
1172 }
1173
1174 void
1175 rte_qdma_destroy(void)
1176 {
1177         DPAA2_QDMA_FUNC_TRACE();
1178
1179         rte_qdma_reset();
1180 }
1181
1182 static const struct rte_rawdev_ops dpaa2_qdma_ops;
1183
1184 static int
1185 add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
1186 {
1187         struct qdma_hw_queue *queue;
1188         int i;
1189
1190         DPAA2_QDMA_FUNC_TRACE();
1191
1192         for (i = 0; i < dpdmai_dev->num_queues; i++) {
1193                 queue = rte_zmalloc(NULL, sizeof(struct qdma_hw_queue), 0);
1194                 if (!queue) {
1195                         DPAA2_QDMA_ERR(
1196                                 "Memory allocation failed for QDMA queue");
1197                         return -ENOMEM;
1198                 }
1199
1200                 queue->dpdmai_dev = dpdmai_dev;
1201                 queue->queue_id = i;
1202
1203                 TAILQ_INSERT_TAIL(&qdma_queue_list, queue, next);
1204                 qdma_dev.num_hw_queues++;
1205         }
1206
1207         return 0;
1208 }
1209
1210 static void
1211 remove_hw_queues_from_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
1212 {
1213         struct qdma_hw_queue *queue = NULL;
1214         struct qdma_hw_queue *tqueue = NULL;
1215
1216         DPAA2_QDMA_FUNC_TRACE();
1217
1218         TAILQ_FOREACH_SAFE(queue, &qdma_queue_list, next, tqueue) {
1219                 if (queue->dpdmai_dev == dpdmai_dev) {
1220                         TAILQ_REMOVE(&qdma_queue_list, queue, next);
1221                         rte_free(queue);
1222                         queue = NULL;
1223                 }
1224         }
1225 }
1226
1227 static int
1228 dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)
1229 {
1230         struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
1231         int ret, i;
1232
1233         DPAA2_QDMA_FUNC_TRACE();
1234
1235         /* Remove HW queues from global list */
1236         remove_hw_queues_from_list(dpdmai_dev);
1237
1238         ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1239                              dpdmai_dev->token);
1240         if (ret)
1241                 DPAA2_QDMA_ERR("dpdmai disable failed");
1242
1243         /* Free up the Rx DQRR storage */
1244         for (i = 0; i < dpdmai_dev->num_queues; i++) {
1245                 struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[i]);
1246
1247                 if (rxq->q_storage) {
1248                         dpaa2_free_dq_storage(rxq->q_storage);
1249                         rte_free(rxq->q_storage);
1250                 }
1251         }
1252
1253         /* Close the device at underlying layer*/
1254         ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
1255         if (ret)
1256                 DPAA2_QDMA_ERR("Failure closing dpdmai device");
1257
1258         return 0;
1259 }
1260
1261 static int
1262 check_devargs_handler(__rte_unused const char *key, const char *value,
1263                       __rte_unused void *opaque)
1264 {
1265         if (strcmp(value, "1"))
1266                 return -1;
1267
1268         return 0;
1269 }
1270
1271 static int
1272 dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
1273 {
1274         struct rte_kvargs *kvlist;
1275
1276         if (!devargs)
1277                 return 0;
1278
1279         kvlist = rte_kvargs_parse(devargs->args, NULL);
1280         if (!kvlist)
1281                 return 0;
1282
1283         if (!rte_kvargs_count(kvlist, key)) {
1284                 rte_kvargs_free(kvlist);
1285                 return 0;
1286         }
1287
1288         if (rte_kvargs_process(kvlist, key,
1289                                check_devargs_handler, NULL) < 0) {
1290                 rte_kvargs_free(kvlist);
1291                 return 0;
1292         }
1293         rte_kvargs_free(kvlist);
1294
1295         return 1;
1296 }
1297
1298 static int
1299 dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
1300 {
1301         struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
1302         struct dpdmai_rx_queue_cfg rx_queue_cfg;
1303         struct dpdmai_attr attr;
1304         struct dpdmai_rx_queue_attr rx_attr;
1305         struct dpdmai_tx_queue_attr tx_attr;
1306         int ret, i;
1307
1308         DPAA2_QDMA_FUNC_TRACE();
1309
1310         /* Open DPDMAI device */
1311         dpdmai_dev->dpdmai_id = dpdmai_id;
1312         dpdmai_dev->dpdmai.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
1313         ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1314                           dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
1315         if (ret) {
1316                 DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
1317                 return ret;
1318         }
1319
1320         /* Get DPDMAI attributes */
1321         ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1322                                     dpdmai_dev->token, &attr);
1323         if (ret) {
1324                 DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
1325                                ret);
1326                 goto init_err;
1327         }
1328         dpdmai_dev->num_queues = attr.num_of_queues;
1329
1330         /* Set up Rx Queues */
1331         for (i = 0; i < dpdmai_dev->num_queues; i++) {
1332                 struct dpaa2_queue *rxq;
1333
1334                 memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
1335                 ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
1336                                           CMD_PRI_LOW,
1337                                           dpdmai_dev->token,
1338                                           i, 0, &rx_queue_cfg);
1339                 if (ret) {
1340                         DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
1341                                        ret);
1342                         goto init_err;
1343                 }
1344
1345                 /* Allocate DQ storage for the DPDMAI Rx queues */
1346                 rxq = &(dpdmai_dev->rx_queue[i]);
1347                 rxq->q_storage = rte_malloc("dq_storage",
1348                                             sizeof(struct queue_storage_info_t),
1349                                             RTE_CACHE_LINE_SIZE);
1350                 if (!rxq->q_storage) {
1351                         DPAA2_QDMA_ERR("q_storage allocation failed");
1352                         ret = -ENOMEM;
1353                         goto init_err;
1354                 }
1355
1356                 memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
1357                 ret = dpaa2_alloc_dq_storage(rxq->q_storage);
1358                 if (ret) {
1359                         DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
1360                         goto init_err;
1361                 }
1362         }
1363
1364         /* Get Rx and Tx queues FQID's */
1365         for (i = 0; i < dpdmai_dev->num_queues; i++) {
1366                 ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1367                                           dpdmai_dev->token, i, 0, &rx_attr);
1368                 if (ret) {
1369                         DPAA2_QDMA_ERR("Reading device failed with err: %d",
1370                                        ret);
1371                         goto init_err;
1372                 }
1373                 dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
1374
1375                 ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1376                                           dpdmai_dev->token, i, 0, &tx_attr);
1377                 if (ret) {
1378                         DPAA2_QDMA_ERR("Reading device failed with err: %d",
1379                                        ret);
1380                         goto init_err;
1381                 }
1382                 dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
1383         }
1384
1385         /* Enable the device */
1386         ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1387                             dpdmai_dev->token);
1388         if (ret) {
1389                 DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
1390                 goto init_err;
1391         }
1392
1393         /* Add the HW queue to the global list */
1394         ret = add_hw_queues_to_list(dpdmai_dev);
1395         if (ret) {
1396                 DPAA2_QDMA_ERR("Adding H/W queue to list failed");
1397                 goto init_err;
1398         }
1399
1400         if (dpaa2_get_devargs(rawdev->device->devargs,
1401                 DPAA2_QDMA_NO_PREFETCH)) {
1402                 /* Prefetch disabled via devargs: use the no-prefetch dequeue routine. */
1403                 dpdmai_dev_dequeue_multijob =
1404                                 dpdmai_dev_dequeue_multijob_no_prefetch;
1405                 DPAA2_QDMA_INFO("No Prefetch RX Mode enabled");
1406         } else {
1407                 dpdmai_dev_dequeue_multijob =
1408                         dpdmai_dev_dequeue_multijob_prefetch;
1409         }
1410
1411         if (!dpaa2_coherent_no_alloc_cache) {
1412                 if (dpaa2_svr_family == SVR_LX2160A) {
1413                         dpaa2_coherent_no_alloc_cache =
1414                                 DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
1415                         dpaa2_coherent_alloc_cache =
1416                                 DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
1417                 } else {
1418                         dpaa2_coherent_no_alloc_cache =
1419                                 DPAA2_COHERENT_NO_ALLOCATE_CACHE;
1420                         dpaa2_coherent_alloc_cache =
1421                                 DPAA2_COHERENT_ALLOCATE_CACHE;
1422                 }
1423         }
1424
1425         DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
1426
1427         return 0;
1428 init_err:
1429         dpaa2_dpdmai_dev_uninit(rawdev);
1430         return ret;
1431 }
1432
1433 static int
1434 rte_dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
1435                      struct rte_dpaa2_device *dpaa2_dev)
1436 {
1437         struct rte_rawdev *rawdev;
1438         int ret;
1439
1440         DPAA2_QDMA_FUNC_TRACE();
1441
1442         rawdev = rte_rawdev_pmd_allocate(dpaa2_dev->device.name,
1443                         sizeof(struct dpaa2_dpdmai_dev),
1444                         rte_socket_id());
1445         if (!rawdev) {
1446                 DPAA2_QDMA_ERR("Unable to allocate rawdevice");
1447                 return -EINVAL;
1448         }
1449
1450         dpaa2_dev->rawdev = rawdev;
1451         rawdev->dev_ops = &dpaa2_qdma_ops;
1452         rawdev->device = &dpaa2_dev->device;
1453         rawdev->driver_name = dpaa2_drv->driver.name;
1454
1455         /* Invoke PMD device initialization function */
1456         ret = dpaa2_dpdmai_dev_init(rawdev, dpaa2_dev->object_id);
1457         if (ret) {
1458                 rte_rawdev_pmd_release(rawdev);
1459                 return ret;
1460         }
1461
1462         return 0;
1463 }
1464
1465 static int
1466 rte_dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
1467 {
1468         struct rte_rawdev *rawdev = dpaa2_dev->rawdev;
1469         int ret;
1470
1471         DPAA2_QDMA_FUNC_TRACE();
1472
1473         dpaa2_dpdmai_dev_uninit(rawdev);
1474
1475         ret = rte_rawdev_pmd_release(rawdev);
1476         if (ret)
1477                 DPAA2_QDMA_ERR("Device cleanup failed");
1478
1479         return 0;
1480 }
1481
1482 static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
1483         .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
1484         .drv_type = DPAA2_QDMA,
1485         .probe = rte_dpaa2_qdma_probe,
1486         .remove = rte_dpaa2_qdma_remove,
1487 };
1488
1489 RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
1490 RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
1491         "no_prefetch=<int> ");
1492
1493 RTE_INIT(dpaa2_qdma_init_log)
1494 {
1495         dpaa2_qdma_logtype = rte_log_register("pmd.raw.dpaa2.qdma");
1496         if (dpaa2_qdma_logtype >= 0)
1497                 rte_log_set_level(dpaa2_qdma_logtype, RTE_LOG_INFO);
1498 }