drivers/raw/dpaa2_qdma/dpaa2_qdma.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2020 NXP
3  */
4
5 #include <string.h>
6
7 #include <rte_eal.h>
8 #include <rte_fslmc.h>
9 #include <rte_atomic.h>
10 #include <rte_lcore.h>
11 #include <rte_rawdev.h>
12 #include <rte_rawdev_pmd.h>
13 #include <rte_malloc.h>
14 #include <rte_ring.h>
15 #include <rte_mempool.h>
16 #include <rte_prefetch.h>
17 #include <rte_kvargs.h>
18
19 #include <mc/fsl_dpdmai.h>
20 #include <portal/dpaa2_hw_pvt.h>
21 #include <portal/dpaa2_hw_dpio.h>
22
23 #include "rte_pmd_dpaa2_qdma.h"
24 #include "dpaa2_qdma.h"
25 #include "dpaa2_qdma_logs.h"
26
27 #define DPAA2_QDMA_NO_PREFETCH "no_prefetch"
28
29 /* Dynamic log type identifier */
30 int dpaa2_qdma_logtype;
31
32 uint32_t dpaa2_coherent_no_alloc_cache;
33 uint32_t dpaa2_coherent_alloc_cache;
34
35 /* QDMA device */
36 static struct qdma_device q_dev;
37
38 /* QDMA H/W queues list */
39 TAILQ_HEAD(qdma_hw_queue_list, qdma_hw_queue);
40 static struct qdma_hw_queue_list qdma_queue_list
41         = TAILQ_HEAD_INITIALIZER(qdma_queue_list);
42
43 /* QDMA per core data */
44 static struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];
45
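/* Build a "simple PCI" format frame descriptor for one copy job.
 * The route-by-port (RBP) parameters select the PCIe port on each side;
 * when srbp/drbp is set the read/write transaction type is 0, otherwise
 * the coherent cacheable types chosen at probe time are used.
 */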
46 static inline int
47 qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
48                         uint32_t len, struct qbman_fd *fd,
49                         struct rte_qdma_rbp *rbp)
50 {
51         fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
52         fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));
53
54         fd->simple_pci.len_sl = len;
55
56         fd->simple_pci.bmt = 1;
57         fd->simple_pci.fmt = 3;
58         fd->simple_pci.sl = 1;
59         fd->simple_pci.ser = 1;
60
61         fd->simple_pci.sportid = rbp->sportid;  /*pcie 3 */
62         fd->simple_pci.srbp = rbp->srbp;
63         if (rbp->srbp)
64                 fd->simple_pci.rdttype = 0;
65         else
66                 fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;
67
68         /*dest is pcie memory */
69         fd->simple_pci.dportid = rbp->dportid;  /*pcie 3 */
70         fd->simple_pci.drbp = rbp->drbp;
71         if (rbp->drbp)
72                 fd->simple_pci.wrttype = 0;
73         else
74                 fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;
75
76         fd->simple_pci.daddr_lo = lower_32_bits((uint64_t) (dest));
77         fd->simple_pci.daddr_hi = upper_32_bits((uint64_t) (dest));
78
79         return 0;
80 }
81
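/* Build a "simple DDR" format frame descriptor for memory-to-memory
 * copies. Read/write transaction types use the coherent cacheable
 * settings selected at probe time (SoC dependent).
 */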
82 static inline int
83 qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
84                         uint32_t len, struct qbman_fd *fd)
85 {
86         fd->simple_ddr.saddr_lo = lower_32_bits((uint64_t) (src));
87         fd->simple_ddr.saddr_hi = upper_32_bits((uint64_t) (src));
88
89         fd->simple_ddr.len = len;
90
91         fd->simple_ddr.bmt = 1;
92         fd->simple_ddr.fmt = 3;
93         fd->simple_ddr.sl = 1;
94         fd->simple_ddr.ser = 1;
95         /**
96          * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011
97          * Coherent copy of cacheable memory,
98          * lookup in downstream cache, no allocate
99          * on miss
100          */
101         fd->simple_ddr.rns = 0;
102         fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
103         /**
104          * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111
105          * Coherent write of cacheable memory,
106          * lookup in downstream cache, no allocate on miss
107          */
108         fd->simple_ddr.wns = 0;
109         fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;
110
111         fd->simple_ddr.daddr_lo = lower_32_bits((uint64_t) (dest));
112         fd->simple_ddr.daddr_hi = upper_32_bits((uint64_t) (dest));
113
114         return 0;
115 }
116
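/* Fill the compound frame list used by the long-format path:
 * fle[0] points to the pair of source/destination descriptors (SDD),
 * fle[1] describes the source buffer and fle[2] the destination buffer.
 * The RBP settings, when enabled, steer either side to a PCIe port.
 */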
117 static void
118 dpaa2_qdma_populate_fle(struct qbman_fle *fle,
119                         struct rte_qdma_rbp *rbp,
120                         uint64_t src, uint64_t dest,
121                         size_t len, uint32_t flags)
122 {
123         struct qdma_sdd *sdd;
124
125         sdd = (struct qdma_sdd *)((uint8_t *)(fle) +
126                 (DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle)));
127
128         /* first frame list to source descriptor */
129         DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sdd));
130         DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));
131
132         /* source and destination descriptor */
133         if (rbp && rbp->enable) {
134                 /* source */
135                 sdd->read_cmd.portid = rbp->sportid;
136                 sdd->rbpcmd_simple.pfid = rbp->spfid;
137                 sdd->rbpcmd_simple.vfid = rbp->svfid;
138
139                 if (rbp->srbp) {
140                         sdd->read_cmd.rbp = rbp->srbp;
141                         sdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW;
142                 } else {
143                         sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
144                 }
145                 sdd++;
146                 /* destination */
147                 sdd->write_cmd.portid = rbp->dportid;
148                 sdd->rbpcmd_simple.pfid = rbp->dpfid;
149                 sdd->rbpcmd_simple.vfid = rbp->dvfid;
150
151                 if (rbp->drbp) {
152                         sdd->write_cmd.rbp = rbp->drbp;
153                         sdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW;
154                 } else {
155                         sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
156                 }
157
158         } else {
159                 sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
160                 sdd++;
161                 sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
162         }
163         fle++;
164         /* source frame list to source buffer */
165         if (flags & RTE_QDMA_JOB_SRC_PHY) {
166                 DPAA2_SET_FLE_ADDR(fle, src);
167                 DPAA2_SET_FLE_BMT(fle);
168         } else {
169                 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
170         }
171         DPAA2_SET_FLE_LEN(fle, len);
172
173         fle++;
174         /* destination frame list to destination buffer */
175         if (flags & RTE_QDMA_JOB_DEST_PHY) {
176                 DPAA2_SET_FLE_BMT(fle);
177                 DPAA2_SET_FLE_ADDR(fle, dest);
178         } else {
179                 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
180         }
181         DPAA2_SET_FLE_LEN(fle, len);
182
183         /* Final bit: 1, for last frame list */
184         DPAA2_SET_FLE_FIN(fle);
185 }
186
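/* Short (ultra-short) format enqueue: the job pointer is stashed in the
 * word just before the source buffer (or the destination buffer when the
 * source carries an RBP/PCIe upper address), so it can be recovered on
 * dequeue without a separate context pool.
 */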
187 static inline int dpdmai_dev_set_fd_us(
188                 struct qdma_virt_queue *qdma_vq,
189                 struct qbman_fd *fd,
190                 struct rte_qdma_job *job)
191 {
192         struct rte_qdma_rbp *rbp = &qdma_vq->rbp;
193         struct rte_qdma_job **ppjob;
194         size_t iova;
195         int ret = 0;
196
197         if (job->src & QDMA_RBP_UPPER_ADDRESS_MASK)
198                 iova = (size_t)job->dest;
199         else
200                 iova = (size_t)job->src;
201
202         /* Set the metadata */
203         job->vq_id = qdma_vq->vq_id;
204         ppjob = (struct rte_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
205         *ppjob = job;
206
207         if ((rbp->drbp == 1) || (rbp->srbp == 1))
208                 ret = qdma_populate_fd_pci((phys_addr_t) job->src,
209                                            (phys_addr_t) job->dest,
210                                            job->len, fd, rbp);
211         else
212                 ret = qdma_populate_fd_ddr((phys_addr_t) job->src,
213                                            (phys_addr_t) job->dest,
214                                            job->len, fd);
215         return ret;
216 }
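
/* Long format enqueue: an FLE/SDD block is taken from the FLE pool, the
 * job pointer is stored in the metadata preceding it, and the FD points
 * to the compound frame list built by dpaa2_qdma_populate_fle().
 */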
217 static inline int dpdmai_dev_set_fd_lf(
218                 struct qdma_virt_queue *qdma_vq,
219                 struct qbman_fd *fd,
220                 struct rte_qdma_job *job)
221 {
222         struct rte_qdma_rbp *rbp = &qdma_vq->rbp;
223         struct rte_qdma_job **ppjob;
224         struct qbman_fle *fle;
225         int ret = 0;
226         struct qdma_device *qdma_dev = QDMA_DEV_OF_VQ(qdma_vq);
227
228         /*
229          * Get an FLE/SDD from FLE pool.
230          * Note: IO metadata is before the FLE and SDD memory.
231          */
232         ret = rte_mempool_get(qdma_dev->fle_pool, (void **)(&ppjob));
233         if (ret) {
234                 DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
235                 return ret;
236         }
237
238         /* Set the metadata */
239         job->vq_id = qdma_vq->vq_id;
240         *ppjob = job;
241
242         fle = (struct qbman_fle *)(ppjob + 1);
243
244         DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
245         DPAA2_SET_FD_COMPOUND_FMT(fd);
246         DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
247
248         /* Populate FLE */
249         memset(fle, 0, QDMA_FLE_POOL_SIZE);
250         dpaa2_qdma_populate_fle(fle, rbp, job->src, job->dest,
251                                 job->len, job->flags);
252
253         return 0;
254 }
255
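/* Short format dequeue: recover the job pointer stored just before the
 * source/destination buffer and report the HW error bits in job->status.
 */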
256 static inline uint16_t dpdmai_dev_get_job_us(
257                                 struct qdma_virt_queue *qdma_vq __rte_unused,
258                                 const struct qbman_fd *fd,
259                                 struct rte_qdma_job **job)
260 {
261         uint16_t vqid;
262         size_t iova;
263         struct rte_qdma_job **ppjob;
264
265         if (fd->simple_pci.saddr_hi & (QDMA_RBP_UPPER_ADDRESS_MASK >> 32))
266                 iova = (size_t)(((uint64_t)fd->simple_pci.daddr_hi) << 32
267                                 | (uint64_t)fd->simple_pci.daddr_lo);
268         else
269                 iova = (size_t)(((uint64_t)fd->simple_pci.saddr_hi) << 32
270                                 | (uint64_t)fd->simple_pci.saddr_lo);
271
272         ppjob = (struct rte_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
273         *job = (struct rte_qdma_job *)*ppjob;
274         (*job)->status = (fd->simple_pci.acc_err << 8) |
275                                         (fd->simple_pci.error);
276         vqid = (*job)->vq_id;
277
278         return vqid;
279 }
280
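/* Long format dequeue: recover the job pointer from the metadata that
 * precedes the FLE block, fill in job->status and return the FLE/SDD
 * block to the pool.
 */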
281 static inline uint16_t dpdmai_dev_get_job_lf(
282                                                 struct qdma_virt_queue *qdma_vq,
283                                                 const struct qbman_fd *fd,
284                                                 struct rte_qdma_job **job)
285 {
286         struct rte_qdma_job **ppjob;
287         uint16_t vqid;
288         struct qdma_device *qdma_dev = QDMA_DEV_OF_VQ(qdma_vq);
289
290         /*
291          * Fetch metadata from FLE. job and vq_id were set
292          * in metadata in the enqueue operation.
293          */
294         ppjob = (struct rte_qdma_job **)
295                         DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
296         ppjob -= 1;
297
298         *job = (struct rte_qdma_job *)*ppjob;
299         (*job)->status = (DPAA2_GET_FD_ERR(fd) << 8) |
300                          (DPAA2_GET_FD_FRC(fd) & 0xFF);
301         vqid = (*job)->vq_id;
302
303         /* Free FLE to the pool */
304         rte_mempool_put(qdma_dev->fle_pool, (void *)ppjob);
305
306         return vqid;
307 }
308
309 /* Function to receive completed QDMA jobs for a given device and queue */
310 static int
311 dpdmai_dev_dequeue_multijob_prefetch(
312                         struct qdma_virt_queue *qdma_vq,
313                         uint16_t *vq_id,
314                         struct rte_qdma_job **job,
315                         uint16_t nb_jobs)
316 {
317         struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
318         struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
319         uint16_t rxq_id = qdma_pq->queue_id;
320
321         struct dpaa2_queue *rxq;
322         struct qbman_result *dq_storage, *dq_storage1 = NULL;
323         struct qbman_pull_desc pulldesc;
324         struct qbman_swp *swp;
325         struct queue_storage_info_t *q_storage;
326         uint32_t fqid;
327         uint8_t status, pending;
328         uint8_t num_rx = 0;
329         const struct qbman_fd *fd;
330         uint16_t vqid;
331         int ret, pull_size;
332
333         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
334                 ret = dpaa2_affine_qbman_swp();
335                 if (ret) {
336                         DPAA2_QDMA_ERR(
337                                 "Failed to allocate IO portal, tid: %d\n",
338                                 rte_gettid());
339                         return 0;
340                 }
341         }
342         swp = DPAA2_PER_LCORE_PORTAL;
343
344         pull_size = (nb_jobs > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_jobs;
345         rxq = &(dpdmai_dev->rx_queue[rxq_id]);
346         fqid = rxq->fqid;
347         q_storage = rxq->q_storage;
348
349         if (unlikely(!q_storage->active_dqs)) {
350                 q_storage->toggle = 0;
351                 dq_storage = q_storage->dq_storage[q_storage->toggle];
352                 q_storage->last_num_pkts = pull_size;
353                 qbman_pull_desc_clear(&pulldesc);
354                 qbman_pull_desc_set_numframes(&pulldesc,
355                                               q_storage->last_num_pkts);
356                 qbman_pull_desc_set_fq(&pulldesc, fqid);
357                 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
358                                 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
359                 if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
360                         while (!qbman_check_command_complete(
361                                 get_swp_active_dqs(
362                                 DPAA2_PER_LCORE_DPIO->index)))
363                                 ;
364                         clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
365                 }
366                 while (1) {
367                         if (qbman_swp_pull(swp, &pulldesc)) {
368                                 DPAA2_QDMA_DP_WARN(
369                                         "VDQ command not issued. QBMAN busy\n");
370                                 /* Portal was busy, try again */
371                                 continue;
372                         }
373                         break;
374                 }
375                 q_storage->active_dqs = dq_storage;
376                 q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
377                 set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index,
378                                    dq_storage);
379         }
380
381         dq_storage = q_storage->active_dqs;
382         rte_prefetch0((void *)(size_t)(dq_storage));
383         rte_prefetch0((void *)(size_t)(dq_storage + 1));
384
385         /* Prepare next pull descriptor. This will give space for the
386          * prefetching done on DQRR entries
387          */
388         q_storage->toggle ^= 1;
389         dq_storage1 = q_storage->dq_storage[q_storage->toggle];
390         qbman_pull_desc_clear(&pulldesc);
391         qbman_pull_desc_set_numframes(&pulldesc, pull_size);
392         qbman_pull_desc_set_fq(&pulldesc, fqid);
393         qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
394                 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
395
396         /* Check if the previously issued command has completed.
397          * The SWP also appears to be shared between the Ethernet
398          * and SEC drivers, so wait for its completion here.
399          */
400         while (!qbman_check_command_complete(dq_storage))
401                 ;
402         if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
403                 clear_swp_active_dqs(q_storage->active_dpio_id);
404
405         pending = 1;
406
407         do {
408                 /* Loop until the dq_storage is updated with
409                  * new token by QBMAN
410                  */
411                 while (!qbman_check_new_result(dq_storage))
412                         ;
413                 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
414                 /* Check whether the last pull command has expired
415                  * and set the condition for loop termination.
416                  */
417                 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
418                         pending = 0;
419                         /* Check for valid frame. */
420                         status = qbman_result_DQ_flags(dq_storage);
421                         if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
422                                 continue;
423                 }
424                 fd = qbman_result_DQ_fd(dq_storage);
425
426                 vqid = qdma_vq->get_job(qdma_vq, fd, &job[num_rx]);
427                 if (vq_id)
428                         vq_id[num_rx] = vqid;
429
430                 dq_storage++;
431                 num_rx++;
432         } while (pending);
433
434         if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
435                 while (!qbman_check_command_complete(
436                         get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
437                         ;
438                 clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
439         }
440         /* issue a volatile dequeue command for next pull */
441         while (1) {
442                 if (qbman_swp_pull(swp, &pulldesc)) {
443                         DPAA2_QDMA_DP_WARN(
444                                 "VDQ command is not issued. QBMAN is busy (2)\n");
445                         continue;
446                 }
447                 break;
448         }
449
450         q_storage->active_dqs = dq_storage1;
451         q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
452         set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
453
454         return num_rx;
455 }
456
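/* Dequeue variant without DQRR prefetch: a volatile dequeue command is
 * issued and drained synchronously on every call, pulling up to nb_jobs
 * frames in chunks of at most dpaa2_dqrr_size.
 */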
457 static int
458 dpdmai_dev_dequeue_multijob_no_prefetch(
459                 struct qdma_virt_queue *qdma_vq,
460                 uint16_t *vq_id,
461                 struct rte_qdma_job **job,
462                 uint16_t nb_jobs)
463 {
464         struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
465         struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
466         uint16_t rxq_id = qdma_pq->queue_id;
467
468         struct dpaa2_queue *rxq;
469         struct qbman_result *dq_storage;
470         struct qbman_pull_desc pulldesc;
471         struct qbman_swp *swp;
472         uint32_t fqid;
473         uint8_t status, pending;
474         uint8_t num_rx = 0;
475         const struct qbman_fd *fd;
476         uint16_t vqid;
477         int ret, next_pull = nb_jobs, num_pulled = 0;
478
479         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
480                 ret = dpaa2_affine_qbman_swp();
481                 if (ret) {
482                         DPAA2_QDMA_ERR(
483                                 "Failed to allocate IO portal, tid: %d\n",
484                                 rte_gettid());
485                         return 0;
486                 }
487         }
488         swp = DPAA2_PER_LCORE_PORTAL;
489
490         rxq = &(dpdmai_dev->rx_queue[rxq_id]);
491         fqid = rxq->fqid;
492
493         do {
494                 dq_storage = rxq->q_storage->dq_storage[0];
495                 /* Prepare dequeue descriptor */
496                 qbman_pull_desc_clear(&pulldesc);
497                 qbman_pull_desc_set_fq(&pulldesc, fqid);
498                 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
499                         (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
500
501                 if (next_pull > dpaa2_dqrr_size) {
502                         qbman_pull_desc_set_numframes(&pulldesc,
503                                         dpaa2_dqrr_size);
504                         next_pull -= dpaa2_dqrr_size;
505                 } else {
506                         qbman_pull_desc_set_numframes(&pulldesc, next_pull);
507                         next_pull = 0;
508                 }
509
510                 while (1) {
511                         if (qbman_swp_pull(swp, &pulldesc)) {
512                                 DPAA2_QDMA_DP_WARN(
513                                         "VDQ command not issued. QBMAN busy");
514                                 /* Portal was busy, try again */
515                                 continue;
516                         }
517                         break;
518                 }
519
520                 rte_prefetch0((void *)((size_t)(dq_storage + 1)));
521                 /* Check if the previous issued command is completed. */
522                 while (!qbman_check_command_complete(dq_storage))
523                         ;
524
525                 num_pulled = 0;
526                 pending = 1;
527
528                 do {
529                         /* Loop until dq_storage is updated
530                          * with new token by QBMAN
531                          */
532                         while (!qbman_check_new_result(dq_storage))
533                                 ;
534                         rte_prefetch0((void *)((size_t)(dq_storage + 2)));
535
536                         if (qbman_result_DQ_is_pull_complete(dq_storage)) {
537                                 pending = 0;
538                                 /* Check for valid frame. */
539                                 status = qbman_result_DQ_flags(dq_storage);
540                                 if (unlikely((status &
541                                         QBMAN_DQ_STAT_VALIDFRAME) == 0))
542                                         continue;
543                         }
544                         fd = qbman_result_DQ_fd(dq_storage);
545
546                         vqid = qdma_vq->get_job(qdma_vq, fd, &job[num_rx]);
547                         if (vq_id)
548                                 vq_id[num_rx] = vqid;
549
550                         dq_storage++;
551                         num_rx++;
552                         num_pulled++;
553
554                 } while (pending);
555         /* Last VDQ provided all packets and more packets are requested */
556         } while (next_pull && num_pulled == dpaa2_dqrr_size);
557
558         return num_rx;
559 }
560
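/* Enqueue up to nb_jobs jobs on the Tx queue backing this virtual queue.
 * FDs are built with the per-VQ set_fd() hook and pushed to QBMAN in
 * bursts of at most dpaa2_eqcr_size, retrying up to
 * DPAA2_MAX_TX_RETRY_COUNT times when the EQCR is busy.
 */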
561 static int
562 dpdmai_dev_enqueue_multi(
563                         struct qdma_virt_queue *qdma_vq,
564                         struct rte_qdma_job **job,
565                         uint16_t nb_jobs)
566 {
567         struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
568         struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
569         uint16_t txq_id = qdma_pq->queue_id;
570
571         struct qbman_fd fd[RTE_QDMA_BURST_NB_MAX];
572         struct dpaa2_queue *txq;
573         struct qbman_eq_desc eqdesc;
574         struct qbman_swp *swp;
575         int ret;
576         uint32_t num_to_send = 0;
577         uint16_t num_tx = 0;
578
579         if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
580                 ret = dpaa2_affine_qbman_swp();
581                 if (ret) {
582                         DPAA2_QDMA_ERR(
583                                 "Failed to allocate IO portal, tid: %d\n",
584                                 rte_gettid());
585                         return 0;
586                 }
587         }
588         swp = DPAA2_PER_LCORE_PORTAL;
589
590         txq = &(dpdmai_dev->tx_queue[txq_id]);
591
592         /* Prepare enqueue descriptor */
593         qbman_eq_desc_clear(&eqdesc);
594         qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
595         qbman_eq_desc_set_no_orp(&eqdesc, 0);
596         qbman_eq_desc_set_response(&eqdesc, 0, 0);
597
598         memset(fd, 0, nb_jobs * sizeof(struct qbman_fd));
599
600         while (nb_jobs > 0) {
601                 uint32_t loop;
602
603                 num_to_send = (nb_jobs > dpaa2_eqcr_size) ?
604                         dpaa2_eqcr_size : nb_jobs;
605
606                 for (loop = 0; loop < num_to_send; loop++) {
607                         ret = qdma_vq->set_fd(qdma_vq, &fd[loop], job[num_tx]);
608                         if (ret < 0) {
609                                 /* Set nb_jobs to loop, so outer while loop
610                                  * breaks out.
611                                  */
612                                 nb_jobs = loop;
613                                 break;
614                         }
615
616                         num_tx++;
617                 }
618
619                 /* Enqueue the packet to the QBMAN */
620                 uint32_t enqueue_loop = 0, retry_count = 0;
621
622                 while (enqueue_loop < loop) {
623                         ret = qbman_swp_enqueue_multiple(swp,
624                                                 &eqdesc,
625                                                 &fd[enqueue_loop],
626                                                 NULL,
627                                                 loop - enqueue_loop);
628                         if (unlikely(ret < 0)) {
629                                 retry_count++;
630                                 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
631                                         return num_tx - (loop - enqueue_loop);
632                         } else {
633                                 enqueue_loop += ret;
634                                 retry_count = 0;
635                         }
636                 }
637                 nb_jobs -= loop;
638         }
639         return num_tx;
640 }
641
642 static struct qdma_hw_queue *
643 alloc_hw_queue(uint32_t lcore_id)
644 {
645         struct qdma_hw_queue *queue = NULL;
646
647         DPAA2_QDMA_FUNC_TRACE();
648
649         /* Get a free queue from the list */
650         TAILQ_FOREACH(queue, &qdma_queue_list, next) {
651                 if (queue->num_users == 0) {
652                         queue->lcore_id = lcore_id;
653                         queue->num_users++;
654                         break;
655                 }
656         }
657
658         return queue;
659 }
660
661 static void
662 free_hw_queue(struct qdma_hw_queue *queue)
663 {
664         DPAA2_QDMA_FUNC_TRACE();
665
666         queue->num_users--;
667 }
668
669
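/* Return a H/W queue for the given lcore: allocate a new one while the
 * per-core limit allows it, otherwise share the least loaded queue
 * already assigned to that core.
 */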
670 static struct qdma_hw_queue *
671 get_hw_queue(struct qdma_device *qdma_dev, uint32_t lcore_id)
672 {
673         struct qdma_per_core_info *core_info;
674         struct qdma_hw_queue *queue, *temp;
675         uint32_t least_num_users;
676         int num_hw_queues, i;
677
678         DPAA2_QDMA_FUNC_TRACE();
679
680         core_info = &qdma_core_info[lcore_id];
681         num_hw_queues = core_info->num_hw_queues;
682
683         /*
684          * Allocate a HW queue if there are less queues
685          * than maximum per core queues configured
686          */
687         if (num_hw_queues < qdma_dev->max_hw_queues_per_core) {
688                 queue = alloc_hw_queue(lcore_id);
689                 if (queue) {
690                         core_info->hw_queues[num_hw_queues] = queue;
691                         core_info->num_hw_queues++;
692                         return queue;
693                 }
694         }
695
696         queue = core_info->hw_queues[0];
697         /* In case there is no queue associated with the core return NULL */
698         if (!queue)
699                 return NULL;
700
701         /* Fetch the least loaded H/W queue */
702         least_num_users = core_info->hw_queues[0]->num_users;
703         for (i = 0; i < num_hw_queues; i++) {
704                 temp = core_info->hw_queues[i];
705                 if (temp->num_users < least_num_users)
706                         queue = temp, least_num_users = temp->num_users;
707         }
708
709         if (queue)
710                 queue->num_users++;
711
712         return queue;
713 }
714
715 static void
716 put_hw_queue(struct qdma_hw_queue *queue)
717 {
718         struct qdma_per_core_info *core_info;
719         int lcore_id, num_hw_queues, i;
720
721         DPAA2_QDMA_FUNC_TRACE();
722
723         /*
724          * If this is the last user of the queue free it.
725          * Also remove it from QDMA core info.
726          */
727         if (queue->num_users == 1) {
728                 free_hw_queue(queue);
729
730                 /* Remove the physical queue from core info */
731                 lcore_id = queue->lcore_id;
732                 core_info = &qdma_core_info[lcore_id];
733                 num_hw_queues = core_info->num_hw_queues;
734                 for (i = 0; i < num_hw_queues; i++) {
735                         if (queue == core_info->hw_queues[i])
736                                 break;
737                 }
738                 for (; i < num_hw_queues - 1; i++)
739                         core_info->hw_queues[i] = core_info->hw_queues[i + 1];
740                 core_info->hw_queues[i] = NULL;
741         } else {
742                 queue->num_users--;
743         }
744 }
745
746 static int
747 dpaa2_qdma_attr_get(struct rte_rawdev *rawdev,
748                     __rte_unused const char *attr_name,
749                     uint64_t *attr_value)
750 {
751         struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
752         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
753         struct rte_qdma_attr *qdma_attr = (struct rte_qdma_attr *)attr_value;
754
755         DPAA2_QDMA_FUNC_TRACE();
756
757         qdma_attr->num_hw_queues = qdma_dev->num_hw_queues;
758
759         return 0;
760 }
761
762 static int
763 dpaa2_qdma_reset(struct rte_rawdev *rawdev)
764 {
765         struct qdma_hw_queue *queue;
766         struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
767         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
768         int i;
769
770         DPAA2_QDMA_FUNC_TRACE();
771
772         /* In case QDMA device is not in stopped state, return -EBUSY */
773         if (qdma_dev->state == 1) {
774                 DPAA2_QDMA_ERR(
775                         "Device is in running state. Stop before reset.");
776                 return -EBUSY;
777         }
778
779         /* In case there are pending jobs on any VQ, return -EBUSY */
780         for (i = 0; i < qdma_dev->max_vqs; i++) {
781                 if (qdma_dev->vqs[i].in_use && (qdma_dev->vqs[i].num_enqueues !=
782                     qdma_dev->vqs[i].num_dequeues)) {
783                         DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
784                         return -EBUSY;
785                 }
786         }
787
788         /* Reset HW queues */
789         TAILQ_FOREACH(queue, &qdma_queue_list, next)
790                 queue->num_users = 0;
791
792         /* Reset and free virtual queues */
793         for (i = 0; i < qdma_dev->max_vqs; i++) {
794                 if (qdma_dev->vqs[i].status_ring)
795                         rte_ring_free(qdma_dev->vqs[i].status_ring);
796         }
797         if (qdma_dev->vqs)
798                 rte_free(qdma_dev->vqs);
799         qdma_dev->vqs = NULL;
800
801         /* Reset per core info */
802         memset(&qdma_core_info, 0,
803                 sizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);
804
805         /* Free the FLE pool */
806         if (qdma_dev->fle_pool)
807                 rte_mempool_free(qdma_dev->fle_pool);
808
809         /* Reset QDMA device structure */
810         qdma_dev->max_hw_queues_per_core = 0;
811         qdma_dev->fle_pool = NULL;
812         qdma_dev->fle_pool_count = 0;
813         qdma_dev->max_vqs = 0;
814
815         return 0;
816 }
817
818 static int
819 dpaa2_qdma_configure(const struct rte_rawdev *rawdev,
820                          rte_rawdev_obj_t config,
821                          size_t config_size)
822 {
823         char name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
824         struct rte_qdma_config *qdma_config = (struct rte_qdma_config *)config;
825         struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
826         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
827
828         DPAA2_QDMA_FUNC_TRACE();
829
830         if (config_size != sizeof(*qdma_config))
831                 return -EINVAL;
832
833         /* In case QDMA device is not in stopped state, return -EBUSY */
834         if (qdma_dev->state == 1) {
835                 DPAA2_QDMA_ERR(
836                         "Device is in running state. Stop before config.");
837                 return -EBUSY;
838         }
839
840         /* Set max HW queue per core */
841         if (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {
842                 DPAA2_QDMA_ERR("H/W queues per core is more than: %d",
843                                MAX_HW_QUEUE_PER_CORE);
844                 return -EINVAL;
845         }
846         qdma_dev->max_hw_queues_per_core =
847                 qdma_config->max_hw_queues_per_core;
848
849         /* Allocate Virtual Queues */
850         sprintf(name, "qdma_%d_vq", rawdev->dev_id);
851         qdma_dev->vqs = rte_malloc(name,
852                         (sizeof(struct qdma_virt_queue) * qdma_config->max_vqs),
853                         RTE_CACHE_LINE_SIZE);
854         if (!qdma_dev->vqs) {
855                 DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
856                 return -ENOMEM;
857         }
858         qdma_dev->max_vqs = qdma_config->max_vqs;
859
860         /* Allocate FLE pool; just append PID so that in case of
861          * multiprocess, the pools don't collide.
862          */
863         snprintf(name, sizeof(name), "qdma_fle_pool%u",
864                  getpid());
865         qdma_dev->fle_pool = rte_mempool_create(name,
866                         qdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,
867                         QDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,
868                         NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
869         if (!qdma_dev->fle_pool) {
870                 DPAA2_QDMA_ERR("qdma_fle_pool create failed");
871                 rte_free(qdma_dev->vqs);
872                 qdma_dev->vqs = NULL;
873                 return -ENOMEM;
874         }
875         qdma_dev->fle_pool_count = qdma_config->fle_pool_count;
876
877         return 0;
878 }
879
880 static int
881 dpaa2_qdma_start(struct rte_rawdev *rawdev)
882 {
883         struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
884         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
885
886         DPAA2_QDMA_FUNC_TRACE();
887
888         qdma_dev->state = 1;
889
890         return 0;
891 }
892
893 static int
894 check_devargs_handler(__rte_unused const char *key, const char *value,
895                       __rte_unused void *opaque)
896 {
897         if (strcmp(value, "1"))
898                 return -1;
899
900         return 0;
901 }
902
903 static int
904 dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
905 {
906         struct rte_kvargs *kvlist;
907
908         if (!devargs)
909                 return 0;
910
911         kvlist = rte_kvargs_parse(devargs->args, NULL);
912         if (!kvlist)
913                 return 0;
914
915         if (!rte_kvargs_count(kvlist, key)) {
916                 rte_kvargs_free(kvlist);
917                 return 0;
918         }
919
920         if (rte_kvargs_process(kvlist, key,
921                                check_devargs_handler, NULL) < 0) {
922                 rte_kvargs_free(kvlist);
923                 return 0;
924         }
925         rte_kvargs_free(kvlist);
926
927         return 1;
928 }
929
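/* Set up a virtual queue: bind it to an exclusive or shared H/W queue,
 * create a status ring in shared mode, and select the FD format and
 * dequeue mode from the queue flags and devargs.
 */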
930 static int
931 dpaa2_qdma_queue_setup(struct rte_rawdev *rawdev,
932                           __rte_unused uint16_t queue_id,
933                           rte_rawdev_obj_t queue_conf,
934                           size_t conf_size)
935 {
936         char ring_name[32];
937         int i;
938         struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
939         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
940         struct rte_qdma_queue_config *q_config =
941                 (struct rte_qdma_queue_config *)queue_conf;
942
943         DPAA2_QDMA_FUNC_TRACE();
944
945         if (conf_size != sizeof(*q_config))
946                 return -EINVAL;
947
948         rte_spinlock_lock(&qdma_dev->lock);
949
950         /* Get a free Virtual Queue */
951         for (i = 0; i < qdma_dev->max_vqs; i++) {
952                 if (qdma_dev->vqs[i].in_use == 0)
953                         break;
954         }
955
956         /* Return in case no VQ is free */
957         if (i == qdma_dev->max_vqs) {
958                 rte_spinlock_unlock(&qdma_dev->lock);
959                 DPAA2_QDMA_ERR("No free virtual queue available");
960                 return -ENODEV;
961         }
962
963         if (q_config->flags & RTE_QDMA_VQ_EXCLUSIVE_PQ) {
964                 /* Allocate HW queue for a VQ */
965                 qdma_dev->vqs[i].hw_queue = alloc_hw_queue(q_config->lcore_id);
966                 qdma_dev->vqs[i].exclusive_hw_queue = 1;
967         } else {
968                 /* Allocate a Ring for Virtual Queue in VQ mode */
969                 snprintf(ring_name, sizeof(ring_name), "status ring %d", i);
970                 qdma_dev->vqs[i].status_ring = rte_ring_create(ring_name,
971                         qdma_dev->fle_pool_count, rte_socket_id(), 0);
972                 if (!qdma_dev->vqs[i].status_ring) {
973                         DPAA2_QDMA_ERR("Status ring creation failed for vq");
974                         rte_spinlock_unlock(&qdma_dev->lock);
975                         return rte_errno;
976                 }
977
978                 /* Get a HW queue (shared) for a VQ */
979                 qdma_dev->vqs[i].hw_queue = get_hw_queue(qdma_dev,
980                                                     q_config->lcore_id);
981                 qdma_dev->vqs[i].exclusive_hw_queue = 0;
982         }
983
984         if (qdma_dev->vqs[i].hw_queue == NULL) {
985                 DPAA2_QDMA_ERR("No H/W queue available for VQ");
986                 if (qdma_dev->vqs[i].status_ring)
987                         rte_ring_free(qdma_dev->vqs[i].status_ring);
988                 qdma_dev->vqs[i].status_ring = NULL;
989                 rte_spinlock_unlock(&qdma_dev->lock);
990                 return -ENODEV;
991         }
992
993         qdma_dev->vqs[i].in_use = 1;
994         qdma_dev->vqs[i].lcore_id = q_config->lcore_id;
995         memset(&qdma_dev->vqs[i].rbp, 0, sizeof(struct rte_qdma_rbp));
996
997         if (q_config->flags & RTE_QDMA_VQ_FD_LONG_FORMAT) {
998                 qdma_dev->vqs[i].set_fd = dpdmai_dev_set_fd_lf;
999                 qdma_dev->vqs[i].get_job = dpdmai_dev_get_job_lf;
1000         } else {
1001                 qdma_dev->vqs[i].set_fd = dpdmai_dev_set_fd_us;
1002                 qdma_dev->vqs[i].get_job = dpdmai_dev_get_job_us;
1003         }
1004         if (dpaa2_get_devargs(rawdev->device->devargs,
1005                         DPAA2_QDMA_NO_PREFETCH) ||
1006                         (getenv("DPAA2_NO_QDMA_PREFETCH_RX"))) {
1007                 /* If no prefetch is configured. */
1008                 qdma_dev->vqs[i].dequeue_job =
1009                                 dpdmai_dev_dequeue_multijob_no_prefetch;
1010                 DPAA2_QDMA_INFO("No Prefetch RX Mode enabled");
1011         } else {
1012                 qdma_dev->vqs[i].dequeue_job =
1013                         dpdmai_dev_dequeue_multijob_prefetch;
1014         }
1015
1016         qdma_dev->vqs[i].enqueue_job = dpdmai_dev_enqueue_multi;
1017
1018         if (q_config->rbp != NULL)
1019                 memcpy(&qdma_dev->vqs[i].rbp, q_config->rbp,
1020                                 sizeof(struct rte_qdma_rbp));
1021
1022         rte_spinlock_unlock(&qdma_dev->lock);
1023
1024         return i;
1025 }
1026
1027 static int
1028 dpaa2_qdma_enqueue(struct rte_rawdev *rawdev,
1029                   __rte_unused struct rte_rawdev_buf **buffers,
1030                   unsigned int nb_jobs,
1031                   rte_rawdev_obj_t context)
1032 {
1033         struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
1034         struct rte_qdma_enqdeq *e_context =
1035                 (struct rte_qdma_enqdeq *)context;
1036         struct qdma_virt_queue *qdma_vq =
1037                 &dpdmai_dev->qdma_dev->vqs[e_context->vq_id];
1038         int ret;
1039
1040         /* Return error in case of wrong lcore_id */
1041         if (rte_lcore_id() != qdma_vq->lcore_id) {
1042                 DPAA2_QDMA_ERR("QDMA enqueue for vqid %d on wrong core",
1043                                 e_context->vq_id);
1044                 return -EINVAL;
1045         }
1046
1047         ret = qdma_vq->enqueue_job(qdma_vq, e_context->job, nb_jobs);
1048         if (ret < 0) {
1049                 DPAA2_QDMA_ERR("DPDMAI device enqueue failed: %d", ret);
1050                 return ret;
1051         }
1052
1053         qdma_vq->num_enqueues += ret;
1054
1055         return ret;
1056 }
1057
1058 static int
1059 dpaa2_qdma_dequeue(struct rte_rawdev *rawdev,
1060                    __rte_unused struct rte_rawdev_buf **buffers,
1061                    unsigned int nb_jobs,
1062                    rte_rawdev_obj_t cntxt)
1063 {
1064         struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
1065         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1066         struct rte_qdma_enqdeq *context =
1067                 (struct rte_qdma_enqdeq *)cntxt;
1068         struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[context->vq_id];
1069         struct qdma_virt_queue *temp_qdma_vq;
1070         int ret = 0, i;
1071         unsigned int ring_count;
1072
1073         /* Return error in case of wrong lcore_id */
1074         if (rte_lcore_id() != (unsigned int)(qdma_vq->lcore_id)) {
1075                 DPAA2_QDMA_WARN("QDMA dequeue for vqid %d on wrong core",
1076                                 context->vq_id);
1077                 return -1;
1078         }
1079
1080         /* Only dequeue when there are pending jobs on VQ */
1081         if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
1082                 return 0;
1083
1084         if (qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))
1085                 nb_jobs = (qdma_vq->num_enqueues - qdma_vq->num_dequeues);
1086
1087         if (qdma_vq->exclusive_hw_queue) {
1088                 /* In case of exclusive queue directly fetch from HW queue */
1089                 ret = qdma_vq->dequeue_job(qdma_vq, NULL,
1090                                         context->job, nb_jobs);
1091                 if (ret < 0) {
1092                         DPAA2_QDMA_ERR(
1093                                 "Dequeue from DPDMAI device failed: %d", ret);
1094                         return ret;
1095                 }
1096                 qdma_vq->num_dequeues += ret;
1097         } else {
1098                 uint16_t temp_vq_id[RTE_QDMA_BURST_NB_MAX];
1099                 /*
1100                  * Get the QDMA completed jobs from the software ring.
1101                  * In case they are not available on the ring poke the HW
1102                  * to fetch completed jobs from corresponding HW queues
1103                  */
1104                 ring_count = rte_ring_count(qdma_vq->status_ring);
1105                 if (ring_count < nb_jobs) {
1106                         /* TODO - How to have right budget */
1107                         ret = qdma_vq->dequeue_job(qdma_vq,
1108                                         temp_vq_id, context->job, nb_jobs);
1109                         for (i = 0; i < ret; i++) {
1110                                 temp_qdma_vq = &qdma_dev->vqs[temp_vq_id[i]];
1111                                 rte_ring_enqueue(temp_qdma_vq->status_ring,
1112                                         (void *)(context->job[i]));
1113                         }
1114                         ring_count = rte_ring_count(
1115                                         qdma_vq->status_ring);
1116                 }
1117
1118                 if (ring_count) {
1119                         /* Dequeue job from the software ring
1120                          * to provide to the user
1121                          */
1122                         ret = rte_ring_dequeue_bulk(qdma_vq->status_ring,
1123                                                     (void **)context->job,
1124                                                     ring_count, NULL);
1125                         if (ret)
1126                                 qdma_vq->num_dequeues += ret;
1127                 }
1128         }
1129
1130         return ret;
1131 }
1132
1133 void
1134 rte_qdma_vq_stats(struct rte_rawdev *rawdev,
1135                 uint16_t vq_id,
1136                 struct rte_qdma_vq_stats *vq_status)
1137 {
1138         struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
1139         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1140         struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vq_id];
1141
1142         if (qdma_vq->in_use) {
1143                 vq_status->exclusive_hw_queue = qdma_vq->exclusive_hw_queue;
1144                 vq_status->lcore_id = qdma_vq->lcore_id;
1145                 vq_status->num_enqueues = qdma_vq->num_enqueues;
1146                 vq_status->num_dequeues = qdma_vq->num_dequeues;
1147                 vq_status->num_pending_jobs = vq_status->num_enqueues -
1148                                 vq_status->num_dequeues;
1149         }
1150 }
1151
1152 static int
1153 dpaa2_qdma_queue_release(struct rte_rawdev *rawdev,
1154                          uint16_t vq_id)
1155 {
1156         struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
1157         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1158
1159         struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vq_id];
1160
1161         DPAA2_QDMA_FUNC_TRACE();
1162
1163         /* In case there are pending jobs on any VQ, return -EBUSY */
1164         if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
1165                 return -EBUSY;
1166
1167         rte_spinlock_lock(&qdma_dev->lock);
1168
1169         if (qdma_vq->exclusive_hw_queue)
1170                 free_hw_queue(qdma_vq->hw_queue);
1171         else {
1172                 if (qdma_vq->status_ring)
1173                         rte_ring_free(qdma_vq->status_ring);
1174
1175                 put_hw_queue(qdma_vq->hw_queue);
1176         }
1177
1178         memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));
1179
1180         rte_spinlock_unlock(&qdma_dev->lock);
1181
1182         return 0;
1183 }
1184
1185 static void
1186 dpaa2_qdma_stop(struct rte_rawdev *rawdev)
1187 {
1188         struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
1189         struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1190
1191         DPAA2_QDMA_FUNC_TRACE();
1192
1193         qdma_dev->state = 0;
1194 }
1195
1196 static int
1197 dpaa2_qdma_close(struct rte_rawdev *rawdev)
1198 {
1199         DPAA2_QDMA_FUNC_TRACE();
1200
1201         dpaa2_qdma_reset(rawdev);
1202
1203         return 0;
1204 }
1205
1206 static struct rte_rawdev_ops dpaa2_qdma_ops = {
1207         .dev_configure            = dpaa2_qdma_configure,
1208         .dev_start                = dpaa2_qdma_start,
1209         .dev_stop                 = dpaa2_qdma_stop,
1210         .dev_reset                = dpaa2_qdma_reset,
1211         .dev_close                = dpaa2_qdma_close,
1212         .queue_setup              = dpaa2_qdma_queue_setup,
1213         .queue_release            = dpaa2_qdma_queue_release,
1214         .attr_get                 = dpaa2_qdma_attr_get,
1215         .enqueue_bufs             = dpaa2_qdma_enqueue,
1216         .dequeue_bufs             = dpaa2_qdma_dequeue,
1217 };
1218
1219 static int
1220 add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
1221 {
1222         struct qdma_hw_queue *queue;
1223         int i;
1224
1225         DPAA2_QDMA_FUNC_TRACE();
1226
1227         for (i = 0; i < dpdmai_dev->num_queues; i++) {
1228                 queue = rte_zmalloc(NULL, sizeof(struct qdma_hw_queue), 0);
1229                 if (!queue) {
1230                         DPAA2_QDMA_ERR(
1231                                 "Memory allocation failed for QDMA queue");
1232                         return -ENOMEM;
1233                 }
1234
1235                 queue->dpdmai_dev = dpdmai_dev;
1236                 queue->queue_id = i;
1237
1238                 TAILQ_INSERT_TAIL(&qdma_queue_list, queue, next);
1239                 dpdmai_dev->qdma_dev->num_hw_queues++;
1240         }
1241
1242         return 0;
1243 }
1244
1245 static void
1246 remove_hw_queues_from_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
1247 {
1248         struct qdma_hw_queue *queue = NULL;
1249         struct qdma_hw_queue *tqueue = NULL;
1250
1251         DPAA2_QDMA_FUNC_TRACE();
1252
1253         TAILQ_FOREACH_SAFE(queue, &qdma_queue_list, next, tqueue) {
1254                 if (queue->dpdmai_dev == dpdmai_dev) {
1255                         TAILQ_REMOVE(&qdma_queue_list, queue, next);
1256                         rte_free(queue);
1257                         queue = NULL;
1258                 }
1259         }
1260 }
1261
1262 static int
1263 dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)
1264 {
1265         struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
1266         int ret, i;
1267
1268         DPAA2_QDMA_FUNC_TRACE();
1269
1270         /* Remove HW queues from global list */
1271         remove_hw_queues_from_list(dpdmai_dev);
1272
1273         ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1274                              dpdmai_dev->token);
1275         if (ret)
1276                 DPAA2_QDMA_ERR("dpdmai disable failed");
1277
1278         /* Free the DQRR storage of the Rx queues */
1279         for (i = 0; i < dpdmai_dev->num_queues; i++) {
1280                 struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[i]);
1281
1282                 if (rxq->q_storage) {
1283                         dpaa2_free_dq_storage(rxq->q_storage);
1284                         rte_free(rxq->q_storage);
1285                 }
1286         }
1287
1288         /* Close the device at underlying layer*/
1289         ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
1290         if (ret)
1291                 DPAA2_QDMA_ERR("Failure closing dpdmai device");
1292
1293         return 0;
1294 }
1295
1296 static int
1297 dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
1298 {
1299         struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
1300         struct dpdmai_rx_queue_cfg rx_queue_cfg;
1301         struct dpdmai_attr attr;
1302         struct dpdmai_rx_queue_attr rx_attr;
1303         struct dpdmai_tx_queue_attr tx_attr;
1304         int ret, i;
1305
1306         DPAA2_QDMA_FUNC_TRACE();
1307
1308         /* Open DPDMAI device */
1309         dpdmai_dev->dpdmai_id = dpdmai_id;
1310         dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
1311         dpdmai_dev->qdma_dev = &q_dev;
1312         ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1313                           dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
1314         if (ret) {
1315                 DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
1316                 return ret;
1317         }
1318
1319         /* Get DPDMAI attributes */
1320         ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1321                                     dpdmai_dev->token, &attr);
1322         if (ret) {
1323                 DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
1324                                ret);
1325                 goto init_err;
1326         }
1327         dpdmai_dev->num_queues = attr.num_of_queues;
1328
1329         /* Set up Rx Queues */
1330         for (i = 0; i < dpdmai_dev->num_queues; i++) {
1331                 struct dpaa2_queue *rxq;
1332
1333                 memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
1334                 ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
1335                                           CMD_PRI_LOW,
1336                                           dpdmai_dev->token,
1337                                           i, 0, &rx_queue_cfg);
1338                 if (ret) {
1339                         DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
1340                                        ret);
1341                         goto init_err;
1342                 }
1343
1344                 /* Allocate DQ storage for the DPDMAI Rx queues */
1345                 rxq = &(dpdmai_dev->rx_queue[i]);
1346                 rxq->q_storage = rte_malloc("dq_storage",
1347                                             sizeof(struct queue_storage_info_t),
1348                                             RTE_CACHE_LINE_SIZE);
1349                 if (!rxq->q_storage) {
1350                         DPAA2_QDMA_ERR("q_storage allocation failed");
1351                         ret = -ENOMEM;
1352                         goto init_err;
1353                 }
1354
1355                 memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
1356                 ret = dpaa2_alloc_dq_storage(rxq->q_storage);
1357                 if (ret) {
1358                         DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
1359                         goto init_err;
1360                 }
1361         }
1362
1363         /* Get Rx and Tx queues FQID's */
1364         for (i = 0; i < dpdmai_dev->num_queues; i++) {
1365                 ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1366                                           dpdmai_dev->token, i, 0, &rx_attr);
1367                 if (ret) {
1368                         DPAA2_QDMA_ERR("Reading device failed with err: %d",
1369                                        ret);
1370                         goto init_err;
1371                 }
1372                 dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
1373
1374                 ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1375                                           dpdmai_dev->token, i, 0, &tx_attr);
1376                 if (ret) {
1377                         DPAA2_QDMA_ERR("Reading device failed with err: %d",
1378                                        ret);
1379                         goto init_err;
1380                 }
1381                 dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
1382         }
1383
1384         /* Enable the device */
1385         ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1386                             dpdmai_dev->token);
1387         if (ret) {
1388                 DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
1389                 goto init_err;
1390         }
1391
1392         /* Add the HW queue to the global list */
1393         ret = add_hw_queues_to_list(dpdmai_dev);
1394         if (ret) {
1395                 DPAA2_QDMA_ERR("Adding H/W queue to list failed");
1396                 goto init_err;
1397         }
1398
1399         if (!dpaa2_coherent_no_alloc_cache) {
1400                 if (dpaa2_svr_family == SVR_LX2160A) {
1401                         dpaa2_coherent_no_alloc_cache =
1402                                 DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
1403                         dpaa2_coherent_alloc_cache =
1404                                 DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
1405                 } else {
1406                         dpaa2_coherent_no_alloc_cache =
1407                                 DPAA2_COHERENT_NO_ALLOCATE_CACHE;
1408                         dpaa2_coherent_alloc_cache =
1409                                 DPAA2_COHERENT_ALLOCATE_CACHE;
1410                 }
1411         }
1412
1413         DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
1414
1415         rte_spinlock_init(&dpdmai_dev->qdma_dev->lock);
1416
1417         return 0;
1418 init_err:
1419         dpaa2_dpdmai_dev_uninit(rawdev);
1420         return ret;
1421 }
1422
1423 static int
1424 rte_dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
1425                      struct rte_dpaa2_device *dpaa2_dev)
1426 {
1427         struct rte_rawdev *rawdev;
1428         int ret;
1429
1430         DPAA2_QDMA_FUNC_TRACE();
1431
1432         rawdev = rte_rawdev_pmd_allocate(dpaa2_dev->device.name,
1433                         sizeof(struct dpaa2_dpdmai_dev),
1434                         rte_socket_id());
1435         if (!rawdev) {
1436                 DPAA2_QDMA_ERR("Unable to allocate rawdevice");
1437                 return -EINVAL;
1438         }
1439
1440         dpaa2_dev->rawdev = rawdev;
1441         rawdev->dev_ops = &dpaa2_qdma_ops;
1442         rawdev->device = &dpaa2_dev->device;
1443         rawdev->driver_name = dpaa2_drv->driver.name;
1444
1445         /* Invoke PMD device initialization function */
1446         ret = dpaa2_dpdmai_dev_init(rawdev, dpaa2_dev->object_id);
1447         if (ret) {
1448                 rte_rawdev_pmd_release(rawdev);
1449                 return ret;
1450         }
1451
1452         /* Reset the QDMA device */
1453         ret = dpaa2_qdma_reset(rawdev);
1454         if (ret) {
1455                 DPAA2_QDMA_ERR("Resetting QDMA failed");
1456                 return ret;
1457         }
1458
1459         return 0;
1460 }
1461
1462 static int
1463 rte_dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
1464 {
1465         struct rte_rawdev *rawdev = dpaa2_dev->rawdev;
1466         int ret;
1467
1468         DPAA2_QDMA_FUNC_TRACE();
1469
1470         dpaa2_dpdmai_dev_uninit(rawdev);
1471
1472         ret = rte_rawdev_pmd_release(rawdev);
1473         if (ret)
1474                 DPAA2_QDMA_ERR("Device cleanup failed");
1475
1476         return 0;
1477 }
1478
1479 static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
1480         .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
1481         .drv_type = DPAA2_QDMA,
1482         .probe = rte_dpaa2_qdma_probe,
1483         .remove = rte_dpaa2_qdma_remove,
1484 };
1485
1486 RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
1487 RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
1488         "no_prefetch=<int> ");
1489 RTE_LOG_REGISTER(dpaa2_qdma_logtype, pmd.raw.dpaa2.qdma, INFO);
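
/*
 * Usage sketch (illustrative only, not part of the driver): the intended
 * application flow through the generic rawdev API, using the structures
 * from rte_pmd_dpaa2_qdma.h as they are consumed by this file. dev_id,
 * src_iova, dst_iova and len are application-provided placeholders;
 * treat this as an outline under those assumptions, not a tested example.
 * rte_rawdev_dequeue_buffers() is typically polled until the job returns.
 *
 *	struct rte_qdma_config cfg = {
 *		.max_hw_queues_per_core = 2,
 *		.max_vqs = 16,
 *		.fle_pool_count = 4096,
 *	};
 *	struct rte_qdma_queue_config qcfg = {
 *		.lcore_id = rte_lcore_id(),
 *		.flags = RTE_QDMA_VQ_FD_LONG_FORMAT,
 *		.rbp = NULL,
 *	};
 *	struct rte_qdma_job job = {
 *		.src = src_iova, .dest = dst_iova, .len = len,
 *		.flags = RTE_QDMA_JOB_SRC_PHY | RTE_QDMA_JOB_DEST_PHY,
 *	};
 *	struct rte_qdma_job *jobs[1] = { &job };
 *	struct rte_qdma_enqdeq ctx = { .job = jobs };
 *
 *	rte_rawdev_configure(dev_id, (rte_rawdev_obj_t)&cfg, sizeof(cfg));
 *	rte_rawdev_start(dev_id);
 *	ctx.vq_id = rte_rawdev_queue_setup(dev_id, 0, &qcfg, sizeof(qcfg));
 *	rte_rawdev_enqueue_buffers(dev_id, NULL, 1, &ctx);
 *	rte_rawdev_dequeue_buffers(dev_id, NULL, 1, &ctx);
 */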