1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018-2021 NXP
9 #include <rte_atomic.h>
10 #include <rte_lcore.h>
11 #include <rte_rawdev.h>
12 #include <rte_rawdev_pmd.h>
13 #include <rte_malloc.h>
15 #include <rte_mempool.h>
16 #include <rte_prefetch.h>
17 #include <rte_kvargs.h>
19 #include <mc/fsl_dpdmai.h>
20 #include <portal/dpaa2_hw_pvt.h>
21 #include <portal/dpaa2_hw_dpio.h>
23 #include "rte_pmd_dpaa2_qdma.h"
24 #include "dpaa2_qdma.h"
25 #include "dpaa2_qdma_logs.h"
27 #define DPAA2_QDMA_NO_PREFETCH "no_prefetch"
29 /* Dynamic log type identifier */
30 int dpaa2_qdma_logtype;
32 uint32_t dpaa2_coherent_no_alloc_cache;
33 uint32_t dpaa2_coherent_alloc_cache;
36 static struct qdma_device q_dev;
38 /* QDMA H/W queues list */
39 TAILQ_HEAD(qdma_hw_queue_list, qdma_hw_queue);
40 static struct qdma_hw_queue_list qdma_queue_list
41 = TAILQ_HEAD_INITIALIZER(qdma_queue_list);
43 /* QDMA per core data */
44 static struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];
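/* Populate a short-format (simple PCI) FD for a single copy where the
 * source and/or destination is routed over PCIe (route-by-port).
 */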
47 qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
48 uint32_t len, struct qbman_fd *fd,
49 struct rte_qdma_rbp *rbp, int ser)
51 fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
52 fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));
54 fd->simple_pci.len_sl = len;
56 fd->simple_pci.bmt = 1;
57 fd->simple_pci.fmt = 3;
58 fd->simple_pci.sl = 1;
59 fd->simple_pci.ser = ser;
61 fd->simple_pci.sportid = rbp->sportid; /*pcie 3 */
62 fd->simple_pci.srbp = rbp->srbp;
64 fd->simple_pci.rdttype = 0;
66 fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;
68 /* dest is PCIe memory */
69 fd->simple_pci.dportid = rbp->dportid; /*pcie 3 */
70 fd->simple_pci.drbp = rbp->drbp;
72 fd->simple_pci.wrttype = 0;
74 fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;
76 fd->simple_pci.daddr_lo = lower_32_bits((uint64_t) (dest));
77 fd->simple_pci.daddr_hi = upper_32_bits((uint64_t) (dest));
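/* Populate a short-format (simple DDR) FD for a single memory-to-memory
 * copy using coherent, cacheable read/write transaction types.
 */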
83 qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
84 uint32_t len, struct qbman_fd *fd, int ser)
86 fd->simple_ddr.saddr_lo = lower_32_bits((uint64_t) (src));
87 fd->simple_ddr.saddr_hi = upper_32_bits((uint64_t) (src));
89 fd->simple_ddr.len = len;
91 fd->simple_ddr.bmt = 1;
92 fd->simple_ddr.fmt = 3;
93 fd->simple_ddr.sl = 1;
94 fd->simple_ddr.ser = ser;
96 * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011
97 * Coherent copy of cacheable memory,
98 * lookup in downstream cache, no allocate
101 fd->simple_ddr.rns = 0;
102 fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
104 * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111
105 * Coherent write of cacheable memory,
106 * lookup in downstream cache, no allocate on miss
108 fd->simple_ddr.wns = 0;
109 fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;
111 fd->simple_ddr.daddr_lo = lower_32_bits((uint64_t) (dest));
112 fd->simple_ddr.daddr_hi = upper_32_bits((uint64_t) (dest));
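/* Fill the frame-list entries and source/destination descriptors (SDDs)
 * of a long-format (compound) FD: the first FLE points to the SDDs, the
 * following ones to the source and destination buffers.
 */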
118 dpaa2_qdma_populate_fle(struct qbman_fle *fle,
120 struct rte_qdma_rbp *rbp,
121 uint64_t src, uint64_t dest,
122 size_t len, uint32_t flags, uint32_t fmt)
124 struct qdma_sdd *sdd;
127 sdd = (struct qdma_sdd *)
128 ((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET +
129 QDMA_FLE_SDD_OFFSET);
130 sdd_iova = fle_iova - QDMA_FLE_FLE_OFFSET + QDMA_FLE_SDD_OFFSET;
132 /* first frame list to source descriptor */
133 DPAA2_SET_FLE_ADDR(fle, sdd_iova);
134 DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));
136 /* source and destination descriptor */
137 if (rbp && rbp->enable) {
139 sdd->read_cmd.portid = rbp->sportid;
140 sdd->rbpcmd_simple.pfid = rbp->spfid;
141 sdd->rbpcmd_simple.vfid = rbp->svfid;
144 sdd->read_cmd.rbp = rbp->srbp;
145 sdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW;
147 sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
151 sdd->write_cmd.portid = rbp->dportid;
152 sdd->rbpcmd_simple.pfid = rbp->dpfid;
153 sdd->rbpcmd_simple.vfid = rbp->dvfid;
156 sdd->write_cmd.rbp = rbp->drbp;
157 sdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW;
159 sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
163 sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
165 sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
168 /* source frame list to source buffer */
169 if (flags & RTE_QDMA_JOB_SRC_PHY) {
170 DPAA2_SET_FLE_ADDR(fle, src);
171 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
172 DPAA2_SET_FLE_BMT(fle);
175 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
177 fle->word4.fmt = fmt;
178 DPAA2_SET_FLE_LEN(fle, len);
181 /* destination frame list to destination buffer */
182 if (flags & RTE_QDMA_JOB_DEST_PHY) {
183 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
184 DPAA2_SET_FLE_BMT(fle);
186 DPAA2_SET_FLE_ADDR(fle, dest);
188 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
190 fle->word4.fmt = fmt;
191 DPAA2_SET_FLE_LEN(fle, len);
193 /* Final bit: 1, for last frame list */
194 DPAA2_SET_FLE_FIN(fle);
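/* Build short-format FDs for a burst of jobs. The job pointer is stored
 * just before the source (or destination) buffer so that it can be
 * recovered at dequeue time; route-by-port jobs use the simple-PCI FD,
 * the rest use the simple-DDR FD.
 */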
197 static inline int dpdmai_dev_set_fd_us(
198 struct qdma_virt_queue *qdma_vq,
200 struct rte_qdma_job **job,
203 struct rte_qdma_rbp *rbp = &qdma_vq->rbp;
204 struct rte_qdma_job **ppjob;
207 int ser = (qdma_vq->flags & RTE_QDMA_VQ_NO_RESPONSE) ?
210 for (loop = 0; loop < nb_jobs; loop++) {
211 if (job[loop]->src & QDMA_RBP_UPPER_ADDRESS_MASK)
212 iova = (size_t)job[loop]->dest;
214 iova = (size_t)job[loop]->src;
216 /* Set the metadata */
217 job[loop]->vq_id = qdma_vq->vq_id;
218 ppjob = (struct rte_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
221 if ((rbp->drbp == 1) || (rbp->srbp == 1))
222 ret = qdma_populate_fd_pci((phys_addr_t)job[loop]->src,
223 (phys_addr_t)job[loop]->dest,
224 job[loop]->len, &fd[loop], rbp, ser);
226 ret = qdma_populate_fd_ddr((phys_addr_t)job[loop]->src,
227 (phys_addr_t)job[loop]->dest,
228 job[loop]->len, &fd[loop], ser);
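/* Fill the source and destination scatter-gather entry tables for a set
 * of jobs and return the total length described by the SG list.
 */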
234 static uint32_t qdma_populate_sg_entry(
235 struct rte_qdma_job **jobs,
236 struct qdma_sg_entry *src_sge,
237 struct qdma_sg_entry *dst_sge,
241 uint32_t total_len = 0;
244 for (i = 0; i < nb_jobs; i++) {
246 if (likely(jobs[i]->flags & RTE_QDMA_JOB_SRC_PHY)) {
247 src_sge->addr_lo = (uint32_t)jobs[i]->src;
248 src_sge->addr_hi = (jobs[i]->src >> 32);
250 iova = DPAA2_VADDR_TO_IOVA(jobs[i]->src);
251 src_sge->addr_lo = (uint32_t)iova;
252 src_sge->addr_hi = iova >> 32;
254 src_sge->data_len.data_len_sl0 = jobs[i]->len;
255 src_sge->ctrl.sl = QDMA_SG_SL_LONG;
256 src_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
257 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
258 src_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE;
260 src_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
263 if (likely(jobs[i]->flags & RTE_QDMA_JOB_DEST_PHY)) {
264 dst_sge->addr_lo = (uint32_t)jobs[i]->dest;
265 dst_sge->addr_hi = (jobs[i]->dest >> 32);
267 iova = DPAA2_VADDR_TO_IOVA(jobs[i]->dest);
268 dst_sge->addr_lo = (uint32_t)iova;
269 dst_sge->addr_hi = iova >> 32;
271 dst_sge->data_len.data_len_sl0 = jobs[i]->len;
272 dst_sge->ctrl.sl = QDMA_SG_SL_LONG;
273 dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB;
274 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
275 dst_sge->ctrl.bmt = QDMA_SG_BMT_ENABLE;
277 dst_sge->ctrl.bmt = QDMA_SG_BMT_DISABLE;
279 total_len += jobs[i]->len;
281 if (i == (nb_jobs - 1)) {
282 src_sge->ctrl.f = QDMA_SG_F;
283 dst_sge->ctrl.f = QDMA_SG_F;
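/* Build one long-format FD per job when no response is requested; the
 * FLE/SDD memory comes from the user-provided element (job->usr_elem).
 */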
295 static inline int dpdmai_dev_set_multi_fd_lf_no_rsp(
296 struct qdma_virt_queue *qdma_vq,
298 struct rte_qdma_job **job,
301 struct rte_qdma_rbp *rbp = &qdma_vq->rbp;
302 struct rte_qdma_job **ppjob;
305 struct qbman_fle *fle;
306 uint64_t elem_iova, fle_iova;
308 for (i = 0; i < nb_jobs; i++) {
309 elem = job[i]->usr_elem;
310 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
311 elem_iova = rte_mempool_virt2iova(elem);
313 elem_iova = DPAA2_VADDR_TO_IOVA(elem);
316 ppjob = (struct rte_qdma_job **)
317 ((uintptr_t)(uint64_t)elem +
318 QDMA_FLE_SINGLE_JOB_OFFSET);
321 job[i]->vq_id = qdma_vq->vq_id;
323 fle = (struct qbman_fle *)
324 ((uintptr_t)(uint64_t)elem + QDMA_FLE_FLE_OFFSET);
325 fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
327 DPAA2_SET_FD_ADDR(&fd[i], fle_iova);
328 DPAA2_SET_FD_COMPOUND_FMT(&fd[i]);
330 memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
331 DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
333 dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
334 job[i]->src, job[i]->dest, job[i]->len,
335 job[i]->flags, QBMAN_FLE_WORD4_FMT_SBF);
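/* Build one long-format FD per job: the FLE/SDD memory is taken from the
 * per-queue FLE pool and a completion status (FRC) is requested for each FD.
 */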
341 static inline int dpdmai_dev_set_multi_fd_lf(
342 struct qdma_virt_queue *qdma_vq,
344 struct rte_qdma_job **job,
347 struct rte_qdma_rbp *rbp = &qdma_vq->rbp;
348 struct rte_qdma_job **ppjob;
351 void *elem[RTE_QDMA_BURST_NB_MAX];
352 struct qbman_fle *fle;
353 uint64_t elem_iova, fle_iova;
355 ret = rte_mempool_get_bulk(qdma_vq->fle_pool, elem, nb_jobs);
357 DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
361 for (i = 0; i < nb_jobs; i++) {
362 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
363 elem_iova = rte_mempool_virt2iova(elem[i]);
365 elem_iova = DPAA2_VADDR_TO_IOVA(elem[i]);
368 ppjob = (struct rte_qdma_job **)
369 ((uintptr_t)(uint64_t)elem[i] +
370 QDMA_FLE_SINGLE_JOB_OFFSET);
373 job[i]->vq_id = qdma_vq->vq_id;
375 fle = (struct qbman_fle *)
376 ((uintptr_t)(uint64_t)elem[i] + QDMA_FLE_FLE_OFFSET);
377 fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
379 DPAA2_SET_FD_ADDR(&fd[i], fle_iova);
380 DPAA2_SET_FD_COMPOUND_FMT(&fd[i]);
381 DPAA2_SET_FD_FRC(&fd[i], QDMA_SER_CTX);
383 memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
384 DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
386 dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
387 job[i]->src, job[i]->dest, job[i]->len,
388 job[i]->flags, QBMAN_FLE_WORD4_FMT_SBF);
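/* Build a single long-format FD carrying a scatter-gather list covering
 * up to DPAA2_QDMA_MAX_SG_NB jobs; the job pointers and job count are
 * stored in the FLE element so the dequeue side can restore them.
 */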
394 static inline int dpdmai_dev_set_sg_fd_lf(
395 struct qdma_virt_queue *qdma_vq,
397 struct rte_qdma_job **job,
400 struct rte_qdma_rbp *rbp = &qdma_vq->rbp;
401 struct rte_qdma_job **ppjob;
403 struct qbman_fle *fle;
404 uint64_t elem_iova, fle_iova, src, dst;
406 struct qdma_sg_entry *src_sge, *dst_sge;
407 uint32_t len, fmt, flags;
410 * Get an FLE/SDD from FLE pool.
411 * Note: IO metadata is before the FLE and SDD memory.
413 if (qdma_vq->flags & RTE_QDMA_VQ_NO_RESPONSE) {
414 elem = job[0]->usr_elem;
416 ret = rte_mempool_get(qdma_vq->fle_pool, &elem);
418 DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
423 #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
424 elem_iova = rte_mempool_virt2iova(elem);
426 elem_iova = DPAA2_VADDR_TO_IOVA(elem);
429 /* Set the metadata */
430 /* Save job context. */
432 ((uintptr_t)(uint64_t)elem + QDMA_FLE_JOB_NB_OFFSET)) = nb_jobs;
433 ppjob = (struct rte_qdma_job **)
434 ((uintptr_t)(uint64_t)elem + QDMA_FLE_SG_JOBS_OFFSET);
435 for (i = 0; i < nb_jobs; i++)
438 ppjob[0]->vq_id = qdma_vq->vq_id;
440 fle = (struct qbman_fle *)
441 ((uintptr_t)(uint64_t)elem + QDMA_FLE_FLE_OFFSET);
442 fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;
444 DPAA2_SET_FD_ADDR(fd, fle_iova);
445 DPAA2_SET_FD_COMPOUND_FMT(fd);
446 if (!(qdma_vq->flags & RTE_QDMA_VQ_NO_RESPONSE))
447 DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);
450 if (likely(nb_jobs > 1)) {
451 src_sge = (struct qdma_sg_entry *)
452 ((uintptr_t)(uint64_t)elem + QDMA_FLE_SG_ENTRY_OFFSET);
453 dst_sge = src_sge + DPAA2_QDMA_MAX_SG_NB;
454 src = elem_iova + QDMA_FLE_SG_ENTRY_OFFSET;
456 DPAA2_QDMA_MAX_SG_NB * sizeof(struct qdma_sg_entry);
457 len = qdma_populate_sg_entry(job, src_sge, dst_sge, nb_jobs);
458 fmt = QBMAN_FLE_WORD4_FMT_SGE;
459 flags = RTE_QDMA_JOB_SRC_PHY | RTE_QDMA_JOB_DEST_PHY;
464 fmt = QBMAN_FLE_WORD4_FMT_SBF;
465 flags = job[0]->flags;
468 memset(fle, 0, DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle) +
469 DPAA2_QDMA_MAX_SDD * sizeof(struct qdma_sdd));
471 dpaa2_qdma_populate_fle(fle, fle_iova, rbp,
472 src, dst, len, flags, fmt);
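/* Recover the completed job from a short-format FD: the job pointer was
 * stored just before the source (or destination) buffer at enqueue time.
 */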
477 static inline uint16_t dpdmai_dev_get_job_us(
478 struct qdma_virt_queue *qdma_vq __rte_unused,
479 const struct qbman_fd *fd,
480 struct rte_qdma_job **job, uint16_t *nb_jobs)
484 struct rte_qdma_job **ppjob;
486 if (fd->simple_pci.saddr_hi & (QDMA_RBP_UPPER_ADDRESS_MASK >> 32))
487 iova = (size_t)(((uint64_t)fd->simple_pci.daddr_hi) << 32
488 | (uint64_t)fd->simple_pci.daddr_lo);
490 iova = (size_t)(((uint64_t)fd->simple_pci.saddr_hi) << 32
491 | (uint64_t)fd->simple_pci.saddr_lo);
493 ppjob = (struct rte_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
494 *job = (struct rte_qdma_job *)*ppjob;
495 (*job)->status = (fd->simple_pci.acc_err << 8) |
496 (fd->simple_pci.error);
497 vqid = (*job)->vq_id;
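/* Recover a single completed job from a long-format FD and return the
 * FLE element to its pool.
 */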
503 static inline uint16_t dpdmai_dev_get_single_job_lf(
504 struct qdma_virt_queue *qdma_vq,
505 const struct qbman_fd *fd,
506 struct rte_qdma_job **job,
509 struct qbman_fle *fle;
510 struct rte_qdma_job **ppjob = NULL;
514 * Fetch metadata from FLE. job and vq_id were set
515 * in metadata in the enqueue operation.
517 fle = (struct qbman_fle *)
518 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
521 ppjob = (struct rte_qdma_job **)((uintptr_t)(uint64_t)fle -
522 QDMA_FLE_FLE_OFFSET + QDMA_FLE_SINGLE_JOB_OFFSET);
524 status = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF);
527 (*job)->status = status;
529 /* Free FLE to the pool */
530 rte_mempool_put(qdma_vq->fle_pool,
532 ((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET));
534 return (*job)->vq_id;
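/* Recover all jobs of a completed scatter-gather FD, using the job count
 * stored at enqueue time, and return the FLE element to its pool.
 */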
537 static inline uint16_t dpdmai_dev_get_sg_job_lf(
538 struct qdma_virt_queue *qdma_vq,
539 const struct qbman_fd *fd,
540 struct rte_qdma_job **job,
543 struct qbman_fle *fle;
544 struct rte_qdma_job **ppjob = NULL;
548 * Fetch metadata from FLE. job and vq_id were set
549 * in metadata in the enqueue operation.
551 fle = (struct qbman_fle *)
552 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
553 *nb_jobs = *((uint16_t *)((uintptr_t)(uint64_t)fle -
554 QDMA_FLE_FLE_OFFSET + QDMA_FLE_JOB_NB_OFFSET));
555 ppjob = (struct rte_qdma_job **)((uintptr_t)(uint64_t)fle -
556 QDMA_FLE_FLE_OFFSET + QDMA_FLE_SG_JOBS_OFFSET);
557 status = (DPAA2_GET_FD_ERR(fd) << 8) | (DPAA2_GET_FD_FRC(fd) & 0xFF);
559 for (i = 0; i < (*nb_jobs); i++) {
561 job[i]->status = status;
564 /* Free FLE to the pool */
565 rte_mempool_put(qdma_vq->fle_pool,
567 ((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET));
569 return job[0]->vq_id;
572 /* Function to receive completed QDMA jobs for a given device queue (with DQRR prefetch) */
574 dpdmai_dev_dequeue_multijob_prefetch(
575 struct qdma_virt_queue *qdma_vq,
577 struct rte_qdma_job **job,
580 struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
581 struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
582 uint16_t rxq_id = qdma_pq->queue_id;
584 struct dpaa2_queue *rxq;
585 struct qbman_result *dq_storage, *dq_storage1 = NULL;
586 struct qbman_pull_desc pulldesc;
587 struct qbman_swp *swp;
588 struct queue_storage_info_t *q_storage;
590 uint8_t status, pending;
592 const struct qbman_fd *fd;
593 uint16_t vqid, num_rx_ret;
596 if (qdma_vq->flags & RTE_QDMA_VQ_FD_SG_FORMAT) {
597 /** Make sure there is enough space to get the jobs. */
598 if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
603 if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
604 ret = dpaa2_affine_qbman_swp();
607 "Failed to allocate IO portal, tid: %d\n",
612 swp = DPAA2_PER_LCORE_PORTAL;
614 pull_size = (nb_jobs > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_jobs;
615 rxq = &(dpdmai_dev->rx_queue[rxq_id]);
617 q_storage = rxq->q_storage;
619 if (unlikely(!q_storage->active_dqs)) {
620 q_storage->toggle = 0;
621 dq_storage = q_storage->dq_storage[q_storage->toggle];
622 q_storage->last_num_pkts = pull_size;
623 qbman_pull_desc_clear(&pulldesc);
624 qbman_pull_desc_set_numframes(&pulldesc,
625 q_storage->last_num_pkts);
626 qbman_pull_desc_set_fq(&pulldesc, fqid);
627 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
628 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
629 if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
630 while (!qbman_check_command_complete(
632 DPAA2_PER_LCORE_DPIO->index)))
634 clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
637 if (qbman_swp_pull(swp, &pulldesc)) {
639 "VDQ command not issued.QBMAN busy\n");
640 /* Portal was busy, try again */
645 q_storage->active_dqs = dq_storage;
646 q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
647 set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index,
651 dq_storage = q_storage->active_dqs;
652 rte_prefetch0((void *)(size_t)(dq_storage));
653 rte_prefetch0((void *)(size_t)(dq_storage + 1));
655 /* Prepare next pull descriptor. This will give space for the
656 * prefetching done on DQRR entries
658 q_storage->toggle ^= 1;
659 dq_storage1 = q_storage->dq_storage[q_storage->toggle];
660 qbman_pull_desc_clear(&pulldesc);
661 qbman_pull_desc_set_numframes(&pulldesc, pull_size);
662 qbman_pull_desc_set_fq(&pulldesc, fqid);
663 qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
664 (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
666 /* Check if the previously issued command is completed.
667 * Also seems like the SWP is shared between the Ethernet Driver
668 * and the SEC driver.
670 while (!qbman_check_command_complete(dq_storage))
672 if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
673 clear_swp_active_dqs(q_storage->active_dpio_id);
678 /* Loop until the dq_storage is updated with
681 while (!qbman_check_new_result(dq_storage))
683 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
684 /* Check whether the last pull command has completed and
685 * set the condition for loop termination
687 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
689 /* Check for valid frame. */
690 status = qbman_result_DQ_flags(dq_storage);
691 if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
694 fd = qbman_result_DQ_fd(dq_storage);
696 vqid = qdma_vq->get_job(qdma_vq, fd, &job[num_rx],
699 vq_id[num_rx] = vqid;
702 num_rx += num_rx_ret;
705 if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
706 while (!qbman_check_command_complete(
707 get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
709 clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
711 /* issue a volatile dequeue command for next pull */
713 if (qbman_swp_pull(swp, &pulldesc)) {
715 "VDQ command is not issued. QBMAN is busy (2)\n");
721 q_storage->active_dqs = dq_storage1;
722 q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
723 set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
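/* Function to receive completed QDMA jobs without DQRR prefetch: each call
 * issues its own volatile dequeue command and waits for the result.
 */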
729 dpdmai_dev_dequeue_multijob_no_prefetch(
730 struct qdma_virt_queue *qdma_vq,
732 struct rte_qdma_job **job,
735 struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
736 struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
737 uint16_t rxq_id = qdma_pq->queue_id;
739 struct dpaa2_queue *rxq;
740 struct qbman_result *dq_storage;
741 struct qbman_pull_desc pulldesc;
742 struct qbman_swp *swp;
744 uint8_t status, pending;
746 const struct qbman_fd *fd;
747 uint16_t vqid, num_rx_ret;
748 int ret, next_pull, num_pulled = 0;
750 if (qdma_vq->flags & RTE_QDMA_VQ_FD_SG_FORMAT) {
751 /** Make sure there is enough space to get the jobs. */
752 if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
759 if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
760 ret = dpaa2_affine_qbman_swp();
763 "Failed to allocate IO portal, tid: %d\n",
768 swp = DPAA2_PER_LCORE_PORTAL;
770 rxq = &(dpdmai_dev->rx_queue[rxq_id]);
774 dq_storage = rxq->q_storage->dq_storage[0];
775 /* Prepare dequeue descriptor */
776 qbman_pull_desc_clear(&pulldesc);
777 qbman_pull_desc_set_fq(&pulldesc, fqid);
778 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
779 (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
781 if (next_pull > dpaa2_dqrr_size) {
782 qbman_pull_desc_set_numframes(&pulldesc,
784 next_pull -= dpaa2_dqrr_size;
786 qbman_pull_desc_set_numframes(&pulldesc, next_pull);
791 if (qbman_swp_pull(swp, &pulldesc)) {
793 "VDQ command not issued. QBMAN busy");
794 /* Portal was busy, try again */
800 rte_prefetch0((void *)((size_t)(dq_storage + 1)));
801 /* Check if the previously issued command is completed. */
802 while (!qbman_check_command_complete(dq_storage))
809 /* Loop until dq_storage is updated
810 * with new token by QBMAN
812 while (!qbman_check_new_result(dq_storage))
814 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
816 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
818 /* Check for valid frame. */
819 status = qbman_result_DQ_flags(dq_storage);
820 if (unlikely((status &
821 QBMAN_DQ_STAT_VALIDFRAME) == 0))
824 fd = qbman_result_DQ_fd(dq_storage);
826 vqid = qdma_vq->get_job(qdma_vq, fd,
827 &job[num_rx], &num_rx_ret);
829 vq_id[num_rx] = vqid;
832 num_rx += num_rx_ret;
836 /* Last VDQ provided all packets and more packets are requested */
837 } while (next_pull && num_pulled == dpaa2_dqrr_size);
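/* Enqueue a burst of jobs on the DPDMAI Tx queue, either as one FD per
 * scatter-gather group or as one FD per job, retrying while the QBMAN
 * portal is busy.
 */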
843 dpdmai_dev_enqueue_multi(
844 struct qdma_virt_queue *qdma_vq,
845 struct rte_qdma_job **job,
848 struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
849 struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
850 uint16_t txq_id = qdma_pq->queue_id;
852 struct qbman_fd fd[RTE_QDMA_BURST_NB_MAX];
853 struct dpaa2_queue *txq;
854 struct qbman_eq_desc eqdesc;
855 struct qbman_swp *swp;
857 uint32_t num_to_send = 0;
859 uint32_t enqueue_loop, retry_count, loop;
861 if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
862 ret = dpaa2_affine_qbman_swp();
865 "Failed to allocate IO portal, tid: %d\n",
870 swp = DPAA2_PER_LCORE_PORTAL;
872 txq = &(dpdmai_dev->tx_queue[txq_id]);
874 /* Prepare enqueue descriptor */
875 qbman_eq_desc_clear(&eqdesc);
876 qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
877 qbman_eq_desc_set_no_orp(&eqdesc, 0);
878 qbman_eq_desc_set_response(&eqdesc, 0, 0);
880 if (qdma_vq->flags & RTE_QDMA_VQ_FD_SG_FORMAT) {
882 uint16_t sg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB ?
883 DPAA2_QDMA_MAX_SG_NB : nb_jobs;
884 uint16_t job_idx = 0;
885 uint16_t fd_sg_nb[8];
886 uint16_t nb_jobs_ret = 0;
888 if (nb_jobs % DPAA2_QDMA_MAX_SG_NB)
889 fd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB + 1;
891 fd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB;
893 memset(&fd[0], 0, sizeof(struct qbman_fd) * fd_nb);
895 for (loop = 0; loop < fd_nb; loop++) {
896 ret = qdma_vq->set_fd(qdma_vq, &fd[loop], &job[job_idx],
898 if (unlikely(ret < 0))
900 fd_sg_nb[loop] = sg_entry_nb;
901 nb_jobs -= sg_entry_nb;
902 job_idx += sg_entry_nb;
903 sg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB ?
904 DPAA2_QDMA_MAX_SG_NB : nb_jobs;
907 /* Enqueue the packet to the QBMAN */
908 enqueue_loop = 0; retry_count = 0;
910 while (enqueue_loop < fd_nb) {
911 ret = qbman_swp_enqueue_multiple(swp,
912 &eqdesc, &fd[enqueue_loop],
913 NULL, fd_nb - enqueue_loop);
914 if (unlikely(ret < 0)) {
916 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
919 for (loop = 0; loop < (uint32_t)ret; loop++)
921 fd_sg_nb[enqueue_loop + loop];
930 memset(fd, 0, nb_jobs * sizeof(struct qbman_fd));
932 while (nb_jobs > 0) {
933 num_to_send = (nb_jobs > dpaa2_eqcr_size) ?
934 dpaa2_eqcr_size : nb_jobs;
936 ret = qdma_vq->set_fd(qdma_vq, &fd[num_tx],
937 &job[num_tx], num_to_send);
938 if (unlikely(ret < 0))
941 /* Enqueue the packet to the QBMAN */
942 enqueue_loop = 0; retry_count = 0;
945 while (enqueue_loop < loop) {
946 ret = qbman_swp_enqueue_multiple(swp,
948 &fd[num_tx + enqueue_loop],
950 loop - enqueue_loop);
951 if (unlikely(ret < 0)) {
953 if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
960 num_tx += num_to_send;
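/* Take an unused H/W queue from the global list and assign it to the
 * given lcore.
 */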
966 static struct qdma_hw_queue *
967 alloc_hw_queue(uint32_t lcore_id)
969 struct qdma_hw_queue *queue = NULL;
971 DPAA2_QDMA_FUNC_TRACE();
973 /* Get a free queue from the list */
974 TAILQ_FOREACH(queue, &qdma_queue_list, next) {
975 if (queue->num_users == 0) {
976 queue->lcore_id = lcore_id;
986 free_hw_queue(struct qdma_hw_queue *queue)
988 DPAA2_QDMA_FUNC_TRACE();
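/* Pick a H/W queue for the lcore: allocate a new one while the per-core
 * limit allows it, otherwise share the least loaded existing queue.
 */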
994 static struct qdma_hw_queue *
995 get_hw_queue(struct qdma_device *qdma_dev, uint32_t lcore_id)
997 struct qdma_per_core_info *core_info;
998 struct qdma_hw_queue *queue, *temp;
999 uint32_t least_num_users;
1000 int num_hw_queues, i;
1002 DPAA2_QDMA_FUNC_TRACE();
1004 core_info = &qdma_core_info[lcore_id];
1005 num_hw_queues = core_info->num_hw_queues;
1008 * Allocate a HW queue if there are less queues
1009 * than maximum per core queues configured
1011 if (num_hw_queues < qdma_dev->max_hw_queues_per_core) {
1012 queue = alloc_hw_queue(lcore_id);
1014 core_info->hw_queues[num_hw_queues] = queue;
1015 core_info->num_hw_queues++;
1020 queue = core_info->hw_queues[0];
1021 /* In case there is no queue associated with the core return NULL */
1025 /* Fetch the least loaded H/W queue */
1026 least_num_users = core_info->hw_queues[0]->num_users;
1027 for (i = 0; i < num_hw_queues; i++) {
1028 temp = core_info->hw_queues[i];
1029 if (temp->num_users < least_num_users)
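/* Drop one reference on a shared H/W queue; the last user frees it and
 * removes it from the per-core bookkeeping.
 */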
1040 put_hw_queue(struct qdma_hw_queue *queue)
1042 struct qdma_per_core_info *core_info;
1043 int lcore_id, num_hw_queues, i;
1045 DPAA2_QDMA_FUNC_TRACE();
1048 * If this is the last user of the queue free it.
1049 * Also remove it from QDMA core info.
1051 if (queue->num_users == 1) {
1052 free_hw_queue(queue);
1054 /* Remove the physical queue from core info */
1055 lcore_id = queue->lcore_id;
1056 core_info = &qdma_core_info[lcore_id];
1057 num_hw_queues = core_info->num_hw_queues;
1058 for (i = 0; i < num_hw_queues; i++) {
1059 if (queue == core_info->hw_queues[i])
1062 for (; i < num_hw_queues - 1; i++)
1063 core_info->hw_queues[i] = core_info->hw_queues[i + 1];
1064 core_info->hw_queues[i] = NULL;
1071 dpaa2_qdma_attr_get(struct rte_rawdev *rawdev,
1072 __rte_unused const char *attr_name,
1073 uint64_t *attr_value)
1075 struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
1076 struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1077 struct rte_qdma_attr *qdma_attr = (struct rte_qdma_attr *)attr_value;
1079 DPAA2_QDMA_FUNC_TRACE();
1081 qdma_attr->num_hw_queues = qdma_dev->num_hw_queues;
1087 dpaa2_qdma_reset(struct rte_rawdev *rawdev)
1089 struct qdma_hw_queue *queue;
1090 struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
1091 struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1094 DPAA2_QDMA_FUNC_TRACE();
1096 /* In case QDMA device is not in stopped state, return -EBUSY */
1097 if (qdma_dev->state == 1) {
1099 "Device is in running state. Stop before reset.");
1103 /* In case there are pending jobs on any VQ, return -EBUSY */
1104 for (i = 0; i < qdma_dev->max_vqs; i++) {
1105 if (qdma_dev->vqs[i].in_use && (qdma_dev->vqs[i].num_enqueues !=
1106 qdma_dev->vqs[i].num_dequeues)) {
1107 DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
1112 /* Reset HW queues */
1113 TAILQ_FOREACH(queue, &qdma_queue_list, next)
1114 queue->num_users = 0;
1116 /* Reset and free virtual queues */
1117 for (i = 0; i < qdma_dev->max_vqs; i++) {
1118 if (qdma_dev->vqs[i].status_ring)
1119 rte_ring_free(qdma_dev->vqs[i].status_ring);
1122 rte_free(qdma_dev->vqs);
1123 qdma_dev->vqs = NULL;
1125 /* Reset per core info */
1126 memset(&qdma_core_info, 0,
1127 sizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);
1129 /* Reset QDMA device structure */
1130 qdma_dev->max_hw_queues_per_core = 0;
1131 qdma_dev->fle_queue_pool_cnt = 0;
1132 qdma_dev->max_vqs = 0;
1138 dpaa2_qdma_configure(const struct rte_rawdev *rawdev,
1139 rte_rawdev_obj_t config,
1142 char name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
1143 struct rte_qdma_config *qdma_config = (struct rte_qdma_config *)config;
1144 struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
1145 struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1147 DPAA2_QDMA_FUNC_TRACE();
1149 if (config_size != sizeof(*qdma_config)) {
1150 DPAA2_QDMA_ERR("Config size mismatch. Expected %" PRIu64
1151 ", Got: %" PRIu64, (uint64_t)(sizeof(*qdma_config)),
1152 (uint64_t)config_size);
1156 /* In case QDMA device is not in stopped state, return -EBUSY */
1157 if (qdma_dev->state == 1) {
1159 "Device is in running state. Stop before config.");
1163 /* Set max HW queue per core */
1164 if (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {
1165 DPAA2_QDMA_ERR("H/W queues per core is more than: %d",
1166 MAX_HW_QUEUE_PER_CORE);
1169 qdma_dev->max_hw_queues_per_core =
1170 qdma_config->max_hw_queues_per_core;
1172 /* Allocate Virtual Queues */
1173 sprintf(name, "qdma_%d_vq", rawdev->dev_id);
1174 qdma_dev->vqs = rte_malloc(name,
1175 (sizeof(struct qdma_virt_queue) * qdma_config->max_vqs),
1176 RTE_CACHE_LINE_SIZE);
1177 if (!qdma_dev->vqs) {
1178 DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
1181 qdma_dev->max_vqs = qdma_config->max_vqs;
1182 qdma_dev->fle_queue_pool_cnt = qdma_config->fle_queue_pool_cnt;
1188 dpaa2_qdma_start(struct rte_rawdev *rawdev)
1190 struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
1191 struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1193 DPAA2_QDMA_FUNC_TRACE();
1195 qdma_dev->state = 1;
1201 check_devargs_handler(__rte_unused const char *key, const char *value,
1202 __rte_unused void *opaque)
1204 if (strcmp(value, "1"))
1211 dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
1213 struct rte_kvargs *kvlist;
1218 kvlist = rte_kvargs_parse(devargs->args, NULL);
1222 if (!rte_kvargs_count(kvlist, key)) {
1223 rte_kvargs_free(kvlist);
1227 if (rte_kvargs_process(kvlist, key,
1228 check_devargs_handler, NULL) < 0) {
1229 rte_kvargs_free(kvlist);
1232 rte_kvargs_free(kvlist);
1238 dpaa2_qdma_queue_setup(struct rte_rawdev *rawdev,
1239 __rte_unused uint16_t queue_id,
1240 rte_rawdev_obj_t queue_conf,
1246 struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
1247 struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1248 struct rte_qdma_queue_config *q_config =
1249 (struct rte_qdma_queue_config *)queue_conf;
1252 DPAA2_QDMA_FUNC_TRACE();
1254 if (conf_size != sizeof(*q_config)) {
1255 DPAA2_QDMA_ERR("Config size mismatch. Expected %" PRIu64
1256 ", Got: %" PRIu64, (uint64_t)(sizeof(*q_config)),
1257 (uint64_t)conf_size);
1261 rte_spinlock_lock(&qdma_dev->lock);
1263 /* Get a free Virtual Queue */
1264 for (i = 0; i < qdma_dev->max_vqs; i++) {
1265 if (qdma_dev->vqs[i].in_use == 0)
1269 /* Return in case no VQ is free */
1270 if (i == qdma_dev->max_vqs) {
1271 rte_spinlock_unlock(&qdma_dev->lock);
1272 DPAA2_QDMA_ERR("No free virtual queue available");
1276 if (q_config->flags & RTE_QDMA_VQ_FD_SG_FORMAT) {
1277 if (!(q_config->flags & RTE_QDMA_VQ_EXCLUSIVE_PQ)) {
1279 "qDMA SG format only supports physical queue!");
1280 rte_spinlock_unlock(&qdma_dev->lock);
1283 if (!(q_config->flags & RTE_QDMA_VQ_FD_LONG_FORMAT)) {
1285 "qDMA SG format only supports long FD format!");
1286 rte_spinlock_unlock(&qdma_dev->lock);
1289 pool_size = QDMA_FLE_SG_POOL_SIZE;
1291 pool_size = QDMA_FLE_SINGLE_POOL_SIZE;
1294 if (q_config->flags & RTE_QDMA_VQ_EXCLUSIVE_PQ) {
1295 /* Allocate HW queue for a VQ */
1296 qdma_dev->vqs[i].hw_queue = alloc_hw_queue(q_config->lcore_id);
1297 qdma_dev->vqs[i].exclusive_hw_queue = 1;
1299 /* Allocate a Ring for Virtual Queue in VQ mode */
1300 snprintf(ring_name, sizeof(ring_name), "status ring %d", i);
1301 qdma_dev->vqs[i].status_ring = rte_ring_create(ring_name,
1302 qdma_dev->fle_queue_pool_cnt, rte_socket_id(), 0);
1303 if (!qdma_dev->vqs[i].status_ring) {
1304 DPAA2_QDMA_ERR("Status ring creation failed for vq");
1305 rte_spinlock_unlock(&qdma_dev->lock);
1309 /* Get a HW queue (shared) for a VQ */
1310 qdma_dev->vqs[i].hw_queue = get_hw_queue(qdma_dev,
1311 q_config->lcore_id);
1312 qdma_dev->vqs[i].exclusive_hw_queue = 0;
1315 if (qdma_dev->vqs[i].hw_queue == NULL) {
1316 DPAA2_QDMA_ERR("No H/W queue available for VQ");
1317 if (qdma_dev->vqs[i].status_ring)
1318 rte_ring_free(qdma_dev->vqs[i].status_ring);
1319 qdma_dev->vqs[i].status_ring = NULL;
1320 rte_spinlock_unlock(&qdma_dev->lock);
1324 snprintf(pool_name, sizeof(pool_name),
1325 "qdma_fle_pool%u_queue%d", getpid(), i);
1326 qdma_dev->vqs[i].fle_pool = rte_mempool_create(pool_name,
1327 qdma_dev->fle_queue_pool_cnt, pool_size,
1328 QDMA_FLE_CACHE_SIZE(qdma_dev->fle_queue_pool_cnt), 0,
1329 NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
1330 if (!qdma_dev->vqs[i].fle_pool) {
1331 DPAA2_QDMA_ERR("qdma_fle_pool create failed");
1332 rte_spinlock_unlock(&qdma_dev->lock);
1336 qdma_dev->vqs[i].flags = q_config->flags;
1337 qdma_dev->vqs[i].in_use = 1;
1338 qdma_dev->vqs[i].lcore_id = q_config->lcore_id;
1339 memset(&qdma_dev->vqs[i].rbp, 0, sizeof(struct rte_qdma_rbp));
1341 if (q_config->flags & RTE_QDMA_VQ_FD_LONG_FORMAT) {
1342 if (q_config->flags & RTE_QDMA_VQ_FD_SG_FORMAT) {
1343 qdma_dev->vqs[i].set_fd = dpdmai_dev_set_sg_fd_lf;
1344 qdma_dev->vqs[i].get_job = dpdmai_dev_get_sg_job_lf;
1346 if (q_config->flags & RTE_QDMA_VQ_NO_RESPONSE)
1347 qdma_dev->vqs[i].set_fd =
1348 dpdmai_dev_set_multi_fd_lf_no_rsp;
1350 qdma_dev->vqs[i].set_fd =
1351 dpdmai_dev_set_multi_fd_lf;
1352 qdma_dev->vqs[i].get_job = dpdmai_dev_get_single_job_lf;
1355 qdma_dev->vqs[i].set_fd = dpdmai_dev_set_fd_us;
1356 qdma_dev->vqs[i].get_job = dpdmai_dev_get_job_us;
1358 if (dpaa2_get_devargs(rawdev->device->devargs,
1359 DPAA2_QDMA_NO_PREFETCH) ||
1360 (getenv("DPAA2_NO_QDMA_PREFETCH_RX"))) {
1361 /* If no prefetch is configured. */
1362 qdma_dev->vqs[i].dequeue_job =
1363 dpdmai_dev_dequeue_multijob_no_prefetch;
1364 DPAA2_QDMA_INFO("No Prefetch RX Mode enabled");
1366 qdma_dev->vqs[i].dequeue_job =
1367 dpdmai_dev_dequeue_multijob_prefetch;
1370 qdma_dev->vqs[i].enqueue_job = dpdmai_dev_enqueue_multi;
1372 if (q_config->rbp != NULL)
1373 memcpy(&qdma_dev->vqs[i].rbp, q_config->rbp,
1374 sizeof(struct rte_qdma_rbp));
1376 rte_spinlock_unlock(&qdma_dev->lock);
1382 dpaa2_qdma_enqueue(struct rte_rawdev *rawdev,
1383 __rte_unused struct rte_rawdev_buf **buffers,
1384 unsigned int nb_jobs,
1385 rte_rawdev_obj_t context)
1387 struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
1388 struct rte_qdma_enqdeq *e_context =
1389 (struct rte_qdma_enqdeq *)context;
1390 struct qdma_virt_queue *qdma_vq =
1391 &dpdmai_dev->qdma_dev->vqs[e_context->vq_id];
1394 ret = qdma_vq->enqueue_job(qdma_vq, e_context->job, nb_jobs);
1396 DPAA2_QDMA_ERR("DPDMAI device enqueue failed: %d", ret);
1400 qdma_vq->num_enqueues += ret;
1406 dpaa2_qdma_dequeue(struct rte_rawdev *rawdev,
1407 __rte_unused struct rte_rawdev_buf **buffers,
1408 unsigned int nb_jobs,
1409 rte_rawdev_obj_t cntxt)
1411 struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
1412 struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1413 struct rte_qdma_enqdeq *context =
1414 (struct rte_qdma_enqdeq *)cntxt;
1415 struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[context->vq_id];
1416 struct qdma_virt_queue *temp_qdma_vq;
1418 unsigned int ring_count;
1420 if (qdma_vq->flags & RTE_QDMA_VQ_FD_SG_FORMAT) {
1421 /** Make sure there is enough space to get the jobs. */
1422 if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
1426 /* Only dequeue when there are pending jobs on VQ */
1427 if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
1430 if (!(qdma_vq->flags & RTE_QDMA_VQ_FD_SG_FORMAT) &&
1431 qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))
1432 nb_jobs = (qdma_vq->num_enqueues - qdma_vq->num_dequeues);
1434 if (qdma_vq->exclusive_hw_queue) {
1435 /* In case of exclusive queue directly fetch from HW queue */
1436 ret = qdma_vq->dequeue_job(qdma_vq, NULL,
1437 context->job, nb_jobs);
1440 "Dequeue from DPDMAI device failed: %d", ret);
1443 qdma_vq->num_dequeues += ret;
1445 uint16_t temp_vq_id[RTE_QDMA_BURST_NB_MAX];
1447 * Get the QDMA completed jobs from the software ring.
1448 * In case they are not available on the ring poke the HW
1449 * to fetch completed jobs from corresponding HW queues
1451 ring_count = rte_ring_count(qdma_vq->status_ring);
1452 if (ring_count < nb_jobs) {
1453 /* TODO - How to have right budget */
1454 ret = qdma_vq->dequeue_job(qdma_vq,
1455 temp_vq_id, context->job, nb_jobs);
1456 for (i = 0; i < ret; i++) {
1457 temp_qdma_vq = &qdma_dev->vqs[temp_vq_id[i]];
1458 rte_ring_enqueue(temp_qdma_vq->status_ring,
1459 (void *)(context->job[i]));
1461 ring_count = rte_ring_count(
1462 qdma_vq->status_ring);
1466 /* Dequeue job from the software ring
1467 * to provide to the user
1469 ret = rte_ring_dequeue_bulk(qdma_vq->status_ring,
1470 (void **)context->job,
1473 qdma_vq->num_dequeues += ret;
1481 rte_qdma_vq_stats(struct rte_rawdev *rawdev,
1483 struct rte_qdma_vq_stats *vq_status)
1485 struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
1486 struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1487 struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vq_id];
1489 if (qdma_vq->in_use) {
1490 vq_status->exclusive_hw_queue = qdma_vq->exclusive_hw_queue;
1491 vq_status->lcore_id = qdma_vq->lcore_id;
1492 vq_status->num_enqueues = qdma_vq->num_enqueues;
1493 vq_status->num_dequeues = qdma_vq->num_dequeues;
1494 vq_status->num_pending_jobs = vq_status->num_enqueues -
1495 vq_status->num_dequeues;
1500 dpaa2_qdma_queue_release(struct rte_rawdev *rawdev,
1503 struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
1504 struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1506 struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vq_id];
1508 DPAA2_QDMA_FUNC_TRACE();
1510 /* In case there are pending jobs on any VQ, return -EBUSY */
1511 if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
1514 rte_spinlock_lock(&qdma_dev->lock);
1516 if (qdma_vq->exclusive_hw_queue)
1517 free_hw_queue(qdma_vq->hw_queue);
1519 if (qdma_vq->status_ring)
1520 rte_ring_free(qdma_vq->status_ring);
1522 put_hw_queue(qdma_vq->hw_queue);
1525 if (qdma_vq->fle_pool)
1526 rte_mempool_free(qdma_vq->fle_pool);
1528 memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));
1530 rte_spinlock_unlock(&qdma_dev->lock);
1536 dpaa2_qdma_stop(struct rte_rawdev *rawdev)
1538 struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
1539 struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
1541 DPAA2_QDMA_FUNC_TRACE();
1543 qdma_dev->state = 0;
1547 dpaa2_qdma_close(struct rte_rawdev *rawdev)
1549 DPAA2_QDMA_FUNC_TRACE();
1551 dpaa2_qdma_reset(rawdev);
1556 static struct rte_rawdev_ops dpaa2_qdma_ops = {
1557 .dev_configure = dpaa2_qdma_configure,
1558 .dev_start = dpaa2_qdma_start,
1559 .dev_stop = dpaa2_qdma_stop,
1560 .dev_reset = dpaa2_qdma_reset,
1561 .dev_close = dpaa2_qdma_close,
1562 .queue_setup = dpaa2_qdma_queue_setup,
1563 .queue_release = dpaa2_qdma_queue_release,
1564 .attr_get = dpaa2_qdma_attr_get,
1565 .enqueue_bufs = dpaa2_qdma_enqueue,
1566 .dequeue_bufs = dpaa2_qdma_dequeue,
1570 add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
1572 struct qdma_hw_queue *queue;
1575 DPAA2_QDMA_FUNC_TRACE();
1577 for (i = 0; i < dpdmai_dev->num_queues; i++) {
1578 queue = rte_zmalloc(NULL, sizeof(struct qdma_hw_queue), 0);
1581 "Memory allocation failed for QDMA queue");
1585 queue->dpdmai_dev = dpdmai_dev;
1586 queue->queue_id = i;
1588 TAILQ_INSERT_TAIL(&qdma_queue_list, queue, next);
1589 dpdmai_dev->qdma_dev->num_hw_queues++;
1596 remove_hw_queues_from_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
1598 struct qdma_hw_queue *queue = NULL;
1599 struct qdma_hw_queue *tqueue = NULL;
1601 DPAA2_QDMA_FUNC_TRACE();
1603 RTE_TAILQ_FOREACH_SAFE(queue, &qdma_queue_list, next, tqueue) {
1604 if (queue->dpdmai_dev == dpdmai_dev) {
1605 TAILQ_REMOVE(&qdma_queue_list, queue, next);
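/* Tear down a DPDMAI object: remove its H/W queues from the global list,
 * disable the device, free the per-queue DQRR storage and close it.
 */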
1613 dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)
1615 struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
1618 DPAA2_QDMA_FUNC_TRACE();
1620 /* Remove HW queues from global list */
1621 remove_hw_queues_from_list(dpdmai_dev);
1623 ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1626 DPAA2_QDMA_ERR("dpdmai disable failed");
1628 /* Free the DQRR storage for the Rx queues */
1629 for (i = 0; i < dpdmai_dev->num_queues; i++) {
1630 struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[i]);
1632 if (rxq->q_storage) {
1633 dpaa2_free_dq_storage(rxq->q_storage);
1634 rte_free(rxq->q_storage);
1638 /* Close the device at underlying layer*/
1639 ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
1641 DPAA2_QDMA_ERR("Failure closing dpdmai device");
1647 dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
1649 struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
1650 struct dpdmai_rx_queue_cfg rx_queue_cfg;
1651 struct dpdmai_attr attr;
1652 struct dpdmai_rx_queue_attr rx_attr;
1653 struct dpdmai_tx_queue_attr tx_attr;
1656 DPAA2_QDMA_FUNC_TRACE();
1658 /* Open DPDMAI device */
1659 dpdmai_dev->dpdmai_id = dpdmai_id;
1660 dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
1661 dpdmai_dev->qdma_dev = &q_dev;
1662 ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1663 dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
1665 DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
1669 /* Get DPDMAI attributes */
1670 ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1671 dpdmai_dev->token, &attr);
1673 DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
1677 dpdmai_dev->num_queues = attr.num_of_queues;
1679 /* Set up Rx Queues */
1680 for (i = 0; i < dpdmai_dev->num_queues; i++) {
1681 struct dpaa2_queue *rxq;
1683 memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
1684 ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
1687 i, 0, &rx_queue_cfg);
1689 DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
1694 /* Allocate DQ storage for the DPDMAI Rx queues */
1695 rxq = &(dpdmai_dev->rx_queue[i]);
1696 rxq->q_storage = rte_malloc("dq_storage",
1697 sizeof(struct queue_storage_info_t),
1698 RTE_CACHE_LINE_SIZE);
1699 if (!rxq->q_storage) {
1700 DPAA2_QDMA_ERR("q_storage allocation failed");
1705 memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
1706 ret = dpaa2_alloc_dq_storage(rxq->q_storage);
1708 DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
1713 /* Get Rx and Tx queues FQID's */
1714 for (i = 0; i < dpdmai_dev->num_queues; i++) {
1715 ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1716 dpdmai_dev->token, i, 0, &rx_attr);
1718 DPAA2_QDMA_ERR("Reading Rx queue failed with err: %d",
1722 dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
1724 ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1725 dpdmai_dev->token, i, 0, &tx_attr);
1727 DPAA2_QDMA_ERR("Reading Tx queue failed with err: %d",
1731 dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
1734 /* Enable the device */
1735 ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
1738 DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
1742 /* Add the HW queue to the global list */
1743 ret = add_hw_queues_to_list(dpdmai_dev);
1745 DPAA2_QDMA_ERR("Adding H/W queue to list failed");
1749 if (!dpaa2_coherent_no_alloc_cache) {
1750 if (dpaa2_svr_family == SVR_LX2160A) {
1751 dpaa2_coherent_no_alloc_cache =
1752 DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
1753 dpaa2_coherent_alloc_cache =
1754 DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
1756 dpaa2_coherent_no_alloc_cache =
1757 DPAA2_COHERENT_NO_ALLOCATE_CACHE;
1758 dpaa2_coherent_alloc_cache =
1759 DPAA2_COHERENT_ALLOCATE_CACHE;
1763 DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
1765 rte_spinlock_init(&dpdmai_dev->qdma_dev->lock);
1769 dpaa2_dpdmai_dev_uninit(rawdev);
1774 rte_dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
1775 struct rte_dpaa2_device *dpaa2_dev)
1777 struct rte_rawdev *rawdev;
1780 DPAA2_QDMA_FUNC_TRACE();
1782 rawdev = rte_rawdev_pmd_allocate(dpaa2_dev->device.name,
1783 sizeof(struct dpaa2_dpdmai_dev),
1786 DPAA2_QDMA_ERR("Unable to allocate rawdevice");
1790 dpaa2_dev->rawdev = rawdev;
1791 rawdev->dev_ops = &dpaa2_qdma_ops;
1792 rawdev->device = &dpaa2_dev->device;
1793 rawdev->driver_name = dpaa2_drv->driver.name;
1795 /* Invoke PMD device initialization function */
1796 ret = dpaa2_dpdmai_dev_init(rawdev, dpaa2_dev->object_id);
1798 rte_rawdev_pmd_release(rawdev);
1802 /* Reset the QDMA device */
1803 ret = dpaa2_qdma_reset(rawdev);
1805 DPAA2_QDMA_ERR("Resetting QDMA failed");
1813 rte_dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
1815 struct rte_rawdev *rawdev = dpaa2_dev->rawdev;
1818 DPAA2_QDMA_FUNC_TRACE();
1820 dpaa2_dpdmai_dev_uninit(rawdev);
1822 ret = rte_rawdev_pmd_release(rawdev);
1824 DPAA2_QDMA_ERR("Device cleanup failed");
1829 static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
1830 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
1831 .drv_type = DPAA2_QDMA,
1832 .probe = rte_dpaa2_qdma_probe,
1833 .remove = rte_dpaa2_qdma_remove,
1836 RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
1837 RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
1838 "no_prefetch=<int> ");
1839 RTE_LOG_REGISTER(dpaa2_qdma_logtype, pmd.raw.dpaa2.qdma, INFO);