/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#include <string.h>

#include <rte_eal.h>
#include <rte_fslmc.h>
#include <rte_atomic.h>
#include <rte_lcore.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_kvargs.h>

#include <mc/fsl_dpdmai.h>
#include <portal/dpaa2_hw_pvt.h>
#include <portal/dpaa2_hw_dpio.h>

#include "rte_pmd_dpaa2_qdma.h"
#include "dpaa2_qdma.h"
#include "dpaa2_qdma_logs.h"

#define DPAA2_QDMA_NO_PREFETCH "no_prefetch"

/* Dynamic log type identifier */
int dpaa2_qdma_logtype;

uint32_t dpaa2_coherent_no_alloc_cache;
uint32_t dpaa2_coherent_alloc_cache;

/* QDMA device */
static struct qdma_device q_dev;

/* QDMA H/W queues list */
TAILQ_HEAD(qdma_hw_queue_list, qdma_hw_queue);
static struct qdma_hw_queue_list qdma_queue_list
	= TAILQ_HEAD_INITIALIZER(qdma_queue_list);

/* QDMA per core data */
static struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];
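
/*
 * Build a frame descriptor in the "simple PCI" (ultra-short) format, used
 * when either leg of the copy is routed over PCIe via route-by-port (RBP).
 * When a leg has its RBP bit clear, the read/write transaction type falls
 * back to the coherent cacheable settings selected at device init.
 */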
static inline int
qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
		     uint32_t len, struct qbman_fd *fd,
		     struct rte_qdma_rbp *rbp)
{
	fd->simple_pci.saddr_lo = lower_32_bits((uint64_t)(src));
	fd->simple_pci.saddr_hi = upper_32_bits((uint64_t)(src));

	fd->simple_pci.len_sl = len;

	fd->simple_pci.bmt = 1;
	fd->simple_pci.fmt = 3;
	fd->simple_pci.sl = 1;
	fd->simple_pci.ser = 1;

	fd->simple_pci.sportid = rbp->sportid;	/* pcie 3 */
	fd->simple_pci.srbp = rbp->srbp;
	if (rbp->srbp)
		fd->simple_pci.rdttype = 0;
	else
		fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;

	/* dest is pcie memory */
	fd->simple_pci.dportid = rbp->dportid;	/* pcie 3 */
	fd->simple_pci.drbp = rbp->drbp;
	if (rbp->drbp)
		fd->simple_pci.wrttype = 0;
	else
		fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;

	fd->simple_pci.daddr_lo = lower_32_bits((uint64_t)(dest));
	fd->simple_pci.daddr_hi = upper_32_bits((uint64_t)(dest));

	return 0;
}
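
/*
 * Build a frame descriptor in the "simple DDR" format, used for plain
 * memory-to-memory copies where neither leg needs PCIe route-by-port.
 */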
static inline int
qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
		     uint32_t len, struct qbman_fd *fd)
{
	fd->simple_ddr.saddr_lo = lower_32_bits((uint64_t)(src));
	fd->simple_ddr.saddr_hi = upper_32_bits((uint64_t)(src));

	fd->simple_ddr.len = len;

	fd->simple_ddr.bmt = 1;
	fd->simple_ddr.fmt = 3;
	fd->simple_ddr.sl = 1;
	fd->simple_ddr.ser = 1;
	/**
	 * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011
	 * Coherent copy of cacheable memory,
	 * lookup in downstream cache, no allocate
	 * on miss.
	 */
	fd->simple_ddr.rns = 0;
	fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
	/**
	 * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111
	 * Coherent write of cacheable memory,
	 * lookup in downstream cache, no allocate on miss.
	 */
	fd->simple_ddr.wns = 0;
	fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;

	fd->simple_ddr.daddr_lo = lower_32_bits((uint64_t)(dest));
	fd->simple_ddr.daddr_hi = upper_32_bits((uint64_t)(dest));

	return 0;
}
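
/*
 * Lay out a compound frame for the long-format path. The element holds
 * DPAA2_QDMA_MAX_FLE frame list entries followed by a source/destination
 * descriptor (SDD) pair: fle[0] points at the SDDs, fle[1] describes the
 * source buffer and fle[2] the destination buffer.
 */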
static void
dpaa2_qdma_populate_fle(struct qbman_fle *fle,
			struct rte_qdma_rbp *rbp,
			uint64_t src, uint64_t dest,
			size_t len, uint32_t flags)
{
	struct qdma_sdd *sdd;

	sdd = (struct qdma_sdd *)((uint8_t *)(fle) +
		(DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle)));

	/* first frame list to source descriptor */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sdd));
	DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));

	/* source and destination descriptor */
	if (rbp && rbp->enable) {
		/* source */
		sdd->read_cmd.portid = rbp->sportid;
		sdd->rbpcmd_simple.pfid = rbp->spfid;
		sdd->rbpcmd_simple.vfid = rbp->svfid;

		if (rbp->srbp) {
			sdd->read_cmd.rbp = rbp->srbp;
			sdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW;
		} else {
			sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
		}
		sdd++;
		/* destination */
		sdd->write_cmd.portid = rbp->dportid;
		sdd->rbpcmd_simple.pfid = rbp->dpfid;
		sdd->rbpcmd_simple.vfid = rbp->dvfid;

		if (rbp->drbp) {
			sdd->write_cmd.rbp = rbp->drbp;
			sdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW;
		} else {
			sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
		}
	} else {
		sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
		sdd++;
		sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
	}
	fle++;
	/* source frame list to source buffer */
	if (flags & RTE_QDMA_JOB_SRC_PHY) {
		DPAA2_SET_FLE_ADDR(fle, src);
		DPAA2_SET_FLE_BMT(fle);
	} else {
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
	}
	DPAA2_SET_FLE_LEN(fle, len);

	fle++;
	/* destination frame list to destination buffer */
	if (flags & RTE_QDMA_JOB_DEST_PHY) {
		DPAA2_SET_FLE_BMT(fle);
		DPAA2_SET_FLE_ADDR(fle, dest);
	} else {
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
	}
	DPAA2_SET_FLE_LEN(fle, len);

	/* Final bit: 1, for last frame list */
	DPAA2_SET_FLE_FIN(fle);
}
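
/*
 * Ultra-short format enqueue: the job pointer is stashed one pointer-width
 * word ahead of the source buffer (or the destination buffer when the
 * source carries PCIe upper-address bits) so it can be recovered from the
 * FD alone at dequeue time.
 */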
static inline int dpdmai_dev_set_fd_us(
		struct qdma_virt_queue *qdma_vq,
		struct qbman_fd *fd,
		struct rte_qdma_job *job)
{
	struct rte_qdma_rbp *rbp = &qdma_vq->rbp;
	struct rte_qdma_job **ppjob;
	size_t iova;
	int ret = 0;

	if (job->src & QDMA_RBP_UPPER_ADDRESS_MASK)
		iova = (size_t)job->dest;
	else
		iova = (size_t)job->src;

	/* Set the metadata */
	job->vq_id = qdma_vq->vq_id;
	ppjob = (struct rte_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
	*ppjob = job;

	if ((rbp->drbp == 1) || (rbp->srbp == 1))
		ret = qdma_populate_fd_pci((phys_addr_t)job->src,
					   (phys_addr_t)job->dest,
					   job->len, fd, rbp);
	else
		ret = qdma_populate_fd_ddr((phys_addr_t)job->src,
					   (phys_addr_t)job->dest,
					   job->len, fd);

	return ret;
}
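
/*
 * Long-format enqueue: take one element from the FLE pool, record the job
 * pointer in the element's metadata area, build the compound FLE/SDD frame
 * in the remainder of the element and point the FD at it.
 */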
static inline int dpdmai_dev_set_fd_lf(
		struct qdma_virt_queue *qdma_vq,
		struct qbman_fd *fd,
		struct rte_qdma_job *job)
{
	struct rte_qdma_rbp *rbp = &qdma_vq->rbp;
	struct rte_qdma_job **ppjob;
	void *elem;
	struct qbman_fle *fle;
	uint64_t elem_iova, fle_iova;
	int ret = 0;
	struct qdma_device *qdma_dev = QDMA_DEV_OF_VQ(qdma_vq);

	/*
	 * Get an FLE/SDD from FLE pool.
	 * Note: IO metadata is before the FLE and SDD memory.
	 */
	ret = rte_mempool_get(qdma_dev->fle_pool, (void **)(&elem));
	if (ret) {
		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
		return ret;
	}

#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
	elem_iova = rte_mempool_virt2iova(elem);
#else
	elem_iova = DPAA2_VADDR_TO_IOVA(elem);
#endif

	/* Set the metadata */
	job->vq_id = qdma_vq->vq_id;
	ppjob = (struct rte_qdma_job **)
		((uintptr_t)(uint64_t)elem + QDMA_FLE_JOB_OFFSET);
	*ppjob = job;

	fle = (struct qbman_fle *)
		((uintptr_t)(uint64_t)elem + QDMA_FLE_FLE_OFFSET);
	fle_iova = elem_iova + QDMA_FLE_FLE_OFFSET;

	DPAA2_SET_FD_ADDR(fd, fle_iova);
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);

	/* Populate FLE */
	memset(fle, 0, QDMA_FLE_POOL_SIZE);
	dpaa2_qdma_populate_fle(fle, rbp, job->src, job->dest,
				job->len, job->flags);

	return 0;
}
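
/*
 * Ultra-short format dequeue: recover the job pointer stored just before
 * the buffer by the matching dpdmai_dev_set_fd_us() and report the HW
 * completion status through it.
 */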
static inline uint16_t dpdmai_dev_get_job_us(
		struct qdma_virt_queue *qdma_vq __rte_unused,
		const struct qbman_fd *fd,
		struct rte_qdma_job **job)
{
	uint16_t vqid;
	size_t iova;
	struct rte_qdma_job **ppjob;

	if (fd->simple_pci.saddr_hi & (QDMA_RBP_UPPER_ADDRESS_MASK >> 32))
		iova = (size_t)(((uint64_t)fd->simple_pci.daddr_hi) << 32
				| (uint64_t)fd->simple_pci.daddr_lo);
	else
		iova = (size_t)(((uint64_t)fd->simple_pci.saddr_hi) << 32
				| (uint64_t)fd->simple_pci.saddr_lo);

	ppjob = (struct rte_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
	*job = (struct rte_qdma_job *)*ppjob;
	(*job)->status = (fd->simple_pci.acc_err << 8) |
			 (fd->simple_pci.error);
	vqid = (*job)->vq_id;

	return vqid;
}
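
/*
 * Long-format dequeue: walk back from the FLE address in the FD to the
 * pool element, recover the job pointer from its metadata and return the
 * element to the FLE pool.
 */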
static inline uint16_t dpdmai_dev_get_job_lf(
		struct qdma_virt_queue *qdma_vq,
		const struct qbman_fd *fd,
		struct rte_qdma_job **job)
{
	void *elem;
	struct qbman_fle *fle;
	struct rte_qdma_job **ppjob;
	uint16_t vqid;
	struct qdma_device *qdma_dev = QDMA_DEV_OF_VQ(qdma_vq);

	/*
	 * Fetch metadata from FLE. job and vq_id were set
	 * in metadata in the enqueue operation.
	 */
	fle = (struct qbman_fle *)
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
	elem = (void *)((uintptr_t)(uint64_t)fle - QDMA_FLE_FLE_OFFSET);

	ppjob = (struct rte_qdma_job **)
		((uintptr_t)(uint64_t)elem + QDMA_FLE_JOB_OFFSET);

	*job = (struct rte_qdma_job *)*ppjob;
	(*job)->status = (DPAA2_GET_FD_ERR(fd) << 8) |
			 (DPAA2_GET_FD_FRC(fd) & 0xFF);
	vqid = (*job)->vq_id;

	/* Free FLE to the pool */
	rte_mempool_put(qdma_dev->fle_pool, elem);

	return vqid;
}
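
/* Function to receive a QDMA job for a given device and queue.
 * Prefetch mode: two DQ storages are used in ping-pong fashion so that a
 * volatile dequeue (VDQ) command for the next burst is already in flight
 * while the current burst is being parsed.
 */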
static int
dpdmai_dev_dequeue_multijob_prefetch(
		struct qdma_virt_queue *qdma_vq,
		uint16_t *vq_id,
		struct rte_qdma_job **job,
		uint16_t nb_jobs)
{
	struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
	uint16_t rxq_id = qdma_pq->queue_id;

	struct dpaa2_queue *rxq;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	struct qbman_pull_desc pulldesc;
	struct qbman_swp *swp;
	struct queue_storage_info_t *q_storage;
	uint32_t fqid;
	uint8_t status, pending;
	uint8_t num_rx = 0;
	const struct qbman_fd *fd;
	uint16_t vqid;
	int ret, pull_size;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_QDMA_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	pull_size = (nb_jobs > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_jobs;
	rxq = &(dpdmai_dev->rx_queue[rxq_id]);
	fqid = rxq->fqid;
	q_storage = rxq->q_storage;

	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
			while (!qbman_check_command_complete(
				get_swp_active_dqs(
				DPAA2_PER_LCORE_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_QDMA_DP_WARN(
					"VDQ command not issued. QBMAN busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare next pull descriptor. This will give space for the
	 * prefetching done on DQRR entries
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
		(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/* Check if the previous issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd = qbman_result_DQ_fd(dq_storage);

		vqid = qdma_vq->get_job(qdma_vq, fd, &job[num_rx]);
		if (vq_id)
			vq_id[num_rx] = vqid;

		dq_storage++;
		num_rx++;
	} while (pending);

	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
		while (!qbman_check_command_complete(
			get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
	}
	/* Issue a volatile dequeue command for next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_QDMA_DP_WARN(
				"VDQ command is not issued. QBMAN is busy (2)\n");
			continue;
		}
		break;
	}

	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);

	return num_rx;
}
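
/*
 * Non-prefetch mode: issue a fresh VDQ command per burst and wait for it
 * to complete before parsing results; simpler than the prefetch path but
 * pays the pull latency on every call.
 */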
static int
dpdmai_dev_dequeue_multijob_no_prefetch(
		struct qdma_virt_queue *qdma_vq,
		uint16_t *vq_id,
		struct rte_qdma_job **job,
		uint16_t nb_jobs)
{
	struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
	uint16_t rxq_id = qdma_pq->queue_id;

	struct dpaa2_queue *rxq;
	struct qbman_result *dq_storage;
	struct qbman_pull_desc pulldesc;
	struct qbman_swp *swp;
	uint32_t fqid;
	uint8_t status, pending;
	uint8_t num_rx = 0;
	const struct qbman_fd *fd;
	uint16_t vqid;
	int ret, next_pull = nb_jobs, num_pulled = 0;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_QDMA_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	rxq = &(dpdmai_dev->rx_queue[rxq_id]);
	fqid = rxq->fqid;

	do {
		dq_storage = rxq->q_storage->dq_storage[0];
		/* Prepare dequeue descriptor */
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);

		if (next_pull > dpaa2_dqrr_size) {
			qbman_pull_desc_set_numframes(&pulldesc,
					dpaa2_dqrr_size);
			next_pull -= dpaa2_dqrr_size;
		} else {
			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
			next_pull = 0;
		}

		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_QDMA_DP_WARN(
					"VDQ command not issued. QBMAN busy");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}

		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
		/* Check if the previous issued command is completed. */
		while (!qbman_check_command_complete(dq_storage))
			;

		num_pulled = 0;
		pending = 1;

		do {
			/* Loop until dq_storage is updated
			 * with new token by QBMAN
			 */
			while (!qbman_check_new_result(dq_storage))
				;
			rte_prefetch0((void *)((size_t)(dq_storage + 2)));

			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
				pending = 0;
				/* Check for valid frame. */
				status = qbman_result_DQ_flags(dq_storage);
				if (unlikely((status &
					QBMAN_DQ_STAT_VALIDFRAME) == 0))
					continue;
			}
			fd = qbman_result_DQ_fd(dq_storage);

			vqid = qdma_vq->get_job(qdma_vq, fd, &job[num_rx]);
			if (vq_id)
				vq_id[num_rx] = vqid;

			dq_storage++;
			num_rx++;
			num_pulled++;

		} while (pending);
	/* Last VDQ provided all packets and more packets are requested */
	} while (next_pull && num_pulled == dpaa2_dqrr_size);

	return num_rx;
}
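
/*
 * Enqueue up to nb_jobs jobs: build one FD per job via the per-VQ set_fd()
 * hook, then push them to QBMAN in EQCR-sized batches, retrying busy
 * enqueues up to DPAA2_MAX_TX_RETRY_COUNT times.
 */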
static int
dpdmai_dev_enqueue_multi(
		struct qdma_virt_queue *qdma_vq,
		struct rte_qdma_job **job,
		uint16_t nb_jobs)
{
	struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
	uint16_t txq_id = qdma_pq->queue_id;

	struct qbman_fd fd[RTE_QDMA_BURST_NB_MAX];
	struct dpaa2_queue *txq;
	struct qbman_eq_desc eqdesc;
	struct qbman_swp *swp;
	int ret;
	uint32_t num_to_send = 0;
	uint16_t num_tx = 0;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_QDMA_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	txq = &(dpdmai_dev->tx_queue[txq_id]);

	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
	qbman_eq_desc_set_no_orp(&eqdesc, 0);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);

	memset(fd, 0, nb_jobs * sizeof(struct qbman_fd));

	while (nb_jobs > 0) {
		uint32_t loop;

		num_to_send = (nb_jobs > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_jobs;

		/* Build an FD per job */
		for (loop = 0; loop < num_to_send; loop++) {
			ret = qdma_vq->set_fd(qdma_vq, &fd[loop], job[num_tx]);
			if (ret < 0) {
				/* Set nb_jobs to loop, so outer while loop
				 * breaks out.
				 */
				nb_jobs = loop;
				break;
			}

			num_tx++;
		}

		/* Enqueue the packet to the QBMAN */
		uint32_t enqueue_loop = 0, retry_count = 0;

		while (enqueue_loop < loop) {
			ret = qbman_swp_enqueue_multiple(swp,
					&eqdesc,
					&fd[enqueue_loop],
					NULL,
					loop - enqueue_loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
					return num_tx - (loop - enqueue_loop);
			} else {
				enqueue_loop += ret;
				retry_count = 0;
			}
		}
		nb_jobs -= loop;
	}

	return num_tx;
}
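
/*
 * H/W queue bookkeeping: DPDMAI queues live on a global list and are
 * handed out either exclusively (alloc_hw_queue) or shared per lcore with
 * least-loaded selection (get_hw_queue/put_hw_queue).
 */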
static struct qdma_hw_queue *
alloc_hw_queue(uint32_t lcore_id)
{
	struct qdma_hw_queue *queue = NULL;

	DPAA2_QDMA_FUNC_TRACE();

	/* Get a free queue from the list */
	TAILQ_FOREACH(queue, &qdma_queue_list, next) {
		if (queue->num_users == 0) {
			queue->lcore_id = lcore_id;
			queue->num_users++;
			break;
		}
	}

	return queue;
}

static void
free_hw_queue(struct qdma_hw_queue *queue)
{
	DPAA2_QDMA_FUNC_TRACE();

	queue->num_users--;
}

static struct qdma_hw_queue *
get_hw_queue(struct qdma_device *qdma_dev, uint32_t lcore_id)
{
	struct qdma_per_core_info *core_info;
	struct qdma_hw_queue *queue, *temp;
	uint32_t least_num_users;
	int num_hw_queues, i;

	DPAA2_QDMA_FUNC_TRACE();

	core_info = &qdma_core_info[lcore_id];
	num_hw_queues = core_info->num_hw_queues;

	/*
	 * Allocate a HW queue if there are less queues
	 * than maximum per core queues configured
	 */
	if (num_hw_queues < qdma_dev->max_hw_queues_per_core) {
		queue = alloc_hw_queue(lcore_id);
		if (queue) {
			core_info->hw_queues[num_hw_queues] = queue;
			core_info->num_hw_queues++;
			return queue;
		}
	}

	queue = core_info->hw_queues[0];
	/* In case there is no queue associated with the core return NULL */
	if (!queue)
		return NULL;

	/* Fetch the least loaded H/W queue */
	least_num_users = core_info->hw_queues[0]->num_users;
	for (i = 0; i < num_hw_queues; i++) {
		temp = core_info->hw_queues[i];
		if (temp->num_users < least_num_users)
			queue = temp;
	}

	if (queue)
		queue->num_users++;

	return queue;
}

static void
put_hw_queue(struct qdma_hw_queue *queue)
{
	struct qdma_per_core_info *core_info;
	int lcore_id, num_hw_queues, i;

	DPAA2_QDMA_FUNC_TRACE();

	/*
	 * If this is the last user of the queue free it.
	 * Also remove it from QDMA core info.
	 */
	if (queue->num_users == 1) {
		free_hw_queue(queue);

		/* Remove the physical queue from core info */
		lcore_id = queue->lcore_id;
		core_info = &qdma_core_info[lcore_id];
		num_hw_queues = core_info->num_hw_queues;
		for (i = 0; i < num_hw_queues; i++) {
			if (queue == core_info->hw_queues[i])
				break;
		}
		for (; i < num_hw_queues - 1; i++)
			core_info->hw_queues[i] = core_info->hw_queues[i + 1];
		core_info->hw_queues[i] = NULL;
	} else {
		queue->num_users--;
	}
}

static int
dpaa2_qdma_attr_get(struct rte_rawdev *rawdev,
		    __rte_unused const char *attr_name,
		    uint64_t *attr_value)
{
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
	struct rte_qdma_attr *qdma_attr = (struct rte_qdma_attr *)attr_value;

	DPAA2_QDMA_FUNC_TRACE();

	qdma_attr->num_hw_queues = qdma_dev->num_hw_queues;

	return 0;
}

static int
dpaa2_qdma_reset(struct rte_rawdev *rawdev)
{
	struct qdma_hw_queue *queue;
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
	int i;

	DPAA2_QDMA_FUNC_TRACE();

	/* In case QDMA device is not in stopped state, return -EBUSY */
	if (qdma_dev->state == 1) {
		DPAA2_QDMA_ERR(
			"Device is in running state. Stop before reset.");
		return -EBUSY;
	}

	/* In case there are pending jobs on any VQ, return -EBUSY */
	for (i = 0; i < qdma_dev->max_vqs; i++) {
		if (qdma_dev->vqs[i].in_use && (qdma_dev->vqs[i].num_enqueues !=
		    qdma_dev->vqs[i].num_dequeues)) {
			DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
			return -EBUSY;
		}
	}

	/* Reset HW queues */
	TAILQ_FOREACH(queue, &qdma_queue_list, next)
		queue->num_users = 0;

	/* Reset and free virtual queues */
	for (i = 0; i < qdma_dev->max_vqs; i++) {
		if (qdma_dev->vqs[i].status_ring)
			rte_ring_free(qdma_dev->vqs[i].status_ring);
	}

	rte_free(qdma_dev->vqs);
	qdma_dev->vqs = NULL;

	/* Reset per core info */
	memset(&qdma_core_info, 0,
		sizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);

	/* Free the FLE pool */
	if (qdma_dev->fle_pool)
		rte_mempool_free(qdma_dev->fle_pool);

	/* Reset QDMA device structure */
	qdma_dev->max_hw_queues_per_core = 0;
	qdma_dev->fle_pool = NULL;
	qdma_dev->fle_pool_count = 0;
	qdma_dev->max_vqs = 0;

	return 0;
}
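
/*
 * Device configuration. A minimal caller sketch (illustrative only, not
 * part of this driver; the device name "dpdmai.1" is an assumption):
 *
 *	struct rte_qdma_config cfg = {
 *		.max_hw_queues_per_core = 2,
 *		.max_vqs = 16,
 *		.fle_pool_count = 4096,
 *	};
 *	struct rte_rawdev_info info = { .dev_private = &cfg };
 *	uint16_t dev_id = rte_rawdev_get_dev_id("dpdmai.1");
 *
 *	if (rte_rawdev_configure(dev_id, &info, sizeof(cfg)) < 0)
 *		rte_exit(EXIT_FAILURE, "qdma configure failed\n");
 */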
static int
dpaa2_qdma_configure(const struct rte_rawdev *rawdev,
		     rte_rawdev_obj_t config,
		     size_t config_size)
{
	char name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
	struct rte_qdma_config *qdma_config = (struct rte_qdma_config *)config;
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;

	DPAA2_QDMA_FUNC_TRACE();

	if (config_size != sizeof(*qdma_config))
		return -EINVAL;

	/* In case QDMA device is not in stopped state, return -EBUSY */
	if (qdma_dev->state == 1) {
		DPAA2_QDMA_ERR(
			"Device is in running state. Stop before config.");
		return -EBUSY;
	}

	/* Set max HW queue per core */
	if (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {
		DPAA2_QDMA_ERR("H/W queues per core is more than: %d",
			       MAX_HW_QUEUE_PER_CORE);
		return -EINVAL;
	}
	qdma_dev->max_hw_queues_per_core =
		qdma_config->max_hw_queues_per_core;

	/* Allocate Virtual Queues */
	sprintf(name, "qdma_%d_vq", rawdev->dev_id);
	qdma_dev->vqs = rte_malloc(name,
			(sizeof(struct qdma_virt_queue) * qdma_config->max_vqs),
			RTE_CACHE_LINE_SIZE);
	if (!qdma_dev->vqs) {
		DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
		return -ENOMEM;
	}
	qdma_dev->max_vqs = qdma_config->max_vqs;

	/* Allocate FLE pool; just append PID so that in case of
	 * multiprocess, the pools don't collide.
	 */
	snprintf(name, sizeof(name), "qdma_fle_pool%u",
		 getpid());
	qdma_dev->fle_pool = rte_mempool_create(name,
			qdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,
			QDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,
			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
	if (!qdma_dev->fle_pool) {
		DPAA2_QDMA_ERR("qdma_fle_pool create failed");
		rte_free(qdma_dev->vqs);
		qdma_dev->vqs = NULL;
		return -ENOMEM;
	}
	qdma_dev->fle_pool_count = qdma_config->fle_pool_count;

	return 0;
}

static int
dpaa2_qdma_start(struct rte_rawdev *rawdev)
{
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;

	DPAA2_QDMA_FUNC_TRACE();

	qdma_dev->state = 1;

	return 0;
}

static int
check_devargs_handler(__rte_unused const char *key, const char *value,
		      __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}

static int
dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
{
	struct rte_kvargs *kvlist;

	if (!devargs)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (!kvlist)
		return 0;

	if (!rte_kvargs_count(kvlist, key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}

	if (rte_kvargs_process(kvlist, key,
			       check_devargs_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}
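
/*
 * Virtual queue setup. An illustrative caller sketch (not part of this
 * driver) requesting a shared H/W queue with long-format FDs on the
 * current lcore; the returned value is the VQ id to use in
 * struct rte_qdma_enqdeq later:
 *
 *	struct rte_qdma_queue_config qcfg = {
 *		.lcore_id = rte_lcore_id(),
 *		.flags = RTE_QDMA_VQ_FD_LONG_FORMAT,
 *		.rbp = NULL,
 *	};
 *	int vq_id = rte_rawdev_queue_setup(dev_id, 0, &qcfg, sizeof(qcfg));
 */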
static int
dpaa2_qdma_queue_setup(struct rte_rawdev *rawdev,
		       __rte_unused uint16_t queue_id,
		       rte_rawdev_obj_t queue_conf,
		       size_t conf_size)
{
	char ring_name[32];
	int i;
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
	struct rte_qdma_queue_config *q_config =
		(struct rte_qdma_queue_config *)queue_conf;

	DPAA2_QDMA_FUNC_TRACE();

	if (conf_size != sizeof(*q_config))
		return -EINVAL;

	rte_spinlock_lock(&qdma_dev->lock);

	/* Get a free Virtual Queue */
	for (i = 0; i < qdma_dev->max_vqs; i++) {
		if (qdma_dev->vqs[i].in_use == 0)
			break;
	}

	/* Return in case no VQ is free */
	if (i == qdma_dev->max_vqs) {
		rte_spinlock_unlock(&qdma_dev->lock);
		DPAA2_QDMA_ERR("No free virtual queue on QDMA device");
		return -ENODEV;
	}

	if (q_config->flags & RTE_QDMA_VQ_EXCLUSIVE_PQ) {
		/* Allocate HW queue for a VQ */
		qdma_dev->vqs[i].hw_queue = alloc_hw_queue(q_config->lcore_id);
		qdma_dev->vqs[i].exclusive_hw_queue = 1;
	} else {
		/* Allocate a Ring for Virtual Queue in VQ mode */
		snprintf(ring_name, sizeof(ring_name), "status ring %d", i);
		qdma_dev->vqs[i].status_ring = rte_ring_create(ring_name,
			qdma_dev->fle_pool_count, rte_socket_id(), 0);
		if (!qdma_dev->vqs[i].status_ring) {
			DPAA2_QDMA_ERR("Status ring creation failed for vq");
			rte_spinlock_unlock(&qdma_dev->lock);
			return rte_errno;
		}

		/* Get a HW queue (shared) for a VQ */
		qdma_dev->vqs[i].hw_queue = get_hw_queue(qdma_dev,
							 q_config->lcore_id);
		qdma_dev->vqs[i].exclusive_hw_queue = 0;
	}

	if (qdma_dev->vqs[i].hw_queue == NULL) {
		DPAA2_QDMA_ERR("No H/W queue available for VQ");
		if (qdma_dev->vqs[i].status_ring)
			rte_ring_free(qdma_dev->vqs[i].status_ring);
		qdma_dev->vqs[i].status_ring = NULL;
		rte_spinlock_unlock(&qdma_dev->lock);
		return -ENODEV;
	}

	qdma_dev->vqs[i].in_use = 1;
	qdma_dev->vqs[i].lcore_id = q_config->lcore_id;
	memset(&qdma_dev->vqs[i].rbp, 0, sizeof(struct rte_qdma_rbp));
	/* Record own index; used as job metadata for completion routing */
	qdma_dev->vqs[i].vq_id = i;

	if (q_config->flags & RTE_QDMA_VQ_FD_LONG_FORMAT) {
		qdma_dev->vqs[i].set_fd = dpdmai_dev_set_fd_lf;
		qdma_dev->vqs[i].get_job = dpdmai_dev_get_job_lf;
	} else {
		qdma_dev->vqs[i].set_fd = dpdmai_dev_set_fd_us;
		qdma_dev->vqs[i].get_job = dpdmai_dev_get_job_us;
	}
	if (dpaa2_get_devargs(rawdev->device->devargs,
			DPAA2_QDMA_NO_PREFETCH) ||
			(getenv("DPAA2_NO_QDMA_PREFETCH_RX"))) {
		/* If no prefetch is configured. */
		qdma_dev->vqs[i].dequeue_job =
				dpdmai_dev_dequeue_multijob_no_prefetch;
		DPAA2_QDMA_INFO("No Prefetch RX Mode enabled");
	} else {
		qdma_dev->vqs[i].dequeue_job =
			dpdmai_dev_dequeue_multijob_prefetch;
	}

	qdma_dev->vqs[i].enqueue_job = dpdmai_dev_enqueue_multi;

	if (q_config->rbp != NULL)
		memcpy(&qdma_dev->vqs[i].rbp, q_config->rbp,
		       sizeof(struct rte_qdma_rbp));

	rte_spinlock_unlock(&qdma_dev->lock);

	return i;
}
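
/*
 * Burst enqueue entry point (rawdev enqueue_bufs). Illustrative usage
 * sketch, not part of this driver; src_iova/dst_iova are assumed
 * IOVA-contiguous buffers:
 *
 *	struct rte_qdma_job copy_job = {
 *		.src = src_iova, .dest = dst_iova, .len = len,
 *		.flags = RTE_QDMA_JOB_SRC_PHY | RTE_QDMA_JOB_DEST_PHY,
 *	};
 *	struct rte_qdma_job *jobs[] = { &copy_job };
 *	struct rte_qdma_enqdeq ctxt = { .vq_id = vq_id, .job = jobs };
 *
 *	rte_rawdev_enqueue_buffers(dev_id, NULL, 1, &ctxt);
 */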
static int
dpaa2_qdma_enqueue(struct rte_rawdev *rawdev,
		   __rte_unused struct rte_rawdev_buf **buffers,
		   unsigned int nb_jobs,
		   rte_rawdev_obj_t context)
{
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct rte_qdma_enqdeq *e_context =
		(struct rte_qdma_enqdeq *)context;
	struct qdma_virt_queue *qdma_vq =
		&dpdmai_dev->qdma_dev->vqs[e_context->vq_id];
	int ret;

	/* Return error in case of wrong lcore_id */
	if (rte_lcore_id() != qdma_vq->lcore_id) {
		DPAA2_QDMA_ERR("QDMA enqueue for vqid %d on wrong core",
			       e_context->vq_id);
		return -EINVAL;
	}

	ret = qdma_vq->enqueue_job(qdma_vq, e_context->job, nb_jobs);
	if (ret < 0) {
		DPAA2_QDMA_ERR("DPDMAI device enqueue failed: %d", ret);
		return ret;
	}

	qdma_vq->num_enqueues += ret;

	return ret;
}
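
/*
 * Burst dequeue entry point. For a VQ on a shared H/W queue, a pull may
 * return completions that belong to other VQs on the same queue; each job
 * is parked on its owner VQ's status ring, and the caller is then served
 * from its own ring.
 */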
static int
dpaa2_qdma_dequeue(struct rte_rawdev *rawdev,
		   __rte_unused struct rte_rawdev_buf **buffers,
		   unsigned int nb_jobs,
		   rte_rawdev_obj_t cntxt)
{
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
	struct rte_qdma_enqdeq *context =
		(struct rte_qdma_enqdeq *)cntxt;
	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[context->vq_id];
	struct qdma_virt_queue *temp_qdma_vq;
	int ret = 0, i;
	unsigned int ring_count;

	/* Return error in case of wrong lcore_id */
	if (rte_lcore_id() != (unsigned int)(qdma_vq->lcore_id)) {
		DPAA2_QDMA_WARN("QDMA dequeue for vqid %d on wrong core",
				context->vq_id);
		return -1;
	}

	/* Only dequeue when there are pending jobs on VQ */
	if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
		return 0;

	if (qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))
		nb_jobs = (qdma_vq->num_enqueues - qdma_vq->num_dequeues);

	if (qdma_vq->exclusive_hw_queue) {
		/* In case of exclusive queue directly fetch from HW queue */
		ret = qdma_vq->dequeue_job(qdma_vq, NULL,
					   context->job, nb_jobs);
		if (ret < 0) {
			DPAA2_QDMA_ERR(
				"Dequeue from DPDMAI device failed: %d", ret);
			return ret;
		}
		qdma_vq->num_dequeues += ret;
	} else {
		uint16_t temp_vq_id[RTE_QDMA_BURST_NB_MAX];
		/*
		 * Get the QDMA completed jobs from the software ring.
		 * In case they are not available on the ring poke the HW
		 * to fetch completed jobs from corresponding HW queues
		 */
		ring_count = rte_ring_count(qdma_vq->status_ring);
		if (ring_count < nb_jobs) {
			/* TODO - How to have right budget */
			ret = qdma_vq->dequeue_job(qdma_vq,
					temp_vq_id, context->job, nb_jobs);
			for (i = 0; i < ret; i++) {
				temp_qdma_vq = &qdma_dev->vqs[temp_vq_id[i]];
				rte_ring_enqueue(temp_qdma_vq->status_ring,
					(void *)(context->job[i]));
			}
			ring_count = rte_ring_count(
					qdma_vq->status_ring);
		}

		if (ring_count) {
			/* Dequeue job from the software ring
			 * to provide to the user
			 */
			ret = rte_ring_dequeue_bulk(qdma_vq->status_ring,
						    (void **)context->job,
						    ring_count, NULL);
			if (ret)
				qdma_vq->num_dequeues += ret;
		}
	}

	return ret;
}

void
rte_qdma_vq_stats(struct rte_rawdev *rawdev,
		  uint16_t vq_id,
		  struct rte_qdma_vq_stats *vq_status)
{
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vq_id];

	if (qdma_vq->in_use) {
		vq_status->exclusive_hw_queue = qdma_vq->exclusive_hw_queue;
		vq_status->lcore_id = qdma_vq->lcore_id;
		vq_status->num_enqueues = qdma_vq->num_enqueues;
		vq_status->num_dequeues = qdma_vq->num_dequeues;
		vq_status->num_pending_jobs = vq_status->num_enqueues -
				vq_status->num_dequeues;
	}
}

static int
dpaa2_qdma_queue_release(struct rte_rawdev *rawdev,
			 uint16_t vq_id)
{
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;

	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vq_id];

	DPAA2_QDMA_FUNC_TRACE();

	/* In case there are pending jobs on any VQ, return -EBUSY */
	if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
		return -EBUSY;

	rte_spinlock_lock(&qdma_dev->lock);

	if (qdma_vq->exclusive_hw_queue) {
		free_hw_queue(qdma_vq->hw_queue);
	} else {
		if (qdma_vq->status_ring)
			rte_ring_free(qdma_vq->status_ring);

		put_hw_queue(qdma_vq->hw_queue);
	}

	memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));

	rte_spinlock_unlock(&qdma_dev->lock);

	return 0;
}

static void
dpaa2_qdma_stop(struct rte_rawdev *rawdev)
{
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;

	DPAA2_QDMA_FUNC_TRACE();

	qdma_dev->state = 0;
}

static int
dpaa2_qdma_close(struct rte_rawdev *rawdev)
{
	DPAA2_QDMA_FUNC_TRACE();

	dpaa2_qdma_reset(rawdev);

	return 0;
}

static struct rte_rawdev_ops dpaa2_qdma_ops = {
	.dev_configure = dpaa2_qdma_configure,
	.dev_start = dpaa2_qdma_start,
	.dev_stop = dpaa2_qdma_stop,
	.dev_reset = dpaa2_qdma_reset,
	.dev_close = dpaa2_qdma_close,
	.queue_setup = dpaa2_qdma_queue_setup,
	.queue_release = dpaa2_qdma_queue_release,
	.attr_get = dpaa2_qdma_attr_get,
	.enqueue_bufs = dpaa2_qdma_enqueue,
	.dequeue_bufs = dpaa2_qdma_dequeue,
};

static int
add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
{
	struct qdma_hw_queue *queue;
	int i;

	DPAA2_QDMA_FUNC_TRACE();

	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		queue = rte_zmalloc(NULL, sizeof(struct qdma_hw_queue), 0);
		if (!queue) {
			DPAA2_QDMA_ERR(
				"Memory allocation failed for QDMA queue");
			return -ENOMEM;
		}

		queue->dpdmai_dev = dpdmai_dev;
		queue->queue_id = i;

		TAILQ_INSERT_TAIL(&qdma_queue_list, queue, next);
		dpdmai_dev->qdma_dev->num_hw_queues++;
	}

	return 0;
}

static void
remove_hw_queues_from_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
{
	struct qdma_hw_queue *queue = NULL;
	struct qdma_hw_queue *tqueue = NULL;

	DPAA2_QDMA_FUNC_TRACE();

	TAILQ_FOREACH_SAFE(queue, &qdma_queue_list, next, tqueue) {
		if (queue->dpdmai_dev == dpdmai_dev) {
			TAILQ_REMOVE(&qdma_queue_list, queue, next);
			rte_free(queue);
			queue = NULL;
		}
	}
}

static int
dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)
{
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	int ret, i;

	DPAA2_QDMA_FUNC_TRACE();

	/* Remove HW queues from global list */
	remove_hw_queues_from_list(dpdmai_dev);

	ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
			     dpdmai_dev->token);
	if (ret)
		DPAA2_QDMA_ERR("dpdmai disable failed");

	/* Free the DQRR storage of the Rx queues */
	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[i]);

		if (rxq->q_storage) {
			dpaa2_free_dq_storage(rxq->q_storage);
			rte_free(rxq->q_storage);
		}
	}

	/* Close the device at underlying layer */
	ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
	if (ret)
		DPAA2_QDMA_ERR("Failure closing dpdmai device");

	return 0;
}

static int
dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
{
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct dpdmai_rx_queue_cfg rx_queue_cfg;
	struct dpdmai_attr attr;
	struct dpdmai_rx_queue_attr rx_attr;
	struct dpdmai_tx_queue_attr tx_attr;
	int ret, i;

	DPAA2_QDMA_FUNC_TRACE();

	/* Open DPDMAI device */
	dpdmai_dev->dpdmai_id = dpdmai_id;
	dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
	dpdmai_dev->qdma_dev = &q_dev;
	ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
			  dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
	if (ret) {
		DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
		return ret;
	}

	/* Get DPDMAI attributes */
	ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
				    dpdmai_dev->token, &attr);
	if (ret) {
		DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
			       ret);
		goto init_err;
	}
	dpdmai_dev->num_queues = attr.num_of_queues;

	/* Set up Rx Queues */
	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		struct dpaa2_queue *rxq;

		memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
		ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
					  CMD_PRI_LOW,
					  dpdmai_dev->token,
					  i, 0, &rx_queue_cfg);
		if (ret) {
			DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
				       ret);
			goto init_err;
		}

		/* Allocate DQ storage for the DPDMAI Rx queues */
		rxq = &(dpdmai_dev->rx_queue[i]);
		rxq->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!rxq->q_storage) {
			DPAA2_QDMA_ERR("q_storage allocation failed");
			ret = -ENOMEM;
			goto init_err;
		}

		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
		if (ret) {
			DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
			goto init_err;
		}
	}

	/* Get Rx and Tx queues FQID's */
	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
					  dpdmai_dev->token, i, 0, &rx_attr);
		if (ret) {
			DPAA2_QDMA_ERR("Reading device failed with err: %d",
				       ret);
			goto init_err;
		}
		dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;

		ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
					  dpdmai_dev->token, i, 0, &tx_attr);
		if (ret) {
			DPAA2_QDMA_ERR("Reading device failed with err: %d",
				       ret);
			goto init_err;
		}
		dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
	}

	/* Enable the device */
	ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
			    dpdmai_dev->token);
	if (ret) {
		DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
		goto init_err;
	}

	/* Add the HW queue to the global list */
	ret = add_hw_queues_to_list(dpdmai_dev);
	if (ret) {
		DPAA2_QDMA_ERR("Adding H/W queue to list failed");
		goto init_err;
	}

	if (!dpaa2_coherent_no_alloc_cache) {
		if (dpaa2_svr_family == SVR_LX2160A) {
			dpaa2_coherent_no_alloc_cache =
				DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
			dpaa2_coherent_alloc_cache =
				DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
		} else {
			dpaa2_coherent_no_alloc_cache =
				DPAA2_COHERENT_NO_ALLOCATE_CACHE;
			dpaa2_coherent_alloc_cache =
				DPAA2_COHERENT_ALLOCATE_CACHE;
		}
	}

	DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");

	rte_spinlock_init(&dpdmai_dev->qdma_dev->lock);

	return 0;
init_err:
	dpaa2_dpdmai_dev_uninit(rawdev);
	return ret;
}

static int
rte_dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
		     struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_rawdev *rawdev;
	int ret;

	DPAA2_QDMA_FUNC_TRACE();

	rawdev = rte_rawdev_pmd_allocate(dpaa2_dev->device.name,
			sizeof(struct dpaa2_dpdmai_dev),
			rte_socket_id());
	if (!rawdev) {
		DPAA2_QDMA_ERR("Unable to allocate rawdevice");
		return -EINVAL;
	}

	dpaa2_dev->rawdev = rawdev;
	rawdev->dev_ops = &dpaa2_qdma_ops;
	rawdev->device = &dpaa2_dev->device;
	rawdev->driver_name = dpaa2_drv->driver.name;

	/* Invoke PMD device initialization function */
	ret = dpaa2_dpdmai_dev_init(rawdev, dpaa2_dev->object_id);
	if (ret) {
		rte_rawdev_pmd_release(rawdev);
		return ret;
	}

	/* Reset the QDMA device */
	ret = dpaa2_qdma_reset(rawdev);
	if (ret) {
		DPAA2_QDMA_ERR("Resetting QDMA failed");
		return ret;
	}

	return 0;
}

static int
rte_dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_rawdev *rawdev = dpaa2_dev->rawdev;
	int ret;

	DPAA2_QDMA_FUNC_TRACE();

	dpaa2_dpdmai_dev_uninit(rawdev);

	ret = rte_rawdev_pmd_release(rawdev);
	if (ret)
		DPAA2_QDMA_ERR("Device cleanup failed");

	return 0;
}
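
/*
 * Driver registration. The "no_prefetch" devarg (or the
 * DPAA2_NO_QDMA_PREFETCH_RX environment variable) selects the non-prefetch
 * Rx path per VQ, e.g. as an EAL device argument (device name
 * illustrative):
 *
 *	-a fslmc:dpdmai.1,no_prefetch=1
 */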
static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_QDMA,
	.probe = rte_dpaa2_qdma_probe,
	.remove = rte_dpaa2_qdma_remove,
};

RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
	"no_prefetch=<int> ");
RTE_LOG_REGISTER(dpaa2_qdma_logtype, pmd.raw.dpaa2.qdma, INFO);