/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#include <rte_atomic.h>
#include <rte_lcore.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_kvargs.h>

#include <mc/fsl_dpdmai.h>
#include <portal/dpaa2_hw_pvt.h>
#include <portal/dpaa2_hw_dpio.h>

#include "rte_pmd_dpaa2_qdma.h"
#include "dpaa2_qdma.h"
#include "dpaa2_qdma_logs.h"

#define DPAA2_QDMA_NO_PREFETCH "no_prefetch"

/* Dynamic log type identifier */
int dpaa2_qdma_logtype;

uint32_t dpaa2_coherent_no_alloc_cache;
uint32_t dpaa2_coherent_alloc_cache;

static struct qdma_device q_dev;

/* QDMA H/W queues list */
TAILQ_HEAD(qdma_hw_queue_list, qdma_hw_queue);
static struct qdma_hw_queue_list qdma_queue_list
	= TAILQ_HEAD_INITIALIZER(qdma_queue_list);

/* QDMA per core data */
static struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];
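
/*
 * Build an ultra-short format FD in the "simple PCIe" layout. Used on the
 * enqueue path when route-by-port (RBP) is enabled for the source and/or
 * destination, i.e. when one side of the copy sits behind a PCIe port.
 */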
qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
		uint32_t len, struct qbman_fd *fd,
		struct rte_qdma_rbp *rbp)
	fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
	fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));

	fd->simple_pci.len_sl = len;

	fd->simple_pci.bmt = 1;
	fd->simple_pci.fmt = 3;
	fd->simple_pci.sl = 1;
	fd->simple_pci.ser = 1;

	fd->simple_pci.sportid = rbp->sportid;	/* pcie 3 */
	fd->simple_pci.srbp = rbp->srbp;

	fd->simple_pci.rdttype = 0;
	fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;

	/* dest is pcie memory */
	fd->simple_pci.dportid = rbp->dportid;	/* pcie 3 */
	fd->simple_pci.drbp = rbp->drbp;

	fd->simple_pci.wrttype = 0;
	fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;

	fd->simple_pci.daddr_lo = lower_32_bits((uint64_t) (dest));
	fd->simple_pci.daddr_hi = upper_32_bits((uint64_t) (dest));
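
/*
 * Build an ultra-short format FD in the "simple DDR" layout. Used on the
 * enqueue path when RBP is disabled, i.e. both source and destination are
 * plain memory addresses.
 */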
qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
		uint32_t len, struct qbman_fd *fd)
	fd->simple_ddr.saddr_lo = lower_32_bits((uint64_t) (src));
	fd->simple_ddr.saddr_hi = upper_32_bits((uint64_t) (src));

	fd->simple_ddr.len = len;

	fd->simple_ddr.bmt = 1;
	fd->simple_ddr.fmt = 3;
	fd->simple_ddr.sl = 1;
	fd->simple_ddr.ser = 1;
	/*
	 * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011
	 * Coherent copy of cacheable memory,
	 * lookup in downstream cache, no allocate
	 */
	fd->simple_ddr.rns = 0;
	fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
	/*
	 * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111
	 * Coherent write of cacheable memory,
	 * lookup in downstream cache, no allocate on miss
	 */
	fd->simple_ddr.wns = 0;
	fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;

	fd->simple_ddr.daddr_lo = lower_32_bits((uint64_t) (dest));
	fd->simple_ddr.daddr_hi = upper_32_bits((uint64_t) (dest));
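
/*
 * Fill the frame list entries (FLE) and source/destination descriptors (SDD)
 * of a long format (compound) FD. The FLE table is followed in memory by the
 * SDD pair; the first FLE points at the SDDs and the following FLEs describe
 * the source and destination buffers.
 */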
dpaa2_qdma_populate_fle(struct qbman_fle *fle,
		struct rte_qdma_rbp *rbp,
		uint64_t src, uint64_t dest,
		size_t len, uint32_t flags)
	struct qdma_sdd *sdd;

	sdd = (struct qdma_sdd *)((uint8_t *)(fle) +
		(DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle)));

	/* first frame list to source descriptor */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sdd));
	DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));

	/* source and destination descriptor */
	if (rbp && rbp->enable) {
		sdd->read_cmd.portid = rbp->sportid;
		sdd->rbpcmd_simple.pfid = rbp->spfid;
		sdd->rbpcmd_simple.vfid = rbp->svfid;
		sdd->read_cmd.rbp = rbp->srbp;
		sdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW;
		sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
		sdd->write_cmd.portid = rbp->dportid;
		sdd->rbpcmd_simple.pfid = rbp->dpfid;
		sdd->rbpcmd_simple.vfid = rbp->dvfid;
		sdd->write_cmd.rbp = rbp->drbp;
		sdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW;
		sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
	sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
	sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;

	/* source frame list to source buffer */
	if (flags & RTE_QDMA_JOB_SRC_PHY) {
		DPAA2_SET_FLE_ADDR(fle, src);
		DPAA2_SET_FLE_BMT(fle);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
	DPAA2_SET_FLE_LEN(fle, len);

	/* destination frame list to destination buffer */
	if (flags & RTE_QDMA_JOB_DEST_PHY) {
		DPAA2_SET_FLE_BMT(fle);
		DPAA2_SET_FLE_ADDR(fle, dest);
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
	DPAA2_SET_FLE_LEN(fle, len);

	/* Final bit: 1, for last frame list */
	DPAA2_SET_FLE_FIN(fle);
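
/*
 * Ultra-short format enqueue: the rte_qdma_job pointer is stashed one
 * pointer-slot in front of the transfer buffer (the destination buffer when
 * the source is a PCIe address) so the completion path can recover it.
 */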
static inline int dpdmai_dev_set_fd_us(
		struct qdma_virt_queue *qdma_vq,
		struct rte_qdma_job *job)
	struct rte_qdma_rbp *rbp = &qdma_vq->rbp;
	struct rte_qdma_job **ppjob;

	if (job->src & QDMA_RBP_UPPER_ADDRESS_MASK)
		iova = (size_t)job->dest;
		iova = (size_t)job->src;

	/* Set the metadata */
	job->vq_id = qdma_vq->vq_id;
	ppjob = (struct rte_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;

	if ((rbp->drbp == 1) || (rbp->srbp == 1))
		ret = qdma_populate_fd_pci((phys_addr_t) job->src,
					(phys_addr_t) job->dest,
		ret = qdma_populate_fd_ddr((phys_addr_t) job->src,
					(phys_addr_t) job->dest,
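
/*
 * Long format enqueue: take an FLE/SDD block from the per-device FLE pool,
 * record the job pointer in the metadata that precedes the FLE table, and
 * point a compound-format FD at the FLE table.
 */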
static inline int dpdmai_dev_set_fd_lf(
		struct qdma_virt_queue *qdma_vq,
		struct rte_qdma_job *job)
	struct rte_qdma_rbp *rbp = &qdma_vq->rbp;
	struct rte_qdma_job **ppjob;
	struct qbman_fle *fle;
	struct qdma_device *qdma_dev = QDMA_DEV_OF_VQ(qdma_vq);

	/*
	 * Get an FLE/SDD from the FLE pool.
	 * Note: IO metadata is before the FLE and SDD memory.
	 */
	ret = rte_mempool_get(qdma_dev->fle_pool, (void **)(&ppjob));
		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");

	/* Set the metadata */
	job->vq_id = qdma_vq->vq_id;

	fle = (struct qbman_fle *)(ppjob + 1);

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);

	memset(fle, 0, QDMA_FLE_POOL_SIZE);

	dpaa2_qdma_populate_fle(fle, rbp, job->src, job->dest,
				job->len, job->flags);
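
/*
 * Ultra-short format completion: recover the job pointer stored just in
 * front of the transfer buffer and report the FD error bits through
 * job->status.
 */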
static inline uint16_t dpdmai_dev_get_job_us(
		struct qdma_virt_queue *qdma_vq __rte_unused,
		const struct qbman_fd *fd,
		struct rte_qdma_job **job)
	struct rte_qdma_job **ppjob;

	if (fd->simple_pci.saddr_hi & (QDMA_RBP_UPPER_ADDRESS_MASK >> 32))
		iova = (size_t)(((uint64_t)fd->simple_pci.daddr_hi) << 32
				| (uint64_t)fd->simple_pci.daddr_lo);
		iova = (size_t)(((uint64_t)fd->simple_pci.saddr_hi) << 32
				| (uint64_t)fd->simple_pci.saddr_lo);

	ppjob = (struct rte_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
	*job = (struct rte_qdma_job *)*ppjob;
	(*job)->status = (fd->simple_pci.acc_err << 8) |
			 (fd->simple_pci.error);
	vqid = (*job)->vq_id;
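
/*
 * Long format completion: the FD address points at the FLE block, the job
 * pointer is read back from its metadata, and the FLE/SDD block is returned
 * to the FLE pool once the job has been extracted.
 */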
static inline uint16_t dpdmai_dev_get_job_lf(
		struct qdma_virt_queue *qdma_vq,
		const struct qbman_fd *fd,
		struct rte_qdma_job **job)
	struct rte_qdma_job **ppjob;
	struct qdma_device *qdma_dev = QDMA_DEV_OF_VQ(qdma_vq);

	/*
	 * Fetch metadata from FLE. job and vq_id were set
	 * in metadata in the enqueue operation.
	 */
	ppjob = (struct rte_qdma_job **)
			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
	*job = (struct rte_qdma_job *)*ppjob;
	(*job)->status = (DPAA2_GET_FD_ERR(fd) << 8) |
			 (DPAA2_GET_FD_FRC(fd) & 0xFF);
	vqid = (*job)->vq_id;

	/* Free FLE to the pool */
	rte_mempool_put(qdma_dev->fle_pool, (void *)ppjob);

/* Function to receive a QDMA job for a given device and queue */
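/*
 * Prefetch variant: two per-queue dq_storage buffers are used in a ping-pong
 * fashion. While the results of the previous volatile dequeue (VDQ) command
 * are being processed, the next VDQ command is issued on the other buffer so
 * that QBMAN can fill it in the background.
 */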
dpdmai_dev_dequeue_multijob_prefetch(
		struct qdma_virt_queue *qdma_vq,
		struct rte_qdma_job **job,
	struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
	uint16_t rxq_id = qdma_pq->queue_id;

	struct dpaa2_queue *rxq;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	struct qbman_pull_desc pulldesc;
	struct qbman_swp *swp;
	struct queue_storage_info_t *q_storage;
	uint8_t status, pending;
	const struct qbman_fd *fd;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
			"Failed to allocate IO portal, tid: %d\n",
	swp = DPAA2_PER_LCORE_PORTAL;

	pull_size = (nb_jobs > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_jobs;
	rxq = &(dpdmai_dev->rx_queue[rxq_id]);
	q_storage = rxq->q_storage;

	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
				q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
			while (!qbman_check_command_complete(
				DPAA2_PER_LCORE_DPIO->index)))
			clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
		if (qbman_swp_pull(swp, &pulldesc)) {
				"VDQ command not issued. QBMAN busy\n");
			/* Portal was busy, try again */
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index,
	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare the next pull descriptor. This gives space for the
	 * prefetching done on DQRR entries.
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/* Check if the previously issued command has completed.
	 * Also, the SWP may be shared with the Ethernet and SEC drivers.
	 */
	while (!qbman_check_command_complete(dq_storage))
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether the last pull command has expired and set the
		 * condition for loop termination.
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
		/* Check for valid frame. */
		status = qbman_result_DQ_flags(dq_storage);
		if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
		fd = qbman_result_DQ_fd(dq_storage);

		vqid = qdma_vq->get_job(qdma_vq, fd, &job[num_rx]);
			vq_id[num_rx] = vqid;

	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
		while (!qbman_check_command_complete(
			get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);

	/* Issue a volatile dequeue command for the next pull */
	if (qbman_swp_pull(swp, &pulldesc)) {
			"VDQ command is not issued. QBMAN is busy (2)\n");
	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
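
/*
 * Non-prefetch variant: issue one volatile dequeue at a time, wait for it to
 * complete, then process the returned frames; repeat until the requested
 * number of jobs has been pulled. Selected via the "no_prefetch" devarg or
 * the DPAA2_NO_QDMA_PREFETCH_RX environment variable.
 */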
dpdmai_dev_dequeue_multijob_no_prefetch(
		struct qdma_virt_queue *qdma_vq,
		struct rte_qdma_job **job,
	struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
	uint16_t rxq_id = qdma_pq->queue_id;

	struct dpaa2_queue *rxq;
	struct qbman_result *dq_storage;
	struct qbman_pull_desc pulldesc;
	struct qbman_swp *swp;
	uint8_t status, pending;
	const struct qbman_fd *fd;
	int ret, next_pull = nb_jobs, num_pulled = 0;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
			"Failed to allocate IO portal, tid: %d\n",
	swp = DPAA2_PER_LCORE_PORTAL;

	rxq = &(dpdmai_dev->rx_queue[rxq_id]);

		dq_storage = rxq->q_storage->dq_storage[0];
		/* Prepare dequeue descriptor */
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);

		if (next_pull > dpaa2_dqrr_size) {
			qbman_pull_desc_set_numframes(&pulldesc,
			next_pull -= dpaa2_dqrr_size;
			qbman_pull_desc_set_numframes(&pulldesc, next_pull);

		if (qbman_swp_pull(swp, &pulldesc)) {
				"VDQ command not issued. QBMAN busy");
			/* Portal was busy, try again */

		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
		/* Check if the previously issued command has completed. */
		while (!qbman_check_command_complete(dq_storage))

			/* Loop until dq_storage is updated
			 * with new token by QBMAN
			 */
			while (!qbman_check_new_result(dq_storage))
			rte_prefetch0((void *)((size_t)(dq_storage + 2)));

			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status &
				QBMAN_DQ_STAT_VALIDFRAME) == 0))
			fd = qbman_result_DQ_fd(dq_storage);

			vqid = qdma_vq->get_job(qdma_vq, fd, &job[num_rx]);
				vq_id[num_rx] = vqid;

	/* Last VDQ provided all packets and more packets are requested */
	} while (next_pull && num_pulled == dpaa2_dqrr_size);
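
/*
 * Enqueue path: translate up to nb_jobs rte_qdma_job entries into FDs with
 * the virtual queue's set_fd() helper and push them to the DPDMAI Tx queue
 * in bursts of at most dpaa2_eqcr_size, retrying a bounded number of times
 * when the enqueue ring is busy.
 */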
dpdmai_dev_enqueue_multi(
		struct qdma_virt_queue *qdma_vq,
		struct rte_qdma_job **job,
	struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
	uint16_t txq_id = qdma_pq->queue_id;

	struct qbman_fd fd[RTE_QDMA_BURST_NB_MAX];
	struct dpaa2_queue *txq;
	struct qbman_eq_desc eqdesc;
	struct qbman_swp *swp;
	uint32_t num_to_send = 0;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
			"Failed to allocate IO portal, tid: %d\n",
	swp = DPAA2_PER_LCORE_PORTAL;

	txq = &(dpdmai_dev->tx_queue[txq_id]);

	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
	qbman_eq_desc_set_no_orp(&eqdesc, 0);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);

	memset(fd, 0, nb_jobs * sizeof(struct qbman_fd));

	while (nb_jobs > 0) {
		num_to_send = (nb_jobs > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_jobs;

		for (loop = 0; loop < num_to_send; loop++) {
			ret = qdma_vq->set_fd(qdma_vq, &fd[loop], job[num_tx]);
				/* Set nb_jobs to loop, so the outer while
				 * loop breaks out.
				 */

		/* Enqueue the packet to the QBMAN */
		uint32_t enqueue_loop = 0, retry_count = 0;

		while (enqueue_loop < loop) {
			ret = qbman_swp_enqueue_multiple(swp,
					loop - enqueue_loop);
			if (unlikely(ret < 0)) {
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
					return num_tx - (loop - enqueue_loop);
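
/*
 * HW queue book-keeping: alloc_hw_queue() takes an unused DPDMAI queue from
 * the global list, get_hw_queue() shares queues between virtual queues on the
 * same lcore (falling back to the least loaded one), and free/put release the
 * user reference again.
 */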
static struct qdma_hw_queue *
alloc_hw_queue(uint32_t lcore_id)
	struct qdma_hw_queue *queue = NULL;

	DPAA2_QDMA_FUNC_TRACE();

	/* Get a free queue from the list */
	TAILQ_FOREACH(queue, &qdma_queue_list, next) {
		if (queue->num_users == 0) {
			queue->lcore_id = lcore_id;

free_hw_queue(struct qdma_hw_queue *queue)
	DPAA2_QDMA_FUNC_TRACE();

static struct qdma_hw_queue *
get_hw_queue(struct qdma_device *qdma_dev, uint32_t lcore_id)
	struct qdma_per_core_info *core_info;
	struct qdma_hw_queue *queue, *temp;
	uint32_t least_num_users;
	int num_hw_queues, i;

	DPAA2_QDMA_FUNC_TRACE();

	core_info = &qdma_core_info[lcore_id];
	num_hw_queues = core_info->num_hw_queues;

	/*
	 * Allocate a HW queue if there are fewer queues
	 * than the maximum number of per-core queues configured.
	 */
	if (num_hw_queues < qdma_dev->max_hw_queues_per_core) {
		queue = alloc_hw_queue(lcore_id);
			core_info->hw_queues[num_hw_queues] = queue;
			core_info->num_hw_queues++;

	queue = core_info->hw_queues[0];
	/* In case there is no queue associated with the core, return NULL */

	/* Fetch the least loaded H/W queue */
	least_num_users = core_info->hw_queues[0]->num_users;
	for (i = 0; i < num_hw_queues; i++) {
		temp = core_info->hw_queues[i];
		if (temp->num_users < least_num_users)

put_hw_queue(struct qdma_hw_queue *queue)
	struct qdma_per_core_info *core_info;
	int lcore_id, num_hw_queues, i;

	DPAA2_QDMA_FUNC_TRACE();

	/*
	 * If this is the last user of the queue, free it.
	 * Also remove it from the QDMA core info.
	 */
	if (queue->num_users == 1) {
		free_hw_queue(queue);

		/* Remove the physical queue from core info */
		lcore_id = queue->lcore_id;
		core_info = &qdma_core_info[lcore_id];
		num_hw_queues = core_info->num_hw_queues;
		for (i = 0; i < num_hw_queues; i++) {
			if (queue == core_info->hw_queues[i])
		for (; i < num_hw_queues - 1; i++)
			core_info->hw_queues[i] = core_info->hw_queues[i + 1];
		core_info->hw_queues[i] = NULL;

dpaa2_qdma_attr_get(struct rte_rawdev *rawdev,
		__rte_unused const char *attr_name,
		uint64_t *attr_value)
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
	struct rte_qdma_attr *qdma_attr = (struct rte_qdma_attr *)attr_value;

	DPAA2_QDMA_FUNC_TRACE();

	qdma_attr->num_hw_queues = qdma_dev->num_hw_queues;

dpaa2_qdma_reset(struct rte_rawdev *rawdev)
	struct qdma_hw_queue *queue;
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;

	DPAA2_QDMA_FUNC_TRACE();

	/* In case QDMA device is not in stopped state, return -EBUSY */
	if (qdma_dev->state == 1) {
			"Device is in running state. Stop before reset.");

	/* In case there are pending jobs on any VQ, return -EBUSY */
	for (i = 0; i < qdma_dev->max_vqs; i++) {
		if (qdma_dev->vqs[i].in_use && (qdma_dev->vqs[i].num_enqueues !=
			qdma_dev->vqs[i].num_dequeues)) {
			DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);

	/* Reset HW queues */
	TAILQ_FOREACH(queue, &qdma_queue_list, next)
		queue->num_users = 0;

	/* Reset and free virtual queues */
	for (i = 0; i < qdma_dev->max_vqs; i++) {
		if (qdma_dev->vqs[i].status_ring)
			rte_ring_free(qdma_dev->vqs[i].status_ring);
	rte_free(qdma_dev->vqs);
	qdma_dev->vqs = NULL;

	/* Reset per core info */
	memset(&qdma_core_info, 0,
		sizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);

	/* Free the FLE pool */
	if (qdma_dev->fle_pool)
		rte_mempool_free(qdma_dev->fle_pool);

	/* Reset QDMA device structure */
	qdma_dev->max_hw_queues_per_core = 0;
	qdma_dev->fle_pool = NULL;
	qdma_dev->fle_pool_count = 0;
	qdma_dev->max_vqs = 0;
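
/*
 * dev_configure: record the per-core HW queue limit, allocate the virtual
 * queue array and create the FLE mempool backing long-format descriptors.
 * The pool name carries the PID so that multi-process runs do not collide.
 */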
dpaa2_qdma_configure(const struct rte_rawdev *rawdev,
		rte_rawdev_obj_t config,
	char name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
	struct rte_qdma_config *qdma_config = (struct rte_qdma_config *)config;
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;

	DPAA2_QDMA_FUNC_TRACE();

	if (config_size != sizeof(*qdma_config))

	/* In case QDMA device is not in stopped state, return -EBUSY */
	if (qdma_dev->state == 1) {
			"Device is in running state. Stop before config.");

	/* Set max HW queue per core */
	if (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {
		DPAA2_QDMA_ERR("H/W queues per core is more than: %d",
			       MAX_HW_QUEUE_PER_CORE);
	qdma_dev->max_hw_queues_per_core =
		qdma_config->max_hw_queues_per_core;

	/* Allocate Virtual Queues */
	sprintf(name, "qdma_%d_vq", rawdev->dev_id);
	qdma_dev->vqs = rte_malloc(name,
			(sizeof(struct qdma_virt_queue) * qdma_config->max_vqs),
			RTE_CACHE_LINE_SIZE);
	if (!qdma_dev->vqs) {
		DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
	qdma_dev->max_vqs = qdma_config->max_vqs;

	/* Allocate FLE pool; just append the PID so that in case of
	 * multiprocess, the pools don't collide.
	 */
	snprintf(name, sizeof(name), "qdma_fle_pool%u",
	qdma_dev->fle_pool = rte_mempool_create(name,
			qdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,
			QDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,
			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
	if (!qdma_dev->fle_pool) {
		DPAA2_QDMA_ERR("qdma_fle_pool create failed");
		rte_free(qdma_dev->vqs);
		qdma_dev->vqs = NULL;
	qdma_dev->fle_pool_count = qdma_config->fle_pool_count;

dpaa2_qdma_start(struct rte_rawdev *rawdev)
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;

	DPAA2_QDMA_FUNC_TRACE();

check_devargs_handler(__rte_unused const char *key, const char *value,
		__rte_unused void *opaque)
	if (strcmp(value, "1"))

dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
	struct rte_kvargs *kvlist;

	kvlist = rte_kvargs_parse(devargs->args, NULL);

	if (!rte_kvargs_count(kvlist, key)) {
		rte_kvargs_free(kvlist);

	if (rte_kvargs_process(kvlist, key,
			       check_devargs_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
	rte_kvargs_free(kvlist);
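
/*
 * queue_setup: pick a free virtual queue under the device lock, bind it to an
 * exclusive or shared HW queue, and select the FD helpers (long vs
 * ultra-short format) and the dequeue routine (prefetch vs no-prefetch)
 * according to the queue flags and devargs.
 */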
dpaa2_qdma_queue_setup(struct rte_rawdev *rawdev,
		__rte_unused uint16_t queue_id,
		rte_rawdev_obj_t queue_conf,
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
	struct rte_qdma_queue_config *q_config =
		(struct rte_qdma_queue_config *)queue_conf;

	DPAA2_QDMA_FUNC_TRACE();

	if (conf_size != sizeof(*q_config))

	rte_spinlock_lock(&qdma_dev->lock);

	/* Get a free Virtual Queue */
	for (i = 0; i < qdma_dev->max_vqs; i++) {
		if (qdma_dev->vqs[i].in_use == 0)

	/* Return in case no VQ is free */
	if (i == qdma_dev->max_vqs) {
		rte_spinlock_unlock(&qdma_dev->lock);
		DPAA2_QDMA_ERR("No free virtual queue available");

	if (q_config->flags & RTE_QDMA_VQ_EXCLUSIVE_PQ) {
		/* Allocate HW queue for a VQ */
		qdma_dev->vqs[i].hw_queue = alloc_hw_queue(q_config->lcore_id);
		qdma_dev->vqs[i].exclusive_hw_queue = 1;

		/* Allocate a Ring for Virtual Queue in VQ mode */
		snprintf(ring_name, sizeof(ring_name), "status ring %d", i);
		qdma_dev->vqs[i].status_ring = rte_ring_create(ring_name,
			qdma_dev->fle_pool_count, rte_socket_id(), 0);
		if (!qdma_dev->vqs[i].status_ring) {
			DPAA2_QDMA_ERR("Status ring creation failed for vq");
			rte_spinlock_unlock(&qdma_dev->lock);

		/* Get a HW queue (shared) for a VQ */
		qdma_dev->vqs[i].hw_queue = get_hw_queue(qdma_dev,
		qdma_dev->vqs[i].exclusive_hw_queue = 0;

	if (qdma_dev->vqs[i].hw_queue == NULL) {
		DPAA2_QDMA_ERR("No H/W queue available for VQ");
		if (qdma_dev->vqs[i].status_ring)
			rte_ring_free(qdma_dev->vqs[i].status_ring);
		qdma_dev->vqs[i].status_ring = NULL;
		rte_spinlock_unlock(&qdma_dev->lock);

	qdma_dev->vqs[i].in_use = 1;
	qdma_dev->vqs[i].lcore_id = q_config->lcore_id;
	memset(&qdma_dev->vqs[i].rbp, 0, sizeof(struct rte_qdma_rbp));

	if (q_config->flags & RTE_QDMA_VQ_FD_LONG_FORMAT) {
		qdma_dev->vqs[i].set_fd = dpdmai_dev_set_fd_lf;
		qdma_dev->vqs[i].get_job = dpdmai_dev_get_job_lf;
		qdma_dev->vqs[i].set_fd = dpdmai_dev_set_fd_us;
		qdma_dev->vqs[i].get_job = dpdmai_dev_get_job_us;

	if (dpaa2_get_devargs(rawdev->device->devargs,
			DPAA2_QDMA_NO_PREFETCH) ||
			(getenv("DPAA2_NO_QDMA_PREFETCH_RX"))) {
		/* If no prefetch is configured. */
		qdma_dev->vqs[i].dequeue_job =
			dpdmai_dev_dequeue_multijob_no_prefetch;
		DPAA2_QDMA_INFO("No Prefetch RX Mode enabled");
		qdma_dev->vqs[i].dequeue_job =
			dpdmai_dev_dequeue_multijob_prefetch;

	qdma_dev->vqs[i].enqueue_job = dpdmai_dev_enqueue_multi;

	if (q_config->rbp != NULL)
		memcpy(&qdma_dev->vqs[i].rbp, q_config->rbp,
		       sizeof(struct rte_qdma_rbp));

	rte_spinlock_unlock(&qdma_dev->lock);
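
/*
 * Rawdev enqueue/dequeue entry points: the rte_qdma_enqdeq context carries
 * the target virtual queue id and the job array. Jobs must be submitted and
 * completed from the lcore the virtual queue was bound to at setup time.
 */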
dpaa2_qdma_enqueue(struct rte_rawdev *rawdev,
		__rte_unused struct rte_rawdev_buf **buffers,
		unsigned int nb_jobs,
		rte_rawdev_obj_t context)
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct rte_qdma_enqdeq *e_context =
		(struct rte_qdma_enqdeq *)context;
	struct qdma_virt_queue *qdma_vq =
		&dpdmai_dev->qdma_dev->vqs[e_context->vq_id];

	/* Return error in case of wrong lcore_id */
	if (rte_lcore_id() != qdma_vq->lcore_id) {
		DPAA2_QDMA_ERR("QDMA enqueue for vqid %d on wrong core",

	ret = qdma_vq->enqueue_job(qdma_vq, e_context->job, nb_jobs);
		DPAA2_QDMA_ERR("DPDMAI device enqueue failed: %d", ret);

	qdma_vq->num_enqueues += ret;

dpaa2_qdma_dequeue(struct rte_rawdev *rawdev,
		__rte_unused struct rte_rawdev_buf **buffers,
		unsigned int nb_jobs,
		rte_rawdev_obj_t cntxt)
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
	struct rte_qdma_enqdeq *context =
		(struct rte_qdma_enqdeq *)cntxt;
	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[context->vq_id];
	struct qdma_virt_queue *temp_qdma_vq;
	unsigned int ring_count;

	/* Return error in case of wrong lcore_id */
	if (rte_lcore_id() != (unsigned int)(qdma_vq->lcore_id)) {
		DPAA2_QDMA_WARN("QDMA dequeue for vqid %d on wrong core",

	/* Only dequeue when there are pending jobs on VQ */
	if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)

	if (qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))
		nb_jobs = (qdma_vq->num_enqueues - qdma_vq->num_dequeues);

	if (qdma_vq->exclusive_hw_queue) {
		/* In case of an exclusive queue, fetch directly from the HW queue */
		ret = qdma_vq->dequeue_job(qdma_vq, NULL,
				context->job, nb_jobs);
				"Dequeue from DPDMAI device failed: %d", ret);

		qdma_vq->num_dequeues += ret;
		uint16_t temp_vq_id[RTE_QDMA_BURST_NB_MAX];
		/*
		 * Get the QDMA completed jobs from the software ring.
		 * In case they are not available on the ring, poke the HW
		 * to fetch completed jobs from the corresponding HW queues.
		 */
		ring_count = rte_ring_count(qdma_vq->status_ring);
		if (ring_count < nb_jobs) {
			/* TODO - How to have right budget */
			ret = qdma_vq->dequeue_job(qdma_vq,
					temp_vq_id, context->job, nb_jobs);
			for (i = 0; i < ret; i++) {
				temp_qdma_vq = &qdma_dev->vqs[temp_vq_id[i]];
				rte_ring_enqueue(temp_qdma_vq->status_ring,
					(void *)(context->job[i]));
			ring_count = rte_ring_count(
				qdma_vq->status_ring);

		/* Dequeue jobs from the software ring
		 * to provide to the user.
		 */
		ret = rte_ring_dequeue_bulk(qdma_vq->status_ring,
				(void **)context->job,
		qdma_vq->num_dequeues += ret;

rte_qdma_vq_stats(struct rte_rawdev *rawdev,
		struct rte_qdma_vq_stats *vq_status)
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vq_id];

	if (qdma_vq->in_use) {
		vq_status->exclusive_hw_queue = qdma_vq->exclusive_hw_queue;
		vq_status->lcore_id = qdma_vq->lcore_id;
		vq_status->num_enqueues = qdma_vq->num_enqueues;
		vq_status->num_dequeues = qdma_vq->num_dequeues;
		vq_status->num_pending_jobs = vq_status->num_enqueues -
				vq_status->num_dequeues;

dpaa2_qdma_queue_release(struct rte_rawdev *rawdev,
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;

	struct qdma_virt_queue *qdma_vq = &qdma_dev->vqs[vq_id];

	DPAA2_QDMA_FUNC_TRACE();

	/* In case there are pending jobs on the VQ, return -EBUSY */
	if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)

	rte_spinlock_lock(&qdma_dev->lock);

	if (qdma_vq->exclusive_hw_queue)
		free_hw_queue(qdma_vq->hw_queue);
		if (qdma_vq->status_ring)
			rte_ring_free(qdma_vq->status_ring);

		put_hw_queue(qdma_vq->hw_queue);

	memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));

	rte_spinlock_unlock(&qdma_dev->lock);

dpaa2_qdma_stop(struct rte_rawdev *rawdev)
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;

	DPAA2_QDMA_FUNC_TRACE();

	qdma_dev->state = 0;

dpaa2_qdma_close(struct rte_rawdev *rawdev)
	DPAA2_QDMA_FUNC_TRACE();

	dpaa2_qdma_reset(rawdev);

static struct rte_rawdev_ops dpaa2_qdma_ops = {
	.dev_configure = dpaa2_qdma_configure,
	.dev_start = dpaa2_qdma_start,
	.dev_stop = dpaa2_qdma_stop,
	.dev_reset = dpaa2_qdma_reset,
	.dev_close = dpaa2_qdma_close,
	.queue_setup = dpaa2_qdma_queue_setup,
	.queue_release = dpaa2_qdma_queue_release,
	.attr_get = dpaa2_qdma_attr_get,
	.enqueue_bufs = dpaa2_qdma_enqueue,
	.dequeue_bufs = dpaa2_qdma_dequeue,

add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
	struct qdma_hw_queue *queue;

	DPAA2_QDMA_FUNC_TRACE();

	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		queue = rte_zmalloc(NULL, sizeof(struct qdma_hw_queue), 0);
				"Memory allocation failed for QDMA queue");

		queue->dpdmai_dev = dpdmai_dev;
		queue->queue_id = i;

		TAILQ_INSERT_TAIL(&qdma_queue_list, queue, next);
		dpdmai_dev->qdma_dev->num_hw_queues++;

remove_hw_queues_from_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
	struct qdma_hw_queue *queue = NULL;
	struct qdma_hw_queue *tqueue = NULL;

	DPAA2_QDMA_FUNC_TRACE();

	TAILQ_FOREACH_SAFE(queue, &qdma_queue_list, next, tqueue) {
		if (queue->dpdmai_dev == dpdmai_dev) {
			TAILQ_REMOVE(&qdma_queue_list, queue, next);

dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;

	DPAA2_QDMA_FUNC_TRACE();

	/* Remove HW queues from global list */
	remove_hw_queues_from_list(dpdmai_dev);

	ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
		DPAA2_QDMA_ERR("dpdmai disable failed");

	/* Free the DQRR storage of the Rx queues */
	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[i]);

		if (rxq->q_storage) {
			dpaa2_free_dq_storage(rxq->q_storage);
			rte_free(rxq->q_storage);

	/* Close the device at underlying layer */
	ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
		DPAA2_QDMA_ERR("Failure closing dpdmai device");
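
/*
 * dev_init: open the DPDMAI object through the MC portal, read its
 * attributes, configure the Rx queues and their DQ storage, cache the Rx/Tx
 * FQIDs, enable the object and publish its HW queues on the global list.
 */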
dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct dpdmai_rx_queue_cfg rx_queue_cfg;
	struct dpdmai_attr attr;
	struct dpdmai_rx_queue_attr rx_attr;
	struct dpdmai_tx_queue_attr tx_attr;

	DPAA2_QDMA_FUNC_TRACE();

	/* Open DPDMAI device */
	dpdmai_dev->dpdmai_id = dpdmai_id;
	dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
	dpdmai_dev->qdma_dev = &q_dev;
	ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
			  dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
		DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);

	/* Get DPDMAI attributes */
	ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
				    dpdmai_dev->token, &attr);
		DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
	dpdmai_dev->num_queues = attr.num_of_queues;

	/* Set up Rx Queues */
	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		struct dpaa2_queue *rxq;

		memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
		ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
					  i, 0, &rx_queue_cfg);
			DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",

		/* Allocate DQ storage for the DPDMAI Rx queues */
		rxq = &(dpdmai_dev->rx_queue[i]);
		rxq->q_storage = rte_malloc("dq_storage",
					    sizeof(struct queue_storage_info_t),
					    RTE_CACHE_LINE_SIZE);
		if (!rxq->q_storage) {
			DPAA2_QDMA_ERR("q_storage allocation failed");

		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
			DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");

	/* Get the Rx and Tx queue FQIDs */
	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
					  dpdmai_dev->token, i, 0, &rx_attr);
			DPAA2_QDMA_ERR("Reading device failed with err: %d",
		dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;

		ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
					  dpdmai_dev->token, i, 0, &tx_attr);
			DPAA2_QDMA_ERR("Reading device failed with err: %d",
		dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;

	/* Enable the device */
	ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
		DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);

	/* Add the HW queue to the global list */
	ret = add_hw_queues_to_list(dpdmai_dev);
		DPAA2_QDMA_ERR("Adding H/W queue to list failed");

	if (!dpaa2_coherent_no_alloc_cache) {
		if (dpaa2_svr_family == SVR_LX2160A) {
			dpaa2_coherent_no_alloc_cache =
				DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
			dpaa2_coherent_alloc_cache =
				DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
			dpaa2_coherent_no_alloc_cache =
				DPAA2_COHERENT_NO_ALLOCATE_CACHE;
			dpaa2_coherent_alloc_cache =
				DPAA2_COHERENT_ALLOCATE_CACHE;

	DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");

	rte_spinlock_init(&dpdmai_dev->qdma_dev->lock);

	dpaa2_dpdmai_dev_uninit(rawdev);

rte_dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
		struct rte_dpaa2_device *dpaa2_dev)
	struct rte_rawdev *rawdev;

	DPAA2_QDMA_FUNC_TRACE();

	rawdev = rte_rawdev_pmd_allocate(dpaa2_dev->device.name,
			sizeof(struct dpaa2_dpdmai_dev),
		DPAA2_QDMA_ERR("Unable to allocate rawdevice");

	dpaa2_dev->rawdev = rawdev;
	rawdev->dev_ops = &dpaa2_qdma_ops;
	rawdev->device = &dpaa2_dev->device;
	rawdev->driver_name = dpaa2_drv->driver.name;

	/* Invoke PMD device initialization function */
	ret = dpaa2_dpdmai_dev_init(rawdev, dpaa2_dev->object_id);
		rte_rawdev_pmd_release(rawdev);

	/* Reset the QDMA device */
	ret = dpaa2_qdma_reset(rawdev);
		DPAA2_QDMA_ERR("Resetting QDMA failed");

rte_dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
	struct rte_rawdev *rawdev = dpaa2_dev->rawdev;

	DPAA2_QDMA_FUNC_TRACE();

	dpaa2_dpdmai_dev_uninit(rawdev);

	ret = rte_rawdev_pmd_release(rawdev);
		DPAA2_QDMA_ERR("Device cleanup failed");

static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_QDMA,
	.probe = rte_dpaa2_qdma_probe,
	.remove = rte_dpaa2_qdma_remove,

RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
	"no_prefetch=<int> ");
RTE_LOG_REGISTER(dpaa2_qdma_logtype, pmd.raw.dpaa2.qdma, INFO);