/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <string.h>

#include <rte_eal.h>
#include <rte_fslmc.h>
#include <rte_atomic.h>
#include <rte_lcore.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_kvargs.h>

#include <mc/fsl_dpdmai.h>
#include <portal/dpaa2_hw_pvt.h>
#include <portal/dpaa2_hw_dpio.h>

#include "rte_pmd_dpaa2_qdma.h"
#include "dpaa2_qdma.h"
#include "dpaa2_qdma_logs.h"

#define DPAA2_QDMA_NO_PREFETCH "no_prefetch"

uint32_t dpaa2_coherent_no_alloc_cache;
uint32_t dpaa2_coherent_alloc_cache;
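
/* QDMA device state (mode, pools, counters), shared by all virtual
 * and H/W queues in this process.
 */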
static struct qdma_device qdma_dev;

/* QDMA H/W queues list */
TAILQ_HEAD(qdma_hw_queue_list, qdma_hw_queue);
static struct qdma_hw_queue_list qdma_queue_list
	= TAILQ_HEAD_INITIALIZER(qdma_queue_list);

/* QDMA Virtual Queues */
static struct qdma_virt_queue *qdma_vqs;

/* QDMA per core data */
static struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];

typedef int (dpdmai_dev_dequeue_multijob_t)(struct dpaa2_dpdmai_dev *dpdmai_dev,
					    uint16_t rxq_id,
					    uint16_t *vq_id,
					    struct rte_qdma_job **job,
					    uint16_t nb_jobs);

dpdmai_dev_dequeue_multijob_t *dpdmai_dev_dequeue_multijob;

typedef uint16_t (dpdmai_dev_get_job_t)(const struct qbman_fd *fd,
					struct rte_qdma_job **job);
typedef int (dpdmai_dev_set_fd_t)(struct qbman_fd *fd,
				  struct rte_qdma_job *job,
				  struct rte_qdma_rbp *rbp,
				  uint16_t vq_id);

dpdmai_dev_get_job_t *dpdmai_dev_get_job;
dpdmai_dev_set_fd_t *dpdmai_dev_set_fd;
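
/* Populate an ultrashort-format "simple PCI" FD for a route-by-port job.
 * Source and destination addresses are split into lo/hi halves; the read
 * and write transaction types fall back to the coherent cache settings
 * when the corresponding route-by-port bit is clear.
 */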
static inline int
qdma_populate_fd_pci(phys_addr_t src, phys_addr_t dest,
			uint32_t len, struct qbman_fd *fd,
			struct rte_qdma_rbp *rbp)
{
	fd->simple_pci.saddr_lo = lower_32_bits((uint64_t) (src));
	fd->simple_pci.saddr_hi = upper_32_bits((uint64_t) (src));

	fd->simple_pci.len_sl = len;

	fd->simple_pci.bmt = 1;
	fd->simple_pci.fmt = 3;
	fd->simple_pci.sl = 1;
	fd->simple_pci.ser = 1;

	fd->simple_pci.sportid = rbp->sportid;	/* pcie 3 */
	fd->simple_pci.srbp = rbp->srbp;
	if (rbp->srbp != 0)
		fd->simple_pci.rdttype = 0;
	else
		fd->simple_pci.rdttype = dpaa2_coherent_alloc_cache;

	/* dest is pcie memory */
	fd->simple_pci.dportid = rbp->dportid;	/* pcie 3 */
	fd->simple_pci.drbp = rbp->drbp;
	if (rbp->drbp != 0)
		fd->simple_pci.wrttype = 0;
	else
		fd->simple_pci.wrttype = dpaa2_coherent_no_alloc_cache;

	fd->simple_pci.daddr_lo = lower_32_bits((uint64_t) (dest));
	fd->simple_pci.daddr_hi = upper_32_bits((uint64_t) (dest));

	return 0;
}
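
/* Populate an ultrashort-format "simple DDR" FD for a mem-to-mem job
 * (no route-by-port on either side); both legs use the coherent cacheable
 * transaction types.
 */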
static inline int
qdma_populate_fd_ddr(phys_addr_t src, phys_addr_t dest,
			uint32_t len, struct qbman_fd *fd)
{
	fd->simple_ddr.saddr_lo = lower_32_bits((uint64_t) (src));
	fd->simple_ddr.saddr_hi = upper_32_bits((uint64_t) (src));

	fd->simple_ddr.len = len;

	fd->simple_ddr.bmt = 1;
	fd->simple_ddr.fmt = 3;
	fd->simple_ddr.sl = 1;
	fd->simple_ddr.ser = 1;
	/**
	 * src If RBP=0 {NS,RDTTYPE[3:0]}: 0_1011
	 * Coherent copy of cacheable memory,
	 * lookup in downstream cache, no allocate
	 * on miss
	 */
	fd->simple_ddr.rns = 0;
	fd->simple_ddr.rdttype = dpaa2_coherent_alloc_cache;
	/**
	 * dest If RBP=0 {NS,WRTTYPE[3:0]}: 0_0111
	 * Coherent write of cacheable memory,
	 * lookup in downstream cache, no allocate on miss
	 */
	fd->simple_ddr.wns = 0;
	fd->simple_ddr.wrttype = dpaa2_coherent_no_alloc_cache;

	fd->simple_ddr.daddr_lo = lower_32_bits((uint64_t) (dest));
	fd->simple_ddr.daddr_hi = upper_32_bits((uint64_t) (dest));

	return 0;
}
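
/* Build the long-format compound frame: FLE[0] points to the pair of
 * source/destination descriptors (SDDs), FLE[1] to the source buffer and
 * FLE[2] to the destination buffer. The SRC/DEST_PHY flags mark addresses
 * that are already IOVAs, so no virtual-to-IOVA conversion is done for them.
 */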
static void
dpaa2_qdma_populate_fle(struct qbman_fle *fle,
			struct rte_qdma_rbp *rbp,
			uint64_t src, uint64_t dest,
			size_t len, uint32_t flags)
{
	struct qdma_sdd *sdd;

	sdd = (struct qdma_sdd *)((uint8_t *)(fle) +
		(DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle)));

	/* first frame list to source descriptor */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sdd));
	DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));

	/* source and destination descriptor */
	if (rbp && rbp->enable) {
		/* source */
		sdd->read_cmd.portid = rbp->sportid;
		sdd->rbpcmd_simple.pfid = rbp->spfid;
		sdd->rbpcmd_simple.vfid = rbp->svfid;

		if (rbp->srbp) {
			sdd->read_cmd.rbp = rbp->srbp;
			sdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW;
		} else {
			sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
		}
		sdd++;
		/* destination */
		sdd->write_cmd.portid = rbp->dportid;
		sdd->rbpcmd_simple.pfid = rbp->dpfid;
		sdd->rbpcmd_simple.vfid = rbp->dvfid;

		if (rbp->drbp) {
			sdd->write_cmd.rbp = rbp->drbp;
			sdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW;
		} else {
			sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
		}
	} else {
		sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
		sdd++;
		sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
	}
	fle++;
	/* source frame list to source buffer */
	if (flags & RTE_QDMA_JOB_SRC_PHY) {
		DPAA2_SET_FLE_ADDR(fle, src);
		DPAA2_SET_FLE_BMT(fle);
	} else {
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
	}
	DPAA2_SET_FLE_LEN(fle, len);

	fle++;
	/* destination frame list to destination buffer */
	if (flags & RTE_QDMA_JOB_DEST_PHY) {
		DPAA2_SET_FLE_BMT(fle);
		DPAA2_SET_FLE_ADDR(fle, dest);
	} else {
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
	}
	DPAA2_SET_FLE_LEN(fle, len);

	/* Final bit: 1, for last frame list */
	DPAA2_SET_FLE_FIN(fle);
}
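
/* Ultrashort-format enqueue: stash the job pointer one pointer-sized slot
 * before the source buffer (or, when the source is a PCIe address, before
 * the destination buffer) so the dequeue side can recover it, then build a
 * simple PCI or simple DDR FD.
 */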
static inline int dpdmai_dev_set_fd_us(struct qbman_fd *fd,
					struct rte_qdma_job *job,
					struct rte_qdma_rbp *rbp,
					uint16_t vq_id)
{
	struct rte_qdma_job **ppjob;
	size_t iova;
	int ret = 0;

	if (job->src & QDMA_RBP_UPPER_ADDRESS_MASK)
		iova = (size_t)job->dest;
	else
		iova = (size_t)job->src;

	/* Set the metadata */
	job->vq_id = vq_id;
	ppjob = (struct rte_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
	*ppjob = job;

	if ((rbp->drbp == 1) || (rbp->srbp == 1))
		ret = qdma_populate_fd_pci((phys_addr_t) job->src,
					   (phys_addr_t) job->dest,
					   (uint32_t) job->len, fd, rbp);
	else
		ret = qdma_populate_fd_ddr((phys_addr_t) job->src,
					   (phys_addr_t) job->dest,
					   (uint32_t) job->len, fd);
	return ret;
}
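
/* Long-format enqueue: take an FLE + SDD scratch entry from the FLE pool,
 * record the job pointer in the metadata word that precedes it, and point
 * a compound FD at the populated frame list.
 */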
static inline int dpdmai_dev_set_fd_lf(struct qbman_fd *fd,
					struct rte_qdma_job *job,
					struct rte_qdma_rbp *rbp,
					uint16_t vq_id)
{
	struct rte_qdma_job **ppjob;
	struct qbman_fle *fle;
	int ret;
	/*
	 * Get an FLE/SDD from FLE pool.
	 * Note: IO metadata is before the FLE and SDD memory.
	 */
	ret = rte_mempool_get(qdma_dev.fle_pool, (void **)(&ppjob));
	if (ret) {
		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
		return ret;
	}

	/* Set the metadata */
	job->vq_id = vq_id;
	*ppjob = job;

	fle = (struct qbman_fle *)(ppjob + 1);

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);

	/* Populate FLE */
	memset(fle, 0, QDMA_FLE_POOL_SIZE);
	dpaa2_qdma_populate_fle(fle, rbp, job->src, job->dest,
				job->len, job->flags);

	return 0;
}
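
/* Ultrashort-format dequeue: recover the job pointer stored just before
 * the source (or destination) buffer and fold the FD error bits into the
 * job status.
 */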
static inline uint16_t dpdmai_dev_get_job_us(const struct qbman_fd *fd,
					     struct rte_qdma_job **job)
{
	uint16_t vqid;
	size_t iova;
	struct rte_qdma_job **ppjob;

	if (fd->simple_pci.saddr_hi & (QDMA_RBP_UPPER_ADDRESS_MASK >> 32))
		iova = (size_t)(((uint64_t)fd->simple_pci.daddr_hi) << 32
				| (uint64_t)fd->simple_pci.daddr_lo);
	else
		iova = (size_t)(((uint64_t)fd->simple_pci.saddr_hi) << 32
				| (uint64_t)fd->simple_pci.saddr_lo);

	ppjob = (struct rte_qdma_job **)DPAA2_IOVA_TO_VADDR(iova) - 1;
	*job = (struct rte_qdma_job *)*ppjob;
	(*job)->status = (fd->simple_pci.acc_err << 8) | (fd->simple_pci.error);
	vqid = (*job)->vq_id;

	return vqid;
}
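
/* Long-format dequeue: the FD points at the FLE pool entry, whose leading
 * metadata word holds the job pointer; the entry is returned to the pool
 * once the job and vq_id are extracted.
 */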
static inline uint16_t dpdmai_dev_get_job_lf(const struct qbman_fd *fd,
					     struct rte_qdma_job **job)
{
	struct rte_qdma_job **ppjob;
	uint16_t vqid;
	/*
	 * Fetch metadata from FLE. job and vq_id were set
	 * in metadata in the enqueue operation.
	 */
	ppjob = (struct rte_qdma_job **)
			DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
	ppjob -= 1;

	*job = (struct rte_qdma_job *)*ppjob;
	(*job)->status = (DPAA2_GET_FD_ERR(fd) << 8) |
			 (DPAA2_GET_FD_FRC(fd) & 0xFF);
	vqid = (*job)->vq_id;

	/* Free FLE to the pool */
	rte_mempool_put(qdma_dev.fle_pool, (void *)ppjob);

	return vqid;
}
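
/* Pick the first unused H/W queue from the global list and bind it to the
 * requesting lcore.
 */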
static struct qdma_hw_queue *
alloc_hw_queue(uint32_t lcore_id)
{
	struct qdma_hw_queue *queue = NULL;

	DPAA2_QDMA_FUNC_TRACE();

	/* Get a free queue from the list */
	TAILQ_FOREACH(queue, &qdma_queue_list, next) {
		if (queue->num_users == 0) {
			queue->lcore_id = lcore_id;
			queue->num_users++;
			break;
		}
	}

	return queue;
}

static void
free_hw_queue(struct qdma_hw_queue *queue)
{
	DPAA2_QDMA_FUNC_TRACE();

	queue->num_users--;
}

static struct qdma_hw_queue *
get_hw_queue(uint32_t lcore_id)
{
	struct qdma_per_core_info *core_info;
	struct qdma_hw_queue *queue, *temp;
	uint32_t least_num_users;
	int num_hw_queues, i;

	DPAA2_QDMA_FUNC_TRACE();

	core_info = &qdma_core_info[lcore_id];
	num_hw_queues = core_info->num_hw_queues;

	/*
	 * Allocate a HW queue if there are less queues
	 * than maximum per core queues configured
	 */
	if (num_hw_queues < qdma_dev.max_hw_queues_per_core) {
		queue = alloc_hw_queue(lcore_id);
		if (queue) {
			core_info->hw_queues[num_hw_queues] = queue;
			core_info->num_hw_queues++;
			return queue;
		}
	}

	queue = core_info->hw_queues[0];
	/* In case there is no queue associated with the core return NULL */
	if (!queue)
		return NULL;

	/* Fetch the least loaded H/W queue */
	least_num_users = core_info->hw_queues[0]->num_users;
	for (i = 0; i < num_hw_queues; i++) {
		temp = core_info->hw_queues[i];
		if (temp->num_users < least_num_users) {
			least_num_users = temp->num_users;
			queue = temp;
		}
	}

	if (queue)
		queue->num_users++;

	return queue;
}
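
/* Drop a reference on a shared H/W queue; the last user frees it and
 * compacts the per-core queue table.
 */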
static void
put_hw_queue(struct qdma_hw_queue *queue)
{
	struct qdma_per_core_info *core_info;
	int lcore_id, num_hw_queues, i;

	DPAA2_QDMA_FUNC_TRACE();

	/*
	 * If this is the last user of the queue free it.
	 * Also remove it from QDMA core info.
	 */
	if (queue->num_users == 1) {
		free_hw_queue(queue);

		/* Remove the physical queue from core info */
		lcore_id = queue->lcore_id;
		core_info = &qdma_core_info[lcore_id];
		num_hw_queues = core_info->num_hw_queues;
		for (i = 0; i < num_hw_queues; i++) {
			if (queue == core_info->hw_queues[i])
				break;
		}
		for (; i < num_hw_queues - 1; i++)
			core_info->hw_queues[i] = core_info->hw_queues[i + 1];
		core_info->hw_queues[i] = NULL;
	} else {
		queue->num_users--;
	}
}

int
rte_qdma_init(void)
{
	DPAA2_QDMA_FUNC_TRACE();

	rte_spinlock_init(&qdma_dev.lock);

	return 0;
}

void
rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr)
{
	DPAA2_QDMA_FUNC_TRACE();

	qdma_attr->num_hw_queues = qdma_dev.num_hw_queues;
}

int
rte_qdma_reset(void)
{
	struct qdma_hw_queue *queue;
	int i;

	DPAA2_QDMA_FUNC_TRACE();

	/* In case QDMA device is not in stopped state, return -EBUSY */
	if (qdma_dev.state == 1) {
		DPAA2_QDMA_ERR(
			"Device is in running state. Stop before reset.");
		return -EBUSY;
	}

	/* In case there are pending jobs on any VQ, return -EBUSY */
	for (i = 0; i < qdma_dev.max_vqs; i++) {
		if (qdma_vqs[i].in_use && (qdma_vqs[i].num_enqueues !=
		    qdma_vqs[i].num_dequeues)) {
			DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
			return -EBUSY;
		}
	}

	/* Reset HW queues */
	TAILQ_FOREACH(queue, &qdma_queue_list, next)
		queue->num_users = 0;

	/* Reset and free virtual queues */
	for (i = 0; i < qdma_dev.max_vqs; i++) {
		if (qdma_vqs[i].status_ring)
			rte_ring_free(qdma_vqs[i].status_ring);
	}
	if (qdma_vqs)
		rte_free(qdma_vqs);
	qdma_vqs = NULL;

	/* Reset per core info */
	memset(&qdma_core_info, 0,
		sizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);

	/* Free the FLE pool */
	if (qdma_dev.fle_pool)
		rte_mempool_free(qdma_dev.fle_pool);

	/* Reset QDMA device structure */
	qdma_dev.mode = RTE_QDMA_MODE_HW;
	qdma_dev.max_hw_queues_per_core = 0;
	qdma_dev.fle_pool = NULL;
	qdma_dev.fle_pool_count = 0;
	qdma_dev.max_vqs = 0;

	return 0;
}
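
/* One-time device configuration. Expected call flow (illustrative sketch,
 * not taken verbatim from this file; field names follow struct
 * rte_qdma_config in rte_pmd_dpaa2_qdma.h):
 *
 *	struct rte_qdma_config cfg = {
 *		.max_hw_queues_per_core = 2,
 *		.mode = RTE_QDMA_MODE_HW,
 *		.format = RTE_QDMA_LONG_FORMAT,
 *		.max_vqs = 16,
 *		.fle_pool_count = 4096,
 *	};
 *	rte_qdma_init();
 *	rte_qdma_configure(&cfg);
 *	rte_qdma_start();
 */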
int
rte_qdma_configure(struct rte_qdma_config *qdma_config)
{
	int ret;
	char fle_pool_name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */

	DPAA2_QDMA_FUNC_TRACE();

	/* In case QDMA device is not in stopped state, return -EBUSY */
	if (qdma_dev.state == 1) {
		DPAA2_QDMA_ERR(
			"Device is in running state. Stop before config.");
		return -1;
	}

	/* Reset the QDMA device */
	ret = rte_qdma_reset();
	if (ret) {
		DPAA2_QDMA_ERR("Resetting QDMA failed");
		return ret;
	}

	/* Set mode */
	qdma_dev.mode = qdma_config->mode;

	/* Set max HW queue per core */
	if (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {
		DPAA2_QDMA_ERR("H/W queues per core is more than: %d",
			       MAX_HW_QUEUE_PER_CORE);
		return -EINVAL;
	}
	qdma_dev.max_hw_queues_per_core =
		qdma_config->max_hw_queues_per_core;

	/* Allocate Virtual Queues */
	qdma_vqs = rte_malloc("qdma_virtual_queues",
			(sizeof(struct qdma_virt_queue) * qdma_config->max_vqs),
			RTE_CACHE_LINE_SIZE);
	if (!qdma_vqs) {
		DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
		return -ENOMEM;
	}
	qdma_dev.max_vqs = qdma_config->max_vqs;

	/* Allocate FLE pool; just append PID so that in case of
	 * multiprocess, the pools don't collide.
	 */
	snprintf(fle_pool_name, sizeof(fle_pool_name), "qdma_fle_pool%u",
		 getpid());
	qdma_dev.fle_pool = rte_mempool_create(fle_pool_name,
			qdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,
			QDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,
			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
	if (!qdma_dev.fle_pool) {
		DPAA2_QDMA_ERR("qdma_fle_pool create failed");
		rte_free(qdma_vqs);
		qdma_vqs = NULL;
		return -ENOMEM;
	}
	qdma_dev.fle_pool_count = qdma_config->fle_pool_count;

	if (qdma_config->format == RTE_QDMA_ULTRASHORT_FORMAT) {
		dpdmai_dev_get_job = dpdmai_dev_get_job_us;
		dpdmai_dev_set_fd = dpdmai_dev_set_fd_us;
	} else {
		dpdmai_dev_get_job = dpdmai_dev_get_job_lf;
		dpdmai_dev_set_fd = dpdmai_dev_set_fd_lf;
	}

	return 0;
}

int
rte_qdma_start(void)
{
	DPAA2_QDMA_FUNC_TRACE();

	qdma_dev.state = 1;

	return 0;
}
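
/* Create a virtual queue on the given lcore. HW mode or the EXCLUSIVE_PQ
 * flag dedicates a physical queue to the VQ; otherwise the VQ shares the
 * least-loaded physical queue on that core and gets a status ring for
 * collecting its completions.
 */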
int
rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)
{
	char ring_name[32];
	int i;

	DPAA2_QDMA_FUNC_TRACE();

	rte_spinlock_lock(&qdma_dev.lock);

	/* Get a free Virtual Queue */
	for (i = 0; i < qdma_dev.max_vqs; i++) {
		if (qdma_vqs[i].in_use == 0)
			break;
	}

	/* Return in case no VQ is free */
	if (i == qdma_dev.max_vqs) {
		rte_spinlock_unlock(&qdma_dev.lock);
		DPAA2_QDMA_ERR("No free virtual queue available");
		return -ENODEV;
	}

	if (qdma_dev.mode == RTE_QDMA_MODE_HW ||
			(flags & RTE_QDMA_VQ_EXCLUSIVE_PQ)) {
		/* Allocate HW queue for a VQ */
		qdma_vqs[i].hw_queue = alloc_hw_queue(lcore_id);
		qdma_vqs[i].exclusive_hw_queue = 1;
	} else {
		/* Allocate a Ring for Virtual Queue in VQ mode */
		snprintf(ring_name, sizeof(ring_name), "status ring %d", i);
		qdma_vqs[i].status_ring = rte_ring_create(ring_name,
			qdma_dev.fle_pool_count, rte_socket_id(), 0);
		if (!qdma_vqs[i].status_ring) {
			DPAA2_QDMA_ERR("Status ring creation failed for vq");
			rte_spinlock_unlock(&qdma_dev.lock);
			return -ENOMEM;
		}

		/* Get a HW queue (shared) for a VQ */
		qdma_vqs[i].hw_queue = get_hw_queue(lcore_id);
		qdma_vqs[i].exclusive_hw_queue = 0;
	}

	if (qdma_vqs[i].hw_queue == NULL) {
		DPAA2_QDMA_ERR("No H/W queue available for VQ");
		if (qdma_vqs[i].status_ring)
			rte_ring_free(qdma_vqs[i].status_ring);
		qdma_vqs[i].status_ring = NULL;
		rte_spinlock_unlock(&qdma_dev.lock);
		return -ENODEV;
	}

	qdma_vqs[i].in_use = 1;
	qdma_vqs[i].lcore_id = lcore_id;
	memset(&qdma_vqs[i].rbp, 0, sizeof(struct rte_qdma_rbp));
	rte_spinlock_unlock(&qdma_dev.lock);

	return i;
}

/* create vq for route-by-port */
int
rte_qdma_vq_create_rbp(uint32_t lcore_id, uint32_t flags,
			struct rte_qdma_rbp *rbp)
{
	int i;

	i = rte_qdma_vq_create(lcore_id, flags);
	if (i >= 0)
		memcpy(&qdma_vqs[i].rbp, rbp, sizeof(struct rte_qdma_rbp));

	return i;
}
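
/* Low-level enqueue: convert up to nb_jobs jobs into FDs and push them to
 * the QBMAN enqueue ring in dpaa2_eqcr_size chunks, retrying a bounded
 * number of times when the portal is busy.
 */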
static int
dpdmai_dev_enqueue_multi(struct dpaa2_dpdmai_dev *dpdmai_dev,
			uint16_t txq_id,
			uint16_t vq_id,
			struct rte_qdma_rbp *rbp,
			struct rte_qdma_job **job,
			uint16_t nb_jobs)
{
	struct qbman_fd fd[RTE_QDMA_BURST_NB_MAX];
	struct dpaa2_queue *txq;
	struct qbman_eq_desc eqdesc;
	struct qbman_swp *swp;
	int ret;
	uint32_t num_to_send = 0;
	uint16_t num_tx = 0;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_QDMA_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	txq = &(dpdmai_dev->tx_queue[txq_id]);

	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
	qbman_eq_desc_set_no_orp(&eqdesc, 0);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);

	memset(fd, 0, RTE_QDMA_BURST_NB_MAX * sizeof(struct qbman_fd));

	while (nb_jobs > 0) {
		uint32_t loop;

		num_to_send = (nb_jobs > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_jobs;

		for (loop = 0; loop < num_to_send; loop++) {
			ret = dpdmai_dev_set_fd(&fd[loop],
						job[num_tx], rbp, vq_id);
			if (ret < 0) {
				/* Set nb_jobs to loop, so outer while loop
				 * breaks out.
				 */
				nb_jobs = loop;
				break;
			}

			num_tx++;
		}

		/* Enqueue the packet to the QBMAN */
		uint32_t enqueue_loop = 0, retry_count = 0;
		while (enqueue_loop < loop) {
			ret = qbman_swp_enqueue_multiple(swp,
						&eqdesc,
						&fd[enqueue_loop],
						NULL,
						loop - enqueue_loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
					return num_tx - (loop - enqueue_loop);
			} else {
				enqueue_loop += ret;
				retry_count = 0;
			}
		}
		nb_jobs -= loop;
	}

	return num_tx;
}

int
rte_qdma_vq_enqueue_multi(uint16_t vq_id,
			  struct rte_qdma_job **job,
			  uint16_t nb_jobs)
{
	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
	struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
	int ret;

	/* Return error in case of wrong lcore_id */
	if (rte_lcore_id() != qdma_vq->lcore_id) {
		DPAA2_QDMA_ERR("QDMA enqueue for vqid %d on wrong core",
				vq_id);
		return -EINVAL;
	}

	ret = dpdmai_dev_enqueue_multi(dpdmai_dev,
				       qdma_pq->queue_id,
				       vq_id,
				       &qdma_vq->rbp,
				       job,
				       nb_jobs);
	if (ret < 0) {
		DPAA2_QDMA_ERR("DPDMAI device enqueue failed: %d", ret);
		return ret;
	}

	qdma_vq->num_enqueues += ret;

	return ret;
}

int
rte_qdma_vq_enqueue(uint16_t vq_id,
		    struct rte_qdma_job *job)
{
	return rte_qdma_vq_enqueue_multi(vq_id, &job, 1);
}
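
/* Prefetch variant of the Rx path: two DQ storages are used in a toggle
 * scheme so that a new volatile dequeue command is issued for the next call
 * while the results of the previous one are being processed.
 */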
/* Function to receive a QDMA job for a given device and queue */
static int
dpdmai_dev_dequeue_multijob_prefetch(
			struct dpaa2_dpdmai_dev *dpdmai_dev,
			uint16_t rxq_id,
			uint16_t *vq_id,
			struct rte_qdma_job **job,
			uint16_t nb_jobs)
{
	struct dpaa2_queue *rxq;
	struct qbman_result *dq_storage, *dq_storage1 = NULL;
	struct qbman_pull_desc pulldesc;
	struct qbman_swp *swp;
	struct queue_storage_info_t *q_storage;
	uint32_t fqid;
	uint8_t status, pending;
	uint8_t num_rx = 0;
	const struct qbman_fd *fd;
	uint16_t vqid;
	int ret, pull_size;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_QDMA_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	pull_size = (nb_jobs > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_jobs;
	rxq = &(dpdmai_dev->rx_queue[rxq_id]);
	fqid = rxq->fqid;
	q_storage = rxq->q_storage;

	if (unlikely(!q_storage->active_dqs)) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		q_storage->last_num_pkts = pull_size;
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      q_storage->last_num_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
				(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
			while (!qbman_check_command_complete(
				get_swp_active_dqs(
				DPAA2_PER_LCORE_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_QDMA_DP_WARN(
					"VDQ command not issued. QBMAN busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index,
				   dq_storage);
	}

	dq_storage = q_storage->active_dqs;
	rte_prefetch0((void *)(size_t)(dq_storage));
	rte_prefetch0((void *)(size_t)(dq_storage + 1));

	/* Prepare next pull descriptor. This will give space for the
	 * prefetching done on DQRR entries
	 */
	q_storage->toggle ^= 1;
	dq_storage1 = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, pull_size);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
			(size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);

	/* Check if the previous issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);

	pending = 1;

	do {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_check_new_result(dq_storage))
			;
		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			pending = 0;
			/* Check for valid frame. */
			status = qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd = qbman_result_DQ_fd(dq_storage);

		vqid = dpdmai_dev_get_job(fd, &job[num_rx]);
		if (vq_id)
			vq_id[num_rx] = vqid;

		dq_storage++;
		num_rx++;
	} while (pending);

	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
		while (!qbman_check_command_complete(
			get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
	}
	/* issue a volatile dequeue command for next pull */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_QDMA_DP_WARN("VDQ command is not issued. "
					"QBMAN is busy (2)\n");
			continue;
		}
		break;
	}

	q_storage->active_dqs = dq_storage1;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);

	return num_rx;
}
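
/* No-prefetch variant of the Rx path: issue a volatile dequeue and consume
 * its results within the same call, pulling again until the request is
 * satisfied or the frame queue runs dry.
 */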
static int
dpdmai_dev_dequeue_multijob_no_prefetch(
		struct dpaa2_dpdmai_dev *dpdmai_dev,
		uint16_t rxq_id,
		uint16_t *vq_id,
		struct rte_qdma_job **job,
		uint16_t nb_jobs)
{
	struct dpaa2_queue *rxq;
	struct qbman_result *dq_storage;
	struct qbman_pull_desc pulldesc;
	struct qbman_swp *swp;
	uint32_t fqid;
	uint8_t status, pending;
	uint8_t num_rx = 0;
	const struct qbman_fd *fd;
	uint16_t vqid;
	int ret, next_pull = nb_jobs, num_pulled = 0;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_QDMA_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	rxq = &(dpdmai_dev->rx_queue[rxq_id]);
	fqid = rxq->fqid;

	do {
		dq_storage = rxq->q_storage->dq_storage[0];
		/* Prepare dequeue descriptor */
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);

		if (next_pull > dpaa2_dqrr_size) {
			qbman_pull_desc_set_numframes(&pulldesc,
					dpaa2_dqrr_size);
			next_pull -= dpaa2_dqrr_size;
		} else {
			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
			next_pull = 0;
		}

		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_QDMA_DP_WARN(
					"VDQ command not issued. QBMAN busy");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}

		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
		/* Check if the previous issued command is completed. */
		while (!qbman_check_command_complete(dq_storage))
			;

		num_pulled = 0;
		pending = 1;

		do {
			/* Loop until dq_storage is updated
			 * with new token by QBMAN
			 */
			while (!qbman_check_new_result(dq_storage))
				;
			rte_prefetch0((void *)((size_t)(dq_storage + 2)));

			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
				pending = 0;
				/* Check for valid frame. */
				status = qbman_result_DQ_flags(dq_storage);
				if (unlikely((status &
					QBMAN_DQ_STAT_VALIDFRAME) == 0))
					continue;
			}
			fd = qbman_result_DQ_fd(dq_storage);

			vqid = dpdmai_dev_get_job(fd, &job[num_rx]);
			if (vq_id)
				vq_id[num_rx] = vqid;

			dq_storage++;
			num_rx++;
			num_pulled++;

		} while (pending);
	/* Last VDQ provided all packets and more packets are requested */
	} while (next_pull && num_pulled == dpaa2_dqrr_size);

	return num_rx;
}
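
/* Dequeue up to nb_jobs completed jobs for a VQ. Exclusive VQs read their
 * H/W queue directly; shared VQs demultiplex completions through the per-VQ
 * status rings, since one physical queue carries jobs of many VQs.
 */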
int
rte_qdma_vq_dequeue_multi(uint16_t vq_id,
			  struct rte_qdma_job **job,
			  uint16_t nb_jobs)
{
	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
	struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
	struct qdma_virt_queue *temp_qdma_vq;
	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
	int ring_count, ret = 0, i;

	/* Return error in case of wrong lcore_id */
	if (rte_lcore_id() != (unsigned int)(qdma_vq->lcore_id)) {
		DPAA2_QDMA_WARN("QDMA dequeue for vqid %d on wrong core",
				vq_id);
		return -1;
	}

	/* Only dequeue when there are pending jobs on VQ */
	if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
		return 0;

	if (qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))
		nb_jobs = (qdma_vq->num_enqueues - qdma_vq->num_dequeues);

	if (qdma_vq->exclusive_hw_queue) {
		/* In case of exclusive queue directly fetch from HW queue */
		ret = dpdmai_dev_dequeue_multijob(dpdmai_dev, qdma_pq->queue_id,
						  NULL, job, nb_jobs);
		if (ret < 0) {
			DPAA2_QDMA_ERR(
				"Dequeue from DPDMAI device failed: %d", ret);
			return ret;
		}
		qdma_vq->num_dequeues += ret;
	} else {
		uint16_t temp_vq_id[RTE_QDMA_BURST_NB_MAX];
		/*
		 * Get the QDMA completed jobs from the software ring.
		 * In case they are not available on the ring poke the HW
		 * to fetch completed jobs from corresponding HW queues
		 */
		ring_count = rte_ring_count(qdma_vq->status_ring);
		if (ring_count < nb_jobs) {
			/* TODO - How to have right budget */
			ret = dpdmai_dev_dequeue_multijob(dpdmai_dev,
					qdma_pq->queue_id,
					temp_vq_id, job, nb_jobs);
			for (i = 0; i < ret; i++) {
				temp_qdma_vq = &qdma_vqs[temp_vq_id[i]];
				rte_ring_enqueue(temp_qdma_vq->status_ring,
					(void *)(job[i]));
			}
			ring_count = rte_ring_count(
					qdma_vq->status_ring);
		}

		if (ring_count) {
			/* Dequeue job from the software ring
			 * to provide to the user
			 */
			ret = rte_ring_dequeue_bulk(qdma_vq->status_ring,
					(void **)job, ring_count, NULL);
			if (ret)
				qdma_vq->num_dequeues += ret;
		}
	}

	return ret;
}

struct rte_qdma_job *
rte_qdma_vq_dequeue(uint16_t vq_id)
{
	int ret;
	struct rte_qdma_job *job = NULL;

	ret = rte_qdma_vq_dequeue_multi(vq_id, &job, 1);
	if (ret < 0)
		DPAA2_QDMA_DP_WARN("DPDMAI device dequeue failed: %d", ret);

	return job;
}

void
rte_qdma_vq_stats(uint16_t vq_id,
		  struct rte_qdma_vq_stats *vq_status)
{
	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];

	if (qdma_vq->in_use) {
		vq_status->exclusive_hw_queue = qdma_vq->exclusive_hw_queue;
		vq_status->lcore_id = qdma_vq->lcore_id;
		vq_status->num_enqueues = qdma_vq->num_enqueues;
		vq_status->num_dequeues = qdma_vq->num_dequeues;
		vq_status->num_pending_jobs = vq_status->num_enqueues -
				vq_status->num_dequeues;
	}
}

int
rte_qdma_vq_destroy(uint16_t vq_id)
{
	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];

	DPAA2_QDMA_FUNC_TRACE();

	/* In case there are pending jobs on any VQ, return -EBUSY */
	if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
		return -EBUSY;

	rte_spinlock_lock(&qdma_dev.lock);

	if (qdma_vq->exclusive_hw_queue) {
		free_hw_queue(qdma_vq->hw_queue);
	} else {
		if (qdma_vq->status_ring)
			rte_ring_free(qdma_vq->status_ring);

		put_hw_queue(qdma_vq->hw_queue);
	}

	memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));

	rte_spinlock_unlock(&qdma_dev.lock);

	return 0;
}

int
rte_qdma_vq_destroy_rbp(uint16_t vq_id)
{
	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];

	DPAA2_QDMA_FUNC_TRACE();

	/* In case there are pending jobs on any VQ, return -EBUSY */
	if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
		return -EBUSY;

	rte_spinlock_lock(&qdma_dev.lock);

	if (qdma_vq->exclusive_hw_queue) {
		free_hw_queue(qdma_vq->hw_queue);
	} else {
		if (qdma_vq->status_ring)
			rte_ring_free(qdma_vq->status_ring);

		put_hw_queue(qdma_vq->hw_queue);
	}

	memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));

	rte_spinlock_unlock(&qdma_dev.lock);

	return 0;
}

void
rte_qdma_stop(void)
{
	DPAA2_QDMA_FUNC_TRACE();

	qdma_dev.state = 0;
}

void
rte_qdma_destroy(void)
{
	DPAA2_QDMA_FUNC_TRACE();

	rte_qdma_reset();
}

static const struct rte_rawdev_ops dpaa2_qdma_ops;
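
/* Register every queue of a DPDMAI object in the global H/W queue list. */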
static int
add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
{
	struct qdma_hw_queue *queue;
	int i;

	DPAA2_QDMA_FUNC_TRACE();

	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		queue = rte_zmalloc(NULL, sizeof(struct qdma_hw_queue), 0);
		if (!queue) {
			DPAA2_QDMA_ERR(
				"Memory allocation failed for QDMA queue");
			return -ENOMEM;
		}

		queue->dpdmai_dev = dpdmai_dev;
		queue->queue_id = i;

		TAILQ_INSERT_TAIL(&qdma_queue_list, queue, next);
		qdma_dev.num_hw_queues++;
	}

	return 0;
}

static void
remove_hw_queues_from_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
{
	struct qdma_hw_queue *queue = NULL;
	struct qdma_hw_queue *tqueue = NULL;

	DPAA2_QDMA_FUNC_TRACE();

	TAILQ_FOREACH_SAFE(queue, &qdma_queue_list, next, tqueue) {
		if (queue->dpdmai_dev == dpdmai_dev) {
			TAILQ_REMOVE(&qdma_queue_list, queue, next);
			rte_free(queue);
			queue = NULL;
		}
	}
}

static int
dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)
{
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	int ret, i;

	DPAA2_QDMA_FUNC_TRACE();

	/* Remove HW queues from global list */
	remove_hw_queues_from_list(dpdmai_dev);

	ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
			     dpdmai_dev->token);
	if (ret)
		DPAA2_QDMA_ERR("dpdmai disable failed");

	/* Free the DQRR storage of the Rx queues */
	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[i]);

		if (rxq->q_storage) {
			dpaa2_free_dq_storage(rxq->q_storage);
			rte_free(rxq->q_storage);
		}
	}

	/* Close the device at underlying layer */
	ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
	if (ret)
		DPAA2_QDMA_ERR("Failure closing dpdmai device");

	return 0;
}

static int
check_devargs_handler(__rte_unused const char *key, const char *value,
		      __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}

static int
dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
{
	struct rte_kvargs *kvlist;

	if (!devargs)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (!kvlist)
		return 0;

	if (!rte_kvargs_count(kvlist, key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}

	if (rte_kvargs_process(kvlist, key,
			       check_devargs_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}
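
/* Bind a DPDMAI object: open it through the MC portal, configure and
 * allocate storage for its Rx queues, cache the Rx/Tx FQIDs, enable it,
 * and select the Rx dequeue mode and coherence settings.
 */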
static int
dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
{
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct dpdmai_rx_queue_cfg rx_queue_cfg;
	struct dpdmai_attr attr;
	struct dpdmai_rx_queue_attr rx_attr;
	struct dpdmai_tx_queue_attr tx_attr;
	int ret, i;

	DPAA2_QDMA_FUNC_TRACE();

	/* Open DPDMAI device */
	dpdmai_dev->dpdmai_id = dpdmai_id;
	dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
	ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
			  dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
	if (ret) {
		DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
		return ret;
	}

	/* Get DPDMAI attributes */
	ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
				    dpdmai_dev->token, &attr);
	if (ret) {
		DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
			       ret);
		goto init_err;
	}
	dpdmai_dev->num_queues = attr.num_of_queues;

	/* Set up Rx Queues */
	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		struct dpaa2_queue *rxq;

		memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
		ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
					  CMD_PRI_LOW,
					  dpdmai_dev->token,
					  i, 0, &rx_queue_cfg);
		if (ret) {
			DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
				       ret);
			goto init_err;
		}

		/* Allocate DQ storage for the DPDMAI Rx queues */
		rxq = &(dpdmai_dev->rx_queue[i]);
		rxq->q_storage = rte_malloc("dq_storage",
					sizeof(struct queue_storage_info_t),
					RTE_CACHE_LINE_SIZE);
		if (!rxq->q_storage) {
			DPAA2_QDMA_ERR("q_storage allocation failed");
			ret = -ENOMEM;
			goto init_err;
		}

		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
		if (ret) {
			DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
			goto init_err;
		}
	}

	/* Get Rx and Tx queues FQID's */
	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
					  dpdmai_dev->token, i, 0, &rx_attr);
		if (ret) {
			DPAA2_QDMA_ERR("Reading device failed with err: %d",
				       ret);
			goto init_err;
		}
		dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;

		ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
					  dpdmai_dev->token, i, 0, &tx_attr);
		if (ret) {
			DPAA2_QDMA_ERR("Reading device failed with err: %d",
				       ret);
			goto init_err;
		}
		dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
	}

	/* Enable the device */
	ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
			    dpdmai_dev->token);
	if (ret) {
		DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
		goto init_err;
	}

	/* Add the HW queue to the global list */
	ret = add_hw_queues_to_list(dpdmai_dev);
	if (ret) {
		DPAA2_QDMA_ERR("Adding H/W queue to list failed");
		goto init_err;
	}

	if (dpaa2_get_devargs(rawdev->device->devargs,
		DPAA2_QDMA_NO_PREFETCH)) {
		/* If no prefetch is configured. */
		dpdmai_dev_dequeue_multijob =
				dpdmai_dev_dequeue_multijob_no_prefetch;
		DPAA2_QDMA_INFO("No Prefetch RX Mode enabled");
	} else {
		dpdmai_dev_dequeue_multijob =
			dpdmai_dev_dequeue_multijob_prefetch;
	}

	if (!dpaa2_coherent_no_alloc_cache) {
		if (dpaa2_svr_family == SVR_LX2160A) {
			dpaa2_coherent_no_alloc_cache =
				DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
			dpaa2_coherent_alloc_cache =
				DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
		} else {
			dpaa2_coherent_no_alloc_cache =
				DPAA2_COHERENT_NO_ALLOCATE_CACHE;
			dpaa2_coherent_alloc_cache =
				DPAA2_COHERENT_ALLOCATE_CACHE;
		}
	}

	DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");

	return 0;
init_err:
	dpaa2_dpdmai_dev_uninit(rawdev);
	return ret;
}

static int
rte_dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
		     struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_rawdev *rawdev;
	int ret;

	DPAA2_QDMA_FUNC_TRACE();

	rawdev = rte_rawdev_pmd_allocate(dpaa2_dev->device.name,
			sizeof(struct dpaa2_dpdmai_dev),
			rte_socket_id());
	if (!rawdev) {
		DPAA2_QDMA_ERR("Unable to allocate rawdevice");
		return -EINVAL;
	}

	dpaa2_dev->rawdev = rawdev;
	rawdev->dev_ops = &dpaa2_qdma_ops;
	rawdev->device = &dpaa2_dev->device;
	rawdev->driver_name = dpaa2_drv->driver.name;

	/* Invoke PMD device initialization function */
	ret = dpaa2_dpdmai_dev_init(rawdev, dpaa2_dev->object_id);
	if (ret) {
		rte_rawdev_pmd_release(rawdev);
		return ret;
	}

	return 0;
}

static int
rte_dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_rawdev *rawdev = dpaa2_dev->rawdev;
	int ret;

	DPAA2_QDMA_FUNC_TRACE();

	dpaa2_dpdmai_dev_uninit(rawdev);

	ret = rte_rawdev_pmd_release(rawdev);
	if (ret)
		DPAA2_QDMA_ERR("Device cleanup failed");

	return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_QDMA,
	.probe = rte_dpaa2_qdma_probe,
	.remove = rte_dpaa2_qdma_remove,
};

RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
	"no_prefetch=<int> ");
RTE_LOG_REGISTER(dpaa2_qdma_logtype, pmd.raw.dpaa2.qdma, INFO);