/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <string.h>
#include <unistd.h>

#include <rte_fslmc.h>
#include <rte_atomic.h>
#include <rte_lcore.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include <mc/fsl_dpdmai.h>
#include <portal/dpaa2_hw_pvt.h>
#include <portal/dpaa2_hw_dpio.h>

#include "rte_pmd_dpaa2_qdma.h"
#include "dpaa2_qdma.h"
#include "dpaa2_qdma_logs.h"
/* Dynamic log type identifier */
int dpaa2_qdma_logtype;

uint32_t dpaa2_coherent_no_alloc_cache;
uint32_t dpaa2_coherent_alloc_cache;

static struct qdma_device qdma_dev;

/* QDMA H/W queues list */
TAILQ_HEAD(qdma_hw_queue_list, qdma_hw_queue);
static struct qdma_hw_queue_list qdma_queue_list
	= TAILQ_HEAD_INITIALIZER(qdma_queue_list);

/* QDMA Virtual Queues */
static struct qdma_virt_queue *qdma_vqs;

/* QDMA per core data */
static struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];
static struct qdma_hw_queue *
alloc_hw_queue(uint32_t lcore_id)
{
	struct qdma_hw_queue *queue = NULL;

	DPAA2_QDMA_FUNC_TRACE();

	/* Get a free queue from the list */
	TAILQ_FOREACH(queue, &qdma_queue_list, next) {
		if (queue->num_users == 0) {
			queue->lcore_id = lcore_id;
			queue->num_users++;
			break;
		}
	}

	return queue;
}
static void
free_hw_queue(struct qdma_hw_queue *queue)
{
	DPAA2_QDMA_FUNC_TRACE();

	queue->num_users--;
}
static struct qdma_hw_queue *
get_hw_queue(uint32_t lcore_id)
{
	struct qdma_per_core_info *core_info;
	struct qdma_hw_queue *queue, *temp;
	uint32_t least_num_users;
	int num_hw_queues, i;

	DPAA2_QDMA_FUNC_TRACE();

	core_info = &qdma_core_info[lcore_id];
	num_hw_queues = core_info->num_hw_queues;

	/*
	 * Allocate a HW queue if there are fewer queues
	 * than the maximum number of per-core queues configured
	 */
	if (num_hw_queues < qdma_dev.max_hw_queues_per_core) {
		queue = alloc_hw_queue(lcore_id);
		if (queue) {
			core_info->hw_queues[num_hw_queues] = queue;
			core_info->num_hw_queues++;
			return queue;
		}
	}

	queue = core_info->hw_queues[0];
	/* In case there is no queue associated with the core return NULL */
	if (!queue)
		return NULL;

	/* Fetch the least loaded H/W queue */
	least_num_users = core_info->hw_queues[0]->num_users;
	for (i = 0; i < num_hw_queues; i++) {
		temp = core_info->hw_queues[i];
		if (temp->num_users < least_num_users) {
			queue = temp;
			least_num_users = temp->num_users;
		}
	}

	return queue;
}
static void
put_hw_queue(struct qdma_hw_queue *queue)
{
	struct qdma_per_core_info *core_info;
	int lcore_id, num_hw_queues, i;

	DPAA2_QDMA_FUNC_TRACE();

	/*
	 * If this is the last user of the queue free it.
	 * Also remove it from QDMA core info.
	 */
	if (queue->num_users == 1) {
		free_hw_queue(queue);

		/* Remove the physical queue from core info */
		lcore_id = queue->lcore_id;
		core_info = &qdma_core_info[lcore_id];
		num_hw_queues = core_info->num_hw_queues;
		for (i = 0; i < num_hw_queues; i++) {
			if (queue == core_info->hw_queues[i])
				break;
		}
		for (; i < num_hw_queues - 1; i++)
			core_info->hw_queues[i] = core_info->hw_queues[i + 1];
		core_info->hw_queues[i] = NULL;
	} else {
		queue->num_users--;
	}
}
int
rte_qdma_init(void)
{
	DPAA2_QDMA_FUNC_TRACE();

	rte_spinlock_init(&qdma_dev.lock);

	return 0;
}

void
rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr)
{
	DPAA2_QDMA_FUNC_TRACE();

	qdma_attr->num_hw_queues = qdma_dev.num_hw_queues;
}
int
rte_qdma_reset(void)
{
	struct qdma_hw_queue *queue;
	int i;

	DPAA2_QDMA_FUNC_TRACE();

	/* In case QDMA device is not in stopped state, return -EBUSY */
	if (qdma_dev.state == 1) {
		DPAA2_QDMA_ERR(
			"Device is in running state. Stop before reset.");
		return -EBUSY;
	}

	/* In case there are pending jobs on any VQ, return -EBUSY */
	for (i = 0; i < qdma_dev.max_vqs; i++) {
		if (qdma_vqs[i].in_use && (qdma_vqs[i].num_enqueues !=
		    qdma_vqs[i].num_dequeues)) {
			DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
			return -EBUSY;
		}
	}

	/* Reset HW queues */
	TAILQ_FOREACH(queue, &qdma_queue_list, next)
		queue->num_users = 0;

	/* Reset and free virtual queues */
	for (i = 0; i < qdma_dev.max_vqs; i++) {
		if (qdma_vqs[i].status_ring)
			rte_ring_free(qdma_vqs[i].status_ring);
	}
	if (qdma_vqs)
		rte_free(qdma_vqs);
	qdma_vqs = NULL;

	/* Reset per core info */
	memset(&qdma_core_info, 0,
		sizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);

	/* Free the FLE pool */
	if (qdma_dev.fle_pool)
		rte_mempool_free(qdma_dev.fle_pool);

	/* Reset QDMA device structure */
	qdma_dev.mode = RTE_QDMA_MODE_HW;
	qdma_dev.max_hw_queues_per_core = 0;
	qdma_dev.fle_pool = NULL;
	qdma_dev.fle_pool_count = 0;
	qdma_dev.max_vqs = 0;

	return 0;
}
int
rte_qdma_configure(struct rte_qdma_config *qdma_config)
{
	int ret;
	char fle_pool_name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */

	DPAA2_QDMA_FUNC_TRACE();

	/* In case QDMA device is not in stopped state, return -EBUSY */
	if (qdma_dev.state == 1) {
		DPAA2_QDMA_ERR(
			"Device is in running state. Stop before config.");
		return -EBUSY;
	}

	/* Reset the QDMA device */
	ret = rte_qdma_reset();
	if (ret) {
		DPAA2_QDMA_ERR("Resetting QDMA failed");
		return ret;
	}

	/* Set mode */
	qdma_dev.mode = qdma_config->mode;

	/* Set max HW queue per core */
	if (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {
		DPAA2_QDMA_ERR("H/W queues per core is more than: %d",
			MAX_HW_QUEUE_PER_CORE);
		return -EINVAL;
	}
	qdma_dev.max_hw_queues_per_core =
		qdma_config->max_hw_queues_per_core;

	/* Allocate Virtual Queues */
	qdma_vqs = rte_malloc("qdma_virtual_queues",
			(sizeof(struct qdma_virt_queue) * qdma_config->max_vqs),
			RTE_CACHE_LINE_SIZE);
	if (!qdma_vqs) {
		DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
		return -ENOMEM;
	}
	qdma_dev.max_vqs = qdma_config->max_vqs;

	/* Allocate FLE pool; just append PID so that in case of
	 * multiprocess, the pools don't collide.
	 */
	snprintf(fle_pool_name, sizeof(fle_pool_name), "qdma_fle_pool%u",
		getpid());
	qdma_dev.fle_pool = rte_mempool_create(fle_pool_name,
			qdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,
			QDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,
			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
	if (!qdma_dev.fle_pool) {
		DPAA2_QDMA_ERR("qdma_fle_pool create failed");
		rte_free(qdma_vqs);
		qdma_vqs = NULL;
		return -ENOMEM;
	}
	qdma_dev.fle_pool_count = qdma_config->fle_pool_count;

	return 0;
}
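
/*
 * Illustrative usage sketch (not part of the driver): an application
 * typically brings the QDMA raw device up with rte_qdma_init(),
 * rte_qdma_configure() and rte_qdma_start(). The field values below
 * are example assumptions only.
 *
 *	struct rte_qdma_config cfg = {
 *		.max_hw_queues_per_core = 2,
 *		.mode = RTE_QDMA_MODE_HW,
 *		.max_vqs = 32,
 *		.fle_pool_count = 4096,
 *	};
 *
 *	if (rte_qdma_init() || rte_qdma_configure(&cfg) || rte_qdma_start())
 *		rte_panic("QDMA bring-up failed\n");
 */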
int
rte_qdma_start(void)
{
	DPAA2_QDMA_FUNC_TRACE();

	qdma_dev.state = 1;

	return 0;
}
int
rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)
{
	char ring_name[32];
	int i;

	DPAA2_QDMA_FUNC_TRACE();

	rte_spinlock_lock(&qdma_dev.lock);

	/* Get a free Virtual Queue */
	for (i = 0; i < qdma_dev.max_vqs; i++) {
		if (qdma_vqs[i].in_use == 0)
			break;
	}

	/* Return in case no VQ is free */
	if (i == qdma_dev.max_vqs) {
		rte_spinlock_unlock(&qdma_dev.lock);
		DPAA2_QDMA_ERR("No free virtual queue available");
		return -ENODEV;
	}

	if (qdma_dev.mode == RTE_QDMA_MODE_HW ||
			(flags & RTE_QDMA_VQ_EXCLUSIVE_PQ)) {
		/* Allocate HW queue for a VQ */
		qdma_vqs[i].hw_queue = alloc_hw_queue(lcore_id);
		qdma_vqs[i].exclusive_hw_queue = 1;
	} else {
		/* Allocate a Ring for Virtual Queue in VQ mode */
		snprintf(ring_name, sizeof(ring_name), "status ring %d", i);
		qdma_vqs[i].status_ring = rte_ring_create(ring_name,
			qdma_dev.fle_pool_count, rte_socket_id(), 0);
		if (!qdma_vqs[i].status_ring) {
			DPAA2_QDMA_ERR("Status ring creation failed for vq");
			rte_spinlock_unlock(&qdma_dev.lock);
			return -ENOMEM;
		}

		/* Get a HW queue (shared) for a VQ */
		qdma_vqs[i].hw_queue = get_hw_queue(lcore_id);
		qdma_vqs[i].exclusive_hw_queue = 0;
	}

	if (qdma_vqs[i].hw_queue == NULL) {
		DPAA2_QDMA_ERR("No H/W queue available for VQ");
		if (qdma_vqs[i].status_ring)
			rte_ring_free(qdma_vqs[i].status_ring);
		qdma_vqs[i].status_ring = NULL;
		rte_spinlock_unlock(&qdma_dev.lock);
		return -ENODEV;
	}

	qdma_vqs[i].in_use = 1;
	qdma_vqs[i].lcore_id = lcore_id;
	memset(&qdma_vqs[i].rbp, 0, sizeof(struct rte_qdma_rbp));
	rte_spinlock_unlock(&qdma_dev.lock);

	return i;
}
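
/*
 * Illustrative sketch (assumptions: one VQ per worker lcore, default
 * flags): with RTE_QDMA_MODE_HW or the RTE_QDMA_VQ_EXCLUSIVE_PQ flag
 * the VQ gets a dedicated H/W queue, otherwise it shares one and uses
 * a status ring for completions.
 *
 *	int vq_id = rte_qdma_vq_create(rte_lcore_id(), 0);
 *
 *	if (vq_id < 0)
 *		rte_panic("no QDMA virtual queue available\n");
 */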
/* Create vq for route-by-port */
int
rte_qdma_vq_create_rbp(uint32_t lcore_id, uint32_t flags,
			struct rte_qdma_rbp *rbp)
{
	int i;

	i = rte_qdma_vq_create(lcore_id, flags);
	if (i < 0)
		return i;

	memcpy(&qdma_vqs[i].rbp, rbp, sizeof(struct rte_qdma_rbp));

	return i;
}
static void
dpaa2_qdma_populate_fle(struct qbman_fle *fle,
			struct rte_qdma_rbp *rbp,
			uint64_t src, uint64_t dest,
			size_t len, uint32_t flags)
{
	struct qdma_sdd *sdd;

	DPAA2_QDMA_FUNC_TRACE();

	sdd = (struct qdma_sdd *)((uint8_t *)(fle) +
		(DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle)));

	/* first frame list to source descriptor */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sdd));
	DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));

	/* source and destination descriptor */
	if (rbp && rbp->enable) {
		/* source */
		sdd->read_cmd.portid = rbp->sportid;
		sdd->rbpcmd_simple.pfid = rbp->spfid;
		sdd->rbpcmd_simple.vfid = rbp->svfid;

		if (rbp->srbp) {
			sdd->read_cmd.rbp = rbp->srbp;
			sdd->read_cmd.rdtype = DPAA2_RBP_MEM_RW;
		} else {
			sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
		}
		sdd++;

		/* destination */
		sdd->write_cmd.portid = rbp->dportid;
		sdd->rbpcmd_simple.pfid = rbp->dpfid;
		sdd->rbpcmd_simple.vfid = rbp->dvfid;

		if (rbp->drbp) {
			sdd->write_cmd.rbp = rbp->drbp;
			sdd->write_cmd.wrttype = DPAA2_RBP_MEM_RW;
		} else {
			sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
		}
	} else {
		sdd->read_cmd.rdtype = dpaa2_coherent_no_alloc_cache;
		sdd++;
		sdd->write_cmd.wrttype = dpaa2_coherent_alloc_cache;
	}
	fle++;

	/* source frame list to source buffer */
	if (flags & RTE_QDMA_JOB_SRC_PHY) {
		DPAA2_SET_FLE_ADDR(fle, src);
		DPAA2_SET_FLE_BMT(fle);
	} else {
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
	}
	DPAA2_SET_FLE_LEN(fle, len);
	fle++;

	/* destination frame list to destination buffer */
	if (flags & RTE_QDMA_JOB_DEST_PHY) {
		DPAA2_SET_FLE_BMT(fle);
		DPAA2_SET_FLE_ADDR(fle, dest);
	} else {
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
	}
	DPAA2_SET_FLE_LEN(fle, len);

	/* Final bit: 1, for last frame list */
	DPAA2_SET_FLE_FIN(fle);
}
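
/*
 * Layout note, derived from dpaa2_qdma_populate_fle() above and
 * dpdmai_dev_set_fd() below: every object taken from the FLE pool
 * holds, back to back, a struct qdma_io_meta, DPAA2_QDMA_MAX_FLE
 * frame-list entries and a source/destination descriptor pair
 * (struct qdma_sdd). The first FLE points at the SDD pair, the next
 * two FLEs point at the source and destination buffers, and the FD
 * handed to QBMAN references the first FLE as a compound frame.
 */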
static inline int dpdmai_dev_set_fd(struct qbman_fd *fd,
				struct rte_qdma_job *job,
				struct rte_qdma_rbp *rbp,
				uint16_t vq_id)
{
	struct qdma_io_meta *io_meta;
	struct qbman_fle *fle;
	int ret;

	/*
	 * Get an FLE/SDD from FLE pool.
	 * Note: IO metadata is before the FLE and SDD memory.
	 */
	ret = rte_mempool_get(qdma_dev.fle_pool, (void **)(&io_meta));
	if (ret) {
		DPAA2_QDMA_DP_DEBUG("Memory alloc failed for FLE");
		return ret;
	}

	/* Set the metadata */
	io_meta->cnxt = (size_t)job;
	io_meta->id = vq_id;

	fle = (struct qbman_fle *)(io_meta + 1);

	DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(fd);
	DPAA2_SET_FD_FRC(fd, QDMA_SER_CTX);

	/* Populate FLE */
	memset(fle, 0, QDMA_FLE_POOL_SIZE);
	dpaa2_qdma_populate_fle(fle, rbp, job->src, job->dest,
				job->len, job->flags);

	return 0;
}
static int
dpdmai_dev_enqueue_multi(struct dpaa2_dpdmai_dev *dpdmai_dev,
			uint16_t txq_id,
			uint16_t vq_id,
			struct rte_qdma_rbp *rbp,
			struct rte_qdma_job **job,
			uint16_t nb_jobs)
{
	struct qbman_fd fd[RTE_QDMA_BURST_NB_MAX];
	struct dpaa2_queue *txq;
	struct qbman_eq_desc eqdesc;
	struct qbman_swp *swp;
	int ret;
	uint32_t num_to_send = 0;
	uint16_t num_tx = 0;
	uint32_t loop;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_QDMA_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	txq = &(dpdmai_dev->tx_queue[txq_id]);

	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
	qbman_eq_desc_set_no_orp(&eqdesc, 0);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);

	memset(fd, 0, RTE_QDMA_BURST_NB_MAX * sizeof(struct qbman_fd));

	while (nb_jobs > 0) {
		num_to_send = (nb_jobs > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_jobs;

		for (loop = 0; loop < num_to_send; loop++) {
			ret = dpdmai_dev_set_fd(&fd[loop],
					job[num_tx], rbp, vq_id);
			if (ret < 0) {
				/* Set nb_jobs to loop, so outer while loop
				 * breaks out.
				 */
				nb_jobs = loop;
				break;
			}
			num_tx++;
		}

		/* Enqueue the packet to the QBMAN */
		uint32_t enqueue_loop = 0;
		while (enqueue_loop < loop) {
			enqueue_loop += qbman_swp_enqueue_multiple(swp,
					&eqdesc,
					&fd[enqueue_loop],
					NULL,
					loop - enqueue_loop);
		}
		nb_jobs -= loop;
	}

	return num_tx;
}
int
rte_qdma_vq_enqueue_multi(uint16_t vq_id,
			struct rte_qdma_job **job,
			uint16_t nb_jobs)
{
	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
	struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
	int ret;

	DPAA2_QDMA_FUNC_TRACE();

	/* Return error in case of wrong lcore_id */
	if (rte_lcore_id() != qdma_vq->lcore_id) {
		DPAA2_QDMA_ERR("QDMA enqueue for vqid %d on wrong core",
				vq_id);
		return -EINVAL;
	}

	ret = dpdmai_dev_enqueue_multi(dpdmai_dev, qdma_pq->queue_id, vq_id,
				&qdma_vq->rbp, job, nb_jobs);
	if (ret < 0) {
		DPAA2_QDMA_ERR("DPDMAI device enqueue failed: %d", ret);
		return ret;
	}

	qdma_vq->num_enqueues += ret;

	return ret;
}

int
rte_qdma_vq_enqueue(uint16_t vq_id,
		struct rte_qdma_job *job)
{
	return rte_qdma_vq_enqueue_multi(vq_id, &job, 1);
}
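
/*
 * Illustrative enqueue sketch; the IOVAs, length and vq_id are
 * assumptions for the example:
 *
 *	struct rte_qdma_job req = {
 *		.src = src_iova,
 *		.dest = dst_iova,
 *		.len = 4096,
 *		.flags = RTE_QDMA_JOB_SRC_PHY | RTE_QDMA_JOB_DEST_PHY,
 *	};
 *	struct rte_qdma_job *jobp = &req;
 *
 *	if (rte_qdma_vq_enqueue_multi(vq_id, &jobp, 1) < 1)
 *		rte_pause();
 *
 * A return value smaller than the requested count means the FD setup
 * failed or the enqueue ring was full; callers would typically retry.
 */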
static inline uint16_t dpdmai_dev_get_job(const struct qbman_fd *fd,
					struct rte_qdma_job **job)
{
	struct qbman_fle *fle;
	struct qdma_io_meta *io_meta;
	uint16_t vqid;

	/*
	 * Fetch metadata from FLE. job and vq_id were set
	 * in metadata in the enqueue operation.
	 */
	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
	io_meta = (struct qdma_io_meta *)(fle) - 1;

	*job = (struct rte_qdma_job *)(size_t)io_meta->cnxt;
	(*job)->status = (DPAA2_GET_FD_ERR(fd) << 8) |
			 (DPAA2_GET_FD_FRC(fd) & 0xFF);

	vqid = io_meta->id;

	/* Free FLE to the pool */
	rte_mempool_put(qdma_dev.fle_pool, io_meta);

	return vqid;
}
static int
dpdmai_dev_dequeue_multijob(struct dpaa2_dpdmai_dev *dpdmai_dev,
			uint16_t rxq_id,
			uint16_t *vq_id,
			struct rte_qdma_job **job,
			uint16_t nb_jobs)
{
	struct dpaa2_queue *rxq;
	struct qbman_result *dq_storage;
	struct qbman_pull_desc pulldesc;
	struct qbman_swp *swp;
	uint32_t fqid;
	uint8_t status, pending;
	uint8_t num_rx = 0;
	const struct qbman_fd *fd;
	uint16_t vqid;
	int ret, next_pull = nb_jobs, num_pulled = 0;

	DPAA2_QDMA_FUNC_TRACE();

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_QDMA_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	rxq = &(dpdmai_dev->rx_queue[rxq_id]);
	fqid = rxq->fqid;

	do {
		dq_storage = rxq->q_storage->dq_storage[0];
		/* Prepare dequeue descriptor */
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);

		if (next_pull > dpaa2_dqrr_size) {
			qbman_pull_desc_set_numframes(&pulldesc,
					dpaa2_dqrr_size);
			next_pull -= dpaa2_dqrr_size;
		} else {
			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
			next_pull = 0;
		}

		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				DPAA2_QDMA_DP_WARN("VDQ command not issued. QBMAN busy");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}

		rte_prefetch0((void *)((size_t)(dq_storage + 1)));
		/* Check if the previous issued command is completed. */
		while (!qbman_check_command_complete(dq_storage))
			;

		num_pulled = 0;
		pending = 1;

		do {
			/* Loop until dq_storage is updated
			 * with new token by QBMAN
			 */
			while (!qbman_check_new_result(dq_storage))
				;
			rte_prefetch0((void *)((size_t)(dq_storage + 2)));

			if (qbman_result_DQ_is_pull_complete(dq_storage)) {
				pending = 0;
				/* Check for valid frame. */
				status = qbman_result_DQ_flags(dq_storage);
				if (unlikely((status &
					QBMAN_DQ_STAT_VALIDFRAME) == 0))
					continue;
			}

			fd = qbman_result_DQ_fd(dq_storage);

			vqid = dpdmai_dev_get_job(fd, &job[num_rx]);
			if (vq_id)
				vq_id[num_rx] = vqid;

			dq_storage++;
			num_rx++;
			num_pulled++;
		} while (pending);
	/* Last VDQ provided all packets and more packets are requested */
	} while (next_pull && num_pulled == dpaa2_dqrr_size);

	return num_rx;
}
int
rte_qdma_vq_dequeue_multi(uint16_t vq_id,
			struct rte_qdma_job **job,
			uint16_t nb_jobs)
{
	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
	struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
	struct qdma_virt_queue *temp_qdma_vq;
	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
	int ring_count, ret = 0, i;

	/* Return error in case of wrong lcore_id */
	if (rte_lcore_id() != (unsigned int)(qdma_vq->lcore_id)) {
		DPAA2_QDMA_WARN("QDMA dequeue for vqid %d on wrong core",
				vq_id);
		return -EINVAL;
	}

	/* Only dequeue when there are pending jobs on VQ */
	if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
		return 0;

	if (qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))
		nb_jobs = (qdma_vq->num_enqueues - qdma_vq->num_dequeues);

	if (qdma_vq->exclusive_hw_queue) {
		/* In case of exclusive queue directly fetch from HW queue */
		ret = dpdmai_dev_dequeue_multijob(dpdmai_dev, qdma_pq->queue_id,
				NULL, job, nb_jobs);
		if (ret < 0) {
			DPAA2_QDMA_ERR(
				"Dequeue from DPDMAI device failed: %d", ret);
			return ret;
		}
		qdma_vq->num_dequeues += ret;
	} else {
		uint16_t temp_vq_id[RTE_QDMA_BURST_NB_MAX];
		/*
		 * Get the QDMA completed jobs from the software ring.
		 * In case they are not available on the ring poke the HW
		 * to fetch completed jobs from corresponding HW queues.
		 */
		ring_count = rte_ring_count(qdma_vq->status_ring);
		if (ring_count < nb_jobs) {
			/* TODO - How to have right budget */
			ret = dpdmai_dev_dequeue_multijob(dpdmai_dev,
					qdma_pq->queue_id,
					temp_vq_id, job, nb_jobs);
			for (i = 0; i < ret; i++) {
				temp_qdma_vq = &qdma_vqs[temp_vq_id[i]];
				rte_ring_enqueue(temp_qdma_vq->status_ring,
					(void *)(job[i]));
			}
			ring_count = rte_ring_count(
					qdma_vq->status_ring);
		}

		if (ring_count) {
			/* Dequeue job from the software ring
			 * to provide to the user
			 */
			ret = rte_ring_dequeue_bulk(qdma_vq->status_ring,
					(void **)job, ring_count, NULL);
			if (ret)
				qdma_vq->num_dequeues += ret;
		}
	}

	return ret;
}
struct rte_qdma_job *
rte_qdma_vq_dequeue(uint16_t vq_id)
{
	int ret;
	struct rte_qdma_job *job = NULL;

	ret = rte_qdma_vq_dequeue_multi(vq_id, &job, 1);
	if (ret < 0)
		DPAA2_QDMA_DP_WARN("DPDMAI device dequeue failed: %d", ret);

	return job;
}
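
/*
 * Illustrative completion-polling sketch; burst size and error counter
 * are assumptions:
 *
 *	struct rte_qdma_job *done[RTE_QDMA_BURST_NB_MAX];
 *	int i, n, errors = 0;
 *
 *	n = rte_qdma_vq_dequeue_multi(vq_id, done, RTE_QDMA_BURST_NB_MAX);
 *	for (i = 0; i < n; i++)
 *		if (done[i]->status != 0)
 *			errors++;
 *
 * A non-zero status carries the FD error bits and frame context (FRC)
 * assembled in dpdmai_dev_get_job() above.
 */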
void
rte_qdma_vq_stats(uint16_t vq_id,
		struct rte_qdma_vq_stats *vq_status)
{
	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];

	DPAA2_QDMA_FUNC_TRACE();

	if (qdma_vq->in_use) {
		vq_status->exclusive_hw_queue = qdma_vq->exclusive_hw_queue;
		vq_status->lcore_id = qdma_vq->lcore_id;
		vq_status->num_enqueues = qdma_vq->num_enqueues;
		vq_status->num_dequeues = qdma_vq->num_dequeues;
		vq_status->num_pending_jobs = vq_status->num_enqueues -
				vq_status->num_dequeues;
	}
}
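
/*
 * Illustrative monitoring sketch; PENDING_JOBS_THRESHOLD and
 * drain_completions() are hypothetical application-side names:
 *
 *	struct rte_qdma_vq_stats st;
 *
 *	rte_qdma_vq_stats(vq_id, &st);
 *	if (st.num_pending_jobs > PENDING_JOBS_THRESHOLD)
 *		drain_completions(vq_id);
 *
 * num_pending_jobs is simply num_enqueues - num_dequeues, so it can be
 * used to throttle producers before the FLE pool or status ring fills.
 */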
int
rte_qdma_vq_destroy(uint16_t vq_id)
{
	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];

	DPAA2_QDMA_FUNC_TRACE();

	/* In case there are pending jobs on any VQ, return -EBUSY */
	if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
		return -EBUSY;

	rte_spinlock_lock(&qdma_dev.lock);

	if (qdma_vq->exclusive_hw_queue) {
		free_hw_queue(qdma_vq->hw_queue);
	} else {
		if (qdma_vq->status_ring)
			rte_ring_free(qdma_vq->status_ring);

		put_hw_queue(qdma_vq->hw_queue);
	}

	memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));

	rte_spinlock_unlock(&qdma_dev.lock);

	return 0;
}
int
rte_qdma_vq_destroy_rbp(uint16_t vq_id)
{
	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];

	DPAA2_QDMA_FUNC_TRACE();

	/* In case there are pending jobs on any VQ, return -EBUSY */
	if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
		return -EBUSY;

	rte_spinlock_lock(&qdma_dev.lock);

	if (qdma_vq->exclusive_hw_queue) {
		free_hw_queue(qdma_vq->hw_queue);
	} else {
		if (qdma_vq->status_ring)
			rte_ring_free(qdma_vq->status_ring);

		put_hw_queue(qdma_vq->hw_queue);
	}

	memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));

	rte_spinlock_unlock(&qdma_dev.lock);

	return 0;
}
void
rte_qdma_stop(void)
{
	DPAA2_QDMA_FUNC_TRACE();

	qdma_dev.state = 0;
}

void
rte_qdma_destroy(void)
{
	DPAA2_QDMA_FUNC_TRACE();

	rte_qdma_reset();
}
static const struct rte_rawdev_ops dpaa2_qdma_ops;

static int
add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
{
	struct qdma_hw_queue *queue;
	int i;

	DPAA2_QDMA_FUNC_TRACE();

	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		queue = rte_zmalloc(NULL, sizeof(struct qdma_hw_queue), 0);
		if (!queue) {
			DPAA2_QDMA_ERR(
				"Memory allocation failed for QDMA queue");
			return -ENOMEM;
		}

		queue->dpdmai_dev = dpdmai_dev;
		queue->queue_id = i;

		TAILQ_INSERT_TAIL(&qdma_queue_list, queue, next);
		qdma_dev.num_hw_queues++;
	}

	return 0;
}
static void
remove_hw_queues_from_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
{
	struct qdma_hw_queue *queue = NULL;
	struct qdma_hw_queue *tqueue = NULL;

	DPAA2_QDMA_FUNC_TRACE();

	TAILQ_FOREACH_SAFE(queue, &qdma_queue_list, next, tqueue) {
		if (queue->dpdmai_dev == dpdmai_dev) {
			TAILQ_REMOVE(&qdma_queue_list, queue, next);
			rte_free(queue);
			queue = NULL;
			qdma_dev.num_hw_queues--;
		}
	}
}
static int
dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)
{
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	int ret, i;

	DPAA2_QDMA_FUNC_TRACE();

	/* Remove HW queues from global list */
	remove_hw_queues_from_list(dpdmai_dev);

	ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
			dpdmai_dev->token);
	if (ret)
		DPAA2_QDMA_ERR("dpdmai disable failed");

	/* Free the DQRR storage of the Rx queues */
	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[i]);

		if (rxq->q_storage) {
			dpaa2_free_dq_storage(rxq->q_storage);
			rte_free(rxq->q_storage);
		}
	}

	/* Close the device at underlying layer */
	ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
	if (ret)
		DPAA2_QDMA_ERR("Failure closing dpdmai device");

	return 0;
}
static int
dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
{
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct dpdmai_rx_queue_cfg rx_queue_cfg;
	struct dpdmai_attr attr;
	struct dpdmai_rx_queue_attr rx_attr;
	struct dpdmai_tx_queue_attr tx_attr;
	int ret, i;

	DPAA2_QDMA_FUNC_TRACE();

	/* Open DPDMAI device */
	dpdmai_dev->dpdmai_id = dpdmai_id;
	dpdmai_dev->dpdmai.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
	ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
			dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
	if (ret) {
		DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
		return ret;
	}

	/* Get DPDMAI attributes */
	ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
			dpdmai_dev->token, &attr);
	if (ret) {
		DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
			ret);
		goto init_err;
	}
	dpdmai_dev->num_queues = attr.num_of_queues;

	/* Set up Rx Queues */
	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		struct dpaa2_queue *rxq;

		memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
		ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
				CMD_PRI_LOW,
				dpdmai_dev->token,
				i, 0, &rx_queue_cfg);
		if (ret) {
			DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
				ret);
			goto init_err;
		}

		/* Allocate DQ storage for the DPDMAI Rx queues */
		rxq = &(dpdmai_dev->rx_queue[i]);
		rxq->q_storage = rte_malloc("dq_storage",
				sizeof(struct queue_storage_info_t),
				RTE_CACHE_LINE_SIZE);
		if (!rxq->q_storage) {
			DPAA2_QDMA_ERR("q_storage allocation failed");
			ret = -ENOMEM;
			goto init_err;
		}

		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
		if (ret) {
			DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
			goto init_err;
		}
	}

	/* Get Rx and Tx queues FQID's */
	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
				dpdmai_dev->token, i, 0, &rx_attr);
		if (ret) {
			DPAA2_QDMA_ERR("Reading Rx queue failed with err: %d",
				ret);
			goto init_err;
		}
		dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;

		ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
				dpdmai_dev->token, i, 0, &tx_attr);
		if (ret) {
			DPAA2_QDMA_ERR("Reading Tx queue failed with err: %d",
				ret);
			goto init_err;
		}
		dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
	}

	/* Enable the device */
	ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
			dpdmai_dev->token);
	if (ret) {
		DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
		goto init_err;
	}

	/* Add the HW queue to the global list */
	ret = add_hw_queues_to_list(dpdmai_dev);
	if (ret) {
		DPAA2_QDMA_ERR("Adding H/W queue to list failed");
		goto init_err;
	}

	if (!dpaa2_coherent_no_alloc_cache) {
		if (dpaa2_svr_family == SVR_LX2160A) {
			dpaa2_coherent_no_alloc_cache =
				DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
			dpaa2_coherent_alloc_cache =
				DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
		} else {
			dpaa2_coherent_no_alloc_cache =
				DPAA2_COHERENT_NO_ALLOCATE_CACHE;
			dpaa2_coherent_alloc_cache =
				DPAA2_COHERENT_ALLOCATE_CACHE;
		}
	}

	DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");

	return 0;
init_err:
	dpaa2_dpdmai_dev_uninit(rawdev);
	return ret;
}
static int
rte_dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
		struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_rawdev *rawdev;
	int ret;

	DPAA2_QDMA_FUNC_TRACE();

	rawdev = rte_rawdev_pmd_allocate(dpaa2_dev->device.name,
			sizeof(struct dpaa2_dpdmai_dev),
			rte_socket_id());
	if (!rawdev) {
		DPAA2_QDMA_ERR("Unable to allocate rawdevice");
		return -EINVAL;
	}

	dpaa2_dev->rawdev = rawdev;
	rawdev->dev_ops = &dpaa2_qdma_ops;
	rawdev->device = &dpaa2_dev->device;
	rawdev->driver_name = dpaa2_drv->driver.name;

	/* Invoke PMD device initialization function */
	ret = dpaa2_dpdmai_dev_init(rawdev, dpaa2_dev->object_id);
	if (ret) {
		rte_rawdev_pmd_release(rawdev);
		return ret;
	}

	return 0;
}
static int
rte_dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_rawdev *rawdev = dpaa2_dev->rawdev;
	int ret;

	DPAA2_QDMA_FUNC_TRACE();

	dpaa2_dpdmai_dev_uninit(rawdev);

	ret = rte_rawdev_pmd_release(rawdev);
	if (ret)
		DPAA2_QDMA_ERR("Device cleanup failed");

	return 0;
}
static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_QDMA,
	.probe = rte_dpaa2_qdma_probe,
	.remove = rte_dpaa2_qdma_remove,
};

RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);

RTE_INIT(dpaa2_qdma_init_log)
{
	dpaa2_qdma_logtype = rte_log_register("pmd.raw.dpaa2.qdma");
	if (dpaa2_qdma_logtype >= 0)
		rte_log_set_level(dpaa2_qdma_logtype, RTE_LOG_INFO);
}