1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018-2019 NXP
9 #include <rte_atomic.h>
10 #include <rte_lcore.h>
11 #include <rte_rawdev.h>
12 #include <rte_rawdev_pmd.h>
13 #include <rte_malloc.h>
15 #include <rte_mempool.h>
16 #include <rte_prefetch.h>
18 #include <mc/fsl_dpdmai.h>
19 #include <portal/dpaa2_hw_pvt.h>
20 #include <portal/dpaa2_hw_dpio.h>
22 #include "dpaa2_qdma.h"
23 #include "dpaa2_qdma_logs.h"
24 #include "rte_pmd_dpaa2_qdma.h"
26 /* Dynamic log type identifier */
27 int dpaa2_qdma_logtype;
30 static struct qdma_device qdma_dev;
32 /* QDMA H/W queues list */
33 TAILQ_HEAD(qdma_hw_queue_list, qdma_hw_queue);
34 static struct qdma_hw_queue_list qdma_queue_list
35 = TAILQ_HEAD_INITIALIZER(qdma_queue_list);
37 /* QDMA Virtual Queues */
38 static struct qdma_virt_queue *qdma_vqs;
40 /* QDMA per core data */
41 static struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];
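/*
 * Pick the first unused H/W queue from the global queue list and bind
 * it to the given lcore; returns NULL if no free queue is left.
 */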
43 static struct qdma_hw_queue *
44 alloc_hw_queue(uint32_t lcore_id)
46 struct qdma_hw_queue *queue = NULL;
48 DPAA2_QDMA_FUNC_TRACE();
50 /* Get a free queue from the list */
51 TAILQ_FOREACH(queue, &qdma_queue_list, next) {
52 if (queue->num_users == 0) {
53 queue->lcore_id = lcore_id;
63 free_hw_queue(struct qdma_hw_queue *queue)
65 DPAA2_QDMA_FUNC_TRACE();
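/*
 * Return a H/W queue for the given lcore: allocate a fresh queue while
 * the core is below its per-core maximum, otherwise hand back the least
 * loaded queue already attached to that core (shared usage).
 */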
71 static struct qdma_hw_queue *
72 get_hw_queue(uint32_t lcore_id)
74 struct qdma_per_core_info *core_info;
75 struct qdma_hw_queue *queue, *temp;
76 uint32_t least_num_users;
79 DPAA2_QDMA_FUNC_TRACE();
81 core_info = &qdma_core_info[lcore_id];
82 num_hw_queues = core_info->num_hw_queues;
85 * Allocate a H/W queue if the core holds fewer queues
86 * than the configured per-core maximum
88 if (num_hw_queues < qdma_dev.max_hw_queues_per_core) {
89 queue = alloc_hw_queue(lcore_id);
91 core_info->hw_queues[num_hw_queues] = queue;
92 core_info->num_hw_queues++;
97 queue = core_info->hw_queues[0];
98 /* In case there is no queue associated with the core, return NULL */
102 /* Fetch the least loaded H/W queue */
103 least_num_users = core_info->hw_queues[0]->num_users;
104 for (i = 0; i < num_hw_queues; i++) {
105 temp = core_info->hw_queues[i];
106 if (temp->num_users < least_num_users)
117 put_hw_queue(struct qdma_hw_queue *queue)
119 struct qdma_per_core_info *core_info;
120 int lcore_id, num_hw_queues, i;
122 DPAA2_QDMA_FUNC_TRACE();
125 * If this is the last user of the queue, free it
126 * and remove it from the QDMA core info.
128 if (queue->num_users == 1) {
129 free_hw_queue(queue);
131 /* Remove the physical queue from core info */
132 lcore_id = queue->lcore_id;
133 core_info = &qdma_core_info[lcore_id];
134 num_hw_queues = core_info->num_hw_queues;
135 for (i = 0; i < num_hw_queues; i++) {
136 if (queue == core_info->hw_queues[i])
139 for (; i < num_hw_queues - 1; i++)
140 core_info->hw_queues[i] = core_info->hw_queues[i + 1];
141 core_info->hw_queues[i] = NULL;
150 DPAA2_QDMA_FUNC_TRACE();
152 rte_spinlock_init(&qdma_dev.lock);
158 rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr)
160 DPAA2_QDMA_FUNC_TRACE();
162 qdma_attr->num_hw_queues = qdma_dev.num_hw_queues;
168 struct qdma_hw_queue *queue;
171 DPAA2_QDMA_FUNC_TRACE();
173 /* In case QDMA device is not in stopped state, return -EBUSY */
174 if (qdma_dev.state == 1) {
176 "Device is in running state. Stop before reset.");
180 /* In case there are pending jobs on any VQ, return -EBUSY */
181 for (i = 0; i < qdma_dev.max_vqs; i++) {
182 if (qdma_vqs[i].in_use && (qdma_vqs[i].num_enqueues !=
183 qdma_vqs[i].num_dequeues))
184 DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
188 /* Reset HW queues */
189 TAILQ_FOREACH(queue, &qdma_queue_list, next)
190 queue->num_users = 0;
192 /* Reset and free virtual queues */
193 for (i = 0; i < qdma_dev.max_vqs; i++) {
194 if (qdma_vqs[i].status_ring)
195 rte_ring_free(qdma_vqs[i].status_ring);
201 /* Reset per core info */
202 memset(&qdma_core_info, 0,
203 sizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);
205 /* Free the FLE pool */
206 if (qdma_dev.fle_pool)
207 rte_mempool_free(qdma_dev.fle_pool);
209 /* Reset QDMA device structure */
210 qdma_dev.mode = RTE_QDMA_MODE_HW;
211 qdma_dev.max_hw_queues_per_core = 0;
212 qdma_dev.fle_pool = NULL;
213 qdma_dev.fle_pool_count = 0;
214 qdma_dev.max_vqs = 0;
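/*
 * Configure the QDMA device: reset it first, then record the operating
 * mode and the per-core H/W queue limit, and allocate the virtual queue
 * array and the FLE (frame list entry) pool.
 */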
220 rte_qdma_configure(struct rte_qdma_config *qdma_config)
223 char fle_pool_name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
225 DPAA2_QDMA_FUNC_TRACE();
227 /* In case QDMA device is not in stopped state, return -EBUSY */
228 if (qdma_dev.state == 1) {
230 "Device is in running state. Stop before config.");
234 /* Reset the QDMA device */
235 ret = rte_qdma_reset();
237 DPAA2_QDMA_ERR("Resetting QDMA failed");
242 qdma_dev.mode = qdma_config->mode;
244 /* Set max HW queue per core */
245 if (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {
246 DPAA2_QDMA_ERR("H/W queues per core is more than: %d",
247 MAX_HW_QUEUE_PER_CORE);
250 qdma_dev.max_hw_queues_per_core =
251 qdma_config->max_hw_queues_per_core;
253 /* Allocate Virtual Queues */
254 qdma_vqs = rte_malloc("qdma_virtual_queues",
255 (sizeof(struct qdma_virt_queue) * qdma_config->max_vqs),
256 RTE_CACHE_LINE_SIZE);
258 DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
261 qdma_dev.max_vqs = qdma_config->max_vqs;
263 /* Allocate FLE pool; just append the PID so that in case of
264 * multiprocess, the pools don't collide.
266 snprintf(fle_pool_name, sizeof(fle_pool_name), "qdma_fle_pool%u",
268 qdma_dev.fle_pool = rte_mempool_create(fle_pool_name,
269 qdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,
270 QDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,
271 NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
272 if (!qdma_dev.fle_pool) {
273 DPAA2_QDMA_ERR("qdma_fle_pool create failed");
278 qdma_dev.fle_pool_count = qdma_config->fle_pool_count;
286 DPAA2_QDMA_FUNC_TRACE();
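/*
 * Create a virtual queue on the given lcore. In H/W mode, or when
 * RTE_QDMA_VQ_EXCLUSIVE_PQ is set, the VQ gets a dedicated H/W queue;
 * otherwise it shares a H/W queue and uses a status ring to hold the
 * completions that belong to this VQ.
 */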
294 rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)
299 DPAA2_QDMA_FUNC_TRACE();
301 rte_spinlock_lock(&qdma_dev.lock);
303 /* Get a free Virtual Queue */
304 for (i = 0; i < qdma_dev.max_vqs; i++) {
305 if (qdma_vqs[i].in_use == 0)
309 /* Return in case no VQ is free */
310 if (i == qdma_dev.max_vqs) {
311 rte_spinlock_unlock(&qdma_dev.lock);
312 DPAA2_QDMA_ERR("No free virtual queue available");
316 if (qdma_dev.mode == RTE_QDMA_MODE_HW ||
317 (flags & RTE_QDMA_VQ_EXCLUSIVE_PQ)) {
318 /* Allocate HW queue for a VQ */
319 qdma_vqs[i].hw_queue = alloc_hw_queue(lcore_id);
320 qdma_vqs[i].exclusive_hw_queue = 1;
322 /* Allocate a ring for the Virtual Queue in VQ mode */
323 snprintf(ring_name, sizeof(ring_name), "status ring %d", i);
324 qdma_vqs[i].status_ring = rte_ring_create(ring_name,
325 qdma_dev.fle_pool_count, rte_socket_id(), 0);
326 if (!qdma_vqs[i].status_ring) {
327 DPAA2_QDMA_ERR("Status ring creation failed for vq");
328 rte_spinlock_unlock(&qdma_dev.lock);
332 /* Get a HW queue (shared) for a VQ */
333 qdma_vqs[i].hw_queue = get_hw_queue(lcore_id);
334 qdma_vqs[i].exclusive_hw_queue = 0;
337 if (qdma_vqs[i].hw_queue == NULL) {
338 DPAA2_QDMA_ERR("No H/W queue available for VQ");
339 if (qdma_vqs[i].status_ring)
340 rte_ring_free(qdma_vqs[i].status_ring);
341 qdma_vqs[i].status_ring = NULL;
342 rte_spinlock_unlock(&qdma_dev.lock);
346 qdma_vqs[i].in_use = 1;
347 qdma_vqs[i].lcore_id = lcore_id;
349 rte_spinlock_unlock(&qdma_dev.lock);
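/*
 * Populate the compound frame list for one job: the first FLE points to
 * the source/destination descriptors (SDDs), the following FLEs point
 * to the source and destination buffers.
 */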
355 dpaa2_qdma_populate_fle(struct qbman_fle *fle,
356 uint64_t src, uint64_t dest,
357 size_t len, uint32_t flags)
359 struct qdma_sdd *sdd;
361 DPAA2_QDMA_FUNC_TRACE();
363 sdd = (struct qdma_sdd *)((uint8_t *)(fle) +
364 (DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle)));
366 /* first frame list to source descriptor */
367 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sdd));
368 DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));
370 /* source and destination descriptor */
371 DPAA2_SET_SDD_RD_COHERENT(sdd); /* source descriptor CMD */
373 DPAA2_SET_SDD_WR_COHERENT(sdd); /* dest descriptor CMD */
376 /* source frame list to source buffer */
377 if (flags & RTE_QDMA_JOB_SRC_PHY) {
378 DPAA2_SET_FLE_ADDR(fle, src);
379 DPAA2_SET_FLE_BMT(fle);
381 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
383 DPAA2_SET_FLE_LEN(fle, len);
386 /* destination frame list to destination buffer */
387 if (flags & RTE_QDMA_JOB_DEST_PHY) {
388 DPAA2_SET_FLE_BMT(fle);
389 DPAA2_SET_FLE_ADDR(fle, dest);
391 DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
393 DPAA2_SET_FLE_LEN(fle, len);
395 /* Final bit: 1, for last frame list */
396 DPAA2_SET_FLE_FIN(fle);
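/*
 * Enqueue a burst of jobs on a virtual queue. Each job takes an element
 * from the FLE pool (I/O metadata followed by the FLE/SDD memory), is
 * wrapped in a frame descriptor and pushed to the DPDMAI Tx queue via
 * the per-lcore QBMAN portal, at most dpaa2_eqcr_size frames at a time.
 */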
400 rte_qdma_vq_enqueue_multi(uint16_t vq_id,
401 struct rte_qdma_job **job,
404 struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
405 struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
406 struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
407 struct qdma_io_meta *io_meta;
408 struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
409 struct dpaa2_queue *txq;
410 struct qbman_fle *fle;
411 struct qbman_eq_desc eqdesc;
412 struct qbman_swp *swp;
414 uint32_t num_to_send = 0;
416 uint16_t num_txed = 0;
418 /* Return error in case of wrong lcore_id */
419 if (rte_lcore_id() != qdma_vq->lcore_id) {
420 DPAA2_QDMA_ERR("QDMA enqueue for vqid %d on wrong core",
425 if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
426 ret = dpaa2_affine_qbman_swp();
428 DPAA2_QDMA_ERR("Failure in affining portal");
432 swp = DPAA2_PER_LCORE_PORTAL;
434 txq = &(dpdmai_dev->tx_queue[qdma_pq->queue_id]);
436 /* Prepare enqueue descriptor */
437 qbman_eq_desc_clear(&eqdesc);
438 qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
439 qbman_eq_desc_set_no_orp(&eqdesc, 0);
440 qbman_eq_desc_set_response(&eqdesc, 0, 0);
442 while (nb_jobs > 0) {
445 num_to_send = (nb_jobs > dpaa2_eqcr_size) ?
446 dpaa2_eqcr_size : nb_jobs;
448 for (loop = 0; loop < num_to_send; loop++) {
450 * Get an FLE/SDD from FLE pool.
451 * Note: IO metadata is before the FLE and SDD memory.
453 ret = rte_mempool_get(qdma_dev.fle_pool,
454 (void **)(&io_meta));
456 DPAA2_QDMA_DP_WARN("Memory alloc failed for FLE");
460 /* Set the metadata */
461 io_meta->cnxt = (size_t)job[num_tx];
464 fle = (struct qbman_fle *)(io_meta + 1);
466 /* populate Frame descriptor */
467 memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
468 DPAA2_SET_FD_ADDR(&fd_arr[loop],
469 DPAA2_VADDR_TO_IOVA(fle));
470 DPAA2_SET_FD_COMPOUND_FMT(&fd_arr[loop]);
471 DPAA2_SET_FD_FRC(&fd_arr[loop], QDMA_SER_CTX);
474 memset(fle, 0, QDMA_FLE_POOL_SIZE);
475 dpaa2_qdma_populate_fle(fle, job[num_tx]->src,
483 /* Enqueue the packet to the QBMAN */
484 uint32_t enqueue_loop = 0;
485 while (enqueue_loop < num_to_send) {
486 enqueue_loop += qbman_swp_enqueue_multiple(swp,
488 &fd_arr[enqueue_loop],
490 num_to_send - enqueue_loop);
493 num_txed += num_to_send;
494 nb_jobs -= num_to_send;
496 qdma_vq->num_enqueues += num_txed;
501 rte_qdma_vq_enqueue(uint16_t vq_id,
502 struct rte_qdma_job *job)
506 ret = rte_qdma_vq_enqueue_multi(vq_id, &job, 1);
508 DPAA2_QDMA_ERR("DPDMAI device enqueue failed: %d", ret);
515 /* Function to receive a QDMA job for a given device and queue */
517 dpdmai_dev_dequeue_multijob(struct dpaa2_dpdmai_dev *dpdmai_dev,
520 struct rte_qdma_job **job,
523 struct qdma_io_meta *io_meta;
524 struct dpaa2_queue *rxq;
525 struct qbman_result *dq_storage;
526 struct qbman_pull_desc pulldesc;
527 const struct qbman_fd *fd;
528 struct qbman_swp *swp;
529 struct qbman_fle *fle;
534 if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
535 ret = dpaa2_affine_qbman_swp();
537 DPAA2_QDMA_ERR("Failure in affining portal");
541 swp = DPAA2_PER_LCORE_PORTAL;
542 rxq = &(dpdmai_dev->rx_queue[rxq_id]);
543 dq_storage = rxq->q_storage->dq_storage[0];
546 /* Prepare dequeue descriptor */
547 qbman_pull_desc_clear(&pulldesc);
548 qbman_pull_desc_set_fq(&pulldesc, fqid);
549 qbman_pull_desc_set_storage(&pulldesc, dq_storage,
550 (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
551 if (nb_jobs > dpaa2_dqrr_size)
552 qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
554 qbman_pull_desc_set_numframes(&pulldesc, nb_jobs);
557 if (qbman_swp_pull(swp, &pulldesc)) {
558 DPAA2_QDMA_DP_WARN("VDQ command not issued. QBMAN busy");
564 rte_prefetch0((void *)((size_t)(dq_storage + 1)));
565 /* Check if the previous issued command is completed. */
566 while (!qbman_check_command_complete(dq_storage))
572 /* Loop until the dq_storage is updated with
575 while (!qbman_check_new_result(dq_storage))
578 rte_prefetch0((void *)((size_t)(dq_storage + 2)));
579 /* Check whether the last pull command has expired and
580 * set the condition for loop termination
582 if (qbman_result_DQ_is_pull_complete(dq_storage)) {
584 /* Check for valid frame. */
585 status = qbman_result_DQ_flags(dq_storage);
586 if (unlikely((status &
587 QBMAN_DQ_STAT_VALIDFRAME) == 0))
590 fd = qbman_result_DQ_fd(dq_storage);
593 * Fetch metadata from FLE. job and vq_id were set
594 * in metadata in the enqueue operation.
596 fle = (struct qbman_fle *)
597 DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
598 io_meta = (struct qdma_io_meta *)(fle) - 1;
600 vq_id[num_pulled] = io_meta->id;
602 job[num_pulled] = (struct rte_qdma_job *)(size_t)io_meta->cnxt;
603 job[num_pulled]->status = DPAA2_GET_FD_ERR(fd);
605 /* Free FLE to the pool */
606 rte_mempool_put(qdma_dev.fle_pool, io_meta);
610 } while (pending && (num_pulled <= dpaa2_dqrr_size));
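/*
 * Dequeue completed jobs for a virtual queue. An exclusive VQ pulls
 * directly from its H/W queue; a shared VQ polls the H/W queue when its
 * status ring runs short, pushes each completion onto the owning VQ's
 * status ring, and then dequeues its own ring in bulk.
 */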
616 rte_qdma_vq_dequeue_multi(uint16_t vq_id,
617 struct rte_qdma_job **job,
620 struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
621 struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
622 struct qdma_virt_queue *temp_qdma_vq;
623 struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
624 int ring_count, ret = 0, i;
626 /* Return error in case of wrong lcore_id */
627 if (rte_lcore_id() != (unsigned int)(qdma_vq->lcore_id)) {
628 DPAA2_QDMA_WARN("QDMA dequeue for vqid %d on wrong core",
633 /* Only dequeue when there are pending jobs on VQ */
634 if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
637 if (qdma_vq->num_enqueues < (qdma_vq->num_dequeues + nb_jobs))
638 nb_jobs = (qdma_vq->num_enqueues - qdma_vq->num_dequeues);
640 if (qdma_vq->exclusive_hw_queue) {
641 /* In case of exclusive queue directly fetch from HW queue */
642 ret = dpdmai_dev_dequeue_multijob(dpdmai_dev, qdma_pq->queue_id,
646 "Dequeue from DPDMAI device failed: %d", ret);
649 qdma_vq->num_dequeues += ret;
651 uint16_t temp_vq_id[RTE_QDMA_BURST_NB_MAX];
653 * Get the QDMA completed jobs from the software ring.
654 * In case they are not available on the ring poke the HW
655 * to fetch completed jobs from corresponding HW queues
657 ring_count = rte_ring_count(qdma_vq->status_ring);
658 if (ring_count < nb_jobs) {
659 /* TODO - How to have right budget */
660 ret = dpdmai_dev_dequeue_multijob(dpdmai_dev,
662 temp_vq_id, job, nb_jobs);
663 for (i = 0; i < ret; i++) {
664 temp_qdma_vq = &qdma_vqs[temp_vq_id[i]];
665 rte_ring_enqueue(temp_qdma_vq->status_ring,
667 ring_count = rte_ring_count(
668 qdma_vq->status_ring);
673 /* Dequeue job from the software ring
674 * to provide to the user
676 ret = rte_ring_dequeue_bulk(qdma_vq->status_ring,
677 (void **)job, ring_count, NULL);
679 qdma_vq->num_dequeues += ret;
686 struct rte_qdma_job *
687 rte_qdma_vq_dequeue(uint16_t vq_id)
690 struct rte_qdma_job *job = NULL;
692 ret = rte_qdma_vq_dequeue_multi(vq_id, &job, 1);
694 DPAA2_QDMA_DP_WARN("DPDMAI device dequeue failed: %d", ret);
700 rte_qdma_vq_stats(uint16_t vq_id,
701 struct rte_qdma_vq_stats *vq_status)
703 struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
705 DPAA2_QDMA_FUNC_TRACE();
707 if (qdma_vq->in_use) {
708 vq_status->exclusive_hw_queue = qdma_vq->exclusive_hw_queue;
709 vq_status->lcore_id = qdma_vq->lcore_id;
710 vq_status->num_enqueues = qdma_vq->num_enqueues;
711 vq_status->num_dequeues = qdma_vq->num_dequeues;
712 vq_status->num_pending_jobs = vq_status->num_enqueues -
713 vq_status->num_dequeues;
718 rte_qdma_vq_destroy(uint16_t vq_id)
720 struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
722 DPAA2_QDMA_FUNC_TRACE();
724 /* In case there are pending jobs on any VQ, return -EBUSY */
725 if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
728 rte_spinlock_lock(&qdma_dev.lock);
730 if (qdma_vq->exclusive_hw_queue)
731 free_hw_queue(qdma_vq->hw_queue);
733 if (qdma_vq->status_ring)
734 rte_ring_free(qdma_vq->status_ring);
736 put_hw_queue(qdma_vq->hw_queue);
739 memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));
741 rte_spinlock_unlock(&qdma_dev.lock);
749 DPAA2_QDMA_FUNC_TRACE();
755 rte_qdma_destroy(void)
757 DPAA2_QDMA_FUNC_TRACE();
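/* Rawdev ops table; left empty as the driver is used through the rte_qdma_* API */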
762 static const struct rte_rawdev_ops dpaa2_qdma_ops;
765 add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
767 struct qdma_hw_queue *queue;
770 DPAA2_QDMA_FUNC_TRACE();
772 for (i = 0; i < dpdmai_dev->num_queues; i++) {
773 queue = rte_zmalloc(NULL, sizeof(struct qdma_hw_queue), 0);
776 "Memory allocation failed for QDMA queue");
780 queue->dpdmai_dev = dpdmai_dev;
783 TAILQ_INSERT_TAIL(&qdma_queue_list, queue, next);
784 qdma_dev.num_hw_queues++;
791 remove_hw_queues_from_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
793 struct qdma_hw_queue *queue = NULL;
794 struct qdma_hw_queue *tqueue = NULL;
796 DPAA2_QDMA_FUNC_TRACE();
798 TAILQ_FOREACH_SAFE(queue, &qdma_queue_list, next, tqueue) {
799 if (queue->dpdmai_dev == dpdmai_dev) {
800 TAILQ_REMOVE(&qdma_queue_list, queue, next);
808 dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)
810 struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
813 DPAA2_QDMA_FUNC_TRACE();
815 /* Remove HW queues from global list */
816 remove_hw_queues_from_list(dpdmai_dev);
818 ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
821 DPAA2_QDMA_ERR("dpdmai disable failed");
823 /* Free up the DQRR storage for Rx */
824 for (i = 0; i < dpdmai_dev->num_queues; i++) {
825 struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[i]);
827 if (rxq->q_storage) {
828 dpaa2_free_dq_storage(rxq->q_storage);
829 rte_free(rxq->q_storage);
833 /* Close the device at underlying layer */
834 ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
836 DPAA2_QDMA_ERR("Failure closing dpdmai device");
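/*
 * Per-device initialization: open the DPDMAI object through the MC
 * portal, read its attributes, set up the Rx queues and their DQ
 * storage, fetch the Rx/Tx FQIDs, enable the device and add its H/W
 * queues to the global list.
 */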
842 dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
844 struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
845 struct dpdmai_rx_queue_cfg rx_queue_cfg;
846 struct dpdmai_attr attr;
847 struct dpdmai_rx_queue_attr rx_attr;
848 struct dpdmai_tx_queue_attr tx_attr;
851 DPAA2_QDMA_FUNC_TRACE();
853 /* Open DPDMAI device */
854 dpdmai_dev->dpdmai_id = dpdmai_id;
855 dpdmai_dev->dpdmai.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
856 ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
857 dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
859 DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
863 /* Get DPDMAI attributes */
864 ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
865 dpdmai_dev->token, &attr);
867 DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
871 dpdmai_dev->num_queues = attr.num_of_queues;
873 /* Set up Rx Queues */
874 for (i = 0; i < dpdmai_dev->num_queues; i++) {
875 struct dpaa2_queue *rxq;
877 memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
878 ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
881 i, 0, &rx_queue_cfg);
883 DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
888 /* Allocate DQ storage for the DPDMAI Rx queues */
889 rxq = &(dpdmai_dev->rx_queue[i]);
890 rxq->q_storage = rte_malloc("dq_storage",
891 sizeof(struct queue_storage_info_t),
892 RTE_CACHE_LINE_SIZE);
893 if (!rxq->q_storage) {
894 DPAA2_QDMA_ERR("q_storage allocation failed");
899 memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
900 ret = dpaa2_alloc_dq_storage(rxq->q_storage);
902 DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
907 /* Get the Rx and Tx queues' FQIDs */
908 for (i = 0; i < dpdmai_dev->num_queues; i++) {
909 ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
910 dpdmai_dev->token, i, 0, &rx_attr);
912 DPAA2_QDMA_ERR("Reading device failed with err: %d",
916 dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
918 ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
919 dpdmai_dev->token, i, 0, &tx_attr);
921 DPAA2_QDMA_ERR("Reading device failed with err: %d",
925 dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
928 /* Enable the device */
929 ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
932 DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
936 /* Add the HW queue to the global list */
937 ret = add_hw_queues_to_list(dpdmai_dev);
939 DPAA2_QDMA_ERR("Adding H/W queue to list failed");
942 DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
946 dpaa2_dpdmai_dev_uninit(rawdev);
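/*
 * Probe: allocate a rawdev for the DPDMAI object, hook up the rawdev
 * ops, device and driver name, and run the device initialization.
 */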
951 rte_dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
952 struct rte_dpaa2_device *dpaa2_dev)
954 struct rte_rawdev *rawdev;
957 DPAA2_QDMA_FUNC_TRACE();
959 rawdev = rte_rawdev_pmd_allocate(dpaa2_dev->device.name,
960 sizeof(struct dpaa2_dpdmai_dev),
963 DPAA2_QDMA_ERR("Unable to allocate rawdevice");
967 dpaa2_dev->rawdev = rawdev;
968 rawdev->dev_ops = &dpaa2_qdma_ops;
969 rawdev->device = &dpaa2_dev->device;
970 rawdev->driver_name = dpaa2_drv->driver.name;
972 /* Invoke PMD device initialization function */
973 ret = dpaa2_dpdmai_dev_init(rawdev, dpaa2_dev->object_id);
975 rte_rawdev_pmd_release(rawdev);
983 rte_dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
985 struct rte_rawdev *rawdev = dpaa2_dev->rawdev;
988 DPAA2_QDMA_FUNC_TRACE();
990 dpaa2_dpdmai_dev_uninit(rawdev);
992 ret = rte_rawdev_pmd_release(rawdev);
994 DPAA2_QDMA_ERR("Device cleanup failed");
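/* DPAA2 QDMA rawdev driver definition and registration */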
999 static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
1000 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
1001 .drv_type = DPAA2_QDMA,
1002 .probe = rte_dpaa2_qdma_probe,
1003 .remove = rte_dpaa2_qdma_remove,
1006 RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
1008 RTE_INIT(dpaa2_qdma_init_log)
1010 dpaa2_qdma_logtype = rte_log_register("pmd.raw.dpaa2.qdma");
1011 if (dpaa2_qdma_logtype >= 0)
1012 rte_log_set_level(dpaa2_qdma_logtype, RTE_LOG_INFO);