/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <string.h>

#include <rte_atomic.h>
#include <rte_lcore.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_mempool.h>

#include <mc/fsl_dpdmai.h>
#include <portal/dpaa2_hw_pvt.h>
#include <portal/dpaa2_hw_dpio.h>

#include "dpaa2_qdma.h"
#include "dpaa2_qdma_logs.h"
#include "rte_pmd_dpaa2_qdma.h"

/* Dynamic log type identifier */
int dpaa2_qdma_logtype;

/* QDMA device */
static struct qdma_device qdma_dev;

/* QDMA H/W queues list */
TAILQ_HEAD(qdma_hw_queue_list, qdma_hw_queue);
static struct qdma_hw_queue_list qdma_queue_list
	= TAILQ_HEAD_INITIALIZER(qdma_queue_list);

/* QDMA Virtual Queues */
static struct qdma_virt_queue *qdma_vqs;

/* QDMA per core data */
static struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];

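/*
 * Take the first unused H/W queue from the global list and bind it to the
 * requesting lcore.
 */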
static struct qdma_hw_queue *
alloc_hw_queue(uint32_t lcore_id)
{
	struct qdma_hw_queue *queue = NULL;

	DPAA2_QDMA_FUNC_TRACE();

	/* Get a free queue from the list */
	TAILQ_FOREACH(queue, &qdma_queue_list, next) {
		if (queue->num_users == 0) {
			queue->lcore_id = lcore_id;
			queue->num_users++;
			break;
		}
	}

	return queue;
}

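/* Drop one reference taken on a H/W queue by alloc_hw_queue(). */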
static void
free_hw_queue(struct qdma_hw_queue *queue)
{
	DPAA2_QDMA_FUNC_TRACE();

	queue->num_users--;
}

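/*
 * Return a H/W queue for the given lcore: allocate a fresh queue while the
 * per-core limit allows, otherwise share the least loaded queue already
 * attached to this core.
 */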
static struct qdma_hw_queue *
get_hw_queue(uint32_t lcore_id)
{
	struct qdma_per_core_info *core_info;
	struct qdma_hw_queue *queue, *temp;
	uint32_t least_num_users;
	int num_hw_queues, i;

	DPAA2_QDMA_FUNC_TRACE();

	core_info = &qdma_core_info[lcore_id];
	num_hw_queues = core_info->num_hw_queues;

	/*
	 * Allocate a H/W queue if this core has fewer queues
	 * than the configured per-core maximum.
	 */
	if (num_hw_queues < qdma_dev.max_hw_queues_per_core) {
		queue = alloc_hw_queue(lcore_id);
		if (queue) {
			core_info->hw_queues[num_hw_queues] = queue;
			core_info->num_hw_queues++;
			return queue;
		}
	}

	queue = core_info->hw_queues[0];
	/* In case there is no queue associated with the core return NULL */
	if (!queue)
		return NULL;

	/* Fetch the least loaded H/W queue */
	least_num_users = core_info->hw_queues[0]->num_users;
	for (i = 0; i < num_hw_queues; i++) {
		temp = core_info->hw_queues[i];
		if (temp->num_users < least_num_users)
			queue = temp;
	}

	if (queue)
		queue->num_users++;

	return queue;
}

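/*
 * Release a reference on a (possibly shared) H/W queue. The last user frees
 * the queue and detaches it from the core it was assigned to.
 */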
static void
put_hw_queue(struct qdma_hw_queue *queue)
{
	struct qdma_per_core_info *core_info;
	int lcore_id, num_hw_queues, i;

	DPAA2_QDMA_FUNC_TRACE();

	/*
	 * If this is the last user of the queue, free it.
	 * Also remove it from QDMA core info.
	 */
	if (queue->num_users == 1) {
		free_hw_queue(queue);

		/* Remove the physical queue from core info */
		lcore_id = queue->lcore_id;
		core_info = &qdma_core_info[lcore_id];
		num_hw_queues = core_info->num_hw_queues;
		for (i = 0; i < num_hw_queues; i++) {
			if (queue == core_info->hw_queues[i])
				break;
		}
		for (; i < num_hw_queues - 1; i++)
			core_info->hw_queues[i] = core_info->hw_queues[i + 1];
		core_info->hw_queues[i] = NULL;
	} else {
		queue->num_users--;
	}
}

int __rte_experimental
rte_qdma_init(void)
{
	DPAA2_QDMA_FUNC_TRACE();

	rte_spinlock_init(&qdma_dev.lock);

	return 0;
}

void __rte_experimental
rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr)
{
	DPAA2_QDMA_FUNC_TRACE();

	qdma_attr->num_hw_queues = qdma_dev.num_hw_queues;
}

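/*
 * Reset the QDMA device. Rejected while the device is running or while jobs
 * are still pending; otherwise the virtual queues, per-core bookkeeping and
 * the FLE pool are released.
 */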
int __rte_experimental
rte_qdma_reset(void)
{
	struct qdma_hw_queue *queue;
	int i;

	DPAA2_QDMA_FUNC_TRACE();

	/* In case QDMA device is not in stopped state, return -EBUSY */
	if (qdma_dev.state == 1) {
		DPAA2_QDMA_ERR(
			"Device is in running state. Stop before reset.");
		return -EBUSY;
	}

	/* In case there are pending jobs on any VQ, return -EBUSY */
	for (i = 0; i < qdma_dev.max_vqs; i++) {
		if (qdma_vqs[i].in_use && (qdma_vqs[i].num_enqueues !=
		    qdma_vqs[i].num_dequeues)) {
			DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
			return -EBUSY;
		}
	}

	/* Reset HW queues */
	TAILQ_FOREACH(queue, &qdma_queue_list, next)
		queue->num_users = 0;

	/* Reset and free virtual queues */
	for (i = 0; i < qdma_dev.max_vqs; i++) {
		if (qdma_vqs[i].status_ring)
			rte_ring_free(qdma_vqs[i].status_ring);
	}
	rte_free(qdma_vqs);
	qdma_vqs = NULL;

	/* Reset per core info */
	memset(&qdma_core_info, 0,
		sizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);

	/* Free the FLE pool */
	if (qdma_dev.fle_pool)
		rte_mempool_free(qdma_dev.fle_pool);

	/* Reset QDMA device structure */
	qdma_dev.mode = RTE_QDMA_MODE_HW;
	qdma_dev.max_hw_queues_per_core = 0;
	qdma_dev.fle_pool = NULL;
	qdma_dev.fle_pool_count = 0;
	qdma_dev.max_vqs = 0;

	return 0;
}

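/*
 * Configure the QDMA device (allowed only in stopped state): the device is
 * reset, then the virtual queue array and the FLE pool are allocated from
 * the supplied rte_qdma_config.
 *
 * Illustrative usage sketch from the application side; the field values
 * below are examples only and error handling is omitted:
 *
 *	struct rte_qdma_config cfg = {
 *		.max_hw_queues_per_core = 2,
 *		.mode = RTE_QDMA_MODE_HW,
 *		.fle_pool_count = 4096,
 *		.max_vqs = 16,
 *	};
 *	ret = rte_qdma_configure(&cfg);
 */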
int __rte_experimental
rte_qdma_configure(struct rte_qdma_config *qdma_config)
{
	int ret;
	char fle_pool_name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */

	DPAA2_QDMA_FUNC_TRACE();

	/* In case QDMA device is not in stopped state, return -EBUSY */
	if (qdma_dev.state == 1) {
		DPAA2_QDMA_ERR(
			"Device is in running state. Stop before config.");
		return -EBUSY;
	}

	/* Reset the QDMA device */
	ret = rte_qdma_reset();
	if (ret) {
		DPAA2_QDMA_ERR("Resetting QDMA failed");
		return ret;
	}

	/* Set mode */
	qdma_dev.mode = qdma_config->mode;

	/* Set max HW queue per core */
	if (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {
		DPAA2_QDMA_ERR("H/W queues per core is more than: %d",
			MAX_HW_QUEUE_PER_CORE);
		return -EINVAL;
	}
	qdma_dev.max_hw_queues_per_core =
		qdma_config->max_hw_queues_per_core;

	/* Allocate Virtual Queues */
	qdma_vqs = rte_malloc("qdma_virtual_queues",
			(sizeof(struct qdma_virt_queue) * qdma_config->max_vqs),
			RTE_CACHE_LINE_SIZE);
	if (!qdma_vqs) {
		DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
		return -ENOMEM;
	}
	qdma_dev.max_vqs = qdma_config->max_vqs;

	/* Allocate FLE pool; just append the PID so that in case of
	 * multiprocess, the pools don't collide.
	 */
	snprintf(fle_pool_name, sizeof(fle_pool_name), "qdma_fle_pool%u",
		getpid());
	qdma_dev.fle_pool = rte_mempool_create(fle_pool_name,
			qdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,
			QDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,
			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
	if (!qdma_dev.fle_pool) {
		DPAA2_QDMA_ERR("qdma_fle_pool create failed");
		rte_free(qdma_vqs);
		qdma_vqs = NULL;
		return -ENOMEM;
	}
	qdma_dev.fle_pool_count = qdma_config->fle_pool_count;

	return 0;
}

int __rte_experimental
rte_qdma_start(void)
{
	DPAA2_QDMA_FUNC_TRACE();

	qdma_dev.state = 1;

	return 0;
}

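/*
 * Create a virtual queue bound to the calling lcore. In HW mode, or when
 * RTE_QDMA_VQ_EXCLUSIVE_PQ is requested, the VQ gets a dedicated H/W queue;
 * otherwise it shares a H/W queue and gets a software status ring for
 * completions. Returns the VQ id on success.
 *
 * Illustrative usage sketch (application side; addresses and length are
 * placeholders):
 *
 *	struct rte_qdma_job job = {
 *		.src = src_iova,
 *		.dest = dst_iova,
 *		.len = length,
 *		.flags = RTE_QDMA_JOB_SRC_PHY | RTE_QDMA_JOB_DEST_PHY,
 *	};
 *	int vq_id = rte_qdma_vq_create(rte_lcore_id(), 0);
 *	rte_qdma_vq_enqueue(vq_id, &job);
 *	struct rte_qdma_job *done = rte_qdma_vq_dequeue(vq_id);
 */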
int __rte_experimental
rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)
{
	char ring_name[32];
	int i;

	DPAA2_QDMA_FUNC_TRACE();

	rte_spinlock_lock(&qdma_dev.lock);

	/* Get a free Virtual Queue */
	for (i = 0; i < qdma_dev.max_vqs; i++) {
		if (qdma_vqs[i].in_use == 0)
			break;
	}

	/* Return in case no VQ is free */
	if (i == qdma_dev.max_vqs) {
		rte_spinlock_unlock(&qdma_dev.lock);
		DPAA2_QDMA_ERR("No free virtual queue available");
		return -ENODEV;
	}

	if (qdma_dev.mode == RTE_QDMA_MODE_HW ||
			(flags & RTE_QDMA_VQ_EXCLUSIVE_PQ)) {
		/* Allocate HW queue for a VQ */
		qdma_vqs[i].hw_queue = alloc_hw_queue(lcore_id);
		qdma_vqs[i].exclusive_hw_queue = 1;
	} else {
		/* Allocate a Ring for Virtual Queue in VQ mode */
		snprintf(ring_name, sizeof(ring_name), "status ring %d", i);
		qdma_vqs[i].status_ring = rte_ring_create(ring_name,
			qdma_dev.fle_pool_count, rte_socket_id(), 0);
		if (!qdma_vqs[i].status_ring) {
			DPAA2_QDMA_ERR("Status ring creation failed for vq");
			rte_spinlock_unlock(&qdma_dev.lock);
			return -ENOMEM;
		}

		/* Get a HW queue (shared) for a VQ */
		qdma_vqs[i].hw_queue = get_hw_queue(lcore_id);
		qdma_vqs[i].exclusive_hw_queue = 0;
	}

	if (qdma_vqs[i].hw_queue == NULL) {
		DPAA2_QDMA_ERR("No H/W queue available for VQ");
		if (qdma_vqs[i].status_ring)
			rte_ring_free(qdma_vqs[i].status_ring);
		qdma_vqs[i].status_ring = NULL;
		rte_spinlock_unlock(&qdma_dev.lock);
		return -ENODEV;
	}

	qdma_vqs[i].in_use = 1;
	qdma_vqs[i].lcore_id = lcore_id;

	rte_spinlock_unlock(&qdma_dev.lock);

	return i;
}

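/*
 * Fill in the three frame list entries for one job: the first FLE points to
 * the source/destination descriptors (SDD), the second to the source buffer
 * and the third to the destination buffer.
 */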
static void
dpaa2_qdma_populate_fle(struct qbman_fle *fle,
			uint64_t src, uint64_t dest,
			size_t len, uint32_t flags)
{
	struct qdma_sdd *sdd;

	DPAA2_QDMA_FUNC_TRACE();

	sdd = (struct qdma_sdd *)((uint8_t *)(fle) +
		(DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle)));

	/* first frame list to source descriptor */
	DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sdd));
	DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));

	/* source and destination descriptor */
	DPAA2_SET_SDD_RD_COHERENT(sdd); /* source descriptor CMD */
	sdd++;
	DPAA2_SET_SDD_WR_COHERENT(sdd); /* dest descriptor CMD */

	fle++;
	/* source frame list to source buffer */
	if (flags & RTE_QDMA_JOB_SRC_PHY) {
		DPAA2_SET_FLE_ADDR(fle, src);
		DPAA2_SET_FLE_BMT(fle);
	} else {
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
	}
	DPAA2_SET_FLE_LEN(fle, len);

	fle++;
	/* destination frame list to destination buffer */
	if (flags & RTE_QDMA_JOB_DEST_PHY) {
		DPAA2_SET_FLE_BMT(fle);
		DPAA2_SET_FLE_ADDR(fle, dest);
	} else {
		DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
	}
	DPAA2_SET_FLE_LEN(fle, len);

	/* Final bit: 1, for last frame list */
	DPAA2_SET_FLE_FIN(fle);
}

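/*
 * Enqueue a single job on a DPDMAI Tx queue: take an FLE block (preceded by
 * its IO metadata) from the FLE pool, build a compound frame descriptor and
 * push it to QBMAN, retrying while the portal is busy.
 */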
static int
dpdmai_dev_enqueue(struct dpaa2_dpdmai_dev *dpdmai_dev,
		   uint16_t txq_id,
		   uint16_t vq_id,
		   struct rte_qdma_job *job)
{
	struct qdma_io_meta *io_meta;
	struct qbman_fd fd;
	struct dpaa2_queue *txq;
	struct qbman_fle *fle;
	struct qbman_eq_desc eqdesc;
	struct qbman_swp *swp;
	int ret;

	DPAA2_QDMA_FUNC_TRACE();

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_QDMA_ERR("Failure in affining portal");
			return ret;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	txq = &(dpdmai_dev->tx_queue[txq_id]);

	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
	qbman_eq_desc_set_no_orp(&eqdesc, 0);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);

	/*
	 * Get an FLE/SDD from FLE pool.
	 * Note: IO metadata is before the FLE and SDD memory.
	 */
	ret = rte_mempool_get(qdma_dev.fle_pool, (void **)(&io_meta));
	if (ret) {
		DPAA2_QDMA_DP_WARN("Memory alloc failed for FLE");
		return ret;
	}

	/* Set the metadata */
	io_meta->cnxt = (size_t)job;
	io_meta->id = vq_id;

	fle = (struct qbman_fle *)(io_meta + 1);

	/* Populate frame descriptor */
	memset(&fd, 0, sizeof(struct qbman_fd));
	DPAA2_SET_FD_ADDR(&fd, DPAA2_VADDR_TO_IOVA(fle));
	DPAA2_SET_FD_COMPOUND_FMT(&fd);
	DPAA2_SET_FD_FRC(&fd, QDMA_SER_CTX);

	/* Populate FLE */
	memset(fle, 0, QDMA_FLE_POOL_SIZE);
	dpaa2_qdma_populate_fle(fle, job->src, job->dest, job->len, job->flags);

	/* Enqueue the packet to the QBMAN */
	do {
		ret = qbman_swp_enqueue_multiple(swp, &eqdesc, &fd, NULL, 1);
		if (ret < 0 && ret != -EBUSY)
			DPAA2_QDMA_ERR("Transmit failure with err: %d", ret);
	} while (ret == -EBUSY);

	DPAA2_QDMA_DP_DEBUG("Successfully transmitted a packet");

	return ret;
}

int __rte_experimental
rte_qdma_vq_enqueue_multi(uint16_t vq_id,
			struct rte_qdma_job **job,
			uint16_t nb_jobs)
{
	int ret, i;

	DPAA2_QDMA_FUNC_TRACE();

	for (i = 0; i < nb_jobs; i++) {
		ret = rte_qdma_vq_enqueue(vq_id, job[i]);
		if (ret < 0)
			break;
	}

	return i;
}

int __rte_experimental
rte_qdma_vq_enqueue(uint16_t vq_id,
		struct rte_qdma_job *job)
{
	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
	struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
	int ret;

	DPAA2_QDMA_FUNC_TRACE();

	/* Return error in case of wrong lcore_id */
	if (rte_lcore_id() != qdma_vq->lcore_id) {
		DPAA2_QDMA_ERR("QDMA enqueue for vqid %d on wrong core",
				vq_id);
		return -EINVAL;
	}

	ret = dpdmai_dev_enqueue(dpdmai_dev, qdma_pq->queue_id, vq_id, job);
	if (ret < 0) {
		DPAA2_QDMA_ERR("DPDMAI device enqueue failed: %d", ret);
		return ret;
	}

	qdma_vq->num_enqueues++;

	return 1;
}

/* Function to receive a QDMA job for a given device and queue */
static int
dpdmai_dev_dequeue(struct dpaa2_dpdmai_dev *dpdmai_dev,
		   uint16_t rxq_id,
		   uint16_t *vq_id,
		   struct rte_qdma_job **job)
{
	struct qdma_io_meta *io_meta;
	struct dpaa2_queue *rxq;
	struct qbman_result *dq_storage;
	struct qbman_pull_desc pulldesc;
	const struct qbman_fd *fd;
	struct qbman_swp *swp;
	struct qbman_fle *fle;
	uint32_t fqid;
	uint8_t status;
	int ret;

	DPAA2_QDMA_FUNC_TRACE();

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			DPAA2_QDMA_ERR("Failure in affining portal");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	rxq = &(dpdmai_dev->rx_queue[rxq_id]);
	dq_storage = rxq->q_storage->dq_storage[0];
	fqid = rxq->fqid;

	/* Prepare dequeue descriptor */
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
		(uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
	qbman_pull_desc_set_numframes(&pulldesc, 1);

	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			DPAA2_QDMA_DP_WARN("VDQ command not issued. QBMAN busy");
			continue;
		}
		break;
	}

	/* Check if the previously issued command is completed. */
	while (!qbman_check_command_complete(dq_storage))
		;
	/* Loop until dq_storage is updated with new token by QBMAN */
	while (!qbman_check_new_result(dq_storage))
		;

	/* Check for valid frame. */
	status = qbman_result_DQ_flags(dq_storage);
	if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
		DPAA2_QDMA_DP_DEBUG("No frame is delivered");
		return 0;
	}

	/* Get the FD */
	fd = qbman_result_DQ_fd(dq_storage);

	/*
	 * Fetch metadata from FLE. job and vq_id were set
	 * in metadata in the enqueue operation.
	 */
	fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
	io_meta = (struct qdma_io_meta *)(fle) - 1;
	if (vq_id)
		*vq_id = io_meta->id;

	*job = (struct rte_qdma_job *)(size_t)io_meta->cnxt;
	(*job)->status = DPAA2_GET_FD_ERR(fd);

	/* Free FLE to the pool */
	rte_mempool_put(qdma_dev.fle_pool, io_meta);

	DPAA2_QDMA_DP_DEBUG("packet received");

	return 1;
}

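/*
 * Dequeue completed jobs one by one until nb_jobs have been fetched or no
 * more completions are available.
 */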
int __rte_experimental
rte_qdma_vq_dequeue_multi(uint16_t vq_id,
			struct rte_qdma_job **job,
			uint16_t nb_jobs)
{
	int i;

	DPAA2_QDMA_FUNC_TRACE();

	for (i = 0; i < nb_jobs; i++) {
		job[i] = rte_qdma_vq_dequeue(vq_id);
		if (!job[i])
			break;
	}

	return i;
}

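/*
 * Dequeue one completed job from a virtual queue. Exclusive VQs poll their
 * H/W queue directly; shared VQs first drain completions from the H/W queue
 * into the per-VQ status rings and then pop their own ring.
 */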
struct rte_qdma_job * __rte_experimental
rte_qdma_vq_dequeue(uint16_t vq_id)
{
	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
	struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
	struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
	struct rte_qdma_job *job = NULL;
	struct qdma_virt_queue *temp_qdma_vq;
	int dequeue_budget = QDMA_DEQUEUE_BUDGET;
	int ring_count, ret, i;
	uint16_t temp_vq_id;

	DPAA2_QDMA_FUNC_TRACE();

	/* Return error in case of wrong lcore_id */
	if (rte_lcore_id() != (unsigned int)(qdma_vq->lcore_id)) {
		DPAA2_QDMA_ERR("QDMA dequeue for vqid %d on wrong core",
				vq_id);
		return NULL;
	}

	/* Only dequeue when there are pending jobs on VQ */
	if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
		return NULL;

	if (qdma_vq->exclusive_hw_queue) {
		/* In case of exclusive queue directly fetch from HW queue */
		ret = dpdmai_dev_dequeue(dpdmai_dev, qdma_pq->queue_id,
					NULL, &job);
		if (ret < 0) {
			DPAA2_QDMA_ERR(
				"Dequeue from DPDMAI device failed: %d", ret);
			return NULL;
		}
	} else {
		/*
		 * Get the QDMA completed jobs from the software ring.
		 * In case they are not available on the ring poke the HW
		 * to fetch completed jobs from corresponding HW queues
		 */
		ring_count = rte_ring_count(qdma_vq->status_ring);
		if (ring_count == 0) {
			/* TODO - How to have right budget */
			for (i = 0; i < dequeue_budget; i++) {
				ret = dpdmai_dev_dequeue(dpdmai_dev,
					qdma_pq->queue_id, &temp_vq_id, &job);
				if (ret == 0)
					break;
				temp_qdma_vq = &qdma_vqs[temp_vq_id];
				rte_ring_enqueue(temp_qdma_vq->status_ring,
					(void *)(job));
				ring_count = rte_ring_count(
					qdma_vq->status_ring);
				if (ring_count)
					break;
			}
		}

		/* Dequeue job from the software ring to provide to the user */
		rte_ring_dequeue(qdma_vq->status_ring, (void **)&job);
	}

	qdma_vq->num_dequeues++;

	return job;
}

void __rte_experimental
rte_qdma_vq_stats(uint16_t vq_id,
		struct rte_qdma_vq_stats *vq_status)
{
	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];

	DPAA2_QDMA_FUNC_TRACE();

	if (qdma_vq->in_use) {
		vq_status->exclusive_hw_queue = qdma_vq->exclusive_hw_queue;
		vq_status->lcore_id = qdma_vq->lcore_id;
		vq_status->num_enqueues = qdma_vq->num_enqueues;
		vq_status->num_dequeues = qdma_vq->num_dequeues;
		vq_status->num_pending_jobs = vq_status->num_enqueues -
				vq_status->num_dequeues;
	}
}

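/*
 * Destroy a virtual queue: refused while jobs are pending; otherwise the
 * H/W queue reference and the status ring (if any) are released.
 */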
int __rte_experimental
rte_qdma_vq_destroy(uint16_t vq_id)
{
	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];

	DPAA2_QDMA_FUNC_TRACE();

	/* In case there are pending jobs on any VQ, return -EBUSY */
	if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
		return -EBUSY;

	rte_spinlock_lock(&qdma_dev.lock);

	if (qdma_vq->exclusive_hw_queue) {
		free_hw_queue(qdma_vq->hw_queue);
	} else {
		if (qdma_vq->status_ring)
			rte_ring_free(qdma_vq->status_ring);

		put_hw_queue(qdma_vq->hw_queue);
	}

	memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));

	rte_spinlock_unlock(&qdma_dev.lock);

	return 0;
}

void __rte_experimental
rte_qdma_stop(void)
{
	DPAA2_QDMA_FUNC_TRACE();

	qdma_dev.state = 0;
}

void __rte_experimental
rte_qdma_destroy(void)
{
	DPAA2_QDMA_FUNC_TRACE();

	rte_qdma_reset();
}

static const struct rte_rawdev_ops dpaa2_qdma_ops;

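/* Register every queue of a DPDMAI object as a H/W queue on the global list. */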
static int
add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
{
	struct qdma_hw_queue *queue;
	int i;

	DPAA2_QDMA_FUNC_TRACE();

	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		queue = rte_zmalloc(NULL, sizeof(struct qdma_hw_queue), 0);
		if (!queue) {
			DPAA2_QDMA_ERR(
				"Memory allocation failed for QDMA queue");
			return -ENOMEM;
		}

		queue->dpdmai_dev = dpdmai_dev;
		queue->queue_id = i;

		TAILQ_INSERT_TAIL(&qdma_queue_list, queue, next);
		qdma_dev.num_hw_queues++;
	}

	return 0;
}

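/* Drop all H/W queues belonging to a DPDMAI object from the global list. */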
778 remove_hw_queues_from_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
780 struct qdma_hw_queue *queue = NULL;
781 struct qdma_hw_queue *tqueue = NULL;
783 DPAA2_QDMA_FUNC_TRACE();
785 TAILQ_FOREACH_SAFE(queue, &qdma_queue_list, next, tqueue) {
786 if (queue->dpdmai_dev == dpdmai_dev) {
787 TAILQ_REMOVE(&qdma_queue_list, queue, next);
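/*
 * Undo dpaa2_dpdmai_dev_init(): unregister the H/W queues, disable and close
 * the DPDMAI object and free the Rx DQ storage.
 */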
static int
dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)
{
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	int ret, i;

	DPAA2_QDMA_FUNC_TRACE();

	/* Remove HW queues from global list */
	remove_hw_queues_from_list(dpdmai_dev);

	ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
			     dpdmai_dev->token);
	if (ret)
		DPAA2_QDMA_ERR("dpdmai disable failed");

	/* Free the DQRR storage of the Rx queues */
	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[i]);

		if (rxq->q_storage) {
			dpaa2_free_dq_storage(rxq->q_storage);
			rte_free(rxq->q_storage);
		}
	}

	/* Close the device at underlying layer */
	ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
	if (ret)
		DPAA2_QDMA_ERR("Failure closing dpdmai device");

	return 0;
}

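/*
 * Initialize a DPDMAI object: open it through the MC, configure its Rx/Tx
 * queues, allocate DQ storage, enable the device and register its queues on
 * the global H/W queue list.
 */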
static int
dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
{
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct dpdmai_rx_queue_cfg rx_queue_cfg;
	struct dpdmai_attr attr;
	struct dpdmai_rx_queue_attr rx_attr;
	struct dpdmai_tx_queue_attr tx_attr;
	int ret, i;

	DPAA2_QDMA_FUNC_TRACE();

	/* Open DPDMAI device */
	dpdmai_dev->dpdmai_id = dpdmai_id;
	dpdmai_dev->dpdmai.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
	ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
			  dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
	if (ret) {
		DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
		return ret;
	}

	/* Get DPDMAI attributes */
	ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
				    dpdmai_dev->token, &attr);
	if (ret) {
		DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
			       ret);
		goto init_err;
	}
	dpdmai_dev->num_queues = attr.num_of_queues;

	/* Set up Rx Queues */
	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		struct dpaa2_queue *rxq;

		memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
		ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
					  CMD_PRI_LOW,
					  dpdmai_dev->token,
					  i, 0, &rx_queue_cfg);
		if (ret) {
			DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
				       ret);
			goto init_err;
		}

		/* Allocate DQ storage for the DPDMAI Rx queues */
		rxq = &(dpdmai_dev->rx_queue[i]);
		rxq->q_storage = rte_malloc("dq_storage",
					    sizeof(struct queue_storage_info_t),
					    RTE_CACHE_LINE_SIZE);
		if (!rxq->q_storage) {
			DPAA2_QDMA_ERR("q_storage allocation failed");
			ret = -ENOMEM;
			goto init_err;
		}

		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
		if (ret) {
			DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
			goto init_err;
		}
	}

	/* Get Rx and Tx queues FQID's */
	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
					  dpdmai_dev->token, i, 0, &rx_attr);
		if (ret) {
			DPAA2_QDMA_ERR("Reading device failed with err: %d",
				       ret);
			goto init_err;
		}
		dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;

		ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
					  dpdmai_dev->token, i, 0, &tx_attr);
		if (ret) {
			DPAA2_QDMA_ERR("Reading device failed with err: %d",
				       ret);
			goto init_err;
		}
		dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
	}

	/* Enable the device */
	ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
			    dpdmai_dev->token);
	if (ret) {
		DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
		goto init_err;
	}

	/* Add the HW queue to the global list */
	ret = add_hw_queues_to_list(dpdmai_dev);
	if (ret) {
		DPAA2_QDMA_ERR("Adding H/W queue to list failed");
		goto init_err;
	}
	DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");

	return 0;
init_err:
	dpaa2_dpdmai_dev_uninit(rawdev);
	return ret;
}

static int
rte_dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
		     struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_rawdev *rawdev;
	int ret;

	DPAA2_QDMA_FUNC_TRACE();

	rawdev = rte_rawdev_pmd_allocate(dpaa2_dev->device.name,
					 sizeof(struct dpaa2_dpdmai_dev),
					 rte_socket_id());
	if (!rawdev) {
		DPAA2_QDMA_ERR("Unable to allocate rawdevice");
		return -EINVAL;
	}

	dpaa2_dev->rawdev = rawdev;
	rawdev->dev_ops = &dpaa2_qdma_ops;
	rawdev->device = &dpaa2_dev->device;
	rawdev->driver_name = dpaa2_drv->driver.name;

	/* Invoke PMD device initialization function */
	ret = dpaa2_dpdmai_dev_init(rawdev, dpaa2_dev->object_id);
	if (ret) {
		rte_rawdev_pmd_release(rawdev);
		return ret;
	}

	return 0;
}

static int
rte_dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_rawdev *rawdev = dpaa2_dev->rawdev;
	int ret;

	DPAA2_QDMA_FUNC_TRACE();

	dpaa2_dpdmai_dev_uninit(rawdev);

	ret = rte_rawdev_pmd_release(rawdev);
	if (ret)
		DPAA2_QDMA_ERR("Device cleanup failed");

	return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_QDMA,
	.probe = rte_dpaa2_qdma_probe,
	.remove = rte_dpaa2_qdma_remove,
};

RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);

RTE_INIT(dpaa2_qdma_init_log)
{
	dpaa2_qdma_logtype = rte_log_register("pmd.raw.dpaa2.qdma");
	if (dpaa2_qdma_logtype >= 0)
		rte_log_set_level(dpaa2_qdma_logtype, RTE_LOG_INFO);
}