/* SPDX-License-Identifier: BSD-3-Clause */

#include <string.h>

#include <rte_eal.h>
#include <rte_fslmc.h>
#include <rte_atomic.h>
#include <rte_lcore.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_mempool.h>

#include <mc/fsl_dpdmai.h>
#include <portal/dpaa2_hw_pvt.h>
#include <portal/dpaa2_hw_dpio.h>

#include "dpaa2_qdma.h"
#include "dpaa2_qdma_logs.h"
#include "rte_pmd_dpaa2_qdma.h"

/* Dynamic log type identifier */
int dpaa2_qdma_logtype;

/* QDMA device */
static struct qdma_device qdma_dev;

/* QDMA H/W queues list */
TAILQ_HEAD(qdma_hw_queue_list, qdma_hw_queue);
static struct qdma_hw_queue_list qdma_queue_list
	= TAILQ_HEAD_INITIALIZER(qdma_queue_list);

/* QDMA Virtual Queues */
struct qdma_virt_queue *qdma_vqs;

/* QDMA per core data */
struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];

static struct qdma_hw_queue *
alloc_hw_queue(uint32_t lcore_id)
{
	struct qdma_hw_queue *queue = NULL;

	DPAA2_QDMA_FUNC_TRACE();

	/* Get a free queue from the list and mark it as taken */
	TAILQ_FOREACH(queue, &qdma_queue_list, next) {
		if (queue->num_users == 0) {
			queue->lcore_id = lcore_id;
			queue->num_users++;
			break;
		}
	}

	return queue;
}

static void
free_hw_queue(struct qdma_hw_queue *queue)
{
	DPAA2_QDMA_FUNC_TRACE();

	queue->num_users--;
}

static struct qdma_hw_queue *
get_hw_queue(uint32_t lcore_id)
{
	struct qdma_per_core_info *core_info;
	struct qdma_hw_queue *queue, *temp;
	uint32_t least_num_users;
	int num_hw_queues, i;

	DPAA2_QDMA_FUNC_TRACE();

	core_info = &qdma_core_info[lcore_id];
	num_hw_queues = core_info->num_hw_queues;

	/*
	 * Allocate a H/W queue if this core holds fewer queues
	 * than the configured per-core maximum.
	 */
	if (num_hw_queues < qdma_dev.max_hw_queues_per_core) {
		queue = alloc_hw_queue(lcore_id);
		if (queue) {
			core_info->hw_queues[num_hw_queues] = queue;
			core_info->num_hw_queues++;
			return queue;
		}
	}

	queue = core_info->hw_queues[0];
	/* In case there is no queue associated with the core return NULL */
	if (!queue)
		return NULL;

	/* Fetch the least loaded H/W queue */
	least_num_users = core_info->hw_queues[0]->num_users;
	for (i = 0; i < num_hw_queues; i++) {
		temp = core_info->hw_queues[i];
		if (temp->num_users < least_num_users) {
			least_num_users = temp->num_users;
			queue = temp;
		}
	}

	queue->num_users++;

	return queue;
}

static void
put_hw_queue(struct qdma_hw_queue *queue)
{
	struct qdma_per_core_info *core_info;
	int lcore_id, num_hw_queues, i;

	DPAA2_QDMA_FUNC_TRACE();

	/*
	 * If this is the last user of the queue, free it and
	 * remove it from the QDMA core info.
	 */
	if (queue->num_users == 1) {
		free_hw_queue(queue);

		/* Remove the physical queue from core info */
		lcore_id = queue->lcore_id;
		core_info = &qdma_core_info[lcore_id];
		num_hw_queues = core_info->num_hw_queues;
		for (i = 0; i < num_hw_queues; i++) {
			if (queue == core_info->hw_queues[i])
				break;
		}
		/* Shift the remaining queues one slot down */
		for (; i < num_hw_queues - 1; i++)
			core_info->hw_queues[i] = core_info->hw_queues[i + 1];
		core_info->hw_queues[i] = NULL;
		core_info->num_hw_queues--;
	} else {
		queue->num_users--;
	}
}

int __rte_experimental
rte_qdma_init(void)
{
	DPAA2_QDMA_FUNC_TRACE();

	rte_spinlock_init(&qdma_dev.lock);

	return 0;
}

void __rte_experimental
rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr)
{
	DPAA2_QDMA_FUNC_TRACE();

	qdma_attr->num_hw_queues = qdma_dev.num_hw_queues;
}

int __rte_experimental
rte_qdma_reset(void)
{
	struct qdma_hw_queue *queue;
	int i;

	DPAA2_QDMA_FUNC_TRACE();

	/* In case QDMA device is not in stopped state, return -EBUSY */
	if (qdma_dev.state == 1) {
		DPAA2_QDMA_ERR(
			"Device is in running state. Stop before reset.");
		return -EBUSY;
	}

	/* In case there are pending jobs on any VQ, return -EBUSY */
	for (i = 0; i < qdma_dev.max_vqs; i++) {
		if (qdma_vqs[i].in_use && (qdma_vqs[i].num_enqueues !=
		    qdma_vqs[i].num_dequeues)) {
			DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
			return -EBUSY;
		}
	}

	/* Reset HW queues */
	TAILQ_FOREACH(queue, &qdma_queue_list, next)
		queue->num_users = 0;

	/* Reset and free virtual queues */
	for (i = 0; i < qdma_dev.max_vqs; i++) {
		if (qdma_vqs[i].status_ring)
			rte_ring_free(qdma_vqs[i].status_ring);
	}
	if (qdma_vqs)
		rte_free(qdma_vqs);
	qdma_vqs = NULL;

	/* Reset per core info */
	memset(&qdma_core_info, 0,
		sizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);

	/* Free the FLE pool */
	if (qdma_dev.fle_pool)
		rte_mempool_free(qdma_dev.fle_pool);

	/* Reset QDMA device structure */
	qdma_dev.mode = RTE_QDMA_MODE_HW;
	qdma_dev.max_hw_queues_per_core = 0;
	qdma_dev.fle_pool = NULL;
	qdma_dev.fle_pool_count = 0;
	qdma_dev.max_vqs = 0;

	return 0;
}

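/*
 * Example (illustrative sketch, not part of the driver): rte_qdma_reset()
 * only runs on a stopped device and fails with -EBUSY while jobs are
 * still pending on any VQ, so an application teardown path looks like:
 *
 *	rte_qdma_stop();
 *	if (rte_qdma_reset() == -EBUSY)
 *		...	// drain the pending jobs, then retry the reset
 */
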
int __rte_experimental
rte_qdma_configure(struct rte_qdma_config *qdma_config)
{
	int ret;

	DPAA2_QDMA_FUNC_TRACE();

	/* In case QDMA device is not in stopped state, return -EBUSY */
	if (qdma_dev.state == 1) {
		DPAA2_QDMA_ERR(
			"Device is in running state. Stop before config.");
		return -EBUSY;
	}

	/* Reset the QDMA device */
	ret = rte_qdma_reset();
	if (ret) {
		DPAA2_QDMA_ERR("Resetting QDMA failed");
		return ret;
	}

	/* Set mode */
	qdma_dev.mode = qdma_config->mode;

	/* Set max HW queue per core */
	if (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {
		DPAA2_QDMA_ERR("H/W queues per core is more than: %d",
			       MAX_HW_QUEUE_PER_CORE);
		return -EINVAL;
	}
	qdma_dev.max_hw_queues_per_core =
		qdma_config->max_hw_queues_per_core;

	/* Allocate Virtual Queues */
	qdma_vqs = rte_malloc("qdma_virtual_queues",
			(sizeof(struct qdma_virt_queue) * qdma_config->max_vqs),
			RTE_CACHE_LINE_SIZE);
	if (!qdma_vqs) {
		DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
		return -ENOMEM;
	}
	qdma_dev.max_vqs = qdma_config->max_vqs;

	/* Allocate the FLE pool holding frame-list entries for in-flight jobs */
	qdma_dev.fle_pool = rte_mempool_create("qdma_fle_pool",
			qdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,
			QDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,
			NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
	if (!qdma_dev.fle_pool) {
		DPAA2_QDMA_ERR("qdma_fle_pool create failed");
		rte_free(qdma_vqs);
		qdma_vqs = NULL;
		return -ENOMEM;
	}
	qdma_dev.fle_pool_count = qdma_config->fle_pool_count;

	return 0;
}

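/*
 * Example (illustrative sketch, not part of the driver): a minimal
 * application-side bring-up sequence for this API. The numeric values
 * are hypothetical; suitable sizes depend on the platform and workload.
 *
 *	struct rte_qdma_config cfg = {
 *		.max_hw_queues_per_core = 2,
 *		.mode = RTE_QDMA_MODE_HW,
 *		.max_vqs = 32,
 *		.fle_pool_count = 4096,
 *	};
 *
 *	rte_qdma_init();
 *	if (rte_qdma_configure(&cfg))
 *		rte_exit(EXIT_FAILURE, "QDMA configure failed\n");
 *	rte_qdma_start();
 */
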
int __rte_experimental
rte_qdma_start(void)
{
	DPAA2_QDMA_FUNC_TRACE();

	qdma_dev.state = 1;

	return 0;
}

int __rte_experimental
rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)
{
	char ring_name[32];
	int i;

	DPAA2_QDMA_FUNC_TRACE();

	rte_spinlock_lock(&qdma_dev.lock);

	/* Get a free Virtual Queue */
	for (i = 0; i < qdma_dev.max_vqs; i++) {
		if (qdma_vqs[i].in_use == 0)
			break;
	}

	/* Return in case no VQ is free */
	if (i == qdma_dev.max_vqs) {
		rte_spinlock_unlock(&qdma_dev.lock);
		return -ENODEV;
	}

	if (qdma_dev.mode == RTE_QDMA_MODE_HW ||
	    (flags & RTE_QDMA_VQ_EXCLUSIVE_PQ)) {
		/* Allocate HW queue for a VQ */
		qdma_vqs[i].hw_queue = alloc_hw_queue(lcore_id);
		qdma_vqs[i].exclusive_hw_queue = 1;
	} else {
		/* Allocate a ring for the Virtual Queue in virtual mode */
		snprintf(ring_name, sizeof(ring_name), "status ring %d", i);
		qdma_vqs[i].status_ring = rte_ring_create(ring_name,
			qdma_dev.fle_pool_count, rte_socket_id(), 0);
		if (!qdma_vqs[i].status_ring) {
			DPAA2_QDMA_ERR("Status ring creation failed for vq");
			rte_spinlock_unlock(&qdma_dev.lock);
			return -ENOMEM;
		}

		/* Get a HW queue (shared) for a VQ */
		qdma_vqs[i].hw_queue = get_hw_queue(lcore_id);
		qdma_vqs[i].exclusive_hw_queue = 0;
	}

	if (qdma_vqs[i].hw_queue == NULL) {
		DPAA2_QDMA_ERR("No H/W queue available for VQ");
		if (qdma_vqs[i].status_ring)
			rte_ring_free(qdma_vqs[i].status_ring);
		qdma_vqs[i].status_ring = NULL;
		rte_spinlock_unlock(&qdma_dev.lock);
		return -ENODEV;
	}

	qdma_vqs[i].in_use = 1;
	qdma_vqs[i].lcore_id = lcore_id;

	rte_spinlock_unlock(&qdma_dev.lock);

	return i;
}

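/*
 * Example (illustrative sketch, not part of the driver): creating a VQ
 * bound to the calling lcore. Passing RTE_QDMA_VQ_EXCLUSIVE_PQ requests
 * a dedicated H/W queue even when the device is in virtual-HW mode; the
 * return value is the VQ id on success or a negative errno.
 *
 *	int vq_id = rte_qdma_vq_create(rte_lcore_id(),
 *				       RTE_QDMA_VQ_EXCLUSIVE_PQ);
 *	if (vq_id < 0)
 *		return vq_id;
 */
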
void __rte_experimental
rte_qdma_vq_stats(uint16_t vq_id,
		  struct rte_qdma_vq_stats *vq_status)
{
	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];

	DPAA2_QDMA_FUNC_TRACE();

	if (qdma_vq->in_use) {
		vq_status->exclusive_hw_queue = qdma_vq->exclusive_hw_queue;
		vq_status->lcore_id = qdma_vq->lcore_id;
		vq_status->num_enqueues = qdma_vq->num_enqueues;
		vq_status->num_dequeues = qdma_vq->num_dequeues;
		vq_status->num_pending_jobs = vq_status->num_enqueues -
				vq_status->num_dequeues;
	}
}

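/*
 * Example (illustrative sketch, not part of the driver): draining a VQ
 * before destroying it by polling the stats until nothing is pending.
 * The dequeue side must keep running for the pending count to drop.
 *
 *	struct rte_qdma_vq_stats st;
 *
 *	do {
 *		rte_qdma_vq_stats(vq_id, &st);
 *	} while (st.num_pending_jobs > 0);
 *	rte_qdma_vq_destroy(vq_id);
 */
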
int __rte_experimental
rte_qdma_vq_destroy(uint16_t vq_id)
{
	struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];

	DPAA2_QDMA_FUNC_TRACE();

	/* In case there are pending jobs on the VQ, return -EBUSY */
	if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
		return -EBUSY;

	rte_spinlock_lock(&qdma_dev.lock);

	if (qdma_vq->exclusive_hw_queue) {
		free_hw_queue(qdma_vq->hw_queue);
	} else {
		if (qdma_vq->status_ring)
			rte_ring_free(qdma_vq->status_ring);

		put_hw_queue(qdma_vq->hw_queue);
	}

	memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));

	rte_spinlock_unlock(&qdma_dev.lock);

	return 0;
}

void __rte_experimental
rte_qdma_stop(void)
{
	DPAA2_QDMA_FUNC_TRACE();

	qdma_dev.state = 0;
}

void __rte_experimental
rte_qdma_destroy(void)
{
	DPAA2_QDMA_FUNC_TRACE();

	rte_qdma_reset();
}

static const struct rte_rawdev_ops dpaa2_qdma_ops;

static int
add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
{
	struct qdma_hw_queue *queue;
	int i;

	DPAA2_QDMA_FUNC_TRACE();

	for (i = 0; i < dpdmai_dev->num_queues; i++) {
		queue = rte_zmalloc(NULL, sizeof(struct qdma_hw_queue), 0);
		if (!queue) {
			DPAA2_QDMA_ERR(
				"Memory allocation failed for QDMA queue");
			return -ENOMEM;
		}

		queue->dpdmai_dev = dpdmai_dev;
		queue->queue_id = i;

		TAILQ_INSERT_TAIL(&qdma_queue_list, queue, next);
		qdma_dev.num_hw_queues++;
	}

	return 0;
}

static void
remove_hw_queues_from_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
{
	struct qdma_hw_queue *queue = NULL;
	struct qdma_hw_queue *tqueue = NULL;

	DPAA2_QDMA_FUNC_TRACE();

	TAILQ_FOREACH_SAFE(queue, &qdma_queue_list, next, tqueue) {
		if (queue->dpdmai_dev == dpdmai_dev) {
			TAILQ_REMOVE(&qdma_queue_list, queue, next);
			rte_free(queue);
			qdma_dev.num_hw_queues--;
		}
	}
}

static int
dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)
{
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	int ret, i;

	DPAA2_QDMA_FUNC_TRACE();

	/* For secondary processes, the primary does all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Remove HW queues from global list */
	remove_hw_queues_from_list(dpdmai_dev);

	ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
			     dpdmai_dev->token);
	if (ret)
		DPAA2_QDMA_ERR("dpdmai disable failed");

	/* Free the DQRR storage of the Rx queues */
	for (i = 0; i < DPDMAI_PRIO_NUM; i++) {
		struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[i]);

		if (rxq->q_storage) {
			dpaa2_free_dq_storage(rxq->q_storage);
			rte_free(rxq->q_storage);
		}
	}

	/* Close the device at underlying layer */
	ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
	if (ret)
		DPAA2_QDMA_ERR("Failure closing dpdmai device");

	return 0;
}

static int
dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
{
	struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
	struct dpdmai_rx_queue_cfg rx_queue_cfg;
	struct dpdmai_attr attr;
	struct dpdmai_rx_queue_attr rx_attr;
	struct dpdmai_tx_queue_attr tx_attr;
	int ret, i;

	DPAA2_QDMA_FUNC_TRACE();

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Open DPDMAI device */
	dpdmai_dev->dpdmai_id = dpdmai_id;
	dpdmai_dev->dpdmai.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
	ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
			  dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
	if (ret) {
		DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
		return ret;
	}

	/* Get DPDMAI attributes */
	ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
				    dpdmai_dev->token, &attr);
	if (ret) {
		DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
			       ret);
		goto init_err;
	}
	dpdmai_dev->num_queues = attr.num_of_priorities;

	/* Set up Rx Queues */
	for (i = 0; i < attr.num_of_priorities; i++) {
		struct dpaa2_queue *rxq;

		memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
		ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
					  dpdmai_dev->token, i, &rx_queue_cfg);
		if (ret) {
			DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
				       ret);
			goto init_err;
		}

		/* Allocate DQ storage for the DPDMAI Rx queues */
		rxq = &(dpdmai_dev->rx_queue[i]);
		rxq->q_storage = rte_malloc("dq_storage",
					    sizeof(struct queue_storage_info_t),
					    RTE_CACHE_LINE_SIZE);
		if (!rxq->q_storage) {
			DPAA2_QDMA_ERR("q_storage allocation failed");
			ret = -ENOMEM;
			goto init_err;
		}

		memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
		ret = dpaa2_alloc_dq_storage(rxq->q_storage);
		if (ret) {
			DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
			goto init_err;
		}
	}

	/* Get Rx and Tx queues FQID's */
	for (i = 0; i < DPDMAI_PRIO_NUM; i++) {
		ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
					  dpdmai_dev->token, i, &rx_attr);
		if (ret) {
			DPAA2_QDMA_ERR("Reading device failed with err: %d",
				       ret);
			goto init_err;
		}
		dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;

		ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
					  dpdmai_dev->token, i, &tx_attr);
		if (ret) {
			DPAA2_QDMA_ERR("Reading device failed with err: %d",
				       ret);
			goto init_err;
		}
		dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
	}

	/* Enable the device */
	ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
			    dpdmai_dev->token);
	if (ret) {
		DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
		goto init_err;
	}

	/* Add the HW queue to the global list */
	ret = add_hw_queues_to_list(dpdmai_dev);
	if (ret) {
		DPAA2_QDMA_ERR("Adding H/W queue to list failed");
		goto init_err;
	}
	DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");

	return 0;
init_err:
	dpaa2_dpdmai_dev_uninit(rawdev);
	return ret;
}

static int
rte_dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
		     struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_rawdev *rawdev;
	int ret;

	DPAA2_QDMA_FUNC_TRACE();

	rawdev = rte_rawdev_pmd_allocate(dpaa2_dev->device.name,
					 sizeof(struct dpaa2_dpdmai_dev),
					 rte_socket_id());
	if (!rawdev) {
		DPAA2_QDMA_ERR("Unable to allocate rawdevice");
		return -EINVAL;
	}

	dpaa2_dev->rawdev = rawdev;
	rawdev->dev_ops = &dpaa2_qdma_ops;
	rawdev->device = &dpaa2_dev->device;
	rawdev->driver_name = dpaa2_drv->driver.name;

	/* Invoke PMD device initialization function */
	ret = dpaa2_dpdmai_dev_init(rawdev, dpaa2_dev->object_id);
	if (ret) {
		rte_rawdev_pmd_release(rawdev);
		return ret;
	}

	return 0;
}

static int
rte_dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_rawdev *rawdev = dpaa2_dev->rawdev;
	int ret;

	DPAA2_QDMA_FUNC_TRACE();

	dpaa2_dpdmai_dev_uninit(rawdev);

	ret = rte_rawdev_pmd_release(rawdev);
	if (ret)
		DPAA2_QDMA_ERR("Device cleanup failed");

	return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
	.drv_type = DPAA2_QDMA,
	.probe = rte_dpaa2_qdma_probe,
	.remove = rte_dpaa2_qdma_remove,
};

RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);

RTE_INIT(dpaa2_qdma_init_log);
static void
dpaa2_qdma_init_log(void)
{
	dpaa2_qdma_logtype = rte_log_register("pmd.raw.dpaa2.qdma");
	if (dpaa2_qdma_logtype >= 0)
		rte_log_set_level(dpaa2_qdma_logtype, RTE_LOG_INFO);
}
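
/*
 * Example (illustrative note, not part of the driver): the log type
 * registered above can be made more verbose at application start-up
 * with the standard EAL option, e.g.:
 *
 *	--log-level=pmd.raw.dpaa2.qdma,8
 *
 * where 8 corresponds to RTE_LOG_DEBUG.
 */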