1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018-2022 NXP
7 #include <rte_dmadev.h>
8 #include <rte_dmadev_pmd.h>
9 #include <rte_kvargs.h>
11 #include <mc/fsl_dpdmai.h>
13 #include "rte_pmd_dpaa2_qdma.h"
14 #include "dpaa2_qdma.h"
15 #include "dpaa2_qdma_logs.h"
16 /* Dynamic log type identifier */
17 int dpaa2_qdma_logtype;
/* Cache-attribute words used when building frame descriptors; selected per
 * SoC family during device init (see dpaa2_dpdmai_dev_init below). */
19 uint32_t dpaa2_coherent_no_alloc_cache;
20 uint32_t dpaa2_coherent_alloc_cache;
/*
 * dev_info_get op: report DMA capabilities and vchan/descriptor limits to
 * the rte_dma framework.
 * NOTE(review): the visible text has extraction gaps here — the return
 * type, the info_sz parameter line and the function close are missing.
 */
23 dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
24 struct rte_dma_info *dev_info,
28 RTE_SET_USED(info_sz);
/* All four transfer directions are advertised; COPY is the visible op. */
30 dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
31 RTE_DMA_CAPA_MEM_TO_DEV |
32 RTE_DMA_CAPA_DEV_TO_DEV |
33 RTE_DMA_CAPA_DEV_TO_MEM |
35 RTE_DMA_CAPA_OPS_COPY;
36 dev_info->max_vchans = DPAA2_QDMA_MAX_VHANS;
37 dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
38 dev_info->min_desc = DPAA2_QDMA_MIN_DESC;
/*
 * dev_configure op: allocate the virtual-queue array sized by
 * dev_conf->nb_vchans. Per the comment below, configuration is refused
 * while the device is running.
 */
44 dpaa2_qdma_configure(struct rte_dma_dev *dev,
45 const struct rte_dma_conf *dev_conf,
48 char name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
49 struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
50 struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
52 DPAA2_QDMA_FUNC_TRACE();
54 RTE_SET_USED(conf_sz);
56 /* In case QDMA device is not in stopped state, return -EBUSY */
57 if (qdma_dev->state == 1) {
59 "Device is in running state. Stop before config.");
63 /* Allocate Virtual Queues */
/* NOTE(review): sprintf is unbounded — "qdma_%d_vq" fits in 32 bytes for
 * any int16 dev_id, but snprintf(name, sizeof(name), ...) would be safer. */
64 sprintf(name, "qdma_%d_vq", dev->data->dev_id);
65 qdma_dev->vqs = rte_malloc(name,
66 (sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans),
69 DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
72 qdma_dev->num_vqs = dev_conf->nb_vchans;
77 /* Enable FD in Ultra Short format */
/*
 * Public PMD-specific API: flag the given vchan so its frame descriptors
 * are built in the short format.
 */
79 rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan)
/* Reach the per-device private data through the fast-path object table. */
81 struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
82 struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
83 struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
85 qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SHORT_FORMAT;
88 /* Enable internal SG processing */
/*
 * Public PMD-specific API: flag the given vchan so its frame descriptors
 * use the scatter-gather format.
 */
90 rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan)
92 struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
93 struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
94 struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
96 qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SG_FORMAT;
/*
 * Public PMD-specific API: store the caller's RBP (route-by-port, per the
 * dpaa2 API naming) configuration into the vchan. The struct is copied,
 * so the caller keeps ownership of rbp_config.
 */
101 rte_dpaa2_qdma_vchan_rbp_enable(int16_t dev_id, uint16_t vchan,
102 struct rte_dpaa2_qdma_rbp *rbp_config)
104 struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
105 struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
106 struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
108 memcpy(&qdma_dev->vqs[vchan].rbp, rbp_config,
109 sizeof(struct rte_dpaa2_qdma_rbp));
/*
 * vchan_setup op: validate format flags, then allocate the per-vchan
 * status ring and the FLE/job mempools.
 * NOTE(review): extraction gaps — error-return statements and some
 * declarations (ring_name, pool_name, pool_size) are not visible here.
 */
113 dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
114 const struct rte_dma_vchan_conf *conf,
117 struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
118 struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
122 int fd_long_format = 1;
125 DPAA2_QDMA_FUNC_TRACE();
127 RTE_SET_USED(conf_sz);
/* Pick up format choices made earlier via the rte_dpaa2_qdma_vchan_*
 * enable helpers. */
129 if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SG_FORMAT)
132 if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SHORT_FORMAT)
135 if (dev->data->dev_conf.enable_silent)
136 qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_NO_RESPONSE;
/* SG format constraints: single (physical) queue and long FD only. */
139 if (qdma_dev->num_vqs != 1) {
141 "qDMA SG format only supports physical queue!");
144 if (!fd_long_format) {
146 "qDMA SG format only supports long FD format!");
149 pool_size = QDMA_FLE_SG_POOL_SIZE;
151 pool_size = QDMA_FLE_SINGLE_POOL_SIZE;
/* With exactly one vchan the HW queue is owned exclusively by it. */
154 if (qdma_dev->num_vqs == 1)
155 qdma_dev->vqs[vchan].exclusive_hw_queue = 1;
157 /* Allocate a Ring for Virtual Queue in VQ mode */
158 snprintf(ring_name, sizeof(ring_name), "status ring %d %d",
159 dev->data->dev_id, vchan);
160 qdma_dev->vqs[vchan].status_ring = rte_ring_create(ring_name,
161 conf->nb_desc, rte_socket_id(), 0)
162 if (!qdma_dev->vqs[vchan].status_ring) {
163 DPAA2_QDMA_ERR("Status ring creation failed for vq");
/* Frame-list-entry pool, one element per descriptor. */
168 snprintf(pool_name, sizeof(pool_name),
169 "qdma_fle_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
170 qdma_dev->vqs[vchan].fle_pool = rte_mempool_create(pool_name,
171 conf->nb_desc, pool_size,
172 QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
173 NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
174 if (!qdma_dev->vqs[vchan].fle_pool) {
175 DPAA2_QDMA_ERR("qdma_fle_pool create failed");
/* Job pool, sized identically to the FLE pool. */
179 snprintf(pool_name, sizeof(pool_name),
180 "qdma_job_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
181 qdma_dev->vqs[vchan].job_pool = rte_mempool_create(pool_name,
182 conf->nb_desc, pool_size,
183 QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
184 NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
185 if (!qdma_dev->vqs[vchan].job_pool) {
186 DPAA2_QDMA_ERR("qdma_job_pool create failed");
190 qdma_dev->vqs[vchan].dpdmai_dev = dpdmai_dev;
191 qdma_dev->vqs[vchan].nb_desc = conf->nb_desc;
/*
 * dev_start op. Only the locals and the trace call are visible here —
 * the actual start logic (presumably setting qdma_dev->state) is in
 * extraction-gapped lines; confirm against the full source.
 */
197 dpaa2_qdma_start(struct rte_dma_dev *dev)
199 struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
200 struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
202 DPAA2_QDMA_FUNC_TRACE();
/*
 * dev_stop op. Only the locals and the trace call are visible here —
 * the actual stop logic (presumably clearing qdma_dev->state) is in
 * extraction-gapped lines; confirm against the full source.
 */
211 dpaa2_qdma_stop(struct rte_dma_dev *dev)
212 struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
213 struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
215 DPAA2_QDMA_FUNC_TRACE();
/*
 * Reset the QDMA device: refuse while running or while any in-use vchan
 * still has in-flight jobs (enqueues != dequeues), then free every
 * vchan's status ring, the vq array, and zero the vq count.
 */
224 dpaa2_qdma_reset(struct rte_dma_dev *dev)
225 struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
226 struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
229 DPAA2_QDMA_FUNC_TRACE();
231 /* In case QDMA device is not in stopped state, return -EBUSY */
232 if (qdma_dev->state == 1) {
234 "Device is in running state. Stop before reset.");
238 /* In case there are pending jobs on any VQ, return -EBUSY */
239 for (i = 0; i < qdma_dev->num_vqs; i++) {
240 if (qdma_dev->vqs[i].in_use && (qdma_dev->vqs[i].num_enqueues !=
241 qdma_dev->vqs[i].num_dequeues)) {
242 DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
247 /* Reset and free virtual queues */
248 for (i = 0; i < qdma_dev->num_vqs; i++) {
/* rte_ring_free(NULL) is a no-op, so the guard is belt-and-braces. */
249 if (qdma_dev->vqs[i].status_ring)
250 rte_ring_free(qdma_dev->vqs[i].status_ring);
253 rte_free(qdma_dev->vqs);
254 qdma_dev->vqs = NULL;
256 /* Reset QDMA device structure */
257 qdma_dev->num_vqs = 0;
/* dev_close op: closing the device is implemented as a full reset. */
264 dpaa2_qdma_close(__rte_unused struct rte_dma_dev *dev)
265 DPAA2_QDMA_FUNC_TRACE();
267 dpaa2_qdma_reset(dev);
/* rte_dma framework callback table for this PMD. */
272 static struct rte_dma_dev_ops dpaa2_qdma_ops = {
273 .dev_info_get = dpaa2_qdma_info_get,
274 .dev_configure = dpaa2_qdma_configure,
275 .dev_start = dpaa2_qdma_start,
276 .dev_stop = dpaa2_qdma_stop,
277 .dev_close = dpaa2_qdma_close,
278 .vchan_setup = dpaa2_qdma_vchan_setup,
/*
 * Tear down the underlying DPDMAI object: disable it, release the Rx
 * queue's DQRR storage, then close the MC object. Errors are logged but
 * teardown continues (best-effort cleanup).
 */
283 dpaa2_dpdmai_dev_uninit(struct rte_dma_dev *dev)
284 struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
287 DPAA2_QDMA_FUNC_TRACE();
289 ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
292 DPAA2_QDMA_ERR("dmdmai disable failed");
294 /* Free the DQRR storage allocated for the Rx queue at init time */
295 struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
297 if (rxq->q_storage) {
298 dpaa2_free_dq_storage(rxq->q_storage);
299 rte_free(rxq->q_storage);
302 /* Close the device at underlying layer*/
303 ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
305 DPAA2_QDMA_ERR("Failure closing dpdmai device");
/*
 * Initialize the DPDMAI hardware object backing this dmadev: open it via
 * the MC portal, read its attributes, program and allocate storage for
 * the Rx queue, look up Rx/Tx FQIDs, enable the object, select per-SoC
 * coherence cache attributes, and finally reset the QDMA software state.
 * On failure after open, falls through to dpaa2_dpdmai_dev_uninit (per
 * the cleanup call visible at the end).
 */
312 dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
313 struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
314 struct dpdmai_rx_queue_cfg rx_queue_cfg;
315 struct dpdmai_attr attr;
316 struct dpdmai_rx_queue_attr rx_attr;
317 struct dpdmai_tx_queue_attr tx_attr;
318 struct dpaa2_queue *rxq;
321 DPAA2_QDMA_FUNC_TRACE();
323 /* Open DPDMAI device */
324 dpdmai_dev->dpdmai_id = dpdmai_id;
325 dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
/* NOTE(review): this rte_malloc result is dereferenced later without a
 * NULL check in the visible lines — confirm/handle OOM in full source. */
326 dpdmai_dev->qdma_dev = rte_malloc(NULL, sizeof(struct qdma_device),
327 RTE_CACHE_LINE_SIZE);
328 ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
329 dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
331 DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
335 /* Get DPDMAI attributes */
336 ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
337 dpdmai_dev->token, &attr);
339 DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
343 dpdmai_dev->num_queues = attr.num_of_queues;
345 /* Set up Rx Queue */
346 memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
347 ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
350 0, 0, &rx_queue_cfg);
352 DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
357 /* Allocate DQ storage for the DPDMAI Rx queues */
358 rxq = &(dpdmai_dev->rx_queue[0]);
359 rxq->q_storage = rte_malloc("dq_storage",
360 sizeof(struct queue_storage_info_t),
361 RTE_CACHE_LINE_SIZE);
362 if (!rxq->q_storage) {
363 DPAA2_QDMA_ERR("q_storage allocation failed");
368 memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
369 ret = dpaa2_alloc_dq_storage(rxq->q_storage);
371 DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
375 /* Get Rx and Tx queues FQID */
376 ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
377 dpdmai_dev->token, 0, 0, &rx_attr);
379 DPAA2_QDMA_ERR("Reading device failed with err: %d",
383 dpdmai_dev->rx_queue[0].fqid = rx_attr.fqid;
385 ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
386 dpdmai_dev->token, 0, 0, &tx_attr);
388 DPAA2_QDMA_ERR("Reading device failed with err: %d",
392 dpdmai_dev->tx_queue[0].fqid = tx_attr.fqid;
394 /* Enable the device */
395 ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
398 DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
/* One-time global setup: pick FD cache attributes by SoC family
 * (LX2160A uses different coherence encodings than earlier SoCs). */
402 if (!dpaa2_coherent_no_alloc_cache) {
403 if (dpaa2_svr_family == SVR_LX2160A) {
404 dpaa2_coherent_no_alloc_cache =
405 DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
406 dpaa2_coherent_alloc_cache =
407 DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
409 dpaa2_coherent_no_alloc_cache =
410 DPAA2_COHERENT_NO_ALLOCATE_CACHE;
411 dpaa2_coherent_alloc_cache =
412 DPAA2_COHERENT_ALLOCATE_CACHE;
416 DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
418 /* Reset the QDMA device */
419 ret = dpaa2_qdma_reset(dev);
421 DPAA2_QDMA_ERR("Resetting QDMA failed");
/* Error path: undo everything done above. */
427 dpaa2_dpdmai_dev_uninit(dev);
/*
 * Bus probe callback: allocate an rte_dma device for this DPAA2 object,
 * wire up the ops table and fast-path private data, run the DPDMAI init,
 * and mark the dmadev READY. Releases the dmadev if init fails.
 */
432 dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
433 struct rte_dpaa2_device *dpaa2_dev)
435 struct rte_dma_dev *dmadev;
438 DPAA2_QDMA_FUNC_TRACE();
440 RTE_SET_USED(dpaa2_drv);
442 dmadev = rte_dma_pmd_allocate(dpaa2_dev->device.name,
444 sizeof(struct dpaa2_dpdmai_dev));
446 DPAA2_QDMA_ERR("Unable to allocate dmadevice");
450 dpaa2_dev->dmadev = dmadev;
451 dmadev->dev_ops = &dpaa2_qdma_ops;
452 dmadev->device = &dpaa2_dev->device;
453 dmadev->fp_obj->dev_private = dmadev->data->dev_private;
455 /* Invoke PMD device initialization function */
456 ret = dpaa2_dpdmai_dev_init(dmadev, dpaa2_dev->object_id);
458 rte_dma_pmd_release(dpaa2_dev->device.name);
462 dmadev->state = RTE_DMA_DEV_READY;
/*
 * Bus remove callback: uninit the DPDMAI hardware object, then release
 * the rte_dma device. Release failure is logged only.
 */
467 dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
469 struct rte_dma_dev *dmadev = dpaa2_dev->dmadev;
472 DPAA2_QDMA_FUNC_TRACE();
474 dpaa2_dpdmai_dev_uninit(dmadev);
476 ret = rte_dma_pmd_release(dpaa2_dev->device.name);
478 DPAA2_QDMA_ERR("Device cleanup failed");
483 static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd;
/* DPAA2 bus driver descriptor: IOVA-as-VA, DPDMAI object type. */
485 static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
486 .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
487 .drv_type = DPAA2_QDMA,
488 .probe = dpaa2_qdma_probe,
489 .remove = dpaa2_qdma_remove,
492 RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
493 RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
494 "no_prefetch=<int> ");
/* Register the dynamic log type under the variable this file declares and
 * the DPAA2_QDMA_* log macros use. The previous name "dpaa_qdma2_logtype"
 * was a typo: it created an unrelated logtype and left the real
 * dpaa2_qdma_logtype unregistered (stuck at its zero initializer). */
495 RTE_LOG_REGISTER_DEFAULT(dpaa2_qdma_logtype, INFO);