dma/dpaa2: add driver-specific configuration API
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2022 NXP
 */

#include <rte_eal.h>
#include <rte_fslmc.h>
#include <rte_dmadev.h>
#include <rte_dmadev_pmd.h>
#include <rte_kvargs.h>

#include <mc/fsl_dpdmai.h>

#include "rte_pmd_dpaa2_qdma.h"
#include "dpaa2_qdma.h"
#include "dpaa2_qdma_logs.h"

/* Dynamic log type identifier */
int dpaa2_qdma_logtype;

uint32_t dpaa2_coherent_no_alloc_cache;
uint32_t dpaa2_coherent_alloc_cache;

static int
dpaa2_qdma_info_get(const struct rte_dma_dev *dev,
                    struct rte_dma_info *dev_info,
                    uint32_t info_sz)
{
        RTE_SET_USED(dev);
        RTE_SET_USED(info_sz);

        dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
                             RTE_DMA_CAPA_MEM_TO_DEV |
                             RTE_DMA_CAPA_DEV_TO_DEV |
                             RTE_DMA_CAPA_DEV_TO_MEM |
                             RTE_DMA_CAPA_SILENT |
                             RTE_DMA_CAPA_OPS_COPY;
        dev_info->max_vchans = DPAA2_QDMA_MAX_VHANS;
        dev_info->max_desc = DPAA2_QDMA_MAX_DESC;
        dev_info->min_desc = DPAA2_QDMA_MIN_DESC;

        return 0;
}
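
/*
 * Illustrative only (not part of the driver): an application is expected
 * to read these limits back through the generic dmadev API before
 * configuring the device. A minimal sketch, assuming dev_id refers to
 * this device:
 *
 *      struct rte_dma_info info;
 *
 *      if (rte_dma_info_get(dev_id, &info) != 0 ||
 *          !(info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM))
 *              return -ENOTSUP;
 */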

static int
dpaa2_qdma_configure(struct rte_dma_dev *dev,
                     const struct rte_dma_conf *dev_conf,
                     uint32_t conf_sz)
{
        char name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
        struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
        struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;

        DPAA2_QDMA_FUNC_TRACE();

        RTE_SET_USED(conf_sz);

        /* In case QDMA device is not in stopped state, return -EBUSY */
        if (qdma_dev->state == 1) {
                DPAA2_QDMA_ERR(
                        "Device is in running state. Stop before config.");
                return -EBUSY;
        }

        /* Allocate virtual queues */
        snprintf(name, sizeof(name), "qdma_%d_vq", dev->data->dev_id);
        qdma_dev->vqs = rte_malloc(name,
                        (sizeof(struct qdma_virt_queue) * dev_conf->nb_vchans),
                        RTE_CACHE_LINE_SIZE);
        if (!qdma_dev->vqs) {
                DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
                return -ENOMEM;
        }
        qdma_dev->num_vqs = dev_conf->nb_vchans;

        return 0;
}
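
/*
 * Configuration sketch from the application side (illustrative; assumes
 * nb_vchans does not exceed the max_vchans reported by info_get above):
 *
 *      struct rte_dma_conf conf = { .nb_vchans = 1 };
 *
 *      if (rte_dma_configure(dev_id, &conf) != 0)
 *              rte_exit(EXIT_FAILURE, "dma config failed\n");
 */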

/* Enable FD in Ultra Short format */
void
rte_dpaa2_qdma_vchan_fd_us_enable(int16_t dev_id, uint16_t vchan)
{
        struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
        struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
        struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;

        qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SHORT_FORMAT;
}

/* Enable internal SG processing */
void
rte_dpaa2_qdma_vchan_internal_sg_enable(int16_t dev_id, uint16_t vchan)
{
        struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
        struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
        struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;

        qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_FD_SG_FORMAT;
}

/* Enable RBP */
void
rte_dpaa2_qdma_vchan_rbp_enable(int16_t dev_id, uint16_t vchan,
                                struct rte_dpaa2_qdma_rbp *rbp_config)
{
        struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
        struct dpaa2_dpdmai_dev *dpdmai_dev = obj->dev_private;
        struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;

        memcpy(&qdma_dev->vqs[vchan].rbp, rbp_config,
                        sizeof(struct rte_dpaa2_qdma_rbp));
}
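
/*
 * Ordering note: the three driver-specific helpers above write into
 * qdma_dev->vqs[], which only exists after dpaa2_qdma_configure() has
 * run, so they belong between the generic configure and vchan_setup
 * calls. A hedged usage sketch:
 *
 *      rte_dma_configure(dev_id, &conf);
 *      rte_dpaa2_qdma_vchan_fd_us_enable(dev_id, 0);
 *      rte_dma_vchan_setup(dev_id, 0, &vconf);
 */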

static int
dpaa2_qdma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
                       const struct rte_dma_vchan_conf *conf,
                       uint32_t conf_sz)
{
        struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
        struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
        uint32_t pool_size;
        char ring_name[32];
        char pool_name[64];
        int fd_long_format = 1;
        int sg_enable = 0;

        DPAA2_QDMA_FUNC_TRACE();

        RTE_SET_USED(conf_sz);

        if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SG_FORMAT)
                sg_enable = 1;

        if (qdma_dev->vqs[vchan].flags & DPAA2_QDMA_VQ_FD_SHORT_FORMAT)
                fd_long_format = 0;

        if (dev->data->dev_conf.enable_silent)
                qdma_dev->vqs[vchan].flags |= DPAA2_QDMA_VQ_NO_RESPONSE;

        if (sg_enable) {
                if (qdma_dev->num_vqs != 1) {
                        DPAA2_QDMA_ERR(
                                "qDMA SG format only supports physical queue!");
                        return -ENODEV;
                }
                if (!fd_long_format) {
                        DPAA2_QDMA_ERR(
                                "qDMA SG format only supports long FD format!");
                        return -ENODEV;
                }
                pool_size = QDMA_FLE_SG_POOL_SIZE;
        } else {
                pool_size = QDMA_FLE_SINGLE_POOL_SIZE;
        }
        if (qdma_dev->num_vqs == 1)
                qdma_dev->vqs[vchan].exclusive_hw_queue = 1;
        else {
                /* Allocate a ring for the virtual queue in VQ mode */
                snprintf(ring_name, sizeof(ring_name), "status ring %d %d",
                         dev->data->dev_id, vchan);
                qdma_dev->vqs[vchan].status_ring = rte_ring_create(ring_name,
                        conf->nb_desc, rte_socket_id(), 0);
                if (!qdma_dev->vqs[vchan].status_ring) {
                        DPAA2_QDMA_ERR("Status ring creation failed for vq");
                        return -rte_errno;
                }
        }

        snprintf(pool_name, sizeof(pool_name),
                "qdma_fle_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
        qdma_dev->vqs[vchan].fle_pool = rte_mempool_create(pool_name,
                        conf->nb_desc, pool_size,
                        QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
                        NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
        if (!qdma_dev->vqs[vchan].fle_pool) {
                DPAA2_QDMA_ERR("qdma_fle_pool create failed");
                return -ENOMEM;
        }

        snprintf(pool_name, sizeof(pool_name),
                "qdma_job_pool_dev%d_qid%d", dpdmai_dev->dpdmai_id, vchan);
        qdma_dev->vqs[vchan].job_pool = rte_mempool_create(pool_name,
                        conf->nb_desc, pool_size,
                        QDMA_FLE_CACHE_SIZE(conf->nb_desc), 0,
                        NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
        if (!qdma_dev->vqs[vchan].job_pool) {
                DPAA2_QDMA_ERR("qdma_job_pool create failed");
                return -ENOMEM;
        }

        qdma_dev->vqs[vchan].dpdmai_dev = dpdmai_dev;
        qdma_dev->vqs[vchan].nb_desc = conf->nb_desc;

        return 0;
}
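
/*
 * The matching generic call for the setup above; field values are
 * illustrative only:
 *
 *      struct rte_dma_vchan_conf vconf = {
 *              .direction = RTE_DMA_DIR_MEM_TO_MEM,
 *              .nb_desc = DPAA2_QDMA_MAX_DESC,
 *      };
 *
 *      if (rte_dma_vchan_setup(dev_id, 0, &vconf) != 0)
 *              rte_exit(EXIT_FAILURE, "vchan setup failed\n");
 */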

static int
dpaa2_qdma_start(struct rte_dma_dev *dev)
{
        struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
        struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;

        DPAA2_QDMA_FUNC_TRACE();

        qdma_dev->state = 1;

        return 0;
}

static int
dpaa2_qdma_stop(struct rte_dma_dev *dev)
{
        struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
        struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;

        DPAA2_QDMA_FUNC_TRACE();

        qdma_dev->state = 0;

        return 0;
}
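
/*
 * The state flag above only gates reconfiguration; the data path goes
 * through the fp_obj handlers. A typical application lifecycle (sketch):
 *
 *      rte_dma_start(dev_id);
 *      ... rte_dma_copy()/rte_dma_submit()/rte_dma_completed() ...
 *      rte_dma_stop(dev_id);
 *      rte_dma_close(dev_id);
 */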

static int
dpaa2_qdma_reset(struct rte_dma_dev *dev)
{
        struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
        struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
        int i;

        DPAA2_QDMA_FUNC_TRACE();

        /* In case QDMA device is not in stopped state, return -EBUSY */
        if (qdma_dev->state == 1) {
                DPAA2_QDMA_ERR(
                        "Device is in running state. Stop before reset.");
                return -EBUSY;
        }

        /* In case there are pending jobs on any VQ, return -EBUSY */
        for (i = 0; i < qdma_dev->num_vqs; i++) {
                if (qdma_dev->vqs[i].in_use && (qdma_dev->vqs[i].num_enqueues !=
                    qdma_dev->vqs[i].num_dequeues)) {
                        DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
                        return -EBUSY;
                }
        }

        /* Reset and free virtual queues; also release the per-queue ring
         * and mempools created in vchan_setup, which would otherwise leak
         * across a reconfigure cycle. All the free helpers are NULL-safe.
         */
        for (i = 0; i < qdma_dev->num_vqs; i++) {
                rte_ring_free(qdma_dev->vqs[i].status_ring);
                rte_mempool_free(qdma_dev->vqs[i].fle_pool);
                rte_mempool_free(qdma_dev->vqs[i].job_pool);
        }
        rte_free(qdma_dev->vqs);
        qdma_dev->vqs = NULL;

        /* Reset QDMA device structure */
        qdma_dev->num_vqs = 0;

        return 0;
}

static int
dpaa2_qdma_close(struct rte_dma_dev *dev)
{
        DPAA2_QDMA_FUNC_TRACE();

        return dpaa2_qdma_reset(dev);
}

static struct rte_dma_dev_ops dpaa2_qdma_ops = {
        .dev_info_get     = dpaa2_qdma_info_get,
        .dev_configure    = dpaa2_qdma_configure,
        .dev_start        = dpaa2_qdma_start,
        .dev_stop         = dpaa2_qdma_stop,
        .dev_close        = dpaa2_qdma_close,
        .vchan_setup      = dpaa2_qdma_vchan_setup,
};

static int
dpaa2_dpdmai_dev_uninit(struct rte_dma_dev *dev)
{
        struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
        struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[0]);
        int ret;

        DPAA2_QDMA_FUNC_TRACE();

        ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
                             dpdmai_dev->token);
        if (ret)
                DPAA2_QDMA_ERR("dpdmai disable failed");

        /* Free the DQRR storage of the Rx queue */
        if (rxq->q_storage) {
                dpaa2_free_dq_storage(rxq->q_storage);
                rte_free(rxq->q_storage);
                rxq->q_storage = NULL;
        }

        /* Close the device at the underlying layer */
        ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
        if (ret)
                DPAA2_QDMA_ERR("Failure closing dpdmai device");

        return 0;
}

static int
dpaa2_dpdmai_dev_init(struct rte_dma_dev *dev, int dpdmai_id)
{
        struct dpaa2_dpdmai_dev *dpdmai_dev = dev->data->dev_private;
        struct dpdmai_rx_queue_cfg rx_queue_cfg;
        struct dpdmai_attr attr;
        struct dpdmai_rx_queue_attr rx_attr;
        struct dpdmai_tx_queue_attr tx_attr;
        struct dpaa2_queue *rxq;
        int ret;

        DPAA2_QDMA_FUNC_TRACE();

        /* Open DPDMAI device */
        dpdmai_dev->dpdmai_id = dpdmai_id;
        dpdmai_dev->dpdmai.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
        /* Zero the state so the reset below sees num_vqs == 0 */
        dpdmai_dev->qdma_dev = rte_zmalloc(NULL, sizeof(struct qdma_device),
                                           RTE_CACHE_LINE_SIZE);
        if (!dpdmai_dev->qdma_dev) {
                DPAA2_QDMA_ERR("qdma_device allocation failed");
                return -ENOMEM;
        }
        ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
                          dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
        if (ret) {
                DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
                return ret;
        }

        /* Get DPDMAI attributes */
        ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
                                    dpdmai_dev->token, &attr);
        if (ret) {
                DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
                               ret);
                goto init_err;
        }
        dpdmai_dev->num_queues = attr.num_of_queues;

        /* Set up Rx queue */
        memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
        ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
                                  CMD_PRI_LOW,
                                  dpdmai_dev->token,
                                  0, 0, &rx_queue_cfg);
        if (ret) {
                DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
                               ret);
                goto init_err;
        }

        /* Allocate DQ storage for the DPDMAI Rx queues */
        rxq = &(dpdmai_dev->rx_queue[0]);
        rxq->q_storage = rte_malloc("dq_storage",
                                    sizeof(struct queue_storage_info_t),
                                    RTE_CACHE_LINE_SIZE);
        if (!rxq->q_storage) {
                DPAA2_QDMA_ERR("q_storage allocation failed");
                ret = -ENOMEM;
                goto init_err;
        }

        memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
        ret = dpaa2_alloc_dq_storage(rxq->q_storage);
        if (ret) {
                DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
                goto init_err;
        }

        /* Get the Rx and Tx queue FQIDs */
        ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
                                  dpdmai_dev->token, 0, 0, &rx_attr);
        if (ret) {
                DPAA2_QDMA_ERR("Reading Rx queue failed with err: %d",
                               ret);
                goto init_err;
        }
        dpdmai_dev->rx_queue[0].fqid = rx_attr.fqid;

        ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
                                  dpdmai_dev->token, 0, 0, &tx_attr);
        if (ret) {
                DPAA2_QDMA_ERR("Reading Tx queue failed with err: %d",
                               ret);
                goto init_err;
        }
        dpdmai_dev->tx_queue[0].fqid = tx_attr.fqid;

        /* Enable the device */
        ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
                            dpdmai_dev->token);
        if (ret) {
                DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
                goto init_err;
        }

        if (!dpaa2_coherent_no_alloc_cache) {
                if (dpaa2_svr_family == SVR_LX2160A) {
                        dpaa2_coherent_no_alloc_cache =
                                DPAA2_LX2_COHERENT_NO_ALLOCATE_CACHE;
                        dpaa2_coherent_alloc_cache =
                                DPAA2_LX2_COHERENT_ALLOCATE_CACHE;
                } else {
                        dpaa2_coherent_no_alloc_cache =
                                DPAA2_COHERENT_NO_ALLOCATE_CACHE;
                        dpaa2_coherent_alloc_cache =
                                DPAA2_COHERENT_ALLOCATE_CACHE;
                }
        }

        DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");

        /* Reset the QDMA device */
        ret = dpaa2_qdma_reset(dev);
        if (ret) {
                DPAA2_QDMA_ERR("Resetting QDMA failed");
                goto init_err;
        }

        return 0;
init_err:
        dpaa2_dpdmai_dev_uninit(dev);
        return ret;
}

static int
dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
                 struct rte_dpaa2_device *dpaa2_dev)
{
        struct rte_dma_dev *dmadev;
        int ret;

        DPAA2_QDMA_FUNC_TRACE();

        RTE_SET_USED(dpaa2_drv);

        dmadev = rte_dma_pmd_allocate(dpaa2_dev->device.name,
                                      rte_socket_id(),
                                      sizeof(struct dpaa2_dpdmai_dev));
        if (!dmadev) {
                DPAA2_QDMA_ERR("Unable to allocate dmadevice");
                return -EINVAL;
        }

        dpaa2_dev->dmadev = dmadev;
        dmadev->dev_ops = &dpaa2_qdma_ops;
        dmadev->device = &dpaa2_dev->device;
        dmadev->fp_obj->dev_private = dmadev->data->dev_private;

        /* Invoke PMD device initialization function */
        ret = dpaa2_dpdmai_dev_init(dmadev, dpaa2_dev->object_id);
        if (ret) {
                rte_dma_pmd_release(dpaa2_dev->device.name);
                return ret;
        }

        dmadev->state = RTE_DMA_DEV_READY;
        return 0;
}

static int
dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
{
        struct rte_dma_dev *dmadev = dpaa2_dev->dmadev;
        int ret;

        DPAA2_QDMA_FUNC_TRACE();

        dpaa2_dpdmai_dev_uninit(dmadev);

        ret = rte_dma_pmd_release(dpaa2_dev->device.name);
        if (ret)
                DPAA2_QDMA_ERR("Device cleanup failed");

        return 0;
}

static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
        .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
        .drv_type = DPAA2_QDMA,
        .probe = dpaa2_qdma_probe,
        .remove = dpaa2_qdma_remove,
};

RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
RTE_PMD_REGISTER_PARAM_STRING(dpaa2_qdma,
        "no_prefetch=<int> ");
RTE_LOG_REGISTER_DEFAULT(dpaa2_qdma_logtype, INFO);