/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 HiSilicon Limited
 * Copyright(c) 2021 Intel Corporation
 */

#include <inttypes.h>
#include <stdlib.h>
#include <string.h>

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>

#include "rte_dmadev.h"
#include "rte_dmadev_pmd.h"

static int16_t dma_devices_max;

struct rte_dma_dev *rte_dma_devices;

RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
#define RTE_DMA_LOG(level, ...) \
        rte_log(RTE_LOG_ ## level, rte_dma_logtype, RTE_FMT("dma: " \
                RTE_FMT_HEAD(__VA_ARGS__,) "\n", RTE_FMT_TAIL(__VA_ARGS__,)))
int
rte_dma_dev_max(size_t dev_max)
{
        /* This function may be called before rte_eal_init(), so it must not
         * call any other rte library functions.
         */
        if (dev_max == 0 || dev_max > INT16_MAX)
                return -EINVAL;

        if (dma_devices_max > 0)
                return -EINVAL;

        dma_devices_max = dev_max;

        return 0;
}
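
/*
 * Illustrative usage (a sketch, not part of this file): an application that
 * needs more device slots than the build-time default can raise the limit,
 * but only before rte_eal_init() and before the first device is allocated.
 *
 *      int main(int argc, char **argv)
 *      {
 *              if (rte_dma_dev_max(256) != 0)  // must precede rte_eal_init()
 *                      return -1;
 *              if (rte_eal_init(argc, argv) < 0)
 *                      return -1;
 *              // ... use the dmadev API ...
 *      }
 */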

static int
dma_check_name(const char *name)
{
        size_t name_len;

        if (name == NULL) {
                RTE_DMA_LOG(ERR, "Name can't be NULL");
                return -EINVAL;
        }

        name_len = strnlen(name, RTE_DEV_NAME_MAX_LEN);
        if (name_len == 0) {
                RTE_DMA_LOG(ERR, "Zero length DMA device name");
                return -EINVAL;
        }
        if (name_len >= RTE_DEV_NAME_MAX_LEN) {
                RTE_DMA_LOG(ERR, "DMA device name is too long");
                return -EINVAL;
        }

        return 0;
}

/* Return the first unused device slot, or -1 if all slots are taken. */
static int16_t
dma_find_free_id(void)
{
        int16_t i;

        if (rte_dma_devices == NULL)
                return -1;

        for (i = 0; i < dma_devices_max; i++) {
                if (rte_dma_devices[i].state == RTE_DMA_DEV_UNUSED)
                        return i;
        }

        return -1;
}

static struct rte_dma_dev *
dma_find_by_name(const char *name)
{
        int16_t i;

        if (rte_dma_devices == NULL)
                return NULL;

        for (i = 0; i < dma_devices_max; i++) {
                if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) &&
                    (!strcmp(name, rte_dma_devices[i].dev_name)))
                        return &rte_dma_devices[i];
        }

        return NULL;
}

/* Lazily allocate the device array on first use. */
static int
dma_dev_data_prepare(void)
{
        size_t size;

        if (rte_dma_devices != NULL)
                return 0;

        size = dma_devices_max * sizeof(struct rte_dma_dev);
        rte_dma_devices = malloc(size);
        if (rte_dma_devices == NULL)
                return -ENOMEM;
        memset(rte_dma_devices, 0, size);

        return 0;
}

static int
dma_data_prepare(void)
{
        if (dma_devices_max == 0)
                dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
        return dma_dev_data_prepare();
}

static struct rte_dma_dev *
dma_allocate(const char *name, int numa_node, size_t private_data_size)
{
        struct rte_dma_dev *dev;
        void *dev_private;
        int16_t dev_id;
        int ret;

        ret = dma_data_prepare();
        if (ret < 0) {
                RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
                return NULL;
        }

        dev = dma_find_by_name(name);
        if (dev != NULL) {
                RTE_DMA_LOG(ERR, "DMA device already allocated");
                return NULL;
        }

        dev_private = rte_zmalloc_socket(name, private_data_size,
                                         RTE_CACHE_LINE_SIZE, numa_node);
        if (dev_private == NULL) {
                RTE_DMA_LOG(ERR, "Cannot allocate private data");
                return NULL;
        }

        dev_id = dma_find_free_id();
        if (dev_id < 0) {
                RTE_DMA_LOG(ERR, "Reached maximum number of DMA devices");
                rte_free(dev_private);
                return NULL;
        }

        dev = &rte_dma_devices[dev_id];
        rte_strscpy(dev->dev_name, name, sizeof(dev->dev_name));
        dev->dev_id = dev_id;
        dev->numa_node = numa_node;
        dev->dev_private = dev_private;

        return dev;
}

static void
dma_release(struct rte_dma_dev *dev)
{
        rte_free(dev->dev_private);
        memset(dev, 0, sizeof(struct rte_dma_dev));
}

struct rte_dma_dev *
rte_dma_pmd_allocate(const char *name, int numa_node, size_t private_data_size)
{
        struct rte_dma_dev *dev;

        if (dma_check_name(name) != 0 || private_data_size == 0)
                return NULL;

        dev = dma_allocate(name, numa_node, private_data_size);
        if (dev == NULL)
                return NULL;

        dev->state = RTE_DMA_DEV_REGISTERED;

        return dev;
}
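
/*
 * Illustrative PMD-side usage (a sketch; the "my_dma" names and the private
 * data struct are hypothetical): a driver probe routine typically allocates
 * the device, fills in its ops and bus device, and marks it ready.
 *
 *      static int
 *      my_dma_probe(struct rte_device *rte_dev)
 *      {
 *              struct rte_dma_dev *dev;
 *
 *              dev = rte_dma_pmd_allocate(rte_dev->name, rte_dev->numa_node,
 *                                         sizeof(struct my_dma_private));
 *              if (dev == NULL)
 *                      return -ENOMEM;
 *              dev->device = rte_dev;
 *              dev->dev_ops = &my_dma_ops;
 *              dev->state = RTE_DMA_DEV_READY;
 *              return 0;
 *      }
 */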

int
rte_dma_pmd_release(const char *name)
{
        struct rte_dma_dev *dev;

        if (dma_check_name(name) != 0)
                return -EINVAL;

        dev = dma_find_by_name(name);
        if (dev == NULL)
                return -EINVAL;

        if (dev->state == RTE_DMA_DEV_READY)
                return rte_dma_close(dev->dev_id);

        dma_release(dev);
        return 0;
}

int
rte_dma_get_dev_id_by_name(const char *name)
{
        struct rte_dma_dev *dev;

        if (dma_check_name(name) != 0)
                return -EINVAL;

        dev = dma_find_by_name(name);
        if (dev == NULL)
                return -EINVAL;

        return dev->dev_id;
}

bool
rte_dma_is_valid(int16_t dev_id)
{
        return (dev_id >= 0) && (dev_id < dma_devices_max) &&
                rte_dma_devices != NULL &&
                rte_dma_devices[dev_id].state != RTE_DMA_DEV_UNUSED;
}

uint16_t
rte_dma_count_avail(void)
{
        uint16_t count = 0;
        uint16_t i;

        if (rte_dma_devices == NULL)
                return count;

        for (i = 0; i < dma_devices_max; i++) {
                if (rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED)
                        count++;
        }

        return count;
}
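
/*
 * Illustrative application-side lookup (a sketch; the device name is only an
 * example): confirm a device was probed and resolve its ID by name.
 *
 *      int id;
 *
 *      if (rte_dma_count_avail() == 0)
 *              return -ENODEV;
 *      id = rte_dma_get_dev_id_by_name("dma_example");
 *      if (id < 0 || !rte_dma_is_valid(id))
 *              return -ENODEV;
 */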

int
rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
{
        const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
        int ret;

        if (!rte_dma_is_valid(dev_id) || dev_info == NULL)
                return -EINVAL;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
        memset(dev_info, 0, sizeof(struct rte_dma_info));
        ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
                                            sizeof(struct rte_dma_info));
        if (ret != 0)
                return ret;

        dev_info->dev_name = dev->dev_name;
        dev_info->numa_node = dev->device->numa_node;
        dev_info->nb_vchans = dev->dev_conf.nb_vchans;

        return 0;
}

int
rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
{
        struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
        struct rte_dma_info dev_info;
        int ret;

        if (!rte_dma_is_valid(dev_id) || dev_conf == NULL)
                return -EINVAL;

        if (dev->dev_started != 0) {
                RTE_DMA_LOG(ERR,
                        "Device %d must be stopped to allow configuration",
                        dev_id);
                return -EBUSY;
        }

        ret = rte_dma_info_get(dev_id, &dev_info);
        if (ret != 0) {
                RTE_DMA_LOG(ERR, "Failed to get device info for device %d",
                        dev_id);
                return -EINVAL;
        }
        if (dev_conf->nb_vchans == 0) {
                RTE_DMA_LOG(ERR,
                        "Device %d cannot be configured with zero vchans",
                        dev_id);
                return -EINVAL;
        }
        if (dev_conf->nb_vchans > dev_info.max_vchans) {
                RTE_DMA_LOG(ERR,
                        "Device %d requested too many vchans", dev_id);
                return -EINVAL;
        }
        if (dev_conf->enable_silent &&
            !(dev_info.dev_capa & RTE_DMA_CAPA_SILENT)) {
                RTE_DMA_LOG(ERR, "Device %d doesn't support silent mode",
                        dev_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
        ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
                                             sizeof(struct rte_dma_conf));
        if (ret == 0)
                memcpy(&dev->dev_conf, dev_conf, sizeof(struct rte_dma_conf));

        return ret;
}
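
/*
 * Illustrative configuration (a sketch): query the device limits first, then
 * request a vchan count within them; the device must not be started.
 *
 *      struct rte_dma_info info;
 *      struct rte_dma_conf conf = { 0 };
 *
 *      if (rte_dma_info_get(id, &info) != 0)
 *              return -1;
 *      conf.nb_vchans = 1;     // must be >= 1 and <= info.max_vchans
 *      if (rte_dma_configure(id, &conf) != 0)
 *              return -1;
 */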

int
rte_dma_start(int16_t dev_id)
{
        struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
        int ret;

        if (!rte_dma_is_valid(dev_id))
                return -EINVAL;

        if (dev->dev_conf.nb_vchans == 0) {
                RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
                return -EINVAL;
        }

        if (dev->dev_started != 0) {
                RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
                return 0;
        }

        if (dev->dev_ops->dev_start == NULL)
                goto mark_started;

        ret = (*dev->dev_ops->dev_start)(dev);
        if (ret != 0)
                return ret;

mark_started:
        dev->dev_started = 1;
        return 0;
}

int
rte_dma_stop(int16_t dev_id)
{
        struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
        int ret;

        if (!rte_dma_is_valid(dev_id))
                return -EINVAL;

        if (dev->dev_started == 0) {
                RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
                return 0;
        }

        if (dev->dev_ops->dev_stop == NULL)
                goto mark_stopped;

        ret = (*dev->dev_ops->dev_stop)(dev);
        if (ret != 0)
                return ret;

mark_stopped:
        dev->dev_started = 0;
        return 0;
}

int
rte_dma_close(int16_t dev_id)
{
        struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
        int ret;

        if (!rte_dma_is_valid(dev_id))
                return -EINVAL;

        /* Device must be stopped before it can be closed */
        if (dev->dev_started == 1) {
                RTE_DMA_LOG(ERR,
                        "Device %d must be stopped before closing", dev_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
        ret = (*dev->dev_ops->dev_close)(dev);
        if (ret == 0)
                dma_release(dev);

        return ret;
}
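
/*
 * Illustrative lifecycle (a sketch): the checks above enforce the order
 * configure -> vchan setup -> start -> stop -> close; start fails until
 * vchans are configured, and close fails while the device is started.
 *
 *      if (rte_dma_start(id) != 0)
 *              return -1;
 *      // ... enqueue and complete transfers ...
 *      if (rte_dma_stop(id) != 0)
 *              return -1;
 *      if (rte_dma_close(id) != 0)     // frees the device slot on success
 *              return -1;
 */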

int
rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
                    const struct rte_dma_vchan_conf *conf)
{
        struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
        struct rte_dma_info dev_info;
        bool src_is_dev, dst_is_dev;
        int ret;

        if (!rte_dma_is_valid(dev_id) || conf == NULL)
                return -EINVAL;

        if (dev->dev_started != 0) {
                RTE_DMA_LOG(ERR,
                        "Device %d must be stopped to allow configuration",
                        dev_id);
                return -EBUSY;
        }

        ret = rte_dma_info_get(dev_id, &dev_info);
        if (ret != 0) {
                RTE_DMA_LOG(ERR, "Failed to get device info for device %d",
                        dev_id);
                return -EINVAL;
        }
        if (dev->dev_conf.nb_vchans == 0) {
                RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
                return -EINVAL;
        }
        if (vchan >= dev_info.nb_vchans) {
                RTE_DMA_LOG(ERR, "Device %d vchan out of range", dev_id);
                return -EINVAL;
        }
        if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
            conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
            conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
            conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
                RTE_DMA_LOG(ERR, "Device %d direction invalid", dev_id);
                return -EINVAL;
        }
        if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
            !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM)) {
                RTE_DMA_LOG(ERR,
                        "Device %d doesn't support mem2mem transfer", dev_id);
                return -EINVAL;
        }
        if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
            !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV)) {
                RTE_DMA_LOG(ERR,
                        "Device %d doesn't support mem2dev transfer", dev_id);
                return -EINVAL;
        }
        if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
            !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_MEM)) {
                RTE_DMA_LOG(ERR,
                        "Device %d doesn't support dev2mem transfer", dev_id);
                return -EINVAL;
        }
        if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
            !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_DEV)) {
                RTE_DMA_LOG(ERR,
                        "Device %d doesn't support dev2dev transfer", dev_id);
                return -EINVAL;
        }
        if (conf->nb_desc < dev_info.min_desc ||
            conf->nb_desc > dev_info.max_desc) {
                RTE_DMA_LOG(ERR,
                        "Device %d number of descriptors invalid", dev_id);
                return -EINVAL;
        }
        src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
                     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
        if ((conf->src_port.port_type == RTE_DMA_PORT_NONE && src_is_dev) ||
            (conf->src_port.port_type != RTE_DMA_PORT_NONE && !src_is_dev)) {
                RTE_DMA_LOG(ERR, "Device %d source port type invalid", dev_id);
                return -EINVAL;
        }
        dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
                     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
        if ((conf->dst_port.port_type == RTE_DMA_PORT_NONE && dst_is_dev) ||
            (conf->dst_port.port_type != RTE_DMA_PORT_NONE && !dst_is_dev)) {
                RTE_DMA_LOG(ERR,
                        "Device %d destination port type invalid", dev_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
        return (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
                                        sizeof(struct rte_dma_vchan_conf));
}
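
/*
 * Illustrative vchan setup (a sketch): for a plain memory-to-memory channel
 * both port types stay RTE_DMA_PORT_NONE (the zero value), which satisfies
 * the direction/port checks above; nb_desc must lie in
 * [info.min_desc, info.max_desc].
 *
 *      struct rte_dma_vchan_conf vconf = {
 *              .direction = RTE_DMA_DIR_MEM_TO_MEM,
 *              .nb_desc = info.min_desc,
 *      };
 *
 *      if (rte_dma_vchan_setup(id, 0, &vconf) != 0)
 *              return -1;
 */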

int
rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
{
        const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];

        if (!rte_dma_is_valid(dev_id) || stats == NULL)
                return -EINVAL;

        if (vchan >= dev->dev_conf.nb_vchans &&
            vchan != RTE_DMA_ALL_VCHAN) {
                RTE_DMA_LOG(ERR,
                        "Device %d vchan %u out of range", dev_id, vchan);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
        memset(stats, 0, sizeof(struct rte_dma_stats));
        return (*dev->dev_ops->stats_get)(dev, vchan, stats,
                                          sizeof(struct rte_dma_stats));
}

int
rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
{
        struct rte_dma_dev *dev = &rte_dma_devices[dev_id];

        if (!rte_dma_is_valid(dev_id))
                return -EINVAL;

        if (vchan >= dev->dev_conf.nb_vchans &&
            vchan != RTE_DMA_ALL_VCHAN) {
                RTE_DMA_LOG(ERR,
                        "Device %d vchan %u out of range", dev_id, vchan);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
        return (*dev->dev_ops->stats_reset)(dev, vchan);
}
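
/*
 * Illustrative stats usage (a sketch): read and clear the counters for every
 * configured vchan at once via RTE_DMA_ALL_VCHAN.
 *
 *      struct rte_dma_stats stats;
 *
 *      if (rte_dma_stats_get(id, RTE_DMA_ALL_VCHAN, &stats) == 0)
 *              printf("submitted %" PRIu64 " completed %" PRIu64
 *                     " errors %" PRIu64 "\n",
 *                     stats.submitted, stats.completed, stats.errors);
 *      (void)rte_dma_stats_reset(id, RTE_DMA_ALL_VCHAN);
 */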

static const char *
dma_capability_name(uint64_t capability)
{
        static const struct {
                uint64_t capability;
                const char *name;
        } capa_names[] = {
                { RTE_DMA_CAPA_MEM_TO_MEM,  "mem2mem" },
                { RTE_DMA_CAPA_MEM_TO_DEV,  "mem2dev" },
                { RTE_DMA_CAPA_DEV_TO_MEM,  "dev2mem" },
                { RTE_DMA_CAPA_DEV_TO_DEV,  "dev2dev" },
                { RTE_DMA_CAPA_SVA,         "sva"     },
                { RTE_DMA_CAPA_SILENT,      "silent"  },
                { RTE_DMA_CAPA_OPS_COPY,    "copy"    },
                { RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" },
                { RTE_DMA_CAPA_OPS_FILL,    "fill"    },
        };

        const char *name = "unknown";
        uint32_t i;

        for (i = 0; i < RTE_DIM(capa_names); i++) {
                if (capability == capa_names[i].capability) {
                        name = capa_names[i].name;
                        break;
                }
        }

        return name;
}

static void
dma_dump_capability(FILE *f, uint64_t dev_capa)
{
        uint64_t capa;

        (void)fprintf(f, "  dev_capa: 0x%" PRIx64 " -", dev_capa);
        while (dev_capa > 0) {
                /* Isolate the lowest set capability bit. */
                capa = 1ull << __builtin_ctzll(dev_capa);
                (void)fprintf(f, " %s", dma_capability_name(capa));
                dev_capa &= ~capa;
        }
        (void)fprintf(f, "\n");
}

int
rte_dma_dump(int16_t dev_id, FILE *f)
{
        const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
        struct rte_dma_info dev_info;
        int ret;

        if (!rte_dma_is_valid(dev_id) || f == NULL)
                return -EINVAL;

        ret = rte_dma_info_get(dev_id, &dev_info);
        if (ret != 0) {
                RTE_DMA_LOG(ERR, "Failed to get device info for device %d",
                        dev_id);
                return -EINVAL;
        }

        (void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
                dev->dev_id,
                dev->dev_name,
                dev->dev_started ? "started" : "stopped");
        dma_dump_capability(f, dev_info.dev_capa);
        (void)fprintf(f, "  max_vchans_supported: %u\n", dev_info.max_vchans);
        (void)fprintf(f, "  nb_vchans_configured: %u\n", dev_info.nb_vchans);
        (void)fprintf(f, "  silent_mode: %s\n",
                dev->dev_conf.enable_silent ? "on" : "off");

        if (dev->dev_ops->dev_dump != NULL)
                return (*dev->dev_ops->dev_dump)(dev, f);

        return 0;
}
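
/*
 * Illustrative debug dump (a sketch): after configuration, a quick way to
 * inspect a device from an application or test is
 *
 *      (void)rte_dma_dump(id, stderr);
 *
 * which prints the name, started/stopped state, decoded capability flags and
 * vchan counts, then delegates to the driver's dev_dump callback if present.
 */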