/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 HiSilicon Limited
 * Copyright(c) 2021 Intel Corporation
 */

#include <inttypes.h>

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>

#include "rte_dmadev.h"
#include "rte_dmadev_pmd.h"

static int16_t dma_devices_max;

struct rte_dma_fp_object *rte_dma_fp_objs;
struct rte_dma_dev *rte_dma_devices;
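/* Per-device data shared between the primary and secondary processes,
 * published through a memzone (see dma_shared_data_prepare()).
 */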
static struct {
        /* Holds the dev_max of the primary process; it is set by the primary
         * process and read by secondary processes.
         */
        int16_t dev_max;
        struct rte_dma_dev_data data[];
} *dma_devices_shared_data;

RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
#define RTE_DMA_LOG(level, ...) \
        rte_log(RTE_LOG_ ## level, rte_dma_logtype, RTE_FMT("dma: " \
                RTE_FMT_HEAD(__VA_ARGS__,) "\n", RTE_FMT_TAIL(__VA_ARGS__,)))

int
rte_dma_dev_max(size_t dev_max)
{
        /* This function may be called before rte_eal_init(), so no rte
         * library functions may be called here.
         */
        if (dev_max == 0 || dev_max > INT16_MAX)
                return -EINVAL;

        if (dma_devices_max > 0)
                return -EINVAL;

        dma_devices_max = dev_max;

        return 0;
}
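
/* A minimal usage sketch (illustrative, not part of this file): the limit
 * must be raised before rte_eal_init(), e.g. at the top of main():
 *
 *     if (rte_dma_dev_max(128) != 0)    // 128 is an arbitrary example value
 *             handle_error();
 *     ret = rte_eal_init(argc, argv);
 */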

static int
dma_check_name(const char *name)
{
        size_t name_len;

        if (name == NULL) {
                RTE_DMA_LOG(ERR, "Name can't be NULL");
                return -EINVAL;
        }

        name_len = strnlen(name, RTE_DEV_NAME_MAX_LEN);
        if (name_len == 0) {
                RTE_DMA_LOG(ERR, "Zero length DMA device name");
                return -EINVAL;
        }
        if (name_len >= RTE_DEV_NAME_MAX_LEN) {
                RTE_DMA_LOG(ERR, "DMA device name is too long");
                return -EINVAL;
        }

        return 0;
}

static int16_t
dma_find_free_id(void)
{
        int16_t i;

        if (rte_dma_devices == NULL || dma_devices_shared_data == NULL)
                return -1;

        for (i = 0; i < dma_devices_max; i++) {
                if (dma_devices_shared_data->data[i].dev_name[0] == '\0')
                        return i;
        }

        return -1;
}

static struct rte_dma_dev *
dma_find_by_name(const char *name)
{
        int16_t i;

        if (rte_dma_devices == NULL)
                return NULL;

        for (i = 0; i < dma_devices_max; i++) {
                if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) &&
                    (!strcmp(name, rte_dma_devices[i].data->dev_name)))
                        return &rte_dma_devices[i];
        }

        return NULL;
}

static void dma_fp_object_dummy(struct rte_dma_fp_object *obj);

static int
dma_fp_data_prepare(void)
{
        size_t size;
        void *ptr;
        int i;

        if (rte_dma_fp_objs != NULL)
                return 0;

        /* Fast-path objects must be cache-line aligned, but the pointer
         * returned by malloc() may not be. Allocate extra memory so the
         * pointer can be realigned.
         * Note: posix_memalign()/aligned_alloc() are not used because their
         * availability depends on the libc version.
         */
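        /* e.g., with a 64-byte cache line, a malloc() pointer ending in 0x08
         * is moved up by RTE_PTR_ALIGN() to the next 64-byte boundary; the
         * extra RTE_CACHE_LINE_SIZE bytes guarantee there is room to do so.
         */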
        size = dma_devices_max * sizeof(struct rte_dma_fp_object) +
                RTE_CACHE_LINE_SIZE;
        ptr = malloc(size);
        if (ptr == NULL)
                return -ENOMEM;
        memset(ptr, 0, size);

        rte_dma_fp_objs = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE);
        for (i = 0; i < dma_devices_max; i++)
                dma_fp_object_dummy(&rte_dma_fp_objs[i]);

        return 0;
}

static int
dma_dev_data_prepare(void)
{
        size_t size;

        if (rte_dma_devices != NULL)
                return 0;

        size = dma_devices_max * sizeof(struct rte_dma_dev);
        rte_dma_devices = malloc(size);
        if (rte_dma_devices == NULL)
                return -ENOMEM;
        memset(rte_dma_devices, 0, size);

        return 0;
}

static int
dma_shared_data_prepare(void)
{
        const char *mz_name = "rte_dma_dev_data";
        const struct rte_memzone *mz;
        size_t size;

        if (dma_devices_shared_data != NULL)
                return 0;

        size = sizeof(*dma_devices_shared_data) +
                sizeof(struct rte_dma_dev_data) * dma_devices_max;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                mz = rte_memzone_reserve(mz_name, size, rte_socket_id(), 0);
        else
                mz = rte_memzone_lookup(mz_name);
        if (mz == NULL)
                return -ENOMEM;

        dma_devices_shared_data = mz->addr;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                memset(dma_devices_shared_data, 0, size);
                dma_devices_shared_data->dev_max = dma_devices_max;
        } else {
                dma_devices_max = dma_devices_shared_data->dev_max;
        }

        return 0;
}

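/* Note the asymmetric ordering below: the primary process sizes and fills
 * its local arrays first, then publishes dma_devices_max through the shared
 * memzone; a secondary process must attach to the memzone first to learn
 * dev_max before it can size its own local arrays.
 */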
static int
dma_data_prepare(void)
{
        int ret;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                if (dma_devices_max == 0)
                        dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
                ret = dma_fp_data_prepare();
                if (ret)
                        return ret;
                ret = dma_dev_data_prepare();
                if (ret)
                        return ret;
                ret = dma_shared_data_prepare();
                if (ret)
                        return ret;
        } else {
                ret = dma_shared_data_prepare();
                if (ret)
                        return ret;
                ret = dma_fp_data_prepare();
                if (ret)
                        return ret;
                ret = dma_dev_data_prepare();
                if (ret)
                        return ret;
        }

        return 0;
}

static struct rte_dma_dev *
dma_allocate_primary(const char *name, int numa_node, size_t private_data_size)
{
        struct rte_dma_dev *dev;
        void *dev_private;
        int16_t dev_id;
        int ret;

        ret = dma_data_prepare();
        if (ret < 0) {
                RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
                return NULL;
        }

        dev = dma_find_by_name(name);
        if (dev != NULL) {
                RTE_DMA_LOG(ERR, "DMA device already allocated");
                return NULL;
        }

        dev_private = rte_zmalloc_socket(name, private_data_size,
                                         RTE_CACHE_LINE_SIZE, numa_node);
        if (dev_private == NULL) {
                RTE_DMA_LOG(ERR, "Cannot allocate private data");
                return NULL;
        }

        dev_id = dma_find_free_id();
        if (dev_id < 0) {
                RTE_DMA_LOG(ERR, "Reached maximum number of DMA devices");
                rte_free(dev_private);
                return NULL;
        }

        dev = &rte_dma_devices[dev_id];
        dev->data = &dma_devices_shared_data->data[dev_id];
        rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
        dev->data->dev_id = dev_id;
        dev->data->numa_node = numa_node;
        dev->data->dev_private = dev_private;

        return dev;
}

static struct rte_dma_dev *
dma_attach_secondary(const char *name)
{
        struct rte_dma_dev *dev;
        int16_t i;
        int ret;

        ret = dma_data_prepare();
        if (ret < 0) {
                RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
                return NULL;
        }

        for (i = 0; i < dma_devices_max; i++) {
                if (!strcmp(dma_devices_shared_data->data[i].dev_name, name))
                        break;
        }
        if (i == dma_devices_max) {
                RTE_DMA_LOG(ERR,
                        "Device %s is not driven by the primary process",
                        name);
                return NULL;
        }

        dev = &rte_dma_devices[i];
        dev->data = &dma_devices_shared_data->data[i];

        return dev;
}

static struct rte_dma_dev *
dma_allocate(const char *name, int numa_node, size_t private_data_size)
{
        struct rte_dma_dev *dev;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                dev = dma_allocate_primary(name, numa_node, private_data_size);
        else
                dev = dma_attach_secondary(name);

        if (dev) {
                dev->fp_obj = &rte_dma_fp_objs[dev->data->dev_id];
                dma_fp_object_dummy(dev->fp_obj);
        }

        return dev;
}

static void
dma_release(struct rte_dma_dev *dev)
{
        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                rte_free(dev->data->dev_private);
                memset(dev->data, 0, sizeof(struct rte_dma_dev_data));
        }

        dma_fp_object_dummy(dev->fp_obj);
        memset(dev, 0, sizeof(struct rte_dma_dev));
}

struct rte_dma_dev *
rte_dma_pmd_allocate(const char *name, int numa_node, size_t private_data_size)
{
        struct rte_dma_dev *dev;

        if (dma_check_name(name) != 0 || private_data_size == 0)
                return NULL;

        dev = dma_allocate(name, numa_node, private_data_size);
        if (dev == NULL)
                return NULL;

        dev->state = RTE_DMA_DEV_REGISTERED;

        return dev;
}
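
/* A minimal PMD-side sketch (illustrative only; the "my_dma" names and the
 * my_dma_priv structure are hypothetical):
 *
 *     struct rte_dma_dev *dev;
 *
 *     dev = rte_dma_pmd_allocate("my_dma0", rte_socket_id(),
 *                                sizeof(struct my_dma_priv));
 *     if (dev == NULL)
 *             return -ENOMEM;
 *     dev->dev_ops = &my_dma_ops;    // driver then fills fp_obj callbacks
 */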

int
rte_dma_pmd_release(const char *name)
{
        struct rte_dma_dev *dev;

        if (dma_check_name(name) != 0)
                return -EINVAL;

        dev = dma_find_by_name(name);
        if (dev == NULL)
                return -EINVAL;

        if (dev->state == RTE_DMA_DEV_READY)
                return rte_dma_close(dev->data->dev_id);

        dma_release(dev);
        return 0;
}

int
rte_dma_get_dev_id_by_name(const char *name)
{
        struct rte_dma_dev *dev;

        if (dma_check_name(name) != 0)
                return -EINVAL;

        dev = dma_find_by_name(name);
        if (dev == NULL)
                return -EINVAL;

        return dev->data->dev_id;
}

bool
rte_dma_is_valid(int16_t dev_id)
{
        return (dev_id >= 0) && (dev_id < dma_devices_max) &&
                rte_dma_devices != NULL &&
                rte_dma_devices[dev_id].state != RTE_DMA_DEV_UNUSED;
}

uint16_t
rte_dma_count_avail(void)
{
        uint16_t count = 0;
        uint16_t i;

        if (rte_dma_devices == NULL)
                return count;

        for (i = 0; i < dma_devices_max; i++) {
                if (rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED)
                        count++;
        }

        return count;
}

int
rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
{
        const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
        int ret;

        if (!rte_dma_is_valid(dev_id) || dev_info == NULL)
                return -EINVAL;

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
        memset(dev_info, 0, sizeof(struct rte_dma_info));
        ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
                                            sizeof(struct rte_dma_info));
        if (ret != 0)
                return ret;

        dev_info->dev_name = dev->data->dev_name;
        dev_info->numa_node = dev->device->numa_node;
        dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;

        return 0;
}

int
rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
{
        struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
        struct rte_dma_info dev_info;
        int ret;

        if (!rte_dma_is_valid(dev_id) || dev_conf == NULL)
                return -EINVAL;

        if (dev->data->dev_started != 0) {
                RTE_DMA_LOG(ERR,
                        "Device %d must be stopped to allow configuration",
                        dev_id);
                return -EBUSY;
        }

        ret = rte_dma_info_get(dev_id, &dev_info);
        if (ret != 0) {
                RTE_DMA_LOG(ERR, "Device %d failed to get device info",
                        dev_id);
                return -EINVAL;
        }
        if (dev_conf->nb_vchans == 0) {
                RTE_DMA_LOG(ERR,
                        "Device %d configured with zero vchans", dev_id);
                return -EINVAL;
        }
        if (dev_conf->nb_vchans > dev_info.max_vchans) {
                RTE_DMA_LOG(ERR,
                        "Device %d configured with too many vchans", dev_id);
                return -EINVAL;
        }
        if (dev_conf->enable_silent &&
            !(dev_info.dev_capa & RTE_DMA_CAPA_SILENT)) {
                RTE_DMA_LOG(ERR, "Device %d doesn't support silent mode",
                        dev_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
        ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
                                             sizeof(struct rte_dma_conf));
        if (ret == 0)
                memcpy(&dev->data->dev_conf, dev_conf,
                       sizeof(struct rte_dma_conf));

        return ret;
}
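
/* A minimal application-side sketch of the setup sequence (illustrative
 * only): query limits, configure, set up vchans, then start.
 *
 *     struct rte_dma_info info;
 *     struct rte_dma_conf conf = { .nb_vchans = 1 };
 *
 *     if (rte_dma_info_get(dev_id, &info) != 0 ||
 *         rte_dma_configure(dev_id, &conf) != 0)
 *             handle_error();
 */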

int
rte_dma_start(int16_t dev_id)
{
        struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
        int ret;

        if (!rte_dma_is_valid(dev_id))
                return -EINVAL;

        if (dev->data->dev_conf.nb_vchans == 0) {
                RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
                return -EINVAL;
        }

        if (dev->data->dev_started != 0) {
                RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
                return 0;
        }

        if (dev->dev_ops->dev_start == NULL)
                goto mark_started;

        ret = (*dev->dev_ops->dev_start)(dev);
        if (ret != 0)
                return ret;

mark_started:
        dev->data->dev_started = 1;
        return 0;
}

int
rte_dma_stop(int16_t dev_id)
{
        struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
        int ret;

        if (!rte_dma_is_valid(dev_id))
                return -EINVAL;

        if (dev->data->dev_started == 0) {
                RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
                return 0;
        }

        if (dev->dev_ops->dev_stop == NULL)
                goto mark_stopped;

        ret = (*dev->dev_ops->dev_stop)(dev);
        if (ret != 0)
                return ret;

mark_stopped:
        dev->data->dev_started = 0;
        return 0;
}

int
rte_dma_close(int16_t dev_id)
{
        struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
        int ret;

        if (!rte_dma_is_valid(dev_id))
                return -EINVAL;

        /* Device must be stopped before it can be closed */
        if (dev->data->dev_started == 1) {
                RTE_DMA_LOG(ERR,
                        "Device %d must be stopped before closing", dev_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
        ret = (*dev->dev_ops->dev_close)(dev);
        if (ret == 0)
                dma_release(dev);

        return ret;
}

int
rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
                    const struct rte_dma_vchan_conf *conf)
{
        struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
        struct rte_dma_info dev_info;
        bool src_is_dev, dst_is_dev;
        int ret;

        if (!rte_dma_is_valid(dev_id) || conf == NULL)
                return -EINVAL;

        if (dev->data->dev_started != 0) {
                RTE_DMA_LOG(ERR,
                        "Device %d must be stopped to allow configuration",
                        dev_id);
                return -EBUSY;
        }

        ret = rte_dma_info_get(dev_id, &dev_info);
        if (ret != 0) {
                RTE_DMA_LOG(ERR, "Device %d failed to get device info",
                        dev_id);
                return -EINVAL;
        }
        if (dev->data->dev_conf.nb_vchans == 0) {
                RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
                return -EINVAL;
        }
        if (vchan >= dev_info.nb_vchans) {
                RTE_DMA_LOG(ERR, "Device %d vchan out of range", dev_id);
                return -EINVAL;
        }
        if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
            conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
            conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
            conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
                RTE_DMA_LOG(ERR, "Device %d direction invalid", dev_id);
                return -EINVAL;
        }
        if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
            !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM)) {
                RTE_DMA_LOG(ERR,
                        "Device %d doesn't support mem2mem transfer", dev_id);
                return -EINVAL;
        }
        if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
            !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV)) {
                RTE_DMA_LOG(ERR,
                        "Device %d doesn't support mem2dev transfer", dev_id);
                return -EINVAL;
        }
        if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
            !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_MEM)) {
                RTE_DMA_LOG(ERR,
                        "Device %d doesn't support dev2mem transfer", dev_id);
                return -EINVAL;
        }
        if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
            !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_DEV)) {
                RTE_DMA_LOG(ERR,
                        "Device %d doesn't support dev2dev transfer", dev_id);
                return -EINVAL;
        }
        if (conf->nb_desc < dev_info.min_desc ||
            conf->nb_desc > dev_info.max_desc) {
                RTE_DMA_LOG(ERR,
                        "Device %d number of descriptors invalid", dev_id);
                return -EINVAL;
        }
        src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
                     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
        if ((conf->src_port.port_type == RTE_DMA_PORT_NONE && src_is_dev) ||
            (conf->src_port.port_type != RTE_DMA_PORT_NONE && !src_is_dev)) {
                RTE_DMA_LOG(ERR, "Device %d source port type invalid", dev_id);
                return -EINVAL;
        }
        dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
                     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
        if ((conf->dst_port.port_type == RTE_DMA_PORT_NONE && dst_is_dev) ||
            (conf->dst_port.port_type != RTE_DMA_PORT_NONE && !dst_is_dev)) {
                RTE_DMA_LOG(ERR,
                        "Device %d destination port type invalid", dev_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
        return (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
                                        sizeof(struct rte_dma_vchan_conf));
}
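
/* A minimal mem-to-mem vchan configuration sketch (illustrative only;
 * nb_desc must lie within the [min_desc, max_desc] range reported by
 * rte_dma_info_get(), and the port types default to RTE_DMA_PORT_NONE):
 *
 *     struct rte_dma_vchan_conf vconf = {
 *             .direction = RTE_DMA_DIR_MEM_TO_MEM,
 *             .nb_desc = 128,
 *     };
 *
 *     if (rte_dma_vchan_setup(dev_id, 0, &vconf) != 0)
 *             handle_error();
 */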

int
rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
{
        const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];

        if (!rte_dma_is_valid(dev_id) || stats == NULL)
                return -EINVAL;

        if (vchan >= dev->data->dev_conf.nb_vchans &&
            vchan != RTE_DMA_ALL_VCHAN) {
                RTE_DMA_LOG(ERR,
                        "Device %d vchan %u out of range", dev_id, vchan);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
        memset(stats, 0, sizeof(struct rte_dma_stats));
        return (*dev->dev_ops->stats_get)(dev, vchan, stats,
                                          sizeof(struct rte_dma_stats));
}

int
rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
{
        struct rte_dma_dev *dev = &rte_dma_devices[dev_id];

        if (!rte_dma_is_valid(dev_id))
                return -EINVAL;

        if (vchan >= dev->data->dev_conf.nb_vchans &&
            vchan != RTE_DMA_ALL_VCHAN) {
                RTE_DMA_LOG(ERR,
                        "Device %d vchan %u out of range", dev_id, vchan);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
        return (*dev->dev_ops->stats_reset)(dev, vchan);
}

int
rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status *status)
{
        struct rte_dma_dev *dev = &rte_dma_devices[dev_id];

        if (!rte_dma_is_valid(dev_id))
                return -EINVAL;

        if (vchan >= dev->data->dev_conf.nb_vchans) {
                RTE_DMA_LOG(ERR, "Device %d vchan %u out of range",
                        dev_id, vchan);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_status, -ENOTSUP);
        return (*dev->dev_ops->vchan_status)(dev, vchan, status);
}

static const char *
dma_capability_name(uint64_t capability)
{
        static const struct {
                uint64_t capability;
                const char *name;
        } capa_names[] = {
                { RTE_DMA_CAPA_MEM_TO_MEM,  "mem2mem" },
                { RTE_DMA_CAPA_MEM_TO_DEV,  "mem2dev" },
                { RTE_DMA_CAPA_DEV_TO_MEM,  "dev2mem" },
                { RTE_DMA_CAPA_DEV_TO_DEV,  "dev2dev" },
                { RTE_DMA_CAPA_SVA,         "sva"     },
                { RTE_DMA_CAPA_SILENT,      "silent"  },
                { RTE_DMA_CAPA_OPS_COPY,    "copy"    },
                { RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" },
                { RTE_DMA_CAPA_OPS_FILL,    "fill"    },
        };

        const char *name = "unknown";
        uint32_t i;

        for (i = 0; i < RTE_DIM(capa_names); i++) {
                if (capability == capa_names[i].capability) {
                        name = capa_names[i].name;
                        break;
                }
        }

        return name;
}

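/* Print each capability bit's name by repeatedly isolating the lowest set
 * bit (1 << ctz) and clearing it until no bits remain.
 */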
static void
dma_dump_capability(FILE *f, uint64_t dev_capa)
{
        uint64_t capa;

        (void)fprintf(f, "  dev_capa: 0x%" PRIx64 " -", dev_capa);
        while (dev_capa > 0) {
                capa = 1ull << __builtin_ctzll(dev_capa);
                (void)fprintf(f, " %s", dma_capability_name(capa));
                dev_capa &= ~capa;
        }
        (void)fprintf(f, "\n");
}

int
rte_dma_dump(int16_t dev_id, FILE *f)
{
        const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
        struct rte_dma_info dev_info;
        int ret;

        if (!rte_dma_is_valid(dev_id) || f == NULL)
                return -EINVAL;

        ret = rte_dma_info_get(dev_id, &dev_info);
        if (ret != 0) {
                RTE_DMA_LOG(ERR, "Device %d failed to get device info",
                        dev_id);
                return -EINVAL;
        }

        (void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
                dev->data->dev_id,
                dev->data->dev_name,
                dev->data->dev_started ? "started" : "stopped");
        dma_dump_capability(f, dev_info.dev_capa);
        (void)fprintf(f, "  max_vchans_supported: %u\n", dev_info.max_vchans);
        (void)fprintf(f, "  nb_vchans_configured: %u\n", dev_info.nb_vchans);
        (void)fprintf(f, "  silent_mode: %s\n",
                dev->data->dev_conf.enable_silent ? "on" : "off");

        if (dev->dev_ops->dev_dump != NULL)
                return (*dev->dev_ops->dev_dump)(dev, f);

        return 0;
}

static int
dummy_copy(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
           __rte_unused rte_iova_t src, __rte_unused rte_iova_t dst,
           __rte_unused uint32_t length, __rte_unused uint64_t flags)
{
        RTE_DMA_LOG(ERR, "copy is not configured or not supported.");
        return -EINVAL;
}

static int
dummy_copy_sg(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
              __rte_unused const struct rte_dma_sge *src,
              __rte_unused const struct rte_dma_sge *dst,
              __rte_unused uint16_t nb_src, __rte_unused uint16_t nb_dst,
              __rte_unused uint64_t flags)
{
        RTE_DMA_LOG(ERR, "copy_sg is not configured or not supported.");
        return -EINVAL;
}

static int
dummy_fill(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
           __rte_unused uint64_t pattern, __rte_unused rte_iova_t dst,
           __rte_unused uint32_t length, __rte_unused uint64_t flags)
{
        RTE_DMA_LOG(ERR, "fill is not configured or not supported.");
        return -EINVAL;
}

static int
dummy_submit(__rte_unused void *dev_private, __rte_unused uint16_t vchan)
{
        RTE_DMA_LOG(ERR, "submit is not configured or not supported.");
        return -EINVAL;
}

static uint16_t
dummy_completed(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
                __rte_unused const uint16_t nb_cpls,
                __rte_unused uint16_t *last_idx, __rte_unused bool *has_error)
{
        RTE_DMA_LOG(ERR, "completed is not configured or not supported.");
        return 0;
}

static uint16_t
dummy_completed_status(__rte_unused void *dev_private,
                       __rte_unused uint16_t vchan,
                       __rte_unused const uint16_t nb_cpls,
                       __rte_unused uint16_t *last_idx,
                       __rte_unused enum rte_dma_status_code *status)
{
        RTE_DMA_LOG(ERR,
                    "completed_status is not configured or not supported.");
        return 0;
}

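/* Reset a fast-path object to the dummy handlers above, so that datapath
 * calls on an unconfigured or released device fail with a log message
 * instead of crashing through a NULL function pointer.
 */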
static void
dma_fp_object_dummy(struct rte_dma_fp_object *obj)
{
        obj->dev_private      = NULL;
        obj->copy             = dummy_copy;
        obj->copy_sg          = dummy_copy_sg;
        obj->fill             = dummy_fill;
        obj->submit           = dummy_submit;
        obj->completed        = dummy_completed;
        obj->completed_status = dummy_completed_status;
}