/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 HiSilicon Limited
 * Copyright(c) 2021 Intel Corporation
 */

#include <inttypes.h>

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>

#include "rte_dmadev.h"
#include "rte_dmadev_pmd.h"

static int16_t dma_devices_max;

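/* Control-path state for each device lives in rte_dma_devices; the
 * cache-line-aligned rte_dma_fp_objs table holds only the private data
 * pointer and function pointers used by the inline fast-path API in
 * rte_dmadev.h.
 */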
struct rte_dma_fp_object *rte_dma_fp_objs;
struct rte_dma_dev *rte_dma_devices;

RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
#define RTE_DMA_LOG(level, ...) \
        rte_log(RTE_LOG_ ## level, rte_dma_logtype, RTE_FMT("dma: " \
                RTE_FMT_HEAD(__VA_ARGS__,) "\n", RTE_FMT_TAIL(__VA_ARGS__,)))

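/* Illustrative usage (not part of this file): an application that needs more
 * device slots than RTE_DMADEV_DEFAULT_MAX can enlarge the tables before EAL
 * initialization; handle_error() is a hypothetical placeholder:
 *
 *      if (rte_dma_dev_max(128) != 0)
 *              handle_error();
 *      ret = rte_eal_init(argc, argv);
 */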
int
rte_dma_dev_max(size_t dev_max)
{
        /* This function may be called before rte_eal_init(), so it must not
         * call any rte library functions.
         */
        if (dev_max == 0 || dev_max > INT16_MAX)
                return -EINVAL;

        if (dma_devices_max > 0)
                return -EINVAL;

        dma_devices_max = dev_max;

        return 0;
}

static int
dma_check_name(const char *name)
{
        size_t name_len;

        if (name == NULL) {
                RTE_DMA_LOG(ERR, "Name can't be NULL");
                return -EINVAL;
        }

        name_len = strnlen(name, RTE_DEV_NAME_MAX_LEN);
        if (name_len == 0) {
                RTE_DMA_LOG(ERR, "Zero length DMA device name");
                return -EINVAL;
        }
        if (name_len >= RTE_DEV_NAME_MAX_LEN) {
                RTE_DMA_LOG(ERR, "DMA device name is too long");
                return -EINVAL;
        }

        return 0;
}

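/* Return the first unused slot in the device table, or -1 if the table is
 * full or not yet allocated.
 */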
static int16_t
dma_find_free_id(void)
{
        int16_t i;

        if (rte_dma_devices == NULL)
                return -1;

        for (i = 0; i < dma_devices_max; i++) {
                if (rte_dma_devices[i].state == RTE_DMA_DEV_UNUSED)
                        return i;
        }

        return -1;
}

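/* Look up a registered device by name; return NULL when no match is found. */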
static struct rte_dma_dev *
dma_find_by_name(const char *name)
{
        int16_t i;

        if (rte_dma_devices == NULL)
                return NULL;

        for (i = 0; i < dma_devices_max; i++) {
                if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) &&
                    (!strcmp(name, rte_dma_devices[i].dev_name)))
                        return &rte_dma_devices[i];
        }

        return NULL;
}

static void dma_fp_object_dummy(struct rte_dma_fp_object *obj);

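/* The *_prepare() helpers below allocate the global tables lazily, on the
 * first device allocation, so no memory is consumed while dmadev is unused.
 */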
static int
dma_fp_data_prepare(void)
{
        size_t size;
        void *ptr;
        int i;

        if (rte_dma_fp_objs != NULL)
                return 0;

        /* Fast-path objects must be cache-line aligned, but malloc() gives no
         * such guarantee, so allocate extra memory and realign the pointer.
         * Note: posix_memalign()/aligned_alloc() are not used because their
         * availability depends on the libc version.
         */
        size = dma_devices_max * sizeof(struct rte_dma_fp_object) +
                RTE_CACHE_LINE_SIZE;
        ptr = malloc(size);
        if (ptr == NULL)
                return -ENOMEM;
        memset(ptr, 0, size);

        rte_dma_fp_objs = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE);
        for (i = 0; i < dma_devices_max; i++)
                dma_fp_object_dummy(&rte_dma_fp_objs[i]);

        return 0;
}

static int
dma_dev_data_prepare(void)
{
        size_t size;

        if (rte_dma_devices != NULL)
                return 0;

        size = dma_devices_max * sizeof(struct rte_dma_dev);
        rte_dma_devices = malloc(size);
        if (rte_dma_devices == NULL)
                return -ENOMEM;
        memset(rte_dma_devices, 0, size);

        return 0;
}

static int
dma_data_prepare(void)
{
        int ret;

        if (dma_devices_max == 0)
                dma_devices_max = RTE_DMADEV_DEFAULT_MAX;

        ret = dma_fp_data_prepare();
        if (ret)
                return ret;

        return dma_dev_data_prepare();
}

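/* Reserve a device slot: ensure the global tables exist, pick a free ID,
 * allocate the driver's private data on the requested NUMA node and bind the
 * matching fast-path object.
 */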
static struct rte_dma_dev *
dma_allocate(const char *name, int numa_node, size_t private_data_size)
{
        struct rte_dma_dev *dev;
        void *dev_private;
        int16_t dev_id;
        int ret;

        ret = dma_data_prepare();
        if (ret < 0) {
                RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
                return NULL;
        }

        dev = dma_find_by_name(name);
        if (dev != NULL) {
                RTE_DMA_LOG(ERR, "DMA device already allocated");
                return NULL;
        }

        dev_private = rte_zmalloc_socket(name, private_data_size,
                                         RTE_CACHE_LINE_SIZE, numa_node);
        if (dev_private == NULL) {
                RTE_DMA_LOG(ERR, "Cannot allocate private data");
                return NULL;
        }

        dev_id = dma_find_free_id();
        if (dev_id < 0) {
                RTE_DMA_LOG(ERR, "Reached maximum number of DMA devices");
                rte_free(dev_private);
                return NULL;
        }

        dev = &rte_dma_devices[dev_id];
        rte_strscpy(dev->dev_name, name, sizeof(dev->dev_name));
        dev->dev_id = dev_id;
        dev->numa_node = numa_node;
        dev->dev_private = dev_private;
        dev->fp_obj = &rte_dma_fp_objs[dev_id];
        dma_fp_object_dummy(dev->fp_obj);

        return dev;
}

static void
dma_release(struct rte_dma_dev *dev)
{
        rte_free(dev->dev_private);
        dma_fp_object_dummy(dev->fp_obj);
        memset(dev, 0, sizeof(struct rte_dma_dev));
}

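/* Sketch of expected driver usage (hypothetical PMD, for illustration only;
 * dma_foo, foo_dev_ops and foo_private are made-up names):
 *
 *      dev = rte_dma_pmd_allocate("dma_foo", numa_node,
 *                                 sizeof(struct foo_private));
 *      if (dev == NULL)
 *              return -ENOMEM;
 *      dev->dev_ops = &foo_dev_ops;
 *      dev->fp_obj->dev_private = dev->dev_private;
 *      ... install fast-path handlers in dev->fp_obj ...
 *      dev->state = RTE_DMA_DEV_READY;
 */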
struct rte_dma_dev *
rte_dma_pmd_allocate(const char *name, int numa_node, size_t private_data_size)
{
        struct rte_dma_dev *dev;

        if (dma_check_name(name) != 0 || private_data_size == 0)
                return NULL;

        dev = dma_allocate(name, numa_node, private_data_size);
        if (dev == NULL)
                return NULL;

        dev->state = RTE_DMA_DEV_REGISTERED;

        return dev;
}

int
rte_dma_pmd_release(const char *name)
{
        struct rte_dma_dev *dev;

        if (dma_check_name(name) != 0)
                return -EINVAL;

        dev = dma_find_by_name(name);
        if (dev == NULL)
                return -EINVAL;

        if (dev->state == RTE_DMA_DEV_READY)
                return rte_dma_close(dev->dev_id);

        dma_release(dev);
        return 0;
}

int
rte_dma_get_dev_id_by_name(const char *name)
{
        struct rte_dma_dev *dev;

        if (dma_check_name(name) != 0)
                return -EINVAL;

        dev = dma_find_by_name(name);
        if (dev == NULL)
                return -EINVAL;

        return dev->dev_id;
}

bool
rte_dma_is_valid(int16_t dev_id)
{
        return (dev_id >= 0) && (dev_id < dma_devices_max) &&
                rte_dma_devices != NULL &&
                rte_dma_devices[dev_id].state != RTE_DMA_DEV_UNUSED;
}

uint16_t
rte_dma_count_avail(void)
{
        uint16_t count = 0;
        uint16_t i;

        if (rte_dma_devices == NULL)
                return count;

        for (i = 0; i < dma_devices_max; i++) {
                if (rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED)
                        count++;
        }

        return count;
}

int
rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
{
        const struct rte_dma_dev *dev;
        int ret;

        if (!rte_dma_is_valid(dev_id) || dev_info == NULL)
                return -EINVAL;
        dev = &rte_dma_devices[dev_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
        memset(dev_info, 0, sizeof(struct rte_dma_info));
        ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
                                            sizeof(struct rte_dma_info));
        if (ret != 0)
                return ret;

        dev_info->dev_name = dev->dev_name;
        /* Report the NUMA node recorded at allocation time; dev->device may
         * not be set for devices without a bus backing.
         */
        dev_info->numa_node = dev->numa_node;
        dev_info->nb_vchans = dev->dev_conf.nb_vchans;

        return 0;
}

int
rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
{
        struct rte_dma_dev *dev;
        struct rte_dma_info dev_info;
        int ret;

        if (!rte_dma_is_valid(dev_id) || dev_conf == NULL)
                return -EINVAL;
        dev = &rte_dma_devices[dev_id];

        if (dev->dev_started != 0) {
                RTE_DMA_LOG(ERR,
                        "Device %d must be stopped to allow configuration",
                        dev_id);
                return -EBUSY;
        }

        ret = rte_dma_info_get(dev_id, &dev_info);
        if (ret != 0) {
                RTE_DMA_LOG(ERR, "Device %d failed to get device info",
                        dev_id);
                return -EINVAL;
        }
        if (dev_conf->nb_vchans == 0) {
                RTE_DMA_LOG(ERR,
                        "Device %d configured with zero vchans", dev_id);
                return -EINVAL;
        }
        if (dev_conf->nb_vchans > dev_info.max_vchans) {
                RTE_DMA_LOG(ERR,
                        "Device %d configured with too many vchans", dev_id);
                return -EINVAL;
        }
        if (dev_conf->enable_silent &&
            !(dev_info.dev_capa & RTE_DMA_CAPA_SILENT)) {
                RTE_DMA_LOG(ERR, "Device %d doesn't support silent mode",
                        dev_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
        ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
                                             sizeof(struct rte_dma_conf));
        if (ret == 0)
                memcpy(&dev->dev_conf, dev_conf, sizeof(struct rte_dma_conf));

        return ret;
}

int
rte_dma_start(int16_t dev_id)
{
        struct rte_dma_dev *dev;
        int ret;

        if (!rte_dma_is_valid(dev_id))
                return -EINVAL;
        dev = &rte_dma_devices[dev_id];

        if (dev->dev_conf.nb_vchans == 0) {
                RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
                return -EINVAL;
        }

        if (dev->dev_started != 0) {
                RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
                return 0;
        }

        if (dev->dev_ops->dev_start == NULL)
                goto mark_started;

        ret = (*dev->dev_ops->dev_start)(dev);
        if (ret != 0)
                return ret;

mark_started:
        dev->dev_started = 1;
        return 0;
}

int
rte_dma_stop(int16_t dev_id)
{
        struct rte_dma_dev *dev;
        int ret;

        if (!rte_dma_is_valid(dev_id))
                return -EINVAL;
        dev = &rte_dma_devices[dev_id];

        if (dev->dev_started == 0) {
                RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
                return 0;
        }

        if (dev->dev_ops->dev_stop == NULL)
                goto mark_stopped;

        ret = (*dev->dev_ops->dev_stop)(dev);
        if (ret != 0)
                return ret;

mark_stopped:
        dev->dev_started = 0;
        return 0;
}

int
rte_dma_close(int16_t dev_id)
{
        struct rte_dma_dev *dev;
        int ret;

        if (!rte_dma_is_valid(dev_id))
                return -EINVAL;
        dev = &rte_dma_devices[dev_id];

        /* Device must be stopped before it can be closed */
        if (dev->dev_started == 1) {
                RTE_DMA_LOG(ERR,
                        "Device %d must be stopped before closing", dev_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
        ret = (*dev->dev_ops->dev_close)(dev);
        if (ret == 0)
                dma_release(dev);

        return ret;
}

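/* Set up a virtual channel. Beyond delegating to the driver, validate that
 * the requested direction is supported by the device, that nb_desc is within
 * the driver's [min_desc, max_desc] range, and that a port type is given
 * exactly when the corresponding endpoint is a device.
 */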
int
rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
                    const struct rte_dma_vchan_conf *conf)
{
        struct rte_dma_dev *dev;
        struct rte_dma_info dev_info;
        bool src_is_dev, dst_is_dev;
        int ret;

        if (!rte_dma_is_valid(dev_id) || conf == NULL)
                return -EINVAL;
        dev = &rte_dma_devices[dev_id];

        if (dev->dev_started != 0) {
                RTE_DMA_LOG(ERR,
                        "Device %d must be stopped to allow configuration",
                        dev_id);
                return -EBUSY;
        }

        ret = rte_dma_info_get(dev_id, &dev_info);
        if (ret != 0) {
                RTE_DMA_LOG(ERR, "Device %d failed to get device info",
                        dev_id);
                return -EINVAL;
        }
        if (dev->dev_conf.nb_vchans == 0) {
                RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
                return -EINVAL;
        }
        if (vchan >= dev_info.nb_vchans) {
                RTE_DMA_LOG(ERR, "Device %d vchan out of range", dev_id);
                return -EINVAL;
        }
        if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
            conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
            conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
            conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
                RTE_DMA_LOG(ERR, "Device %d direction invalid", dev_id);
                return -EINVAL;
        }
        if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
            !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM)) {
                RTE_DMA_LOG(ERR,
                        "Device %d doesn't support mem2mem transfer", dev_id);
                return -EINVAL;
        }
        if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
            !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV)) {
                RTE_DMA_LOG(ERR,
                        "Device %d doesn't support mem2dev transfer", dev_id);
                return -EINVAL;
        }
        if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
            !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_MEM)) {
                RTE_DMA_LOG(ERR,
                        "Device %d doesn't support dev2mem transfer", dev_id);
                return -EINVAL;
        }
        if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
            !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_DEV)) {
                RTE_DMA_LOG(ERR,
                        "Device %d doesn't support dev2dev transfer", dev_id);
                return -EINVAL;
        }
        if (conf->nb_desc < dev_info.min_desc ||
            conf->nb_desc > dev_info.max_desc) {
                RTE_DMA_LOG(ERR,
                        "Device %d number of descriptors invalid", dev_id);
                return -EINVAL;
        }
        src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
                     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
        if ((conf->src_port.port_type == RTE_DMA_PORT_NONE && src_is_dev) ||
            (conf->src_port.port_type != RTE_DMA_PORT_NONE && !src_is_dev)) {
                RTE_DMA_LOG(ERR, "Device %d source port type invalid", dev_id);
                return -EINVAL;
        }
        dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
                     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
        if ((conf->dst_port.port_type == RTE_DMA_PORT_NONE && dst_is_dev) ||
            (conf->dst_port.port_type != RTE_DMA_PORT_NONE && !dst_is_dev)) {
                RTE_DMA_LOG(ERR,
                        "Device %d destination port type invalid", dev_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
        return (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
                                        sizeof(struct rte_dma_vchan_conf));
}

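/* Illustrative control-path sequence from an application (not part of this
 * file); error handling omitted and the nb_desc value is an assumption that
 * must fall within the driver's advertised range:
 *
 *      struct rte_dma_conf dev_conf = { .nb_vchans = 1 };
 *      struct rte_dma_vchan_conf vconf = {
 *              .direction = RTE_DMA_DIR_MEM_TO_MEM,
 *              .nb_desc = 1024,
 *      };
 *      rte_dma_configure(dev_id, &dev_conf);
 *      rte_dma_vchan_setup(dev_id, 0, &vconf);
 *      rte_dma_start(dev_id);
 */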
int
rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
{
        const struct rte_dma_dev *dev;

        if (!rte_dma_is_valid(dev_id) || stats == NULL)
                return -EINVAL;
        dev = &rte_dma_devices[dev_id];

        if (vchan >= dev->dev_conf.nb_vchans &&
            vchan != RTE_DMA_ALL_VCHAN) {
                RTE_DMA_LOG(ERR,
                        "Device %d vchan %u out of range", dev_id, vchan);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
        memset(stats, 0, sizeof(struct rte_dma_stats));
        return (*dev->dev_ops->stats_get)(dev, vchan, stats,
                                          sizeof(struct rte_dma_stats));
}

int
rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
{
        struct rte_dma_dev *dev;

        if (!rte_dma_is_valid(dev_id))
                return -EINVAL;
        dev = &rte_dma_devices[dev_id];

        if (vchan >= dev->dev_conf.nb_vchans &&
            vchan != RTE_DMA_ALL_VCHAN) {
                RTE_DMA_LOG(ERR,
                        "Device %d vchan %u out of range", dev_id, vchan);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
        return (*dev->dev_ops->stats_reset)(dev, vchan);
}

static const char *
dma_capability_name(uint64_t capability)
{
        static const struct {
                uint64_t capability;
                const char *name;
        } capa_names[] = {
                { RTE_DMA_CAPA_MEM_TO_MEM,  "mem2mem" },
                { RTE_DMA_CAPA_MEM_TO_DEV,  "mem2dev" },
                { RTE_DMA_CAPA_DEV_TO_MEM,  "dev2mem" },
                { RTE_DMA_CAPA_DEV_TO_DEV,  "dev2dev" },
                { RTE_DMA_CAPA_SVA,         "sva"     },
                { RTE_DMA_CAPA_SILENT,      "silent"  },
                { RTE_DMA_CAPA_OPS_COPY,    "copy"    },
                { RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" },
                { RTE_DMA_CAPA_OPS_FILL,    "fill"    },
        };

        const char *name = "unknown";
        uint32_t i;

        for (i = 0; i < RTE_DIM(capa_names); i++) {
                if (capability == capa_names[i].capability) {
                        name = capa_names[i].name;
                        break;
                }
        }

        return name;
}

static void
dma_dump_capability(FILE *f, uint64_t dev_capa)
{
        uint64_t capa;

        (void)fprintf(f, "  dev_capa: 0x%" PRIx64 " -", dev_capa);
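        /* Isolate the lowest set capability bit, print its name, then clear
         * it, until no bits remain.
         */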
        while (dev_capa > 0) {
                capa = 1ull << __builtin_ctzll(dev_capa);
                (void)fprintf(f, " %s", dma_capability_name(capa));
                dev_capa &= ~capa;
        }
        (void)fprintf(f, "\n");
}

int
rte_dma_dump(int16_t dev_id, FILE *f)
{
        const struct rte_dma_dev *dev;
        struct rte_dma_info dev_info;
        int ret;

        if (!rte_dma_is_valid(dev_id) || f == NULL)
                return -EINVAL;
        dev = &rte_dma_devices[dev_id];

        ret = rte_dma_info_get(dev_id, &dev_info);
        if (ret != 0) {
                RTE_DMA_LOG(ERR, "Device %d failed to get device info",
                        dev_id);
                return -EINVAL;
        }

        (void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
                dev->dev_id,
                dev->dev_name,
                dev->dev_started ? "started" : "stopped");
        dma_dump_capability(f, dev_info.dev_capa);
        (void)fprintf(f, "  max_vchans_supported: %u\n", dev_info.max_vchans);
        (void)fprintf(f, "  nb_vchans_configured: %u\n", dev_info.nb_vchans);
        (void)fprintf(f, "  silent_mode: %s\n",
                dev->dev_conf.enable_silent ? "on" : "off");

        if (dev->dev_ops->dev_dump != NULL)
                return (*dev->dev_ops->dev_dump)(dev, f);

        return 0;
}

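/* The dummy handlers below are installed in every fast-path object until a
 * driver provides real ones, so a fast-path call on an unconfigured device
 * fails with a clear log message instead of crashing on a NULL pointer.
 */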
static int
dummy_copy(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
           __rte_unused rte_iova_t src, __rte_unused rte_iova_t dst,
           __rte_unused uint32_t length, __rte_unused uint64_t flags)
{
        RTE_DMA_LOG(ERR, "copy is not configured or not supported.");
        return -EINVAL;
}

static int
dummy_copy_sg(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
              __rte_unused const struct rte_dma_sge *src,
              __rte_unused const struct rte_dma_sge *dst,
              __rte_unused uint16_t nb_src, __rte_unused uint16_t nb_dst,
              __rte_unused uint64_t flags)
{
        RTE_DMA_LOG(ERR, "copy_sg is not configured or not supported.");
        return -EINVAL;
}

static int
dummy_fill(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
           __rte_unused uint64_t pattern, __rte_unused rte_iova_t dst,
           __rte_unused uint32_t length, __rte_unused uint64_t flags)
{
        RTE_DMA_LOG(ERR, "fill is not configured or not supported.");
        return -EINVAL;
}

static int
dummy_submit(__rte_unused void *dev_private, __rte_unused uint16_t vchan)
{
        RTE_DMA_LOG(ERR, "submit is not configured or not supported.");
        return -EINVAL;
}

static uint16_t
dummy_completed(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
                __rte_unused const uint16_t nb_cpls,
                __rte_unused uint16_t *last_idx, __rte_unused bool *has_error)
{
        RTE_DMA_LOG(ERR, "completed is not configured or not supported.");
        return 0;
}

static uint16_t
dummy_completed_status(__rte_unused void *dev_private,
                       __rte_unused uint16_t vchan,
                       __rte_unused const uint16_t nb_cpls,
                       __rte_unused uint16_t *last_idx,
                       __rte_unused enum rte_dma_status_code *status)
{
        RTE_DMA_LOG(ERR,
                    "completed_status is not configured or not supported.");
        return 0;
}

static void
dma_fp_object_dummy(struct rte_dma_fp_object *obj)
{
        obj->dev_private      = NULL;
        obj->copy             = dummy_copy;
        obj->copy_sg          = dummy_copy_sg;
        obj->fill             = dummy_fill;
        obj->submit           = dummy_submit;
        obj->completed        = dummy_completed;
        obj->completed_status = dummy_completed_status;
}