/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 HiSilicon Limited
 * Copyright(c) 2021 Intel Corporation
 */
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>

#include <rte_eal.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>

#include "rte_dmadev.h"
#include "rte_dmadev_pmd.h"
static int16_t dma_devices_max;

struct rte_dma_fp_object *rte_dma_fp_objs;
struct rte_dma_dev *rte_dma_devices;

RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
#define RTE_DMA_LOG(level, ...) \
	rte_log(RTE_LOG_ ## level, rte_dma_logtype, RTE_FMT("dma: " \
		RTE_FMT_HEAD(__VA_ARGS__,) "\n", RTE_FMT_TAIL(__VA_ARGS__,)))
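
/*
 * Example (illustrative): the macro is used like a printf-style logger and
 * appends the trailing newline itself, e.g.:
 *
 *	RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
 */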

int
rte_dma_dev_max(size_t dev_max)
{
	/* This function may be called before rte_eal_init(), so no rte
	 * library function can be called here.
	 */
	if (dev_max == 0 || dev_max > INT16_MAX)
		return -EINVAL;

	if (dma_devices_max > 0)
		return -EINVAL;

	dma_devices_max = dev_max;

	return 0;
}
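
/*
 * Example (illustrative sketch; the limit value is arbitrary): since the
 * call must precede rte_eal_init(), it typically sits at the top of main():
 *
 *	int main(int argc, char **argv)
 *	{
 *		rte_dma_dev_max(128);
 *		rte_eal_init(argc, argv);
 *		...
 *	}
 */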

static int
dma_check_name(const char *name)
{
	size_t name_len;

	if (name == NULL) {
		RTE_DMA_LOG(ERR, "Name can't be NULL");
		return -EINVAL;
	}

	name_len = strnlen(name, RTE_DEV_NAME_MAX_LEN);
	if (name_len == 0) {
		RTE_DMA_LOG(ERR, "Zero length DMA device name");
		return -EINVAL;
	}
	if (name_len >= RTE_DEV_NAME_MAX_LEN) {
		RTE_DMA_LOG(ERR, "DMA device name is too long");
		return -EINVAL;
	}

	return 0;
}

static int16_t
dma_find_free_id(void)
{
	int16_t i;

	if (rte_dma_devices == NULL)
		return -1;

	for (i = 0; i < dma_devices_max; i++) {
		if (rte_dma_devices[i].state == RTE_DMA_DEV_UNUSED)
			return i;
	}

	return -1;
}

static struct rte_dma_dev *
dma_find_by_name(const char *name)
{
	int16_t i;

	if (rte_dma_devices == NULL)
		return NULL;

	for (i = 0; i < dma_devices_max; i++) {
		if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) &&
		    (!strcmp(name, rte_dma_devices[i].dev_name)))
			return &rte_dma_devices[i];
	}

	return NULL;
}

static void dma_fp_object_dummy(struct rte_dma_fp_object *obj);

static int
dma_fp_data_prepare(void)
{
	size_t size;
	void *ptr;
	int i;

	if (rte_dma_fp_objs != NULL)
		return 0;

	/* Fast-path objects must be cache-line aligned, but the pointer
	 * returned by malloc() may not be. Allocate extra bytes so the
	 * array can be realigned.
	 * Note: posix_memalign()/aligned_alloc() are not used here because
	 * their availability depends on the libc version.
	 */
	size = dma_devices_max * sizeof(struct rte_dma_fp_object) +
		RTE_CACHE_LINE_SIZE;
	ptr = malloc(size);
	if (ptr == NULL)
		return -ENOMEM;
	memset(ptr, 0, size);

	rte_dma_fp_objs = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE);
	for (i = 0; i < dma_devices_max; i++)
		dma_fp_object_dummy(&rte_dma_fp_objs[i]);

	return 0;
}
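
/*
 * Illustrative arithmetic for the realignment above: with a 64-byte cache
 * line, if malloc() returned 0x1010, RTE_PTR_ALIGN(0x1010, 64) rounds up
 * to 0x1040. The full array of dma_devices_max objects still fits because
 * RTE_CACHE_LINE_SIZE spare bytes were requested, which covers the worst
 * case of 63 bytes of padding.
 */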

static int
dma_dev_data_prepare(void)
{
	size_t size;

	if (rte_dma_devices != NULL)
		return 0;

	size = dma_devices_max * sizeof(struct rte_dma_dev);
	rte_dma_devices = malloc(size);
	if (rte_dma_devices == NULL)
		return -ENOMEM;
	memset(rte_dma_devices, 0, size);

	return 0;
}

static int
dma_data_prepare(void)
{
	int ret;

	if (dma_devices_max == 0)
		dma_devices_max = RTE_DMADEV_DEFAULT_MAX;

	ret = dma_fp_data_prepare();
	if (ret)
		return ret;

	return dma_dev_data_prepare();
}

static struct rte_dma_dev *
dma_allocate(const char *name, int numa_node, size_t private_data_size)
{
	struct rte_dma_dev *dev;
	void *dev_private;
	int16_t dev_id;
	int ret;

	ret = dma_data_prepare();
	if (ret < 0) {
		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
		return NULL;
	}

	dev = dma_find_by_name(name);
	if (dev != NULL) {
		RTE_DMA_LOG(ERR, "DMA device already allocated");
		return NULL;
	}

	dev_private = rte_zmalloc_socket(name, private_data_size,
					 RTE_CACHE_LINE_SIZE, numa_node);
	if (dev_private == NULL) {
		RTE_DMA_LOG(ERR, "Cannot allocate private data");
		return NULL;
	}

	dev_id = dma_find_free_id();
	if (dev_id < 0) {
		RTE_DMA_LOG(ERR, "Reached maximum number of DMA devices");
		rte_free(dev_private);
		return NULL;
	}

	dev = &rte_dma_devices[dev_id];
	rte_strscpy(dev->dev_name, name, sizeof(dev->dev_name));
	dev->dev_id = dev_id;
	dev->numa_node = numa_node;
	dev->dev_private = dev_private;
	dev->fp_obj = &rte_dma_fp_objs[dev_id];
	dma_fp_object_dummy(dev->fp_obj);

	return dev;
}

static void
dma_release(struct rte_dma_dev *dev)
{
	rte_free(dev->dev_private);
	dma_fp_object_dummy(dev->fp_obj);
	memset(dev, 0, sizeof(struct rte_dma_dev));
}

struct rte_dma_dev *
rte_dma_pmd_allocate(const char *name, int numa_node, size_t private_data_size)
{
	struct rte_dma_dev *dev;

	if (dma_check_name(name) != 0 || private_data_size == 0)
		return NULL;

	dev = dma_allocate(name, numa_node, private_data_size);
	if (dev == NULL)
		return NULL;

	dev->state = RTE_DMA_DEV_REGISTERED;

	return dev;
}
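
/*
 * Example (illustrative sketch of driver-side usage; all "my_dma" names
 * are hypothetical): a PMD probe routine allocates the device, wires up
 * its ops and fast-path functions, then marks it ready:
 *
 *	dev = rte_dma_pmd_allocate("my_dma0", numa_node,
 *				   sizeof(struct my_dma_private));
 *	if (dev == NULL)
 *		return -ENOMEM;
 *	dev->dev_ops = &my_dma_ops;
 *	dev->fp_obj->copy = my_dma_copy;
 *	dev->state = RTE_DMA_DEV_READY;
 */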

int
rte_dma_pmd_release(const char *name)
{
	struct rte_dma_dev *dev;

	if (dma_check_name(name) != 0)
		return -EINVAL;

	dev = dma_find_by_name(name);
	if (dev == NULL)
		return -EINVAL;

	/* The device is still in use by an application: close it instead
	 * of releasing it directly.
	 */
	if (dev->state == RTE_DMA_DEV_READY)
		return rte_dma_close(dev->dev_id);

	dma_release(dev);
	return 0;
}

int
rte_dma_get_dev_id_by_name(const char *name)
{
	struct rte_dma_dev *dev;

	if (dma_check_name(name) != 0)
		return -EINVAL;

	dev = dma_find_by_name(name);
	if (dev == NULL)
		return -EINVAL;

	return dev->dev_id;
}

bool
rte_dma_is_valid(int16_t dev_id)
{
	return (dev_id >= 0) && (dev_id < dma_devices_max) &&
		rte_dma_devices != NULL &&
		rte_dma_devices[dev_id].state != RTE_DMA_DEV_UNUSED;
}

uint16_t
rte_dma_count_avail(void)
{
	uint16_t count = 0;
	int16_t i;

	if (rte_dma_devices == NULL)
		return count;

	for (i = 0; i < dma_devices_max; i++) {
		if (rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED)
			count++;
	}

	return count;
}

int
rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
{
	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id) || dev_info == NULL)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
	memset(dev_info, 0, sizeof(struct rte_dma_info));
	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
					    sizeof(struct rte_dma_info));
	if (ret != 0)
		return ret;

	dev_info->dev_name = dev->dev_name;
	dev_info->numa_node = dev->device->numa_node;
	dev_info->nb_vchans = dev->dev_conf.nb_vchans;

	return 0;
}

int
rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	struct rte_dma_info dev_info;
	int ret;

	if (!rte_dma_is_valid(dev_id) || dev_conf == NULL)
		return -EINVAL;

	if (dev->dev_started != 0) {
		RTE_DMA_LOG(ERR,
			"Device %d must be stopped to allow configuration",
			dev_id);
		return -EBUSY;
	}

	ret = rte_dma_info_get(dev_id, &dev_info);
	if (ret != 0) {
		RTE_DMA_LOG(ERR, "Device %d failed to get device info",
			dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_vchans == 0) {
		RTE_DMA_LOG(ERR,
			"Device %d configured with zero vchans", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_vchans > dev_info.max_vchans) {
		RTE_DMA_LOG(ERR,
			"Device %d configured with too many vchans", dev_id);
		return -EINVAL;
	}
	if (dev_conf->enable_silent &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_SILENT)) {
		RTE_DMA_LOG(ERR, "Device %d doesn't support silent mode",
			dev_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
					     sizeof(struct rte_dma_conf));
	if (ret == 0)
		memcpy(&dev->dev_conf, dev_conf, sizeof(struct rte_dma_conf));

	return ret;
}

int
rte_dma_start(int16_t dev_id)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	if (dev->dev_conf.nb_vchans == 0) {
		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
		return -EINVAL;
	}

	if (dev->dev_started != 0) {
		RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
		return 0;
	}

	/* Starting is optional for a driver: absence of the op just means
	 * there is no hardware action to take.
	 */
	if (dev->dev_ops->dev_start == NULL)
		goto mark_started;

	ret = (*dev->dev_ops->dev_start)(dev);
	if (ret != 0)
		return ret;

mark_started:
	dev->dev_started = 1;
	return 0;
}

int
rte_dma_stop(int16_t dev_id)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	if (dev->dev_started == 0) {
		RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
		return 0;
	}

	if (dev->dev_ops->dev_stop == NULL)
		goto mark_stopped;

	ret = (*dev->dev_ops->dev_stop)(dev);
	if (ret != 0)
		return ret;

mark_stopped:
	dev->dev_started = 0;
	return 0;
}

int
rte_dma_close(int16_t dev_id)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	int ret;

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	/* Device must be stopped before it can be closed */
	if (dev->dev_started == 1) {
		RTE_DMA_LOG(ERR,
			"Device %d must be stopped before closing", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
	ret = (*dev->dev_ops->dev_close)(dev);
	if (ret == 0)
		dma_release(dev);

	return ret;
}

int
rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
		    const struct rte_dma_vchan_conf *conf)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	struct rte_dma_info dev_info;
	bool src_is_dev, dst_is_dev;
	int ret;

	if (!rte_dma_is_valid(dev_id) || conf == NULL)
		return -EINVAL;

	if (dev->dev_started != 0) {
		RTE_DMA_LOG(ERR,
			"Device %d must be stopped to allow configuration",
			dev_id);
		return -EBUSY;
	}

	ret = rte_dma_info_get(dev_id, &dev_info);
	if (ret != 0) {
		RTE_DMA_LOG(ERR, "Device %d failed to get device info",
			dev_id);
		return -EINVAL;
	}
	if (dev->dev_conf.nb_vchans == 0) {
		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
		return -EINVAL;
	}
	if (vchan >= dev_info.nb_vchans) {
		RTE_DMA_LOG(ERR, "Device %d vchan out of range", dev_id);
		return -EINVAL;
	}
	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
		RTE_DMA_LOG(ERR, "Device %d direction invalid", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM)) {
		RTE_DMA_LOG(ERR,
			"Device %d doesn't support mem2mem transfer", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV)) {
		RTE_DMA_LOG(ERR,
			"Device %d doesn't support mem2dev transfer", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_MEM)) {
		RTE_DMA_LOG(ERR,
			"Device %d doesn't support dev2mem transfer", dev_id);
		return -EINVAL;
	}
	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_DEV)) {
		RTE_DMA_LOG(ERR,
			"Device %d doesn't support dev2dev transfer", dev_id);
		return -EINVAL;
	}
	if (conf->nb_desc < dev_info.min_desc ||
	    conf->nb_desc > dev_info.max_desc) {
		RTE_DMA_LOG(ERR,
			"Device %d number of descriptors invalid", dev_id);
		return -EINVAL;
	}
	/* A device-side port description is required exactly when the
	 * corresponding endpoint of the transfer is a device.
	 */
	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
	if ((conf->src_port.port_type == RTE_DMA_PORT_NONE && src_is_dev) ||
	    (conf->src_port.port_type != RTE_DMA_PORT_NONE && !src_is_dev)) {
		RTE_DMA_LOG(ERR, "Device %d source port type invalid", dev_id);
		return -EINVAL;
	}
	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
	if ((conf->dst_port.port_type == RTE_DMA_PORT_NONE && dst_is_dev) ||
	    (conf->dst_port.port_type != RTE_DMA_PORT_NONE && !dst_is_dev)) {
		RTE_DMA_LOG(ERR,
			"Device %d destination port type invalid", dev_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
	return (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
					    sizeof(struct rte_dma_vchan_conf));
}
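
/*
 * Example (illustrative sketch; the device name and descriptor count are
 * hypothetical): the expected call order is configure, then per-vchan
 * setup, then start:
 *
 *	struct rte_dma_conf conf = { .nb_vchans = 1 };
 *	struct rte_dma_vchan_conf vconf = {
 *		.direction = RTE_DMA_DIR_MEM_TO_MEM,
 *		.nb_desc = 1024,
 *	};
 *	int dev_id = rte_dma_get_dev_id_by_name("dma0");
 *
 *	if (rte_dma_configure(dev_id, &conf) == 0 &&
 *	    rte_dma_vchan_setup(dev_id, 0, &vconf) == 0)
 *		rte_dma_start(dev_id);
 */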

int
rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
{
	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];

	if (!rte_dma_is_valid(dev_id) || stats == NULL)
		return -EINVAL;

	if (vchan >= dev->dev_conf.nb_vchans &&
	    vchan != RTE_DMA_ALL_VCHAN) {
		RTE_DMA_LOG(ERR,
			"Device %d vchan %u out of range", dev_id, vchan);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	memset(stats, 0, sizeof(struct rte_dma_stats));
	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
					  sizeof(struct rte_dma_stats));
}
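
/*
 * Example (illustrative): stats can be read per vchan, or device-wide via
 * the RTE_DMA_ALL_VCHAN pseudo-id:
 *
 *	struct rte_dma_stats stats;
 *
 *	if (rte_dma_stats_get(dev_id, RTE_DMA_ALL_VCHAN, &stats) == 0)
 *		printf("submitted %" PRIu64 " completed %" PRIu64 "\n",
 *		       stats.submitted, stats.completed);
 */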

int
rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
{
	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];

	if (!rte_dma_is_valid(dev_id))
		return -EINVAL;

	if (vchan >= dev->dev_conf.nb_vchans &&
	    vchan != RTE_DMA_ALL_VCHAN) {
		RTE_DMA_LOG(ERR,
			"Device %d vchan %u out of range", dev_id, vchan);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
	return (*dev->dev_ops->stats_reset)(dev, vchan);
}

static const char *
dma_capability_name(uint64_t capability)
{
	static const struct {
		uint64_t capability;
		const char *name;
	} capa_names[] = {
		{ RTE_DMA_CAPA_MEM_TO_MEM,  "mem2mem" },
		{ RTE_DMA_CAPA_MEM_TO_DEV,  "mem2dev" },
		{ RTE_DMA_CAPA_DEV_TO_MEM,  "dev2mem" },
		{ RTE_DMA_CAPA_DEV_TO_DEV,  "dev2dev" },
		{ RTE_DMA_CAPA_SVA,         "sva"     },
		{ RTE_DMA_CAPA_SILENT,      "silent"  },
		{ RTE_DMA_CAPA_OPS_COPY,    "copy"    },
		{ RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" },
		{ RTE_DMA_CAPA_OPS_FILL,    "fill"    },
	};

	const char *name = "unknown";
	uint32_t i;

	for (i = 0; i < RTE_DIM(capa_names); i++) {
		if (capability == capa_names[i].capability) {
			name = capa_names[i].name;
			break;
		}
	}

	return name;
}

static void
dma_dump_capability(FILE *f, uint64_t dev_capa)
{
	uint64_t capa;

	(void)fprintf(f, "  dev_capa: 0x%" PRIx64 " -", dev_capa);
	while (dev_capa > 0) {
		/* Isolate the lowest set bit, print it, then clear it. */
		capa = 1ull << __builtin_ctzll(dev_capa);
		(void)fprintf(f, " %s", dma_capability_name(capa));
		dev_capa &= ~capa;
	}
	(void)fprintf(f, "\n");
}
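
/*
 * Illustrative: because the loop peels off one capability bit per
 * iteration, a mask with the mem2mem and copy bits set would print a
 * single line of the form "  dev_capa: 0x... - mem2mem copy".
 */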

int
rte_dma_dump(int16_t dev_id, FILE *f)
{
	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
	struct rte_dma_info dev_info;
	int ret;

	if (!rte_dma_is_valid(dev_id) || f == NULL)
		return -EINVAL;

	ret = rte_dma_info_get(dev_id, &dev_info);
	if (ret != 0) {
		RTE_DMA_LOG(ERR, "Device %d failed to get device info",
			dev_id);
		return -EINVAL;
	}

	(void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
		dev->dev_id,
		dev->dev_name,
		dev->dev_started ? "started" : "stopped");
	dma_dump_capability(f, dev_info.dev_capa);
	(void)fprintf(f, "  max_vchans_supported: %u\n", dev_info.max_vchans);
	(void)fprintf(f, "  nb_vchans_configured: %u\n", dev_info.nb_vchans);
	(void)fprintf(f, "  silent_mode: %s\n",
		dev->dev_conf.enable_silent ? "on" : "off");

	if (dev->dev_ops->dev_dump != NULL)
		return (*dev->dev_ops->dev_dump)(dev, f);

	return 0;
}
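
/*
 * Example (illustrative): rte_dma_dump(dev_id, stdout) on an unstarted
 * device prints output shaped like:
 *
 *	DMA Dev 0, 'dma0' [stopped]
 *	  dev_capa: 0x... - mem2mem copy
 *	  max_vchans_supported: 1
 *	  nb_vchans_configured: 0
 *	  silent_mode: off
 */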

static int
dummy_copy(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
	   __rte_unused rte_iova_t src, __rte_unused rte_iova_t dst,
	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
{
	RTE_DMA_LOG(ERR, "copy is not configured or not supported.");
	return -EINVAL;
}

static int
dummy_copy_sg(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
	      __rte_unused const struct rte_dma_sge *src,
	      __rte_unused const struct rte_dma_sge *dst,
	      __rte_unused uint16_t nb_src, __rte_unused uint16_t nb_dst,
	      __rte_unused uint64_t flags)
{
	RTE_DMA_LOG(ERR, "copy_sg is not configured or not supported.");
	return -EINVAL;
}

static int
dummy_fill(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
	   __rte_unused uint64_t pattern, __rte_unused rte_iova_t dst,
	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
{
	RTE_DMA_LOG(ERR, "fill is not configured or not supported.");
	return -EINVAL;
}

static int
dummy_submit(__rte_unused void *dev_private, __rte_unused uint16_t vchan)
{
	RTE_DMA_LOG(ERR, "submit is not configured or not supported.");
	return -EINVAL;
}

static uint16_t
dummy_completed(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
		__rte_unused const uint16_t nb_cpls,
		__rte_unused uint16_t *last_idx, __rte_unused bool *has_error)
{
	RTE_DMA_LOG(ERR, "completed is not configured or not supported.");
	return 0;
}

static uint16_t
dummy_completed_status(__rte_unused void *dev_private,
		       __rte_unused uint16_t vchan,
		       __rte_unused const uint16_t nb_cpls,
		       __rte_unused uint16_t *last_idx,
		       __rte_unused enum rte_dma_status_code *status)
{
	RTE_DMA_LOG(ERR,
		"completed_status is not configured or not supported.");
	return 0;
}

static void
dma_fp_object_dummy(struct rte_dma_fp_object *obj)
{
	obj->dev_private = NULL;
	obj->copy = dummy_copy;
	obj->copy_sg = dummy_copy_sg;
	obj->fill = dummy_fill;
	obj->submit = dummy_submit;
	obj->completed = dummy_completed;
	obj->completed_status = dummy_completed_status;
}
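
/*
 * The dummy handlers installed above mean a fast-path call on an unused
 * or unconfigured device logs an error and fails instead of jumping
 * through a NULL function pointer, e.g. (illustrative):
 *
 *	rte_dma_copy(bad_dev_id, 0, src, dst, len, 0);
 *	// logs "dma: copy is not configured or not supported."
 */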