1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2021 HiSilicon Limited
3 * Copyright(c) 2021 Intel Corporation
11 #include <rte_malloc.h>
12 #include <rte_memzone.h>
13 #include <rte_string_fns.h>
15 #include "rte_dmadev.h"
16 #include "rte_dmadev_pmd.h"
/* Upper bound on device slots; settable once via rte_dma_dev_max(). */
static int16_t dma_devices_max;

/* Flat array of all DMA device slots, sized dma_devices_max entries. */
struct rte_dma_dev *rte_dma_devices;

RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
#define RTE_DMA_LOG(level, ...) \
	rte_log(RTE_LOG_ ## level, rte_dma_logtype, RTE_FMT("dma: " \
		RTE_FMT_HEAD(__VA_ARGS__,) "\n", RTE_FMT_TAIL(__VA_ARGS__,)))
28 rte_dma_dev_max(size_t dev_max)
30 /* This function may be called before rte_eal_init(), so no rte library
31 * function can be called in this function.
33 if (dev_max == 0 || dev_max > INT16_MAX)
36 if (dma_devices_max > 0)
39 dma_devices_max = dev_max;
45 dma_check_name(const char *name)
50 RTE_DMA_LOG(ERR, "Name can't be NULL");
54 name_len = strnlen(name, RTE_DEV_NAME_MAX_LEN);
56 RTE_DMA_LOG(ERR, "Zero length DMA device name");
59 if (name_len >= RTE_DEV_NAME_MAX_LEN) {
60 RTE_DMA_LOG(ERR, "DMA device name is too long");
68 dma_find_free_id(void)
72 if (rte_dma_devices == NULL)
75 for (i = 0; i < dma_devices_max; i++) {
76 if (rte_dma_devices[i].state == RTE_DMA_DEV_UNUSED)
83 static struct rte_dma_dev*
84 dma_find_by_name(const char *name)
88 if (rte_dma_devices == NULL)
91 for (i = 0; i < dma_devices_max; i++) {
92 if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) &&
93 (!strcmp(name, rte_dma_devices[i].dev_name)))
94 return &rte_dma_devices[i];
101 dma_dev_data_prepare(void)
105 if (rte_dma_devices != NULL)
108 size = dma_devices_max * sizeof(struct rte_dma_dev);
109 rte_dma_devices = malloc(size);
110 if (rte_dma_devices == NULL)
112 memset(rte_dma_devices, 0, size);
118 dma_data_prepare(void)
120 if (dma_devices_max == 0)
121 dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
122 return dma_dev_data_prepare();
125 static struct rte_dma_dev *
126 dma_allocate(const char *name, int numa_node, size_t private_data_size)
128 struct rte_dma_dev *dev;
133 ret = dma_data_prepare();
135 RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
139 dev = dma_find_by_name(name);
141 RTE_DMA_LOG(ERR, "DMA device already allocated");
145 dev_private = rte_zmalloc_socket(name, private_data_size,
146 RTE_CACHE_LINE_SIZE, numa_node);
147 if (dev_private == NULL) {
148 RTE_DMA_LOG(ERR, "Cannot allocate private data");
152 dev_id = dma_find_free_id();
154 RTE_DMA_LOG(ERR, "Reached maximum number of DMA devices");
155 rte_free(dev_private);
159 dev = &rte_dma_devices[dev_id];
160 rte_strscpy(dev->dev_name, name, sizeof(dev->dev_name));
161 dev->dev_id = dev_id;
162 dev->numa_node = numa_node;
163 dev->dev_private = dev_private;
169 dma_release(struct rte_dma_dev *dev)
171 rte_free(dev->dev_private);
172 memset(dev, 0, sizeof(struct rte_dma_dev));
176 rte_dma_pmd_allocate(const char *name, int numa_node, size_t private_data_size)
178 struct rte_dma_dev *dev;
180 if (dma_check_name(name) != 0 || private_data_size == 0)
183 dev = dma_allocate(name, numa_node, private_data_size);
187 dev->state = RTE_DMA_DEV_REGISTERED;
193 rte_dma_pmd_release(const char *name)
195 struct rte_dma_dev *dev;
197 if (dma_check_name(name) != 0)
200 dev = dma_find_by_name(name);
204 if (dev->state == RTE_DMA_DEV_READY)
205 return rte_dma_close(dev->dev_id);
212 rte_dma_get_dev_id_by_name(const char *name)
214 struct rte_dma_dev *dev;
216 if (dma_check_name(name) != 0)
219 dev = dma_find_by_name(name);
227 rte_dma_is_valid(int16_t dev_id)
229 return (dev_id >= 0) && (dev_id < dma_devices_max) &&
230 rte_dma_devices != NULL &&
231 rte_dma_devices[dev_id].state != RTE_DMA_DEV_UNUSED;
235 rte_dma_count_avail(void)
240 if (rte_dma_devices == NULL)
243 for (i = 0; i < dma_devices_max; i++) {
244 if (rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED)
252 rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
254 const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
257 if (!rte_dma_is_valid(dev_id) || dev_info == NULL)
260 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
261 memset(dev_info, 0, sizeof(struct rte_dma_info));
262 ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
263 sizeof(struct rte_dma_info));
267 dev_info->dev_name = dev->dev_name;
268 dev_info->numa_node = dev->device->numa_node;
269 dev_info->nb_vchans = dev->dev_conf.nb_vchans;
275 rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
277 struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
278 struct rte_dma_info dev_info;
281 if (!rte_dma_is_valid(dev_id) || dev_conf == NULL)
284 if (dev->dev_started != 0) {
286 "Device %d must be stopped to allow configuration",
291 ret = rte_dma_info_get(dev_id, &dev_info);
293 RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
296 if (dev_conf->nb_vchans == 0) {
298 "Device %d configure zero vchans", dev_id);
301 if (dev_conf->nb_vchans > dev_info.max_vchans) {
303 "Device %d configure too many vchans", dev_id);
306 if (dev_conf->enable_silent &&
307 !(dev_info.dev_capa & RTE_DMA_CAPA_SILENT)) {
308 RTE_DMA_LOG(ERR, "Device %d don't support silent", dev_id);
312 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
313 ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
314 sizeof(struct rte_dma_conf));
316 memcpy(&dev->dev_conf, dev_conf, sizeof(struct rte_dma_conf));
322 rte_dma_start(int16_t dev_id)
324 struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
327 if (!rte_dma_is_valid(dev_id))
330 if (dev->dev_conf.nb_vchans == 0) {
331 RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
335 if (dev->dev_started != 0) {
336 RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
340 if (dev->dev_ops->dev_start == NULL)
343 ret = (*dev->dev_ops->dev_start)(dev);
348 dev->dev_started = 1;
353 rte_dma_stop(int16_t dev_id)
355 struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
358 if (!rte_dma_is_valid(dev_id))
361 if (dev->dev_started == 0) {
362 RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
366 if (dev->dev_ops->dev_stop == NULL)
369 ret = (*dev->dev_ops->dev_stop)(dev);
374 dev->dev_started = 0;
379 rte_dma_close(int16_t dev_id)
381 struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
384 if (!rte_dma_is_valid(dev_id))
387 /* Device must be stopped before it can be closed */
388 if (dev->dev_started == 1) {
390 "Device %d must be stopped before closing", dev_id);
394 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
395 ret = (*dev->dev_ops->dev_close)(dev);
403 rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
404 const struct rte_dma_vchan_conf *conf)
406 struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
407 struct rte_dma_info dev_info;
408 bool src_is_dev, dst_is_dev;
411 if (!rte_dma_is_valid(dev_id) || conf == NULL)
414 if (dev->dev_started != 0) {
416 "Device %d must be stopped to allow configuration",
421 ret = rte_dma_info_get(dev_id, &dev_info);
423 RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
426 if (dev->dev_conf.nb_vchans == 0) {
427 RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
430 if (vchan >= dev_info.nb_vchans) {
431 RTE_DMA_LOG(ERR, "Device %d vchan out range!", dev_id);
434 if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
435 conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
436 conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
437 conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
438 RTE_DMA_LOG(ERR, "Device %d direction invalid!", dev_id);
441 if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
442 !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM)) {
444 "Device %d don't support mem2mem transfer", dev_id);
447 if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
448 !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV)) {
450 "Device %d don't support mem2dev transfer", dev_id);
453 if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
454 !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_MEM)) {
456 "Device %d don't support dev2mem transfer", dev_id);
459 if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
460 !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_DEV)) {
462 "Device %d don't support dev2dev transfer", dev_id);
465 if (conf->nb_desc < dev_info.min_desc ||
466 conf->nb_desc > dev_info.max_desc) {
468 "Device %d number of descriptors invalid", dev_id);
471 src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
472 conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
473 if ((conf->src_port.port_type == RTE_DMA_PORT_NONE && src_is_dev) ||
474 (conf->src_port.port_type != RTE_DMA_PORT_NONE && !src_is_dev)) {
475 RTE_DMA_LOG(ERR, "Device %d source port type invalid", dev_id);
478 dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
479 conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
480 if ((conf->dst_port.port_type == RTE_DMA_PORT_NONE && dst_is_dev) ||
481 (conf->dst_port.port_type != RTE_DMA_PORT_NONE && !dst_is_dev)) {
483 "Device %d destination port type invalid", dev_id);
487 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
488 return (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
489 sizeof(struct rte_dma_vchan_conf));
493 rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
495 const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
497 if (!rte_dma_is_valid(dev_id) || stats == NULL)
500 if (vchan >= dev->dev_conf.nb_vchans &&
501 vchan != RTE_DMA_ALL_VCHAN) {
503 "Device %d vchan %u out of range", dev_id, vchan);
507 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
508 memset(stats, 0, sizeof(struct rte_dma_stats));
509 return (*dev->dev_ops->stats_get)(dev, vchan, stats,
510 sizeof(struct rte_dma_stats));
514 rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
516 struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
518 if (!rte_dma_is_valid(dev_id))
521 if (vchan >= dev->dev_conf.nb_vchans &&
522 vchan != RTE_DMA_ALL_VCHAN) {
524 "Device %d vchan %u out of range", dev_id, vchan);
528 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
529 return (*dev->dev_ops->stats_reset)(dev, vchan);
533 dma_capability_name(uint64_t capability)
535 static const struct {
539 { RTE_DMA_CAPA_MEM_TO_MEM, "mem2mem" },
540 { RTE_DMA_CAPA_MEM_TO_DEV, "mem2dev" },
541 { RTE_DMA_CAPA_DEV_TO_MEM, "dev2mem" },
542 { RTE_DMA_CAPA_DEV_TO_DEV, "dev2dev" },
543 { RTE_DMA_CAPA_SVA, "sva" },
544 { RTE_DMA_CAPA_SILENT, "silent" },
545 { RTE_DMA_CAPA_OPS_COPY, "copy" },
546 { RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" },
547 { RTE_DMA_CAPA_OPS_FILL, "fill" },
550 const char *name = "unknown";
553 for (i = 0; i < RTE_DIM(capa_names); i++) {
554 if (capability == capa_names[i].capability) {
555 name = capa_names[i].name;
/* Print the capability bitmask followed by the name of each set bit. */
static void
dma_dump_capability(FILE *f, uint64_t dev_capa)
{
	(void)fprintf(f, "  dev_capa: 0x%" PRIx64 " -", dev_capa);
	/* Peel off the lowest set bit each iteration until none remain. */
	while (dev_capa != 0) {
		uint64_t bit = 1ull << __builtin_ctzll(dev_capa);

		(void)fprintf(f, " %s", dma_capability_name(bit));
		dev_capa &= ~bit;
	}
	(void)fprintf(f, "\n");
}
578 rte_dma_dump(int16_t dev_id, FILE *f)
580 const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
581 struct rte_dma_info dev_info;
584 if (!rte_dma_is_valid(dev_id) || f == NULL)
587 ret = rte_dma_info_get(dev_id, &dev_info);
589 RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
593 (void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
596 dev->dev_started ? "started" : "stopped");
597 dma_dump_capability(f, dev_info.dev_capa);
598 (void)fprintf(f, " max_vchans_supported: %u\n", dev_info.max_vchans);
599 (void)fprintf(f, " nb_vchans_configured: %u\n", dev_info.nb_vchans);
600 (void)fprintf(f, " silent_mode: %s\n",
601 dev->dev_conf.enable_silent ? "on" : "off");
603 if (dev->dev_ops->dev_dump != NULL)
604 return (*dev->dev_ops->dev_dump)(dev, f);