/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 HiSilicon Limited
 */

#include <inttypes.h>

#include <rte_bus_vdev.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_kvargs.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>

#include <rte_dmadev_pmd.h>

#include "skeleton_dmadev.h"

RTE_LOG_REGISTER_DEFAULT(skeldma_logtype, INFO);
#define SKELDMA_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, skeldma_logtype, "%s(): " fmt "\n", \
                __func__, ##args)

/* Count of instances, currently only 1 is supported. */
static uint16_t skeldma_count;

static int
skeldma_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
                 uint32_t info_sz)
{
#define SKELDMA_MAX_DESC        8192
#define SKELDMA_MIN_DESC        32

        RTE_SET_USED(dev);
        RTE_SET_USED(info_sz);

        dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
                             RTE_DMA_CAPA_SVA |
                             RTE_DMA_CAPA_OPS_COPY;
        dev_info->max_vchans = 1;
        dev_info->max_desc = SKELDMA_MAX_DESC;
        dev_info->min_desc = SKELDMA_MIN_DESC;

        return 0;
}

static int
skeldma_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf,
                  uint32_t conf_sz)
{
        RTE_SET_USED(dev);
        RTE_SET_USED(conf);
        RTE_SET_USED(conf_sz);
        return 0;
}
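
/*
 * Worker that emulates the DMA engine in software: it pulls descriptors off
 * the running ring, copies the data with the CPU (rte_memcpy) and pushes the
 * descriptor onto the completed ring. After SLEEP_THRESHOLD consecutive empty
 * polls it sleeps SLEEP_US_VAL microseconds per poll so an idle device does
 * not burn a core.
 */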

static void *
cpucopy_thread(void *param)
{
#define SLEEP_THRESHOLD         10000
#define SLEEP_US_VAL            10

        struct rte_dma_dev *dev = param;
        struct skeldma_hw *hw = dev->data->dev_private;
        struct skeldma_desc *desc = NULL;
        int ret;

        while (!hw->exit_flag) {
                ret = rte_ring_dequeue(hw->desc_running, (void **)&desc);
                if (ret) {
                        hw->zero_req_count++;
                        if (hw->zero_req_count == 0)
                                hw->zero_req_count = SLEEP_THRESHOLD;
                        if (hw->zero_req_count >= SLEEP_THRESHOLD)
                                rte_delay_us_sleep(SLEEP_US_VAL);
                        continue;
                }

                hw->zero_req_count = 0;
                rte_memcpy(desc->dst, desc->src, desc->len);
                __atomic_add_fetch(&hw->completed_count, 1, __ATOMIC_RELEASE);
                (void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
        }

        return NULL;
}

static void
fflush_ring(struct skeldma_hw *hw, struct rte_ring *ring)
{
        struct skeldma_desc *desc = NULL;
        while (rte_ring_count(ring) > 0) {
                (void)rte_ring_dequeue(ring, (void **)&desc);
                (void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
        }
}

static int
skeldma_start(struct rte_dma_dev *dev)
{
        struct skeldma_hw *hw = dev->data->dev_private;
        rte_cpuset_t cpuset;
        int ret;

        if (hw->desc_mem == NULL) {
                SKELDMA_LOG(ERR, "Vchan was not setup, start fail!");
                return -EINVAL;
        }

        /* Reset the dmadev to a known state, which includes:
         * 1) fflush pending/running/completed ring to empty ring.
         * 2) init ring idx to zero.
         * 3) init running statistics.
         * 4) mark cpucopy task exit_flag to false.
         */
        fflush_ring(hw, hw->desc_pending);
        fflush_ring(hw, hw->desc_running);
        fflush_ring(hw, hw->desc_completed);
        hw->ridx = 0;
        hw->submitted_count = 0;
        hw->zero_req_count = 0;
        hw->completed_count = 0;
        hw->exit_flag = false;

        ret = rte_ctrl_thread_create(&hw->thread, "dma_skeleton", NULL,
                                     cpucopy_thread, dev);
        if (ret) {
                SKELDMA_LOG(ERR, "Start cpucopy thread fail!");
                return -EINVAL;
        }

        if (hw->lcore_id != -1) {
                cpuset = rte_lcore_cpuset(hw->lcore_id);
                ret = pthread_setaffinity_np(hw->thread, sizeof(cpuset),
                                             &cpuset);
                if (ret)
                        SKELDMA_LOG(WARNING,
                                "Set thread affinity lcore = %d fail!",
                                hw->lcore_id);
        }

        return 0;
}

static int
skeldma_stop(struct rte_dma_dev *dev)
{
        struct skeldma_hw *hw = dev->data->dev_private;

        hw->exit_flag = true;

        pthread_cancel(hw->thread);
        pthread_join(hw->thread, NULL);

        return 0;
}
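
/*
 * Descriptor lifecycle: a descriptor moves from the empty ring to the pending
 * ring (enqueued copy), to the running ring (submitted), to the completed ring
 * (copied by the worker) and finally back to the empty ring once the
 * application has polled its completion. vchan_setup allocates the descriptor
 * array and the four single-producer/single-consumer rings backing this flow.
 */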

static int
vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc)
{
        struct skeldma_desc *desc;
        struct rte_ring *empty;
        struct rte_ring *pending;
        struct rte_ring *running;
        struct rte_ring *completed;
        uint16_t i;

        desc = rte_zmalloc_socket("dma_skeleton_desc",
                                  nb_desc * sizeof(struct skeldma_desc),
                                  RTE_CACHE_LINE_SIZE, hw->socket_id);
        if (desc == NULL) {
                SKELDMA_LOG(ERR, "Malloc dma skeleton desc fail!");
                return -ENOMEM;
        }

        empty = rte_ring_create("dma_skeleton_desc_empty", nb_desc,
                                hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
        pending = rte_ring_create("dma_skeleton_desc_pending", nb_desc,
                                hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
        running = rte_ring_create("dma_skeleton_desc_running", nb_desc,
                                hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
        completed = rte_ring_create("dma_skeleton_desc_completed", nb_desc,
                                hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
        if (empty == NULL || pending == NULL || running == NULL ||
            completed == NULL) {
                SKELDMA_LOG(ERR, "Create dma skeleton desc ring fail!");
                rte_ring_free(empty);
                rte_ring_free(pending);
                rte_ring_free(running);
                rte_ring_free(completed);
                rte_free(desc);
                return -ENOMEM;
        }

        /* The real usable ring size is *count-1* instead of *count* to
         * differentiate a free ring from an empty ring.
         * @see rte_ring_create
         */
        for (i = 0; i < nb_desc - 1; i++)
                (void)rte_ring_enqueue(empty, (void *)(desc + i));

        hw->desc_mem = desc;
        hw->desc_empty = empty;
        hw->desc_pending = pending;
        hw->desc_running = running;
        hw->desc_completed = completed;

        return 0;
}

static void
vchan_release(struct skeldma_hw *hw)
{
        if (hw->desc_mem == NULL)
                return;

        rte_free(hw->desc_mem);
        hw->desc_mem = NULL;
        rte_ring_free(hw->desc_empty);
        hw->desc_empty = NULL;
        rte_ring_free(hw->desc_pending);
        hw->desc_pending = NULL;
        rte_ring_free(hw->desc_running);
        hw->desc_running = NULL;
        rte_ring_free(hw->desc_completed);
        hw->desc_completed = NULL;
}

static int
skeldma_close(struct rte_dma_dev *dev)
{
        /* The device was already stopped before close is called. */
        vchan_release(dev->data->dev_private);
        return 0;
}

static int
skeldma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
                    const struct rte_dma_vchan_conf *conf,
                    uint32_t conf_sz)
{
        struct skeldma_hw *hw = dev->data->dev_private;

        RTE_SET_USED(vchan);
        RTE_SET_USED(conf_sz);

        if (!rte_is_power_of_2(conf->nb_desc)) {
                SKELDMA_LOG(ERR, "Number of desc must be power of 2!");
                return -EINVAL;
        }

        vchan_release(hw);
        return vchan_setup(hw, conf->nb_desc);
}
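
/*
 * The vchan is reported ACTIVE while there are submitted copies that have not
 * completed yet, or while the copy thread has just seen work on the running
 * ring (zero_req_count == 0); otherwise it is reported IDLE.
 */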

static int
skeldma_vchan_status(const struct rte_dma_dev *dev,
                uint16_t vchan, enum rte_dma_vchan_status *status)
{
        struct skeldma_hw *hw = dev->data->dev_private;

        RTE_SET_USED(vchan);

        *status = RTE_DMA_VCHAN_IDLE;
        if (hw->submitted_count != __atomic_load_n(&hw->completed_count, __ATOMIC_ACQUIRE)
                        || hw->zero_req_count == 0)
                *status = RTE_DMA_VCHAN_ACTIVE;
        return 0;
}

static int
skeldma_stats_get(const struct rte_dma_dev *dev, uint16_t vchan,
                  struct rte_dma_stats *stats, uint32_t stats_sz)
{
        struct skeldma_hw *hw = dev->data->dev_private;

        RTE_SET_USED(vchan);
        RTE_SET_USED(stats_sz);

        stats->submitted = hw->submitted_count;
        stats->completed = hw->completed_count;
        stats->errors = 0;

        return 0;
}

static int
skeldma_stats_reset(struct rte_dma_dev *dev, uint16_t vchan)
{
        struct skeldma_hw *hw = dev->data->dev_private;

        RTE_SET_USED(vchan);

        hw->submitted_count = 0;
        hw->completed_count = 0;

        return 0;
}

static int
skeldma_dump(const struct rte_dma_dev *dev, FILE *f)
{
#define GET_RING_COUNT(ring)    ((ring) ? (rte_ring_count(ring)) : 0)

        struct skeldma_hw *hw = dev->data->dev_private;

        (void)fprintf(f,
                "    lcore_id: %d\n"
                "    socket_id: %d\n"
                "    desc_empty_ring_count: %u\n"
                "    desc_pending_ring_count: %u\n"
                "    desc_running_ring_count: %u\n"
                "    desc_completed_ring_count: %u\n",
                hw->lcore_id, hw->socket_id,
                GET_RING_COUNT(hw->desc_empty),
                GET_RING_COUNT(hw->desc_pending),
                GET_RING_COUNT(hw->desc_running),
                GET_RING_COUNT(hw->desc_completed));
        (void)fprintf(f,
                "    next_ring_idx: %u\n"
                "    submitted_count: %" PRIu64 "\n"
                "    completed_count: %" PRIu64 "\n",
                hw->ridx, hw->submitted_count, hw->completed_count);

        return 0;
}
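
/*
 * Flush the pending ring into the running ring, handing the descriptors over
 * to the copy thread; if a descriptor is passed directly it is queued on the
 * running ring as well.
 */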

static void
submit(struct skeldma_hw *hw, struct skeldma_desc *desc)
{
        uint16_t count = rte_ring_count(hw->desc_pending);
        struct skeldma_desc *pend_desc = NULL;

        while (count > 0) {
                (void)rte_ring_dequeue(hw->desc_pending, (void **)&pend_desc);
                (void)rte_ring_enqueue(hw->desc_running, (void *)pend_desc);
                count--;
        }

        if (desc)
                (void)rte_ring_enqueue(hw->desc_running, (void *)desc);
}

static int
skeldma_copy(void *dev_private, uint16_t vchan,
             rte_iova_t src, rte_iova_t dst,
             uint32_t length, uint64_t flags)
{
        struct skeldma_hw *hw = dev_private;
        struct skeldma_desc *desc;
        int ret;

        RTE_SET_USED(vchan);

        ret = rte_ring_dequeue(hw->desc_empty, (void **)&desc);
        if (ret)
                return -ENOSPC;
        desc->src = (void *)(uintptr_t)src;
        desc->dst = (void *)(uintptr_t)dst;
        desc->len = length;
        desc->ridx = hw->ridx;
        if (flags & RTE_DMA_OP_FLAG_SUBMIT)
                submit(hw, desc);
        else
                (void)rte_ring_enqueue(hw->desc_pending, (void *)desc);
        hw->submitted_count++;

        return hw->ridx++;
}

static int
skeldma_submit(void *dev_private, uint16_t vchan)
{
        struct skeldma_hw *hw = dev_private;

        RTE_SET_USED(vchan);

        submit(hw, NULL);
        return 0;
}
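
/*
 * Completion harvesting: up to nb_cpls descriptors are taken from the
 * completed ring, the ring index of the last one is reported through
 * *last_idx, and the descriptors are recycled onto the empty ring.
 */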

static uint16_t
skeldma_completed(void *dev_private,
                  uint16_t vchan, const uint16_t nb_cpls,
                  uint16_t *last_idx, bool *has_error)
{
        struct skeldma_hw *hw = dev_private;
        struct skeldma_desc *desc = NULL;
        uint16_t index = 0;
        uint16_t count;

        RTE_SET_USED(vchan);
        RTE_SET_USED(has_error);

        count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
        while (index < count) {
                (void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
                if (index == count - 1)
                        *last_idx = desc->ridx;
                index++;
                (void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
        }

        return count;
}

static uint16_t
skeldma_completed_status(void *dev_private,
                         uint16_t vchan, const uint16_t nb_cpls,
                         uint16_t *last_idx, enum rte_dma_status_code *status)
{
        struct skeldma_hw *hw = dev_private;
        struct skeldma_desc *desc = NULL;
        uint16_t index = 0;
        uint16_t count;

        RTE_SET_USED(vchan);

        count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
        while (index < count) {
                (void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
                if (index == count - 1)
                        *last_idx = desc->ridx;
                status[index++] = RTE_DMA_STATUS_SUCCESSFUL;
                (void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
        }

        return count;
}

static const struct rte_dma_dev_ops skeldma_ops = {
        .dev_info_get = skeldma_info_get,
        .dev_configure = skeldma_configure,
        .dev_start = skeldma_start,
        .dev_stop = skeldma_stop,
        .dev_close = skeldma_close,

        .vchan_setup = skeldma_vchan_setup,
        .vchan_status = skeldma_vchan_status,

        .stats_get = skeldma_stats_get,
        .stats_reset = skeldma_stats_reset,

        .dev_dump = skeldma_dump,
};
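
/*
 * Control-path callbacks are exposed through dev_ops; the fast-path entry
 * points (copy/submit/completed/completed_status) are plugged directly into
 * dev->fp_obj below so that data-path calls bypass the ops table.
 */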

static int
skeldma_create(const char *name, struct rte_vdev_device *vdev, int lcore_id)
{
        struct rte_dma_dev *dev;
        struct skeldma_hw *hw;
        int socket_id;

        socket_id = (lcore_id < 0) ? rte_socket_id() :
                                     rte_lcore_to_socket_id(lcore_id);
        dev = rte_dma_pmd_allocate(name, socket_id, sizeof(struct skeldma_hw));
        if (dev == NULL) {
                SKELDMA_LOG(ERR, "Unable to allocate dmadev: %s", name);
                return -EINVAL;
        }

        dev->device = &vdev->device;
        dev->dev_ops = &skeldma_ops;
        dev->fp_obj->dev_private = dev->data->dev_private;
        dev->fp_obj->copy = skeldma_copy;
        dev->fp_obj->submit = skeldma_submit;
        dev->fp_obj->completed = skeldma_completed;
        dev->fp_obj->completed_status = skeldma_completed_status;

        hw = dev->data->dev_private;
        hw->lcore_id = lcore_id;
        hw->socket_id = socket_id;

        dev->state = RTE_DMA_DEV_READY;

        return dev->data->dev_id;
}

static int
skeldma_destroy(const char *name)
{
        return rte_dma_pmd_release(name);
}
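
/*
 * The SKELDMA_ARG_LCORE vdev argument selects the lcore whose CPU set is used
 * to pin the cpucopy thread; out-of-range values are silently ignored and the
 * default of -1 (no affinity) is kept.
 */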

static int
skeldma_parse_lcore(const char *key __rte_unused,
                    const char *value,
                    void *opaque)
{
        int lcore_id = atoi(value);
        if (lcore_id >= 0 && lcore_id < RTE_MAX_LCORE)
                *(int *)opaque = lcore_id;
        return 0;
}

static void
skeldma_parse_vdev_args(struct rte_vdev_device *vdev, int *lcore_id)
{
        static const char *const args[] = {
                SKELDMA_ARG_LCORE,
                NULL,
        };
        const char *params;
        struct rte_kvargs *kvlist;

        params = rte_vdev_device_args(vdev);
        if (params == NULL || params[0] == '\0')
                return;

        kvlist = rte_kvargs_parse(params, args);
        if (!kvlist)
                return;

        (void)rte_kvargs_process(kvlist, SKELDMA_ARG_LCORE,
                                 skeldma_parse_lcore, lcore_id);
        SKELDMA_LOG(INFO, "Parse lcore_id = %d", *lcore_id);

        rte_kvargs_free(kvlist);
}

static int
skeldma_probe(struct rte_vdev_device *vdev)
{
        const char *name;
        int lcore_id = -1;
        int ret;

        name = rte_vdev_device_name(vdev);
        if (name == NULL)
                return -EINVAL;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                SKELDMA_LOG(ERR, "Multiple process not supported for %s", name);
                return -EINVAL;
        }

        /* More than one instance is not supported */
        if (skeldma_count > 0) {
                SKELDMA_LOG(ERR, "Multiple instance not supported for %s",
                        name);
                return -EINVAL;
        }

        skeldma_parse_vdev_args(vdev, &lcore_id);

        ret = skeldma_create(name, vdev, lcore_id);
        if (ret >= 0) {
                SKELDMA_LOG(INFO, "Create %s dmadev with lcore-id %d",
                        name, lcore_id);
                skeldma_count = 1;
        }

        return ret < 0 ? ret : 0;
}

static int
skeldma_remove(struct rte_vdev_device *vdev)
{
        const char *name;
        int ret;

        name = rte_vdev_device_name(vdev);
        if (name == NULL)
                return -1;

        ret = skeldma_destroy(name);
        if (!ret) {
                skeldma_count = 0;
                SKELDMA_LOG(INFO, "Remove %s dmadev", name);
        }

        return ret;
}

static struct rte_vdev_driver skeldma_pmd_drv = {
        .probe = skeldma_probe,
        .remove = skeldma_remove,
        .drv_flags = RTE_VDEV_DRV_NEED_IOVA_AS_VA,
};

RTE_PMD_REGISTER_VDEV(dma_skeleton, skeldma_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(dma_skeleton,
                SKELDMA_ARG_LCORE "=<uint16> ");
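
/*
 * Rough usage sketch (not part of the driver, error handling omitted): the
 * vdev is instantiated from the EAL command line, e.g.
 * --vdev=dma_skeleton,lcore=4 (assuming SKELDMA_ARG_LCORE expands to "lcore"),
 * and then driven through the generic dmadev API roughly as follows, where
 * dev_id is the identifier the dmadev library assigned to this device:
 *
 *      struct rte_dma_conf conf = { .nb_vchans = 1 };
 *      struct rte_dma_vchan_conf vconf = {
 *              .direction = RTE_DMA_DIR_MEM_TO_MEM,
 *              .nb_desc = 1024,        (must be a power of 2)
 *      };
 *      rte_dma_configure(dev_id, &conf);
 *      rte_dma_vchan_setup(dev_id, 0, &vconf);
 *      rte_dma_start(dev_id);
 *      rte_dma_copy(dev_id, 0, src_iova, dst_iova, len, RTE_DMA_OP_FLAG_SUBMIT);
 *      ... then poll with rte_dma_completed(dev_id, 0, 1, &last_idx, &err);
 */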