/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 HiSilicon Limited
 */

#include <inttypes.h>

#include <rte_bus_vdev.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_kvargs.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>

#include <rte_dmadev_pmd.h>

#include "skeleton_dmadev.h"

RTE_LOG_REGISTER_DEFAULT(skeldma_logtype, INFO);
#define SKELDMA_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, skeldma_logtype, "%s(): " fmt "\n", \
		__func__, ##args)

/* Count of instances, currently only 1 is supported. */
static uint16_t skeldma_count;
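
/* Descriptor lifecycle, as implemented below: each vchan owns four rings,
 * and a descriptor always sits on exactly one of them:
 *   empty -> pending (copy op enqueued) -> running (submit doorbell)
 *         -> completed (cpucopy thread finished the memcpy) -> back to empty
 * The cpucopy control thread emulates the hardware: it drains desc_running
 * with rte_memcpy() and parks finished descriptors on desc_completed.
 */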

static int
skeldma_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
		 uint32_t info_sz)
{
#define SKELDMA_MAX_DESC	8192
#define SKELDMA_MIN_DESC	32

	RTE_SET_USED(dev);
	RTE_SET_USED(info_sz);

	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
			     RTE_DMA_CAPA_SVA |
			     RTE_DMA_CAPA_OPS_COPY;
	dev_info->max_vchans = 1;
	dev_info->max_desc = SKELDMA_MAX_DESC;
	dev_info->min_desc = SKELDMA_MIN_DESC;

	return 0;
}

static int
skeldma_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf,
		  uint32_t conf_sz)
{
	/* The skeleton device needs no configuration. */
	RTE_SET_USED(dev);
	RTE_SET_USED(conf);
	RTE_SET_USED(conf_sz);
	return 0;
}

static void *
cpucopy_thread(void *param)
{
#define SLEEP_THRESHOLD		10000
#define SLEEP_US_VAL		10
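
/* With these values an idle worker spins for SLEEP_THRESHOLD (10000) empty
 * polls, then sleeps SLEEP_US_VAL (10) microseconds per further empty poll,
 * trading wakeup latency for a bounded idle CPU load.
 */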

	struct rte_dma_dev *dev = param;
	struct skeldma_hw *hw = dev->data->dev_private;
	struct skeldma_desc *desc = NULL;
	int ret;

	while (!hw->exit_flag) {
		ret = rte_ring_dequeue(hw->desc_running, (void **)&desc);
		if (ret) {
			/* Keep sleeping if the empty-poll counter wraps. */
			hw->zero_req_count++;
			if (hw->zero_req_count == 0)
				hw->zero_req_count = SLEEP_THRESHOLD;
			if (hw->zero_req_count >= SLEEP_THRESHOLD)
				rte_delay_us_sleep(SLEEP_US_VAL);
			continue;
		}

		hw->zero_req_count = 0;
		rte_memcpy(desc->dst, desc->src, desc->len);
		hw->completed_count++;
		(void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
	}

	return NULL;
}

static void
fflush_ring(struct skeldma_hw *hw, struct rte_ring *ring)
{
	struct skeldma_desc *desc = NULL;

	while (rte_ring_count(ring) > 0) {
		(void)rte_ring_dequeue(ring, (void **)&desc);
		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
	}
}

static int
skeldma_start(struct rte_dma_dev *dev)
{
	struct skeldma_hw *hw = dev->data->dev_private;
	rte_cpuset_t cpuset;
	int ret;

	if (hw->desc_mem == NULL) {
		SKELDMA_LOG(ERR, "Vchan was not set up, start failed!");
		return -EINVAL;
	}

	/* Reset the dmadev to a known state, including:
	 * 1) flushing the pending/running/completed rings back to the
	 *    empty ring.
	 * 2) initializing the ring index to zero.
	 * 3) clearing the running statistics.
	 * 4) setting the cpucopy task exit_flag to false.
	 */
	fflush_ring(hw, hw->desc_pending);
	fflush_ring(hw, hw->desc_running);
	fflush_ring(hw, hw->desc_completed);
	hw->ridx = 0;
	hw->submitted_count = 0;
	hw->zero_req_count = 0;
	hw->completed_count = 0;
	hw->exit_flag = false;

	/* Make the reset above visible before the worker thread starts. */
	rte_mb();

	ret = rte_ctrl_thread_create(&hw->thread, "dma_skeleton", NULL,
				     cpucopy_thread, dev);
	if (ret) {
		SKELDMA_LOG(ERR, "Failed to start cpucopy thread!");
		return -EINVAL;
	}

	if (hw->lcore_id != -1) {
		cpuset = rte_lcore_cpuset(hw->lcore_id);
		ret = pthread_setaffinity_np(hw->thread, sizeof(cpuset),
					     &cpuset);
		if (ret)
			SKELDMA_LOG(WARNING,
				"Failed to set thread affinity to lcore %d!",
				hw->lcore_id);
	}

	return 0;
}

static int
skeldma_stop(struct rte_dma_dev *dev)
{
	struct skeldma_hw *hw = dev->data->dev_private;

	hw->exit_flag = true;
	rte_delay_ms(1);

	pthread_cancel(hw->thread);
	pthread_join(hw->thread, NULL);

	return 0;
}

static int
vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc)
{
	struct skeldma_desc *desc;
	struct rte_ring *empty;
	struct rte_ring *pending;
	struct rte_ring *running;
	struct rte_ring *completed;
	uint16_t i;

	desc = rte_zmalloc_socket("dma_skeleton_desc",
				  nb_desc * sizeof(struct skeldma_desc),
				  RTE_CACHE_LINE_SIZE, hw->socket_id);
	if (desc == NULL) {
		SKELDMA_LOG(ERR, "Failed to allocate dma skeleton desc memory!");
		return -ENOMEM;
	}

	empty = rte_ring_create("dma_skeleton_desc_empty", nb_desc,
				hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
	pending = rte_ring_create("dma_skeleton_desc_pending", nb_desc,
				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
	running = rte_ring_create("dma_skeleton_desc_running", nb_desc,
				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
	completed = rte_ring_create("dma_skeleton_desc_completed", nb_desc,
				    hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (empty == NULL || pending == NULL || running == NULL ||
	    completed == NULL) {
		SKELDMA_LOG(ERR, "Failed to create dma skeleton desc ring!");
		rte_ring_free(empty);
		rte_ring_free(pending);
		rte_ring_free(running);
		rte_ring_free(completed);
		rte_free(desc);
		return -ENOMEM;
	}

	/* The real usable ring size is *count-1* instead of *count* so that
	 * a full ring can be distinguished from an empty one.
	 * @see rte_ring_create
	 */
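	/* E.g. with nb_desc = 32, only 31 descriptors are usable, which is
	 * why the loop below stops at nb_desc - 1.
	 */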
	for (i = 0; i < nb_desc - 1; i++)
		(void)rte_ring_enqueue(empty, (void *)(desc + i));

	hw->desc_mem = desc;
	hw->desc_empty = empty;
	hw->desc_pending = pending;
	hw->desc_running = running;
	hw->desc_completed = completed;

	return 0;
}

static void
vchan_release(struct skeldma_hw *hw)
{
	if (hw->desc_mem == NULL)
		return;

	rte_free(hw->desc_mem);
	hw->desc_mem = NULL;
	rte_ring_free(hw->desc_empty);
	hw->desc_empty = NULL;
	rte_ring_free(hw->desc_pending);
	hw->desc_pending = NULL;
	rte_ring_free(hw->desc_running);
	hw->desc_running = NULL;
	rte_ring_free(hw->desc_completed);
	hw->desc_completed = NULL;
}

static int
skeldma_close(struct rte_dma_dev *dev)
{
	/* The device was already stopped before close is called. */
	vchan_release(dev->data->dev_private);
	return 0;
}

static int
skeldma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
		    const struct rte_dma_vchan_conf *conf,
		    uint32_t conf_sz)
{
	struct skeldma_hw *hw = dev->data->dev_private;

	RTE_SET_USED(vchan);
	RTE_SET_USED(conf_sz);

	if (!rte_is_power_of_2(conf->nb_desc)) {
		SKELDMA_LOG(ERR, "Number of descriptors must be a power of 2!");
		return -EINVAL;
	}

	/* Release any previous vchan resources before setting up anew. */
	vchan_release(hw);
	return vchan_setup(hw, conf->nb_desc);
}

static int
skeldma_stats_get(const struct rte_dma_dev *dev, uint16_t vchan,
		  struct rte_dma_stats *stats, uint32_t stats_sz)
{
	struct skeldma_hw *hw = dev->data->dev_private;

	RTE_SET_USED(vchan);
	RTE_SET_USED(stats_sz);

	stats->submitted = hw->submitted_count;
	stats->completed = hw->completed_count;
	stats->errors = 0;

	return 0;
}

static int
skeldma_stats_reset(struct rte_dma_dev *dev, uint16_t vchan)
{
	struct skeldma_hw *hw = dev->data->dev_private;

	RTE_SET_USED(vchan);

	hw->submitted_count = 0;
	hw->completed_count = 0;

	return 0;
}

static int
skeldma_dump(const struct rte_dma_dev *dev, FILE *f)
{
#define GET_RING_COUNT(ring)	((ring) ? (rte_ring_count(ring)) : 0)

	struct skeldma_hw *hw = dev->data->dev_private;

	(void)fprintf(f,
		"    lcore_id: %d\n"
		"    socket_id: %d\n"
		"    desc_empty_ring_count: %u\n"
		"    desc_pending_ring_count: %u\n"
		"    desc_running_ring_count: %u\n"
		"    desc_completed_ring_count: %u\n",
		hw->lcore_id, hw->socket_id,
		GET_RING_COUNT(hw->desc_empty),
		GET_RING_COUNT(hw->desc_pending),
		GET_RING_COUNT(hw->desc_running),
		GET_RING_COUNT(hw->desc_completed));
	(void)fprintf(f,
		"    next_ring_idx: %u\n"
		"    submitted_count: %" PRIu64 "\n"
		"    completed_count: %" PRIu64 "\n",
		hw->ridx, hw->submitted_count, hw->completed_count);

	return 0;
}

static void
submit(struct skeldma_hw *hw, struct skeldma_desc *desc)
{
	uint16_t count = rte_ring_count(hw->desc_pending);
	struct skeldma_desc *pend_desc = NULL;

	/* Move all pending descriptors to the running ring, then the new
	 * descriptor (if any) behind them, preserving submission order.
	 */
	while (count > 0) {
		(void)rte_ring_dequeue(hw->desc_pending, (void **)&pend_desc);
		(void)rte_ring_enqueue(hw->desc_running, (void *)pend_desc);
		count--;
	}

	if (desc)
		(void)rte_ring_enqueue(hw->desc_running, (void *)desc);
}
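
/* Two-phase submission model implemented here: a copy op without
 * RTE_DMA_OP_FLAG_SUBMIT only lands on desc_pending; submit() acts as the
 * doorbell that publishes pending work to the cpucopy thread via
 * desc_running.
 */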

static int
skeldma_copy(void *dev_private, uint16_t vchan,
	     rte_iova_t src, rte_iova_t dst,
	     uint32_t length, uint64_t flags)
{
	struct skeldma_hw *hw = dev_private;
	struct skeldma_desc *desc;
	int ret;

	RTE_SET_USED(vchan);

	ret = rte_ring_dequeue(hw->desc_empty, (void **)&desc);
	if (ret)
		return -ENOSPC;
	desc->src = (void *)(uintptr_t)src;
	desc->dst = (void *)(uintptr_t)dst;
	desc->len = length;
	desc->ridx = hw->ridx;
	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
		submit(hw, desc);
	else
		(void)rte_ring_enqueue(hw->desc_pending, (void *)desc);
	hw->submitted_count++;

	return hw->ridx++;
}

static int
skeldma_submit(void *dev_private, uint16_t vchan)
{
	struct skeldma_hw *hw = dev_private;

	RTE_SET_USED(vchan);

	submit(hw, NULL);
	return 0;
}

static uint16_t
skeldma_completed(void *dev_private,
		  uint16_t vchan, const uint16_t nb_cpls,
		  uint16_t *last_idx, bool *has_error)
{
	struct skeldma_hw *hw = dev_private;
	struct skeldma_desc *desc = NULL;
	uint16_t index = 0;
	uint16_t count;

	RTE_SET_USED(vchan);
	RTE_SET_USED(has_error);

	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
	while (index < count) {
		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
		if (index == count - 1)
			*last_idx = desc->ridx;
		index++;
		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
	}

	return count;
}

static uint16_t
skeldma_completed_status(void *dev_private,
			 uint16_t vchan, const uint16_t nb_cpls,
			 uint16_t *last_idx, enum rte_dma_status_code *status)
{
	struct skeldma_hw *hw = dev_private;
	struct skeldma_desc *desc = NULL;
	uint16_t index = 0;
	uint16_t count;

	RTE_SET_USED(vchan);

	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
	while (index < count) {
		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
		if (index == count - 1)
			*last_idx = desc->ridx;
		/* The CPU-memcpy engine cannot fail, so every completion
		 * is reported as successful.
		 */
		status[index++] = RTE_DMA_STATUS_SUCCESSFUL;
		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
	}

	return count;
}
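
/* A minimal application-side usage sketch (illustrative values, error
 * handling trimmed); rte_dma_copy() lands in skeldma_copy() above and
 * rte_dma_completed() in skeldma_completed():
 *
 *	uint16_t last_idx;
 *	bool has_error;
 *	int idx;
 *
 *	idx = rte_dma_copy(dev_id, 0, src_iova, dst_iova, len,
 *			   RTE_DMA_OP_FLAG_SUBMIT);
 *	if (idx >= 0)
 *		while (rte_dma_completed(dev_id, 0, 1, &last_idx,
 *					 &has_error) == 0)
 *			;
 */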

static const struct rte_dma_dev_ops skeldma_ops = {
	.dev_info_get = skeldma_info_get,
	.dev_configure = skeldma_configure,
	.dev_start = skeldma_start,
	.dev_stop = skeldma_stop,
	.dev_close = skeldma_close,

	.vchan_setup = skeldma_vchan_setup,

	.stats_get = skeldma_stats_get,
	.stats_reset = skeldma_stats_reset,

	.dev_dump = skeldma_dump,
};

static int
skeldma_create(const char *name, struct rte_vdev_device *vdev, int lcore_id)
{
	struct rte_dma_dev *dev;
	struct skeldma_hw *hw;
	int socket_id;

	socket_id = (lcore_id < 0) ? rte_socket_id() :
				     rte_lcore_to_socket_id(lcore_id);
	dev = rte_dma_pmd_allocate(name, socket_id, sizeof(struct skeldma_hw));
	if (dev == NULL) {
		SKELDMA_LOG(ERR, "Unable to allocate dmadev: %s", name);
		return -EINVAL;
	}

	dev->device = &vdev->device;
	dev->dev_ops = &skeldma_ops;
	/* Fast-path ops live on fp_obj so that data-path calls avoid the
	 * dev_ops indirection.
	 */
	dev->fp_obj->dev_private = dev->data->dev_private;
	dev->fp_obj->copy = skeldma_copy;
	dev->fp_obj->submit = skeldma_submit;
	dev->fp_obj->completed = skeldma_completed;
	dev->fp_obj->completed_status = skeldma_completed_status;

	hw = dev->data->dev_private;
	hw->lcore_id = lcore_id;
	hw->socket_id = socket_id;

	dev->state = RTE_DMA_DEV_READY;

	return dev->data->dev_id;
}

static int
skeldma_destroy(const char *name)
{
	return rte_dma_pmd_release(name);
}

static int
skeldma_parse_lcore(const char *key __rte_unused,
		    const char *value,
		    void *opaque)
{
	int lcore_id = atoi(value);

	if (lcore_id >= 0 && lcore_id < RTE_MAX_LCORE)
		*(int *)opaque = lcore_id;
	return 0;
}

static void
skeldma_parse_vdev_args(struct rte_vdev_device *vdev, int *lcore_id)
{
	static const char *const args[] = {
		SKELDMA_ARG_LCORE,
		NULL
	};

	const char *params;
	struct rte_kvargs *kvlist;

	params = rte_vdev_device_args(vdev);
	if (params == NULL || params[0] == '\0')
		return;

	kvlist = rte_kvargs_parse(params, args);
	if (!kvlist)
		return;

	(void)rte_kvargs_process(kvlist, SKELDMA_ARG_LCORE,
				 skeldma_parse_lcore, lcore_id);
	SKELDMA_LOG(INFO, "Parsed lcore_id = %d", *lcore_id);

	rte_kvargs_free(kvlist);
}

static int
skeldma_probe(struct rte_vdev_device *vdev)
{
	const char *name;
	int lcore_id = -1;
	int ret;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		SKELDMA_LOG(ERR, "Multi-process is not supported for %s", name);
		return -EINVAL;
	}

	/* More than one instance is not supported. */
	if (skeldma_count > 0) {
		SKELDMA_LOG(ERR, "Multiple instances are not supported for %s",
			name);
		return -EINVAL;
	}

	skeldma_parse_vdev_args(vdev, &lcore_id);

	ret = skeldma_create(name, vdev, lcore_id);
	if (ret >= 0) {
		SKELDMA_LOG(INFO, "Created %s dmadev with lcore-id %d",
			name, lcore_id);
		skeldma_count = 1;
	}

	return ret < 0 ? ret : 0;
}

static int
skeldma_remove(struct rte_vdev_device *vdev)
{
	const char *name;
	int ret;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -1;

	ret = skeldma_destroy(name);
	if (!ret) {
		skeldma_count = 0;
		SKELDMA_LOG(INFO, "Removed %s dmadev", name);
	}

	return 0;
}

static struct rte_vdev_driver skeldma_pmd_drv = {
	.probe = skeldma_probe,
	.remove = skeldma_remove,
	.drv_flags = RTE_VDEV_DRV_NEED_IOVA_AS_VA,
};

RTE_PMD_REGISTER_VDEV(dma_skeleton, skeldma_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(dma_skeleton,
		SKELDMA_ARG_LCORE "=<uint16> ");
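
/* Example invocation (illustrative; assumes SKELDMA_ARG_LCORE is "lcore" as
 * registered above):
 *
 *	dpdk-app ... --vdev=dma_skeleton,lcore=3
 *
 * pins the cpucopy control thread to lcore 3. Without the argument the
 * thread keeps the default control-thread affinity and the device is
 * allocated on the probing core's NUMA socket.
 */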