dmadev: add channel status check for testing use
drivers/dma/skeleton/skeleton_dmadev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 HiSilicon Limited
 */

#include <inttypes.h>

#include <rte_bus_vdev.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_kvargs.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>

#include <rte_dmadev_pmd.h>

#include "skeleton_dmadev.h"

RTE_LOG_REGISTER_DEFAULT(skeldma_logtype, INFO);
#define SKELDMA_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, skeldma_logtype, "%s(): " fmt "\n", \
                __func__, ##args)

/* Count of instances; currently only one is supported. */
static uint16_t skeldma_count;

static int
skeldma_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
                 uint32_t info_sz)
{
#define SKELDMA_MAX_DESC        8192
#define SKELDMA_MIN_DESC        32

        RTE_SET_USED(dev);
        RTE_SET_USED(info_sz);

        dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
                             RTE_DMA_CAPA_SVA |
                             RTE_DMA_CAPA_OPS_COPY;
        dev_info->max_vchans = 1;
        dev_info->max_desc = SKELDMA_MAX_DESC;
        dev_info->min_desc = SKELDMA_MIN_DESC;

        return 0;
}
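
/*
 * Illustrative application-side usage, not part of the driver: querying
 * the limits advertised above through the public dmadev API (dev_id is
 * assumed to identify this device).
 *
 *      struct rte_dma_info info;
 *
 *      if (rte_dma_info_get(dev_id, &info) == 0)
 *              printf("copy capable: %s, desc range: [%u, %u]\n",
 *                     (info.dev_capa & RTE_DMA_CAPA_OPS_COPY) ? "yes" : "no",
 *                     info.min_desc, info.max_desc);
 */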

static int
skeldma_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf,
                  uint32_t conf_sz)
{
        RTE_SET_USED(dev);
        RTE_SET_USED(conf);
        RTE_SET_USED(conf_sz);
        return 0;
}

static void *
cpucopy_thread(void *param)
{
#define SLEEP_THRESHOLD         10000
#define SLEEP_US_VAL            10

        struct rte_dma_dev *dev = param;
        struct skeldma_hw *hw = dev->data->dev_private;
        struct skeldma_desc *desc = NULL;
        int ret;

        while (!hw->exit_flag) {
                ret = rte_ring_dequeue(hw->desc_running, (void **)&desc);
                if (ret) {
                        hw->zero_req_count++;
                        /* Pin the counter at the threshold when it wraps so
                         * an idle thread keeps sleeping instead of spinning.
                         */
                        if (hw->zero_req_count == 0)
                                hw->zero_req_count = SLEEP_THRESHOLD;
                        if (hw->zero_req_count >= SLEEP_THRESHOLD)
                                rte_delay_us_sleep(SLEEP_US_VAL);
                        continue;
                }

                hw->zero_req_count = 0;
                rte_memcpy(desc->dst, desc->src, desc->len);
                __atomic_add_fetch(&hw->completed_count, 1, __ATOMIC_RELEASE);
                (void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
        }

        return NULL;
}

static void
fflush_ring(struct skeldma_hw *hw, struct rte_ring *ring)
{
        struct skeldma_desc *desc = NULL;
        while (rte_ring_count(ring) > 0) {
                (void)rte_ring_dequeue(ring, (void **)&desc);
                (void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
        }
}

static int
skeldma_start(struct rte_dma_dev *dev)
{
        struct skeldma_hw *hw = dev->data->dev_private;
        rte_cpuset_t cpuset;
        int ret;

        if (hw->desc_mem == NULL) {
                SKELDMA_LOG(ERR, "Vchan was not setup, start fail!");
                return -EINVAL;
        }

        /* Reset the dmadev to a known state, including:
         * 1) flush the pending/running/completed rings back to the empty
         *    ring.
         * 2) init ring idx to zero.
         * 3) init running statistics.
         * 4) mark cpucopy task exit_flag to false.
         */
        fflush_ring(hw, hw->desc_pending);
        fflush_ring(hw, hw->desc_running);
        fflush_ring(hw, hw->desc_completed);
        hw->ridx = 0;
        hw->submitted_count = 0;
        hw->zero_req_count = 0;
        hw->completed_count = 0;
        hw->exit_flag = false;

        rte_mb();

        ret = rte_ctrl_thread_create(&hw->thread, "dma_skeleton", NULL,
                                     cpucopy_thread, dev);
        if (ret) {
                SKELDMA_LOG(ERR, "Start cpucopy thread fail!");
                return -EINVAL;
        }

        if (hw->lcore_id != -1) {
                cpuset = rte_lcore_cpuset(hw->lcore_id);
                ret = pthread_setaffinity_np(hw->thread, sizeof(cpuset),
                                             &cpuset);
                if (ret)
                        SKELDMA_LOG(WARNING,
                                "Set thread affinity lcore = %d fail!",
                                hw->lcore_id);
        }

        return 0;
}

static int
skeldma_stop(struct rte_dma_dev *dev)
{
        struct skeldma_hw *hw = dev->data->dev_private;

        hw->exit_flag = true;
        rte_delay_ms(1);

        pthread_cancel(hw->thread);
        pthread_join(hw->thread, NULL);

        return 0;
}

static int
vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc)
{
        struct skeldma_desc *desc;
        struct rte_ring *empty;
        struct rte_ring *pending;
        struct rte_ring *running;
        struct rte_ring *completed;
        uint16_t i;

        desc = rte_zmalloc_socket("dma_skeleton_desc",
                                  nb_desc * sizeof(struct skeldma_desc),
                                  RTE_CACHE_LINE_SIZE, hw->socket_id);
        if (desc == NULL) {
                SKELDMA_LOG(ERR, "Malloc dma skeleton desc fail!");
                return -ENOMEM;
        }

        empty = rte_ring_create("dma_skeleton_desc_empty", nb_desc,
                                hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
        pending = rte_ring_create("dma_skeleton_desc_pending", nb_desc,
                                  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
        running = rte_ring_create("dma_skeleton_desc_running", nb_desc,
                                  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
        completed = rte_ring_create("dma_skeleton_desc_completed", nb_desc,
                                    hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
        if (empty == NULL || pending == NULL || running == NULL ||
            completed == NULL) {
                SKELDMA_LOG(ERR, "Create dma skeleton desc ring fail!");
                rte_ring_free(empty);
                rte_ring_free(pending);
                rte_ring_free(running);
                rte_ring_free(completed);
                rte_free(desc);
                return -ENOMEM;
        }

        /* The real usable ring size is *count-1* instead of *count* to
         * differentiate a full ring from an empty ring.
         * @see rte_ring_create
         */
        for (i = 0; i < nb_desc - 1; i++)
                (void)rte_ring_enqueue(empty, (void *)(desc + i));

        hw->desc_mem = desc;
        hw->desc_empty = empty;
        hw->desc_pending = pending;
        hw->desc_running = running;
        hw->desc_completed = completed;

        return 0;
}
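
/*
 * Descriptor lifecycle across the four rings created above (comment added
 * for orientation; skeldma_copy() with RTE_DMA_OP_FLAG_SUBMIT goes
 * straight to running):
 *
 *      empty -> pending        (skeldma_copy without SUBMIT flag)
 *      pending -> running      (submit(), i.e. the doorbell)
 *      running -> completed    (cpucopy_thread performs the memcpy)
 *      completed -> empty      (skeldma_completed/_status hand back)
 */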

static void
vchan_release(struct skeldma_hw *hw)
{
        if (hw->desc_mem == NULL)
                return;

        rte_free(hw->desc_mem);
        hw->desc_mem = NULL;
        rte_ring_free(hw->desc_empty);
        hw->desc_empty = NULL;
        rte_ring_free(hw->desc_pending);
        hw->desc_pending = NULL;
        rte_ring_free(hw->desc_running);
        hw->desc_running = NULL;
        rte_ring_free(hw->desc_completed);
        hw->desc_completed = NULL;
}

static int
skeldma_close(struct rte_dma_dev *dev)
{
        /* The device was already stopped. */
        vchan_release(dev->data->dev_private);
        return 0;
}

static int
skeldma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
                    const struct rte_dma_vchan_conf *conf,
                    uint32_t conf_sz)
{
        struct skeldma_hw *hw = dev->data->dev_private;

        RTE_SET_USED(vchan);
        RTE_SET_USED(conf_sz);

        if (!rte_is_power_of_2(conf->nb_desc)) {
                SKELDMA_LOG(ERR, "Number of desc must be a power of 2!");
                return -EINVAL;
        }

        vchan_release(hw);
        return vchan_setup(hw, conf->nb_desc);
}
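
/*
 * Illustrative application-side setup, not part of the driver (dev_id is
 * assumed to identify this device; error handling elided):
 *
 *      struct rte_dma_conf dev_conf = { .nb_vchans = 1 };
 *      struct rte_dma_vchan_conf qconf = {
 *              .direction = RTE_DMA_DIR_MEM_TO_MEM,
 *              .nb_desc = 1024,        // power of 2, within [32, 8192]
 *      };
 *
 *      rte_dma_configure(dev_id, &dev_conf);
 *      rte_dma_vchan_setup(dev_id, 0, &qconf);
 *      rte_dma_start(dev_id);
 */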

static int
skeldma_vchan_status(const struct rte_dma_dev *dev,
                uint16_t vchan, enum rte_dma_vchan_status *status)
{
        struct skeldma_hw *hw = dev->data->dev_private;

        RTE_SET_USED(vchan);

        /* The channel counts as active while jobs are still outstanding or
         * the copy thread dequeued something recently (zero_req_count is
         * reset to zero on every successful dequeue).
         */
        *status = RTE_DMA_VCHAN_IDLE;
        if (hw->submitted_count != __atomic_load_n(&hw->completed_count,
                        __ATOMIC_ACQUIRE) ||
            hw->zero_req_count == 0)
                *status = RTE_DMA_VCHAN_ACTIVE;
        return 0;
}
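
/*
 * Illustrative test-side use of the status check added here, not part of
 * the driver: wait until the copy thread has drained everything submitted
 * so far.
 *
 *      enum rte_dma_vchan_status st;
 *
 *      do {
 *              if (rte_dma_vchan_status(dev_id, 0, &st) < 0)
 *                      break;
 *      } while (st == RTE_DMA_VCHAN_ACTIVE);
 */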

static int
skeldma_stats_get(const struct rte_dma_dev *dev, uint16_t vchan,
                  struct rte_dma_stats *stats, uint32_t stats_sz)
{
        struct skeldma_hw *hw = dev->data->dev_private;

        RTE_SET_USED(vchan);
        RTE_SET_USED(stats_sz);

        stats->submitted = hw->submitted_count;
        stats->completed = hw->completed_count;
        stats->errors = 0;

        return 0;
}

static int
skeldma_stats_reset(struct rte_dma_dev *dev, uint16_t vchan)
{
        struct skeldma_hw *hw = dev->data->dev_private;

        RTE_SET_USED(vchan);

        hw->submitted_count = 0;
        hw->completed_count = 0;

        return 0;
}

static int
skeldma_dump(const struct rte_dma_dev *dev, FILE *f)
{
#define GET_RING_COUNT(ring)    ((ring) ? (rte_ring_count(ring)) : 0)

        struct skeldma_hw *hw = dev->data->dev_private;

        (void)fprintf(f,
                "    lcore_id: %d\n"
                "    socket_id: %d\n"
                "    desc_empty_ring_count: %u\n"
                "    desc_pending_ring_count: %u\n"
                "    desc_running_ring_count: %u\n"
                "    desc_completed_ring_count: %u\n",
                hw->lcore_id, hw->socket_id,
                GET_RING_COUNT(hw->desc_empty),
                GET_RING_COUNT(hw->desc_pending),
                GET_RING_COUNT(hw->desc_running),
                GET_RING_COUNT(hw->desc_completed));
        (void)fprintf(f,
                "    next_ring_idx: %u\n"
                "    submitted_count: %" PRIu64 "\n"
                "    completed_count: %" PRIu64 "\n",
                hw->ridx, hw->submitted_count, hw->completed_count);

        return 0;
}
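
/*
 * Illustrative usage, not part of the driver: reading the counters and
 * dumping the state above from application code (dev_id is assumed to
 * identify this device):
 *
 *      struct rte_dma_stats stats;
 *
 *      if (rte_dma_stats_get(dev_id, 0, &stats) == 0)
 *              printf("submitted: %" PRIu64 " completed: %" PRIu64 "\n",
 *                     stats.submitted, stats.completed);
 *      (void)rte_dma_dump(dev_id, stdout);
 */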

static inline void
submit(struct skeldma_hw *hw, struct skeldma_desc *desc)
{
        uint16_t count = rte_ring_count(hw->desc_pending);
        struct skeldma_desc *pend_desc = NULL;

        /* Move everything deferred so far to the running ring, then the
         * descriptor being submitted right now, if any.
         */
        while (count > 0) {
                (void)rte_ring_dequeue(hw->desc_pending, (void **)&pend_desc);
                (void)rte_ring_enqueue(hw->desc_running, (void *)pend_desc);
                count--;
        }

        if (desc)
                (void)rte_ring_enqueue(hw->desc_running, (void *)desc);
}

static int
skeldma_copy(void *dev_private, uint16_t vchan,
             rte_iova_t src, rte_iova_t dst,
             uint32_t length, uint64_t flags)
{
        struct skeldma_hw *hw = dev_private;
        struct skeldma_desc *desc;
        int ret;

        RTE_SET_USED(vchan);

        ret = rte_ring_dequeue(hw->desc_empty, (void **)&desc);
        if (ret)
                return -ENOSPC;
        desc->src = (void *)(uintptr_t)src;
        desc->dst = (void *)(uintptr_t)dst;
        desc->len = length;
        desc->ridx = hw->ridx;
        if (flags & RTE_DMA_OP_FLAG_SUBMIT)
                submit(hw, desc);
        else
                (void)rte_ring_enqueue(hw->desc_pending, (void *)desc);
        hw->submitted_count++;

        return hw->ridx++;
}
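
/*
 * Illustrative datapath usage, not part of the driver. Because this PMD
 * declares RTE_VDEV_DRV_NEED_IOVA_AS_VA (see the driver registration at
 * the bottom of this file), virtual addresses can be passed as IOVAs:
 *
 *      uint16_t last_idx;
 *      bool error = false;
 *      int idx;
 *
 *      idx = rte_dma_copy(dev_id, 0, (rte_iova_t)(uintptr_t)src,
 *                         (rte_iova_t)(uintptr_t)dst, len,
 *                         RTE_DMA_OP_FLAG_SUBMIT);
 *      if (idx < 0)
 *              return idx;     // e.g. -ENOSPC: no free descriptor
 *      while (rte_dma_completed(dev_id, 0, 1, &last_idx, &error) == 0)
 *              ;               // busy-wait; a real caller would bound this
 */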

static int
skeldma_submit(void *dev_private, uint16_t vchan)
{
        struct skeldma_hw *hw = dev_private;
        RTE_SET_USED(vchan);
        submit(hw, NULL);
        return 0;
}

static uint16_t
skeldma_completed(void *dev_private,
                  uint16_t vchan, const uint16_t nb_cpls,
                  uint16_t *last_idx, bool *has_error)
{
        struct skeldma_hw *hw = dev_private;
        struct skeldma_desc *desc = NULL;
        uint16_t index = 0;
        uint16_t count;

        RTE_SET_USED(vchan);
        RTE_SET_USED(has_error);

        count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
        while (index < count) {
                (void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
                if (index == count - 1)
                        *last_idx = desc->ridx;
                index++;
                (void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
        }

        return count;
}

static uint16_t
skeldma_completed_status(void *dev_private,
                         uint16_t vchan, const uint16_t nb_cpls,
                         uint16_t *last_idx, enum rte_dma_status_code *status)
{
        struct skeldma_hw *hw = dev_private;
        struct skeldma_desc *desc = NULL;
        uint16_t index = 0;
        uint16_t count;

        RTE_SET_USED(vchan);

        count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
        while (index < count) {
                (void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
                if (index == count - 1)
                        *last_idx = desc->ridx;
                status[index++] = RTE_DMA_STATUS_SUCCESSFUL;
                (void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
        }

        return count;
}

static const struct rte_dma_dev_ops skeldma_ops = {
        .dev_info_get     = skeldma_info_get,
        .dev_configure    = skeldma_configure,
        .dev_start        = skeldma_start,
        .dev_stop         = skeldma_stop,
        .dev_close        = skeldma_close,

        .vchan_setup      = skeldma_vchan_setup,
        .vchan_status     = skeldma_vchan_status,

        .stats_get        = skeldma_stats_get,
        .stats_reset      = skeldma_stats_reset,

        .dev_dump         = skeldma_dump,
};

static int
skeldma_create(const char *name, struct rte_vdev_device *vdev, int lcore_id)
{
        struct rte_dma_dev *dev;
        struct skeldma_hw *hw;
        int socket_id;

        socket_id = (lcore_id < 0) ? rte_socket_id() :
                                     rte_lcore_to_socket_id(lcore_id);
        dev = rte_dma_pmd_allocate(name, socket_id, sizeof(struct skeldma_hw));
        if (dev == NULL) {
                SKELDMA_LOG(ERR, "Unable to allocate dmadev: %s", name);
                return -EINVAL;
        }

        dev->device = &vdev->device;
        dev->dev_ops = &skeldma_ops;
        dev->fp_obj->dev_private = dev->data->dev_private;
        dev->fp_obj->copy = skeldma_copy;
        dev->fp_obj->submit = skeldma_submit;
        dev->fp_obj->completed = skeldma_completed;
        dev->fp_obj->completed_status = skeldma_completed_status;

        hw = dev->data->dev_private;
        hw->lcore_id = lcore_id;
        hw->socket_id = socket_id;

        dev->state = RTE_DMA_DEV_READY;

        return dev->data->dev_id;
}

static int
skeldma_destroy(const char *name)
{
        return rte_dma_pmd_release(name);
}

static int
skeldma_parse_lcore(const char *key __rte_unused,
                    const char *value,
                    void *opaque)
{
        int lcore_id = atoi(value);
        if (lcore_id >= 0 && lcore_id < RTE_MAX_LCORE)
                *(int *)opaque = lcore_id;
        return 0;
}

static void
skeldma_parse_vdev_args(struct rte_vdev_device *vdev, int *lcore_id)
{
        static const char *const args[] = {
                SKELDMA_ARG_LCORE,
                NULL
        };

        struct rte_kvargs *kvlist;
        const char *params;

        params = rte_vdev_device_args(vdev);
        if (params == NULL || params[0] == '\0')
                return;

        kvlist = rte_kvargs_parse(params, args);
        if (!kvlist)
                return;

        (void)rte_kvargs_process(kvlist, SKELDMA_ARG_LCORE,
                                 skeldma_parse_lcore, lcore_id);
        SKELDMA_LOG(INFO, "Parse lcore_id = %d", *lcore_id);

        rte_kvargs_free(kvlist);
}
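
/*
 * Illustrative invocations, not part of the driver. SKELDMA_ARG_LCORE is
 * defined in skeleton_dmadev.h (upstream it is the string "lcore"):
 *
 *      app --vdev=dma_skeleton             # copy thread left unpinned
 *      app --vdev=dma_skeleton,lcore=3     # pin copy thread to lcore 3
 */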

static int
skeldma_probe(struct rte_vdev_device *vdev)
{
        const char *name;
        int lcore_id = -1;
        int ret;

        name = rte_vdev_device_name(vdev);
        if (name == NULL)
                return -EINVAL;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                SKELDMA_LOG(ERR, "Multi-process not supported for %s", name);
                return -EINVAL;
        }

        /* More than one instance is not supported. */
        if (skeldma_count > 0) {
                SKELDMA_LOG(ERR, "Multiple instances not supported for %s",
                        name);
                return -EINVAL;
        }

        skeldma_parse_vdev_args(vdev, &lcore_id);

        ret = skeldma_create(name, vdev, lcore_id);
        if (ret >= 0) {
                SKELDMA_LOG(INFO, "Create %s dmadev with lcore-id %d",
                        name, lcore_id);
                skeldma_count = 1;
        }

        return ret < 0 ? ret : 0;
}

static int
skeldma_remove(struct rte_vdev_device *vdev)
{
        const char *name;
        int ret;

        name = rte_vdev_device_name(vdev);
        if (name == NULL)
                return -1;

        ret = skeldma_destroy(name);
        if (!ret) {
                skeldma_count = 0;
                SKELDMA_LOG(INFO, "Remove %s dmadev", name);
        }

        return ret;
}

static struct rte_vdev_driver skeldma_pmd_drv = {
        .probe = skeldma_probe,
        .remove = skeldma_remove,
        .drv_flags = RTE_VDEV_DRV_NEED_IOVA_AS_VA,
};

RTE_PMD_REGISTER_VDEV(dma_skeleton, skeldma_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(dma_skeleton,
                SKELDMA_ARG_LCORE "=<uint16>");