/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2021 NVIDIA Corporation & Affiliates
 */

#include <stdlib.h>
#include <string.h>

#include <rte_eal.h>
#include <rte_tailq.h>
#include <rte_rwlock.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <rte_eal_paging.h>

#include "rte_gpudev.h"
#include "gpudev_driver.h"
RTE_LOG_REGISTER_DEFAULT(gpu_logtype, NOTICE);

#define GPU_LOG(level, ...) \
	rte_log(RTE_LOG_ ## level, gpu_logtype, RTE_FMT("gpu: " \
		RTE_FMT_HEAD(__VA_ARGS__, ) "\n", RTE_FMT_TAIL(__VA_ARGS__, )))
/* Set any driver error as EPERM */
#define GPU_DRV_RET(function) \
	((function != 0) ? -(rte_errno = EPERM) : (rte_errno = 0))
/* Array of devices */
static struct rte_gpu *gpus;
/* Allocated size of the devices array */
static int16_t gpu_max;
/* Number of currently valid devices */
static int16_t gpu_count;
/* Shared memory between processes. */
static const char *GPU_MEMZONE = "rte_gpu_shared";
static struct gpu_shared_mem {
	uint16_t gpu_count;
	__extension__ struct rte_gpu_mpshared gpus[0];
} *gpu_shared_mem;
/* Event callback object */
struct rte_gpu_callback {
	TAILQ_ENTRY(rte_gpu_callback) next;
	rte_gpu_callback_t *function;
	void *user_data;
	enum rte_gpu_event event;
};
static rte_rwlock_t gpu_callback_lock = RTE_RWLOCK_INITIALIZER;
static void gpu_free_callbacks(struct rte_gpu *dev);
int
rte_gpu_init(size_t dev_max)
{
	if (dev_max == 0 || dev_max > INT16_MAX) {
		GPU_LOG(ERR, "invalid array size");
		rte_errno = EINVAL;
		return -rte_errno;
	}

	/* No lock, it must be called before or during first probing. */
	if (gpus != NULL) {
		GPU_LOG(ERR, "already initialized");
		rte_errno = EBUSY;
		return -rte_errno;
	}

	gpus = calloc(dev_max, sizeof(struct rte_gpu));
	if (gpus == NULL) {
		GPU_LOG(ERR, "cannot initialize library");
		rte_errno = ENOMEM;
		return -rte_errno;
	}

	gpu_max = dev_max;
	return 0;
}
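/*
 * Illustrative usage sketch (not part of the library): an application
 * wanting a non-default device array size calls rte_gpu_init() once,
 * before the first device is probed; the size 16 below is an arbitrary
 * example value. The call is optional, since rte_gpu_allocate() performs
 * an implicit initialization with RTE_GPU_DEFAULT_MAX.
 *
 *	if (rte_gpu_init(16) < 0)
 *		printf("gpudev init failed: %s\n", rte_strerror(rte_errno));
 */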
uint16_t
rte_gpu_count_avail(void)
{
	return gpu_count;
}

bool
rte_gpu_is_valid(int16_t dev_id)
{
	if (dev_id >= 0 && dev_id < gpu_max &&
			gpus[dev_id].process_state == RTE_GPU_STATE_INITIALIZED)
		return true;
	return false;
}
static bool
gpu_match_parent(int16_t dev_id, int16_t parent)
{
	if (parent == RTE_GPU_ID_ANY)
		return true;
	return gpus[dev_id].mpshared->info.parent == parent;
}
int16_t
rte_gpu_find_next(int16_t dev_id, int16_t parent)
{
	if (dev_id < 0)
		dev_id = 0;
	while (dev_id < gpu_max &&
			(gpus[dev_id].process_state == RTE_GPU_STATE_UNUSED ||
			!gpu_match_parent(dev_id, parent)))
		dev_id++;

	if (dev_id >= gpu_max)
		return RTE_GPU_ID_NONE;
	return dev_id;
}
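/*
 * Illustrative sketch: rte_gpu_find_next() is the building block of the
 * RTE_GPU_FOREACH iteration macros declared in rte_gpudev.h, e.g.:
 *
 *	int16_t id;
 *	RTE_GPU_FOREACH(id)
 *		printf("found GPU %d\n", id);
 */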
static int16_t
gpu_find_free_id(void)
{
	int16_t dev_id;

	for (dev_id = 0; dev_id < gpu_max; dev_id++) {
		if (gpus[dev_id].process_state == RTE_GPU_STATE_UNUSED)
			return dev_id;
	}
	return RTE_GPU_ID_NONE;
}
static struct rte_gpu *
gpu_get_by_id(int16_t dev_id)
{
	if (!rte_gpu_is_valid(dev_id))
		return NULL;
	return &gpus[dev_id];
}
struct rte_gpu *
rte_gpu_get_by_name(const char *name)
{
	int16_t dev_id;
	struct rte_gpu *dev;

	if (name == NULL) {
		rte_errno = EINVAL;
		return NULL;
	}

	RTE_GPU_FOREACH(dev_id) {
		dev = &gpus[dev_id];
		if (strncmp(name, dev->mpshared->name, RTE_DEV_NAME_MAX_LEN) == 0)
			return dev;
	}
	return NULL;
}
static int
gpu_shared_mem_init(void)
{
	const struct rte_memzone *memzone;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		memzone = rte_memzone_reserve(GPU_MEMZONE,
				sizeof(*gpu_shared_mem) +
				sizeof(*gpu_shared_mem->gpus) * gpu_max,
				SOCKET_ID_ANY, 0);
	} else {
		memzone = rte_memzone_lookup(GPU_MEMZONE);
	}
	if (memzone == NULL) {
		GPU_LOG(ERR, "cannot initialize shared memory");
		rte_errno = ENOMEM;
		return -rte_errno;
	}

	gpu_shared_mem = memzone->addr;
	return 0;
}
struct rte_gpu *
rte_gpu_allocate(const char *name)
{
	int16_t dev_id;
	struct rte_gpu *dev;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		GPU_LOG(ERR, "only primary process can allocate device");
		rte_errno = EPERM;
		return NULL;
	}
	if (name == NULL) {
		GPU_LOG(ERR, "allocate device without a name");
		rte_errno = EINVAL;
		return NULL;
	}

	/* implicit initialization of library before adding first device */
	if (gpus == NULL && rte_gpu_init(RTE_GPU_DEFAULT_MAX) < 0)
		return NULL;

	/* initialize shared memory before adding first device */
	if (gpu_shared_mem == NULL && gpu_shared_mem_init() < 0)
		return NULL;

	if (rte_gpu_get_by_name(name) != NULL) {
		GPU_LOG(ERR, "device with name %s already exists", name);
		rte_errno = EEXIST;
		return NULL;
	}
	dev_id = gpu_find_free_id();
	if (dev_id == RTE_GPU_ID_NONE) {
		GPU_LOG(ERR, "reached maximum number of devices");
		rte_errno = ENOENT;
		return NULL;
	}

	dev = &gpus[dev_id];
	memset(dev, 0, sizeof(*dev));

	dev->mpshared = &gpu_shared_mem->gpus[dev_id];
	memset(dev->mpshared, 0, sizeof(*dev->mpshared));

	if (rte_strscpy(dev->mpshared->name, name, RTE_DEV_NAME_MAX_LEN) < 0) {
		GPU_LOG(ERR, "device name too long: %s", name);
		rte_errno = ENAMETOOLONG;
		return NULL;
	}
	dev->mpshared->info.name = dev->mpshared->name;
	dev->mpshared->info.dev_id = dev_id;
	dev->mpshared->info.numa_node = -1;
	dev->mpshared->info.parent = RTE_GPU_ID_NONE;
	TAILQ_INIT(&dev->callbacks);
	__atomic_fetch_add(&dev->mpshared->process_refcnt, 1, __ATOMIC_RELAXED);

	gpu_count++;
	GPU_LOG(DEBUG, "new device %s (id %d) of total %d",
			name, dev_id, gpu_count);
	return dev;
}
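/*
 * Illustrative sketch for driver authors (not part of this file): a GPU
 * driver probe function typically pairs rte_gpu_allocate() with
 * rte_gpu_complete_new() once its ops and private data are set. The
 * "mydrv" names below are hypothetical.
 *
 *	struct rte_gpu *dev = rte_gpu_allocate("mydrv_gpu0");
 *	if (dev == NULL)
 *		return -rte_errno;
 *	dev->ops = mydrv_gpu_ops;
 *	rte_gpu_complete_new(dev);
 */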
struct rte_gpu *
rte_gpu_attach(const char *name)
{
	int16_t dev_id;
	struct rte_gpu *dev;
	struct rte_gpu_mpshared *shared_dev;

	if (rte_eal_process_type() != RTE_PROC_SECONDARY) {
		GPU_LOG(ERR, "only secondary process can attach device");
		rte_errno = EPERM;
		return NULL;
	}
	if (name == NULL) {
		GPU_LOG(ERR, "attach device without a name");
		rte_errno = EINVAL;
		return NULL;
	}

	/* implicit initialization of library before adding first device */
	if (gpus == NULL && rte_gpu_init(RTE_GPU_DEFAULT_MAX) < 0)
		return NULL;

	/* initialize shared memory before adding first device */
	if (gpu_shared_mem == NULL && gpu_shared_mem_init() < 0)
		return NULL;

	for (dev_id = 0; dev_id < gpu_max; dev_id++) {
		shared_dev = &gpu_shared_mem->gpus[dev_id];
		if (strncmp(name, shared_dev->name, RTE_DEV_NAME_MAX_LEN) == 0)
			break;
	}
	if (dev_id >= gpu_max) {
		GPU_LOG(ERR, "device with name %s not found", name);
		rte_errno = ENOENT;
		return NULL;
	}
	dev = &gpus[dev_id];
	memset(dev, 0, sizeof(*dev));

	TAILQ_INIT(&dev->callbacks);
	dev->mpshared = shared_dev;
	__atomic_fetch_add(&dev->mpshared->process_refcnt, 1, __ATOMIC_RELAXED);

	gpu_count++;
	GPU_LOG(DEBUG, "attached device %s (id %d) of total %d",
			name, dev_id, gpu_count);
	return dev;
}
int16_t
rte_gpu_add_child(const char *name, int16_t parent, uint64_t child_context)
{
	struct rte_gpu *dev;

	if (!rte_gpu_is_valid(parent)) {
		GPU_LOG(ERR, "add child to invalid parent ID %d", parent);
		rte_errno = ENODEV;
		return -rte_errno;
	}

	dev = rte_gpu_allocate(name);
	if (dev == NULL)
		return -rte_errno;

	dev->mpshared->info.parent = parent;
	dev->mpshared->info.context = child_context;

	rte_gpu_complete_new(dev);
	return dev->mpshared->info.dev_id;
}
void
rte_gpu_complete_new(struct rte_gpu *dev)
{
	if (dev == NULL)
		return;

	dev->process_state = RTE_GPU_STATE_INITIALIZED;
	rte_gpu_notify(dev, RTE_GPU_EVENT_NEW);
}
int
rte_gpu_release(struct rte_gpu *dev)
{
	int16_t dev_id, child;

	if (dev == NULL) {
		rte_errno = ENODEV;
		return -rte_errno;
	}
	dev_id = dev->mpshared->info.dev_id;
	RTE_GPU_FOREACH_CHILD(child, dev_id) {
		GPU_LOG(ERR, "cannot release device %d with child %d",
				dev_id, child);
		rte_errno = EBUSY;
		return -rte_errno;
	}

	GPU_LOG(DEBUG, "free device %s (id %d)",
			dev->mpshared->info.name, dev->mpshared->info.dev_id);
	rte_gpu_notify(dev, RTE_GPU_EVENT_DEL);

	gpu_free_callbacks(dev);
	dev->process_state = RTE_GPU_STATE_UNUSED;
	__atomic_fetch_sub(&dev->mpshared->process_refcnt, 1, __ATOMIC_RELAXED);
	gpu_count--;

	return 0;
}
int
rte_gpu_close(int16_t dev_id)
{
	int firsterr, binerr;
	int *lasterr = &firsterr;
	struct rte_gpu *dev;

	dev = gpu_get_by_id(dev_id);
	if (dev == NULL) {
		GPU_LOG(ERR, "close invalid device ID %d", dev_id);
		rte_errno = ENODEV;
		return -rte_errno;
	}

	if (dev->ops.dev_close != NULL) {
		*lasterr = GPU_DRV_RET(dev->ops.dev_close(dev));
		if (*lasterr != 0)
			lasterr = &binerr;
	}

	*lasterr = rte_gpu_release(dev);

	rte_errno = -firsterr;
	return firsterr;
}
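/*
 * Note on the error handling in rte_gpu_close() above: "lasterr" first
 * aliases "firsterr"; if the driver close fails, the pointer is
 * redirected to the scratch variable "binerr" so that the following
 * rte_gpu_release() cannot overwrite the first error, which is the one
 * reported to the caller.
 */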
int
rte_gpu_callback_register(int16_t dev_id, enum rte_gpu_event event,
		rte_gpu_callback_t *function, void *user_data)
{
	int16_t next_dev, last_dev;
	struct rte_gpu_callback_list *callbacks;
	struct rte_gpu_callback *callback;

	if (!rte_gpu_is_valid(dev_id) && dev_id != RTE_GPU_ID_ANY) {
		GPU_LOG(ERR, "register callback of invalid ID %d", dev_id);
		rte_errno = ENODEV;
		return -rte_errno;
	}
	if (function == NULL) {
		GPU_LOG(ERR, "cannot register callback without function");
		rte_errno = EINVAL;
		return -rte_errno;
	}

	if (dev_id == RTE_GPU_ID_ANY) {
		next_dev = 0;
		last_dev = gpu_max - 1;
	} else {
		next_dev = last_dev = dev_id;
	}

	rte_rwlock_write_lock(&gpu_callback_lock);
	do {
		callbacks = &gpus[next_dev].callbacks;

		/* check if not already registered */
		TAILQ_FOREACH(callback, callbacks, next) {
			if (callback->event == event &&
					callback->function == function &&
					callback->user_data == user_data) {
				GPU_LOG(INFO, "callback already registered");
				rte_rwlock_write_unlock(&gpu_callback_lock);
				return 0;
			}
		}

		callback = malloc(sizeof(*callback));
		if (callback == NULL) {
			GPU_LOG(ERR, "cannot allocate callback");
			rte_rwlock_write_unlock(&gpu_callback_lock);
			rte_errno = ENOMEM;
			return -rte_errno;
		}
		callback->function = function;
		callback->user_data = user_data;
		callback->event = event;
		TAILQ_INSERT_TAIL(callbacks, callback, next);

	} while (++next_dev <= last_dev);
	rte_rwlock_write_unlock(&gpu_callback_lock);

	return 0;
}
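/*
 * Illustrative sketch (application side): registering for new-device
 * events on all devices. The callback name is hypothetical.
 *
 *	static void
 *	on_gpu_event(int16_t dev_id, enum rte_gpu_event event, void *data)
 *	{
 *		printf("GPU %d event %d\n", dev_id, event);
 *	}
 *
 *	rte_gpu_callback_register(RTE_GPU_ID_ANY, RTE_GPU_EVENT_NEW,
 *			on_gpu_event, NULL);
 */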
int
rte_gpu_callback_unregister(int16_t dev_id, enum rte_gpu_event event,
		rte_gpu_callback_t *function, void *user_data)
{
	int16_t next_dev, last_dev;
	struct rte_gpu_callback_list *callbacks;
	struct rte_gpu_callback *callback, *nextcb;

	if (!rte_gpu_is_valid(dev_id) && dev_id != RTE_GPU_ID_ANY) {
		GPU_LOG(ERR, "unregister callback of invalid ID %d", dev_id);
		rte_errno = ENODEV;
		return -rte_errno;
	}
	if (function == NULL) {
		GPU_LOG(ERR, "cannot unregister callback without function");
		rte_errno = EINVAL;
		return -rte_errno;
	}

	if (dev_id == RTE_GPU_ID_ANY) {
		next_dev = 0;
		last_dev = gpu_max - 1;
	} else {
		next_dev = last_dev = dev_id;
	}

	rte_rwlock_write_lock(&gpu_callback_lock);
	do {
		callbacks = &gpus[next_dev].callbacks;
		RTE_TAILQ_FOREACH_SAFE(callback, callbacks, next, nextcb) {
			if (callback->event != event ||
					callback->function != function ||
					(callback->user_data != user_data &&
					user_data != (void *)-1))
				continue;
			TAILQ_REMOVE(callbacks, callback, next);
			free(callback);
		}
	} while (++next_dev <= last_dev);
	rte_rwlock_write_unlock(&gpu_callback_lock);

	return 0;
}
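/*
 * Note: passing (void *)-1 as user_data above acts as a wildcard, so a
 * callback function is unregistered regardless of the user_data value
 * it was registered with.
 */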
static void
gpu_free_callbacks(struct rte_gpu *dev)
{
	struct rte_gpu_callback_list *callbacks;
	struct rte_gpu_callback *callback, *nextcb;

	callbacks = &dev->callbacks;
	rte_rwlock_write_lock(&gpu_callback_lock);
	RTE_TAILQ_FOREACH_SAFE(callback, callbacks, next, nextcb) {
		TAILQ_REMOVE(callbacks, callback, next);
		free(callback);
	}
	rte_rwlock_write_unlock(&gpu_callback_lock);
}
void
rte_gpu_notify(struct rte_gpu *dev, enum rte_gpu_event event)
{
	int16_t dev_id;
	struct rte_gpu_callback *callback;

	dev_id = dev->mpshared->info.dev_id;
	rte_rwlock_read_lock(&gpu_callback_lock);
	TAILQ_FOREACH(callback, &dev->callbacks, next) {
		if (callback->event != event || callback->function == NULL)
			continue;
		callback->function(dev_id, event, callback->user_data);
	}
	rte_rwlock_read_unlock(&gpu_callback_lock);
}
int
rte_gpu_info_get(int16_t dev_id, struct rte_gpu_info *info)
{
	struct rte_gpu *dev;

	dev = gpu_get_by_id(dev_id);
	if (dev == NULL) {
		GPU_LOG(ERR, "query invalid device ID %d", dev_id);
		rte_errno = ENODEV;
		return -rte_errno;
	}
	if (info == NULL) {
		GPU_LOG(ERR, "query without storage");
		rte_errno = EINVAL;
		return -rte_errno;
	}

	if (dev->ops.dev_info_get == NULL) {
		*info = dev->mpshared->info;
		return 0;
	}
	return GPU_DRV_RET(dev->ops.dev_info_get(dev, info));
}
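/*
 * Illustrative sketch: querying a device.
 *
 *	struct rte_gpu_info info;
 *	if (rte_gpu_info_get(dev_id, &info) == 0)
 *		printf("%s: %zu bytes of memory\n",
 *				info.name, info.total_memory);
 */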
void *
rte_gpu_mem_alloc(int16_t dev_id, size_t size, unsigned int align)
{
	struct rte_gpu *dev;
	void *ptr;
	int ret;

	dev = gpu_get_by_id(dev_id);
	if (dev == NULL) {
		GPU_LOG(ERR, "alloc mem for invalid device ID %d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	if (dev->ops.mem_alloc == NULL) {
		GPU_LOG(ERR, "mem allocation not supported");
		rte_errno = ENOTSUP;
		return NULL;
	}

	if (size == 0) /* dry-run */
		return NULL;

	if (align && !rte_is_power_of_2(align)) {
		GPU_LOG(ERR, "requested alignment is not a power of two %u", align);
		rte_errno = EINVAL;
		return NULL;
	}

	ret = dev->ops.mem_alloc(dev, size, align, &ptr);

	switch (ret) {
	case 0:
		return ptr;
	case -ENOMEM:
	case -E2BIG:
		rte_errno = -ret;
		return NULL;
	default:
		rte_errno = EPERM;
		return NULL;
	}
}
int
rte_gpu_mem_free(int16_t dev_id, void *ptr)
{
	struct rte_gpu *dev;

	dev = gpu_get_by_id(dev_id);
	if (dev == NULL) {
		GPU_LOG(ERR, "free mem for invalid device ID %d", dev_id);
		rte_errno = ENODEV;
		return -rte_errno;
	}

	if (dev->ops.mem_free == NULL) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}

	if (ptr == NULL) /* dry-run */
		return 0;

	return GPU_DRV_RET(dev->ops.mem_free(dev, ptr));
}
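/*
 * Illustrative sketch: allocating and freeing device memory. The size
 * below is an arbitrary example value.
 *
 *	void *gpu_buf = rte_gpu_mem_alloc(dev_id, 1 << 20, 0);
 *	if (gpu_buf == NULL)
 *		printf("alloc failed: %s\n", rte_strerror(rte_errno));
 *	...
 *	rte_gpu_mem_free(dev_id, gpu_buf);
 */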
int
rte_gpu_mem_register(int16_t dev_id, size_t size, void *ptr)
{
	struct rte_gpu *dev;

	dev = gpu_get_by_id(dev_id);
	if (dev == NULL) {
		GPU_LOG(ERR, "register mem for invalid device ID %d", dev_id);
		rte_errno = ENODEV;
		return -rte_errno;
	}

	if (dev->ops.mem_register == NULL) {
		GPU_LOG(ERR, "mem registration not supported");
		rte_errno = ENOTSUP;
		return -rte_errno;
	}

	if (ptr == NULL || size == 0) /* dry-run */
		return 0;

	return GPU_DRV_RET(dev->ops.mem_register(dev, size, ptr));
}
int
rte_gpu_mem_unregister(int16_t dev_id, void *ptr)
{
	struct rte_gpu *dev;

	dev = gpu_get_by_id(dev_id);
	if (dev == NULL) {
		GPU_LOG(ERR, "unregister mem for invalid device ID %d", dev_id);
		rte_errno = ENODEV;
		return -rte_errno;
	}

	if (dev->ops.mem_unregister == NULL) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}

	if (ptr == NULL) /* dry-run */
		return 0;

	return GPU_DRV_RET(dev->ops.mem_unregister(dev, ptr));
}
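/*
 * Illustrative sketch: making CPU memory visible to the device. The
 * size 4096 is an arbitrary example value.
 *
 *	uint8_t *cpu_buf = rte_zmalloc(NULL, 4096, 0);
 *	if (rte_gpu_mem_register(dev_id, 4096, cpu_buf) < 0)
 *		printf("register failed: %s\n", rte_strerror(rte_errno));
 *	...
 *	rte_gpu_mem_unregister(dev_id, cpu_buf);
 *	rte_free(cpu_buf);
 */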
void *
rte_gpu_mem_cpu_map(int16_t dev_id, size_t size, void *ptr)
{
	struct rte_gpu *dev;
	void *ptr_out;
	int ret;

	dev = gpu_get_by_id(dev_id);
	if (dev == NULL) {
		GPU_LOG(ERR, "mem CPU map for invalid device ID %d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	if (dev->ops.mem_cpu_map == NULL) {
		GPU_LOG(ERR, "mem CPU map not supported");
		rte_errno = ENOTSUP;
		return NULL;
	}

	if (ptr == NULL || size == 0) /* dry-run */
		return NULL;

	ret = GPU_DRV_RET(dev->ops.mem_cpu_map(dev, size, ptr, &ptr_out));
	if (ret != 0)
		return NULL;
	return ptr_out;
}
int
rte_gpu_mem_cpu_unmap(int16_t dev_id, void *ptr)
{
	struct rte_gpu *dev;

	dev = gpu_get_by_id(dev_id);
	if (dev == NULL) {
		GPU_LOG(ERR, "cpu_unmap mem for invalid device ID %d", dev_id);
		rte_errno = ENODEV;
		return -rte_errno;
	}

	if (dev->ops.mem_cpu_unmap == NULL) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}

	if (ptr == NULL) /* dry-run */
		return 0;

	return GPU_DRV_RET(dev->ops.mem_cpu_unmap(dev, ptr));
}
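/*
 * Illustrative sketch: exposing device memory to the CPU when the
 * driver supports it. Note that the unmap call takes the device
 * pointer, not the CPU view.
 *
 *	void *gpu_buf = rte_gpu_mem_alloc(dev_id, 4096, 0);
 *	void *cpu_view = rte_gpu_mem_cpu_map(dev_id, 4096, gpu_buf);
 *	if (cpu_view != NULL) {
 *		((volatile uint8_t *)cpu_view)[0] = 1;
 *		rte_gpu_mem_cpu_unmap(dev_id, gpu_buf);
 *	}
 */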
int
rte_gpu_wmb(int16_t dev_id)
{
	struct rte_gpu *dev;

	dev = gpu_get_by_id(dev_id);
	if (dev == NULL) {
		GPU_LOG(ERR, "memory barrier for invalid device ID %d", dev_id);
		rte_errno = ENODEV;
		return -rte_errno;
	}

	if (dev->ops.wmb == NULL) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	return GPU_DRV_RET(dev->ops.wmb(dev));
}
int
rte_gpu_comm_create_flag(uint16_t dev_id, struct rte_gpu_comm_flag *devflag,
		enum rte_gpu_comm_flag_type mtype)
{
	size_t flag_size;
	int ret;

	if (devflag == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (mtype != RTE_GPU_COMM_FLAG_CPU) {
		rte_errno = EINVAL;
		return -rte_errno;
	}

	flag_size = sizeof(uint32_t);

	devflag->ptr = rte_zmalloc(NULL, flag_size, 0);
	if (devflag->ptr == NULL) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}

	ret = rte_gpu_mem_register(dev_id, flag_size, devflag->ptr);
	if (ret < 0) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}

	devflag->mtype = mtype;
	devflag->dev_id = dev_id;

	return 0;
}
int
rte_gpu_comm_destroy_flag(struct rte_gpu_comm_flag *devflag)
{
	int ret;

	if (devflag == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}

	ret = rte_gpu_mem_unregister(devflag->dev_id, devflag->ptr);
	if (ret < 0) {
		rte_errno = EINVAL;
		return -1;
	}

	rte_free(devflag->ptr);

	return 0;
}
int
rte_gpu_comm_set_flag(struct rte_gpu_comm_flag *devflag, uint32_t val)
{
	if (devflag == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (devflag->mtype != RTE_GPU_COMM_FLAG_CPU) {
		rte_errno = EINVAL;
		return -rte_errno;
	}

	RTE_GPU_VOLATILE(*devflag->ptr) = val;

	return 0;
}

int
rte_gpu_comm_get_flag_value(struct rte_gpu_comm_flag *devflag, uint32_t *val)
{
	if (devflag == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	if (devflag->mtype != RTE_GPU_COMM_FLAG_CPU) {
		rte_errno = EINVAL;
		return -rte_errno;
	}

	*val = RTE_GPU_VOLATILE(*devflag->ptr);

	return 0;
}
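/*
 * Illustrative sketch: a CPU-resident flag that the CPU sets and a GPU
 * kernel polls (the kernel side is outside this library). The variable
 * name is hypothetical.
 *
 *	struct rte_gpu_comm_flag quit_flag;
 *	rte_gpu_comm_create_flag(dev_id, &quit_flag, RTE_GPU_COMM_FLAG_CPU);
 *	...
 *	rte_gpu_comm_set_flag(&quit_flag, 1);
 *	rte_gpu_comm_destroy_flag(&quit_flag);
 */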
struct rte_gpu_comm_list *
rte_gpu_comm_create_list(uint16_t dev_id,
		uint32_t num_comm_items)
{
	struct rte_gpu_comm_list *comm_list;
	uint32_t idx_l;
	int ret;
	struct rte_gpu *dev;
	struct rte_gpu_info info;

	if (num_comm_items == 0) {
		rte_errno = EINVAL;
		return NULL;
	}

	dev = gpu_get_by_id(dev_id);
	if (dev == NULL) {
		GPU_LOG(ERR, "create comm list for invalid device ID %d", dev_id);
		rte_errno = ENODEV;
		return NULL;
	}

	ret = rte_gpu_info_get(dev_id, &info);
	if (ret < 0) {
		rte_errno = ENODEV;
		return NULL;
	}

	comm_list = rte_zmalloc(NULL,
			sizeof(struct rte_gpu_comm_list) * num_comm_items, 0);
	if (comm_list == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	ret = rte_gpu_mem_register(dev_id,
			sizeof(struct rte_gpu_comm_list) * num_comm_items, comm_list);
	if (ret < 0) {
		rte_errno = ENOMEM;
		return NULL;
	}

	/*
	 * Use GPU memory CPU map feature if enabled in the driver
	 * to allocate the status flags of the list.
	 * Allocating this flag in GPU memory will reduce
	 * the latency when GPU workload is polling this flag.
	 */
	comm_list[0].status_d = rte_gpu_mem_alloc(dev_id,
			sizeof(enum rte_gpu_comm_list_status) * num_comm_items,
			info.page_size);
	comm_list[0].status_h = rte_gpu_mem_cpu_map(dev_id,
			sizeof(enum rte_gpu_comm_list_status) * num_comm_items,
			comm_list[0].status_d);
	if (comm_list[0].status_h == NULL) {
		/*
		 * If CPU mapping is not supported by driver
		 * use regular CPU registered memory.
		 */
		comm_list[0].status_h = rte_zmalloc(NULL,
				sizeof(enum rte_gpu_comm_list_status) * num_comm_items, 0);
		if (comm_list[0].status_h == NULL) {
			rte_errno = ENOMEM;
			return NULL;
		}

		ret = rte_gpu_mem_register(dev_id,
				sizeof(enum rte_gpu_comm_list_status) * num_comm_items,
				comm_list[0].status_h);
		if (ret < 0) {
			rte_errno = ENOMEM;
			return NULL;
		}

		comm_list[0].status_d = comm_list[0].status_h;
	}

	for (idx_l = 0; idx_l < num_comm_items; idx_l++) {
		comm_list[idx_l].pkt_list = rte_zmalloc(NULL,
				sizeof(struct rte_gpu_comm_pkt) * RTE_GPU_COMM_LIST_PKTS_MAX, 0);
		if (comm_list[idx_l].pkt_list == NULL) {
			rte_errno = ENOMEM;
			return NULL;
		}

		ret = rte_gpu_mem_register(dev_id,
				sizeof(struct rte_gpu_comm_pkt) * RTE_GPU_COMM_LIST_PKTS_MAX,
				comm_list[idx_l].pkt_list);
		if (ret < 0) {
			rte_errno = ENOMEM;
			return NULL;
		}

		comm_list[idx_l].num_pkts = 0;
		comm_list[idx_l].dev_id = dev_id;

		comm_list[idx_l].mbufs = rte_zmalloc(NULL,
				sizeof(struct rte_mbuf *) * RTE_GPU_COMM_LIST_PKTS_MAX, 0);
		if (comm_list[idx_l].mbufs == NULL) {
			rte_errno = ENOMEM;
			return NULL;
		}

		comm_list[idx_l].status_h = &(comm_list[0].status_h[idx_l]);
		comm_list[idx_l].status_d = &(comm_list[0].status_d[idx_l]);

		ret = rte_gpu_comm_set_status(&comm_list[idx_l], RTE_GPU_COMM_LIST_FREE);
		if (ret < 0) {
			rte_errno = ENOMEM;
			return NULL;
		}
	}

	return comm_list;
}
int
rte_gpu_comm_destroy_list(struct rte_gpu_comm_list *comm_list,
		uint32_t num_comm_items)
{
	uint32_t idx_l;
	int ret;
	uint16_t dev_id;

	if (comm_list == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}

	dev_id = comm_list[0].dev_id;

	for (idx_l = 0; idx_l < num_comm_items; idx_l++) {
		ret = rte_gpu_mem_unregister(dev_id, comm_list[idx_l].pkt_list);
		if (ret < 0) {
			rte_errno = EINVAL;
			return -1;
		}

		rte_free(comm_list[idx_l].pkt_list);
		rte_free(comm_list[idx_l].mbufs);
	}

	ret = rte_gpu_mem_unregister(dev_id, comm_list);
	if (ret < 0) {
		rte_errno = EINVAL;
		return -1;
	}

	ret = rte_gpu_mem_cpu_unmap(dev_id, comm_list[0].status_d);
	if (ret == 0) {
		rte_gpu_mem_free(dev_id, comm_list[0].status_d);
	} else {
		rte_gpu_mem_unregister(dev_id, comm_list[0].status_h);
		rte_free(comm_list[0].status_h);
	}

	rte_free(comm_list);

	return 0;
}
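/*
 * Illustrative sketch: typical communication list lifecycle on the CPU
 * side (a GPU kernel consumes the items). The count 64 is an arbitrary
 * example value.
 *
 *	struct rte_gpu_comm_list *clist;
 *	clist = rte_gpu_comm_create_list(dev_id, 64);
 *	...
 *	rte_gpu_comm_destroy_list(clist, 64);
 */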
int
rte_gpu_comm_populate_list_pkts(struct rte_gpu_comm_list *comm_list_item,
		struct rte_mbuf **mbufs, uint32_t num_mbufs)
{
	uint32_t idx;
	int ret;

	if (comm_list_item == NULL || comm_list_item->pkt_list == NULL ||
			mbufs == NULL || num_mbufs > RTE_GPU_COMM_LIST_PKTS_MAX) {
		rte_errno = EINVAL;
		return -rte_errno;
	}

	for (idx = 0; idx < num_mbufs; idx++) {
		/* support only unchained mbufs */
		if (unlikely((mbufs[idx]->nb_segs > 1) ||
				(mbufs[idx]->next != NULL) ||
				(mbufs[idx]->data_len != mbufs[idx]->pkt_len))) {
			rte_errno = ENOTSUP;
			return -rte_errno;
		}

		comm_list_item->pkt_list[idx].addr =
				rte_pktmbuf_mtod_offset(mbufs[idx], uintptr_t, 0);
		comm_list_item->pkt_list[idx].size = mbufs[idx]->pkt_len;
		comm_list_item->mbufs[idx] = mbufs[idx];
	}

	RTE_GPU_VOLATILE(comm_list_item->num_pkts) = num_mbufs;
	rte_gpu_wmb(comm_list_item->dev_id);
	ret = rte_gpu_comm_set_status(comm_list_item, RTE_GPU_COMM_LIST_READY);
	if (ret < 0) {
		rte_errno = EINVAL;
		return -rte_errno;
	}

	return 0;
}
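/*
 * Illustrative sketch: handing a received burst to the GPU. Port,
 * queue, and the "clist"/"item" names are hypothetical.
 *
 *	struct rte_mbuf *rx_pkts[32];
 *	uint16_t nb = rte_eth_rx_burst(port_id, 0, rx_pkts, 32);
 *	if (nb > 0)
 *		rte_gpu_comm_populate_list_pkts(&clist[item], rx_pkts, nb);
 */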
int
rte_gpu_comm_set_status(struct rte_gpu_comm_list *comm_list_item,
		enum rte_gpu_comm_list_status status)
{
	if (comm_list_item == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}

	RTE_GPU_VOLATILE(comm_list_item->status_h[0]) = status;

	return 0;
}

int
rte_gpu_comm_get_status(struct rte_gpu_comm_list *comm_list_item,
		enum rte_gpu_comm_list_status *status)
{
	if (comm_list_item == NULL || status == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}

	*status = RTE_GPU_VOLATILE(comm_list_item->status_h[0]);

	return 0;
}
int
rte_gpu_comm_cleanup_list(struct rte_gpu_comm_list *comm_list_item)
{
	uint32_t idx;
	enum rte_gpu_comm_list_status status;
	int ret;

	if (comm_list_item == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}

	ret = rte_gpu_comm_get_status(comm_list_item, &status);
	if (ret < 0) {
		rte_errno = EINVAL;
		return -rte_errno;
	}

	if (status == RTE_GPU_COMM_LIST_READY) {
		GPU_LOG(ERR, "packet list is still in progress");
		rte_errno = EINVAL;
		return -rte_errno;
	}

	for (idx = 0; idx < RTE_GPU_COMM_LIST_PKTS_MAX; idx++) {
		if (comm_list_item->pkt_list[idx].addr == 0)
			break;

		comm_list_item->pkt_list[idx].addr = 0;
		comm_list_item->pkt_list[idx].size = 0;
		comm_list_item->mbufs[idx] = NULL;
	}

	ret = rte_gpu_comm_set_status(comm_list_item, RTE_GPU_COMM_LIST_FREE);
	if (ret < 0) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	RTE_GPU_VOLATILE(comm_list_item->num_pkts) = 0;

	return 0;
}
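/*
 * Illustrative sketch, assuming the GPU workload marks finished items
 * RTE_GPU_COMM_LIST_DONE: the CPU can then free the mbufs and recycle
 * the item. The "clist"/"item" names are hypothetical.
 *
 *	enum rte_gpu_comm_list_status s;
 *	rte_gpu_comm_get_status(&clist[item], &s);
 *	if (s == RTE_GPU_COMM_LIST_DONE) {
 *		rte_pktmbuf_free_bulk(clist[item].mbufs, clist[item].num_pkts);
 *		rte_gpu_comm_cleanup_list(&clist[item]);
 *	}
 */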