If headers are not found, the CUDA GPU driver library is not built.
+CPU map GPU memory
+~~~~~~~~~~~~~~~~~~
+
+To enable this gpudev feature (i.e. to implement the ``rte_gpu_mem_cpu_map`` function),
+you need the `GDRCopy <https://github.com/NVIDIA/gdrcopy>`_ library and driver
+installed on your system.
+
+A quick recipe to download, build and run the GDRCopy library and driver:
+
+.. code-block:: console
+
+ $ git clone https://github.com/NVIDIA/gdrcopy.git
+ $ cd gdrcopy
+ $ make
+ $ # make install to install the GDRCopy library system-wide
+ $ # Load the gdrdrv kernel module on the system
+ $ sudo ./insmod.sh
+
+As with the CUDA headers, you need to indicate to meson where the GDRCopy header files are.
+An example would be:
+
+.. code-block:: console
+
+ $ meson build -Dc_args="-I/usr/local/cuda/include -I/path/to/gdrcopy/include"
+
+If headers are not found, the CUDA GPU driver library is built without the CPU map capability
+and will return an error if the application invokes the gpudev ``rte_gpu_mem_cpu_map`` function.
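+
+A minimal sketch of how an application might use this capability and detect
+when it is missing (variable names are illustrative; ``ptr_d`` is assumed to be
+GPU memory previously allocated through gpudev on device ``dev_id``,
+see ``rte_gpudev.h`` for the exact prototypes):
+
+.. code-block:: c
+
+ void *ptr_h = rte_gpu_mem_cpu_map(dev_id, buf_size, ptr_d);
+ if (ptr_h == NULL) {
+  /* The call fails (rte_errno is set) if the driver lacks the CPU map capability */
+  printf("CPU map failed: %s\n", rte_strerror(rte_errno));
+ } else {
+  /* The GPU buffer is now directly accessible from the CPU */
+  ((volatile uint8_t *)ptr_h)[0] = 0x1;
+  rte_gpu_mem_cpu_unmap(dev_id, ptr_d);
+ }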
+
+
CUDA Shared Library
-------------------
For this reason, to build the CUDA driver library,
no need to install the CUDA library.
+CPU map GPU memory
+~~~~~~~~~~~~~~~~~~
+
+Similarly to the CUDA shared library, if the **libgdrapi.so** shared library
+is not installed in a default location (e.g. /usr/local/lib),
+you can use the environment variable ``GDRCOPY_PATH_L`` to specify its directory.
+
+As an example, to enable the CPU map feature sanity check,
+run the ``app/test-gpudev`` application with:
+
+.. code-block:: console
+
+ $ sudo CUDA_PATH_L=/path/to/libcuda GDRCOPY_PATH_L=/path/to/libgdrapi ./build/app/dpdk-test-gpudev
+
+Additionally, the ``gdrdrv`` kernel module built with the GDRCopy project
+has to be loaded on the system:
+
+.. code-block:: console
+
+ $ lsmod | egrep gdrdrv
+ gdrdrv 20480 0
+ nvidia 35307520 19 nvidia_uvm,nv_peer_mem,gdrdrv,nvidia_modeset
+
+
Design
------
Share CPU memory with device = Y
Allocate device memory = Y
Free memory = Y
+CPU map device memory = Y
+CPU unmap device memory = Y
Share CPU memory with device =
Allocate device memory =
Free memory =
+CPU map device memory =
+CPU unmap device memory =
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#ifndef CUDA_COMMON_H
+#define CUDA_COMMON_H
+
+#include <dlfcn.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_errno.h>
+
+extern int cuda_logtype;
+
+/* Helper macro for logging */
+#define rte_cuda_log(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, cuda_logtype, fmt "\n", ##__VA_ARGS__)
+
+#define rte_cuda_debug(fmt, ...) \
+ rte_cuda_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
+ ##__VA_ARGS__)
+
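+/*
+ * Use the real GDRCopy types when gdrapi.h is available at build time,
+ * otherwise declare minimal stand-ins so the prototypes below still compile.
+ */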
+#ifdef DRIVERS_GPU_CUDA_GDRCOPY_H
+ #include <gdrapi.h>
+#else
+ struct gdr;
+ typedef struct gdr *gdr_t;
+ struct gdr_mh_s { unsigned long h; };
+ typedef struct gdr_mh_s gdr_mh_t;
+#endif
+
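+/* Map/unmap a GPU memory area to/from the CPU address space through GDRCopy. */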
+int gdrcopy_pin(gdr_t *gdrc_h, __rte_unused gdr_mh_t *mh,
+ uint64_t d_addr, size_t size, void **h_addr);
+int gdrcopy_unpin(gdr_t gdrc_h, __rte_unused gdr_mh_t mh,
+ void *d_addr, size_t size);
+
+#endif /* CUDA_COMMON_H */
#include <dlfcn.h>
-#include <rte_common.h>
-#include <rte_log.h>
#include <rte_malloc.h>
-#include <rte_errno.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_byteorder.h>
#include <rte_dev.h>
#include <gpudev_driver.h>
+
#include <cuda.h>
#include <cudaTypedefs.h>
+#include "common.h"
+
#define CUDA_DRIVER_MIN_VERSION 11040
#define CUDA_API_MIN_VERSION 3020
static void *cudalib;
static unsigned int cuda_api_version;
static int cuda_driver_version;
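+/* GDRCopy handle, opened lazily on the first CPU map request */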
+static gdr_t gdrc_h;
/* NVIDIA GPU vendor */
#define NVIDIA_GPU_VENDOR_ID (0x10de)
#define GPU_PAGE_SHIFT 16
#define GPU_PAGE_SIZE (1UL << GPU_PAGE_SHIFT)
-static RTE_LOG_REGISTER_DEFAULT(cuda_logtype, NOTICE);
-
-/* Helper macro for logging */
-#define rte_cuda_log(level, fmt, ...) \
- rte_log(RTE_LOG_ ## level, cuda_logtype, fmt "\n", ##__VA_ARGS__)
-
-#define rte_cuda_debug(fmt, ...) \
- rte_cuda_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
- ##__VA_ARGS__)
+RTE_LOG_REGISTER_DEFAULT(cuda_logtype, NOTICE);
/* NVIDIA GPU address map */
static const struct rte_pci_id pci_id_cuda_map[] = {
CUcontext ctx;
cuda_ptr_key pkey;
enum mem_type mtype;
+ gdr_mh_t mh; /* GDRCopy memory handle */
struct mem_entry *prev;
struct mem_entry *next;
};
return 0;
}
+static int
+cuda_mem_cpu_map(struct rte_gpu *dev, __rte_unused size_t size, void *ptr_in, void **ptr_out)
+{
+ struct mem_entry *mem_item;
+ cuda_ptr_key hk;
+
+ if (dev == NULL)
+ return -ENODEV;
+
+ hk = get_hash_from_ptr((void *)ptr_in);
+
+ mem_item = mem_list_find_item(hk);
+ if (mem_item == NULL) {
+ rte_cuda_log(ERR, "Memory address 0x%p not found in driver memory.", ptr_in);
+ rte_errno = EPERM;
+ return -rte_errno;
+ }
+
+ if (mem_item->mtype != GPU_MEM) {
+ rte_cuda_log(ERR, "Memory address 0x%p is not GPU memory type.", ptr_in);
+ rte_errno = EPERM;
+ return -rte_errno;
+ }
+
+ if (mem_item->size != size)
+ rte_cuda_log(WARNING,
+ "Can't expose memory area with size (%zd) different from original size (%zd).",
+ size, mem_item->size);
+
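+ /* Pin the GPU memory area and map it into the CPU address space */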
+ if (gdrcopy_pin(&gdrc_h, &(mem_item->mh), (uint64_t)mem_item->ptr_d,
+ mem_item->size, &(mem_item->ptr_h))) {
+ rte_cuda_log(ERR, "Error exposing GPU memory address 0x%p.", ptr_in);
+ rte_errno = EPERM;
+ return -rte_errno;
+ }
+
+ *ptr_out = mem_item->ptr_h;
+
+ return 0;
+}
+
static int
cuda_mem_free(struct rte_gpu *dev, void *ptr)
{
return -rte_errno;
}
+static int
+cuda_mem_cpu_unmap(struct rte_gpu *dev, void *ptr_in)
+{
+ struct mem_entry *mem_item;
+ cuda_ptr_key hk;
+
+ if (dev == NULL)
+ return -ENODEV;
+
+ hk = get_hash_from_ptr((void *)ptr_in);
+
+ mem_item = mem_list_find_item(hk);
+ if (mem_item == NULL) {
+ rte_cuda_log(ERR, "Memory address 0x%p not found in driver memory.", ptr_in);
+ rte_errno = EPERM;
+ return -rte_errno;
+ }
+
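+ /* Unmap the area from the CPU address space and unpin the GPU memory */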
+ if (gdrcopy_unpin(gdrc_h, mem_item->mh, (void *)mem_item->ptr_d,
+ mem_item->size)) {
+ rte_cuda_log(ERR, "Error unexposing GPU memory address 0x%p.", ptr_in);
+ rte_errno = EPERM;
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
static int
cuda_dev_close(struct rte_gpu *dev)
{
rte_errno = ENOTSUP;
return -rte_errno;
}
+
+ gdrc_h = NULL;
}
/* Fill HW specific part of device structure */
dev->ops.mem_free = cuda_mem_free;
dev->ops.mem_register = cuda_mem_register;
dev->ops.mem_unregister = cuda_mem_unregister;
- dev->ops.mem_cpu_map = NULL;
- dev->ops.mem_cpu_unmap = NULL;
+ dev->ops.mem_cpu_map = cuda_mem_cpu_map;
+ dev->ops.mem_cpu_unmap = cuda_mem_cpu_unmap;
dev->ops.wmb = cuda_wmb;
rte_gpu_complete_new(dev);
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 NVIDIA Corporation & Affiliates
+ */
+
+#include "common.h"
+
+#ifdef DRIVERS_GPU_CUDA_GDRCOPY_H
+
+static void *gdrclib;
+static gdr_t (*sym_gdr_open)(void);
+static int (*sym_gdr_pin_buffer)(gdr_t g, unsigned long addr, size_t size,
+ uint64_t p2p_token, uint32_t va_space, gdr_mh_t *handle);
+static int (*sym_gdr_unpin_buffer)(gdr_t g, gdr_mh_t handle);
+static int (*sym_gdr_map)(gdr_t g, gdr_mh_t handle, void **va, size_t size);
+static int (*sym_gdr_unmap)(gdr_t g, gdr_mh_t handle, void *va, size_t size);
+
+static int
+gdrcopy_loader(void)
+{
+ char gdrcopy_path[1024];
+
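+ /* Look for libgdrapi.so under GDRCOPY_PATH_L if set, else in the default library search path */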
+ if (getenv("GDRCOPY_PATH_L") == NULL)
+ snprintf(gdrcopy_path, 1024, "%s", "libgdrapi.so");
+ else
+ snprintf(gdrcopy_path, 1024, "%s/%s", getenv("GDRCOPY_PATH_L"), "libgdrapi.so");
+
+ gdrclib = dlopen(gdrcopy_path, RTLD_LAZY);
+ if (gdrclib == NULL) {
+ rte_cuda_log(ERR, "Failed to find GDRCopy library %s (GDRCOPY_PATH_L=%s)\n",
+ gdrcopy_path, getenv("GDRCOPY_PATH_L"));
+ return -1;
+ }
+
+ sym_gdr_open = dlsym(gdrclib, "gdr_open");
+ if (sym_gdr_open == NULL) {
+ rte_cuda_log(ERR, "Failed to load GDRCopy symbols\n");
+ return -1;
+ }
+
+ sym_gdr_pin_buffer = dlsym(gdrclib, "gdr_pin_buffer");
+ if (sym_gdr_pin_buffer == NULL) {
+ rte_cuda_log(ERR, "Failed to load GDRCopy symbols\n");
+ return -1;
+ }
+
+ sym_gdr_unpin_buffer = dlsym(gdrclib, "gdr_unpin_buffer");
+ if (sym_gdr_unpin_buffer == NULL) {
+ rte_cuda_log(ERR, "Failed to load GDRCopy symbols\n");
+ return -1;
+ }
+
+ sym_gdr_map = dlsym(gdrclib, "gdr_map");
+ if (sym_gdr_map == NULL) {
+ rte_cuda_log(ERR, "Failed to load GDRCopy symbols\n");
+ return -1;
+ }
+
+ sym_gdr_unmap = dlsym(gdrclib, "gdr_unmap");
+ if (sym_gdr_unmap == NULL) {
+ rte_cuda_log(ERR, "Failed to load GDRCopy symbols\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+gdrcopy_open(gdr_t *g)
+{
+ gdr_t g_;
+
+ g_ = sym_gdr_open();
+ if (!g_)
+ return -1;
+ *g = g_;
+
+ return 0;
+}
+
+#endif
+
+int
+gdrcopy_pin(gdr_t *gdrc_h, __rte_unused gdr_mh_t *mh, uint64_t d_addr, size_t size, void **h_addr)
+{
+#ifdef DRIVERS_GPU_CUDA_GDRCOPY_H
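+ /* On first use, load libgdrapi.so and open the GDRCopy (gdrdrv) device */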
+ if (*gdrc_h == NULL) {
+ if (gdrcopy_loader())
+ return -ENOTSUP;
+
+ if (gdrcopy_open(gdrc_h)) {
+ rte_cuda_log(ERR,
+ "GDRCopy gdrdrv kernel module not found. Can't CPU map GPU memory.");
+ return -EPERM;
+ }
+ }
+
+ /* Pin the device buffer */
+ if (sym_gdr_pin_buffer(*gdrc_h, d_addr, size, 0, 0, mh) != 0) {
+ rte_cuda_log(ERR, "GDRCopy pin buffer error.");
+ return -1;
+ }
+
+ /* Map the buffer to user space */
+ if (sym_gdr_map(*gdrc_h, *mh, h_addr, size) != 0) {
+ rte_cuda_log(ERR, "GDRCopy map buffer error.");
+ sym_gdr_unpin_buffer(*gdrc_h, *mh);
+ return -1;
+ }
+
+ return 0;
+#else
+ rte_cuda_log(ERR,
+ "GDRCopy headers not provided at DPDK building time. Can't CPU map GPU memory.");
+ return -ENOTSUP;
+#endif
+}
+
+int
+gdrcopy_unpin(gdr_t gdrc_h, __rte_unused gdr_mh_t mh, void *d_addr, size_t size)
+{
+ if (gdrc_h == NULL)
+ return -EINVAL;
+
+#ifdef DRIVERS_GPU_CUDA_GDRCOPY_H
+ /* Unmap the buffer from user space */
+ if (sym_gdr_unmap(gdrc_h, mh, d_addr, size) != 0) {
+ rte_cuda_log(ERR, "GDRCopy unmap buffer error.");
+ return -1;
+ }
+ /* Unpin the device buffer */
+ if (sym_gdr_unpin_buffer(gdrc_h, mh) != 0) {
+ rte_cuda_log(ERR, "GDRCopy unpin buffer error.");
+ return -1;
+ }
+#endif
+
+ return 0;
+}
subdir_done()
endif
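+# GDRCopy-based CPU mapping of GPU memory is compiled in only if gdrapi.h is found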
+if cc.has_header('gdrapi.h')
+ dpdk_conf.set('DRIVERS_GPU_CUDA_GDRCOPY_H', 1)
+endif
+
deps += ['gpudev', 'pci', 'bus_pci']
-sources = files('cuda.c')
+sources = files('cuda.c', 'gdrcopy.c')