#include <dlfcn.h>
-#include <rte_common.h>
-#include <rte_log.h>
#include <rte_malloc.h>
-#include <rte_errno.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_byteorder.h>
#include <rte_dev.h>
#include <gpudev_driver.h>
+
#include <cuda.h>
#include <cudaTypedefs.h>
+#include "common.h"
+#include "devices.h"
+
#define CUDA_DRIVER_MIN_VERSION 11040
#define CUDA_API_MIN_VERSION 3020
static void *cudalib;
static unsigned int cuda_api_version;
static int cuda_driver_version;
-
-/* NVIDIA GPU vendor */
-#define NVIDIA_GPU_VENDOR_ID (0x10de)
-
-/* NVIDIA GPU device IDs */
-#define NVIDIA_GPU_A100_40GB_DEVICE_ID (0x20f1)
-#define NVIDIA_GPU_A100_80GB_DEVICE_ID (0x20b5)
-
-#define NVIDIA_GPU_A30_24GB_DEVICE_ID (0x20b7)
-#define NVIDIA_GPU_A10_24GB_DEVICE_ID (0x2236)
-
-#define NVIDIA_GPU_V100_32GB_DEVICE_ID (0x1db6)
-#define NVIDIA_GPU_V100_16GB_DEVICE_ID (0x1db4)
-
-#define NVIDIA_GPU_T4_16GB_DEVICE_ID (0x1eb8)
+static gdr_t gdrc_h;
#define CUDA_MAX_ALLOCATION_NUM 512
#define GPU_PAGE_SHIFT 16
#define GPU_PAGE_SIZE (1UL << GPU_PAGE_SHIFT)
-static RTE_LOG_REGISTER_DEFAULT(cuda_logtype, NOTICE);
-
-/* Helper macro for logging */
-#define rte_cuda_log(level, fmt, ...) \
- rte_log(RTE_LOG_ ## level, cuda_logtype, fmt "\n", ##__VA_ARGS__)
-
-#define rte_cuda_debug(fmt, ...) \
- rte_cuda_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
- ##__VA_ARGS__)
+RTE_LOG_REGISTER_DEFAULT(cuda_logtype, NOTICE);
/* NVIDIA GPU address map */
static const struct rte_pci_id pci_id_cuda_map[] = {
{
RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
- NVIDIA_GPU_A100_40GB_DEVICE_ID)
+ NVIDIA_GPU_A40_DEVICE_ID)
},
{
RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
- NVIDIA_GPU_A100_80GB_DEVICE_ID)
+ NVIDIA_GPU_A30_24GB_DEVICE_ID)
},
{
RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
- NVIDIA_GPU_A30_24GB_DEVICE_ID)
+ NVIDIA_GPU_A30X_24GB_DPU_DEVICE_ID)
},
{
RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
},
{
RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
- NVIDIA_GPU_V100_32GB_DEVICE_ID)
+ NVIDIA_GPU_A10G_DEVICE_ID)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_A10M_DEVICE_ID)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_A100_40GB_SXM4_DEVICE_ID)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_A100_40GB_PCIE_DEVICE_ID)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_A100_80GB_SXM4_DEVICE_ID)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_A100_80GB_PCIE_DEVICE_ID)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_A100X_80GB_DPU_DEVICE_ID)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_GA100_PG506_207)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_GA100_PCIE)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_GA100_PG506_217)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_V100_16GB_SXM2_DEVICE_ID)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_V100_16GB_DGXS_DEVICE_ID)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_V100_16GB_FHHL_DEVICE_ID)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_V100_16GB_PCIE_DEVICE_ID)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_V100_32GB_SXM2_DEVICE_ID)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_V100_32GB_PCIE_DEVICE_ID)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_V100_32GB_DGXS_DEVICE_ID)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_V100_32GB_SXM3_DEVICE_ID)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_V100_32GB_SXM3_H_DEVICE_ID)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_V100_SXM2)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_V100S_PCIE)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_TITAN_V_CEO_ED)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_GV100GL_PG500_216)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_GV100GL_PG503_216)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_TU102_TITAN_RTX)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_TU102GL_QUADRO_RTX)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_GV100_QUADRO_DEVICE_ID)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_QUADRO_RTX_4000)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_QUADRO_RTX_5000)
},
{
RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
- NVIDIA_GPU_V100_16GB_DEVICE_ID)
+ NVIDIA_GPU_QUADRO_RTX_6000)
},
{
RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
- NVIDIA_GPU_T4_16GB_DEVICE_ID)
+ NVIDIA_GPU_QUADRO_RTX_8000)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_QUADRO_RTX_A4000)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_QUADRO_RTX_A6000)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_QUADRO_RTX_A5000)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_QUADRO_RTX_A4500)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_QUADRO_RTX_A5500)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_QUADRO_RTX_A2000)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_QUADRO_RTX_A2000_12GB)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_T4G)
+ },
+ {
+ RTE_PCI_DEVICE(NVIDIA_GPU_VENDOR_ID,
+ NVIDIA_GPU_T4)
},
{
.device_id = 0
/* Single entry of the memory list */
struct mem_entry {
	CUdeviceptr ptr_d;
+	CUdeviceptr ptr_orig_d; /* Unaligned base address returned by cuMemAlloc; the one passed to cuMemFree */
	void *ptr_h;
	size_t size;
+	size_t size_orig; /* Requested size plus alignment padding actually allocated */
	struct rte_gpu *dev;
	CUcontext ctx;
	cuda_ptr_key pkey;
	enum mem_type mtype;
+	gdr_mh_t mh; /* gdrcopy memory handle set by cuda_mem_cpu_map(), released by cuda_mem_cpu_unmap() */
	struct mem_entry *prev;
	struct mem_entry *next;
};
if (getenv("CUDA_PATH_L") == NULL)
snprintf(cuda_path, 1024, "%s", "libcuda.so");
else
- snprintf(cuda_path, 1024, "%s%s", getenv("CUDA_PATH_L"), "libcuda.so");
+ snprintf(cuda_path, 1024, "%s/%s", getenv("CUDA_PATH_L"), "libcuda.so");
cudalib = dlopen(cuda_path, RTLD_LAZY);
if (cudalib == NULL) {
return -EINVAL;
/* if key is in head */
- if (mem_alloc_list_cur->prev == NULL)
+ if (mem_alloc_list_cur->prev == NULL) {
mem_alloc_list_head = mem_alloc_list_cur->next;
- else {
+ if (mem_alloc_list_head != NULL)
+ mem_alloc_list_head->prev = NULL;
+ } else {
mem_alloc_list_cur->prev->next = mem_alloc_list_cur->next;
if (mem_alloc_list_cur->next != NULL)
mem_alloc_list_cur->next->prev = mem_alloc_list_cur->prev;
CUcontext current_ctx;
CUcontext input_ctx;
- if (dev == NULL)
- return -ENODEV;
+ if (dev == NULL) {
+ rte_errno = ENODEV;
+ return -rte_errno;
+ }
/* Child initialization time probably called by rte_gpu_add_child() */
if (dev->mpshared->info.parent != RTE_GPU_ID_NONE &&
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuCtxGetCurrent failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
/* Set child ctx as current ctx */
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuCtxSetCurrent input failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
/*
(uint32_t)affinityPrm.param.smCount.val;
ret = rte_gpu_info_get(dev->mpshared->info.parent, &parent_info);
- if (ret)
- return -ENODEV;
+ if (ret) {
+ rte_errno = ENODEV;
+ return -rte_errno;
+ }
dev->mpshared->info.total_memory = parent_info.total_memory;
+ dev->mpshared->info.page_size = parent_info.page_size;
+
/*
* GPU Device private info
*/
RTE_CACHE_LINE_SIZE);
if (dev->mpshared->dev_private == NULL) {
rte_cuda_log(ERR, "Failed to allocate memory for GPU process private");
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
private = (struct cuda_info *)dev->mpshared->dev_private;
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuCtxGetDevice failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
res = pfn_cuDeviceGetName(private->gpu_name,
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuDeviceGetName failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
/* Restore original ctx as current ctx */
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuCtxSetCurrent current failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
}
*/
static int
-cuda_mem_alloc(struct rte_gpu *dev, size_t size, void **ptr)
+cuda_mem_alloc(struct rte_gpu *dev, size_t size, unsigned int align, void **ptr)
{
CUresult res;
const char *err_string;
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuCtxGetCurrent failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
/* Set child ctx as current ctx */
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuCtxSetCurrent input failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
/* Get next memory list item */
mem_alloc_list_tail = mem_list_add_item();
- if (mem_alloc_list_tail == NULL)
- return -ENOMEM;
+ if (mem_alloc_list_tail == NULL) {
+ rte_errno = EPERM;
+ return -rte_errno;
+ }
/* Allocate memory */
mem_alloc_list_tail->size = size;
- res = pfn_cuMemAlloc(&(mem_alloc_list_tail->ptr_d),
- mem_alloc_list_tail->size);
+ mem_alloc_list_tail->size_orig = size + align;
+
+ res = pfn_cuMemAlloc(&(mem_alloc_list_tail->ptr_orig_d),
+ mem_alloc_list_tail->size_orig);
if (res != 0) {
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuCtxSetCurrent current failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
+ /* Align memory address */
+ mem_alloc_list_tail->ptr_d = mem_alloc_list_tail->ptr_orig_d;
+ if (align && ((uintptr_t)mem_alloc_list_tail->ptr_d) % align)
+ mem_alloc_list_tail->ptr_d += (align -
+ (((uintptr_t)mem_alloc_list_tail->ptr_d) % align));
+
/* GPUDirect RDMA attribute required */
res = pfn_cuPointerSetAttribute(&flag,
CU_POINTER_ATTRIBUTE_SYNC_MEMOPS,
rte_cuda_log(ERR, "Could not set SYNC MEMOP attribute for "
"GPU memory at %"PRIu32", err %d",
(uint32_t)mem_alloc_list_tail->ptr_d, res);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
mem_alloc_list_tail->pkey = get_hash_from_ptr((void *)mem_alloc_list_tail->ptr_d);
mem_alloc_list_tail->ptr_h = NULL;
- mem_alloc_list_tail->size = size;
mem_alloc_list_tail->dev = dev;
mem_alloc_list_tail->ctx = (CUcontext)((uintptr_t)dev->mpshared->info.context);
mem_alloc_list_tail->mtype = GPU_MEM;
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuCtxSetCurrent current failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
*ptr = (void *)mem_alloc_list_tail->ptr_d;
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuCtxGetCurrent failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
/* Set child ctx as current ctx */
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuCtxSetCurrent input failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
/* Get next memory list item */
mem_alloc_list_tail = mem_list_add_item();
- if (mem_alloc_list_tail == NULL)
- return -ENOMEM;
+ if (mem_alloc_list_tail == NULL) {
+ rte_errno = EPERM;
+ return -rte_errno;
+ }
/* Allocate memory */
mem_alloc_list_tail->size = size;
err_string,
mem_alloc_list_tail->ptr_h,
mem_alloc_list_tail->size);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
res = pfn_cuDeviceGetAttribute(&(use_ptr_h),
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuDeviceGetAttribute failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
if (use_ptr_h == 0) {
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuMemHostGetDevicePointer failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
if ((uintptr_t)mem_alloc_list_tail->ptr_d !=
(uintptr_t)mem_alloc_list_tail->ptr_h) {
rte_cuda_log(ERR, "Host input pointer is different wrt GPU registered pointer");
- return -ENOTSUP;
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
} else {
mem_alloc_list_tail->ptr_d = (CUdeviceptr)mem_alloc_list_tail->ptr_h;
if (res != 0) {
rte_cuda_log(ERR, "Could not set SYNC MEMOP attribute for GPU memory at %"PRIu32
", err %d", (uint32_t)mem_alloc_list_tail->ptr_d, res);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
mem_alloc_list_tail->pkey = get_hash_from_ptr((void *)mem_alloc_list_tail->ptr_h);
mem_alloc_list_tail->dev = dev;
mem_alloc_list_tail->ctx = (CUcontext)((uintptr_t)dev->mpshared->info.context);
mem_alloc_list_tail->mtype = CPU_REGISTERED;
+ mem_alloc_list_tail->ptr_orig_d = mem_alloc_list_tail->ptr_d;
/* Restore original ctx as current ctx */
res = pfn_cuCtxSetCurrent(current_ctx);
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuCtxSetCurrent current failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
return 0;
}
+/*
+ * Expose a GPU memory area to the CPU through a gdrcopy pinning.
+ *
+ * @param dev
+ *   GPU device owning the memory area. ENODEV if NULL.
+ * @param size
+ *   Size requested by the caller. If it differs from the size recorded
+ *   at allocation time, only a warning is emitted and the whole original
+ *   area is mapped.
+ * @param ptr_in
+ *   Device pointer previously returned by cuda_mem_alloc (GPU_MEM type only).
+ * @param ptr_out
+ *   On success, CPU-visible address of the mapped area.
+ *
+ * @return
+ *   0 on success, -rte_errno otherwise.
+ */
+static int
+cuda_mem_cpu_map(struct rte_gpu *dev, size_t size, void *ptr_in, void **ptr_out)
+{
+	struct mem_entry *mem_item;
+	cuda_ptr_key hk;
+
+	if (dev == NULL) {
+		rte_errno = ENODEV;
+		return -rte_errno;
+	}
+
+	hk = get_hash_from_ptr((void *)ptr_in);
+
+	mem_item = mem_list_find_item(hk);
+	if (mem_item == NULL) {
+		rte_cuda_log(ERR, "Memory address 0x%p not found in driver memory.", ptr_in);
+		rte_errno = EPERM;
+		return -rte_errno;
+	}
+
+	/* Only device allocations (not CPU_REGISTERED areas) can be pinned. */
+	if (mem_item->mtype != GPU_MEM) {
+		rte_cuda_log(ERR, "Memory address 0x%p is not GPU memory type.", ptr_in);
+		rte_errno = EPERM;
+		return -rte_errno;
+	}
+
+	if (mem_item->size != size)
+		rte_cuda_log(WARNING,
+				"Can't expose memory area with size (%zd) different from original size (%zd).",
+				size, mem_item->size);
+
+	/* Pin the whole original allocation; mem_item->ptr_h receives the CPU mapping. */
+	if (gdrcopy_pin(&gdrc_h, &(mem_item->mh), (uint64_t)mem_item->ptr_d,
+					mem_item->size, &(mem_item->ptr_h))) {
+		rte_cuda_log(ERR, "Error exposing GPU memory address 0x%p.", ptr_in);
+		rte_errno = EPERM;
+		return -rte_errno;
+	}
+
+	*ptr_out = mem_item->ptr_h;
+
+	return 0;
+}
+
static int
cuda_mem_free(struct rte_gpu *dev, void *ptr)
{
mem_item = mem_list_find_item(hk);
if (mem_item == NULL) {
rte_cuda_log(ERR, "Memory address 0x%p not found in driver memory", ptr);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
if (mem_item->mtype == GPU_MEM) {
- res = pfn_cuMemFree(mem_item->ptr_d);
+ res = pfn_cuMemFree(mem_item->ptr_orig_d);
if (res != 0) {
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuMemFree current failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
return mem_list_del_item(hk);
mem_item = mem_list_find_item(hk);
if (mem_item == NULL) {
rte_cuda_log(ERR, "Memory address 0x%p not found in driver memory", ptr);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
if (mem_item->mtype == CPU_REGISTERED) {
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuMemHostUnregister current failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
return mem_list_del_item(hk);
rte_cuda_log(ERR, "Memory type %d not supported", mem_item->mtype);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
+}
+
+/*
+ * Remove the CPU mapping of a GPU memory area previously exposed
+ * with cuda_mem_cpu_map().
+ *
+ * @param dev
+ *   GPU device owning the memory area. ENODEV if NULL.
+ * @param ptr_in
+ *   Device pointer the area was registered under in the driver memory list.
+ *
+ * @return
+ *   0 on success, -rte_errno otherwise.
+ */
+static int
+cuda_mem_cpu_unmap(struct rte_gpu *dev, void *ptr_in)
+{
+	struct mem_entry *mem_item;
+	cuda_ptr_key hk;
+
+	if (dev == NULL) {
+		rte_errno = ENODEV;
+		return -rte_errno;
+	}
+
+	hk = get_hash_from_ptr((void *)ptr_in);
+
+	mem_item = mem_list_find_item(hk);
+	if (mem_item == NULL) {
+		rte_cuda_log(ERR, "Memory address 0x%p not found in driver memory.", ptr_in);
+		rte_errno = EPERM;
+		return -rte_errno;
+	}
+
+	/*
+	 * NOTE(review): gdr_unmap() normally expects the host VA returned by
+	 * gdr_map() (ptr_h), not the device address — confirm gdrcopy_unpin()'s
+	 * expected argument against common.h before relying on ptr_d here.
+	 */
+	if (gdrcopy_unpin(gdrc_h, mem_item->mh, (void *)mem_item->ptr_d,
+			mem_item->size)) {
+		rte_cuda_log(ERR, "Error unexposing GPU memory address 0x%p.", ptr_in);
+		rte_errno = EPERM;
+		return -rte_errno;
+	}
+
+	return 0;
+}
static int
CUcontext input_ctx;
struct cuda_info *private;
- if (dev == NULL)
- return -ENODEV;
+ if (dev == NULL) {
+ rte_errno = ENODEV;
+ return -rte_errno;
+ }
private = (struct cuda_info *)dev->mpshared->dev_private;
*/
rte_cuda_log(WARNING, "Can't flush GDR writes with cuFlushGPUDirectRDMAWrites CUDA function."
"Application needs to use alternative methods.");
- return -ENOTSUP;
+
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
/* Store current ctx */
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuCtxGetCurrent failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
/* Set child ctx as current ctx */
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuCtxSetCurrent input failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
res = pfn_cuFlushGPUDirectRDMAWrites(CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX,
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuFlushGPUDirectRDMAWrites current failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
/* Restore original ctx as current ctx */
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuCtxSetCurrent current failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
return 0;
if (pci_dev == NULL) {
rte_cuda_log(ERR, "NULL PCI device");
- return -EINVAL;
+ rte_errno = ENODEV;
+ return -rte_errno;
}
rte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));
/* Allocate memory to be used privately by drivers */
dev = rte_gpu_allocate(pci_dev->device.name);
- if (dev == NULL)
- return -ENODEV;
+ if (dev == NULL) {
+ rte_errno = ENODEV;
+ return -rte_errno;
+ }
/* Initialize values only for the first CUDA driver call */
if (dev->mpshared->info.dev_id == 0) {
/* Load libcuda.so library */
if (cuda_loader()) {
rte_cuda_log(ERR, "CUDA Driver library not found");
- return -ENOTSUP;
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
/* Load initial CUDA functions */
if (cuda_sym_func_loader()) {
rte_cuda_log(ERR, "CUDA functions not found in library");
- return -ENOTSUP;
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
/*
res = sym_cuDriverGetVersion(&cuda_driver_version);
if (res != 0) {
rte_cuda_log(ERR, "cuDriverGetVersion failed with %d", res);
- return -ENOTSUP;
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
if (cuda_driver_version < CUDA_DRIVER_MIN_VERSION) {
"Minimum requirement is %d",
cuda_driver_version,
CUDA_DRIVER_MIN_VERSION);
- return -ENOTSUP;
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
if (cuda_pfn_func_loader()) {
rte_cuda_log(ERR, "CUDA PFN functions not found in library");
- return -ENOTSUP;
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
+
+ gdrc_h = NULL;
}
/* Fill HW specific part of device structure */
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuDeviceGetByPCIBusId name %s failed with %d: %s",
dev->device->name, res, err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
res = pfn_cuDevicePrimaryCtxRetain(&pctx, cu_dev_id);
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuDevicePrimaryCtxRetain name %s failed with %d: %s",
dev->device->name, res, err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
res = pfn_cuCtxGetApiVersion(pctx, &cuda_api_version);
if (res != 0) {
rte_cuda_log(ERR, "cuCtxGetApiVersion failed with %d", res);
- return -ENOTSUP;
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
if (cuda_api_version < CUDA_API_MIN_VERSION) {
rte_cuda_log(ERR, "CUDA API version found is %d Minimum requirement is %d",
cuda_api_version, CUDA_API_MIN_VERSION);
- return -ENOTSUP;
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
dev->mpshared->info.context = (uint64_t)pctx;
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuDeviceGetAttribute failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
dev->mpshared->info.processor_count = (uint32_t)processor_count;
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuDeviceTotalMem failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
+ dev->mpshared->info.page_size = (size_t)GPU_PAGE_SIZE;
+
/*
* GPU Device private info
*/
RTE_CACHE_LINE_SIZE);
if (dev->mpshared->dev_private == NULL) {
rte_cuda_log(ERR, "Failed to allocate memory for GPU process private");
- return -ENOMEM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
private = (struct cuda_info *)dev->mpshared->dev_private;
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuDeviceGetName failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
res = pfn_cuDeviceGetAttribute(&(private->gdr_supported),
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuDeviceGetAttribute failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
if (private->gdr_supported == 0)
rte_cuda_log(ERR,
"cuDeviceGetAttribute failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
if (private->gdr_write_ordering == CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE) {
pfn_cuGetErrorString(res, &(err_string));
rte_cuda_log(ERR, "cuDeviceGetAttribute failed with %s",
err_string);
- return -EPERM;
+ rte_errno = EPERM;
+ return -rte_errno;
}
if (private->gdr_flush_type != CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST)
dev->ops.mem_free = cuda_mem_free;
dev->ops.mem_register = cuda_mem_register;
dev->ops.mem_unregister = cuda_mem_unregister;
+ dev->ops.mem_cpu_map = cuda_mem_cpu_map;
+ dev->ops.mem_cpu_unmap = cuda_mem_cpu_unmap;
dev->ops.wmb = cuda_wmb;
rte_gpu_complete_new(dev);
int ret;
uint8_t gpu_id;
- if (pci_dev == NULL)
- return -EINVAL;
+ if (pci_dev == NULL) {
+ rte_errno = ENODEV;
+ return -rte_errno;
+ }
dev = rte_gpu_get_by_name(pci_dev->device.name);
if (dev == NULL) {
rte_cuda_log(ERR, "Couldn't find HW dev \"%s\" to uninitialise it",
pci_dev->device.name);
- return -ENODEV;
+ rte_errno = ENODEV;
+ return -rte_errno;
}
gpu_id = dev->mpshared->info.dev_id;