diff --git a/drivers/common/mlx5/mlx5_common.c b/drivers/common/mlx5/mlx5_common.c
index db94d4aa8c..c26a2cfa30 100644
--- a/drivers/common/mlx5/mlx5_common.c
+++ b/drivers/common/mlx5/mlx5_common.c
@@ -7,55 +7,17 @@
 #include
 #include
+#include
 
 #include "mlx5_common.h"
+#include "mlx5_common_os.h"
 #include "mlx5_common_utils.h"
+#include "mlx5_common_pci.h"
 
 int mlx5_common_logtype;
 
-#ifdef MLX5_GLUE
-const struct mlx5_glue *mlx5_glue;
-#endif
-
 uint8_t haswell_broadwell_cpu;
 
-static int
-mlx5_class_check_handler(__rte_unused const char *key, const char *value,
-			 void *opaque)
-{
-	enum mlx5_class *ret = opaque;
-
-	if (strcmp(value, "vdpa") == 0) {
-		*ret = MLX5_CLASS_VDPA;
-	} else if (strcmp(value, "net") == 0) {
-		*ret = MLX5_CLASS_NET;
-	} else {
-		DRV_LOG(ERR, "Invalid mlx5 class %s. Maybe typo in device"
-			" class argument setting?", value);
-		*ret = MLX5_CLASS_INVALID;
-	}
-	return 0;
-}
-
-enum mlx5_class
-mlx5_class_get(struct rte_devargs *devargs)
-{
-	struct rte_kvargs *kvlist;
-	const char *key = MLX5_CLASS_ARG_NAME;
-	enum mlx5_class ret = MLX5_CLASS_NET;
-
-	if (devargs == NULL)
-		return ret;
-	kvlist = rte_kvargs_parse(devargs->args, NULL);
-	if (kvlist == NULL)
-		return ret;
-	if (rte_kvargs_count(kvlist, key))
-		rte_kvargs_process(kvlist, key, mlx5_class_check_handler, &ret);
-	rte_kvargs_free(kvlist);
-	return ret;
-}
-
-
 /* In case this is an x86_64 intel processor to check if
  * we should use relaxed ordering.
  */
@@ -86,12 +48,22 @@ RTE_INIT_PRIO(mlx5_log_init, LOG)
 	rte_log_set_level(mlx5_common_logtype, RTE_LOG_NOTICE);
 }
 
+static bool mlx5_common_initialized;
+
 /**
- * Initialization routine for run-time dependency on glue library.
+ * One-time initialization routine for the run-time dependency on the
+ * glue library, shared by multiple PMDs. Each mlx5 PMD that depends on
+ * the mlx5_common module must invoke this from its constructor.
  */
-RTE_INIT_PRIO(mlx5_glue_init, CLASS)
+void
+mlx5_common_init(void)
 {
+	if (mlx5_common_initialized)
+		return;
+
+	mlx5_glue_constructor();
+	mlx5_common_pci_init();
+	mlx5_common_initialized = true;
 }
 
 /**
@@ -150,3 +122,102 @@ RTE_INIT_PRIO(mlx5_is_haswell_broadwell_cpu, LOG)
 #endif
 	haswell_broadwell_cpu = 0;
 }
+
+/**
+ * Allocate the User Access Region with DevX on the specified device.
+ *
+ * @param [in] ctx
+ *   Infiniband device context to perform allocation on.
+ * @param [in] mapping
+ *   MLX5DV_UAR_ALLOC_TYPE_BF - allocate as cached memory with
+ *   write-combining attributes (if supported by the host); writes to
+ *   the UAR registers must be followed by a write memory barrier.
+ *   MLX5DV_UAR_ALLOC_TYPE_NC - allocate as non-cached memory; all
+ *   writes are promoted to the registers immediately, no memory
+ *   barriers needed.
+ *   mapping < 0 - the first attempt is performed with
+ *   MLX5DV_UAR_ALLOC_TYPE_BF; if this fails, the next attempt is
+ *   performed with MLX5DV_UAR_ALLOC_TYPE_NC. Drivers specifying
+ *   negative values should always provide the write memory barrier
+ *   operation after UAR register writes.
+ *   If there are no definitions for MLX5DV_UAR_ALLOC_TYPE_xx (older
+ *   rdma-core library headers), the caller can specify 0.
+ *
+ * @return
+ *   UAR object pointer on success, NULL otherwise and rte_errno is set.
+ */
+void *
+mlx5_devx_alloc_uar(void *ctx, int mapping)
+{
+	void *uar;
+	uint32_t retry, uar_mapping;
+	void *base_addr;
+
+	for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
+#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
+		/* Control the mapping type according to the settings. */
+		uar_mapping = (mapping < 0) ?
+			      MLX5DV_UAR_ALLOC_TYPE_NC : mapping;
+#else
+		/*
+		 * It seems we have no way to control the memory mapping type
+		 * for the UAR, the default "Write-Combining" type is assumed.
+		 */
+		uar_mapping = 0;
+		RTE_SET_USED(mapping);
+#endif
+		uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);
+#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
+		if (!uar &&
+		    mapping < 0 &&
+		    uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
+			/*
+			 * In some environments, such as virtual machines,
+			 * the Write-Combining mapping might not be supported
+			 * and UAR allocation fails. Try the "Non-Cached"
+			 * mapping in that case.
+			 */
+			DRV_LOG(WARNING, "Failed to allocate DevX UAR (BF)");
+			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
+			uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);
+		} else if (!uar &&
+			   mapping < 0 &&
+			   uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) {
+			/*
+			 * If Verbs/kernel does not support "Non-Cached",
+			 * try the "Write-Combining" mapping.
+			 */
+			DRV_LOG(WARNING, "Failed to allocate DevX UAR (NC)");
+			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF;
+			uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);
+		}
+#endif
+		if (!uar) {
+			DRV_LOG(ERR, "Failed to allocate DevX UAR (BF/NC)");
+			rte_errno = ENOMEM;
+			goto exit;
+		}
+		base_addr = mlx5_os_get_devx_uar_base_addr(uar);
+		if (base_addr)
+			break;
+		/*
+		 * The UARs are allocated by rdma_core within the IB device
+		 * context; on context closure all UARs will be freed, so
+		 * there should be no memory/object leakage.
+		 */
+		DRV_LOG(WARNING, "Retrying to allocate DevX UAR");
+		uar = NULL;
+	}
+	/* Check whether we finally succeeded with a valid UAR allocation. */
+	if (!uar) {
+		DRV_LOG(ERR, "Failed to allocate DevX UAR (NULL base)");
+		rte_errno = ENOMEM;
+	}
+	/*
+	 * Return void * instead of struct mlx5dv_devx_uar * for
+	 * compatibility with older rdma-core library headers.
+	 */
+exit:
+	return uar;
+}
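
Note: mlx5_common_init() above replaces the RTE_INIT_PRIO(mlx5_glue_init, CLASS) constructor, so the common code is no longer initialized automatically on load; each dependent PMD must trigger it itself. A minimal sketch of such a PMD constructor follows; the constructor name is hypothetical, while RTE_INIT_PRIO and mlx5_common_init() come from the code above.

#include "mlx5_common.h"

/*
 * Hypothetical PMD constructor (the name is illustrative only).
 * mlx5_common_init() is idempotent thanks to the
 * mlx5_common_initialized guard, so several mlx5 PMDs linked into
 * the same binary may all call it from their constructors safely.
 */
RTE_INIT_PRIO(mlx5_example_pmd_init, CLASS)
{
	mlx5_common_init();
	/* ... register this PMD's driver/class here ... */
}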
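
Note: with a negative mapping argument the caller cannot know which mapping mlx5_devx_alloc_uar() finally succeeded with, so it must always pair UAR register writes with a write memory barrier, as the comment above requires. A minimal, hypothetical caller sketch under those assumptions (the uar_example() wrapper and the ctx handle are illustrative; mlx5_devx_alloc_uar(), mlx5_os_get_devx_uar_base_addr() and mlx5_glue->devx_free_uar() come from the mlx5 common code, and rte_wmb() is DPDK's write memory barrier):

#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_errno.h>

#include "mlx5_common.h"
#include "mlx5_glue.h"

/* Hypothetical example: allocate a UAR, use it, release it. */
static int
uar_example(void *ctx)
{
	/* mapping < 0: try Write-Combining first, fall back to Non-Cached. */
	void *uar = mlx5_devx_alloc_uar(ctx, -1);
	void *base;

	if (uar == NULL)
		return -rte_errno; /* Set to ENOMEM by the helper. */
	base = mlx5_os_get_devx_uar_base_addr(uar);
	/* ... ring doorbells / write registers through base ... */
	RTE_SET_USED(base);
	/*
	 * The mapping may be Write-Combining, so a write memory
	 * barrier is mandatory after the UAR register writes.
	 */
	rte_wmb();
	mlx5_glue->devx_free_uar(uar);
	return 0;
}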