1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2019 Mellanox Technologies, Ltd
10 #include <rte_mempool.h>
12 #include "mlx5_common.h"
13 #include "mlx5_common_os.h"
14 #include "mlx5_common_utils.h"
15 #include "mlx5_common_pci.h"
/* Log type id for the common mlx5 driver, registered in mlx5_log_init(). */
17 int mlx5_common_logtype;
/*
 * Set to 1 when the CPU is detected as Intel Haswell/Broadwell (see the
 * constructor below); PMDs read it when deciding on relaxed ordering.
 */
19 uint8_t haswell_broadwell_cpu;
21 /* In case this is an x86_64 intel processor to check if
22 * we should use relaxed ordering.
24 #ifdef RTE_ARCH_X86_64
26 * This function returns processor identification and feature information
29 * @param eax, ebx, ecx, edx
30 * Pointers to the registers that will hold cpu information.
32 * The main category of information returned.
34 static inline void mlx5_cpu_id(unsigned int level,
35 unsigned int *eax, unsigned int *ebx,
36 unsigned int *ecx, unsigned int *edx)
/*
 * Output constraints bind each pointer to the matching x86 register
 * (EAX/EBX/ECX/EDX) filled by the CPUID instruction for leaf `level`.
 * NOTE(review): the asm mnemonic and input operand lines are not visible
 * in this listing — presumably `__asm__("cpuid" ... : "0" (level))`; confirm
 * against the full file.
 */
39 : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
/*
 * Constructor (LOG priority): register the "pmd.common.mlx5" log type and
 * default its level to NOTICE when registration succeeds.
 */
44 RTE_INIT_PRIO(mlx5_log_init, LOG)
46 mlx5_common_logtype = rte_log_register("pmd.common.mlx5");
47 if (mlx5_common_logtype >= 0)
48 rte_log_set_level(mlx5_common_logtype, RTE_LOG_NOTICE);
/* Guards the one-time initialization performed by mlx5_common_init(). */
51 static bool mlx5_common_initialized;
54 * One time initialization routine for run-time dependency on glue library
55 * for multiple PMDs. Each mlx5 PMD that depends on mlx5_common module,
56 * must invoke in its constructor.
59 mlx5_common_init(void)
/* Run only once, no matter how many mlx5 PMD constructors invoke this. */
61 if (mlx5_common_initialized)
/* Resolve the glue library, then set up the common PCI layer. */
64 mlx5_glue_constructor();
65 mlx5_common_pci_init();
66 mlx5_common_initialized = true;
70 * This function is responsible for initializing the variable
71 * haswell_broadwell_cpu by checking if the cpu is intel
72 * and reading the data returned from mlx5_cpu_id().
73 * since haswell and broadwell cpus don't have improved performance
74 * when using relaxed ordering we want to check the cpu type
75 * before deciding whether to enable RO or not.
76 * if the cpu is haswell or broadwell the variable will be set to 1
77 * otherwise it will be 0.
79 RTE_INIT_PRIO(mlx5_is_haswell_broadwell_cpu, LOG)
81 #ifdef RTE_ARCH_X86_64
/* CPUID model numbers of the Broadwell and Haswell micro-architectures. */
82 unsigned int broadwell_models[4] = {0x3d, 0x47, 0x4F, 0x56};
83 unsigned int haswell_models[4] = {0x3c, 0x3f, 0x45, 0x46};
84 unsigned int i, model, family, brand_id, vendor;
/* EBX returned by CPUID leaf 0 on Intel CPUs: ASCII "Genu" of "GenuineIntel". */
85 unsigned int signature_intel_ebx = 0x756e6547;
86 unsigned int extended_model;
/* Leaf 0: vendor identification (EBX holds the first 4 vendor bytes). */
93 mlx5_cpu_id(0, &eax, &ebx, &ecx, &edx);
/* Default to "not Haswell/Broadwell" unless a model below matches. */
97 haswell_broadwell_cpu = 0;
/* Leaf 1: processor signature — family/model/stepping encoded in EAX. */
100 mlx5_cpu_id(1, &eax, &ebx, &ecx, &edx);
101 model = (eax >> 4) & 0x0f;
102 family = (eax >> 8) & 0x0f;
103 brand_id = ebx & 0xff;
/* Extended model (EAX bits 19:16) pre-shifted into the high nibble so it
 * can be OR-combined with the 4-bit base model via `model += extended_model`.
 */
104 extended_model = (eax >> 12) & 0xf0;
105 /* Check if the processor is Haswell or Broadwell */
106 if (vendor == signature_intel_ebx) {
108 model += extended_model;
109 if (brand_id == 0 && family == 0x6) {
110 for (i = 0; i < RTE_DIM(broadwell_models); i++)
111 if (model == broadwell_models[i]) {
112 haswell_broadwell_cpu = 1;
115 for (i = 0; i < RTE_DIM(haswell_models); i++)
116 if (model == haswell_models[i]) {
117 haswell_broadwell_cpu = 1;
/* Non-x86_64 builds: presumably the #else branch — never restrict RO. */
123 haswell_broadwell_cpu = 0;
127 * Allocate the User Access Region with DevX on specified device.
130 * Infiniband device context to perform allocation on.
131 * @param [in] mapping
132 * MLX5DV_UAR_ALLOC_TYPE_BF - allocate as cached memory with write-combining
133 * attributes (if supported by the host), the
134 * writes to the UAR registers must be followed
135 * by write memory barrier.
136 * MLX5DV_UAR_ALLOC_TYPE_NC - allocate as non-cached memory, all writes are
137 * promoted to the registers immediately, no
138 * memory barriers needed.
139 * mapping < 0 - the first attempt is performed with MLX5DV_UAR_ALLOC_TYPE_BF,
140 * if this fails the next attempt with MLX5DV_UAR_ALLOC_TYPE_NC
141 * is performed. The drivers specifying negative values should
142 * always provide the write memory barrier operation after UAR
144 * If there is no definitions for the MLX5DV_UAR_ALLOC_TYPE_xx (older rdma
145 * library headers), the caller can specify 0.
148 * UAR object pointer on success, NULL otherwise and rte_errno is set.
151 mlx5_devx_alloc_uar(void *ctx, int mapping)
154 uint32_t retry, uar_mapping;
157 for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
158 #ifdef MLX5DV_UAR_ALLOC_TYPE_NC
159 /* Control the mapping type according to the settings. */
160 uar_mapping = (mapping < 0) ?
161 MLX5DV_UAR_ALLOC_TYPE_NC : mapping;
164 * It seems we have no way to control the memory mapping type
165 * for the UAR, the default "Write-Combining" type is supposed.
168 RTE_SET_USED(mapping);
170 uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);
171 #ifdef MLX5DV_UAR_ALLOC_TYPE_NC
174 uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
176 * In some environments like virtual machine the
177 * Write Combining mapped might be not supported and
178 * UAR allocation fails. We tried "Non-Cached" mapping
181 DRV_LOG(WARNING, "Failed to allocate DevX UAR (BF)");
182 uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
183 uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);
186 uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) {
188 * If Verbs/kernel does not support "Non-Cached"
189 * try the "Write-Combining".
191 DRV_LOG(WARNING, "Failed to allocate DevX UAR (NC)");
192 uar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF;
193 uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);
197 DRV_LOG(ERR, "Failed to allocate DevX UAR (BF/NC)");
201 base_addr = mlx5_os_get_devx_uar_base_addr(uar);
205 * The UARs are allocated by rdma_core within the
206 * IB device context, on context closure all UARs
207 * will be freed, should be no memory/object leakage.
209 DRV_LOG(WARNING, "Retrying to allocate DevX UAR");
212 /* Check whether we finally succeeded with valid UAR allocation. */
214 DRV_LOG(ERR, "Failed to allocate DevX UAR (NULL base)");
218 * Return void * instead of struct mlx5dv_devx_uar *
219 * is for compatibility with older rdma-core library headers.