+
+static bool mlx5_common_initialized;
+
+/**
+ * One-time initialization routine for the run-time dependency on the glue
+ * library, shared by multiple PMDs. Each mlx5 PMD that depends on the
+ * mlx5_common module must invoke this routine in its constructor.
+ */
+void
+mlx5_common_init(void)
+{
+ if (mlx5_common_initialized)
+ return;
+
+ mlx5_glue_constructor();
+ mlx5_common_driver_init();
+ mlx5_common_initialized = true;
+}
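+
+/*
+ * A minimal usage sketch (illustrative only, not part of this patch):
+ * a hypothetical PMD constructor invoking mlx5_common_init() before any
+ * other common-module work. The PMD name below is invented for the example.
+ *
+ *   RTE_INIT(mlx5_example_pmd_init)
+ *   {
+ *       mlx5_common_init();
+ *       ... register the PMD with its bus driver ...
+ *   }
+ */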
+
+/**
+ * This function is responsible for initializing the variable
+ * haswell_broadwell_cpu by checking whether the CPU is Intel
+ * and reading the data returned from mlx5_cpu_id().
+ * Since Haswell and Broadwell CPUs do not gain performance
+ * from relaxed ordering, the CPU type must be checked
+ * before deciding whether to enable RO or not.
+ * The variable is set to 1 if the CPU is Haswell or Broadwell,
+ * and to 0 otherwise.
+ */
+RTE_INIT_PRIO(mlx5_is_haswell_broadwell_cpu, LOG)
+{
+#ifdef RTE_ARCH_X86_64
+	unsigned int broadwell_models[4] = {0x3d, 0x47, 0x4f, 0x56};
+ unsigned int haswell_models[4] = {0x3c, 0x3f, 0x45, 0x46};
+ unsigned int i, model, family, brand_id, vendor;
+ unsigned int signature_intel_ebx = 0x756e6547;
+ unsigned int extended_model;
+ unsigned int eax = 0;
+ unsigned int ebx = 0;
+ unsigned int ecx = 0;
+ unsigned int edx = 0;
+ int max_level;
+
+ mlx5_cpu_id(0, &eax, &ebx, &ecx, &edx);
+ vendor = ebx;
+ max_level = eax;
+ if (max_level < 1) {
+ haswell_broadwell_cpu = 0;
+ return;
+ }
+ mlx5_cpu_id(1, &eax, &ebx, &ecx, &edx);
+ model = (eax >> 4) & 0x0f;
+ family = (eax >> 8) & 0x0f;
+ brand_id = ebx & 0xff;
+ extended_model = (eax >> 12) & 0xf0;
+ /* Check if the processor is Haswell or Broadwell */
+ if (vendor == signature_intel_ebx) {
+ if (family == 0x06)
+ model += extended_model;
+	if (brand_id == 0 && family == 0x06) {
+ for (i = 0; i < RTE_DIM(broadwell_models); i++)
+ if (model == broadwell_models[i]) {
+ haswell_broadwell_cpu = 1;
+ return;
+ }
+ for (i = 0; i < RTE_DIM(haswell_models); i++)
+ if (model == haswell_models[i]) {
+ haswell_broadwell_cpu = 1;
+ return;
+ }
+ }
+ }
+#endif
+ haswell_broadwell_cpu = 0;
+}
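+
+/*
+ * Worked example of the decoding above (illustrative only): a Broadwell-EP
+ * CPU reports CPUID.1.EAX = 0x406f1, which decodes as
+ *   model          = (0x406f1 >> 4) & 0x0f  = 0x0f
+ *   family         = (0x406f1 >> 8) & 0x0f  = 0x06
+ *   extended_model = (0x406f1 >> 12) & 0xf0 = 0x40
+ * so the effective model is 0x0f + 0x40 = 0x4f, which matches an entry in
+ * broadwell_models[] and sets haswell_broadwell_cpu to 1.
+ */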
+
+/**
+ * Allocate the User Access Region with DevX on specified device.
+ *
+ * @param [in] ctx
+ * Infiniband device context to perform allocation on.
+ * @param [in] mapping
+ * MLX5DV_UAR_ALLOC_TYPE_BF - allocate as cached memory with write-combining
+ *                            attributes (if supported by the host); writes
+ *                            to the UAR registers must be followed by a
+ *                            write memory barrier.
+ * MLX5DV_UAR_ALLOC_TYPE_NC - allocate as non-cached memory; all writes are
+ *                            promoted to the registers immediately, no
+ *                            memory barriers are needed.
+ * mapping < 0 - the first attempt is performed with MLX5DV_UAR_ALLOC_TYPE_BF;
+ *               if it fails, the next attempt is performed with
+ *               MLX5DV_UAR_ALLOC_TYPE_NC. Drivers specifying negative values
+ *               should always provide the write memory barrier operation
+ *               after UAR register writes.
+ * If there are no definitions for MLX5DV_UAR_ALLOC_TYPE_xx (older rdma-core
+ * library headers), the caller can specify 0.
+ *
+ * @return
+ * UAR object pointer on success, NULL otherwise and rte_errno is set.
+ */
+void *
+mlx5_devx_alloc_uar(void *ctx, int mapping)
+{
+ void *uar;
+ uint32_t retry, uar_mapping;
+ void *base_addr;
+
+ for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
+#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
+ /* Control the mapping type according to the settings. */
+ uar_mapping = (mapping < 0) ?
+ MLX5DV_UAR_ALLOC_TYPE_NC : mapping;
+#else
+ /*
+		 * There is no way to control the memory mapping type
+		 * for the UAR; the default "Write-Combining" type is assumed.
+ */
+ uar_mapping = 0;
+ RTE_SET_USED(mapping);
+#endif
+ uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);
+#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
+ if (!uar &&
+ mapping < 0 &&
+ uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
+ /*
+			 * In some environments, such as virtual machines, the
+			 * Write-Combining mapping may not be supported and the
+			 * UAR allocation fails. Try the "Non-Cached" mapping
+			 * in this case.
+ */
+ DRV_LOG(WARNING, "Failed to allocate DevX UAR (BF)");
+ uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
+ uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);
+ } else if (!uar &&
+ mapping < 0 &&
+ uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) {
+ /*
+			 * If Verbs/kernel does not support "Non-Cached",
+			 * try "Write-Combining" instead.
+ */
+ DRV_LOG(WARNING, "Failed to allocate DevX UAR (NC)");
+ uar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF;
+ uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);
+ }
+#endif
+ if (!uar) {
+ DRV_LOG(ERR, "Failed to allocate DevX UAR (BF/NC)");
+ rte_errno = ENOMEM;
+ goto exit;
+ }
+ base_addr = mlx5_os_get_devx_uar_base_addr(uar);
+ if (base_addr)
+ break;
+ /*
+		 * The UARs are allocated by rdma-core within the
+		 * IB device context; on context closure all UARs
+		 * will be freed, so no memory/object leakage is expected.
+ */
+ DRV_LOG(WARNING, "Retrying to allocate DevX UAR");
+ uar = NULL;
+ }
+ /* Check whether we finally succeeded with valid UAR allocation. */
+ if (!uar) {
+ DRV_LOG(ERR, "Failed to allocate DevX UAR (NULL base)");
+ rte_errno = ENOMEM;
+ }
+ /*
+	 * Returning void * instead of struct mlx5dv_devx_uar *
+	 * is for compatibility with older rdma-core library headers.
+ */
+exit:
+ return uar;
+}
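+
+/*
+ * A minimal usage sketch (illustrative only; obtaining the device context
+ * and the register offset is elided): allocate a UAR with the automatic
+ * BF/NC fallback (mapping < 0) and issue the write memory barrier required
+ * when the mapping may be "Write-Combining".
+ *
+ *   void *uar = mlx5_devx_alloc_uar(ctx, -1);
+ *
+ *   if (uar == NULL)
+ *       return -rte_errno;
+ *   base_addr = mlx5_os_get_devx_uar_base_addr(uar);
+ *   ... write the doorbell at the proper offset from base_addr ...
+ *   rte_wmb();
+ *
+ * The rte_wmb() call is mandatory for mapping < 0, as noted above.
+ */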
+
+RTE_PMD_EXPORT_NAME(mlx5_common_driver, __COUNTER__);