/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>

#include <rte_errno.h>
#include <rte_kvargs.h>
#include <rte_mempool.h>
#include <rte_class.h>
#include <rte_malloc.h>
#include <rte_eal_paging.h>

#include "mlx5_common.h"
#include "mlx5_common_os.h"
#include "mlx5_common_mp.h"
#include "mlx5_common_log.h"
#include "mlx5_common_defs.h"
#include "mlx5_common_private.h"

uint8_t haswell_broadwell_cpu;

/* In case this is an x86_64 Intel processor, check whether relaxed
 * ordering should be used.
 */
#ifdef RTE_ARCH_X86_64
/**
 * This function returns processor identification and feature information
 * to the caller.
 *
 * @param eax, ebx, ecx, edx
 *   Pointers to the registers that will hold cpu information.
 * @param level
 *   The main category of information returned.
 */
static inline void mlx5_cpu_id(unsigned int level,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	/* Standard CPUID invocation: leaf in EAX, results in EAX..EDX. */
	__asm__("cpuid\n\t"
		: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		: "0" (level));
}
#endif

RTE_LOG_REGISTER_DEFAULT(mlx5_common_logtype, NOTICE)

/* Head of list of drivers. */
static TAILQ_HEAD(mlx5_drivers, mlx5_class_driver) drivers_list =
				TAILQ_HEAD_INITIALIZER(drivers_list);

/* Head of devices. */
static TAILQ_HEAD(mlx5_devices, mlx5_common_device) devices_list =
				TAILQ_HEAD_INITIALIZER(devices_list);
static pthread_mutex_t devices_list_lock;

static const struct {
	const char *name;
	unsigned int drv_class;
} mlx5_classes[] = {
	{ .name = "vdpa", .drv_class = MLX5_CLASS_VDPA },
	{ .name = "eth", .drv_class = MLX5_CLASS_ETH },
	/* Keep class "net" for backward compatibility. */
	{ .name = "net", .drv_class = MLX5_CLASS_ETH },
	{ .name = "regex", .drv_class = MLX5_CLASS_REGEX },
	{ .name = "compress", .drv_class = MLX5_CLASS_COMPRESS },
	{ .name = "crypto", .drv_class = MLX5_CLASS_CRYPTO },
};

static int
class_name_to_value(const char *class_name)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(mlx5_classes); i++) {
		if (strcmp(class_name, mlx5_classes[i].name) == 0)
			return mlx5_classes[i].drv_class;
	}
	return -EINVAL;
}
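
/*
 * Illustrative example (comment only, not from the original sources):
 *
 *   class_name_to_value("eth") -> MLX5_CLASS_ETH
 *   class_name_to_value("net") -> MLX5_CLASS_ETH (legacy alias)
 *   class_name_to_value("foo") -> -EINVAL
 */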

static struct mlx5_class_driver *
driver_get(uint32_t class)
{
	struct mlx5_class_driver *driver;

	TAILQ_FOREACH(driver, &drivers_list, next) {
		if ((uint32_t)driver->drv_class == class)
			return driver;
	}
	return NULL;
}

/**
 * Verify and store value for devargs.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data, a pointer to the device configuration.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_common_args_check_handler(const char *key, const char *val, void *opaque)
{
	struct mlx5_common_dev_config *config = opaque;
	signed long tmp;

	if (val == NULL || *val == '\0') {
		DRV_LOG(ERR, "Key %s is missing value.", key);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	errno = 0;
	tmp = strtol(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		DRV_LOG(WARNING, "%s: \"%s\" is an invalid integer.", key, val);
		return -rte_errno;
	}
	if (strcmp(key, "tx_db_nc") == 0) {
		if (tmp != MLX5_TXDB_CACHED &&
		    tmp != MLX5_TXDB_NCACHED &&
		    tmp != MLX5_TXDB_HEURISTIC) {
			DRV_LOG(ERR, "Invalid Tx doorbell mapping parameter.");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->dbnc = tmp;
	} else if (strcmp(key, "mr_ext_memseg_en") == 0) {
		config->mr_ext_memseg_en = !!tmp;
	} else if (strcmp(key, "mr_mempool_reg_en") == 0) {
		config->mr_mempool_reg_en = !!tmp;
	} else if (strcmp(key, "sys_mem_en") == 0) {
		config->sys_mem_en = !!tmp;
	}
	return 0;
}

/**
 * Parse common device parameters.
 *
 * @param devargs
 *   Device arguments structure.
 * @param config
 *   Pointer to device configuration structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_common_config_get(struct rte_devargs *devargs,
		       struct mlx5_common_dev_config *config)
{
	struct rte_kvargs *kvlist;
	int ret = 0;

	/* Set defaults. */
	config->mr_ext_memseg_en = 1;
	config->mr_mempool_reg_en = 1;
	config->sys_mem_en = 0;
	config->dbnc = MLX5_ARG_UNSET;
	if (devargs == NULL)
		return 0;
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	ret = rte_kvargs_process(kvlist, NULL, mlx5_common_args_check_handler,
				 config);
	if (ret)
		ret = -rte_errno;
	rte_kvargs_free(kvlist);
	DRV_LOG(DEBUG, "mr_ext_memseg_en is %u.", config->mr_ext_memseg_en);
	DRV_LOG(DEBUG, "mr_mempool_reg_en is %u.", config->mr_mempool_reg_en);
	DRV_LOG(DEBUG, "sys_mem_en is %u.", config->sys_mem_en);
	DRV_LOG(DEBUG, "Tx doorbell mapping parameter is %d.", config->dbnc);
	return ret;
}
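
/*
 * Illustrative example (comment only): with EAL arguments such as
 *
 *   -a 0000:08:00.0,mr_ext_memseg_en=0,tx_db_nc=1
 *
 * mlx5_common_config_get() keeps mr_mempool_reg_en and sys_mem_en at their
 * defaults (1 and 0), clears mr_ext_memseg_en, and stores the validated
 * tx_db_nc value in config->dbnc. The PCI address is hypothetical.
 */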

static int
devargs_class_handler(__rte_unused const char *key,
		      const char *class_names, void *opaque)
{
	int *ret = opaque;
	int class_val;
	char *scratch;
	char *found;
	char *refstr = NULL;

	*ret = 0;
	scratch = strdup(class_names);
	if (scratch == NULL) {
		*ret = -ENOMEM;
		return *ret;
	}
	found = strtok_r(scratch, ":", &refstr);
	if (found == NULL)
		/* Empty string. */
		goto err;
	do {
		/* Extract each individual class name. Multiple
		 * classes can be supplied as class=net:regex:foo:bar.
		 */
		class_val = class_name_to_value(found);
		/* Check if it's a valid class. */
		if (class_val < 0) {
			*ret = -EINVAL;
			goto err;
		}
		*ret |= class_val;
		found = strtok_r(NULL, ":", &refstr);
	} while (found != NULL);
err:
	free(scratch);
	if (*ret < 0)
		DRV_LOG(ERR, "Invalid mlx5 class options: %s.\n", class_names);
	return *ret;
}

static int
parse_class_options(const struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	int ret = 0;

	if (devargs == NULL)
		return 0;
	if (devargs->cls != NULL && devargs->cls->name != NULL)
		/* Global syntax, only one class type. */
		return class_name_to_value(devargs->cls->name);
	/* Legacy devargs support multiple classes. */
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;
	rte_kvargs_process(kvlist, RTE_DEVARGS_KEY_CLASS,
			   devargs_class_handler, &ret);
	rte_kvargs_free(kvlist);
	return ret;
}
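
/*
 * Illustrative example (comment only): legacy devargs may request several
 * classes at once, e.g. class=net:regex. devargs_class_handler() splits the
 * string on ':' and ORs the resolved values, so parse_class_options() would
 * return MLX5_CLASS_ETH | MLX5_CLASS_REGEX here, or a negative value if any
 * name is unknown.
 */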

static const unsigned int mlx5_class_invalid_combinations[] = {
	MLX5_CLASS_ETH | MLX5_CLASS_VDPA,
	/* New class combination should be added here. */
};

static int
is_valid_class_combination(uint32_t user_classes)
{
	unsigned int i;

	/* Verify if user specified unsupported combination. */
	for (i = 0; i < RTE_DIM(mlx5_class_invalid_combinations); i++) {
		if ((mlx5_class_invalid_combinations[i] & user_classes) ==
		    mlx5_class_invalid_combinations[i])
			return -EINVAL;
	}
	/* No invalid class combination found. */
	return 0;
}
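
/*
 * Illustrative example (comment only): class=eth:vdpa matches the
 * MLX5_CLASS_ETH | MLX5_CLASS_VDPA entry above and is rejected with
 * -EINVAL, while class=eth:regex contains no invalid subset and passes.
 */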

static bool
mlx5_bus_match(const struct mlx5_class_driver *drv,
	       const struct rte_device *dev)
{
	if (mlx5_dev_is_pci(dev))
		return mlx5_dev_pci_match(drv, dev);
	/* Non-PCI devices (e.g. auxiliary bus) are not filtered here. */
	return true;
}

static struct mlx5_common_device *
to_mlx5_device(const struct rte_device *rte_dev)
{
	struct mlx5_common_device *cdev;

	TAILQ_FOREACH(cdev, &devices_list, next) {
		if (rte_dev == cdev->dev)
			return cdev;
	}
	return NULL;
}

int
mlx5_dev_to_pci_str(const struct rte_device *dev, char *addr, size_t size)
{
	struct rte_pci_addr pci_addr = { 0 };
	int ret;

	if (mlx5_dev_is_pci(dev)) {
		/* Input might be <BDF>, format PCI address to <DBDF>. */
		ret = rte_pci_addr_parse(dev->name, &pci_addr);
		if (ret != 0) {
			rte_errno = ENODEV;
			return -rte_errno;
		}
		rte_pci_device_name(&pci_addr, addr, size);
		return 0;
	}
#ifdef RTE_EXEC_ENV_LINUX
	return mlx5_auxiliary_get_pci_str(RTE_DEV_TO_AUXILIARY_CONST(dev),
					  addr, size);
#else
	rte_errno = ENODEV;
	return -rte_errno;
#endif
}
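
/*
 * Illustrative example (comment only): for a PCI device named "08:00.0"
 * this helper emits the canonical <DBDF> form "0000:08:00.0"; for an
 * auxiliary device on Linux, the PCI address string of the parent PCI
 * device is returned instead.
 */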

/**
 * Register the mempool for the protection domain.
 *
 * @param cdev
 *   Pointer to the mlx5 common device.
 * @param mp
 *   Mempool being registered.
 * @param is_extmem
 *   True if the mempool holds external memory.
 *
 * @return
 *   0 on success, (-1) on failure and rte_errno is set.
 */
static int
mlx5_dev_mempool_register(struct mlx5_common_device *cdev,
			  struct rte_mempool *mp, bool is_extmem)
{
	return mlx5_mr_mempool_register(cdev, mp, is_extmem);
}

/**
 * Unregister the mempool from the protection domain.
 *
 * @param cdev
 *   Pointer to the mlx5 common device.
 * @param mp
 *   Mempool being unregistered.
 */
static void
mlx5_dev_mempool_unregister(struct mlx5_common_device *cdev,
			    struct rte_mempool *mp)
{
	if (mlx5_mr_mempool_unregister(cdev, mp) < 0)
		DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s",
			mp->name, cdev->pd, rte_strerror(rte_errno));
}

/**
 * rte_mempool_walk() callback to register mempools for the protection domain.
 *
 * @param mp
 *   The mempool being walked.
 * @param arg
 *   Pointer to the device shared context.
 */
static void
mlx5_dev_mempool_register_cb(struct rte_mempool *mp, void *arg)
{
	struct mlx5_common_device *cdev = arg;
	int ret;

	ret = mlx5_dev_mempool_register(cdev, mp, false);
	if (ret < 0 && rte_errno != EEXIST)
		DRV_LOG(ERR,
			"Failed to register existing mempool %s for PD %p: %s",
			mp->name, cdev->pd, rte_strerror(rte_errno));
}

/**
 * rte_mempool_walk() callback to unregister mempools
 * from the protection domain.
 *
 * @param mp
 *   The mempool being walked.
 * @param arg
 *   Pointer to the device shared context.
 */
static void
mlx5_dev_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
{
	mlx5_dev_mempool_unregister((struct mlx5_common_device *)arg, mp);
}

/**
 * Mempool life cycle callback for mlx5 common devices.
 *
 * @param event
 *   Mempool life cycle event.
 * @param mp
 *   Associated mempool.
 * @param arg
 *   Pointer to a device shared context.
 */
static void
mlx5_dev_mempool_event_cb(enum rte_mempool_event event, struct rte_mempool *mp,
			  void *arg)
{
	struct mlx5_common_device *cdev = arg;

	switch (event) {
	case RTE_MEMPOOL_EVENT_READY:
		if (mlx5_dev_mempool_register(cdev, mp, false) < 0)
			DRV_LOG(ERR,
				"Failed to register new mempool %s for PD %p: %s",
				mp->name, cdev->pd, rte_strerror(rte_errno));
		break;
	case RTE_MEMPOOL_EVENT_DESTROY:
		mlx5_dev_mempool_unregister(cdev, mp);
		break;
	}
}

int
mlx5_dev_mempool_subscribe(struct mlx5_common_device *cdev)
{
	int ret = 0;

	if (!cdev->config.mr_mempool_reg_en)
		return 0;
	rte_rwlock_write_lock(&cdev->mr_scache.mprwlock);
	if (cdev->mr_scache.mp_cb_registered)
		goto exit;
	/* Callback for this device may be already registered. */
	ret = rte_mempool_event_callback_register(mlx5_dev_mempool_event_cb,
						  cdev);
	if (ret != 0 && rte_errno != EEXIST)
		goto exit;
	/* Register mempools only once for this device. */
	if (ret == 0)
		rte_mempool_walk(mlx5_dev_mempool_register_cb, cdev);
	ret = 0;
	cdev->mr_scache.mp_cb_registered = 1;
exit:
	rte_rwlock_write_unlock(&cdev->mr_scache.mprwlock);
	return ret;
}
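
/*
 * Illustrative usage (comment only): a class driver typically calls
 * mlx5_dev_mempool_subscribe(cdev) once at device start. Pre-existing
 * mempools are registered immediately via rte_mempool_walk(), later
 * RTE_MEMPOOL_EVENT_READY/DESTROY events keep the registrations in sync,
 * and repeated calls are no-ops thanks to mp_cb_registered.
 */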

int
mlx5_dev_mempool_unsubscribe(struct mlx5_common_device *cdev)
{
	int ret = 0;

	if (!cdev->mr_scache.mp_cb_registered ||
	    !cdev->config.mr_mempool_reg_en)
		return 0;
	/* Stop watching for mempool events and unregister all mempools. */
	ret = rte_mempool_event_callback_unregister(mlx5_dev_mempool_event_cb,
						    cdev);
	if (ret == 0)
		rte_mempool_walk(mlx5_dev_mempool_unregister_cb, cdev);
	return ret;
}

/**
 * Callback for memory event.
 *
 * @param event_type
 *   Memory event type.
 * @param addr
 *   Address of memory.
 * @param len
 *   Size of memory.
 * @param arg
 *   Unused opaque pointer.
 */
static void
mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
		     size_t len, void *arg __rte_unused)
{
	struct mlx5_common_device *cdev;

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	switch (event_type) {
	case RTE_MEM_EVENT_FREE:
		pthread_mutex_lock(&devices_list_lock);
		/* Iterate all the existing mlx5 devices. */
		TAILQ_FOREACH(cdev, &devices_list, next)
			mlx5_free_mr_by_addr(&cdev->mr_scache,
					     mlx5_os_get_ctx_device_name
								(cdev->ctx),
					     addr, len);
		pthread_mutex_unlock(&devices_list_lock);
		break;
	case RTE_MEM_EVENT_ALLOC:
	default:
		break;
	}
}

/**
 * Uninitialize all HW global resources of the device context.
 *
 * @param cdev
 *   Pointer to mlx5 device structure.
 */
static void
mlx5_dev_hw_global_release(struct mlx5_common_device *cdev)
{
	if (cdev->pd != NULL) {
		claim_zero(mlx5_os_dealloc_pd(cdev->pd));
		cdev->pd = NULL;
	}
	if (cdev->ctx != NULL) {
		claim_zero(mlx5_glue->close_device(cdev->ctx));
		cdev->ctx = NULL;
	}
}

/**
 * Initialize all HW global resources of the device context.
 *
 * @param cdev
 *   Pointer to mlx5 device structure.
 * @param classes
 *   Chosen classes come from user device arguments.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_dev_hw_global_prepare(struct mlx5_common_device *cdev, uint32_t classes)
{
	int ret;

	/* Create context device. */
	ret = mlx5_os_open_device(cdev, classes);
	if (ret < 0)
		return ret;
	/* Allocate Protection Domain object and extract its pdn. */
	ret = mlx5_os_pd_create(cdev);
	if (ret)
		goto error;
	/* All actions taken below are relevant only when DevX is supported. */
	if (cdev->config.devx == 0)
		return 0;
	/* Query HCA attributes. */
	ret = mlx5_devx_cmd_query_hca_attr(cdev->ctx, &cdev->config.hca_attr);
	if (ret) {
		DRV_LOG(ERR, "Unable to read HCA capabilities.");
		rte_errno = ENOTSUP;
		goto error;
	}
	return 0;
error:
	mlx5_dev_hw_global_release(cdev);
	return ret;
}

static void
mlx5_common_dev_release(struct mlx5_common_device *cdev)
{
	pthread_mutex_lock(&devices_list_lock);
	TAILQ_REMOVE(&devices_list, cdev, next);
	pthread_mutex_unlock(&devices_list_lock);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (TAILQ_EMPTY(&devices_list))
			rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
							  NULL);
		mlx5_dev_mempool_unsubscribe(cdev);
		mlx5_mr_release_cache(&cdev->mr_scache);
		mlx5_dev_hw_global_release(cdev);
	}
	rte_free(cdev);
}

static struct mlx5_common_device *
mlx5_common_dev_create(struct rte_device *eal_dev, uint32_t classes)
{
	struct mlx5_common_device *cdev;
	int ret;

	cdev = rte_zmalloc("mlx5_common_device", sizeof(*cdev), 0);
	if (!cdev) {
		DRV_LOG(ERR, "Device allocation failure.");
		rte_errno = ENOMEM;
		return NULL;
	}
	cdev->dev = eal_dev;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		goto exit;
	/* Parse device parameters. */
	ret = mlx5_common_config_get(eal_dev->devargs, &cdev->config);
	if (ret < 0) {
		DRV_LOG(ERR, "Failed to process device arguments: %s",
			strerror(rte_errno));
		rte_free(cdev);
		return NULL;
	}
	mlx5_malloc_mem_select(cdev->config.sys_mem_en);
	/* Initialize all HW global resources of the device context. */
	ret = mlx5_dev_hw_global_prepare(cdev, classes);
	if (ret) {
		DRV_LOG(ERR, "Failed to initialize device context.");
		rte_free(cdev);
		return NULL;
	}
	/* Initialize global MR cache resources and update its functions. */
	ret = mlx5_mr_create_cache(&cdev->mr_scache, eal_dev->numa_node);
	if (ret) {
		DRV_LOG(ERR, "Failed to initialize global MR share cache.");
		mlx5_dev_hw_global_release(cdev);
		rte_free(cdev);
		return NULL;
	}
	/* Register callback function for global shared MR cache management. */
	if (TAILQ_EMPTY(&devices_list))
		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
						mlx5_mr_mem_event_cb, NULL);
exit:
	pthread_mutex_lock(&devices_list_lock);
	TAILQ_INSERT_HEAD(&devices_list, cdev, next);
	pthread_mutex_unlock(&devices_list_lock);
	return cdev;
}

static int
drivers_remove(struct mlx5_common_device *cdev, uint32_t enabled_classes)
{
	struct mlx5_class_driver *driver;
	int local_ret = -ENODEV;
	unsigned int i = 0;
	int ret = 0;

	while (enabled_classes) {
		driver = driver_get(RTE_BIT64(i));
		if (driver != NULL) {
			local_ret = driver->remove(cdev);
			if (local_ret == 0)
				cdev->classes_loaded &= ~RTE_BIT64(i);
			else if (ret == 0)
				ret = local_ret;
		}
		enabled_classes &= ~RTE_BIT64(i);
		i++;
	}
	if (local_ret != 0 && ret == 0)
		ret = local_ret;
	return ret;
}

static int
drivers_probe(struct mlx5_common_device *cdev, uint32_t user_classes)
{
	struct mlx5_class_driver *driver;
	uint32_t enabled_classes = 0;
	bool already_loaded;
	int ret = -EINVAL;

	TAILQ_FOREACH(driver, &drivers_list, next) {
		if ((driver->drv_class & user_classes) == 0)
			continue;
		if (!mlx5_bus_match(driver, cdev->dev))
			continue;
		already_loaded = cdev->classes_loaded & driver->drv_class;
		if (already_loaded && driver->probe_again == 0) {
			DRV_LOG(ERR, "Device %s is already probed",
				cdev->dev->name);
			ret = -EEXIST;
			goto probe_err;
		}
		ret = driver->probe(cdev);
		if (ret < 0) {
			DRV_LOG(ERR, "Failed to load driver %s",
				driver->name);
			goto probe_err;
		}
		enabled_classes |= driver->drv_class;
	}
	if (!ret) {
		cdev->classes_loaded |= enabled_classes;
		return 0;
	}
probe_err:
	/*
	 * Remove only drivers that were probed by this instance; classes
	 * already loaded before this call are kept.
	 */
	enabled_classes &= ~cdev->classes_loaded;
	drivers_remove(cdev, enabled_classes);
	return ret;
}
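
/*
 * Illustrative example (comment only): with class=net:regex,
 * drivers_probe() walks the registered drivers, skips those whose
 * drv_class was not requested, and calls probe() on the rest. If the
 * regex probe fails after the net probe succeeded, only the classes
 * enabled by this invocation are rolled back via drivers_remove().
 */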

int
mlx5_common_dev_probe(struct rte_device *eal_dev)
{
	struct mlx5_common_device *cdev;
	uint32_t classes = 0;
	bool new_device = false;
	int ret;

	DRV_LOG(INFO, "probe device \"%s\".", eal_dev->name);
	ret = parse_class_options(eal_dev->devargs);
	if (ret < 0) {
		DRV_LOG(ERR, "Unsupported mlx5 class type: %s",
			eal_dev->devargs->args);
		return ret;
	}
	classes = ret;
	if (classes == 0)
		/* Default to net class. */
		classes = MLX5_CLASS_ETH;
	cdev = to_mlx5_device(eal_dev);
	if (!cdev) {
		cdev = mlx5_common_dev_create(eal_dev, classes);
		if (!cdev)
			return -ENOMEM;
		new_device = true;
	}
	/*
	 * Validate the combination here.
	 * For a new device, the classes_loaded field is 0, so only the
	 * classes given as user device arguments are checked.
	 */
	ret = is_valid_class_combination(classes | cdev->classes_loaded);
	if (ret != 0) {
		DRV_LOG(ERR, "Unsupported mlx5 classes combination.");
		goto class_err;
	}
	ret = drivers_probe(cdev, classes);
	if (ret)
		goto class_err;
	return 0;
class_err:
	if (new_device)
		mlx5_common_dev_release(cdev);
	return ret;
}
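
/*
 * Illustrative example (comment only): probing the same device twice,
 * first with class=net and then with class=regex, reuses the
 * mlx5_common_device found by to_mlx5_device() on the second call,
 * validates the accumulated combination (ETH | REGEX), and loads only
 * the regex driver on top.
 */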

int
mlx5_common_dev_remove(struct rte_device *eal_dev)
{
	struct mlx5_common_device *cdev;
	int ret;

	cdev = to_mlx5_device(eal_dev);
	if (!cdev)
		return -ENODEV;
	/* Matching device found, cleanup and unload drivers. */
	ret = drivers_remove(cdev, cdev->classes_loaded);
	if (ret == 0)
		mlx5_common_dev_release(cdev);
	return ret;
}

/**
 * Callback to DMA map external memory to a device.
 *
 * @param rte_dev
 *   Pointer to the generic device.
 * @param addr
 *   Starting virtual address of memory to be mapped.
 * @param iova
 *   Starting IOVA address of memory to be mapped.
 * @param len
 *   Length of memory segment being mapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_common_dev_dma_map(struct rte_device *rte_dev, void *addr,
			uint64_t iova __rte_unused, size_t len)
{
	struct mlx5_common_device *dev;
	struct mlx5_mr *mr;

	dev = to_mlx5_device(rte_dev);
	if (!dev) {
		DRV_LOG(WARNING,
			"Unable to find matching mlx5 device to device %s",
			rte_dev->name);
		rte_errno = ENODEV;
		return -1;
	}
	mr = mlx5_create_mr_ext(dev->pd, (uintptr_t)addr, len,
				SOCKET_ID_ANY, dev->mr_scache.reg_mr_cb);
	if (!mr) {
		DRV_LOG(WARNING, "Device %s unable to DMA map", rte_dev->name);
		rte_errno = EINVAL;
		return -1;
	}
	rte_rwlock_write_lock(&dev->mr_scache.rwlock);
	LIST_INSERT_HEAD(&dev->mr_scache.mr_list, mr, mr);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(&dev->mr_scache, mr);
	rte_rwlock_write_unlock(&dev->mr_scache.rwlock);
	return 0;
}

/**
 * Callback to DMA unmap external memory from a device.
 *
 * @param rte_dev
 *   Pointer to the generic device.
 * @param addr
 *   Starting virtual address of memory to be unmapped.
 * @param iova
 *   Starting IOVA address of memory to be unmapped.
 * @param len
 *   Length of memory segment being unmapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_common_dev_dma_unmap(struct rte_device *rte_dev, void *addr,
			  uint64_t iova __rte_unused, size_t len __rte_unused)
{
	struct mlx5_common_device *dev;
	struct mr_cache_entry entry;
	struct mlx5_mr *mr;

	dev = to_mlx5_device(rte_dev);
	if (!dev) {
		DRV_LOG(WARNING,
			"Unable to find matching mlx5 device to device %s.",
			rte_dev->name);
		rte_errno = ENODEV;
		return -1;
	}
	rte_rwlock_read_lock(&dev->mr_scache.rwlock);
	mr = mlx5_mr_lookup_list(&dev->mr_scache, &entry, (uintptr_t)addr);
	if (!mr) {
		rte_rwlock_read_unlock(&dev->mr_scache.rwlock);
		DRV_LOG(WARNING,
			"Address 0x%" PRIxPTR " wasn't registered to device %s",
			(uintptr_t)addr, rte_dev->name);
		rte_errno = EINVAL;
		return -1;
	}
	LIST_REMOVE(mr, mr);
	DRV_LOG(DEBUG, "MR(%p) is removed from list.", (void *)mr);
	mlx5_mr_free(mr, dev->mr_scache.dereg_mr_cb);
	mlx5_mr_rebuild_cache(&dev->mr_scache);
	/*
	 * No explicit wmb is needed after updating dev_gen because the
	 * store-release ordering in the unlock provides the implicit
	 * barrier at the software-visible level.
	 */
	++dev->mr_scache.dev_gen;
	DRV_LOG(DEBUG, "Broadcasting local cache flush, gen=%d.",
		dev->mr_scache.dev_gen);
	rte_rwlock_read_unlock(&dev->mr_scache.rwlock);
	return 0;
}

void
mlx5_class_driver_register(struct mlx5_class_driver *driver)
{
	mlx5_common_driver_on_register_pci(driver);
	TAILQ_INSERT_TAIL(&drivers_list, driver, next);
}

static void mlx5_common_driver_init(void)
{
	mlx5_common_pci_init();
#ifdef RTE_EXEC_ENV_LINUX
	mlx5_common_auxiliary_init();
#endif
}

static bool mlx5_common_initialized;

/**
 * One-time initialization routine for the run-time dependency on the glue
 * library shared by multiple PMDs. Each mlx5 PMD that depends on the
 * mlx5_common module must invoke it from its constructor.
 */
void
mlx5_common_init(void)
{
	if (mlx5_common_initialized)
		return;

	pthread_mutex_init(&devices_list_lock, NULL);
	mlx5_glue_constructor();
	mlx5_common_driver_init();
	mlx5_common_initialized = true;
}

/**
 * This function is responsible for initializing the variable
 * haswell_broadwell_cpu: it checks whether the CPU is Intel and reads the
 * data returned from mlx5_cpu_id(). Since Haswell and Broadwell CPUs do
 * not show improved performance with relaxed ordering, the CPU type must
 * be checked before deciding whether to enable RO. The variable is set to
 * 1 for Haswell or Broadwell CPUs and to 0 otherwise.
 */
RTE_INIT_PRIO(mlx5_is_haswell_broadwell_cpu, LOG)
{
#ifdef RTE_ARCH_X86_64
	unsigned int broadwell_models[4] = {0x3d, 0x47, 0x4F, 0x56};
	unsigned int haswell_models[4] = {0x3c, 0x3f, 0x45, 0x46};
	unsigned int i, model, family, brand_id, vendor;
	unsigned int signature_intel_ebx = 0x756e6547;
	unsigned int extended_model;
	unsigned int eax = 0;
	unsigned int ebx = 0;
	unsigned int ecx = 0;
	unsigned int edx = 0;
	int max_level;

	mlx5_cpu_id(0, &eax, &ebx, &ecx, &edx);
	vendor = ebx;
	max_level = eax;
	if (max_level < 1) {
		haswell_broadwell_cpu = 0;
		return;
	}
	mlx5_cpu_id(1, &eax, &ebx, &ecx, &edx);
	model = (eax >> 4) & 0x0f;
	family = (eax >> 8) & 0x0f;
	brand_id = ebx & 0xff;
	extended_model = (eax >> 12) & 0xf0;
	/* Check if the processor is Haswell or Broadwell. */
	if (vendor == signature_intel_ebx) {
		if (family == 0x06)
			model += extended_model;
		if (brand_id == 0 && family == 0x6) {
			for (i = 0; i < RTE_DIM(broadwell_models); i++)
				if (model == broadwell_models[i]) {
					haswell_broadwell_cpu = 1;
					return;
				}
			for (i = 0; i < RTE_DIM(haswell_models); i++)
				if (model == haswell_models[i]) {
					haswell_broadwell_cpu = 1;
					return;
				}
		}
	}
#endif
	haswell_broadwell_cpu = 0;
}
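
/*
 * Worked example (comment only): CPUID leaf 1 on a Broadwell-EP core may
 * return eax = 0x406f1 (illustrative value). Then:
 *
 *   model          = (eax >> 4) & 0x0f  = 0x0f
 *   family         = (eax >> 8) & 0x0f  = 0x06
 *   extended_model = (eax >> 12) & 0xf0 = 0x40
 *
 * so the displayed model is 0x0f + 0x40 = 0x4f, which matches the
 * broadwell_models table above and sets haswell_broadwell_cpu to 1.
 */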

/**
 * Allocate the User Access Region with DevX on specified device.
 * This routine handles the following UAR allocation issues:
 *
 * - Try to allocate the UAR with the most appropriate memory mapping
 *   type from the ones supported by the host.
 *
 * - Try to allocate the UAR with a non-NULL base address. OFED 5.0.x and
 *   upstream rdma_core before v29 returned NULL as the UAR base address
 *   if the UAR was not the first object in the UAR page.
 *   This caused PMD failures, so we retry the allocation until we get the
 *   first UAR with a non-NULL base address.
 *
 * @param [in] cdev
 *   Pointer to mlx5 device structure to perform allocation on its context.
 *
 * @return
 *   UAR object pointer on success, NULL otherwise and rte_errno is set.
 */
static void *
mlx5_devx_alloc_uar(struct mlx5_common_device *cdev)
{
	void *uar = NULL;
	uint32_t retry, uar_mapping;
	void *base_addr;

	for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
		/* Control the mapping type according to the settings. */
		uar_mapping = (cdev->config.dbnc == MLX5_TXDB_NCACHED) ?
			  MLX5DV_UAR_ALLOC_TYPE_NC : MLX5DV_UAR_ALLOC_TYPE_BF;
#else
		/*
		 * It seems we have no way to control the memory mapping type
		 * for the UAR, the default "Write-Combining" type is assumed.
		 */
		uar_mapping = 0;
#endif
		uar = mlx5_glue->devx_alloc_uar(cdev->ctx, uar_mapping);
#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
		if (!uar && uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
			/*
			 * In some environments, like a virtual machine, the
			 * Write-Combining mapping might not be supported and
			 * UAR allocation fails. Try the "Non-Cached" mapping
			 * for this case.
			 */
			DRV_LOG(DEBUG, "Failed to allocate DevX UAR (BF)");
			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
			uar = mlx5_glue->devx_alloc_uar(cdev->ctx, uar_mapping);
		} else if (!uar && uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) {
			/*
			 * If Verbs/kernel does not support "Non-Cached",
			 * try the "Write-Combining" mapping.
			 */
			DRV_LOG(DEBUG, "Failed to allocate DevX UAR (NC)");
			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF;
			uar = mlx5_glue->devx_alloc_uar(cdev->ctx, uar_mapping);
		}
#endif
		if (!uar) {
			DRV_LOG(ERR, "Failed to allocate DevX UAR (BF/NC)");
			rte_errno = ENOMEM;
			goto exit;
		}
		base_addr = mlx5_os_get_devx_uar_base_addr(uar);
		if (base_addr)
			break;
		/*
		 * The UARs are allocated by rdma_core within the
		 * IB device context; on context closure all UARs
		 * will be freed, so there should be no leakage.
		 */
		DRV_LOG(DEBUG, "Retrying to allocate DevX UAR");
		uar = NULL;
	}
	/* Check whether we finally succeeded with valid UAR allocation. */
	if (!uar) {
		DRV_LOG(ERR, "Failed to allocate DevX UAR (NULL base)");
		rte_errno = ENOMEM;
	}
	/*
	 * Returning void * instead of struct mlx5dv_devx_uar * keeps
	 * compatibility with older rdma-core library headers.
	 */
exit:
	return uar;
}

void
mlx5_devx_uar_release(struct mlx5_uar *uar)
{
	if (uar->obj != NULL)
		mlx5_glue->devx_free_uar(uar->obj);
	memset(uar, 0, sizeof(*uar));
}

int
mlx5_devx_uar_prepare(struct mlx5_common_device *cdev, struct mlx5_uar *uar)
{
	off_t uar_mmap_offset;
	const size_t page_size = rte_mem_page_size();
	void *base_addr;
	void *uar_obj;

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	uar_obj = mlx5_devx_alloc_uar(cdev);
	if (uar_obj == NULL || mlx5_os_get_devx_uar_reg_addr(uar_obj) == NULL) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to allocate UAR.");
		return -rte_errno;
	}
	uar->obj = uar_obj;
	uar_mmap_offset = mlx5_os_get_devx_uar_mmap_offset(uar_obj);
	base_addr = mlx5_os_get_devx_uar_base_addr(uar_obj);
	uar->dbnc = mlx5_db_map_type_get(uar_mmap_offset, page_size);
	uar->bf_db.db = mlx5_os_get_devx_uar_reg_addr(uar_obj);
	uar->cq_db.db = RTE_PTR_ADD(base_addr, MLX5_CQ_DOORBELL);
#ifndef RTE_ARCH_64
	/* Initialize UAR access locks for 32-bit implementations. */
	rte_spinlock_init(&uar->bf_sl);
	rte_spinlock_init(&uar->cq_sl);
	uar->bf_db.sl_p = &uar->bf_sl;
	uar->cq_db.sl_p = &uar->cq_sl;
#endif /* RTE_ARCH_64 */
	return 0;
}
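
/*
 * Illustrative usage (comment only, assuming a DevX-capable cdev): a class
 * driver pairs the two helpers above around queue creation:
 *
 *   struct mlx5_uar uar;
 *
 *   if (mlx5_devx_uar_prepare(cdev, &uar) != 0)
 *           return -rte_errno;
 *   ... ring doorbells through uar.bf_db.db and uar.cq_db.db ...
 *   mlx5_devx_uar_release(&uar);
 */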

RTE_PMD_EXPORT_NAME(mlx5_common_driver, __COUNTER__);