/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>

#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_class.h>
#include <rte_malloc.h>
#include <rte_eal_paging.h>

#include "mlx5_common.h"
#include "mlx5_common_os.h"
#include "mlx5_common_mp.h"
#include "mlx5_common_log.h"
#include "mlx5_common_defs.h"
#include "mlx5_common_private.h"

uint8_t haswell_broadwell_cpu;

/* In case of an x86_64 Intel processor, check whether relaxed
 * ordering should be used.
 */
#ifdef RTE_ARCH_X86_64
/**
 * This function returns processor identification and feature information
 * to the user.
 *
 * @param eax, ebx, ecx, edx
 *   Pointers to the registers that will hold the CPU information.
 * @param level
 *   The main category of information returned (CPUID leaf).
 */
static inline void mlx5_cpu_id(unsigned int level,
                               unsigned int *eax, unsigned int *ebx,
                               unsigned int *ecx, unsigned int *edx)
{
        __asm__("cpuid\n\t"
                : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
                : "0" (level));
}
#endif

RTE_LOG_REGISTER_DEFAULT(mlx5_common_logtype, NOTICE)

/* Head of list of drivers. */
static TAILQ_HEAD(mlx5_drivers, mlx5_class_driver) drivers_list =
                                TAILQ_HEAD_INITIALIZER(drivers_list);

/* Head of devices. */
static TAILQ_HEAD(mlx5_devices, mlx5_common_device) devices_list =
                                TAILQ_HEAD_INITIALIZER(devices_list);
static pthread_mutex_t devices_list_lock;

static const struct {
        const char *name;
        unsigned int drv_class;
} mlx5_classes[] = {
        { .name = "vdpa", .drv_class = MLX5_CLASS_VDPA },
        { .name = "eth", .drv_class = MLX5_CLASS_ETH },
        /* Keep class "net" for backward compatibility. */
        { .name = "net", .drv_class = MLX5_CLASS_ETH },
        { .name = "regex", .drv_class = MLX5_CLASS_REGEX },
        { .name = "compress", .drv_class = MLX5_CLASS_COMPRESS },
        { .name = "crypto", .drv_class = MLX5_CLASS_CRYPTO },
};

static int
class_name_to_value(const char *class_name)
{
        unsigned int i;

        for (i = 0; i < RTE_DIM(mlx5_classes); i++) {
                if (strcmp(class_name, mlx5_classes[i].name) == 0)
                        return mlx5_classes[i].drv_class;
        }
        return -EINVAL;
}

static struct mlx5_class_driver *
driver_get(uint32_t class)
{
        struct mlx5_class_driver *driver;

        TAILQ_FOREACH(driver, &drivers_list, next) {
                if ((uint32_t)driver->drv_class == class)
                        return driver;
        }
        return NULL;
}

/**
 * Verify and store value for devargs.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_common_args_check_handler(const char *key, const char *val, void *opaque)
{
        struct mlx5_common_dev_config *config = opaque;
        signed long tmp;

        errno = 0;
        tmp = strtol(val, NULL, 0);
        if (errno) {
                rte_errno = errno;
                DRV_LOG(WARNING, "%s: \"%s\" is an invalid integer.", key, val);
                return -rte_errno;
        }
        if (strcmp(key, "tx_db_nc") == 0) {
                if (tmp != MLX5_TXDB_CACHED &&
                    tmp != MLX5_TXDB_NCACHED &&
                    tmp != MLX5_TXDB_HEURISTIC) {
                        DRV_LOG(ERR, "Invalid Tx doorbell mapping parameter.");
                        rte_errno = EINVAL;
                        return -rte_errno;
                }
                config->dbnc = tmp;
        } else if (strcmp(key, "mr_ext_memseg_en") == 0) {
                config->mr_ext_memseg_en = !!tmp;
        } else if (strcmp(key, "mr_mempool_reg_en") == 0) {
                config->mr_mempool_reg_en = !!tmp;
        } else if (strcmp(key, "sys_mem_en") == 0) {
                config->sys_mem_en = !!tmp;
        }
        return 0;
}

/**
 * Parse common device parameters.
 *
 * @param devargs
 *   Device arguments structure.
 * @param config
 *   Pointer to device configuration structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_common_config_get(struct rte_devargs *devargs,
                       struct mlx5_common_dev_config *config)
{
        struct rte_kvargs *kvlist;
        int ret = 0;

        /* Set defaults. */
        config->mr_ext_memseg_en = 1;
        config->mr_mempool_reg_en = 1;
        config->sys_mem_en = 0;
        config->dbnc = MLX5_ARG_UNSET;
        if (devargs == NULL)
                return 0;
        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL) {
                rte_errno = EINVAL;
                return -rte_errno;
        }
        ret = rte_kvargs_process(kvlist, NULL, mlx5_common_args_check_handler,
                                 config);
        if (ret)
                ret = -rte_errno;
        rte_kvargs_free(kvlist);
        DRV_LOG(DEBUG, "mr_ext_memseg_en is %u.", config->mr_ext_memseg_en);
        DRV_LOG(DEBUG, "mr_mempool_reg_en is %u.", config->mr_mempool_reg_en);
        DRV_LOG(DEBUG, "sys_mem_en is %u.", config->sys_mem_en);
        DRV_LOG(DEBUG, "Tx doorbell mapping parameter is %d.", config->dbnc);
        return ret;
}
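
/*
 * Illustrative devargs consumed by the parser above (the PCI address is
 * a placeholder):
 *   -a 0000:08:00.0,mr_ext_memseg_en=0,mr_mempool_reg_en=1,tx_db_nc=1
 */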

static int
devargs_class_handler(__rte_unused const char *key,
                      const char *class_names, void *opaque)
{
        int *ret = opaque;
        int class_val;
        char *scratch;
        char *found;
        char *refstr = NULL;

        *ret = 0;
        scratch = strdup(class_names);
        if (scratch == NULL) {
                *ret = -ENOMEM;
                return *ret;
        }
        found = strtok_r(scratch, ":", &refstr);
        if (found == NULL)
                /* Empty string. */
                goto err;
        do {
                /* Extract each individual class name. Multiple
                 * classes can be supplied as class=net:regex:foo:bar.
                 */
                class_val = class_name_to_value(found);
                /* Check if it's a valid class. */
                if (class_val < 0) {
                        *ret = -EINVAL;
                        goto err;
                }
                *ret |= class_val;
                found = strtok_r(NULL, ":", &refstr);
        } while (found != NULL);
err:
        free(scratch);
        if (*ret < 0)
                DRV_LOG(ERR, "Invalid mlx5 class options: %s.", class_names);
        return *ret;
}

static int
parse_class_options(const struct rte_devargs *devargs)
{
        struct rte_kvargs *kvlist;
        int ret = 0;

        if (devargs == NULL)
                return 0;
        if (devargs->cls != NULL && devargs->cls->name != NULL)
                /* Global syntax, only one class type. */
                return class_name_to_value(devargs->cls->name);
        /* Legacy devargs support multiple classes. */
        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return 0;
        rte_kvargs_process(kvlist, RTE_DEVARGS_KEY_CLASS,
                           devargs_class_handler, &ret);
        rte_kvargs_free(kvlist);
        return ret;
}
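
/*
 * Both syntaxes resolve to the same class mask, e.g. (addresses are
 * placeholders):
 *   Legacy devargs: -a 0000:08:00.0,class=eth:regex
 *   Global syntax:  -a bus=pci,addr=08:00.0/class=eth
 * The global syntax carries exactly one class per device.
 */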

static const unsigned int mlx5_class_invalid_combinations[] = {
        MLX5_CLASS_ETH | MLX5_CLASS_VDPA,
        /* New class combination should be added here. */
};

static int
is_valid_class_combination(uint32_t user_classes)
{
        unsigned int i;

        /* Verify if user specified unsupported combination. */
        for (i = 0; i < RTE_DIM(mlx5_class_invalid_combinations); i++) {
                if ((mlx5_class_invalid_combinations[i] & user_classes) ==
                    mlx5_class_invalid_combinations[i])
                        return -EINVAL;
        }
        /* No invalid class combination found. */
        return 0;
}
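
/*
 * For example, "class=eth:vdpa" matches the first entry of the table
 * above and is rejected, while "class=eth:regex" matches none and passes.
 */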

static bool
mlx5_bus_match(const struct mlx5_class_driver *drv,
               const struct rte_device *dev)
{
        if (mlx5_dev_is_pci(dev))
                return mlx5_dev_pci_match(drv, dev);
        return true;
}

static struct mlx5_common_device *
to_mlx5_device(const struct rte_device *rte_dev)
{
        struct mlx5_common_device *cdev;

        TAILQ_FOREACH(cdev, &devices_list, next) {
                if (rte_dev == cdev->dev)
                        return cdev;
        }
        return NULL;
}

int
mlx5_dev_to_pci_str(const struct rte_device *dev, char *addr, size_t size)
{
        struct rte_pci_addr pci_addr = { 0 };
        int ret;

        if (mlx5_dev_is_pci(dev)) {
                /* Input might be <BDF>, format PCI address to <DBDF>. */
                ret = rte_pci_addr_parse(dev->name, &pci_addr);
                if (ret != 0)
                        return -ENODEV;
                rte_pci_device_name(&pci_addr, addr, size);
                return 0;
        }
#ifdef RTE_EXEC_ENV_LINUX
        return mlx5_auxiliary_get_pci_str(RTE_DEV_TO_AUXILIARY_CONST(dev),
                                          addr, size);
#else
        rte_errno = ENODEV;
        return -rte_errno;
#endif
}

/**
 * Register the mempool for the protection domain.
 *
 * @param cdev
 *   Pointer to the mlx5 common device.
 * @param mp
 *   Mempool being registered.
 * @param is_extmem
 *   Whether the mempool resides in external memory.
 *
 * @return
 *   0 on success, (-1) on failure and rte_errno is set.
 */
static int
mlx5_dev_mempool_register(struct mlx5_common_device *cdev,
                          struct rte_mempool *mp, bool is_extmem)
{
        return mlx5_mr_mempool_register(cdev, mp, is_extmem);
}

/**
 * Unregister the mempool from the protection domain.
 *
 * @param cdev
 *   Pointer to the mlx5 common device.
 * @param mp
 *   Mempool being unregistered.
 */
static void
mlx5_dev_mempool_unregister(struct mlx5_common_device *cdev,
                            struct rte_mempool *mp)
{
        if (mlx5_mr_mempool_unregister(cdev, mp) < 0)
                DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s",
                        mp->name, cdev->pd, rte_strerror(rte_errno));
}

/**
 * rte_mempool_walk() callback to register mempools for the protection domain.
 *
 * @param mp
 *   The mempool being walked.
 * @param arg
 *   Pointer to the device shared context.
 */
static void
mlx5_dev_mempool_register_cb(struct rte_mempool *mp, void *arg)
{
        struct mlx5_common_device *cdev = arg;
        int ret;

        ret = mlx5_dev_mempool_register(cdev, mp, false);
        if (ret < 0 && rte_errno != EEXIST)
                DRV_LOG(ERR,
                        "Failed to register existing mempool %s for PD %p: %s",
                        mp->name, cdev->pd, rte_strerror(rte_errno));
}

/**
 * rte_mempool_walk() callback to unregister mempools
 * from the protection domain.
 *
 * @param mp
 *   The mempool being walked.
 * @param arg
 *   Pointer to the device shared context.
 */
static void
mlx5_dev_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
{
        mlx5_dev_mempool_unregister((struct mlx5_common_device *)arg, mp);
}

/**
 * Mempool life cycle callback for mlx5 common devices.
 *
 * @param event
 *   Mempool life cycle event.
 * @param mp
 *   Associated mempool.
 * @param arg
 *   Pointer to a device shared context.
 */
static void
mlx5_dev_mempool_event_cb(enum rte_mempool_event event, struct rte_mempool *mp,
                          void *arg)
{
        struct mlx5_common_device *cdev = arg;

        switch (event) {
        case RTE_MEMPOOL_EVENT_READY:
                if (mlx5_dev_mempool_register(cdev, mp, false) < 0)
                        DRV_LOG(ERR,
                                "Failed to register new mempool %s for PD %p: %s",
                                mp->name, cdev->pd, rte_strerror(rte_errno));
                break;
        case RTE_MEMPOOL_EVENT_DESTROY:
                mlx5_dev_mempool_unregister(cdev, mp);
                break;
        }
}

int
mlx5_dev_mempool_subscribe(struct mlx5_common_device *cdev)
{
        int ret = 0;

        if (!cdev->config.mr_mempool_reg_en)
                return 0;
        rte_rwlock_write_lock(&cdev->mr_scache.mprwlock);
        if (cdev->mr_scache.mp_cb_registered)
                goto exit;
        /* Callback for this device may be already registered. */
        ret = rte_mempool_event_callback_register(mlx5_dev_mempool_event_cb,
                                                  cdev);
        if (ret != 0 && rte_errno != EEXIST)
                goto exit;
        ret = 0;
        /* Register mempools only once for this device. */
        rte_mempool_walk(mlx5_dev_mempool_register_cb, cdev);
        cdev->mr_scache.mp_cb_registered = 1;
exit:
        rte_rwlock_write_unlock(&cdev->mr_scache.mprwlock);
        return ret;
}
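
/*
 * The walk above registers the mempools existing at subscription time;
 * the event callback covers mempools created later, so together they keep
 * the MR cache complete.
 */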

void
mlx5_dev_mempool_unsubscribe(struct mlx5_common_device *cdev)
{
        int ret;

        if (!cdev->mr_scache.mp_cb_registered ||
            !cdev->config.mr_mempool_reg_en)
                return;
        /* Stop watching for mempool events and unregister all mempools. */
        ret = rte_mempool_event_callback_unregister(mlx5_dev_mempool_event_cb,
                                                    cdev);
        if (ret == 0)
                rte_mempool_walk(mlx5_dev_mempool_unregister_cb, cdev);
}

/**
 * Callback for memory event.
 *
 * @param event_type
 *   Memory event type.
 * @param addr
 *   Address of the memory.
 * @param len
 *   Size of the memory.
 */
static void
mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
                     size_t len, void *arg __rte_unused)
{
        struct mlx5_common_device *cdev;

        /* Must be called from the primary process. */
        MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
        switch (event_type) {
        case RTE_MEM_EVENT_FREE:
                pthread_mutex_lock(&devices_list_lock);
                /* Iterate all the existing mlx5 devices. */
                TAILQ_FOREACH(cdev, &devices_list, next)
                        mlx5_free_mr_by_addr(&cdev->mr_scache,
                                             mlx5_os_get_ctx_device_name(cdev->ctx),
                                             addr, len);
                pthread_mutex_unlock(&devices_list_lock);
                break;
        case RTE_MEM_EVENT_ALLOC:
        default:
                break;
        }
}

/**
 * Uninitialize all HW global resources of the device context.
 *
 * @param cdev
 *   Pointer to mlx5 device structure.
 */
static void
mlx5_dev_hw_global_release(struct mlx5_common_device *cdev)
{
        if (cdev->pd != NULL) {
                claim_zero(mlx5_os_dealloc_pd(cdev->pd));
                cdev->pd = NULL;
        }
        if (cdev->ctx != NULL) {
                claim_zero(mlx5_glue->close_device(cdev->ctx));
                cdev->ctx = NULL;
        }
}

/**
 * Initialize all HW global resources of the device context.
 *
 * @param cdev
 *   Pointer to mlx5 device structure.
 * @param classes
 *   Chosen classes coming from user device arguments.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_dev_hw_global_prepare(struct mlx5_common_device *cdev, uint32_t classes)
{
        int ret;

        /* Create context device. */
        ret = mlx5_os_open_device(cdev, classes);
        if (ret < 0)
                return ret;
        /* Allocate Protection Domain object and extract its pdn. */
        ret = mlx5_os_pd_create(cdev);
        if (ret)
                goto error;
        /* All actions taken below are relevant only when DevX is supported. */
        if (cdev->config.devx == 0)
                return 0;
        /* Query HCA attributes. */
        ret = mlx5_devx_cmd_query_hca_attr(cdev->ctx, &cdev->config.hca_attr);
        if (ret) {
                DRV_LOG(ERR, "Unable to read HCA capabilities.");
                rte_errno = ENOTSUP;
                goto error;
        }
        return 0;
error:
        mlx5_dev_hw_global_release(cdev);
        return ret;
}

static void
mlx5_common_dev_release(struct mlx5_common_device *cdev)
{
        pthread_mutex_lock(&devices_list_lock);
        TAILQ_REMOVE(&devices_list, cdev, next);
        pthread_mutex_unlock(&devices_list_lock);
        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                if (TAILQ_EMPTY(&devices_list))
                        rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
                                                          NULL);
                mlx5_dev_mempool_unsubscribe(cdev);
                mlx5_mr_release_cache(&cdev->mr_scache);
                mlx5_dev_hw_global_release(cdev);
        }
        rte_free(cdev);
}

static struct mlx5_common_device *
mlx5_common_dev_create(struct rte_device *eal_dev, uint32_t classes)
{
        struct mlx5_common_device *cdev;
        int ret;

        cdev = rte_zmalloc("mlx5_common_device", sizeof(*cdev), 0);
        if (!cdev) {
                DRV_LOG(ERR, "Device allocation failure.");
                rte_errno = ENOMEM;
                return NULL;
        }
        cdev->dev = eal_dev;
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                goto exit;
        /* Parse device parameters. */
        ret = mlx5_common_config_get(eal_dev->devargs, &cdev->config);
        if (ret < 0) {
                DRV_LOG(ERR, "Failed to process device arguments: %s",
                        strerror(rte_errno));
                rte_free(cdev);
                return NULL;
        }
        mlx5_malloc_mem_select(cdev->config.sys_mem_en);
        /* Initialize all HW global resources of the device context. */
        ret = mlx5_dev_hw_global_prepare(cdev, classes);
        if (ret) {
                DRV_LOG(ERR, "Failed to initialize device context.");
                rte_free(cdev);
                return NULL;
        }
        /* Initialize global MR cache resources and update its functions. */
        ret = mlx5_mr_create_cache(&cdev->mr_scache, eal_dev->numa_node);
        if (ret) {
                DRV_LOG(ERR, "Failed to initialize global MR share cache.");
                mlx5_dev_hw_global_release(cdev);
                rte_free(cdev);
                return NULL;
        }
        /* Register callback function for global shared MR cache management. */
        if (TAILQ_EMPTY(&devices_list))
                rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
                                                mlx5_mr_mem_event_cb, NULL);
exit:
        pthread_mutex_lock(&devices_list_lock);
        TAILQ_INSERT_HEAD(&devices_list, cdev, next);
        pthread_mutex_unlock(&devices_list_lock);
        return cdev;
}
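
/*
 * Note that a secondary process skips the devargs parsing and HW setup
 * above (goto exit) and only enlists the device, relying on the resources
 * already initialized by the primary process.
 */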

static int
drivers_remove(struct mlx5_common_device *cdev, uint32_t enabled_classes)
{
        struct mlx5_class_driver *driver;
        int local_ret = -ENODEV;
        unsigned int i = 0;
        int ret = 0;

        enabled_classes &= cdev->classes_loaded;
        while (enabled_classes) {
                driver = driver_get(RTE_BIT64(i));
                if (driver != NULL) {
                        local_ret = driver->remove(cdev);
                        if (local_ret == 0)
                                cdev->classes_loaded &= ~RTE_BIT64(i);
                        else if (ret == 0)
                                ret = local_ret;
                }
                enabled_classes &= ~RTE_BIT64(i);
                i++;
        }
        if (local_ret != 0 && ret == 0)
                ret = local_ret;
        return ret;
}

static int
drivers_probe(struct mlx5_common_device *cdev, uint32_t user_classes)
{
        struct mlx5_class_driver *driver;
        uint32_t enabled_classes = 0;
        bool already_loaded;
        int ret = -EINVAL;

        TAILQ_FOREACH(driver, &drivers_list, next) {
                if ((driver->drv_class & user_classes) == 0)
                        continue;
                if (!mlx5_bus_match(driver, cdev->dev))
                        continue;
                already_loaded = cdev->classes_loaded & driver->drv_class;
                if (already_loaded && driver->probe_again == 0) {
                        DRV_LOG(ERR, "Device %s is already probed",
                                cdev->dev->name);
                        ret = -EEXIST;
                        goto probe_err;
                }
                ret = driver->probe(cdev);
                if (ret < 0) {
                        DRV_LOG(ERR, "Failed to load driver %s",
                                driver->name);
                        goto probe_err;
                }
                enabled_classes |= driver->drv_class;
        }
        cdev->classes_loaded |= enabled_classes;
        return 0;
probe_err:
        /* Only unload drivers which were enabled in this probe instance. */
        drivers_remove(cdev, enabled_classes);
        return ret;
}

int
mlx5_common_dev_probe(struct rte_device *eal_dev)
{
        struct mlx5_common_device *cdev;
        uint32_t classes = 0;
        bool new_device = false;
        int ret;

        DRV_LOG(INFO, "probe device \"%s\".", eal_dev->name);
        ret = parse_class_options(eal_dev->devargs);
        if (ret < 0) {
                DRV_LOG(ERR, "Unsupported mlx5 class type: %s",
                        eal_dev->devargs->args);
                return ret;
        }
        classes = ret;
        if (classes == 0)
                /* Default to net class. */
                classes = MLX5_CLASS_ETH;
        cdev = to_mlx5_device(eal_dev);
        if (!cdev) {
                cdev = mlx5_common_dev_create(eal_dev, classes);
                if (!cdev)
                        return -ENOMEM;
                new_device = true;
        }
        /*
         * Validate combination here.
         * For a new device, the classes_loaded field is 0 and it checks only
         * the classes given as user device arguments.
         */
        ret = is_valid_class_combination(classes | cdev->classes_loaded);
        if (ret != 0) {
                DRV_LOG(ERR, "Unsupported mlx5 classes combination.");
                goto class_err;
        }
        ret = drivers_probe(cdev, classes);
        if (ret)
                goto class_err;
        return 0;
class_err:
        if (new_device)
                mlx5_common_dev_release(cdev);
        return ret;
}
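
/*
 * Probing the same device again with an additional class, e.g. first with
 * class=eth and later with class=eth:regex, reuses the existing common
 * device; drivers_probe() then loads only the classes that are not loaded
 * yet, or re-probes those whose drivers allow probe_again.
 */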

int
mlx5_common_dev_remove(struct rte_device *eal_dev)
{
        struct mlx5_common_device *cdev;
        int ret;

        cdev = to_mlx5_device(eal_dev);
        if (!cdev)
                return -ENODEV;
        /* Matching device found, cleanup and unload drivers. */
        ret = drivers_remove(cdev, cdev->classes_loaded);
        if (ret == 0)
                mlx5_common_dev_release(cdev);
        return ret;
}

/**
 * Callback to DMA map external memory to a device.
 *
 * @param rte_dev
 *   Pointer to the generic device.
 * @param addr
 *   Starting virtual address of memory to be mapped.
 * @param iova
 *   Starting IOVA address of memory to be mapped.
 * @param len
 *   Length of memory segment being mapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_common_dev_dma_map(struct rte_device *rte_dev, void *addr,
                        uint64_t iova __rte_unused, size_t len)
{
        struct mlx5_common_device *dev;
        struct mlx5_mr *mr;

        dev = to_mlx5_device(rte_dev);
        if (!dev) {
                DRV_LOG(WARNING,
                        "Unable to find matching mlx5 device to device %s",
                        rte_dev->name);
                rte_errno = ENODEV;
                return -1;
        }
        mr = mlx5_create_mr_ext(dev->pd, (uintptr_t)addr, len,
                                SOCKET_ID_ANY, dev->mr_scache.reg_mr_cb);
        if (!mr) {
                DRV_LOG(WARNING, "Device %s unable to DMA map", rte_dev->name);
                rte_errno = EINVAL;
                return -1;
        }
        rte_rwlock_write_lock(&dev->mr_scache.rwlock);
        LIST_INSERT_HEAD(&dev->mr_scache.mr_list, mr, mr);
        /* Insert to the global cache table. */
        mlx5_mr_insert_cache(&dev->mr_scache, mr);
        rte_rwlock_write_unlock(&dev->mr_scache.rwlock);
        return 0;
}

/**
 * Callback to DMA unmap external memory from a device.
 *
 * @param rte_dev
 *   Pointer to the generic device.
 * @param addr
 *   Starting virtual address of memory to be unmapped.
 * @param iova
 *   Starting IOVA address of memory to be unmapped.
 * @param len
 *   Length of memory segment being unmapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_common_dev_dma_unmap(struct rte_device *rte_dev, void *addr,
                          uint64_t iova __rte_unused, size_t len __rte_unused)
{
        struct mlx5_common_device *dev;
        struct mr_cache_entry entry;
        struct mlx5_mr *mr;

        dev = to_mlx5_device(rte_dev);
        if (!dev) {
                DRV_LOG(WARNING,
                        "Unable to find matching mlx5 device to device %s.",
                        rte_dev->name);
                rte_errno = ENODEV;
                return -1;
        }
        rte_rwlock_read_lock(&dev->mr_scache.rwlock);
        mr = mlx5_mr_lookup_list(&dev->mr_scache, &entry, (uintptr_t)addr);
        if (!mr) {
                rte_rwlock_read_unlock(&dev->mr_scache.rwlock);
                DRV_LOG(WARNING,
                        "Address 0x%" PRIxPTR " wasn't registered to device %s",
                        (uintptr_t)addr, rte_dev->name);
                rte_errno = EINVAL;
                return -1;
        }
        LIST_REMOVE(mr, mr);
        DRV_LOG(DEBUG, "MR(%p) is removed from list.", (void *)mr);
        mlx5_mr_free(mr, dev->mr_scache.dereg_mr_cb);
        mlx5_mr_rebuild_cache(&dev->mr_scache);
        /*
         * No explicit wmb is needed after updating dev_gen due to
         * store-release ordering in unlock that provides the
         * implicit barrier at the software visible level.
         */
        ++dev->mr_scache.dev_gen;
        DRV_LOG(DEBUG, "Broadcasting local cache flush, gen=%d.",
                dev->mr_scache.dev_gen);
        rte_rwlock_read_unlock(&dev->mr_scache.rwlock);
        return 0;
}

void
mlx5_class_driver_register(struct mlx5_class_driver *driver)
{
        mlx5_common_driver_on_register_pci(driver);
        TAILQ_INSERT_TAIL(&drivers_list, driver, next);
}

static void mlx5_common_driver_init(void)
{
        mlx5_common_pci_init();
#ifdef RTE_EXEC_ENV_LINUX
        mlx5_common_auxiliary_init();
#endif
}

static bool mlx5_common_initialized;

/**
 * One-time initialization routine for the run-time dependency on the glue
 * library, shared by multiple PMDs. Each mlx5 PMD that depends on the
 * mlx5_common module must invoke it in its constructor.
 */
void
mlx5_common_init(void)
{
        if (mlx5_common_initialized)
                return;

        pthread_mutex_init(&devices_list_lock, NULL);
        mlx5_glue_constructor();
        mlx5_common_driver_init();
        mlx5_common_initialized = true;
}

/**
 * This function initializes the variable haswell_broadwell_cpu by checking
 * whether the CPU is an Intel one and reading the data returned from
 * mlx5_cpu_id(). Since Haswell and Broadwell CPUs show no performance
 * improvement from relaxed ordering, the CPU type is checked before
 * deciding whether to enable RO. The variable is set to 1 if the CPU is
 * Haswell or Broadwell, and 0 otherwise.
 */
RTE_INIT_PRIO(mlx5_is_haswell_broadwell_cpu, LOG)
{
#ifdef RTE_ARCH_X86_64
        unsigned int broadwell_models[4] = {0x3d, 0x47, 0x4F, 0x56};
        unsigned int haswell_models[4] = {0x3c, 0x3f, 0x45, 0x46};
        unsigned int i, model, family, brand_id, vendor;
        unsigned int signature_intel_ebx = 0x756e6547;
        unsigned int extended_model;
        unsigned int eax = 0;
        unsigned int ebx = 0;
        unsigned int ecx = 0;
        unsigned int edx = 0;
        int max_level;

        mlx5_cpu_id(0, &eax, &ebx, &ecx, &edx);
        vendor = ebx;
        max_level = eax;
        if (max_level < 1) {
                haswell_broadwell_cpu = 0;
                return;
        }
        mlx5_cpu_id(1, &eax, &ebx, &ecx, &edx);
        model = (eax >> 4) & 0x0f;
        family = (eax >> 8) & 0x0f;
        brand_id = ebx & 0xff;
        extended_model = (eax >> 12) & 0xf0;
        /* Check if the processor is Haswell or Broadwell. */
        if (vendor == signature_intel_ebx) {
                if (family == 0x06)
                        model += extended_model;
                if (brand_id == 0 && family == 0x6) {
                        for (i = 0; i < RTE_DIM(broadwell_models); i++)
                                if (model == broadwell_models[i]) {
                                        haswell_broadwell_cpu = 1;
                                        return;
                                }
                        for (i = 0; i < RTE_DIM(haswell_models); i++)
                                if (model == haswell_models[i]) {
                                        haswell_broadwell_cpu = 1;
                                        return;
                                }
                }
        }
#endif
        haswell_broadwell_cpu = 0;
}
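
/*
 * Example: a Broadwell-EP host reports family 0x6 and model 0x4F, which
 * matches broadwell_models[2] above, so haswell_broadwell_cpu is set to 1
 * and relaxed ordering stays disabled there.
 */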

/**
 * Allocate the User Access Region with DevX on the specified device.
 * This routine handles the following UAR allocation issues:
 *
 *  - Try to allocate the UAR with the most appropriate memory mapping
 *    type from the ones supported by the host.
 *
 *  - Try to allocate the UAR with a non-NULL base address. OFED 5.0.x and
 *    upstream rdma-core before v29 returned NULL as the UAR base address
 *    if the UAR was not the first object in the UAR page.
 *    It caused the PMD failure and we should try to get another UAR till
 *    we get the first one with non-NULL base address returned.
 *
 * @param [in] cdev
 *   Pointer to mlx5 device structure to perform allocation on its context.
 *
 * @return
 *   UAR object pointer on success, NULL otherwise and rte_errno is set.
 */
static void *
mlx5_devx_alloc_uar(struct mlx5_common_device *cdev)
{
        void *uar = NULL;
        uint32_t retry, uar_mapping;
        void *base_addr;

        for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
                /* Control the mapping type according to the settings. */
                uar_mapping = (cdev->config.dbnc == MLX5_TXDB_NCACHED) ?
                              MLX5DV_UAR_ALLOC_TYPE_NC :
                              MLX5DV_UAR_ALLOC_TYPE_BF;
#else
                /*
                 * It seems we have no way to control the memory mapping type
                 * for the UAR, the default "Write-Combining" type is supposed.
                 */
                uar_mapping = 0;
#endif
                uar = mlx5_glue->devx_alloc_uar(cdev->ctx, uar_mapping);
#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
                if (!uar && uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
                        /*
                         * In some environments like virtual machines the
                         * Write-Combining mapping might not be supported
                         * and UAR allocation fails. Try the "Non-Cached"
                         * mapping for that case.
                         */
                        DRV_LOG(DEBUG, "Failed to allocate DevX UAR (BF)");
                        uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
                        uar = mlx5_glue->devx_alloc_uar(cdev->ctx, uar_mapping);
                } else if (!uar && uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) {
                        /*
                         * If Verbs/kernel does not support "Non-Cached",
                         * try the "Write-Combining".
                         */
                        DRV_LOG(DEBUG, "Failed to allocate DevX UAR (NC)");
                        uar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF;
                        uar = mlx5_glue->devx_alloc_uar(cdev->ctx, uar_mapping);
                }
#endif
                if (!uar) {
                        DRV_LOG(ERR, "Failed to allocate DevX UAR (BF/NC)");
                        rte_errno = ENOMEM;
                        goto exit;
                }
                base_addr = mlx5_os_get_devx_uar_base_addr(uar);
                if (base_addr)
                        break;
                /*
                 * The UARs are allocated by rdma_core within the
                 * IB device context, on context closure all UARs
                 * will be freed, should be no memory/object leakage.
                 */
                DRV_LOG(DEBUG, "Retrying to allocate DevX UAR");
                uar = NULL;
        }
        /* Check whether we finally succeeded with valid UAR allocation. */
        if (!uar) {
                DRV_LOG(ERR, "Failed to allocate DevX UAR (NULL base)");
                rte_errno = ENOMEM;
        }
        /*
         * void * is returned instead of struct mlx5dv_devx_uar * for
         * compatibility with older rdma-core library headers.
         */
exit:
        return uar;
}
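
/*
 * Mapping type recap: MLX5DV_UAR_ALLOC_TYPE_BF requests the regular
 * write-combining (BlueFlame) mapping and MLX5DV_UAR_ALLOC_TYPE_NC a
 * non-cached one; the fallbacks above handle hosts, e.g. some virtual
 * machines, where one of the types is unavailable.
 */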

void
mlx5_devx_uar_release(struct mlx5_uar *uar)
{
        if (uar->obj != NULL)
                mlx5_glue->devx_free_uar(uar->obj);
        memset(uar, 0, sizeof(*uar));
}

int
mlx5_devx_uar_prepare(struct mlx5_common_device *cdev, struct mlx5_uar *uar)
{
        off_t uar_mmap_offset;
        const size_t page_size = rte_mem_page_size();
        void *base_addr;
        void *uar_obj;

        if (page_size == (size_t)-1) {
                DRV_LOG(ERR, "Failed to get mem page size");
                rte_errno = ENOMEM;
                return -1;
        }
        uar_obj = mlx5_devx_alloc_uar(cdev);
        if (uar_obj == NULL || mlx5_os_get_devx_uar_reg_addr(uar_obj) == NULL) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to allocate UAR.");
                return -1;
        }
        uar->obj = uar_obj;
        uar_mmap_offset = mlx5_os_get_devx_uar_mmap_offset(uar_obj);
        base_addr = mlx5_os_get_devx_uar_base_addr(uar_obj);
        uar->dbnc = mlx5_db_map_type_get(uar_mmap_offset, page_size);
        uar->bf_db.db = mlx5_os_get_devx_uar_reg_addr(uar_obj);
        uar->cq_db.db = RTE_PTR_ADD(base_addr, MLX5_CQ_DOORBELL);
#ifndef RTE_ARCH_64
        /* Initialize doorbell locks used on 32-bit architectures. */
        rte_spinlock_init(&uar->bf_sl);
        rte_spinlock_init(&uar->cq_sl);
        uar->bf_db.sl_p = &uar->bf_sl;
        uar->cq_db.sl_p = &uar->cq_sl;
#endif /* RTE_ARCH_64 */
        return 0;
}
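
/*
 * The spinlocks initialized above are needed because 32-bit architectures
 * cannot issue the 64-bit doorbell write as a single store; the lock keeps
 * the two 32-bit halves of concurrent doorbells from interleaving.
 */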

RTE_PMD_EXPORT_NAME(mlx5_common_driver, __COUNTER__);