/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>

#include <rte_errno.h>
#include <rte_mempool.h>
#include <rte_class.h>
#include <rte_malloc.h>

#include "mlx5_common.h"
#include "mlx5_common_os.h"
#include "mlx5_common_log.h"
#include "mlx5_common_defs.h"
#include "mlx5_common_private.h"

uint8_t haswell_broadwell_cpu;

/* On x86_64 Intel processors, check whether relaxed
 * ordering should be used.
 */
#ifdef RTE_ARCH_X86_64
/**
 * This function returns processor identification and feature information
 * to the caller.
 *
 * @param eax, ebx, ecx, edx
 *   Pointers to the registers that will hold CPU information.
 * @param level
 *   The main category of information returned.
 */
static inline void mlx5_cpu_id(unsigned int level,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	/* Execute the CPUID instruction with the requested leaf in EAX. */
	__asm__("cpuid\n\t"
		: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		: "0" (level));
}
#endif
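
/*
 * Usage sketch: CPUID leaf 0 returns the vendor string in EBX:EDX:ECX and
 * leaf 1 returns the CPU signature in EAX, e.g.:
 *
 *	mlx5_cpu_id(0, &eax, &ebx, &ecx, &edx);
 *	vendor = ebx;	(0x756e6547, "Genu", on Intel CPUs)
 *	mlx5_cpu_id(1, &eax, &ebx, &ecx, &edx);
 *	model = (eax >> 4) & 0x0f;
 *
 * See mlx5_is_haswell_broadwell_cpu() below for the full decoding.
 */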

RTE_LOG_REGISTER_DEFAULT(mlx5_common_logtype, NOTICE)

/* Head of list of drivers. */
static TAILQ_HEAD(mlx5_drivers, mlx5_class_driver) drivers_list =
				TAILQ_HEAD_INITIALIZER(drivers_list);

/* Head of devices. */
static TAILQ_HEAD(mlx5_devices, mlx5_common_device) devices_list =
				TAILQ_HEAD_INITIALIZER(devices_list);
static pthread_mutex_t devices_list_lock;

static const struct {
	const char *name;
	unsigned int drv_class;
} mlx5_classes[] = {
	{ .name = "vdpa", .drv_class = MLX5_CLASS_VDPA },
	{ .name = "eth", .drv_class = MLX5_CLASS_ETH },
	/* Keep class "net" for backward compatibility. */
	{ .name = "net", .drv_class = MLX5_CLASS_ETH },
	{ .name = "regex", .drv_class = MLX5_CLASS_REGEX },
	{ .name = "compress", .drv_class = MLX5_CLASS_COMPRESS },
	{ .name = "crypto", .drv_class = MLX5_CLASS_CRYPTO },
};
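
/*
 * For illustration, this table maps devargs such as "class=eth" or the
 * legacy multi-class form "class=crypto:regex" (names separated by ':')
 * to MLX5_CLASS_* flag values; see devargs_class_handler() below.
 */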

static int
class_name_to_value(const char *class_name)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(mlx5_classes); i++) {
		if (strcmp(class_name, mlx5_classes[i].name) == 0)
			return mlx5_classes[i].drv_class;
	}
	return -EINVAL;
}

static struct mlx5_class_driver *
driver_get(uint32_t class)
{
	struct mlx5_class_driver *driver;

	TAILQ_FOREACH(driver, &drivers_list, next) {
		if ((uint32_t)driver->drv_class == class)
			return driver;
	}
	return NULL;
}

/**
 * Verify and store value for devargs.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_common_args_check_handler(const char *key, const char *val, void *opaque)
{
	struct mlx5_common_dev_config *config = opaque;
	signed long tmp;

	errno = 0;
	tmp = strtol(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		DRV_LOG(WARNING, "%s: \"%s\" is an invalid integer.", key, val);
		return -rte_errno;
	}
	if (strcmp(key, "tx_db_nc") == 0) {
		if (tmp != MLX5_TXDB_CACHED &&
		    tmp != MLX5_TXDB_NCACHED &&
		    tmp != MLX5_TXDB_HEURISTIC) {
			DRV_LOG(ERR, "Invalid Tx doorbell mapping parameter.");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->dbnc = tmp;
	} else if (strcmp(key, "mr_ext_memseg_en") == 0) {
		config->mr_ext_memseg_en = !!tmp;
	} else if (strcmp(key, "mr_mempool_reg_en") == 0) {
		config->mr_mempool_reg_en = !!tmp;
	} else if (strcmp(key, "sys_mem_en") == 0) {
		config->sys_mem_en = !!tmp;
	}
	return 0;
}
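
/*
 * For illustration, the keys above come from a devargs string such as:
 *
 *	-a 0000:03:00.0,tx_db_nc=1,mr_ext_memseg_en=0,sys_mem_en=1
 *
 * rte_kvargs_process() invokes this handler once per key/value pair.
 */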

/**
 * Parse common device parameters.
 *
 * @param devargs
 *   Device arguments structure.
 * @param config
 *   Pointer to device configuration structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_common_config_get(struct rte_devargs *devargs,
		       struct mlx5_common_dev_config *config)
{
	struct rte_kvargs *kvlist;
	int ret = 0;

	/* Set the common device configuration defaults. */
	config->mr_ext_memseg_en = 1;
	config->mr_mempool_reg_en = 1;
	config->sys_mem_en = 0;
	config->dbnc = MLX5_ARG_UNSET;
	if (devargs == NULL)
		return 0;
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	ret = rte_kvargs_process(kvlist, NULL, mlx5_common_args_check_handler,
				 config);
	if (ret)
		ret = -rte_errno;
	rte_kvargs_free(kvlist);
	DRV_LOG(DEBUG, "mr_ext_memseg_en is %u.", config->mr_ext_memseg_en);
	DRV_LOG(DEBUG, "mr_mempool_reg_en is %u.", config->mr_mempool_reg_en);
	DRV_LOG(DEBUG, "sys_mem_en is %u.", config->sys_mem_en);
	DRV_LOG(DEBUG, "Tx doorbell mapping parameter is %d.", config->dbnc);
	return ret;
}

static int
devargs_class_handler(__rte_unused const char *key,
		      const char *class_names, void *opaque)
{
	int *ret = opaque;
	int class_val;
	char *scratch;
	char *found;
	char *refstr = NULL;

	*ret = 0;
	scratch = strdup(class_names);
	if (scratch == NULL) {
		*ret = -ENOMEM;
		return *ret;
	}
	found = strtok_r(scratch, ":", &refstr);
	if (found == NULL)
		/* Empty string. */
		goto err;
	do {
		/* Extract each individual class name. Multiple
		 * classes can be supplied as class=net:regex:foo:bar.
		 */
		class_val = class_name_to_value(found);
		/* Check if it is a valid class. */
		if (class_val < 0) {
			*ret = -EINVAL;
			goto err;
		}
		*ret |= class_val;
		found = strtok_r(NULL, ":", &refstr);
	} while (found != NULL);
err:
	free(scratch);
	if (*ret < 0)
		DRV_LOG(ERR, "Invalid mlx5 class options: %s.\n", class_names);
	return *ret;
}

static int
parse_class_options(const struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	int ret = 0;

	if (devargs == NULL)
		return 0;
	if (devargs->cls != NULL && devargs->cls->name != NULL)
		/* Global syntax, only one class type. */
		return class_name_to_value(devargs->cls->name);
	/* Legacy devargs support multiple classes. */
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;
	rte_kvargs_process(kvlist, RTE_DEVARGS_KEY_CLASS,
			   devargs_class_handler, &ret);
	rte_kvargs_free(kvlist);
	return ret;
}

static const unsigned int mlx5_class_invalid_combinations[] = {
	MLX5_CLASS_ETH | MLX5_CLASS_VDPA,
	/* New invalid class combinations should be added here. */
};

static int
is_valid_class_combination(uint32_t user_classes)
{
	unsigned int i;

	/* Verify if the user specified an unsupported combination. */
	for (i = 0; i < RTE_DIM(mlx5_class_invalid_combinations); i++) {
		if ((mlx5_class_invalid_combinations[i] & user_classes) ==
		    mlx5_class_invalid_combinations[i])
			return -EINVAL;
	}
	/* No invalid class combination found. */
	return 0;
}
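
/*
 * Worked example: user_classes = MLX5_CLASS_ETH | MLX5_CLASS_VDPA ANDed
 * with the first table entry equals the entry itself, so it is rejected,
 * while MLX5_CLASS_ETH | MLX5_CLASS_REGEX matches no entry completely
 * and is accepted.
 */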

static bool
mlx5_bus_match(const struct mlx5_class_driver *drv,
	       const struct rte_device *dev)
{
	if (mlx5_dev_is_pci(dev))
		return mlx5_dev_pci_match(drv, dev);
	/* For non-PCI buses (e.g. auxiliary) any driver matches. */
	return true;
}

static struct mlx5_common_device *
to_mlx5_device(const struct rte_device *rte_dev)
{
	struct mlx5_common_device *cdev;

	TAILQ_FOREACH(cdev, &devices_list, next) {
		if (rte_dev == cdev->dev)
			return cdev;
	}
	return NULL;
}

int
mlx5_dev_to_pci_str(const struct rte_device *dev, char *addr, size_t size)
{
	struct rte_pci_addr pci_addr = { 0 };
	int ret;

	if (mlx5_dev_is_pci(dev)) {
		/* Input might be <BDF>, format PCI address to <DBDF>. */
		ret = rte_pci_addr_parse(dev->name, &pci_addr);
		if (ret != 0)
			return -ENODEV;
		rte_pci_device_name(&pci_addr, addr, size);
		return 0;
	}
#ifdef RTE_EXEC_ENV_LINUX
	return mlx5_auxiliary_get_pci_str(RTE_DEV_TO_AUXILIARY_CONST(dev),
					  addr, size);
#else
	rte_errno = ENODEV;
	return -rte_errno;
#endif
}
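
/*
 * For illustration, a short PCI name such as "03:00.0" (BDF) is expanded
 * here to the full "0000:03:00.0" (DBDF) form before being returned.
 */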

/**
 * Callback for memory event.
 *
 * @param event_type
 *   Memory event type.
 * @param addr
 *   Address of memory.
 * @param len
 *   Size of memory.
 */
static void
mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
		     size_t len, void *arg __rte_unused)
{
	struct mlx5_common_device *cdev;

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	switch (event_type) {
	case RTE_MEM_EVENT_FREE:
		pthread_mutex_lock(&devices_list_lock);
		/* Iterate all the existing mlx5 devices. */
		TAILQ_FOREACH(cdev, &devices_list, next)
			mlx5_free_mr_by_addr(&cdev->mr_scache,
					     mlx5_os_get_ctx_device_name
							(cdev->ctx),
					     addr, len);
		pthread_mutex_unlock(&devices_list_lock);
		break;
	case RTE_MEM_EVENT_ALLOC:
	default:
		break;
	}
}
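
/*
 * Note: only RTE_MEM_EVENT_FREE needs handling; memory about to be freed
 * must be dropped from every device's MR cache before the pages are
 * reused, while newly allocated memory is registered lazily on first use.
 */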

/**
 * Uninitialize all HW global resources of the device context.
 *
 * @param cdev
 *   Pointer to mlx5 device structure.
 */
static void
mlx5_dev_hw_global_release(struct mlx5_common_device *cdev)
{
	if (cdev->pd != NULL) {
		claim_zero(mlx5_os_dealloc_pd(cdev->pd));
		cdev->pd = NULL;
	}
	if (cdev->ctx != NULL) {
		claim_zero(mlx5_glue->close_device(cdev->ctx));
		cdev->ctx = NULL;
	}
}

/**
 * Initialize all HW global resources of the device context.
 *
 * @param cdev
 *   Pointer to mlx5 device structure.
 * @param classes
 *   Chosen classes coming from user device arguments.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_dev_hw_global_prepare(struct mlx5_common_device *cdev, uint32_t classes)
{
	int ret;

	/* Create the device context. */
	ret = mlx5_os_open_device(cdev, classes);
	if (ret < 0)
		return ret;
	/* Allocate Protection Domain object and extract its pdn. */
	ret = mlx5_os_pd_create(cdev);
	if (ret)
		goto error;
	/* All actions taken below are relevant only when DevX is supported. */
	if (cdev->config.devx == 0)
		return 0;
	/* Query HCA attributes. */
	ret = mlx5_devx_cmd_query_hca_attr(cdev->ctx, &cdev->config.hca_attr);
	if (ret) {
		DRV_LOG(ERR, "Unable to read HCA capabilities.");
		rte_errno = ENOTSUP;
		goto error;
	}
	return 0;
error:
	mlx5_dev_hw_global_release(cdev);
	return ret;
}
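
/*
 * Note: the error path above relies on mlx5_dev_hw_global_release()
 * checking each resource for NULL, so a partially prepared context
 * (e.g. device opened but PD allocation failed) is torn down safely.
 */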

static void
mlx5_common_dev_release(struct mlx5_common_device *cdev)
{
	pthread_mutex_lock(&devices_list_lock);
	TAILQ_REMOVE(&devices_list, cdev, next);
	pthread_mutex_unlock(&devices_list_lock);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* Unregister the callback when the last device is released. */
		if (TAILQ_EMPTY(&devices_list))
			rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
							  NULL);
		mlx5_mr_release_cache(&cdev->mr_scache);
		mlx5_dev_hw_global_release(cdev);
	}
	rte_free(cdev);
}

static struct mlx5_common_device *
mlx5_common_dev_create(struct rte_device *eal_dev, uint32_t classes)
{
	struct mlx5_common_device *cdev;
	int ret;

	cdev = rte_zmalloc("mlx5_common_device", sizeof(*cdev), 0);
	if (cdev == NULL) {
		DRV_LOG(ERR, "Device allocation failure.");
		rte_errno = ENOMEM;
		return NULL;
	}
	cdev->dev = eal_dev;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		goto exit;
	/* Parse device parameters. */
	ret = mlx5_common_config_get(eal_dev->devargs, &cdev->config);
	if (ret < 0) {
		DRV_LOG(ERR, "Failed to process device arguments: %s",
			strerror(rte_errno));
		rte_free(cdev);
		return NULL;
	}
	mlx5_malloc_mem_select(cdev->config.sys_mem_en);
	/* Initialize all HW global resources of the device context. */
	ret = mlx5_dev_hw_global_prepare(cdev, classes);
	if (ret) {
		DRV_LOG(ERR, "Failed to initialize device context.");
		rte_free(cdev);
		return NULL;
	}
	/* Initialize global MR cache resources and update its functions. */
	ret = mlx5_mr_create_cache(&cdev->mr_scache, eal_dev->numa_node);
	if (ret) {
		DRV_LOG(ERR, "Failed to initialize global MR share cache.");
		mlx5_dev_hw_global_release(cdev);
		rte_free(cdev);
		return NULL;
	}
	/* Register callback function for global shared MR cache management. */
	if (TAILQ_EMPTY(&devices_list))
		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
						mlx5_mr_mem_event_cb, NULL);
exit:
	pthread_mutex_lock(&devices_list_lock);
	TAILQ_INSERT_HEAD(&devices_list, cdev, next);
	pthread_mutex_unlock(&devices_list_lock);
	return cdev;
}

static int
drivers_remove(struct mlx5_common_device *cdev, uint32_t enabled_classes)
{
	struct mlx5_class_driver *driver;
	int local_ret = -ENODEV;
	unsigned int i = 0;
	int ret = 0;

	enabled_classes &= cdev->classes_loaded;
	while (enabled_classes) {
		driver = driver_get(RTE_BIT64(i));
		if (driver != NULL) {
			local_ret = driver->remove(cdev);
			if (local_ret == 0)
				cdev->classes_loaded &= ~RTE_BIT64(i);
			else if (ret == 0)
				ret = local_ret;
		}
		enabled_classes &= ~RTE_BIT64(i);
		i++;
	}
	if (local_ret != 0 && ret == 0)
		ret = local_ret;
	return ret;
}
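
/*
 * Note: the loop above walks the class mask one bit at a time
 * (RTE_BIT64(i)) so that every still-loaded class driver gets its
 * remove() callback, and the first error seen is preserved in ret
 * while teardown continues for the remaining classes.
 */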

static int
drivers_probe(struct mlx5_common_device *cdev, uint32_t user_classes)
{
	struct mlx5_class_driver *driver;
	uint32_t enabled_classes = 0;
	bool already_loaded;
	int ret;

	TAILQ_FOREACH(driver, &drivers_list, next) {
		if ((driver->drv_class & user_classes) == 0)
			continue;
		if (!mlx5_bus_match(driver, cdev->dev))
			continue;
		already_loaded = cdev->classes_loaded & driver->drv_class;
		if (already_loaded && driver->probe_again == 0) {
			DRV_LOG(ERR, "Device %s is already probed",
				cdev->dev->name);
			ret = -EEXIST;
			goto probe_err;
		}
		ret = driver->probe(cdev);
		if (ret < 0) {
			DRV_LOG(ERR, "Failed to load driver %s",
				driver->name);
			goto probe_err;
		}
		enabled_classes |= driver->drv_class;
	}
	cdev->classes_loaded |= enabled_classes;
	return 0;
probe_err:
	/* Only unload drivers which were enabled in this probe instance. */
	drivers_remove(cdev, enabled_classes);
	return ret;
}

int
mlx5_common_dev_probe(struct rte_device *eal_dev)
{
	struct mlx5_common_device *cdev;
	uint32_t classes = 0;
	bool new_device = false;
	int ret;

	DRV_LOG(INFO, "probe device \"%s\".", eal_dev->name);
	ret = parse_class_options(eal_dev->devargs);
	if (ret < 0) {
		DRV_LOG(ERR, "Unsupported mlx5 class type: %s",
			eal_dev->devargs->args);
		return ret;
	}
	classes = ret;
	if (classes == 0)
		/* Default to net class. */
		classes = MLX5_CLASS_ETH;
	cdev = to_mlx5_device(eal_dev);
	if (cdev == NULL) {
		cdev = mlx5_common_dev_create(eal_dev, classes);
		if (cdev == NULL)
			return -ENOMEM;
		new_device = true;
	}
	/*
	 * Validate the combination here.
	 * For a new device, the classes_loaded field is 0, so only the
	 * classes given as user device arguments are checked.
	 */
	ret = is_valid_class_combination(classes | cdev->classes_loaded);
	if (ret != 0) {
		DRV_LOG(ERR, "Unsupported mlx5 classes combination.");
		goto class_err;
	}
	ret = drivers_probe(cdev, classes);
	if (ret)
		goto class_err;
	return 0;
class_err:
	if (new_device)
		mlx5_common_dev_release(cdev);
	return ret;
}
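
/*
 * For illustration, probing 0000:03:00.0 with class=eth and probing it
 * again later with class=regex reuses the same mlx5_common_device: the
 * second probe validates eth|regex as a combination and only loads the
 * additional regex class driver.
 */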

int
mlx5_common_dev_remove(struct rte_device *eal_dev)
{
	struct mlx5_common_device *cdev;
	int ret;

	cdev = to_mlx5_device(eal_dev);
	if (cdev == NULL)
		return -ENODEV;
	/* Matching device found, cleanup and unload drivers. */
	ret = drivers_remove(cdev, cdev->classes_loaded);
	if (ret == 0)
		mlx5_common_dev_release(cdev);
	return ret;
}

/**
 * Callback to DMA map external memory to a device.
 *
 * @param rte_dev
 *   Pointer to the generic device.
 * @param addr
 *   Starting virtual address of memory to be mapped.
 * @param iova
 *   Starting IOVA address of memory to be mapped.
 * @param len
 *   Length of memory segment being mapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_common_dev_dma_map(struct rte_device *rte_dev, void *addr,
			uint64_t iova __rte_unused, size_t len)
{
	struct mlx5_common_device *dev;
	struct mlx5_mr *mr;

	dev = to_mlx5_device(rte_dev);
	if (dev == NULL) {
		DRV_LOG(WARNING,
			"Unable to find matching mlx5 device to device %s",
			rte_dev->name);
		rte_errno = ENODEV;
		return -1;
	}
	mr = mlx5_create_mr_ext(dev->pd, (uintptr_t)addr, len,
				SOCKET_ID_ANY, dev->mr_scache.reg_mr_cb);
	if (mr == NULL) {
		DRV_LOG(WARNING, "Device %s unable to DMA map", rte_dev->name);
		rte_errno = EINVAL;
		return -1;
	}
	rte_rwlock_write_lock(&dev->mr_scache.rwlock);
	LIST_INSERT_HEAD(&dev->mr_scache.mr_list, mr, mr);
	/* Insert to the global cache table. */
	mlx5_mr_insert_cache(&dev->mr_scache, mr);
	rte_rwlock_write_unlock(&dev->mr_scache.rwlock);
	return 0;
}
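
/*
 * For illustration, this callback is reached through the generic EAL
 * API, e.g. rte_dev_dma_map(dev, addr, iova, len) called on externally
 * allocated memory, which the bus layer dispatches to this driver hook.
 */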

/**
 * Callback to DMA unmap external memory from a device.
 *
 * @param rte_dev
 *   Pointer to the generic device.
 * @param addr
 *   Starting virtual address of memory to be unmapped.
 * @param iova
 *   Starting IOVA address of memory to be unmapped.
 * @param len
 *   Length of memory segment being unmapped.
 *
 * @return
 *   0 on success, negative value on error.
 */
int
mlx5_common_dev_dma_unmap(struct rte_device *rte_dev, void *addr,
			  uint64_t iova __rte_unused, size_t len __rte_unused)
{
	struct mlx5_common_device *dev;
	struct mr_cache_entry entry;
	struct mlx5_mr *mr;

	dev = to_mlx5_device(rte_dev);
	if (dev == NULL) {
		DRV_LOG(WARNING,
			"Unable to find matching mlx5 device to device %s.",
			rte_dev->name);
		rte_errno = ENODEV;
		return -1;
	}
	rte_rwlock_read_lock(&dev->mr_scache.rwlock);
	mr = mlx5_mr_lookup_list(&dev->mr_scache, &entry, (uintptr_t)addr);
	if (mr == NULL) {
		rte_rwlock_read_unlock(&dev->mr_scache.rwlock);
		DRV_LOG(WARNING,
			"Address 0x%" PRIxPTR " wasn't registered to device %s",
			(uintptr_t)addr, rte_dev->name);
		rte_errno = EINVAL;
		return -1;
	}
	LIST_REMOVE(mr, mr);
	DRV_LOG(DEBUG, "MR(%p) is removed from list.", (void *)mr);
	mlx5_mr_free(mr, dev->mr_scache.dereg_mr_cb);
	mlx5_mr_rebuild_cache(&dev->mr_scache);
	/*
	 * No explicit wmb is needed after updating dev_gen due to
	 * store-release ordering in unlock that provides the
	 * implicit barrier at the software visible level.
	 */
	++dev->mr_scache.dev_gen;
	DRV_LOG(DEBUG, "Broadcasting local cache flush, gen=%d.",
		dev->mr_scache.dev_gen);
	rte_rwlock_read_unlock(&dev->mr_scache.rwlock);
	return 0;
}

void
mlx5_class_driver_register(struct mlx5_class_driver *driver)
{
	mlx5_common_driver_on_register_pci(driver);
	TAILQ_INSERT_TAIL(&drivers_list, driver, next);
}
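
/*
 * Hypothetical PMD-side sketch (names are illustrative): a class driver
 * registers itself from its constructor:
 *
 *	static struct mlx5_class_driver mlx5_foo_driver = {
 *		.drv_class = MLX5_CLASS_ETH,
 *		.name = "mlx5_foo",
 *		.probe = mlx5_foo_probe,
 *		.remove = mlx5_foo_remove,
 *	};
 *
 *	RTE_INIT(mlx5_foo_init)
 *	{
 *		mlx5_common_init();
 *		mlx5_class_driver_register(&mlx5_foo_driver);
 *	}
 */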

static void mlx5_common_driver_init(void)
{
	mlx5_common_pci_init();
#ifdef RTE_EXEC_ENV_LINUX
	mlx5_common_auxiliary_init();
#endif
}

static bool mlx5_common_initialized;

/**
 * One-time initialization routine for the run-time dependency on the glue
 * library for multiple PMDs. Each mlx5 PMD that depends on the mlx5_common
 * module must invoke it in its constructor.
 */
void
mlx5_common_init(void)
{
	if (mlx5_common_initialized)
		return;

	pthread_mutex_init(&devices_list_lock, NULL);
	mlx5_glue_constructor();
	mlx5_common_driver_init();
	mlx5_common_initialized = true;
}

/*
 * This function is responsible for initializing the variable
 * haswell_broadwell_cpu by checking if the CPU is Intel
 * and reading the data returned from mlx5_cpu_id().
 * Since Haswell and Broadwell CPUs do not show improved performance
 * with relaxed ordering, we want to check the CPU type before
 * deciding whether to enable RO or not.
 * If the CPU is Haswell or Broadwell the variable is set to 1,
 * otherwise it is set to 0.
 */
RTE_INIT_PRIO(mlx5_is_haswell_broadwell_cpu, LOG)
{
#ifdef RTE_ARCH_X86_64
	unsigned int broadwell_models[4] = {0x3d, 0x47, 0x4F, 0x56};
	unsigned int haswell_models[4] = {0x3c, 0x3f, 0x45, 0x46};
	unsigned int i, model, family, brand_id, vendor;
	unsigned int signature_intel_ebx = 0x756e6547;
	unsigned int extended_model;
	unsigned int eax = 0;
	unsigned int ebx = 0;
	unsigned int ecx = 0;
	unsigned int edx = 0;
	int max_level;

	mlx5_cpu_id(0, &eax, &ebx, &ecx, &edx);
	vendor = ebx;
	max_level = eax;
	if (max_level < 1) {
		haswell_broadwell_cpu = 0;
		return;
	}
	mlx5_cpu_id(1, &eax, &ebx, &ecx, &edx);
	model = (eax >> 4) & 0x0f;
	family = (eax >> 8) & 0x0f;
	brand_id = ebx & 0xff;
	extended_model = (eax >> 12) & 0xf0;
	/* Check if the processor is Haswell or Broadwell. */
	if (vendor == signature_intel_ebx) {
		if (family == 0x06)
			model += extended_model;
		if (brand_id == 0 && family == 0x6) {
			for (i = 0; i < RTE_DIM(broadwell_models); i++)
				if (model == broadwell_models[i]) {
					haswell_broadwell_cpu = 1;
					return;
				}
			for (i = 0; i < RTE_DIM(haswell_models); i++)
				if (model == haswell_models[i]) {
					haswell_broadwell_cpu = 1;
					return;
				}
		}
	}
#endif
	haswell_broadwell_cpu = 0;
}
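
/*
 * Worked example: a Broadwell-U core reports the leaf 1 signature
 * eax = 0x000306d4, giving family = 0x6, model = 0xd and
 * extended_model = 0x30; model += extended_model yields 0x3d, which
 * matches broadwell_models[0], so haswell_broadwell_cpu is set to 1.
 */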

/**
 * Allocate the User Access Region with DevX on specified device.
 *
 * @param [in] ctx
 *   Infiniband device context to perform allocation on.
 * @param [in] mapping
 *   MLX5DV_UAR_ALLOC_TYPE_BF - allocate as cached memory with write-combining
 *				attributes (if supported by the host), the
 *				writes to the UAR registers must be followed
 *				by write memory barrier.
 *   MLX5DV_UAR_ALLOC_TYPE_NC - allocate as non-cached memory, all writes are
 *				promoted to the registers immediately, no
 *				memory barriers needed.
 *   mapping < 0 - the first attempt is performed with MLX5DV_UAR_ALLOC_TYPE_BF,
 *		   if this fails the next attempt with MLX5DV_UAR_ALLOC_TYPE_NC
 *		   is performed. The drivers specifying negative values should
 *		   always provide the write memory barrier operation after UAR
 *		   mapping.
 *   If there are no definitions for the MLX5DV_UAR_ALLOC_TYPE_xx (older rdma
 *   library headers), the caller can specify 0.
 *
 * @return
 *   UAR object pointer on success, NULL otherwise and rte_errno is set.
 */
void *
mlx5_devx_alloc_uar(void *ctx, int mapping)
{
	void *uar;
	uint32_t retry, uar_mapping;
	void *base_addr;

	for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
		/* Control the mapping type according to the settings. */
		uar_mapping = (mapping < 0) ?
			      MLX5DV_UAR_ALLOC_TYPE_NC : mapping;
#else
		/*
		 * It seems we have no way to control the memory mapping type
		 * for the UAR, the default "Write-Combining" type is assumed.
		 */
		uar_mapping = 0;
		RTE_SET_USED(mapping);
#endif
		uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);
#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
		if (!uar &&
		    mapping < 0 &&
		    uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
			/*
			 * In some environments like virtual machines the
			 * Write-Combining mapping might not be supported
			 * and UAR allocation fails. Try the "Non-Cached"
			 * mapping for that case.
			 */
			DRV_LOG(WARNING, "Failed to allocate DevX UAR (BF)");
			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
			uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);
		} else if (!uar &&
			   mapping < 0 &&
			   uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) {
			/*
			 * If Verbs/kernel does not support "Non-Cached",
			 * try the "Write-Combining" mapping.
			 */
			DRV_LOG(WARNING, "Failed to allocate DevX UAR (NC)");
			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF;
			uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);
		}
#endif
		if (!uar) {
			DRV_LOG(ERR, "Failed to allocate DevX UAR (BF/NC)");
			rte_errno = ENOMEM;
			goto exit;
		}
		base_addr = mlx5_os_get_devx_uar_base_addr(uar);
		if (base_addr != NULL)
			break;
		/*
		 * The UARs are allocated by rdma_core within the
		 * IB device context, on context closure all UARs
		 * will be freed, there should be no memory/object
		 * leakage.
		 */
		DRV_LOG(WARNING, "Retrying to allocate DevX UAR");
		uar = NULL;
	}
	/* Check whether we finally succeeded with valid UAR allocation. */
	if (!uar) {
		DRV_LOG(ERR, "Failed to allocate DevX UAR (NULL base)");
		rte_errno = ENOMEM;
	}
	/*
	 * Returning void * instead of struct mlx5dv_devx_uar *
	 * is for compatibility with older rdma-core library headers.
	 */
exit:
	return uar;
}
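
/*
 * Usage sketch (hypothetical caller): drivers typically request the
 * automatic mapping selection and rely on rte_errno on failure:
 *
 *	void *uar = mlx5_devx_alloc_uar(cdev->ctx, -1);
 *
 *	if (uar == NULL)
 *		return -rte_errno;
 */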

RTE_PMD_EXPORT_NAME(mlx5_common_driver, __COUNTER__);