/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>

#include <rte_errno.h>
#include <rte_kvargs.h>
#include <rte_mempool.h>
#include <rte_class.h>
#include <rte_malloc.h>
#include "mlx5_common.h"
#include "mlx5_common_os.h"
#include "mlx5_common_log.h"
#include "mlx5_common_defs.h"
#include "mlx5_common_private.h"
uint8_t haswell_broadwell_cpu;
/* In case this is an x86_64 Intel processor, check whether
 * relaxed ordering should be used.
 */
#ifdef RTE_ARCH_X86_64
/**
 * This function returns processor identification and feature information
 * to the caller.
 *
 * @param eax, ebx, ecx, edx
 *   Pointers to the registers that will hold CPU information.
 * @param level
 *   The main category of information returned.
 */
static inline void mlx5_cpu_id(unsigned int level,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	__asm__("cpuid\n\t"
		: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		: "0" (level));
}
#endif /* RTE_ARCH_X86_64 */
RTE_LOG_REGISTER_DEFAULT(mlx5_common_logtype, NOTICE)
/* Head of list of drivers. */
static TAILQ_HEAD(mlx5_drivers, mlx5_class_driver) drivers_list =
				TAILQ_HEAD_INITIALIZER(drivers_list);
/* Head of devices. */
static TAILQ_HEAD(mlx5_devices, mlx5_common_device) devices_list =
				TAILQ_HEAD_INITIALIZER(devices_list);
static pthread_mutex_t devices_list_lock;
static const struct {
	const char *name;
	unsigned int drv_class;
} mlx5_classes[] = {
	{ .name = "vdpa", .drv_class = MLX5_CLASS_VDPA },
	{ .name = "eth", .drv_class = MLX5_CLASS_ETH },
	/* Keep class "net" for backward compatibility. */
	{ .name = "net", .drv_class = MLX5_CLASS_ETH },
	{ .name = "regex", .drv_class = MLX5_CLASS_REGEX },
	{ .name = "compress", .drv_class = MLX5_CLASS_COMPRESS },
	{ .name = "crypto", .drv_class = MLX5_CLASS_CRYPTO },
};
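
/*
 * Translate a class name from devargs to its MLX5_CLASS_* flag value.
 * Returns a negative errno value when the name is not a supported class.
 */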
static int
class_name_to_value(const char *class_name)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(mlx5_classes); i++) {
		if (strcmp(class_name, mlx5_classes[i].name) == 0)
			return mlx5_classes[i].drv_class;
	}
	return -EINVAL;
}
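
/* Find the registered driver handling the given class flag, if any. */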
static struct mlx5_class_driver *
driver_get(uint32_t class)
{
	struct mlx5_class_driver *driver;

	TAILQ_FOREACH(driver, &drivers_list, next) {
		if ((uint32_t)driver->drv_class == class)
			return driver;
	}
	return NULL;
}
/**
 * Verify and store value for devargs.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_common_args_check_handler(const char *key, const char *val, void *opaque)
{
	struct mlx5_common_dev_config *config = opaque;
	signed long tmp;

	errno = 0;
	tmp = strtol(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		DRV_LOG(WARNING, "%s: \"%s\" is an invalid integer.", key, val);
		return -rte_errno;
	}
	if (strcmp(key, "tx_db_nc") == 0) {
		if (tmp != MLX5_TXDB_CACHED &&
		    tmp != MLX5_TXDB_NCACHED &&
		    tmp != MLX5_TXDB_HEURISTIC) {
			DRV_LOG(ERR, "Invalid Tx doorbell mapping parameter.");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->dbnc = tmp;
	} else if (strcmp(key, "mr_ext_memseg_en") == 0) {
		config->mr_ext_memseg_en = !!tmp;
	} else if (strcmp(key, "mr_mempool_reg_en") == 0) {
		config->mr_mempool_reg_en = !!tmp;
	} else if (strcmp(key, "sys_mem_en") == 0) {
		config->sys_mem_en = !!tmp;
	}
	return 0;
}
/**
 * Parse common device parameters.
 *
 * @param devargs
 *   Device arguments structure.
 * @param config
 *   Pointer to device configuration structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_common_config_get(struct rte_devargs *devargs,
		       struct mlx5_common_dev_config *config)
{
	struct rte_kvargs *kvlist;
	int ret = 0;

	/* Set defaults. */
	config->mr_ext_memseg_en = 1;
	config->mr_mempool_reg_en = 1;
	config->sys_mem_en = 0;
	config->dbnc = MLX5_ARG_UNSET;
	if (devargs == NULL)
		return 0;
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	ret = rte_kvargs_process(kvlist, NULL, mlx5_common_args_check_handler,
				 config);
	if (ret)
		ret = -rte_errno;
	rte_kvargs_free(kvlist);
	DRV_LOG(DEBUG, "mr_ext_memseg_en is %u.", config->mr_ext_memseg_en);
	DRV_LOG(DEBUG, "mr_mempool_reg_en is %u.", config->mr_mempool_reg_en);
	DRV_LOG(DEBUG, "sys_mem_en is %u.", config->sys_mem_en);
	DRV_LOG(DEBUG, "Tx doorbell mapping parameter is %d.", config->dbnc);
	return ret;
}
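
/*
 * Parse one "class" devargs value: a colon-separated list of class names
 * (e.g. class=net:regex) accumulated as class flags into *opaque.
 */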
static int
devargs_class_handler(__rte_unused const char *key,
		      const char *class_names, void *opaque)
{
	int *ret = opaque;
	int class_val;
	char *scratch, *found, *refstr = NULL;

	*ret = 0;
	scratch = strdup(class_names);
	if (scratch == NULL) {
		*ret = -ENOMEM;
		return *ret;
	}
	found = strtok_r(scratch, ":", &refstr);
	if (found == NULL)
		/* Empty string. */
		goto err;
	do {
		/* Extract each individual class name. Multiple
		 * classes can be supplied as class=net:regex:foo:bar.
		 */
		class_val = class_name_to_value(found);
		/* Check if it's a valid class. */
		if (class_val < 0) {
			*ret = -EINVAL;
			goto err;
		}
		*ret |= class_val;
		found = strtok_r(NULL, ":", &refstr);
	} while (found != NULL);
err:
	free(scratch);
	if (*ret < 0)
		DRV_LOG(ERR, "Invalid mlx5 class options: %s.\n", class_names);
	return *ret;
}
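
/*
 * Resolve the requested classes from devargs. Returns a bitmap of
 * MLX5_CLASS_* flags, 0 when nothing is specified, or a negative
 * errno value on invalid input.
 */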
static int
parse_class_options(const struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	int ret = 0;

	if (devargs == NULL)
		return 0;
	if (devargs->cls != NULL && devargs->cls->name != NULL)
		/* Global syntax, only one class type. */
		return class_name_to_value(devargs->cls->name);
	/* Legacy devargs support multiple classes. */
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;
	rte_kvargs_process(kvlist, RTE_DEVARGS_KEY_CLASS,
			   devargs_class_handler, &ret);
	rte_kvargs_free(kvlist);
	return ret;
}
static const unsigned int mlx5_class_invalid_combinations[] = {
	MLX5_CLASS_ETH | MLX5_CLASS_VDPA,
	/* New class combinations should be added here. */
};
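
/* Check that the requested classes do not include a forbidden pair. */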
static int
is_valid_class_combination(uint32_t user_classes)
{
	unsigned int i;

	/* Verify if user specified unsupported combination. */
	for (i = 0; i < RTE_DIM(mlx5_class_invalid_combinations); i++) {
		if ((mlx5_class_invalid_combinations[i] & user_classes) ==
		    mlx5_class_invalid_combinations[i])
			return -EINVAL;
	}
	/* No invalid class combination found. */
	return 0;
}
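
/* Check whether the given class is already loaded on the device. */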
static bool
device_class_enabled(const struct mlx5_common_device *device, uint32_t class)
{
	return (device->classes_loaded & class) > 0;
}
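
/*
 * Match a driver against a device: PCI devices are checked against the
 * driver ID table, other buses (e.g. auxiliary) always match.
 */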
static bool
mlx5_bus_match(const struct mlx5_class_driver *drv,
	       const struct rte_device *dev)
{
	if (mlx5_dev_is_pci(dev))
		return mlx5_dev_pci_match(drv, dev);
	return true;
}
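
/* Look up the common device corresponding to an EAL device. */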
static struct mlx5_common_device *
to_mlx5_device(const struct rte_device *rte_dev)
{
	struct mlx5_common_device *cdev;

	TAILQ_FOREACH(cdev, &devices_list, next) {
		if (rte_dev == cdev->dev)
			return cdev;
	}
	return NULL;
}
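
/*
 * Format the PCI address (DBDF) backing a device: either the PCI device
 * itself or, on Linux, the PCI parent of an auxiliary device.
 */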
int
mlx5_dev_to_pci_str(const struct rte_device *dev, char *addr, size_t size)
{
	struct rte_pci_addr pci_addr = { 0 };
	int ret;

	if (mlx5_dev_is_pci(dev)) {
		/* Input might be <BDF>, format PCI address to <DBDF>. */
		ret = rte_pci_addr_parse(dev->name, &pci_addr);
		if (ret != 0)
			return -ENODEV;
		rte_pci_device_name(&pci_addr, addr, size);
		return 0;
	}
#ifdef RTE_EXEC_ENV_LINUX
	return mlx5_auxiliary_get_pci_str(RTE_DEV_TO_AUXILIARY_CONST(dev),
					  addr, size);
#else
	rte_errno = ENODEV;
	return -rte_errno;
#endif
}
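
/* Remove the common device from the global list and free it. */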
static void
mlx5_common_dev_release(struct mlx5_common_device *cdev)
{
	pthread_mutex_lock(&devices_list_lock);
	TAILQ_REMOVE(&devices_list, cdev, next);
	pthread_mutex_unlock(&devices_list_lock);
	rte_free(cdev);
}
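
/*
 * Allocate a common device for the EAL device, parse devargs in the
 * primary process, and insert it into the global device list.
 */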
static struct mlx5_common_device *
mlx5_common_dev_create(struct rte_device *eal_dev)
{
	struct mlx5_common_device *cdev;
	int ret;

	cdev = rte_zmalloc("mlx5_common_device", sizeof(*cdev), 0);
	if (!cdev) {
		DRV_LOG(ERR, "Device allocation failure.");
		rte_errno = ENOMEM;
		return NULL;
	}
	cdev->dev = eal_dev;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		goto exit;
	/* Parse device parameters. */
	ret = mlx5_common_config_get(eal_dev->devargs, &cdev->config);
	if (ret < 0) {
		DRV_LOG(ERR, "Failed to process device arguments: %s",
			strerror(rte_errno));
		rte_free(cdev);
		return NULL;
	}
	mlx5_malloc_mem_select(cdev->config.sys_mem_en);
exit:
	pthread_mutex_lock(&devices_list_lock);
	TAILQ_INSERT_HEAD(&devices_list, cdev, next);
	pthread_mutex_unlock(&devices_list_lock);
	return cdev;
}
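
/*
 * Unload the selected classes from the device, one class bit at a time.
 * The last error seen, if any, is returned.
 */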
static int
drivers_remove(struct mlx5_common_device *cdev, uint32_t enabled_classes)
{
	struct mlx5_class_driver *driver;
	int local_ret = -ENODEV;
	unsigned int i = 0;
	int ret = 0;

	enabled_classes &= cdev->classes_loaded;
	while (enabled_classes) {
		driver = driver_get(RTE_BIT64(i));
		if (driver != NULL) {
			local_ret = driver->remove(cdev);
			if (local_ret == 0)
				cdev->classes_loaded &= ~RTE_BIT64(i);
			else if (ret == 0)
				ret = local_ret;
		}
		enabled_classes &= ~RTE_BIT64(i);
		i++;
	}
	if (local_ret != 0 && ret == 0)
		ret = local_ret;
	return ret;
}
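
/*
 * Probe every registered driver selected by the user classes. On any
 * failure, roll back only the classes enabled by this probe instance.
 */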
static int
drivers_probe(struct mlx5_common_device *cdev, uint32_t user_classes)
{
	struct mlx5_class_driver *driver;
	uint32_t enabled_classes = 0;
	bool already_loaded;
	int ret = -EINVAL;

	TAILQ_FOREACH(driver, &drivers_list, next) {
		if ((driver->drv_class & user_classes) == 0)
			continue;
		if (!mlx5_bus_match(driver, cdev->dev))
			continue;
		already_loaded = cdev->classes_loaded & driver->drv_class;
		if (already_loaded && driver->probe_again == 0) {
			DRV_LOG(ERR, "Device %s is already probed",
				cdev->dev->name);
			ret = -EEXIST;
			goto probe_err;
		}
		ret = driver->probe(cdev);
		if (ret < 0) {
			DRV_LOG(ERR, "Failed to load driver %s",
				driver->name);
			goto probe_err;
		}
		enabled_classes |= driver->drv_class;
	}
	cdev->classes_loaded |= enabled_classes;
	return 0;
probe_err:
	/* Only unload drivers which were enabled in this probe instance. */
	drivers_remove(cdev, enabled_classes);
	return ret;
}
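
/*
 * Common probe entry point: resolve the requested classes (defaulting to
 * "eth"), create or reuse the common device, validate the class
 * combination and probe the matching class drivers.
 */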
int
mlx5_common_dev_probe(struct rte_device *eal_dev)
{
	struct mlx5_common_device *cdev;
	uint32_t classes = 0;
	bool new_device = false;
	int ret;

	DRV_LOG(INFO, "probe device \"%s\".", eal_dev->name);
	ret = parse_class_options(eal_dev->devargs);
	if (ret < 0) {
		DRV_LOG(ERR, "Unsupported mlx5 class type: %s",
			eal_dev->devargs->args);
		return ret;
	}
	classes = ret;
	if (classes == 0)
		/* Default to net class. */
		classes = MLX5_CLASS_ETH;
	cdev = to_mlx5_device(eal_dev);
	if (!cdev) {
		cdev = mlx5_common_dev_create(eal_dev);
		if (!cdev)
			return -ENOMEM;
		new_device = true;
	}
	/*
	 * Validate the combination here. For a new device, the
	 * classes_loaded field is 0, so only the classes given as user
	 * device arguments are checked.
	 */
	ret = is_valid_class_combination(classes | cdev->classes_loaded);
	if (ret != 0) {
		DRV_LOG(ERR, "Unsupported mlx5 classes combination.");
		goto class_err;
	}
	ret = drivers_probe(cdev, classes);
	if (ret)
		goto class_err;
	return 0;
class_err:
	if (new_device)
		mlx5_common_dev_release(cdev);
	return ret;
}
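
/* Unload all drivers from the device, releasing it on full success. */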
int
mlx5_common_dev_remove(struct rte_device *eal_dev)
{
	struct mlx5_common_device *cdev;
	int ret;

	cdev = to_mlx5_device(eal_dev);
	if (!cdev)
		return -ENODEV;
	/* Matching device found, cleanup and unload drivers. */
	ret = drivers_remove(cdev, cdev->classes_loaded);
	if (ret == 0)
		mlx5_common_dev_release(cdev);
	return ret;
}
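
/*
 * Forward a DMA map request to every enabled driver class. On failure,
 * unmap from the drivers already mapped to keep the state consistent.
 */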
int
mlx5_common_dev_dma_map(struct rte_device *dev, void *addr, uint64_t iova,
			size_t len)
{
	struct mlx5_class_driver *driver = NULL;
	struct mlx5_class_driver *temp;
	struct mlx5_common_device *mdev;
	int ret = -EINVAL;

	mdev = to_mlx5_device(dev);
	if (!mdev)
		return -ENODEV;
	TAILQ_FOREACH(driver, &drivers_list, next) {
		if (!device_class_enabled(mdev, driver->drv_class) ||
		    driver->dma_map == NULL)
			continue;
		ret = driver->dma_map(dev, addr, iova, len);
		if (ret)
			goto map_err;
	}
	return ret;
map_err:
	TAILQ_FOREACH(temp, &drivers_list, next) {
		if (temp == driver)
			break;
		if (device_class_enabled(mdev, temp->drv_class) &&
		    temp->dma_map && temp->dma_unmap)
			temp->dma_unmap(dev, addr, iova, len);
	}
	return ret;
}
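
/*
 * Forward a DMA unmap request to the enabled drivers in reverse
 * registration order; errors are recorded but do not stop the walk.
 */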
int
mlx5_common_dev_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova,
			  size_t len)
{
	struct mlx5_class_driver *driver;
	struct mlx5_common_device *mdev;
	int local_ret = -EINVAL;
	int ret = 0;

	mdev = to_mlx5_device(dev);
	if (!mdev)
		return -ENODEV;
	/* There is no unmap error recovery in the current implementation. */
	TAILQ_FOREACH_REVERSE(driver, &drivers_list, mlx5_drivers, next) {
		if (!device_class_enabled(mdev, driver->drv_class) ||
		    driver->dma_unmap == NULL)
			continue;
		local_ret = driver->dma_unmap(dev, addr, iova, len);
		if (local_ret && (ret == 0))
			ret = local_ret;
	}
	if (local_ret)
		ret = local_ret;
	return ret;
}
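
/* Register a class driver and update the PCI driver requirements. */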
void
mlx5_class_driver_register(struct mlx5_class_driver *driver)
{
	mlx5_common_driver_on_register_pci(driver);
	TAILQ_INSERT_TAIL(&drivers_list, driver, next);
}
static void mlx5_common_driver_init(void)
{
	mlx5_common_pci_init();
#ifdef RTE_EXEC_ENV_LINUX
	mlx5_common_auxiliary_init();
#endif
}
static bool mlx5_common_initialized;
/**
 * One-time initialization routine for run-time dependency on the glue
 * library, shared by multiple PMDs. Each mlx5 PMD that depends on the
 * mlx5_common module must invoke it in its constructor.
 */
void
mlx5_common_init(void)
{
	if (mlx5_common_initialized)
		return;

	pthread_mutex_init(&devices_list_lock, NULL);
	mlx5_glue_constructor();
	mlx5_common_driver_init();
	mlx5_common_initialized = true;
}
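
/*
 * Illustrative usage (not part of this file): a PMD would typically
 * invoke the routine from its own constructor, e.g.
 *
 *   RTE_INIT(mlx5_example_pmd_init)
 *   {
 *       mlx5_common_init();
 *   }
 */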
/**
 * This function is responsible for initializing the variable
 * haswell_broadwell_cpu by checking if the CPU is Intel and reading the
 * data returned from mlx5_cpu_id(). Since Haswell and Broadwell CPUs do
 * not show improved performance with relaxed ordering, we want to check
 * the CPU type before deciding whether to enable RO or not. If the CPU
 * is Haswell or Broadwell, the variable is set to 1, otherwise to 0.
 */
RTE_INIT_PRIO(mlx5_is_haswell_broadwell_cpu, LOG)
{
#ifdef RTE_ARCH_X86_64
	unsigned int broadwell_models[4] = {0x3d, 0x47, 0x4F, 0x56};
	unsigned int haswell_models[4] = {0x3c, 0x3f, 0x45, 0x46};
	unsigned int i, model, family, brand_id, vendor;
	unsigned int signature_intel_ebx = 0x756e6547;
	unsigned int extended_model;
	unsigned int eax = 0;
	unsigned int ebx = 0;
	unsigned int ecx = 0;
	unsigned int edx = 0;
	int max_level;

	mlx5_cpu_id(0, &eax, &ebx, &ecx, &edx);
	vendor = ebx;
	max_level = eax;
	if (max_level < 1) {
		haswell_broadwell_cpu = 0;
		return;
	}
	mlx5_cpu_id(1, &eax, &ebx, &ecx, &edx);
	model = (eax >> 4) & 0x0f;
	family = (eax >> 8) & 0x0f;
	brand_id = ebx & 0xff;
	extended_model = (eax >> 12) & 0xf0;
	/* Check if the processor is Haswell or Broadwell. */
	if (vendor == signature_intel_ebx) {
		if (family == 0x06)
			model += extended_model;
		if (brand_id == 0 && family == 0x6) {
			for (i = 0; i < RTE_DIM(broadwell_models); i++)
				if (model == broadwell_models[i]) {
					haswell_broadwell_cpu = 1;
					return;
				}
			for (i = 0; i < RTE_DIM(haswell_models); i++)
				if (model == haswell_models[i]) {
					haswell_broadwell_cpu = 1;
					return;
				}
		}
	}
#endif
	haswell_broadwell_cpu = 0;
}
/**
 * Allocate the User Access Region with DevX on specified device.
 *
 * @param [in] ctx
 *   Infiniband device context to perform allocation on.
 * @param [in] mapping
 *   MLX5DV_UAR_ALLOC_TYPE_BF - allocate as cached memory with write-combining
 *				attributes (if supported by the host), the
 *				writes to the UAR registers must be followed
 *				by write memory barrier.
 *   MLX5DV_UAR_ALLOC_TYPE_NC - allocate as non-cached memory, all writes are
 *				promoted to the registers immediately, no
 *				memory barriers needed.
 *   mapping < 0 - the first attempt is performed with MLX5DV_UAR_ALLOC_TYPE_BF,
 *		   if this fails the next attempt with MLX5DV_UAR_ALLOC_TYPE_NC
 *		   is performed. The drivers specifying negative values should
 *		   always provide the write memory barrier operation after UAR
 *		   writing.
 *   If there are no definitions for the MLX5DV_UAR_ALLOC_TYPE_xx (older rdma
 *   library headers), the caller can specify 0.
 *
 * @return
 *   UAR object pointer on success, NULL otherwise and rte_errno is set.
 */
void *
mlx5_devx_alloc_uar(void *ctx, int mapping)
{
	void *uar;
	uint32_t retry, uar_mapping;
	void *base_addr;

	for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
		/* Control the mapping type according to the settings. */
		uar_mapping = (mapping < 0) ?
			      MLX5DV_UAR_ALLOC_TYPE_NC : mapping;
#else
		/*
		 * It seems we have no way to control the memory mapping type
		 * for the UAR, the default "Write-Combining" type is supposed.
		 */
		uar_mapping = 0;
		RTE_SET_USED(mapping);
#endif
		uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);
#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
		if (!uar &&
		    mapping < 0 &&
		    uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
			/*
			 * In some environments, such as virtual machines,
			 * the Write-Combining mapping might not be supported
			 * and UAR allocation fails. Try the "Non-Cached"
			 * mapping for that case.
			 */
			DRV_LOG(WARNING, "Failed to allocate DevX UAR (BF)");
			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
			uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);
		} else if (!uar &&
			   mapping < 0 &&
			   uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) {
			/*
			 * If Verbs/kernel does not support "Non-Cached",
			 * try the "Write-Combining" mapping.
			 */
			DRV_LOG(WARNING, "Failed to allocate DevX UAR (NC)");
			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF;
			uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);
		}
#endif
		if (!uar) {
			DRV_LOG(ERR, "Failed to allocate DevX UAR (BF/NC)");
			rte_errno = ENOMEM;
			goto exit;
		}
		base_addr = mlx5_os_get_devx_uar_base_addr(uar);
		if (base_addr)
			break;
		/*
		 * The UARs are allocated by rdma_core within the
		 * IB device context, on context closure all UARs
		 * will be freed, should be no memory/object leakage.
		 */
		DRV_LOG(WARNING, "Retrying to allocate DevX UAR");
		uar = NULL;
	}
	/* Check whether we finally succeeded with valid UAR allocation. */
	if (!uar) {
		DRV_LOG(ERR, "Failed to allocate DevX UAR (NULL base)");
		rte_errno = ENOMEM;
	}
	/*
	 * Returning void * instead of struct mlx5dv_devx_uar * is for
	 * compatibility with older rdma-core library headers.
	 */
exit:
	return uar;
}
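
/*
 * Illustrative usage (not from this file): a driver that cannot tell
 * which mapping types the local rdma-core supports would request the
 * negative-mapping fallback and pair UAR writes with a memory barrier:
 *
 *   void *uar = mlx5_devx_alloc_uar(ctx, -1);
 *   if (uar == NULL)
 *       return -rte_errno;
 */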
RTE_PMD_EXPORT_NAME(mlx5_common_driver, __COUNTER__);