/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies Ltd
 */

#include <stdlib.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>

#include "mlx5_common_log.h"
#include "mlx5_common_pci.h"

/* A PCI device that may be probed by several mlx5 class drivers at once. */
struct mlx5_pci_device {
	struct rte_pci_device *pci_dev;
	TAILQ_ENTRY(mlx5_pci_device) next;
	uint32_t classes_loaded;
};

/* Head of list of drivers. */
static TAILQ_HEAD(mlx5_pci_bus_drv_head, mlx5_pci_driver) drv_list =
				TAILQ_HEAD_INITIALIZER(drv_list);

/* Head of mlx5 pci devices. */
static TAILQ_HEAD(mlx5_pci_devices_head, mlx5_pci_device) devices_list =
				TAILQ_HEAD_INITIALIZER(devices_list);

/* Mapping of class names accepted in devargs to class bits. */
static const struct {
	const char *name;
	unsigned int driver_class;
} mlx5_classes[] = {
	{ .name = "vdpa", .driver_class = MLX5_CLASS_VDPA },
	{ .name = "eth", .driver_class = MLX5_CLASS_ETH },
	/* Keep name "net" for backward compatibility. */
	{ .name = "net", .driver_class = MLX5_CLASS_ETH },
	{ .name = "regex", .driver_class = MLX5_CLASS_REGEX },
	{ .name = "compress", .driver_class = MLX5_CLASS_COMPRESS },
	{ .name = "crypto", .driver_class = MLX5_CLASS_CRYPTO },
};

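/* Class combinations supported on a single PCI device. A device may be
 * probed by several class drivers at once, but only the combinations
 * listed below are accepted.
 */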
static const unsigned int mlx5_class_combinations[] = {
	MLX5_CLASS_ETH,
	MLX5_CLASS_VDPA,
	MLX5_CLASS_REGEX,
	MLX5_CLASS_COMPRESS,
	MLX5_CLASS_CRYPTO,
	MLX5_CLASS_ETH | MLX5_CLASS_REGEX,
	MLX5_CLASS_VDPA | MLX5_CLASS_REGEX,
	MLX5_CLASS_ETH | MLX5_CLASS_COMPRESS,
	MLX5_CLASS_VDPA | MLX5_CLASS_COMPRESS,
	MLX5_CLASS_REGEX | MLX5_CLASS_COMPRESS,
	MLX5_CLASS_ETH | MLX5_CLASS_CRYPTO,
	MLX5_CLASS_ETH | MLX5_CLASS_REGEX | MLX5_CLASS_COMPRESS,
	MLX5_CLASS_VDPA | MLX5_CLASS_CRYPTO,
	MLX5_CLASS_REGEX | MLX5_CLASS_CRYPTO,
	MLX5_CLASS_COMPRESS | MLX5_CLASS_CRYPTO,
	MLX5_CLASS_VDPA | MLX5_CLASS_REGEX | MLX5_CLASS_COMPRESS,
	MLX5_CLASS_ETH | MLX5_CLASS_REGEX | MLX5_CLASS_CRYPTO,
	MLX5_CLASS_VDPA | MLX5_CLASS_REGEX | MLX5_CLASS_CRYPTO,
	MLX5_CLASS_ETH | MLX5_CLASS_COMPRESS | MLX5_CLASS_CRYPTO,
	MLX5_CLASS_VDPA | MLX5_CLASS_COMPRESS | MLX5_CLASS_CRYPTO,
	MLX5_CLASS_ETH | MLX5_CLASS_REGEX | MLX5_CLASS_COMPRESS |
		MLX5_CLASS_CRYPTO,
	MLX5_CLASS_VDPA | MLX5_CLASS_REGEX | MLX5_CLASS_COMPRESS |
		MLX5_CLASS_CRYPTO,
	/* New class combinations should be added here. */
};

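/* For example (illustrative only, the PCI address is hypothetical), the
 * device argument "0000:03:00.0,class=net:regex" selects
 * MLX5_CLASS_ETH | MLX5_CLASS_REGEX, which is accepted because that
 * combination appears in mlx5_class_combinations[] above.
 */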
/* Translate a class name from devargs to its MLX5_CLASS_* value. */
static int
class_name_to_value(const char *class_name)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(mlx5_classes); i++) {
		if (strcmp(class_name, mlx5_classes[i].name) == 0)
			return mlx5_classes[i].driver_class;
	}
	return -EINVAL;
}

/* Find the registered class driver matching the given class bit. */
static struct mlx5_pci_driver *
driver_get(uint32_t class)
{
	struct mlx5_pci_driver *driver;

	TAILQ_FOREACH(driver, &drv_list, next) {
		if (driver->driver_class == class)
			return driver;
	}
	return NULL;
}

static int
bus_cmdline_options_handler(__rte_unused const char *key,
			    const char *class_names, void *opaque)
{
	int *ret = opaque;
	char *nstr_org;
	int class_val;
	char *found;
	char *nstr;
	char *refstr = NULL;

	*ret = 0;
	nstr = strdup(class_names);
	if (!nstr) {
		*ret = -ENOMEM;
		return *ret;
	}
	nstr_org = nstr;
	found = strtok_r(nstr, ":", &refstr);
	if (!found)
		goto err;
	do {
		/* Extract each individual class name. Multiple
		 * class names are supplied as class=net:vdpa:foo:bar.
		 */
		class_val = class_name_to_value(found);
		/* Check if it's a valid class. */
		if (class_val < 0) {
			*ret = -EINVAL;
			goto err;
		}
		*ret |= class_val;
		found = strtok_r(NULL, ":", &refstr);
	} while (found);
err:
	free(nstr_org);
	if (*ret < 0)
		DRV_LOG(ERR, "Invalid mlx5 class options %s."
			" Maybe typo in device class argument setting?",
			class_names);
	return *ret < 0 ? *ret : 0;
}

static int
parse_class_options(const struct rte_devargs *devargs)
{
	const char *key = RTE_DEVARGS_KEY_CLASS;
	struct rte_kvargs *kvlist;
	int ret = 0;

	if (devargs == NULL)
		return 0;
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;
	if (rte_kvargs_count(kvlist, key))
		rte_kvargs_process(kvlist, key, bus_cmdline_options_handler,
				   &ret);
	rte_kvargs_free(kvlist);
	return ret;
}

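/* Check whether the PCI device matches any entry of the class driver's
 * PCI ID table.
 */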
static bool
mlx5_bus_match(const struct mlx5_pci_driver *drv,
	       const struct rte_pci_device *pci_dev)
{
	const struct rte_pci_id *id_table;

	for (id_table = drv->pci_driver.id_table; id_table->vendor_id != 0;
	     id_table++) {
		/* Check if the device's IDs match the class driver's IDs. */
		if (id_table->vendor_id != pci_dev->id.vendor_id &&
		    id_table->vendor_id != RTE_PCI_ANY_ID)
			continue;
		if (id_table->device_id != pci_dev->id.device_id &&
		    id_table->device_id != RTE_PCI_ANY_ID)
			continue;
		if (id_table->subsystem_vendor_id !=
		    pci_dev->id.subsystem_vendor_id &&
		    id_table->subsystem_vendor_id != RTE_PCI_ANY_ID)
			continue;
		if (id_table->subsystem_device_id !=
		    pci_dev->id.subsystem_device_id &&
		    id_table->subsystem_device_id != RTE_PCI_ANY_ID)
			continue;
		if (id_table->class_id != pci_dev->id.class_id &&
		    id_table->class_id != RTE_CLASS_ANY_ID)
			continue;
		return true;
	}
	return false;
}

static int
is_valid_class_combination(uint32_t user_classes)
{
	unsigned int i;

	/* Verify that the user specified a valid, supported combination. */
	for (i = 0; i < RTE_DIM(mlx5_class_combinations); i++) {
		if (mlx5_class_combinations[i] == user_classes)
			return 0;
	}
	/* No valid class combination found. */
	return -EINVAL;
}

static struct mlx5_pci_device *
pci_to_mlx5_device(const struct rte_pci_device *pci_dev)
{
	struct mlx5_pci_device *dev;

	TAILQ_FOREACH(dev, &devices_list, next) {
		if (dev->pci_dev == pci_dev)
			return dev;
	}
	return NULL;
}

static bool
device_class_enabled(const struct mlx5_pci_device *device, uint32_t class)
{
	return (device->classes_loaded & class) ? true : false;
}

static void
dev_release(struct mlx5_pci_device *dev)
{
	TAILQ_REMOVE(&devices_list, dev, next);
	rte_free(dev);
}

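/* Unload the class drivers loaded on the device and clear their bits in
 * classes_loaded on successful removal.
 */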
static int
drivers_remove(struct mlx5_pci_device *dev, uint32_t enabled_classes)
{
	struct mlx5_pci_driver *driver;
	int local_ret = -ENODEV;
	unsigned int i = 0;
	int ret = 0;

	enabled_classes &= dev->classes_loaded;
	while (enabled_classes) {
		driver = driver_get(RTE_BIT64(i));
		if (driver) {
			local_ret = driver->pci_driver.remove(dev->pci_dev);
			if (!local_ret)
				dev->classes_loaded &= ~RTE_BIT64(i);
			else if (ret == 0)
				ret = local_ret;
		}
		enabled_classes &= ~RTE_BIT64(i);
		i++;
	}
	if (local_ret)
		ret = local_ret;
	return ret;
}

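/* Probe the registered class drivers requested in user_classes that match
 * the PCI device. On failure, the drivers probed by this call are removed
 * again before the error is returned.
 */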
static int
drivers_probe(struct mlx5_pci_device *dev, struct rte_pci_driver *pci_drv,
	      struct rte_pci_device *pci_dev, uint32_t user_classes)
{
	struct mlx5_pci_driver *driver;
	uint32_t enabled_classes = 0;
	bool already_loaded;
	int ret;

	TAILQ_FOREACH(driver, &drv_list, next) {
		if ((driver->driver_class & user_classes) == 0)
			continue;
		if (!mlx5_bus_match(driver, pci_dev))
			continue;
		already_loaded = dev->classes_loaded & driver->driver_class;
		if (already_loaded &&
		    !(driver->pci_driver.drv_flags & RTE_PCI_DRV_PROBE_AGAIN)) {
			DRV_LOG(ERR, "Device %s is already probed",
				pci_dev->device.name);
			ret = -EEXIST;
			goto probe_err;
		}
		ret = driver->pci_driver.probe(pci_drv, pci_dev);
		if (ret < 0) {
			DRV_LOG(ERR, "Failed to load driver %s",
				driver->pci_driver.driver.name);
			goto probe_err;
		}
		enabled_classes |= driver->driver_class;
	}
	dev->classes_loaded |= enabled_classes;
	return 0;
probe_err:
	/* Only unload the drivers which were enabled in this probe
	 * instance.
	 */
	drivers_remove(dev, enabled_classes);
	return ret;
}

/**
 * DPDK callback to probe multiple class drivers for a PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure.
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_common_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		      struct rte_pci_device *pci_dev)
{
	struct mlx5_pci_device *dev;
	uint32_t user_classes = 0;
	bool new_device = false;
	int ret;

	ret = parse_class_options(pci_dev->device.devargs);
	if (ret < 0)
		return ret;
	user_classes = ret;
	if (user_classes) {
		/* Validate the combination here. */
		ret = is_valid_class_combination(user_classes);
		if (ret) {
			DRV_LOG(ERR, "Unsupported mlx5 classes supplied.");
			return ret;
		}
	} else {
		/* Default to net class. */
		user_classes = MLX5_CLASS_ETH;
	}
	dev = pci_to_mlx5_device(pci_dev);
	if (!dev) {
		dev = rte_zmalloc("mlx5_pci_device", sizeof(*dev), 0);
		if (!dev)
			return -ENOMEM;
		dev->pci_dev = pci_dev;
		TAILQ_INSERT_HEAD(&devices_list, dev, next);
		new_device = true;
	}
	ret = drivers_probe(dev, pci_drv, pci_dev, user_classes);
	if (ret)
		goto class_err;
	return 0;
class_err:
	if (new_device)
		dev_release(dev);
	return ret;
}

/**
 * DPDK callback to remove one or more drivers for a PCI device.
 *
 * This function removes all drivers probed for a given PCI device.
 *
 * @param[in] pci_dev
 *   Pointer to the PCI device.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
static int
mlx5_common_pci_remove(struct rte_pci_device *pci_dev)
{
	struct mlx5_pci_device *dev;
	int ret;

	dev = pci_to_mlx5_device(pci_dev);
	if (!dev)
		return -ENODEV;
	/* Matching device found; clean up and unload drivers. */
	ret = drivers_remove(dev, dev->classes_loaded);
	if (!ret)
		dev_release(dev);
	return ret;
}

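/* Map a memory region for DMA in every class driver loaded on the device.
 * If one driver fails, the region is unmapped again from the drivers that
 * had already mapped it before the error is returned.
 */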
static int
mlx5_common_pci_dma_map(struct rte_pci_device *pci_dev, void *addr,
			uint64_t iova, size_t len)
{
	struct mlx5_pci_driver *driver = NULL;
	struct mlx5_pci_driver *temp;
	struct mlx5_pci_device *dev;
	int ret = -EINVAL;

	dev = pci_to_mlx5_device(pci_dev);
	if (!dev)
		return -ENODEV;
	TAILQ_FOREACH(driver, &drv_list, next) {
		if (device_class_enabled(dev, driver->driver_class) &&
		    driver->pci_driver.dma_map) {
			ret = driver->pci_driver.dma_map(pci_dev, addr,
							 iova, len);
			if (ret)
				goto map_err;
		}
	}
	return ret;
map_err:
	/* Unmap from the drivers that were mapped before the failing one. */
	TAILQ_FOREACH(temp, &drv_list, next) {
		if (temp == driver)
			break;
		if (device_class_enabled(dev, temp->driver_class) &&
		    temp->pci_driver.dma_map && temp->pci_driver.dma_unmap)
			temp->pci_driver.dma_unmap(pci_dev, addr, iova, len);
	}
	return ret;
}

static int
mlx5_common_pci_dma_unmap(struct rte_pci_device *pci_dev, void *addr,
			  uint64_t iova, size_t len)
{
	struct mlx5_pci_driver *driver;
	struct mlx5_pci_device *dev;
	int local_ret = -EINVAL;
	int ret;

	dev = pci_to_mlx5_device(pci_dev);
	if (!dev)
		return -ENODEV;
	ret = 0;
	/* There is no unmap error recovery in the current implementation. */
	TAILQ_FOREACH_REVERSE(driver, &drv_list, mlx5_pci_bus_drv_head, next) {
		if (device_class_enabled(dev, driver->driver_class) &&
		    driver->pci_driver.dma_unmap) {
			local_ret = driver->pci_driver.dma_unmap(pci_dev, addr,
								 iova, len);
			if (local_ret && (ret == 0))
				ret = local_ret;
		}
	}
	if (local_ret)
		ret = local_ret;
	return ret;
}

/* The PCI ID table is built dynamically from the registered mlx5 class
 * drivers.
 */
static struct rte_pci_id *mlx5_pci_id_table;

static struct rte_pci_driver mlx5_pci_driver = {
	.driver = {
		.name = MLX5_PCI_DRIVER_NAME,
	},
	.probe = mlx5_common_pci_probe,
	.remove = mlx5_common_pci_remove,
	.dma_map = mlx5_common_pci_dma_map,
	.dma_unmap = mlx5_common_pci_dma_unmap,
};

/* Return the number of entries in a PCI ID table, excluding the sentinel. */
static int
pci_id_table_size_get(const struct rte_pci_id *id_table)
{
	int table_size = 0;

	for (; id_table->vendor_id != 0; id_table++)
		table_size++;
	return table_size;
}

static bool
pci_id_exists(const struct rte_pci_id *id, const struct rte_pci_id *table,
	      int next_idx)
{
	int current_size = next_idx - 1;
	int i;

	for (i = 0; i < current_size; i++) {
		if (id->device_id == table[i].device_id &&
		    id->vendor_id == table[i].vendor_id &&
		    id->subsystem_vendor_id == table[i].subsystem_vendor_id &&
		    id->subsystem_device_id == table[i].subsystem_device_id)
			return true;
	}
	return false;
}

static void
pci_id_insert(struct rte_pci_id *new_table, int *next_idx,
	      const struct rte_pci_id *id_table)
{
	/* Traverse id_table, check whether each entry already exists in
	 * new_table, and add the non-duplicate entries to the new table.
	 */
	for (; id_table->vendor_id != 0; id_table++) {
		if (!pci_id_exists(id_table, new_table, *next_idx)) {
			/* New entry; add it to the table. */
			new_table[*next_idx] = *id_table;
			(*next_idx)++;
		}
	}
}

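/* Rebuild the driver-wide PCI ID table so that it also covers the IDs of a
 * newly registered class driver, keeping the existing entries and skipping
 * duplicates.
 */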
static int
pci_ids_table_update(const struct rte_pci_id *driver_id_table)
{
	const struct rte_pci_id *id_iter;
	struct rte_pci_id *updated_table;
	struct rte_pci_id *old_table;
	int num_ids = 0;
	int i = 0;

	old_table = mlx5_pci_id_table;
	if (old_table)
		num_ids = pci_id_table_size_get(old_table);
	num_ids += pci_id_table_size_get(driver_id_table);
	/* Increase size by one for the termination entry of vendor_id = 0. */
	num_ids += 1;
	updated_table = calloc(num_ids, sizeof(*updated_table));
	if (!updated_table)
		return -ENOMEM;
	if (TAILQ_EMPTY(&drv_list)) {
		/* Copy the first driver's ID table. */
		for (id_iter = driver_id_table; id_iter->vendor_id != 0;
		     id_iter++, i++)
			updated_table[i] = *id_iter;
	} else {
		/* First copy the existing table entries. */
		for (id_iter = old_table; id_iter->vendor_id != 0;
		     id_iter++, i++)
			updated_table[i] = *id_iter;
		/* New IDs are added at the end of the current ID table. */
		pci_id_insert(updated_table, &i, driver_id_table);
	}
	/* Terminate the table with an empty entry. */
	updated_table[i].vendor_id = 0;
	mlx5_pci_driver.id_table = updated_table;
	mlx5_pci_id_table = updated_table;
	if (old_table)
		free(old_table);
	return 0;
}

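/* A minimal registration sketch for a class driver. The mlx5_foo_* names
 * below are hypothetical and only illustrate the expected fields; real
 * class drivers supply their own PCI ID table and callbacks.
 *
 *	static struct mlx5_pci_driver mlx5_foo_driver = {
 *		.driver_class = MLX5_CLASS_REGEX,
 *		.pci_driver = {
 *			.driver = { .name = "mlx5_foo" },
 *			.id_table = mlx5_foo_pci_id_map,
 *			.probe = mlx5_foo_probe,
 *			.remove = mlx5_foo_remove,
 *		},
 *	};
 *
 *	mlx5_pci_driver_register(&mlx5_foo_driver);
 */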
void
mlx5_pci_driver_register(struct mlx5_pci_driver *driver)
{
	int ret;

	ret = pci_ids_table_update(driver->pci_driver.id_table);
	if (ret)
		return;
	mlx5_pci_driver.drv_flags |= driver->pci_driver.drv_flags;
	TAILQ_INSERT_TAIL(&drv_list, driver, next);
}

void mlx5_common_pci_init(void)
{
	const struct rte_pci_id empty_table[] = {
		{
			.vendor_id = 0
		},
	};

	/* All mlx5 PMD constructors run at the same priority, so any of the
	 * PMDs, including this one, can register the PCI ID table first. If
	 * another PMD has already registered a PCI ID table, there is no need
	 * to register an empty default one.
	 */
	if (mlx5_pci_id_table == NULL && pci_ids_table_update(empty_table))
		return;
	rte_pci_register(&mlx5_pci_driver);
}

RTE_FINI(mlx5_common_pci_finish)
{
	if (mlx5_pci_id_table != NULL) {
		/* The constructor does not register with the PCI bus if it
		 * failed to build the table.
		 */
		rte_pci_unregister(&mlx5_pci_driver);
		free(mlx5_pci_id_table);
	}
}

RTE_PMD_EXPORT_NAME(mlx5_common_pci, __COUNTER__);