1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2020 Mellanox Technologies Ltd
6 #include <rte_malloc.h>
9 #include "mlx5_common_log.h"
10 #include "mlx5_common_pci.h"
/* Bookkeeping for one probed PCI device: records which mlx5 device classes
 * (net/vdpa/regex/...) have been loaded on it via the classes_loaded bitmap.
 * NOTE(review): this view of the file is sampled; some original lines of the
 * struct body are not visible here.
 */
12 struct mlx5_pci_device {
13 struct rte_pci_device *pci_dev;
14 TAILQ_ENTRY(mlx5_pci_device) next;
/* Bitmap of MLX5_CLASS_* values currently probed on this device. */
15 uint32_t classes_loaded;
18 /* Head of list of drivers. */
19 static TAILQ_HEAD(mlx5_pci_bus_drv_head, mlx5_pci_driver) drv_list =
20 TAILQ_HEAD_INITIALIZER(drv_list);
22 /* Head of mlx5 pci devices. */
23 static TAILQ_HEAD(mlx5_pci_devices_head, mlx5_pci_device) devices_list =
24 TAILQ_HEAD_INITIALIZER(devices_list);
/* Map of devargs class names ("net", "vdpa", ...) to MLX5_CLASS_* bit values.
 * NOTE(review): the table's declaration line is missing from this sampled view;
 * only the driver_class field and the entries are visible.
 */
28 unsigned int driver_class;
30 { .name = "vdpa", .driver_class = MLX5_CLASS_VDPA },
31 { .name = "net", .driver_class = MLX5_CLASS_NET },
32 { .name = "regex", .driver_class = MLX5_CLASS_REGEX },
33 { .name = "compress", .driver_class = MLX5_CLASS_COMPRESS },
34 { .name = "crypto", .driver_class = MLX5_CLASS_CRYPTO },
/* Whitelist of allowed class bitmask combinations a user may request via
 * devargs. A user-supplied OR of classes must match one of these entries
 * exactly (see is_valid_class_combination()).
 */
37 static const unsigned int mlx5_class_combinations[] = {
43 MLX5_CLASS_NET | MLX5_CLASS_REGEX,
44 MLX5_CLASS_VDPA | MLX5_CLASS_REGEX,
45 MLX5_CLASS_NET | MLX5_CLASS_COMPRESS,
46 MLX5_CLASS_VDPA | MLX5_CLASS_COMPRESS,
47 MLX5_CLASS_REGEX | MLX5_CLASS_COMPRESS,
48 MLX5_CLASS_NET | MLX5_CLASS_CRYPTO,
49 MLX5_CLASS_VDPA | MLX5_CLASS_CRYPTO,
50 MLX5_CLASS_REGEX | MLX5_CLASS_CRYPTO,
51 MLX5_CLASS_COMPRESS | MLX5_CLASS_CRYPTO,
52 MLX5_CLASS_NET | MLX5_CLASS_REGEX | MLX5_CLASS_COMPRESS,
53 MLX5_CLASS_VDPA | MLX5_CLASS_REGEX | MLX5_CLASS_COMPRESS,
54 MLX5_CLASS_NET | MLX5_CLASS_REGEX | MLX5_CLASS_CRYPTO,
55 MLX5_CLASS_VDPA | MLX5_CLASS_REGEX | MLX5_CLASS_CRYPTO,
56 MLX5_CLASS_NET | MLX5_CLASS_COMPRESS | MLX5_CLASS_CRYPTO,
57 MLX5_CLASS_VDPA | MLX5_CLASS_COMPRESS | MLX5_CLASS_CRYPTO,
58 MLX5_CLASS_NET | MLX5_CLASS_REGEX | MLX5_CLASS_COMPRESS |
60 MLX5_CLASS_VDPA | MLX5_CLASS_REGEX | MLX5_CLASS_COMPRESS |
62 /* New class combination should be added here. */
/* Translate a devargs class name string to its MLX5_CLASS_* value by linear
 * scan of mlx5_classes. Presumably returns a negative/invalid value when the
 * name is unknown — the fall-through return is not visible in this sampled
 * view; confirm against the full source.
 */
66 class_name_to_value(const char *class_name)
70 for (i = 0; i < RTE_DIM(mlx5_classes); i++) {
71 if (strcmp(class_name, mlx5_classes[i].name) == 0)
72 return mlx5_classes[i].driver_class;
/* Look up the registered mlx5 class driver whose driver_class equals @class.
 * NOTE(review): the match/return and not-found paths are missing from this
 * sampled view; presumably returns the driver or NULL.
 */
77 static struct mlx5_pci_driver *
78 driver_get(uint32_t class)
80 struct mlx5_pci_driver *driver;
82 TAILQ_FOREACH(driver, &drv_list, next) {
83 if (driver->driver_class == class)
/* rte_kvargs callback for the "class" devargs key. Duplicates the value
 * string (strtok_r mutates its input) and splits it on ':' — e.g.
 * class=net:vdpa — translating each token via class_name_to_value(). Each
 * valid class bit is presumably OR-ed into the uint32_t pointed to by
 * @opaque; the accumulation and cleanup lines are missing from this sampled
 * view. Logs an error on an unrecognized class name.
 */
90 bus_cmdline_options_handler(__rte_unused const char *key,
91 const char *class_names, void *opaque)
/* Working copy: strtok_r() modifies the string it parses. */
101 nstr = strdup(class_names);
107 found = strtok_r(nstr, ":", &refstr);
111 /* Extract each individual class name. Multiple
112 * class key,value is supplied as class=net:vdpa:foo:bar.
114 class_val = class_name_to_value(found);
115 /* Check if its a valid class. */
121 found = strtok_r(NULL, ":", &refstr);
126 DRV_LOG(ERR, "Invalid mlx5 class options %s."
127 " Maybe typo in device class argument setting?",
/* Parse the device's devargs for the "class" key and hand the value string
 * to bus_cmdline_options_handler(). The NULL-devargs guard and the return
 * value handling are missing from this sampled view; presumably returns the
 * accumulated class bitmap or a negative errno.
 */
133 parse_class_options(const struct rte_devargs *devargs)
135 const char *key = RTE_DEVARGS_KEY_CLASS;
136 struct rte_kvargs *kvlist;
141 kvlist = rte_kvargs_parse(devargs->args, NULL);
144 if (rte_kvargs_count(kvlist, key))
145 rte_kvargs_process(kvlist, key, bus_cmdline_options_handler,
147 rte_kvargs_free(kvlist);
/* Check whether @pci_dev matches any entry of the class driver's PCI ID
 * table. Each field must either match exactly or be the ANY_ID wildcard.
 * The "continue on mismatch" / "return true on full match" lines are missing
 * from this sampled view; the id_table is terminated by vendor_id == 0.
 */
152 mlx5_bus_match(const struct mlx5_pci_driver *drv,
153 const struct rte_pci_device *pci_dev)
155 const struct rte_pci_id *id_table;
157 for (id_table = drv->pci_driver.id_table; id_table->vendor_id != 0;
159 /* Check if device's ids match the class driver's ids. */
160 if (id_table->vendor_id != pci_dev->id.vendor_id &&
161 id_table->vendor_id != RTE_PCI_ANY_ID)
163 if (id_table->device_id != pci_dev->id.device_id &&
164 id_table->device_id != RTE_PCI_ANY_ID)
166 if (id_table->subsystem_vendor_id !=
167 pci_dev->id.subsystem_vendor_id &&
168 id_table->subsystem_vendor_id != RTE_PCI_ANY_ID)
170 if (id_table->subsystem_device_id !=
171 pci_dev->id.subsystem_device_id &&
172 id_table->subsystem_device_id != RTE_PCI_ANY_ID)
174 if (id_table->class_id != pci_dev->id.class_id &&
175 id_table->class_id != RTE_CLASS_ANY_ID)
/* Validate the user-requested class bitmap against the static whitelist
 * mlx5_class_combinations[]. Exact-match only — no subset logic. The success
 * and failure return lines are missing from this sampled view; presumably
 * 0 on match and a negative errno otherwise.
 */
183 is_valid_class_combination(uint32_t user_classes)
187 /* Verify if user specified valid supported combination. */
188 for (i = 0; i < RTE_DIM(mlx5_class_combinations); i++) {
189 if (mlx5_class_combinations[i] == user_classes)
192 /* Not found any valid class combination. */
/* Find the mlx5_pci_device tracking entry for @pci_dev in devices_list by
 * pointer identity. The return-on-match and NULL fall-through lines are
 * missing from this sampled view.
 */
196 static struct mlx5_pci_device *
197 pci_to_mlx5_device(const struct rte_pci_device *pci_dev)
199 struct mlx5_pci_device *dev;
201 TAILQ_FOREACH(dev, &devices_list, next) {
202 if (dev->pci_dev == pci_dev)
/* Return true if any of the class bits in @class are already loaded on
 * @device (bitwise test against classes_loaded).
 */
209 device_class_enabled(const struct mlx5_pci_device *device, uint32_t class)
211 return (device->classes_loaded & class) ? true : false;
/* Unlink @dev from devices_list; the matching memory release (presumably
 * rte_free, pairing the rte_zmalloc in probe) is missing from this sampled
 * view.
 */
215 dev_release(struct mlx5_pci_device *dev)
217 TAILQ_REMOVE(&devices_list, dev, next);
/* Invoke the per-class remove() callback for every class in
 * @enabled_classes that is actually loaded on @dev, clearing each class bit
 * from classes_loaded on successful removal. Iterates bit-by-bit via an
 * index i whose declaration/increment lines are missing from this sampled
 * view. local_ret starts at -ENODEV so the function presumably fails when no
 * driver handled any class; confirm the final return against the full
 * source.
 */
222 drivers_remove(struct mlx5_pci_device *dev, uint32_t enabled_classes)
224 struct mlx5_pci_driver *driver;
225 int local_ret = -ENODEV;
/* Only attempt removal of classes that are actually loaded. */
229 enabled_classes &= dev->classes_loaded;
230 while (enabled_classes) {
231 driver = driver_get(RTE_BIT64(i));
233 local_ret = driver->pci_driver.remove(dev->pci_dev);
235 dev->classes_loaded &= ~RTE_BIT64(i);
/* Drop the bit we just processed and move to the next one. */
239 enabled_classes &= ~RTE_BIT64(i);
/* Probe every registered class driver selected by @user_classes against
 * @pci_dev. A driver is skipped when its class was not requested or its PCI
 * ID table does not match. Re-probing an already-loaded class is an error
 * unless the driver advertises RTE_PCI_DRV_PROBE_AGAIN. On any probe
 * failure, all classes enabled by *this* call are rolled back via
 * drivers_remove() — classes loaded by earlier probes are left intact.
 * Error-path goto lines are missing from this sampled view.
 */
248 drivers_probe(struct mlx5_pci_device *dev, struct rte_pci_driver *pci_drv,
249 struct rte_pci_device *pci_dev, uint32_t user_classes)
251 struct mlx5_pci_driver *driver;
252 uint32_t enabled_classes = 0;
256 TAILQ_FOREACH(driver, &drv_list, next) {
257 if ((driver->driver_class & user_classes) == 0)
259 if (!mlx5_bus_match(driver, pci_dev))
261 already_loaded = dev->classes_loaded & driver->driver_class;
262 if (already_loaded &&
263 !(driver->pci_driver.drv_flags & RTE_PCI_DRV_PROBE_AGAIN)) {
264 DRV_LOG(ERR, "Device %s is already probed",
265 pci_dev->device.name);
269 ret = driver->pci_driver.probe(pci_drv, pci_dev);
271 DRV_LOG(ERR, "Failed to load driver %s",
272 driver->pci_driver.driver.name);
/* Remember which classes this call successfully enabled. */
275 enabled_classes |= driver->driver_class;
277 dev->classes_loaded |= enabled_classes;
280 /* Only unload drivers which are enabled which were enabled
281 * in this probe instance.
283 drivers_remove(dev, enabled_classes);
288 * DPDK callback to register to probe multiple drivers for a PCI device.
291 * PCI driver structure.
293 * PCI device information.
296 * 0 on success, a negative errno value otherwise and rte_errno is set.
299 mlx5_common_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
300 struct rte_pci_device *pci_dev)
302 struct mlx5_pci_device *dev;
303 uint32_t user_classes = 0;
304 bool new_device = false;
/* Extract requested classes from the class=... devargs, if any. */
307 ret = parse_class_options(pci_dev->device.devargs);
312 /* Validate combination here. */
313 ret = is_valid_class_combination(user_classes);
315 DRV_LOG(ERR, "Unsupported mlx5 classes supplied.");
319 /* Default to net class. */
320 user_classes = MLX5_CLASS_NET;
/* Reuse the tracking entry on re-probe; otherwise allocate a fresh one.
 * NOTE(review): the new_device assignment and error/cleanup paths are
 * missing from this sampled view.
 */
322 dev = pci_to_mlx5_device(pci_dev);
324 dev = rte_zmalloc("mlx5_pci_device", sizeof(*dev), 0);
327 dev->pci_dev = pci_dev;
328 TAILQ_INSERT_HEAD(&devices_list, dev, next);
331 ret = drivers_probe(dev, pci_drv, pci_dev, user_classes);
342 * DPDK callback to remove one or more drivers for a PCI device.
344 * This function removes all drivers probed for a given PCI device.
347 * Pointer to the PCI device.
350 * 0 on success, the function cannot fail.
353 mlx5_common_pci_remove(struct rte_pci_device *pci_dev)
355 struct mlx5_pci_device *dev;
/* Locate the tracking entry; the not-found guard is missing from this
 * sampled view.
 */
358 dev = pci_to_mlx5_device(pci_dev);
361 /* Matching device found, cleanup and unload drivers. */
362 ret = drivers_remove(dev, dev->classes_loaded);
/* DMA-map callback fanned out to every enabled class driver that implements
 * dma_map. On failure part way through, previously mapped drivers are rolled
 * back: the second loop walks the list again and calls dma_unmap on each
 * driver up to (but not including) the one that failed — the
 * "stop at failing driver" comparison is among the lines missing from this
 * sampled view.
 */
369 mlx5_common_pci_dma_map(struct rte_pci_device *pci_dev, void *addr,
370 uint64_t iova, size_t len)
372 struct mlx5_pci_driver *driver = NULL;
373 struct mlx5_pci_driver *temp;
374 struct mlx5_pci_device *dev;
377 dev = pci_to_mlx5_device(pci_dev);
380 TAILQ_FOREACH(driver, &drv_list, next) {
381 if (device_class_enabled(dev, driver->driver_class) &&
382 driver->pci_driver.dma_map) {
383 ret = driver->pci_driver.dma_map(pci_dev, addr,
/* Error unwind: undo mappings made earlier in this call. */
391 TAILQ_FOREACH(temp, &drv_list, next) {
394 if (device_class_enabled(dev, temp->driver_class) &&
395 temp->pci_driver.dma_map && temp->pci_driver.dma_unmap)
396 temp->pci_driver.dma_unmap(pci_dev, addr, iova, len);
/* DMA-unmap callback fanned out to every enabled class driver that
 * implements dma_unmap, walking the driver list in REVERSE order (mirror of
 * the map order). The first failure is recorded but iteration continues —
 * there is no recovery for a failed unmap. Lines accumulating/returning ret
 * are missing from this sampled view.
 */
402 mlx5_common_pci_dma_unmap(struct rte_pci_device *pci_dev, void *addr,
403 uint64_t iova, size_t len)
405 struct mlx5_pci_driver *driver;
406 struct mlx5_pci_device *dev;
407 int local_ret = -EINVAL;
410 dev = pci_to_mlx5_device(pci_dev);
414 /* There is no unmap error recovery in current implementation. */
415 TAILQ_FOREACH_REVERSE(driver, &drv_list, mlx5_pci_bus_drv_head, next) {
416 if (device_class_enabled(dev, driver->driver_class) &&
417 driver->pci_driver.dma_unmap) {
418 local_ret = driver->pci_driver.dma_unmap(pci_dev, addr,
/* Keep only the first error; later successes must not clear it. */
420 if (local_ret && (ret == 0))
429 /* PCI ID table is build dynamically based on registered mlx5 drivers. */
430 static struct rte_pci_id *mlx5_pci_id_table;
/* The single rte_pci_driver registered with the PCI bus on behalf of all
 * mlx5 class drivers; its id_table is the merged table above.
 */
432 static struct rte_pci_driver mlx5_pci_driver = {
434 .name = MLX5_PCI_DRIVER_NAME,
436 .probe = mlx5_common_pci_probe,
437 .remove = mlx5_common_pci_remove,
438 .dma_map = mlx5_common_pci_dma_map,
439 .dma_unmap = mlx5_common_pci_dma_unmap,
/* Count entries in a PCI ID table up to (excluding) the vendor_id == 0
 * terminator. The counter declaration and return are missing from this
 * sampled view.
 */
443 pci_id_table_size_get(const struct rte_pci_id *id_table)
447 for (; id_table->vendor_id != 0; id_table++)
/* Check whether @id is already present among the first next_idx - 1 entries
 * of @table (class_id is deliberately not compared). NOTE(review): scanning
 * next_idx - 1 rather than next_idx entries looks intentional here but is
 * worth confirming against the full source. The match/miss returns are
 * missing from this sampled view.
 */
453 pci_id_exists(const struct rte_pci_id *id, const struct rte_pci_id *table,
456 int current_size = next_idx - 1;
459 for (i = 0; i < current_size; i++) {
460 if (id->device_id == table[i].device_id &&
461 id->vendor_id == table[i].vendor_id &&
462 id->subsystem_vendor_id == table[i].subsystem_vendor_id &&
463 id->subsystem_device_id == table[i].subsystem_device_id)
/* Append to @new_table (at position *next_idx) every entry of @id_table not
 * already present, per pci_id_exists(). The *next_idx increment line is
 * missing from this sampled view; the caller presumably sees the updated
 * count through the pointer.
 */
470 pci_id_insert(struct rte_pci_id *new_table, int *next_idx,
471 const struct rte_pci_id *id_table)
473 /* Traverse the id_table, check if entry exists in new_table;
474 * Add non duplicate entries to new table.
476 for (; id_table->vendor_id != 0; id_table++) {
477 if (!pci_id_exists(id_table, new_table, *next_idx)) {
478 /* New entry; add to the table. */
479 new_table[*next_idx] = *id_table;
/* Rebuild the merged PCI ID table when a new class driver registers:
 * allocate a table sized for old + new entries (+1 terminator), copy
 * existing entries, merge in the newcomer's non-duplicate IDs, terminate
 * with vendor_id = 0, and swap it into both mlx5_pci_driver.id_table and
 * mlx5_pci_id_table. NOTE(review): the free() of old_table, the calloc
 * failure path, and the return statements are missing from this sampled
 * view — confirm the old table is released against the full source.
 */
486 pci_ids_table_update(const struct rte_pci_id *driver_id_table)
488 const struct rte_pci_id *id_iter;
489 struct rte_pci_id *updated_table;
490 struct rte_pci_id *old_table;
494 old_table = mlx5_pci_id_table;
496 num_ids = pci_id_table_size_get(old_table);
497 num_ids += pci_id_table_size_get(driver_id_table);
498 /* Increase size by one for the termination entry of vendor_id = 0. */
500 updated_table = calloc(num_ids, sizeof(*updated_table));
503 if (TAILQ_EMPTY(&drv_list)) {
504 /* Copy the first driver's ID table. */
505 for (id_iter = driver_id_table; id_iter->vendor_id != 0;
507 updated_table[i] = *id_iter;
509 /* First copy existing table entries. */
510 for (id_iter = old_table; id_iter->vendor_id != 0;
512 updated_table[i] = *id_iter;
513 /* New id to be added at the end of current ID table. */
514 pci_id_insert(updated_table, &i, driver_id_table);
516 /* Terminate table with empty entry. */
517 updated_table[i].vendor_id = 0;
518 mlx5_pci_driver.id_table = updated_table;
519 mlx5_pci_id_table = updated_table;
/* Public entry for class drivers (net/vdpa/...) to register with the common
 * bus: merge the driver's PCI IDs into the shared table, propagate its
 * drv_flags onto the umbrella rte_pci_driver, and append it to drv_list.
 * The error check on pci_ids_table_update() and the return are missing from
 * this sampled view.
 */
526 mlx5_pci_driver_register(struct mlx5_pci_driver *driver)
530 ret = pci_ids_table_update(driver->pci_driver.id_table);
533 mlx5_pci_driver.drv_flags |= driver->pci_driver.drv_flags;
534 TAILQ_INSERT_TAIL(&drv_list, driver, next);
/* Constructor-time initialization: ensure an ID table exists (an empty,
 * terminator-only one if no PMD registered first) and register the umbrella
 * driver with the PCI bus. Skipped (early return implied by the condition)
 * when the empty-table build fails; some lines are missing from this sampled
 * view.
 */
537 void mlx5_common_pci_init(void)
539 const struct rte_pci_id empty_table[] = {
545 /* All mlx5 PMDs constructor runs at same priority. So any of the PMD
546 * including this one can register the PCI table first. If any other
547 * PMD(s) have registered the PCI ID table, No need to register an empty
550 if (mlx5_pci_id_table == NULL && pci_ids_table_update(empty_table))
552 rte_pci_register(&mlx5_pci_driver);
/* Destructor: unregister the umbrella PCI driver and free the dynamically
 * built ID table — but only if the table was ever built, since the
 * constructor does not register with the bus when table construction fails.
 */
555 RTE_FINI(mlx5_common_pci_finish)
557 if (mlx5_pci_id_table != NULL) {
558 /* Constructor doesn't register with PCI bus if it failed
559 * to build the table.
561 rte_pci_unregister(&mlx5_pci_driver);
562 free(mlx5_pci_id_table);
565 RTE_PMD_EXPORT_NAME(mlx5_common_pci, __COUNTER__);