/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2013-2014 6WIND S.A.
 */
#include <sys/queue.h>

#include <rte_errno.h>
#include <rte_interrupts.h>
#include <rte_bus_pci.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_memory.h>
#include <rte_eal_paging.h>
#include <rte_string_fns.h>
#include <rte_common.h>
#include <rte_devargs.h>

#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
const char *rte_pci_get_sysfs_path(void)
{
	const char *path = NULL;

#ifdef RTE_EXEC_ENV_LINUX
	path = getenv("SYSFS_PCI_DEVICES");
	if (path == NULL)
		return SYSFS_PCI_DEVICES;
#endif

	return path;
}
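/*
 * Look up the registered rte_devargs entry (if any) matching a PCI address,
 * by re-parsing the name of each "pci" devargs and comparing the resulting
 * address against the one given.
 */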
static struct rte_devargs *
pci_devargs_lookup(const struct rte_pci_addr *pci_addr)
	struct rte_devargs *devargs;
	struct rte_pci_addr addr;

	RTE_EAL_DEVARGS_FOREACH("pci", devargs) {
		devargs->bus->parse(devargs->name, &addr);
		if (!rte_pci_addr_cmp(pci_addr, &addr))
pci_name_set(struct rte_pci_device *dev)
	struct rte_devargs *devargs;

	/* Each device has its internal, canonical name set. */
	rte_pci_device_name(&dev->addr,
			dev->name, sizeof(dev->name));
	devargs = pci_devargs_lookup(&dev->addr);
	dev->device.devargs = devargs;

	/* When using a blocklist, only blocked devices will have
	 * an rte_devargs. Allowed devices won't have one.
	 */

	/* If an rte_devargs exists, the generic rte_device uses the
	 * given name as its name.
	 */
	if (devargs != NULL)
		dev->device.name = dev->device.devargs->name;
	else
		/* Otherwise, it uses the internal, canonical form. */
		dev->device.name = dev->name;
/* map a particular resource from a file */
pci_map_resource(void *requested_addr, int fd, off_t offset, size_t size,

	/* Map the PCI memory resource of the device. */
	mapaddr = rte_mem_map(requested_addr, size,
		RTE_PROT_READ | RTE_PROT_WRITE,
		RTE_MAP_SHARED | additional_flags, fd, offset);
	if (mapaddr == NULL) {
		RTE_LOG(ERR, EAL,
			"%s(): cannot map resource(%d, %p, 0x%zx, 0x%llx): %s (%p)\n",
			__func__, fd, requested_addr, size,
			(unsigned long long)offset,
			rte_strerror(rte_errno), mapaddr);
	} else
		RTE_LOG(DEBUG, EAL, " PCI memory mapped at %p\n", mapaddr);
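/*
 * Illustrative usage sketch only (not part of this file): the OS-specific
 * mapping code typically passes a BAR file descriptor and stores the result,
 * e.g. (fd, dev and i are hypothetical local variables):
 *
 *	void *addr = pci_map_resource(NULL, fd, 0,
 *			(size_t)dev->mem_resource[i].len, 0);
 *	if (addr != NULL)
 *		dev->mem_resource[i].addr = addr;
 *
 * The requested address, offset and extra mmap flags differ per kernel
 * driver (UIO vs. VFIO).
 */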
/* unmap a particular resource */
pci_unmap_resource(void *requested_addr, size_t size)
	if (requested_addr == NULL)
		return;

	/* Unmap the PCI memory resource of the device. */
	if (rte_mem_unmap(requested_addr, size)) {
		RTE_LOG(ERR, EAL, "%s(): cannot mem unmap(%p, %#zx): %s\n",
			__func__, requested_addr, size,
			rte_strerror(rte_errno));
	} else
		RTE_LOG(DEBUG, EAL, " PCI memory unmapped at %p\n",
/*
 * Match the PCI Driver and Device using the ID Table.
 */
rte_pci_match(const struct rte_pci_driver *pci_drv,
		const struct rte_pci_device *pci_dev)
	const struct rte_pci_id *id_table;

	for (id_table = pci_drv->id_table; id_table->vendor_id != 0;
		/* Check if the device's identifiers match the driver's. */
		if (id_table->vendor_id != pci_dev->id.vendor_id &&
				id_table->vendor_id != RTE_PCI_ANY_ID)
			continue;
		if (id_table->device_id != pci_dev->id.device_id &&
				id_table->device_id != RTE_PCI_ANY_ID)
			continue;
		if (id_table->subsystem_vendor_id !=
				pci_dev->id.subsystem_vendor_id &&
				id_table->subsystem_vendor_id != RTE_PCI_ANY_ID)
			continue;
		if (id_table->subsystem_device_id !=
				pci_dev->id.subsystem_device_id &&
				id_table->subsystem_device_id != RTE_PCI_ANY_ID)
			continue;
		if (id_table->class_id != pci_dev->id.class_id &&
				id_table->class_id != RTE_CLASS_ANY_ID)
			continue;
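/*
 * For reference, a driver's id_table is an array terminated by an entry with
 * vendor_id == 0. A minimal, hypothetical table might look like:
 *
 *	static const struct rte_pci_id example_id_map[] = {
 *		{ RTE_PCI_DEVICE(0x8086, 0x1234) },
 *		{ .vendor_id = 0 },	// sentinel: ends the table
 *	};
 *
 * rte_pci_match() reports a match as soon as one entry's non-wildcard fields
 * all equal the device's identifiers.
 */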
/*
 * If vendor/device ID match, call the probe() function of the driver.
 */
rte_pci_probe_one_driver(struct rte_pci_driver *dr,
		struct rte_pci_device *dev)
	struct rte_pci_addr *loc;

	if ((dr == NULL) || (dev == NULL))

	/* The device is not blocked; check if the driver supports it. */
	if (!rte_pci_match(dr, dev))
		/* Match of device and driver failed */

	RTE_LOG(DEBUG, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
			loc->domain, loc->bus, loc->devid, loc->function,
			dev->device.numa_node);

	/* No initialization when marked as blocked; return without error. */
	if (dev->device.devargs != NULL &&
			dev->device.devargs->policy == RTE_DEV_BLOCKED) {
		RTE_LOG(INFO, EAL, " Device is blocked, not initializing\n");

	if (dev->device.numa_node < 0) {
		if (rte_socket_count() > 1)
			RTE_LOG(INFO, EAL, "Device %s is not NUMA-aware, defaulting socket to 0\n",
		dev->device.numa_node = 0;

	already_probed = rte_dev_is_probed(&dev->device);
	if (already_probed && !(dr->drv_flags & RTE_PCI_DRV_PROBE_AGAIN)) {
		RTE_LOG(DEBUG, EAL, "Device %s is already probed\n",

	RTE_LOG(DEBUG, EAL, " probe driver: %x:%x %s\n", dev->id.vendor_id,
			dev->id.device_id, dr->driver.name);

	/*
	 * Reference the driver structure.
	 * This needs to happen before rte_pci_map_device(), as it allows
	 * driver flags to be used for adjusting the configuration.
	 */
	if (!already_probed) {
		enum rte_iova_mode dev_iova_mode;
		enum rte_iova_mode iova_mode;

		dev_iova_mode = pci_device_iova_mode(dr, dev);
		iova_mode = rte_eal_iova_mode();
		if (dev_iova_mode != RTE_IOVA_DC &&
				dev_iova_mode != iova_mode) {
			RTE_LOG(ERR, EAL, " Expecting '%s' IOVA mode but current mode is '%s', not initializing\n",
				dev_iova_mode == RTE_IOVA_PA ? "PA" : "VA",
				iova_mode == RTE_IOVA_PA ? "PA" : "VA");

		/* Allocate an interrupt instance for the PCI device. */
		dev->intr_handle =
			rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
		if (dev->intr_handle == NULL) {
			RTE_LOG(ERR, EAL,
				"Failed to create interrupt instance for %s\n",

		dev->vfio_req_intr_handle =
			rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
		if (dev->vfio_req_intr_handle == NULL) {
			rte_intr_instance_free(dev->intr_handle);
			dev->intr_handle = NULL;
			RTE_LOG(ERR, EAL,
				"Failed to create vfio req interrupt instance for %s\n",

		if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) {
			ret = rte_pci_map_device(dev);
				rte_intr_instance_free(dev->vfio_req_intr_handle);
				dev->vfio_req_intr_handle = NULL;
				rte_intr_instance_free(dev->intr_handle);
				dev->intr_handle = NULL;

	RTE_LOG(INFO, EAL, "Probe PCI driver: %s (%x:%x) device: "PCI_PRI_FMT" (socket %i)\n",
			dr->driver.name, dev->id.vendor_id, dev->id.device_id,
			loc->domain, loc->bus, loc->devid, loc->function,
			dev->device.numa_node);
	/* Call the driver probe() function. */
	ret = dr->probe(dr, dev);
		return ret; /* no rollback if already succeeded earlier */

	if ((dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) &&
			/* Don't unmap if the device is unsupported and
			 * the driver needs mapped resources.
			 */
			(dr->drv_flags & RTE_PCI_DRV_KEEP_MAPPED_RES)))
		rte_pci_unmap_device(dev);
	rte_intr_instance_free(dev->vfio_req_intr_handle);
	dev->vfio_req_intr_handle = NULL;
	rte_intr_instance_free(dev->intr_handle);
	dev->intr_handle = NULL;

	dev->device.driver = &dr->driver;
/*
 * If vendor/device ID match, call the remove() function of the driver.
 */
rte_pci_detach_dev(struct rte_pci_device *dev)
	struct rte_pci_addr *loc;
	struct rte_pci_driver *dr;

	RTE_LOG(DEBUG, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
			loc->domain, loc->bus, loc->devid,
			loc->function, dev->device.numa_node);

	RTE_LOG(DEBUG, EAL, " remove driver: %x:%x %s\n", dev->id.vendor_id,
			dev->id.device_id, dr->driver.name);

	ret = dr->remove(dev);

	/* clear driver structure */
	dev->device.driver = NULL;

	if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING)
		/* unmap resources for devices that use igb_uio */
		rte_pci_unmap_device(dev);

	rte_intr_instance_free(dev->intr_handle);
	dev->intr_handle = NULL;
	rte_intr_instance_free(dev->vfio_req_intr_handle);
	dev->vfio_req_intr_handle = NULL;
/*
 * If vendor/device ID match, call the probe() function of all
 * registered drivers for the given device. Return < 0 if initialization
 * failed, return 1 if no driver is found for this device.
 */
pci_probe_all_drivers(struct rte_pci_device *dev)
	struct rte_pci_driver *dr = NULL;

	FOREACH_DRIVER_ON_PCIBUS(dr) {
		rc = rte_pci_probe_one_driver(dr, dev);
		/* negative value is an error */
		/* positive value means the driver doesn't support it */

/*
 * Scan the content of the PCI bus, and call the probe() function for
 * all registered drivers that have a matching entry in their id_table
 * for discovered devices.
 */
	struct rte_pci_device *dev = NULL;
	size_t probed = 0, failed = 0;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		ret = pci_probe_all_drivers(dev);
		if (ret != -EEXIST) {
			RTE_LOG(ERR, EAL, "Requested device "
				PCI_PRI_FMT " cannot be used\n",
				dev->addr.domain, dev->addr.bus,
				dev->addr.devid, dev->addr.function);

	return (probed && probed == failed) ? -1 : 0;
/* dump one device */
pci_dump_one_device(FILE *f, struct rte_pci_device *dev)
	fprintf(f, PCI_PRI_FMT, dev->addr.domain, dev->addr.bus,
		dev->addr.devid, dev->addr.function);
	fprintf(f, " - vendor:%x device:%x\n", dev->id.vendor_id,

	for (i = 0; i != sizeof(dev->mem_resource) /
			sizeof(dev->mem_resource[0]); i++) {
		fprintf(f, " %16.16"PRIx64" %16.16"PRIx64"\n",
			dev->mem_resource[i].phys_addr,
			dev->mem_resource[i].len);

/* dump devices on the bus */
rte_pci_dump(FILE *f)
	struct rte_pci_device *dev = NULL;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		pci_dump_one_device(f, dev);
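/*
 * Parse a PCI device name (DomBDF notation, e.g. "0000:00:01.0") into a PCI
 * address. Follows the bus parse() convention: returns 0 when the name could
 * be parsed as a PCI address, nonzero otherwise.
 */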
pci_parse(const char *name, void *addr)
	struct rte_pci_addr *out = addr;
	struct rte_pci_addr pci_addr;

	parse = (rte_pci_addr_parse(name, &pci_addr) == 0);
	if (parse && addr != NULL)

	return parse == false;
/* register a driver */
rte_pci_register(struct rte_pci_driver *driver)
	TAILQ_INSERT_TAIL(&rte_pci_bus.driver_list, driver, next);
	driver->bus = &rte_pci_bus;
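/*
 * Note: PMDs normally do not call rte_pci_register() directly; they use the
 * RTE_PMD_REGISTER_PCI() constructor macro. Illustrative only (the driver and
 * table names below are made up):
 *
 *	RTE_PMD_REGISTER_PCI(net_example, example_pci_driver);
 *	RTE_PMD_REGISTER_PCI_TABLE(net_example, example_id_map);
 *
 * so the driver is already on the bus driver list when the bus is probed.
 */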
/* unregister a driver */
rte_pci_unregister(struct rte_pci_driver *driver)
	TAILQ_REMOVE(&rte_pci_bus.driver_list, driver, next);

/* Add a device to PCI bus */
rte_pci_add_device(struct rte_pci_device *pci_dev)
	TAILQ_INSERT_TAIL(&rte_pci_bus.device_list, pci_dev, next);

/* Insert a device into a predefined position in PCI bus */
rte_pci_insert_device(struct rte_pci_device *exist_pci_dev,
		struct rte_pci_device *new_pci_dev)
	TAILQ_INSERT_BEFORE(exist_pci_dev, new_pci_dev, next);

/* Remove a device from PCI bus */
rte_pci_remove_device(struct rte_pci_device *pci_dev)
	TAILQ_REMOVE(&rte_pci_bus.device_list, pci_dev, next);
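/*
 * Implementation of the bus find_device() callback: walk the PCI device list,
 * starting right after 'start' (or from the head when start is NULL), and
 * return the first device for which cmp() reports a match.
 */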
static struct rte_device *
pci_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
	const struct rte_pci_device *pstart;
	struct rte_pci_device *pdev;

	pstart = RTE_DEV_TO_PCI_CONST(start);
	pdev = TAILQ_NEXT(pstart, next);
	pdev = TAILQ_FIRST(&rte_pci_bus.device_list);

	while (pdev != NULL) {
		if (cmp(&pdev->device, data) == 0)
			return &pdev->device;
		pdev = TAILQ_NEXT(pdev, next);
/*
 * Find the device that encountered the failure by iterating over all devices
 * on the PCI bus and checking whether the memory failure address lies within
 * the range of one of the device's BARs.
 */
static struct rte_pci_device *
pci_find_device_by_addr(const void *failure_addr)
	struct rte_pci_device *pdev = NULL;
	uint64_t check_point, start, end, len;

	check_point = (uint64_t)(uintptr_t)failure_addr;

	FOREACH_DEVICE_ON_PCIBUS(pdev) {
		for (i = 0; i != RTE_DIM(pdev->mem_resource); i++) {
			start = (uint64_t)(uintptr_t)pdev->mem_resource[i].addr;
			len = pdev->mem_resource[i].len;
			if (check_point >= start && check_point < end) {
				RTE_LOG(DEBUG, EAL, "Failure address %16.16"
					PRIx64" belongs to device %s!\n",
					check_point, pdev->device.name);
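/*
 * Bus hot_unplug_handler() callback: react to the removal of a PCI device
 * according to the kernel driver that was bound to it.
 */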
pci_hot_unplug_handler(struct rte_device *dev)
	struct rte_pci_device *pdev = NULL;

	pdev = RTE_DEV_TO_PCI(dev);

	switch (pdev->kdrv) {
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
	case RTE_PCI_KDRV_VFIO:
		/*
		 * The VFIO kernel module guarantees that the PCI device will
		 * not be deleted until user space releases its resources, so
		 * there is no need to remap the BAR resources here; just
		 * forward the request event to user space to handle.
		 */
		rte_dev_event_callback_process(dev->name,
			RTE_DEV_EVENT_REMOVE);
	case RTE_PCI_KDRV_IGB_UIO:
	case RTE_PCI_KDRV_UIO_GENERIC:
	case RTE_PCI_KDRV_NIC_UIO:
		/* The BAR resources are invalid; remap them to be safe. */
		ret = pci_uio_remap_resource(pdev);
		"Not managed by a supported kernel driver, skipped\n");
pci_sigbus_handler(const void *failure_addr)
	struct rte_pci_device *pdev = NULL;

	pdev = pci_find_device_by_addr(failure_addr);
	/* It is a generic sigbus error, no bus would handle it. */

	/* The sigbus error is caused by hot-unplug. */
	ret = pci_hot_unplug_handler(&pdev->device);
		"Failed to handle hot-unplug for device %s",
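/* Bus plug() callback: probe all registered drivers against the given device. */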
pci_plug(struct rte_device *dev)
	return pci_probe_all_drivers(RTE_DEV_TO_PCI(dev));

pci_unplug(struct rte_device *dev)
	struct rte_pci_device *pdev;

	pdev = RTE_DEV_TO_PCI(dev);
	ret = rte_pci_detach_dev(pdev);
	rte_pci_remove_device(pdev);
	rte_devargs_remove(dev->devargs);
pci_dma_map(struct rte_device *dev, void *addr, uint64_t iova, size_t len)
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev);

	if (!pdev || !pdev->driver) {

	if (pdev->driver->dma_map)
		return pdev->driver->dma_map(pdev, addr, iova, len);
	/*
	 * If the driver doesn't provide a specific mapping, try to
	 * fall back to VFIO.
	 */
	if (pdev->kdrv == RTE_PCI_KDRV_VFIO)
		return rte_vfio_container_dma_map
				(RTE_VFIO_DEFAULT_CONTAINER_FD, (uintptr_t)addr,

pci_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova, size_t len)
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev);

	if (!pdev || !pdev->driver) {

	if (pdev->driver->dma_unmap)
		return pdev->driver->dma_unmap(pdev, addr, iova, len);
	/*
	 * If the driver doesn't provide a specific mapping, try to
	 * fall back to VFIO.
	 */
	if (pdev->kdrv == RTE_PCI_KDRV_VFIO)
		return rte_vfio_container_dma_unmap
				(RTE_VFIO_DEFAULT_CONTAINER_FD, (uintptr_t)addr,
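/*
 * Decide whether a scanned device must be ignored, based on the bus scan
 * mode: in allowlist mode only devices with an "allowed" devargs entry are
 * kept, while in blocklist (or undefined) mode devices are kept unless they
 * are explicitly blocked.
 */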
rte_pci_ignore_device(const struct rte_pci_addr *pci_addr)
	struct rte_devargs *devargs = pci_devargs_lookup(pci_addr);

	switch (rte_pci_bus.bus.conf.scan_mode) {
	case RTE_BUS_SCAN_ALLOWLIST:
		if (devargs && devargs->policy == RTE_DEV_ALLOWED)
	case RTE_BUS_SCAN_UNDEFINED:
	case RTE_BUS_SCAN_BLOCKLIST:
		if (devargs == NULL || devargs->policy != RTE_DEV_BLOCKED)
rte_pci_get_iommu_class(void)
	enum rte_iova_mode iova_mode = RTE_IOVA_DC;
	const struct rte_pci_device *dev;
	const struct rte_pci_driver *drv;
	bool devices_want_va = false;
	bool devices_want_pa = false;
	int iommu_no_va = -1;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		/*
		 * We can check this only once, because the IOMMU hardware is
		 * the same for all of them.
		 */
		if (iommu_no_va == -1)
			iommu_no_va = pci_device_iommu_support_va(dev)

		if (dev->kdrv == RTE_PCI_KDRV_UNKNOWN ||
				dev->kdrv == RTE_PCI_KDRV_NONE)
		FOREACH_DRIVER_ON_PCIBUS(drv) {
			enum rte_iova_mode dev_iova_mode;

			if (!rte_pci_match(drv, dev))

			dev_iova_mode = pci_device_iova_mode(drv, dev);
			RTE_LOG(DEBUG, EAL, "PCI driver %s for device "
				PCI_PRI_FMT " wants IOVA as '%s'\n",
				dev->addr.domain, dev->addr.bus,
				dev->addr.devid, dev->addr.function,
				dev_iova_mode == RTE_IOVA_DC ? "DC" :
				(dev_iova_mode == RTE_IOVA_PA ? "PA" : "VA"));
			if (dev_iova_mode == RTE_IOVA_PA)
				devices_want_pa = true;
			else if (dev_iova_mode == RTE_IOVA_VA)
				devices_want_va = true;

	if (iommu_no_va == 1) {
		iova_mode = RTE_IOVA_PA;
		if (devices_want_va) {
			RTE_LOG(WARNING, EAL, "Some devices want 'VA' but IOMMU does not support 'VA'.\n");
			RTE_LOG(WARNING, EAL, "The devices that want 'VA' won't initialize.\n");
	} else if (devices_want_va && !devices_want_pa) {
		iova_mode = RTE_IOVA_VA;
	} else if (devices_want_pa && !devices_want_va) {
		iova_mode = RTE_IOVA_PA;
		iova_mode = RTE_IOVA_DC;
		if (devices_want_va) {
			RTE_LOG(WARNING, EAL, "Some devices want 'VA' but forcing 'DC' because other devices want 'PA'.\n");
			RTE_LOG(WARNING, EAL, "Depending on the final decision by the EAL, not all devices may be able to initialize.\n");
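/*
 * Walk the PCI Express extended capability list in configuration space
 * (it starts right after the legacy 256-byte config space) and return the
 * offset of the requested capability, or 0 if it is not present.
 */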
rte_pci_find_ext_capability(struct rte_pci_device *dev, uint32_t cap)
	off_t offset = RTE_PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (RTE_PCI_CFG_SPACE_EXP_SIZE - RTE_PCI_CFG_SPACE_SIZE) / 8;

	if (rte_pci_read_config(dev, &header, 4, offset) < 0) {
		RTE_LOG(ERR, EAL, "error in reading extended capabilities\n");

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */

		if (RTE_PCI_EXT_CAP_ID(header) == cap)

		offset = RTE_PCI_EXT_CAP_NEXT(header);

		if (offset < RTE_PCI_CFG_SPACE_SIZE)

		if (rte_pci_read_config(dev, &header, 4, offset) < 0) {
			RTE_LOG(ERR, EAL,
				"error in reading extended capabilities\n");
rte_pci_set_bus_master(struct rte_pci_device *dev, bool enable)
	uint16_t old_cmd, cmd;

	if (rte_pci_read_config(dev, &old_cmd, sizeof(old_cmd),
			RTE_PCI_COMMAND) < 0) {
		RTE_LOG(ERR, EAL, "error in reading PCI command register\n");

	if (enable)
		cmd = old_cmd | RTE_PCI_COMMAND_MASTER;
	else
		cmd = old_cmd & ~RTE_PCI_COMMAND_MASTER;

	if (rte_pci_write_config(dev, &cmd, sizeof(cmd),
			RTE_PCI_COMMAND) < 0) {
		RTE_LOG(ERR, EAL, "error in writing PCI command register\n");
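/*
 * Illustrative only (not part of this file): a PMD that performs DMA would
 * typically enable bus mastering once during probe, e.g.:
 *
 *	if (rte_pci_set_bus_master(pci_dev, true) != 0)
 *		return -EIO;
 *
 * and may clear it again when the device is closed.
 */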
struct rte_pci_bus rte_pci_bus = {
	.scan = rte_pci_scan,
	.find_device = pci_find_device,
	.unplug = pci_unplug,
	.devargs_parse = rte_pci_devargs_parse,
	.dma_map = pci_dma_map,
	.dma_unmap = pci_dma_unmap,
	.get_iommu_class = rte_pci_get_iommu_class,
	.dev_iterate = rte_pci_dev_iterate,
	.hot_unplug_handler = pci_hot_unplug_handler,
	.sigbus_handler = pci_sigbus_handler,
	.device_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.device_list),
	.driver_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.driver_list),

RTE_REGISTER_BUS(pci, rte_pci_bus.bus);