1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
11 #include <rte_bus_pci.h>
12 #include <rte_eal_memconfig.h>
13 #include <rte_malloc.h>
14 #include <rte_devargs.h>
15 #include <rte_memcpy.h>
18 #include "eal_filesystem.h"
25 * PCI probing under linux
27 * This code is used to simulate a PCI probe by parsing information in sysfs.
28 * When a registered device matches a driver, it is then initialized with
29 * IGB_UIO driver (or doesn't initialize, if the device wasn't bound to it).
32 extern struct rte_pci_bus rte_pci_bus;
/*
 * Resolve the kernel driver a PCI device is bound to by reading the
 * sysfs "driver" symlink at @filename into @dri_name (buffer of @len bytes).
 * NOTE(review): interior lines of this function are elided in this excerpt;
 * comments below describe only the visible statements.
 */
35 pci_get_kernel_driver_by_path(const char *filename, char *dri_name,
/* reject NULL input/output pointers */
42 if (!filename || !dri_name)
/* read the symlink target; the driver name is its last path component */
45 count = readlink(filename, path, PATH_MAX);
46 if (count >= PATH_MAX)
49 /* the device does not have a driver bound to it */
/* take the component after the final '/' and copy it to the caller */
55 name = strrchr(path, '/');
57 strlcpy(dri_name, name + 1, len);
/*
 * Map a PCI device's resources into the current process, selecting the
 * VFIO or UIO path according to the kernel driver the device is bound to.
 * NOTE(review): interior lines are elided in this excerpt.
 */
66 rte_pci_map_device(struct rte_pci_device *dev)
70 /* try mapping the NIC resources using VFIO if it exists */
74 if (pci_vfio_is_enabled())
75 ret = pci_vfio_map_resource(dev);
78 case RTE_KDRV_IGB_UIO:
79 case RTE_KDRV_UIO_GENERIC:
/* UIO mapping relies on physical addresses being available to EAL */
80 if (rte_eal_using_phys_addrs()) {
81 /* map resources for devices that use uio */
82 ret = pci_uio_map_resource(dev);
87 " Not managed by a supported kernel driver, skipped\n");
95 /* Unmap a PCI device's resources, mirroring rte_pci_map_device(). */
97 rte_pci_unmap_device(struct rte_pci_device *dev)
99 /* try unmapping the NIC resources using VFIO if it exists */
103 if (pci_vfio_is_enabled())
104 pci_vfio_unmap_resource(dev);
107 case RTE_KDRV_IGB_UIO:
108 case RTE_KDRV_UIO_GENERIC:
109 /* unmap resources for devices that use uio */
110 pci_uio_unmap_resource(dev);
114 " Not managed by a supported kernel driver, skipped\n");
/*
 * Memseg-list walk callback: track the highest end virtual address seen.
 * NOTE(review): `max_va` is presumably derived from @arg (a void** holding
 * the running maximum) — the declaring line is elided here; confirm.
 */
120 find_max_end_va(const struct rte_memseg_list *msl, void *arg)
122 size_t sz = msl->len;
/* one-past-the-end VA of this segment list's mapping */
123 void *end_va = RTE_PTR_ADD(msl->base_va, sz);
126 if (*max_va < end_va)
/* Return the highest end VA across all memseg lists, found by walking
 * them with the find_max_end_va() callback. */
132 pci_find_max_end_va(void)
136 rte_memseg_list_walk(find_max_end_va, &va);
141 /* parse one line of the "resource" sysfs file (note that the 'line'
142 * string is modified)
/*
 * Parse one line of the sysfs "resource" file into its three hex fields:
 * physical start address, end address and flags. @line is modified in place
 * by rte_strsplit(). NOTE(review): interior lines are elided in this excerpt.
 */
145 pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
146 uint64_t *end_addr, uint64_t *flags)
/* overlay named fields on an array of string pointers for easy splitting */
148 union pci_resource_info {
154 char *ptrs[PCI_RESOURCE_FMT_NVAL];
/* a resource line must split into exactly three space-separated fields */
157 if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3) {
159 "%s(): bad resource format\n", __func__);
/* convert the hexadecimal strings to integers */
163 *phys_addr = strtoull(res_info.phys_addr, NULL, 16);
164 *end_addr = strtoull(res_info.end_addr, NULL, 16);
165 *flags = strtoull(res_info.flags, NULL, 16);
168 "%s(): bad resource format\n", __func__);
175 /* parse the "resource" sysfs file */
/*
 * Read the sysfs "resource" file @filename and fill dev->mem_resource[].
 * Only memory-mapped resources (IORESOURCE_MEM) are recorded in the visible
 * code. NOTE(review): interior/error-path lines are elided in this excerpt.
 */
177 pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
182 uint64_t phys_addr, end_addr, flags;
184 f = fopen(filename, "r");
186 RTE_LOG(ERR, EAL, "Cannot open sysfs resource\n");
/* one line per BAR, up to PCI_MAX_RESOURCE entries */
190 for (i = 0; i<PCI_MAX_RESOURCE; i++) {
192 if (fgets(buf, sizeof(buf), f) == NULL) {
194 "%s(): cannot read resource\n", __func__);
197 if (pci_parse_one_sysfs_resource(buf, sizeof(buf), &phys_addr,
198 &end_addr, &flags) < 0)
/* record only memory resources; length is inclusive of both endpoints */
201 if (flags & IORESOURCE_MEM) {
202 dev->mem_resource[i].phys_addr = phys_addr;
203 dev->mem_resource[i].len = end_addr - phys_addr + 1;
204 /* not mapped for now */
205 dev->mem_resource[i].addr = NULL;
216 /* Scan one pci sysfs entry, and fill the devices list from it. */
/*
 * Scan one sysfs PCI directory @dirname, build an rte_pci_device for the
 * device at @addr, and insert it (address-sorted) into the bus device list,
 * or refresh the existing entry if the address is already registered.
 * NOTE(review): many interior lines (error paths, returns) are elided in
 * this excerpt; comments describe only the visible statements.
 */
218 pci_scan_one(const char *dirname, const struct rte_pci_addr *addr)
220 char filename[PATH_MAX];
222 struct rte_pci_device *dev;
223 char driver[PATH_MAX];
226 dev = malloc(sizeof(*dev));
230 memset(dev, 0, sizeof(*dev));
231 dev->device.bus = &rte_pci_bus.bus;
/* get vendor id */
235 snprintf(filename, sizeof(filename), "%s/vendor", dirname);
236 if (eal_parse_sysfs_value(filename, &tmp) < 0) {
240 dev->id.vendor_id = (uint16_t)tmp;
/* get device id */
243 snprintf(filename, sizeof(filename), "%s/device", dirname);
244 if (eal_parse_sysfs_value(filename, &tmp) < 0) {
248 dev->id.device_id = (uint16_t)tmp;
250 /* get subsystem_vendor id */
251 snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
253 if (eal_parse_sysfs_value(filename, &tmp) < 0) {
257 dev->id.subsystem_vendor_id = (uint16_t)tmp;
259 /* get subsystem_device id */
260 snprintf(filename, sizeof(filename), "%s/subsystem_device",
262 if (eal_parse_sysfs_value(filename, &tmp) < 0) {
266 dev->id.subsystem_device_id = (uint16_t)tmp;
/* get class id */
269 snprintf(filename, sizeof(filename), "%s/class",
271 if (eal_parse_sysfs_value(filename, &tmp) < 0) {
275 /* the least 24 bits are valid: class, subclass, program interface */
276 dev->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;
/* get max_vfs from the "max_vfs" attribute, if the file exists */
280 snprintf(filename, sizeof(filename), "%s/max_vfs", dirname);
281 if (!access(filename, F_OK) &&
282 eal_parse_sysfs_value(filename, &tmp) == 0)
283 dev->max_vfs = (uint16_t)tmp;
285 /* for non igb_uio driver, need kernel version >= 3.8 */
286 snprintf(filename, sizeof(filename),
287 "%s/sriov_numvfs", dirname);
288 if (!access(filename, F_OK) &&
289 eal_parse_sysfs_value(filename, &tmp) == 0)
290 dev->max_vfs = (uint16_t)tmp;
293 /* get numa node, default to 0 if not present */
294 snprintf(filename, sizeof(filename), "%s/numa_node",
297 if (access(filename, F_OK) != -1) {
298 if (eal_parse_sysfs_value(filename, &tmp) == 0)
299 dev->device.numa_node = tmp;
/* numa node could not be read: mark it as unknown */
301 dev->device.numa_node = -1;
303 dev->device.numa_node = 0;
308 /* parse resources */
309 snprintf(filename, sizeof(filename), "%s/resource", dirname);
310 if (pci_parse_sysfs_resource(filename, dev) < 0) {
311 RTE_LOG(ERR, EAL, "%s(): cannot parse resource\n", __func__);
/* determine which kernel driver the device is bound to */
317 snprintf(filename, sizeof(filename), "%s/driver", dirname);
318 ret = pci_get_kernel_driver_by_path(filename, driver, sizeof(driver));
320 RTE_LOG(ERR, EAL, "Fail to get kernel driver\n");
/* map the driver name string onto the rte_kernel_driver enum */
326 if (!strcmp(driver, "vfio-pci"))
327 dev->kdrv = RTE_KDRV_VFIO;
328 else if (!strcmp(driver, "igb_uio"))
329 dev->kdrv = RTE_KDRV_IGB_UIO;
330 else if (!strcmp(driver, "uio_pci_generic"))
331 dev->kdrv = RTE_KDRV_UIO_GENERIC;
333 dev->kdrv = RTE_KDRV_UNKNOWN;
335 dev->kdrv = RTE_KDRV_NONE;
337 /* device is valid, add in list (sorted) */
338 if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
339 rte_pci_add_device(dev);
341 struct rte_pci_device *dev2;
/* walk the address-sorted list to find the insertion point */
344 TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
345 ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr);
350 rte_pci_insert_device(dev2, dev);
351 } else { /* already registered */
/* refresh the existing entry instead of adding a duplicate */
352 dev2->kdrv = dev->kdrv;
353 dev2->max_vfs = dev->max_vfs;
355 memmove(dev2->mem_resource, dev->mem_resource,
356 sizeof(dev->mem_resource));
/* address sorts after every list entry: append at the tail */
362 rte_pci_add_device(dev);
/* Re-scan the sysfs entry of the device at @addr to refresh its data in
 * the bus device list; returns the result of pci_scan_one(). */
369 pci_update_device(const struct rte_pci_addr *addr)
371 char filename[PATH_MAX];
/* build "<sysfs pci path>/DDDD:BB:DD.F" for this address */
373 snprintf(filename, sizeof(filename), "%s/" PCI_PRI_FMT,
374 rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid,
377 return pci_scan_one(filename, addr);
381 * split up a pci address into its constituent parts.
/*
 * Split a "DDDD:BB:DD.F" PCI address string into the fields of @addr.
 * Works on a strndup'd copy, so @buf itself is left untouched.
 * NOTE(review): interior lines are elided in this excerpt.
 */
384 parse_pci_addr_format(const char *buf, int bufsize, struct rte_pci_addr *addr)
386 /* first split on ':' */
394 char *str[PCI_FMT_NVAL]; /* last element-separator is "." not ":" */
/* copy the input so rte_strsplit can modify it in place */
397 char *buf_copy = strndup(buf, bufsize);
398 if (buf_copy == NULL)
401 if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
404 /* final split is on '.' between devid and function */
405 splitaddr.function = strchr(splitaddr.devid,'.');
406 if (splitaddr.function == NULL)
/* terminate devid and step past the '.' to the function digit */
408 *splitaddr.function++ = '\0';
410 /* now convert to int values */
/* domain/bus/devid are hexadecimal; the function number is decimal */
412 addr->domain = strtoul(splitaddr.domain, NULL, 16);
413 addr->bus = strtoul(splitaddr.bus, NULL, 16);
414 addr->devid = strtoul(splitaddr.devid, NULL, 16);
415 addr->function = strtoul(splitaddr.function, NULL, 10);
419 free(buf_copy); /* free the copy made with strdup */
427 * Scan the content of the PCI bus and add the discovered devices
/* NOTE(review): this is the interior of rte_pci_scan() — its signature
 * line is elided in this excerpt. It iterates the sysfs PCI directory and
 * calls pci_scan_one() on each device entry. */
435 char dirname[PATH_MAX];
436 struct rte_pci_addr addr;
438 /* for debug purposes, PCI can be disabled */
439 if (!rte_eal_has_pci())
443 if (!pci_vfio_is_enabled())
444 RTE_LOG(DEBUG, EAL, "VFIO PCI modules not loaded\n");
447 dir = opendir(rte_pci_get_sysfs_path());
449 RTE_LOG(ERR, EAL, "%s(): opendir failed: %s\n",
450 __func__, strerror(errno));
454 while ((e = readdir(dir)) != NULL) {
/* skip "." / ".." and other hidden entries */
455 if (e->d_name[0] == '.')
/* entries whose name is not a valid PCI address are skipped */
458 if (parse_pci_addr_format(e->d_name, sizeof(e->d_name), &addr) != 0)
461 snprintf(dirname, sizeof(dirname), "%s/%s",
462 rte_pci_get_sysfs_path(), e->d_name);
464 if (pci_scan_one(dirname, &addr) < 0)
476 * Check whether any PCI device is bound to a kernel driver
/* Walk the bus device list looking for a device bound to a known kernel
 * driver (i.e. kdrv is neither UNKNOWN nor NONE). */
479 pci_one_device_is_bound(void)
481 struct rte_pci_device *dev = NULL;
484 FOREACH_DEVICE_ON_PCIBUS(dev) {
/* unbound or unrecognized drivers do not count as "bound" */
485 if (dev->kdrv == RTE_KDRV_UNKNOWN ||
486 dev->kdrv == RTE_KDRV_NONE) {
497 * Check whether any of the devices is bound to a UIO driver
/*
 * Walk the bus device list looking for a UIO-bound device, honoring the
 * bus scan mode (whitelist/blacklist) so that excluded devices are ignored.
 * NOTE(review): interior lines are elided in this excerpt.
 */
500 pci_one_device_bound_uio(void)
502 struct rte_pci_device *dev = NULL;
503 struct rte_devargs *devargs;
506 FOREACH_DEVICE_ON_PCIBUS(dev) {
507 devargs = dev->device.devargs;
510 switch (rte_pci_bus.bus.conf.scan_mode) {
511 case RTE_BUS_SCAN_WHITELIST:
/* in whitelist mode, only whitelisted devices are considered */
512 if (devargs && devargs->policy == RTE_DEV_WHITELISTED)
515 case RTE_BUS_SCAN_UNDEFINED:
516 case RTE_BUS_SCAN_BLACKLIST:
/* otherwise, consider every device that is not blacklisted */
517 if (devargs == NULL ||
518 devargs->policy != RTE_DEV_BLACKLISTED)
/* a considered device bound to either UIO driver is a match */
526 if (dev->kdrv == RTE_KDRV_IGB_UIO ||
527 dev->kdrv == RTE_KDRV_UIO_GENERIC) {
535 * Check whether any of the devices wants IOVA as VA
/* Look for a VFIO-bound device matched by a driver that advertises the
 * RTE_PCI_DRV_IOVA_AS_VA flag, i.e. a device that wants IOVA as VA. */
538 pci_one_device_has_iova_va(void)
540 struct rte_pci_device *dev = NULL;
541 struct rte_pci_driver *drv = NULL;
543 FOREACH_DRIVER_ON_PCIBUS(drv) {
544 if (drv && drv->drv_flags & RTE_PCI_DRV_IOVA_AS_VA) {
545 FOREACH_DEVICE_ON_PCIBUS(dev) {
/* only VFIO-bound devices that this driver matches qualify */
546 if (dev->kdrv == RTE_KDRV_VFIO &&
547 rte_pci_match(drv, dev))
555 #if defined(RTE_ARCH_X86)
/*
 * x86: decide whether the IOMMU behind @dev can address the full VA range,
 * by reading the Intel VT-d capability register exposed in sysfs and
 * checking its MGAW (Maximum Guest Address Width) field against the
 * DMA mask. NOTE(review): interior lines are elided in this excerpt.
 */
557 pci_one_device_iommu_support_va(struct rte_pci_device *dev)
559 #define VTD_CAP_MGAW_SHIFT 16
560 #define VTD_CAP_MGAW_MASK (0x3fULL << VTD_CAP_MGAW_SHIFT)
561 #define X86_VA_WIDTH 47 /* From Documentation/x86/x86_64/mm.txt */
562 struct rte_pci_addr *addr = &dev->addr;
563 char filename[PATH_MAX];
565 uint64_t mgaw, vtd_cap_reg = 0;
567 snprintf(filename, sizeof(filename),
568 "%s/" PCI_PRI_FMT "/iommu/intel-iommu/cap",
569 rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid,
571 if (access(filename, F_OK) == -1) {
572 /* no Intel IOMMU capability file: assume VA is supported */
576 /* We have an intel IOMMU */
577 fp = fopen(filename, "r");
579 RTE_LOG(ERR, EAL, "%s(): can't open %s\n", __func__, filename);
/* the cap file contains the raw VT-d capability register in hex */
583 if (fscanf(fp, "%" PRIx64, &vtd_cap_reg) != 1) {
584 RTE_LOG(ERR, EAL, "%s(): can't read %s\n", __func__, filename);
/* MGAW field encodes (supported width - 1) */
591 mgaw = ((vtd_cap_reg & VTD_CAP_MGAW_MASK) >> VTD_CAP_MGAW_SHIFT) + 1;
593 return rte_eal_check_dma_mask(mgaw) == 0 ? true : false;
595 #elif defined(RTE_ARCH_PPC_64)
/* PPC64 variant: device argument unused (body elided in this excerpt) */
597 pci_one_device_iommu_support_va(__rte_unused struct rte_pci_device *dev)
/* generic fallback variant for other architectures (body elided) */
603 pci_one_device_iommu_support_va(__rte_unused struct rte_pci_device *dev)
610 * Check whether the IOMMUs of all devices support VA as IOVA
/* Decide whether the system's IOMMU supports VA as IOVA by probing the
 * first driver-matched device found on the bus. */
613 pci_devices_iommu_support_va(void)
615 struct rte_pci_device *dev = NULL;
616 struct rte_pci_driver *drv = NULL;
618 FOREACH_DRIVER_ON_PCIBUS(drv) {
619 FOREACH_DEVICE_ON_PCIBUS(dev) {
620 if (!rte_pci_match(drv, dev))
623 * just one PCI device needs to be checked out because
624 * the IOMMU hardware is the same for all of them.
626 return pci_one_device_iommu_support_va(dev);
633 * Get iommu class of PCI devices on the bus.
/*
 * Determine the IOVA mode (PA vs VA) for the PCI bus by combining: whether
 * any device is bound, whether any wants IOVA-as-VA, whether any is bound
 * to UIO, IOMMU VA support, and vfio-noiommu mode.
 * NOTE(review): the decision/return lines are elided in this excerpt.
 */
636 rte_pci_get_iommu_class(void)
639 bool is_vfio_noiommu_enabled = true;
644 is_bound = pci_one_device_is_bound();
/* gather the individual conditions the decision is based on */
648 has_iova_va = pci_one_device_has_iova_va();
649 is_bound_uio = pci_one_device_bound_uio();
650 iommu_no_va = !pci_devices_iommu_support_va();
652 is_vfio_noiommu_enabled = rte_vfio_noiommu_is_enabled() == true ?
/* IOVA as VA requires: a device wanting it, no UIO binding, no
 * vfio-noiommu, and an IOMMU that can address the VA range */
656 if (has_iova_va && !is_bound_uio && !is_vfio_noiommu_enabled &&
/* explain why PA was chosen even though a device asked for VA */
661 RTE_LOG(WARNING, EAL, "Some devices want iova as va but pa will be used because.. ");
662 if (is_vfio_noiommu_enabled)
663 RTE_LOG(WARNING, EAL, "vfio-noiommu mode configured\n");
665 RTE_LOG(WARNING, EAL, "few device bound to UIO\n");
667 RTE_LOG(WARNING, EAL, "IOMMU does not support IOVA as VA\n");
673 /* Read PCI config space. */
674 int rte_pci_read_config(const struct rte_pci_device *device,
675 void *buf, size_t len, off_t offset)
677 const struct rte_intr_handle *intr_handle = &device->intr_handle;
679 switch (intr_handle->type) {
680 case RTE_INTR_HANDLE_UIO:
681 case RTE_INTR_HANDLE_UIO_INTX:
682 return pci_uio_read_config(intr_handle, buf, len, offset);
685 case RTE_INTR_HANDLE_VFIO_MSIX:
686 case RTE_INTR_HANDLE_VFIO_MSI:
687 case RTE_INTR_HANDLE_VFIO_LEGACY:
688 return pci_vfio_read_config(intr_handle, buf, len, offset);
692 "Unknown handle type of fd %d\n",
698 /* Write PCI config space. */
699 int rte_pci_write_config(const struct rte_pci_device *device,
700 const void *buf, size_t len, off_t offset)
702 const struct rte_intr_handle *intr_handle = &device->intr_handle;
704 switch (intr_handle->type) {
705 case RTE_INTR_HANDLE_UIO:
706 case RTE_INTR_HANDLE_UIO_INTX:
707 return pci_uio_write_config(intr_handle, buf, len, offset);
710 case RTE_INTR_HANDLE_VFIO_MSIX:
711 case RTE_INTR_HANDLE_VFIO_MSI:
712 case RTE_INTR_HANDLE_VFIO_LEGACY:
713 return pci_vfio_write_config(intr_handle, buf, len, offset);
717 "Unknown handle type of fd %d\n",
723 #if defined(RTE_ARCH_X86)
/*
 * x86-only: locate a device's I/O port range by scanning /proc/ioports for
 * a line whose description matches the device's PCI address, and record the
 * parsed start. NOTE(review): interior lines are elided in this excerpt.
 */
725 pci_ioport_map(struct rte_pci_device *dev, int bar __rte_unused,
726 struct rte_pci_ioport *p)
/* render the device address as "DDDD:BB:DD.F" for matching */
735 snprintf(pci_id, sizeof(pci_id), PCI_PRI_FMT,
736 dev->addr.domain, dev->addr.bus,
737 dev->addr.devid, dev->addr.function);
739 fp = fopen("/proc/ioports", "r");
741 RTE_LOG(ERR, EAL, "%s(): can't open ioports\n", __func__);
745 while (getdelim(&line, &linesz, '\n', fp) > 0) {
/* each line is "RANGE : DESCRIPTION"; split at the colon */
750 n = strcspn(ptr, ":");
/* NOTE(review): isspace() on a plain char may be UB for negative
 * values — consider casting to unsigned char; verify upstream fix */
754 while (*left && isspace(*left))
757 if (!strncmp(left, pci_id, strlen(pci_id))) {
760 while (*ptr && isspace(*ptr))
/* parse the "start-end" hex port range */
763 sscanf(ptr, "%04hx-%04hx", &start, &end);
776 RTE_LOG(DEBUG, EAL, "PCI Port IO found start=0x%x\n", start);
/*
 * Map an I/O port BAR of @dev into @p, dispatching on the kernel driver;
 * on x86 the /proc/ioports-based pci_ioport_map() is used where UIO is
 * not applicable. NOTE(review): interior lines are elided in this excerpt.
 */
783 rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
784 struct rte_pci_ioport *p)
791 if (pci_vfio_is_enabled())
792 ret = pci_vfio_ioport_map(dev, bar, p);
795 case RTE_KDRV_IGB_UIO:
796 ret = pci_uio_ioport_map(dev, bar, p);
798 case RTE_KDRV_UIO_GENERIC:
/* x86 reads /proc/ioports directly; other arches go through UIO */
799 #if defined(RTE_ARCH_X86)
800 ret = pci_ioport_map(dev, bar, p);
802 ret = pci_uio_ioport_map(dev, bar, p);
806 #if defined(RTE_ARCH_X86)
807 ret = pci_ioport_map(dev, bar, p);
/* Read @len bytes at @offset from the mapped I/O port region @p,
 * dispatching on the device's kernel driver. */
821 rte_pci_ioport_read(struct rte_pci_ioport *p,
822 void *data, size_t len, off_t offset)
824 switch (p->dev->kdrv) {
827 pci_vfio_ioport_read(p, data, len, offset);
830 case RTE_KDRV_IGB_UIO:
831 pci_uio_ioport_read(p, data, len, offset);
833 case RTE_KDRV_UIO_GENERIC:
834 pci_uio_ioport_read(p, data, len, offset);
/* x86 fallback for other driver types also uses the UIO reader */
837 #if defined(RTE_ARCH_X86)
838 pci_uio_ioport_read(p, data, len, offset);
/* Write @len bytes at @offset to the mapped I/O port region @p,
 * dispatching on the device's kernel driver (mirrors the read path). */
847 rte_pci_ioport_write(struct rte_pci_ioport *p,
848 const void *data, size_t len, off_t offset)
850 switch (p->dev->kdrv) {
853 pci_vfio_ioport_write(p, data, len, offset);
856 case RTE_KDRV_IGB_UIO:
857 pci_uio_ioport_write(p, data, len, offset);
859 case RTE_KDRV_UIO_GENERIC:
860 pci_uio_ioport_write(p, data, len, offset);
/* x86 fallback for other driver types also uses the UIO writer */
863 #if defined(RTE_ARCH_X86)
864 pci_uio_ioport_write(p, data, len, offset);
873 rte_pci_ioport_unmap(struct rte_pci_ioport *p)
877 switch (p->dev->kdrv) {
880 if (pci_vfio_is_enabled())
881 ret = pci_vfio_ioport_unmap(p);
884 case RTE_KDRV_IGB_UIO:
885 ret = pci_uio_ioport_unmap(p);
887 case RTE_KDRV_UIO_GENERIC:
888 #if defined(RTE_ARCH_X86)
891 ret = pci_uio_ioport_unmap(p);
895 #if defined(RTE_ARCH_X86)