#include <stdlib.h>
#include <stdio.h>
#include <sys/queue.h>
-#include <sys/mman.h>
-
#include <rte_errno.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_bus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
+#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_memory.h>
#include <rte_eal.h>
+#include <rte_eal_paging.h>
#include <rte_string_fns.h>
#include <rte_common.h>
#include <rte_devargs.h>
{
const char *path = NULL;
+#ifdef RTE_EXEC_ENV_LINUX
path = getenv("SYSFS_PCI_DEVICES");
if (path == NULL)
return SYSFS_PCI_DEVICES;
+#endif
return path;
}
-static struct rte_devargs *pci_devargs_lookup(struct rte_pci_device *dev)
+static struct rte_devargs *
+pci_devargs_lookup(const struct rte_pci_addr *pci_addr)
{
struct rte_devargs *devargs;
struct rte_pci_addr addr;
RTE_EAL_DEVARGS_FOREACH("pci", devargs) {
devargs->bus->parse(devargs->name, &addr);
- if (!rte_pci_addr_cmp(&dev->addr, &addr))
+ if (!rte_pci_addr_cmp(pci_addr, &addr))
return devargs;
}
return NULL;
/* Each device has its internal, canonical name set. */
rte_pci_device_name(&dev->addr,
dev->name, sizeof(dev->name));
- devargs = pci_devargs_lookup(dev);
+ devargs = pci_devargs_lookup(&dev->addr);
dev->device.devargs = devargs;
- /* In blacklist mode, if the device is not blacklisted, no
- * rte_devargs exists for it.
+
+ /* When using a blocklist, only blocked devices will have
+ * an rte_devargs. Allowed devices won't have one.
*/
if (devargs != NULL)
/* If an rte_devargs exists, the generic rte_device uses the
dev->device.name = dev->name;
}
+/* map a particular resource from a file */
+/*
+ * Thin wrapper around rte_mem_map(): maps @size bytes of @fd starting at
+ * @offset, read/write, RTE_MAP_SHARED OR'ed with the caller-supplied
+ * @additional_flags. @requested_addr is passed through as the mapping hint.
+ *
+ * Returns the mapped address on success, or NULL on failure; on failure
+ * the error is logged and rte_errno holds the cause (set by rte_mem_map).
+ */
+void *
+pci_map_resource(void *requested_addr, int fd, off_t offset, size_t size,
+		int additional_flags)
+{
+	void *mapaddr;
+
+	/* Map the PCI memory resource of device */
+	mapaddr = rte_mem_map(requested_addr, size,
+		RTE_PROT_READ | RTE_PROT_WRITE,
+		RTE_MAP_SHARED | additional_flags, fd, offset);
+	if (mapaddr == NULL) {
+		RTE_LOG(ERR, EAL,
+			"%s(): cannot map resource(%d, %p, 0x%zx, 0x%llx): %s (%p)\n",
+			__func__, fd, requested_addr, size,
+			/* off_t width is platform-dependent; widen for %llx */
+			(unsigned long long)offset,
+			rte_strerror(rte_errno), mapaddr);
+	} else
+		RTE_LOG(DEBUG, EAL, "  PCI memory mapped at %p\n", mapaddr);
+
+	return mapaddr;
+}
+
+/* unmap a particular resource */
+/*
+ * Counterpart of pci_map_resource(): releases a mapping previously
+ * created with rte_mem_map(). @size must match the mapped length.
+ * A NULL @requested_addr is silently ignored; failures are only logged
+ * (no error is returned to the caller).
+ */
+void
+pci_unmap_resource(void *requested_addr, size_t size)
+{
+	if (requested_addr == NULL)
+		return;
+
+	/* Unmap the PCI memory resource of device */
+	if (rte_mem_unmap(requested_addr, size)) {
+		RTE_LOG(ERR, EAL, "%s(): cannot mem unmap(%p, %#zx): %s\n",
+			__func__, requested_addr, size,
+			rte_strerror(rte_errno));
+	} else
+		RTE_LOG(DEBUG, EAL, "  PCI memory unmapped at %p\n",
+				requested_addr);
+}
/*
* Match the PCI Driver and Device using the ID Table
*/
id_table++) {
/* check if device's identifiers match the driver's ones */
if (id_table->vendor_id != pci_dev->id.vendor_id &&
- id_table->vendor_id != PCI_ANY_ID)
+ id_table->vendor_id != RTE_PCI_ANY_ID)
continue;
if (id_table->device_id != pci_dev->id.device_id &&
- id_table->device_id != PCI_ANY_ID)
+ id_table->device_id != RTE_PCI_ANY_ID)
continue;
if (id_table->subsystem_vendor_id !=
pci_dev->id.subsystem_vendor_id &&
- id_table->subsystem_vendor_id != PCI_ANY_ID)
+ id_table->subsystem_vendor_id != RTE_PCI_ANY_ID)
continue;
if (id_table->subsystem_device_id !=
pci_dev->id.subsystem_device_id &&
- id_table->subsystem_device_id != PCI_ANY_ID)
+ id_table->subsystem_device_id != RTE_PCI_ANY_ID)
continue;
if (id_table->class_id != pci_dev->id.class_id &&
id_table->class_id != RTE_CLASS_ANY_ID)
struct rte_pci_device *dev)
{
int ret;
+ unsigned socket_id;
bool already_probed;
struct rte_pci_addr *loc;
loc = &dev->addr;
- /* The device is not blacklisted; Check if driver supports it */
+ /* The device is not blocked; Check if driver supports it */
if (!rte_pci_match(dr, dev))
/* Match of device and driver failed */
return 1;
loc->domain, loc->bus, loc->devid, loc->function,
dev->device.numa_node);
- /* no initialization when blacklisted, return without error */
+ /* no initialization when marked as blocked, return without error */
if (dev->device.devargs != NULL &&
- dev->device.devargs->policy ==
- RTE_DEV_BLACKLISTED) {
- RTE_LOG(INFO, EAL, " Device is blacklisted, not"
- " initializing\n");
+ dev->device.devargs->policy == RTE_DEV_BLOCKED) {
+ RTE_LOG(INFO, EAL, " Device is blocked, not initializing\n");
return 1;
}
if (dev->device.numa_node < 0) {
- RTE_LOG(WARNING, EAL, " Invalid NUMA socket, default to 0\n");
- dev->device.numa_node = 0;
+ if (rte_socket_count() > 1)
+ RTE_LOG(INFO, EAL, "Device %s is not NUMA-aware, defaulting socket to 0\n",
+ dev->name);
+ socket_id = rte_lcore_to_socket_id(rte_get_next_lcore(-1, 0, 0));
+ dev->device.numa_node = socket_id;
}
already_probed = rte_dev_is_probed(&dev->device);
* all registered drivers that have a matching entry in its id_table
* for discovered devices.
*/
-int
-rte_pci_probe(void)
+static int
+pci_probe(void)
{
struct rte_pci_device *dev = NULL;
size_t probed = 0, failed = 0;
- struct rte_devargs *devargs;
- int probe_all = 0;
int ret = 0;
- if (rte_pci_bus.bus.conf.scan_mode != RTE_BUS_SCAN_WHITELIST)
- probe_all = 1;
-
FOREACH_DEVICE_ON_PCIBUS(dev) {
probed++;
- devargs = dev->device.devargs;
- /* probe all or only whitelisted devices */
- if (probe_all)
- ret = pci_probe_all_drivers(dev);
- else if (devargs != NULL &&
- devargs->policy == RTE_DEV_WHITELISTED)
- ret = pci_probe_all_drivers(dev);
+ ret = pci_probe_all_drivers(dev);
if (ret < 0) {
if (ret != -EEXIST) {
RTE_LOG(ERR, EAL, "Requested device "
switch (pdev->kdrv) {
#ifdef HAVE_VFIO_DEV_REQ_INTERFACE
- case RTE_KDRV_VFIO:
+ case RTE_PCI_KDRV_VFIO:
/*
* vfio kernel module guaranty the pci device would not be
* deleted until the user space release the resource, so no
RTE_DEV_EVENT_REMOVE);
break;
#endif
- case RTE_KDRV_IGB_UIO:
- case RTE_KDRV_UIO_GENERIC:
- case RTE_KDRV_NIC_UIO:
+ case RTE_PCI_KDRV_IGB_UIO:
+ case RTE_PCI_KDRV_UIO_GENERIC:
+ case RTE_PCI_KDRV_NIC_UIO:
/* BARs resource is invalid, remap it to be safe. */
ret = pci_uio_remap_resource(pdev);
break;
* In case driver don't provides any specific mapping
* try fallback to VFIO.
*/
- if (pdev->kdrv == RTE_KDRV_VFIO)
+ if (pdev->kdrv == RTE_PCI_KDRV_VFIO)
return rte_vfio_container_dma_map
(RTE_VFIO_DEFAULT_CONTAINER_FD, (uintptr_t)addr,
iova, len);
* In case driver don't provides any specific mapping
* try fallback to VFIO.
*/
- if (pdev->kdrv == RTE_KDRV_VFIO)
+ if (pdev->kdrv == RTE_PCI_KDRV_VFIO)
return rte_vfio_container_dma_unmap
(RTE_VFIO_DEFAULT_CONTAINER_FD, (uintptr_t)addr,
iova, len);
return -1;
}
-static bool
-pci_ignore_device(const struct rte_pci_device *dev)
+bool
+rte_pci_ignore_device(const struct rte_pci_addr *pci_addr)
{
- struct rte_devargs *devargs = dev->device.devargs;
+ struct rte_devargs *devargs = pci_devargs_lookup(pci_addr);
switch (rte_pci_bus.bus.conf.scan_mode) {
- case RTE_BUS_SCAN_WHITELIST:
- if (devargs && devargs->policy == RTE_DEV_WHITELISTED)
+ case RTE_BUS_SCAN_ALLOWLIST:
+ if (devargs && devargs->policy == RTE_DEV_ALLOWED)
return false;
break;
case RTE_BUS_SCAN_UNDEFINED:
- case RTE_BUS_SCAN_BLACKLIST:
- if (devargs == NULL ||
- devargs->policy != RTE_DEV_BLACKLISTED)
+ case RTE_BUS_SCAN_BLOCKLIST:
+ if (devargs == NULL || devargs->policy != RTE_DEV_BLOCKED)
return false;
break;
}
if (iommu_no_va == -1)
iommu_no_va = pci_device_iommu_support_va(dev)
? 0 : 1;
- if (pci_ignore_device(dev))
- continue;
- if (dev->kdrv == RTE_KDRV_UNKNOWN ||
- dev->kdrv == RTE_KDRV_NONE)
+
+ if (dev->kdrv == RTE_PCI_KDRV_UNKNOWN ||
+ dev->kdrv == RTE_PCI_KDRV_NONE)
continue;
FOREACH_DRIVER_ON_PCIBUS(drv) {
enum rte_iova_mode dev_iova_mode;
return iova_mode;
}
+/*
+ * Walk the PCI Express extended capability list, which lives in config
+ * space above the first 256 bytes (RTE_PCI_CFG_SPACE_SIZE), looking for
+ * the extended capability with ID @cap.
+ *
+ * Returns the config-space offset of the capability header on success,
+ * 0 if the device has no such capability (or no extended capabilities at
+ * all), or -1 if reading config space fails.
+ */
+off_t
+rte_pci_find_ext_capability(struct rte_pci_device *dev, uint32_t cap)
+{
+	off_t offset = RTE_PCI_CFG_SPACE_SIZE;
+	uint32_t header;
+	int ttl;
+
+	/* minimum 8 bytes per capability */
+	/* ttl bounds the walk so a malformed/looping list cannot hang us */
+	ttl = (RTE_PCI_CFG_SPACE_EXP_SIZE - RTE_PCI_CFG_SPACE_SIZE) / 8;
+
+	if (rte_pci_read_config(dev, &header, 4, offset) < 0) {
+		RTE_LOG(ERR, EAL, "error in reading extended capabilities\n");
+		return -1;
+	}
+
+	/*
+	 * If we have no capabilities, this is indicated by cap ID,
+	 * cap version and next pointer all being 0.
+	 */
+	if (header == 0)
+		return 0;
+
+	while (ttl != 0) {
+		if (RTE_PCI_EXT_CAP_ID(header) == cap)
+			return offset;
+
+		/* follow the "next capability" pointer in the header */
+		offset = RTE_PCI_EXT_CAP_NEXT(header);
+
+		/* a next pointer below 0x100 terminates the list */
+		if (offset < RTE_PCI_CFG_SPACE_SIZE)
+			break;
+
+		if (rte_pci_read_config(dev, &header, 4, offset) < 0) {
+			RTE_LOG(ERR, EAL,
+				"error in reading extended capabilities\n");
+			return -1;
+		}
+
+		ttl--;
+	}
+
+	return 0;
+}
+
+/*
+ * Enable or disable PCI bus mastering for @dev by toggling the
+ * RTE_PCI_COMMAND_MASTER bit of the PCI command register.
+ *
+ * Performs a read-modify-write of the command register and skips the
+ * write entirely when the bit already has the requested value.
+ *
+ * Returns 0 on success, -1 if reading or writing config space fails.
+ */
+int
+rte_pci_set_bus_master(struct rte_pci_device *dev, bool enable)
+{
+	uint16_t old_cmd, cmd;
+
+	if (rte_pci_read_config(dev, &old_cmd, sizeof(old_cmd),
+				RTE_PCI_COMMAND) < 0) {
+		RTE_LOG(ERR, EAL, "error in reading PCI command register\n");
+		return -1;
+	}
+
+	if (enable)
+		cmd = old_cmd | RTE_PCI_COMMAND_MASTER;
+	else
+		cmd = old_cmd & ~RTE_PCI_COMMAND_MASTER;
+
+	/* avoid a needless config-space write when nothing changes */
+	if (cmd == old_cmd)
+		return 0;
+
+	if (rte_pci_write_config(dev, &cmd, sizeof(cmd),
+				 RTE_PCI_COMMAND) < 0) {
+		RTE_LOG(ERR, EAL, "error in writing PCI command register\n");
+		return -1;
+	}
+
+	return 0;
+}
+
struct rte_pci_bus rte_pci_bus = {
.bus = {
.scan = rte_pci_scan,
- .probe = rte_pci_probe,
+ .probe = pci_probe,
.find_device = pci_find_device,
.plug = pci_plug,
.unplug = pci_unplug,
.parse = pci_parse,
+ .devargs_parse = rte_pci_devargs_parse,
.dma_map = pci_dma_map,
.dma_unmap = pci_dma_unmap,
.get_iommu_class = rte_pci_get_iommu_class,