4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
48 #include <sys/queue.h>
50 #include <sys/ioctl.h>
51 #include <sys/pciio.h>
52 #include <dev/pci/pcireg.h>
54 #if defined(RTE_ARCH_X86)
55 #include <sys/types.h>
56 #include <machine/cpufunc.h>
59 #include <rte_interrupts.h>
62 #include <rte_common.h>
63 #include <rte_launch.h>
64 #include <rte_memory.h>
65 #include <rte_memzone.h>
67 #include <rte_eal_memconfig.h>
68 #include <rte_per_lcore.h>
69 #include <rte_lcore.h>
70 #include <rte_malloc.h>
71 #include <rte_string_fns.h>
72 #include <rte_debug.h>
73 #include <rte_devargs.h>
75 #include "eal_filesystem.h"
76 #include "eal_private.h"
80 * PCI probing under BSD
82 * This code is used to simulate a PCI probe by reading device
83 * information from /dev/pci (via the PCIOCGETCONF/PCIOCGETBAR
84 * ioctls). Devices are expected to be bound to the nic_uio kernel
85 * module, a very minimal driver that only provides access to the
86 * PCI BARs to applications and enables bus mastering.
90 /* unbind kernel driver for this device */
/*
 * Stub on this platform: unbinding a kernel driver is not supported,
 * so this only emits an error log about RTE_PCI_DRV_FORCE_UNBIND.
 * The parameter is unused, hence __rte_unused.
 */
92 pci_unbind_kernel_driver(struct rte_pci_device *dev __rte_unused)
94 	RTE_LOG(ERR, EAL, "RTE_PCI_DRV_FORCE_UNBIND flag is not implemented "
/*
 * Map the PCI BARs of @dev into this process.
 * Dispatches on dev->kdrv: devices handled by the UIO-style kernel
 * driver go through pci_uio_map_resource(); anything else is skipped
 * with a log message (see below).
 */
101 rte_eal_pci_map_device(struct rte_pci_device *dev)
105 /* try mapping the NIC resources */
107 case RTE_KDRV_NIC_UIO:
108 /* map resources for devices that use uio */
109 ret = pci_uio_map_resource(dev);
/* fall-back branch: device not bound to a driver we can use */
113 " Not managed by a supported kernel driver, skipped\n");
121 /* Unmap pci device */
/*
 * Inverse of rte_eal_pci_map_device(): releases the BAR mappings of
 * @dev. Only RTE_KDRV_NIC_UIO devices have anything to undo; other
 * kdrv values are skipped with a log message.
 */
123 rte_eal_pci_unmap_device(struct rte_pci_device *dev)
125 /* try unmapping the NIC resources */
127 case RTE_KDRV_NIC_UIO:
128 /* unmap resources for devices that use uio */
129 pci_uio_unmap_resource(dev);
/* fall-back branch: device not bound to a driver we can use */
133 " Not managed by a supported kernel driver, skipped\n");
/*
 * Release the UIO resources attached to @dev: closes the interrupt
 * fd and resets the interrupt handle to an unknown/invalid state.
 */
139 pci_uio_free_resource(struct rte_pci_device *dev,
140 struct mapped_pci_resource *uio_res)
/*
 * NOTE(review): truthiness test treats fd == -1 (the "closed" marker
 * set below) as open and would call close(-1) on a second invocation,
 * while a legitimate fd of 0 would never be closed. A check of
 * `fd >= 0` looks intended — confirm against callers.
 */
144 if (dev->intr_handle.fd) {
145 close(dev->intr_handle.fd);
146 dev->intr_handle.fd = -1;
147 dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
/*
 * Locate the /dev/uio node backing @dev, open it as the interrupt
 * handle, and allocate a mapped_pci_resource to record the mapping
 * details (so secondary processes can re-map the same resources).
 * On failure the partially-initialized state is torn down via
 * pci_uio_free_resource() (see bottom of function).
 */
152 pci_uio_alloc_resource(struct rte_pci_device *dev,
153 struct mapped_pci_resource **uio_res)
155 char devname[PATH_MAX]; /* contains the /dev/uioX */
156 struct rte_pci_addr *loc;
/* device node is named after the PCI address: /dev/uio@pci:b:d:f */
160 snprintf(devname, sizeof(devname), "/dev/uio@pci:%u:%u:%u",
161 dev->addr.bus, dev->addr.devid, dev->addr.function);
/*
 * NOTE(review): access(2) expects R_OK|W_OK|X_OK|F_OK mode bits, but
 * O_RDWR (an open(2) flag) is passed here. It happens to equal W_OK
 * (2) on common systems, so only writability is actually checked —
 * confirm and replace with F_OK or R_OK|W_OK.
 */
163 if (access(devname, O_RDWR) < 0) {
164 RTE_LOG(WARNING, EAL, " "PCI_PRI_FMT" not managed by UIO driver, "
165 "skipping\n", loc->domain, loc->bus, loc->devid, loc->function)\
;
169 /* save fd if in primary process */
170 dev->intr_handle.fd = open(devname, O_RDWR);
171 if (dev->intr_handle.fd < 0) {
172 RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
173 devname, strerror(errno));
176 dev->intr_handle.type = RTE_INTR_HANDLE_UIO;
178 /* allocate the mapping details for secondary processes*/
179 *uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
180 if (*uio_res == NULL) {
182 "%s(): cannot store uio mmap details\n", __func__);
/* record the device node path and PCI address for re-mapping */
186 snprintf((*uio_res)->path, sizeof((*uio_res)->path), "%s", devname);
187 memcpy(&(*uio_res)->pci_addr, &dev->addr, sizeof((*uio_res)->pci_addr));
/* error path: undo fd open / handle setup done above */
192 pci_uio_free_resource(dev, *uio_res);
/*
 * mmap one PCI BAR (index @res_idx) of @dev through its UIO device
 * node and record the mapping in uio_res->maps[@map_idx].
 * On success dev->mem_resource[res_idx].addr points at the mapping;
 * on failure the allocated path string is freed (see bottom).
 */
197 pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
198 struct mapped_pci_resource *uio_res, int map_idx)
205 struct pci_map *maps;
207 maps = uio_res->maps;
208 devname = uio_res->path;
209 pagesz = sysconf(_SC_PAGESIZE);
211 /* allocate memory to keep path */
212 maps[map_idx].path = rte_malloc(NULL, strlen(devname) + 1, 0);
213 if (maps[map_idx].path == NULL) {
214 RTE_LOG(ERR, EAL, "Cannot allocate memory for path: %s\n",
220 * open resource file, to mmap it
222 fd = open(devname, O_RDWR);
224 RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
225 devname, strerror(errno));
229 /* if matching map is found, then use it */
/*
 * Mapping offset is res_idx pages into the device node — presumably
 * the UIO driver exposes BAR N at offset N * pagesize; confirm
 * against the nic_uio mmap implementation.
 */
230 offset = res_idx * pagesz;
231 mapaddr = pci_map_resource(NULL, fd, (off_t)offset,
232 (size_t)dev->mem_resource[res_idx].len, 0);
234 if (mapaddr == MAP_FAILED)
/* record everything needed for a secondary process to re-map */
237 maps[map_idx].phaddr = dev->mem_resource[res_idx].phys_addr;
238 maps[map_idx].size = dev->mem_resource[res_idx].len;
239 maps[map_idx].addr = mapaddr;
240 maps[map_idx].offset = offset;
/* safe: destination was allocated strlen(devname) + 1 above */
241 strcpy(maps[map_idx].path, devname);
242 dev->mem_resource[res_idx].addr = mapaddr;
/* error path: release the path buffer allocated above */
247 rte_free(maps[map_idx].path);
/*
 * Build an rte_pci_device from one pci_conf entry returned by the
 * /dev/pci PCIOCGETCONF ioctl, read its BARs via PCIOCGETBAR, and
 * insert it into the global pci_device_list sorted by PCI address.
 * If a device with the same address is already listed, its fields
 * are refreshed in place instead of inserting a duplicate.
 */
252 pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
254 struct rte_pci_device *dev;
255 struct pci_bar_io bar;
258 dev = malloc(sizeof(*dev));
263 memset(dev, 0, sizeof(*dev));
/* copy the PCI address (domain/bus/dev/func) from the kernel record */
264 dev->addr.domain = conf->pc_sel.pc_domain;
265 dev->addr.bus = conf->pc_sel.pc_bus;
266 dev->addr.devid = conf->pc_sel.pc_dev;
267 dev->addr.function = conf->pc_sel.pc_func;
270 dev->id.vendor_id = conf->pc_vendor;
273 dev->id.device_id = conf->pc_device;
275 /* get subsystem_vendor id */
276 dev->id.subsystem_vendor_id = conf->pc_subvendor;
278 /* get subsystem_device id */
279 dev->id.subsystem_device_id = conf->pc_subdevice;
/* class id is the usual 24-bit base-class/sub-class/prog-if triple */
282 dev->id.class_id = (conf->pc_class << 16) |
283 (conf->pc_subclass << 8) |
286 /* TODO: get max_vfs */
289 /* FreeBSD has no NUMA support (yet) */
290 dev->device.numa_node = 0;
292 /* FreeBSD has only one pass through driver */
293 dev->kdrv = RTE_KDRV_NIC_UIO;
295 /* parse resources */
/* BAR count depends on the PCI header type */
296 switch (conf->pc_hdr & PCIM_HDRTYPE) {
297 case PCIM_HDRTYPE_NORMAL:
298 max = PCIR_MAX_BAR_0;
300 case PCIM_HDRTYPE_BRIDGE:
301 max = PCIR_MAX_BAR_1;
303 case PCIM_HDRTYPE_CARDBUS:
304 max = PCIR_MAX_BAR_2;
/* query each BAR through the pci(4) ioctl interface */
310 for (i = 0; i <= max; i++) {
311 bar.pbi_sel = conf->pc_sel;
312 bar.pbi_reg = PCIR_BAR(i);
313 if (ioctl(dev_pci_fd, PCIOCGETBAR, &bar) < 0)
316 dev->mem_resource[i].len = bar.pbi_length;
/* low 4 bits of the BAR are type/flag bits, not address bits */
317 if (PCI_BAR_IO(bar.pbi_base)) {
318 dev->mem_resource[i].addr = (void *)(bar.pbi_base & ~((uint64_t)0\
xf));
321 dev->mem_resource[i].phys_addr = bar.pbi_base & ~((uint64_t)0xf);
324 /* device is valid, add in list (sorted) */
325 if (TAILQ_EMPTY(&pci_device_list)) {
326 TAILQ_INSERT_TAIL(&pci_device_list, dev, next);
329 struct rte_pci_device *dev2 = NULL;
/* walk the sorted list to find the insertion point */
332 TAILQ_FOREACH(dev2, &pci_device_list, next) {
333 ret = rte_eal_compare_pci_addr(&dev->addr, &dev2->addr);
337 TAILQ_INSERT_BEFORE(dev2, dev, next);
339 } else { /* already registered */
/* refresh the existing entry rather than inserting a duplicate */
340 dev2->kdrv = dev->kdrv;
341 dev2->max_vfs = dev->max_vfs;
342 memmove(dev2->mem_resource,
344 sizeof(dev->mem_resource));
/* address greater than every list entry: append at the tail */
349 TAILQ_INSERT_TAIL(&pci_device_list, dev, next);
360 * Scan the content of the PCI bus, and add the devices in the devices
361 * list. Call pci_scan_one() for each pci entry found.
/*
 * Opens /dev/pci and iterates PCIOCGETCONF in batches of 16 matches
 * until the kernel reports no more devices.
 */
364 rte_eal_pci_scan(void)
367 unsigned dev_count = 0;
368 struct pci_conf matches[16];
369 struct pci_conf_io conf_io = {
373 .match_buf_len = sizeof(matches),
374 .matches = &matches[0],
377 fd = open("/dev/pci", O_RDONLY);
379 RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
/* fetch one batch of device records from the kernel */
385 if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
386 RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
387 __func__, strerror(errno));
391 for (i = 0; i < conf_io.num_matches; i++)
392 if (pci_scan_one(fd, &matches[i]) < 0)
395 dev_count += conf_io.num_matches;
/* PCI_GETCONF_MORE_DEVS: batch buffer was full, keep iterating */
396 } while(conf_io.status == PCI_GETCONF_MORE_DEVS);
/*
 * NOTE(review): this is an informational summary, yet it is logged
 * at ERR level — DEBUG/INFO looks intended; confirm.
 */
400 RTE_LOG(ERR, EAL, "PCI scan found %u devices\n", dev_count);
/*
 * Re-scan a single device identified by @addr: queries /dev/pci with
 * a match filter restricted to that PCI address and, if exactly one
 * record comes back, refreshes the device via pci_scan_one() (which
 * updates an already-registered entry in place).
 */
410 pci_update_device(const struct rte_pci_addr *addr)
413 struct pci_conf matches[2];
414 struct pci_match_conf match = {
416 .pc_domain = addr->domain,
418 .pc_dev = addr->devid,
419 .pc_func = addr->function,
422 struct pci_conf_io conf_io = {
426 .match_buf_len = sizeof(matches),
427 .matches = &matches[0],
430 fd = open("/dev/pci", O_RDONLY);
432 RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
436 if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
437 RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
438 __func__, strerror(errno));
/* exactly one match expected for a fully-qualified address */
442 if (conf_io.num_matches != 1)
445 if (pci_scan_one(fd, &matches[0]) < 0)
458 /* Read PCI config space. */
/*
 * Reads @len bytes at @offset of @dev's PCI config space into @buf
 * via the PCIOCREAD ioctl on /dev/pci. pi_data is a 32-bit register,
 * so only lengths 1, 2 and 4 are meaningful; 3 and >4 are rejected.
 */
459 int rte_eal_pci_read_config(const struct rte_pci_device *dev,
460 void *buf, size_t len, off_t offset)
465 .pc_domain = dev->addr.domain,
466 .pc_bus = dev->addr.bus,
467 .pc_dev = dev->addr.devid,
468 .pc_func = dev->addr.function,
474 if (len == 3 || len > sizeof(pi.pi_data)) {
475 RTE_LOG(ERR, EAL, "%s(): invalid pci read length\n", __func__);
479 fd = open("/dev/pci", O_RDWR);
481 RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
485 if (ioctl(fd, PCIOCREAD, &pi) < 0)
/* copy only the requested number of bytes out of the 32-bit word */
489 memcpy(buf, &pi.pi_data, len);
498 /* Write PCI config space. */
/*
 * Writes @len bytes from @buf at @offset of @dev's PCI config space
 * via the PCIOCWRITE ioctl on /dev/pci. As with the read path, only
 * lengths 1, 2 and 4 are accepted.
 */
499 int rte_eal_pci_write_config(const struct rte_pci_device *dev,
500 const void *buf, size_t len, off_t offset)
506 .pc_domain = dev->addr.domain,
507 .pc_bus = dev->addr.bus,
508 .pc_dev = dev->addr.devid,
509 .pc_func = dev->addr.function,
/*
 * NOTE(review): this initializer dereferences @buf as a uint32_t
 * before len is validated — a caller passing len < 4 with a smaller
 * buffer triggers an out-of-bounds read. The memcpy below already
 * copies the correct amount; this initialization looks redundant.
 */
512 .pi_data = *(const uint32_t *)buf,
/* NOTE(review): message says "read" but this is the write path. */
516 if (len == 3 || len > sizeof(pi.pi_data)) {
517 RTE_LOG(ERR, EAL, "%s(): invalid pci read length\n", __func__);
521 memcpy(&pi.pi_data, buf, len);
523 fd = open("/dev/pci", O_RDWR);
525 RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
529 if (ioctl(fd, PCIOCWRITE, &pi) < 0)
/*
 * Prepare @p for I/O port access on BAR @bar of @dev.
 * On x86 with the UIO driver, a BAR address that fits in 16 bits is
 * taken to be an x86 I/O-port number and stored as p->base for later
 * inb/outb-style access (see pci_uio_ioport_read/write).
 */
542 rte_eal_pci_ioport_map(struct rte_pci_device *dev, int bar,
543 struct rte_pci_ioport *p)
548 #if defined(RTE_ARCH_X86)
549 case RTE_KDRV_NIC_UIO:
/* x86 port space is 16-bit, so a value <= UINT16_MAX is a port */
550 if ((uintptr_t) dev->mem_resource[bar].addr <= UINT16_MAX) {
551 p->base = (uintptr_t)dev->mem_resource[bar].addr;
/*
 * Read @len bytes from I/O port p->base + @offset into @data.
 * On x86 this issues inl/inw(/inb) instructions, consuming the widest
 * chunk possible on each iteration; on other architectures the
 * parameters are marked used and nothing is read.
 */
569 pci_uio_ioport_read(struct rte_pci_ioport *p,
570 void *data, size_t len, off_t offset)
572 #if defined(RTE_ARCH_X86)
575 unsigned short reg = p->base + offset;
/* advance data pointer, port and remaining length by chunk size */
577 for (d = data; len > 0; d += size, reg += size, len -= size) {
580 *(uint32_t *)d = inl(reg);
581 } else if (len >= 2) {
583 *(uint16_t *)d = inw(reg);
/* non-x86 fallback: silence unused-parameter warnings */
593 RTE_SET_USED(offset);
/*
 * Public ioport read entry point: dispatches on the kernel driver
 * backing the device; UIO-managed devices use pci_uio_ioport_read().
 */
598 rte_eal_pci_ioport_read(struct rte_pci_ioport *p,
599 void *data, size_t len, off_t offset)
601 switch (p->dev->kdrv) {
602 case RTE_KDRV_NIC_UIO:
603 pci_uio_ioport_read(p, data, len, offset);
/*
 * Write @len bytes from @data to I/O port p->base + @offset.
 * Mirror of pci_uio_ioport_read(): on x86 this issues outl/outw(/outb)
 * instructions in the widest chunks possible; on other architectures
 * the parameters are marked used and nothing is written.
 */
611 pci_uio_ioport_write(struct rte_pci_ioport *p,
612 const void *data, size_t len, off_t offset)
614 #if defined(RTE_ARCH_X86)
617 unsigned short reg = p->base + offset;
/* advance source pointer, port and remaining length by chunk size */
619 for (s = data; len > 0; s += size, reg += size, len -= size) {
622 outl(*(const uint32_t *)s, reg);
623 } else if (len >= 2) {
625 outw(*(const uint16_t *)s, reg);
/* non-x86 fallback: silence unused-parameter warnings */
635 RTE_SET_USED(offset);
/*
 * Public ioport write entry point: dispatches on the kernel driver
 * backing the device; UIO-managed devices use pci_uio_ioport_write().
 */
640 rte_eal_pci_ioport_write(struct rte_pci_ioport *p,
641 const void *data, size_t len, off_t offset)
643 switch (p->dev->kdrv) {
644 case RTE_KDRV_NIC_UIO:
645 pci_uio_ioport_write(p, data, len, offset);
/*
 * Release an ioport mapping created by rte_eal_pci_ioport_map().
 * On x86/UIO the "mapping" is just a stored port number, so there is
 * nothing to undo beyond what the (elided) case body does.
 */
653 rte_eal_pci_ioport_unmap(struct rte_pci_ioport *p)
657 switch (p->dev->kdrv) {
658 #if defined(RTE_ARCH_X86)
659 case RTE_KDRV_NIC_UIO:
671 /* Init the PCI EAL subsystem */
/*
 * Entry point called during EAL startup: honors the --no-pci option,
 * then populates the device list via rte_eal_pci_scan().
 */
673 rte_eal_pci_init(void)
675 /* for debug purposes, PCI can be disabled */
676 if (internal_config.no_pci)
679 if (rte_eal_pci_scan() < 0) {
680 RTE_LOG(ERR, EAL, "%s(): Cannot scan PCI bus\n", __func__);