/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <limits.h>
#include <unistd.h>
#include <stdbool.h>
#include <inttypes.h>
#include <linux/pci_regs.h>
#include <sys/eventfd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <rte_log.h>
#include <rte_pci.h>
#include <rte_eal_memconfig.h>
#include <rte_malloc.h>

#include "eal_filesystem.h"
#include "eal_pci_init.h"
#include "eal_vfio.h"
#include "eal_private.h"
/**
 * PCI probing under linux (VFIO version)
 *
 * This code tries to determine if the PCI device is bound to VFIO driver,
 * and initialize it (map BARs, set up interrupts) if that's the case.
 *
 * This file is only compiled if CONFIG_RTE_EAL_VFIO is set to "y".
 */
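
/*
 * For illustration, a hypothetical caller in the EAL PCI probe/teardown
 * path might use the entry points below roughly as follows (error
 * handling elided):
 *
 *	if (pci_vfio_is_enabled() && pci_vfio_map_resource(dev) == 0) {
 *		... access device registers via dev->mem_resource[i].addr ...
 *		pci_vfio_unmap_resource(dev);
 *	}
 */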
#define PAGE_SIZE   (sysconf(_SC_PAGESIZE))
#define PAGE_MASK   (~(PAGE_SIZE - 1))
static struct rte_tailq_elem rte_vfio_tailq = {
	.name = "VFIO_RESOURCE_LIST",
};
EAL_REGISTER_TAILQ(rte_vfio_tailq)
int
pci_vfio_read_config(const struct rte_intr_handle *intr_handle,
		    void *buf, size_t len, off_t offs)
{
	return pread64(intr_handle->vfio_dev_fd, buf, len,
	       VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + offs);
}
int
pci_vfio_write_config(const struct rte_intr_handle *intr_handle,
		    const void *buf, size_t len, off_t offs)
{
	return pwrite64(intr_handle->vfio_dev_fd, buf, len,
	       VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + offs);
}
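
/*
 * Example (hypothetical caller): reading the 16-bit vendor ID through the
 * helper above; PCI_VENDOR_ID comes from <linux/pci_regs.h>.
 *
 *	uint16_t vendor_id;
 *
 *	if (pci_vfio_read_config(&dev->intr_handle, &vendor_id,
 *			sizeof(vendor_id), PCI_VENDOR_ID) != sizeof(vendor_id))
 *		return -1;
 */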
/* get PCI BAR number where MSI-X interrupts are */
static int
pci_vfio_get_msix_bar(int fd, struct pci_msix_table *msix_table)
{
	int ret;
	uint32_t reg;
	uint16_t flags;
	uint8_t cap_id, cap_offset;

	/* read PCI capability pointer from config space */
	ret = pread64(fd, &reg, sizeof(reg),
			VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
			PCI_CAPABILITY_LIST);
	if (ret != sizeof(reg)) {
		RTE_LOG(ERR, EAL, "Cannot read capability pointer from PCI "
				"config space!\n");
		return -1;
	}

	/* we need first byte */
	cap_offset = reg & 0xFF;

	while (cap_offset) {
		/* read PCI capability ID */
		ret = pread64(fd, &reg, sizeof(reg),
				VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
				cap_offset);
		if (ret != sizeof(reg)) {
			RTE_LOG(ERR, EAL, "Cannot read capability ID from PCI "
					"config space!\n");
			return -1;
		}

		/* we need first byte */
		cap_id = reg & 0xFF;

		/* if we haven't reached MSI-X, check next capability */
		if (cap_id != PCI_CAP_ID_MSIX) {
			ret = pread64(fd, &reg, sizeof(reg),
					VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
					cap_offset);
			if (ret != sizeof(reg)) {
				RTE_LOG(ERR, EAL, "Cannot read capability pointer from PCI "
						"config space!\n");
				return -1;
			}
			/* we need second byte */
			cap_offset = (reg & 0xFF00) >> 8;
			continue;
		}

		/* else, read table offset (it resides in the next 4 bytes) */
		ret = pread64(fd, &reg, sizeof(reg),
				VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
				cap_offset + 4);
		if (ret != sizeof(reg)) {
			RTE_LOG(ERR, EAL, "Cannot read table offset from PCI config "
					"space!\n");
			return -1;
		}

		ret = pread64(fd, &flags, sizeof(flags),
				VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
				cap_offset + 2);
		if (ret != sizeof(flags)) {
			RTE_LOG(ERR, EAL, "Cannot read table flags from PCI config "
					"space!\n");
			return -1;
		}

		msix_table->bar_index = reg & RTE_PCI_MSIX_TABLE_BIR;
		msix_table->offset = reg & RTE_PCI_MSIX_TABLE_OFFSET;
		msix_table->size =
			16 * (1 + (flags & RTE_PCI_MSIX_FLAGS_QSIZE));
		return 0;
	}
	return 0;
}
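
/*
 * Sizing note for pci_vfio_get_msix_bar() above: each MSI-X table entry is
 * 16 bytes and the QSIZE field encodes "number of entries minus one", so a
 * QSIZE value of 7 yields 16 * (1 + 7) = 128 bytes for the table.
 */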
/* set PCI bus mastering */
static int
pci_vfio_set_bus_master(int dev_fd, bool op)
{
	uint16_t reg;
	int ret;

	ret = pread64(dev_fd, &reg, sizeof(reg),
			VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
			PCI_COMMAND);
	if (ret != sizeof(reg)) {
		RTE_LOG(ERR, EAL, "Cannot read command from PCI config space!\n");
		return -1;
	}

	if (op)
		/* set the master bit */
		reg |= PCI_COMMAND_MASTER;
	else
		reg &= ~(PCI_COMMAND_MASTER);

	ret = pwrite64(dev_fd, &reg, sizeof(reg),
			VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
			PCI_COMMAND);
	if (ret != sizeof(reg)) {
		RTE_LOG(ERR, EAL, "Cannot write command to PCI config space!\n");
		return -1;
	}

	return 0;
}
/* set up interrupt support (but not enable interrupts) */
static int
pci_vfio_setup_interrupts(struct rte_pci_device *dev, int vfio_dev_fd)
{
	int i, ret, intr_idx;
	enum rte_intr_mode intr_mode;

	/* default to invalid index */
	intr_idx = VFIO_PCI_NUM_IRQS;

	/* Get default / configured intr_mode */
	intr_mode = rte_eal_vfio_intr_mode();

	/* get interrupt type from internal config (MSI-X by default, can be
	 * overridden from the command line)
	 */
	switch (intr_mode) {
	case RTE_INTR_MODE_MSIX:
		intr_idx = VFIO_PCI_MSIX_IRQ_INDEX;
		break;
	case RTE_INTR_MODE_MSI:
		intr_idx = VFIO_PCI_MSI_IRQ_INDEX;
		break;
	case RTE_INTR_MODE_LEGACY:
		intr_idx = VFIO_PCI_INTX_IRQ_INDEX;
		break;
	/* don't do anything if we want to automatically determine interrupt type */
	case RTE_INTR_MODE_NONE:
		break;
	default:
		RTE_LOG(ERR, EAL, "  unknown default interrupt type!\n");
		return -1;
	}

	/* start from MSI-X interrupt type */
	for (i = VFIO_PCI_MSIX_IRQ_INDEX; i >= 0; i--) {
		struct vfio_irq_info irq = { .argsz = sizeof(irq) };
		int fd = -1;

		/* skip interrupt modes we don't want */
		if (intr_mode != RTE_INTR_MODE_NONE &&
				i != intr_idx)
			continue;

		irq.index = i;

		ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
		if (ret < 0) {
			RTE_LOG(ERR, EAL, "  cannot get IRQ info, "
					"error %i (%s)\n", errno, strerror(errno));
			return -1;
		}

		/* if this vector cannot be used with eventfd, fail if we explicitly
		 * specified interrupt type, otherwise continue */
		if ((irq.flags & VFIO_IRQ_INFO_EVENTFD) == 0) {
			if (intr_mode != RTE_INTR_MODE_NONE) {
				RTE_LOG(ERR, EAL,
					"  interrupt vector does not support eventfd!\n");
				return -1;
			} else
				continue;
		}

		/* set up an eventfd for interrupts */
		fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
		if (fd < 0) {
			RTE_LOG(ERR, EAL, "  cannot set up eventfd, "
					"error %i (%s)\n", errno, strerror(errno));
			return -1;
		}

		dev->intr_handle.fd = fd;
		dev->intr_handle.vfio_dev_fd = vfio_dev_fd;

		switch (i) {
		case VFIO_PCI_MSIX_IRQ_INDEX:
			intr_mode = RTE_INTR_MODE_MSIX;
			dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSIX;
			break;
		case VFIO_PCI_MSI_IRQ_INDEX:
			intr_mode = RTE_INTR_MODE_MSI;
			dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSI;
			break;
		case VFIO_PCI_INTX_IRQ_INDEX:
			intr_mode = RTE_INTR_MODE_LEGACY;
			dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_LEGACY;
			break;
		default:
			RTE_LOG(ERR, EAL, "  unknown interrupt type!\n");
			return -1;
		}

		return 0;
	}

	/* if we're here, we haven't found a suitable interrupt vector */
	return -1;
}
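
/*
 * Note on the eventfd configured above: VFIO signals the chosen interrupt
 * by incrementing the eventfd counter, so the consumer (e.g. the EAL
 * interrupt thread) is expected to read a uint64_t from intr_handle.fd to
 * drain the event.
 */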
static int
pci_vfio_is_ioport_bar(int vfio_dev_fd, int bar_index)
{
	uint32_t ioport_bar;
	int ret;

	ret = pread64(vfio_dev_fd, &ioport_bar, sizeof(ioport_bar),
			  VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX)
			  + PCI_BASE_ADDRESS_0 + bar_index*4);
	if (ret != sizeof(ioport_bar)) {
		RTE_LOG(ERR, EAL, "Cannot read command (%x) from config space!\n",
			PCI_BASE_ADDRESS_0 + bar_index*4);
		return -1;
	}

	return (ioport_bar & PCI_BASE_ADDRESS_SPACE_IO) != 0;
}
static int
pci_vfio_setup_device(struct rte_pci_device *dev, int vfio_dev_fd)
{
	if (pci_vfio_setup_interrupts(dev, vfio_dev_fd) != 0) {
		RTE_LOG(ERR, EAL, "Error setting up interrupts!\n");
		return -1;
	}

	/* set bus mastering for the device */
	if (pci_vfio_set_bus_master(vfio_dev_fd, true)) {
		RTE_LOG(ERR, EAL, "Cannot set up bus mastering!\n");
		return -1;
	}

	/* Reset the device */
	ioctl(vfio_dev_fd, VFIO_DEVICE_RESET);

	return 0;
}
static int
pci_vfio_mmap_bar(int vfio_dev_fd, struct mapped_pci_resource *vfio_res,
		int bar_index, int additional_flags)
{
	struct memreg {
		unsigned long offset, size;
	} memreg[2] = {};
	void *bar_addr;
	struct pci_msix_table *msix_table = &vfio_res->msix_table;
	struct pci_map *bar = &vfio_res->maps[bar_index];

	if (bar->size == 0)
		/* Skip this BAR */
		return 0;

	if (msix_table->bar_index == bar_index) {
		/*
		 * VFIO will not let us map the MSI-X table,
		 * but we can map around it.
		 */
		uint32_t table_start = msix_table->offset;
		uint32_t table_end = table_start + msix_table->size;
		table_end = (table_end + ~PAGE_MASK) & PAGE_MASK;
		table_start &= PAGE_MASK;
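		/*
		 * Example of the rounding above, assuming a 4 KiB page size:
		 * a table at offset 0x3800 with size 0x100 gives
		 * table_start = 0x3000 and table_end = 0x4000, so the BAR is
		 * mapped in two chunks around the [0x3000, 0x4000) hole.
		 */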
		if (table_start == 0 && table_end >= bar->size) {
			/* Cannot map this BAR */
			RTE_LOG(DEBUG, EAL, "Skipping BAR%d\n", bar_index);
			bar->size = 0;
			bar->addr = 0;
			return 0;
		}

		memreg[0].offset = bar->offset;
		memreg[0].size = table_start;
		memreg[1].offset = bar->offset + table_end;
		memreg[1].size = bar->size - table_end;

		RTE_LOG(DEBUG, EAL,
			"Trying to map BAR%d that contains the MSI-X "
			"table. Trying offsets: "
			"0x%04lx:0x%04lx, 0x%04lx:0x%04lx\n", bar_index,
			memreg[0].offset, memreg[0].size,
			memreg[1].offset, memreg[1].size);
	} else {
		memreg[0].offset = bar->offset;
		memreg[0].size = bar->size;
	}

	/* reserve the address using an inaccessible mapping */
	bar_addr = mmap(bar->addr, bar->size, 0, MAP_PRIVATE |
			MAP_ANONYMOUS | additional_flags, -1, 0);
	if (bar_addr != MAP_FAILED) {
		void *map_addr = NULL;
		if (memreg[0].size) {
			/* actual map of first part */
			map_addr = pci_map_resource(bar_addr, vfio_dev_fd,
						memreg[0].offset,
						memreg[0].size,
						MAP_FIXED);
		}

		/* if there's a second part, try to map it */
		if (map_addr != MAP_FAILED
			&& memreg[1].offset && memreg[1].size) {
			void *second_addr = RTE_PTR_ADD(bar_addr,
						memreg[1].offset -
						(uintptr_t)bar->offset);
			map_addr = pci_map_resource(second_addr,
						vfio_dev_fd, memreg[1].offset,
						memreg[1].size,
						MAP_FIXED);
		}

		if (map_addr == MAP_FAILED || !map_addr) {
			munmap(bar_addr, bar->size);
			bar_addr = MAP_FAILED;
			RTE_LOG(ERR, EAL, "Failed to map pci BAR%d\n",
					bar_index);
			return -1;
		}
	} else {
		RTE_LOG(ERR, EAL,
			"Failed to create inaccessible mapping for BAR%d\n",
			bar_index);
		return -1;
	}

	bar->addr = bar_addr;
	return 0;
}
static int
pci_vfio_map_resource_primary(struct rte_pci_device *dev)
{
	struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
	char pci_addr[PATH_MAX] = {0};
	int vfio_dev_fd;
	struct rte_pci_addr *loc = &dev->addr;
	int i, ret;
	struct mapped_pci_resource *vfio_res = NULL;
	struct mapped_pci_res_list *vfio_res_list =
		RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
	struct pci_map *maps;

	dev->intr_handle.fd = -1;
	dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;

	/* store PCI address string */
	snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
			loc->domain, loc->bus, loc->devid, loc->function);

	ret = vfio_setup_device(pci_get_sysfs_path(), pci_addr,
					&vfio_dev_fd, &device_info);
	if (ret)
		return ret;

	/* allocate vfio_res and get region info */
	vfio_res = rte_zmalloc("VFIO_RES", sizeof(*vfio_res), 0);
	if (vfio_res == NULL) {
		RTE_LOG(ERR, EAL,
			"%s(): cannot store uio mmap details\n", __func__);
		goto err_vfio_dev_fd;
	}
	memcpy(&vfio_res->pci_addr, &dev->addr, sizeof(vfio_res->pci_addr));

	/* get number of registers (up to BAR5) */
	vfio_res->nb_maps = RTE_MIN((int) device_info.num_regions,
			VFIO_PCI_BAR5_REGION_INDEX + 1);

	/* map BARs */
	maps = vfio_res->maps;

	vfio_res->msix_table.bar_index = -1;
	/* get MSI-X BAR, if any (we have to know where it is because we can't
	 * easily mmap it when using VFIO)
	 */
	ret = pci_vfio_get_msix_bar(vfio_dev_fd, &vfio_res->msix_table);
	if (ret < 0) {
		RTE_LOG(ERR, EAL, "  %s cannot get MSI-X BAR number!\n",
				pci_addr);
		goto err_vfio_dev_fd;
	}

	for (i = 0; i < (int) vfio_res->nb_maps; i++) {
		struct vfio_region_info reg = { .argsz = sizeof(reg) };
		void *bar_addr;

		reg.index = i;

		ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
		if (ret) {
			RTE_LOG(ERR, EAL, "  %s cannot get device region info "
					"error %i (%s)\n", pci_addr, errno, strerror(errno));
			goto err_vfio_res;
		}

		/* check for IO port region */
		ret = pci_vfio_is_ioport_bar(vfio_dev_fd, i);
		if (ret < 0)
			goto err_vfio_res;
		else if (ret) {
			RTE_LOG(INFO, EAL, "Ignore mapping IO port bar(%d)\n",
					i);
			continue;
		}

		/* skip non-mmapable BARs */
		if ((reg.flags & VFIO_REGION_INFO_FLAG_MMAP) == 0)
			continue;

		/* try mapping somewhere close to the end of hugepages */
		if (pci_map_addr == NULL)
			pci_map_addr = pci_find_max_end_va();

		bar_addr = pci_map_addr;
		pci_map_addr = RTE_PTR_ADD(bar_addr, (size_t) reg.size);
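		/*
		 * Reserving BAR addresses just past the end of the hugepage
		 * area keeps them clear of hugepage mappings and gives
		 * secondary processes a good chance of re-mapping the same
		 * BARs at identical virtual addresses (see
		 * pci_vfio_map_resource_secondary(), which maps with
		 * MAP_FIXED).
		 */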
		maps[i].addr = bar_addr;
		maps[i].offset = reg.offset;
		maps[i].size = reg.size;
		maps[i].path = NULL; /* vfio doesn't have per-resource paths */

		ret = pci_vfio_mmap_bar(vfio_dev_fd, vfio_res, i, 0);
		if (ret < 0) {
			RTE_LOG(ERR, EAL, "  %s mapping BAR%i failed: %s\n",
					pci_addr, i, strerror(errno));
			goto err_vfio_res;
		}

		dev->mem_resource[i].addr = maps[i].addr;
	}

	if (pci_vfio_setup_device(dev, vfio_dev_fd) < 0) {
		RTE_LOG(ERR, EAL, "  %s setup device failed\n", pci_addr);
		goto err_vfio_res;
	}

	TAILQ_INSERT_TAIL(vfio_res_list, vfio_res, next);

	return 0;
err_vfio_res:
	rte_free(vfio_res);
err_vfio_dev_fd:
	close(vfio_dev_fd);
	return -1;
}
static int
pci_vfio_map_resource_secondary(struct rte_pci_device *dev)
{
	struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
	char pci_addr[PATH_MAX] = {0};
	int vfio_dev_fd;
	struct rte_pci_addr *loc = &dev->addr;
	int i, ret;
	struct mapped_pci_resource *vfio_res = NULL;
	struct mapped_pci_res_list *vfio_res_list =
		RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
	struct pci_map *maps;

	dev->intr_handle.fd = -1;
	dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;

	/* store PCI address string */
	snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
			loc->domain, loc->bus, loc->devid, loc->function);

	ret = vfio_setup_device(pci_get_sysfs_path(), pci_addr,
					&vfio_dev_fd, &device_info);
	if (ret)
		return ret;

	/* if we're in a secondary process, just find our tailq entry */
	TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
		if (rte_pci_addr_cmp(&vfio_res->pci_addr,
						 &dev->addr))
			continue;
		break;
	}
	/* if we haven't found our tailq entry, something's wrong */
	if (vfio_res == NULL) {
		RTE_LOG(ERR, EAL, "  %s cannot find TAILQ entry for PCI device!\n",
				pci_addr);
		goto err_vfio_dev_fd;
	}

	/* map BARs */
	maps = vfio_res->maps;
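	/*
	 * The maps[] entries recorded by the primary process carry the
	 * virtual addresses it chose; mapping with MAP_FIXED below re-uses
	 * those addresses so both processes see the BARs at the same
	 * location.
	 */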
	for (i = 0; i < (int) vfio_res->nb_maps; i++) {
		ret = pci_vfio_mmap_bar(vfio_dev_fd, vfio_res, i, MAP_FIXED);
		if (ret < 0) {
			RTE_LOG(ERR, EAL, "  %s mapping BAR%i failed: %s\n",
					pci_addr, i, strerror(errno));
			goto err_vfio_dev_fd;
		}

		dev->mem_resource[i].addr = maps[i].addr;
	}

	return 0;
err_vfio_dev_fd:
	close(vfio_dev_fd);
	return -1;
}
/*
 * map the PCI resources of a PCI device in virtual memory (VFIO version).
 * primary and secondary processes follow almost exactly the same path
 */
int
pci_vfio_map_resource(struct rte_pci_device *dev)
{
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return pci_vfio_map_resource_primary(dev);
	else
		return pci_vfio_map_resource_secondary(dev);
}
int
pci_vfio_unmap_resource(struct rte_pci_device *dev)
{
	char pci_addr[PATH_MAX] = {0};
	struct rte_pci_addr *loc = &dev->addr;
	int i, ret;
	struct mapped_pci_resource *vfio_res = NULL;
	struct mapped_pci_res_list *vfio_res_list;
	struct pci_map *maps;

	/* store PCI address string */
	snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
			loc->domain, loc->bus, loc->devid, loc->function);

	if (close(dev->intr_handle.fd) < 0) {
		RTE_LOG(INFO, EAL, "Error when closing eventfd file descriptor for %s\n",
			pci_addr);
		return -1;
	}

	if (pci_vfio_set_bus_master(dev->intr_handle.vfio_dev_fd, false)) {
		RTE_LOG(ERR, EAL, "  %s cannot unset bus mastering for PCI device!\n",
				pci_addr);
		return -1;
	}

	ret = vfio_release_device(pci_get_sysfs_path(), pci_addr,
				  dev->intr_handle.vfio_dev_fd);
	if (ret < 0) {
		RTE_LOG(ERR, EAL,
			"%s(): cannot release device\n", __func__);
		return ret;
	}

	vfio_res_list = RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
	/* find our tailq entry */
	TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
		if (memcmp(&vfio_res->pci_addr, &dev->addr, sizeof(dev->addr)))
			continue;
		break;
	}
	/* if we haven't found our tailq entry, something's wrong */
	if (vfio_res == NULL) {
		RTE_LOG(ERR, EAL, "  %s cannot find TAILQ entry for PCI device!\n",
				pci_addr);
		return -1;
	}

	/* unmap BARs */
	maps = vfio_res->maps;

	RTE_LOG(INFO, EAL, "Releasing pci mapped resource for %s\n",
		pci_addr);
	for (i = 0; i < (int) vfio_res->nb_maps; i++) {
		/*
		 * Unlike when mapping, we do not need special handling for
		 * the MSI-X table BAR here; the recorded maps array is enough.
		 */
		RTE_LOG(INFO, EAL, "Calling pci_unmap_resource for %s at %p\n",
			pci_addr, maps[i].addr);
		pci_unmap_resource(maps[i].addr, maps[i].size);
	}

	TAILQ_REMOVE(vfio_res_list, vfio_res, next);
	rte_free(vfio_res);

	return 0;
}
int
pci_vfio_ioport_map(struct rte_pci_device *dev, int bar,
		    struct rte_pci_ioport *p)
{
	if (bar < VFIO_PCI_BAR0_REGION_INDEX ||
	    bar > VFIO_PCI_BAR5_REGION_INDEX) {
		RTE_LOG(ERR, EAL, "invalid bar (%d)!\n", bar);
		return -1;
	}

	p->dev = dev;
	p->base = VFIO_GET_REGION_ADDR(bar);
	return 0;
}
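
/*
 * Note: p->base set in pci_vfio_ioport_map() is not a CPU address but a
 * file offset into the VFIO device fd (the region index is encoded in the
 * upper bits by VFIO_GET_REGION_ADDR()); the pread64()/pwrite64() calls
 * below add the caller's offset to it.
 */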
void
pci_vfio_ioport_read(struct rte_pci_ioport *p,
		     void *data, size_t len, off_t offset)
{
	const struct rte_intr_handle *intr_handle = &p->dev->intr_handle;

	if (pread64(intr_handle->vfio_dev_fd, data,
		    len, p->base + offset) <= 0)
		RTE_LOG(ERR, EAL,
			"Can't read from PCI bar (%" PRIu64 ") : offset (%x)\n",
			VFIO_GET_REGION_IDX(p->base), (int)offset);
}
void
pci_vfio_ioport_write(struct rte_pci_ioport *p,
		      const void *data, size_t len, off_t offset)
{
	const struct rte_intr_handle *intr_handle = &p->dev->intr_handle;

	if (pwrite64(intr_handle->vfio_dev_fd, data,
		     len, p->base + offset) <= 0)
		RTE_LOG(ERR, EAL,
			"Can't write to PCI bar (%" PRIu64 ") : offset (%x)\n",
			VFIO_GET_REGION_IDX(p->base), (int)offset);
}
int
pci_vfio_ioport_unmap(struct rte_pci_ioport *p)
{
	RTE_SET_USED(p);
	return -1;
}
int
pci_vfio_is_enabled(void)
{
	return vfio_is_enabled("vfio_pci");
}