 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/pci_regs.h>
#include <sys/eventfd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>

#include <rte_eal_memconfig.h>
#include <rte_malloc.h>

#include "eal_filesystem.h"
#include "eal_pci_init.h"
#include "eal_private.h"
/*
 * PCI probing under Linux (VFIO version).
 *
 * This code tries to determine if the PCI device is bound to the VFIO driver,
 * and initializes it (maps BARs, sets up interrupts) if that is the case.
 *
 * This file is only compiled if CONFIG_RTE_EAL_VFIO is set to "y".
 */
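/*
 * Rough flow of the code below: open (or, in a secondary process, request)
 * the VFIO container, check the API version and supported IOMMU extensions,
 * open the device's IOMMU group, attach the group to the container, pick an
 * IOMMU type and set up DMA mappings for the DPDK memory segments, then get
 * the device fd, mmap its BARs and configure interrupts and bus mastering.
 */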
#define PAGE_SIZE   (sysconf(_SC_PAGESIZE))
#define PAGE_MASK   (~(PAGE_SIZE - 1))
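/* PAGE_MASK is used when mapping BARs that contain the MSI-X table, to align
 * the region that must be skipped to page boundaries. */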
static struct rte_tailq_elem rte_vfio_tailq = {
    .name = "VFIO_RESOURCE_LIST",
};
EAL_REGISTER_TAILQ(rte_vfio_tailq)

/* per-process VFIO config */
static struct vfio_config vfio_cfg;

/* DMA mapping function prototype.
 * Takes VFIO container fd as a parameter.
 * Returns 0 on success, -1 on error.
 */
typedef int (*vfio_dma_func_t)(int);

struct vfio_iommu_type {
    int type_id;
    const char *name;
    vfio_dma_func_t dma_map_func;
};

static int vfio_type1_dma_map(int);
static int vfio_noiommu_dma_map(int);

/* IOMMU types we support */
static const struct vfio_iommu_type iommu_types[] = {
    /* x86 IOMMU, otherwise known as type 1 */
    { RTE_VFIO_TYPE1, "Type 1", &vfio_type1_dma_map},
    { RTE_VFIO_NOIOMMU, "No-IOMMU", &vfio_noiommu_dma_map},
};
vfio_type1_dma_map(int vfio_container_fd)
    const struct rte_memseg *ms = rte_eal_get_physmem_layout();

    /* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */
    for (i = 0; i < RTE_MAX_MEMSEG; i++) {
        struct vfio_iommu_type1_dma_map dma_map;

        if (ms[i].addr == NULL)

        memset(&dma_map, 0, sizeof(dma_map));
        dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
        dma_map.vaddr = ms[i].addr_64;
        dma_map.size = ms[i].len;
        dma_map.iova = ms[i].phys_addr;
        dma_map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;

        ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);

            RTE_LOG(ERR, EAL, " cannot set up DMA remapping, "
                    "error %i (%s)\n", errno, strerror(errno));
vfio_noiommu_dma_map(int __rte_unused vfio_container_fd)
    /* No-IOMMU mode does not need DMA mapping */

pci_vfio_read_config(const struct rte_intr_handle *intr_handle,
            void *buf, size_t len, off_t offs)
    return pread64(intr_handle->vfio_dev_fd, buf, len,
            VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + offs);

pci_vfio_write_config(const struct rte_intr_handle *intr_handle,
            const void *buf, size_t len, off_t offs)
    return pwrite64(intr_handle->vfio_dev_fd, buf, len,
            VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + offs);
/* get PCI BAR number where MSI-X interrupts are */
pci_vfio_get_msix_bar(int fd, int *msix_bar, uint32_t *msix_table_offset,
            uint32_t *msix_table_size)
    uint8_t cap_id, cap_offset;

    /* read PCI capability pointer from config space */
    ret = pread64(fd, &reg, sizeof(reg),
            VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
            PCI_CAPABILITY_LIST);
    if (ret != sizeof(reg)) {
        RTE_LOG(ERR, EAL, "Cannot read capability pointer from PCI "

    /* we need first byte */
    cap_offset = reg & 0xFF;

        /* read PCI capability ID */
        ret = pread64(fd, &reg, sizeof(reg),
                VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
        if (ret != sizeof(reg)) {
            RTE_LOG(ERR, EAL, "Cannot read capability ID from PCI "

        /* we need first byte */

        /* if we haven't reached MSI-X, check next capability */
        if (cap_id != PCI_CAP_ID_MSIX) {
            ret = pread64(fd, &reg, sizeof(reg),
                    VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
            if (ret != sizeof(reg)) {
                RTE_LOG(ERR, EAL, "Cannot read capability pointer from PCI "

            /* we need second byte */
            cap_offset = (reg & 0xFF00) >> 8;

        /* else, read table offset */

            /* table offset resides in the next 4 bytes */
            ret = pread64(fd, &reg, sizeof(reg),
                    VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
            if (ret != sizeof(reg)) {
                RTE_LOG(ERR, EAL, "Cannot read table offset from PCI config "

            ret = pread64(fd, &flags, sizeof(flags),
                    VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
            if (ret != sizeof(flags)) {
                RTE_LOG(ERR, EAL, "Cannot read table flags from PCI config "

            *msix_bar = reg & RTE_PCI_MSIX_TABLE_BIR;
            *msix_table_offset = reg & RTE_PCI_MSIX_TABLE_OFFSET;
            *msix_table_size = 16 * (1 + (flags & RTE_PCI_MSIX_FLAGS_QSIZE));
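            /* each MSI-X table entry is 16 bytes, and the QSIZE field holds
             * the number of table entries minus one, hence the "1 +" above */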
/* set PCI bus mastering */
pci_vfio_set_bus_master(int dev_fd)
    ret = pread64(dev_fd, &reg, sizeof(reg),
            VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
    if (ret != sizeof(reg)) {
        RTE_LOG(ERR, EAL, "Cannot read command from PCI config space!\n");

    /* set the master bit */
    reg |= PCI_COMMAND_MASTER;

    ret = pwrite64(dev_fd, &reg, sizeof(reg),
            VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +

    if (ret != sizeof(reg)) {
        RTE_LOG(ERR, EAL, "Cannot write command to PCI config space!\n");
/* pick IOMMU type. returns a pointer to vfio_iommu_type or NULL for error */
static const struct vfio_iommu_type *
pci_vfio_set_iommu_type(int vfio_container_fd) {
    for (idx = 0; idx < RTE_DIM(iommu_types); idx++) {
        const struct vfio_iommu_type *t = &iommu_types[idx];

        int ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU,
            RTE_LOG(NOTICE, EAL, " using IOMMU type %d (%s)\n",
                    t->type_id, t->name);

        /* not an error, there may be more supported IOMMU types */
        RTE_LOG(DEBUG, EAL, " set IOMMU type %d (%s) failed, "
                "error %i (%s)\n", t->type_id, t->name, errno,

    /* if we didn't find a suitable IOMMU type, fail */

/* check if we have any supported extensions */
pci_vfio_has_supported_extensions(int vfio_container_fd) {
    unsigned idx, n_extensions = 0;

    for (idx = 0; idx < RTE_DIM(iommu_types); idx++) {
        const struct vfio_iommu_type *t = &iommu_types[idx];

        ret = ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION,
            RTE_LOG(ERR, EAL, " could not get IOMMU type, "
                    "error %i (%s)\n", errno,
            close(vfio_container_fd);
        } else if (ret == 1) {
            /* we found a supported extension */

        RTE_LOG(DEBUG, EAL, " IOMMU type %d (%s) is %s\n",
                ret ? "supported" : "not supported");

    /* if we didn't find any supported IOMMU types, fail */
        close(vfio_container_fd);
/* set up interrupt support (but not enable interrupts) */
pci_vfio_setup_interrupts(struct rte_pci_device *dev, int vfio_dev_fd)
    int i, ret, intr_idx;

    /* default to invalid index */
    intr_idx = VFIO_PCI_NUM_IRQS;

    /* get interrupt type from internal config (MSI-X by default, can be
     * overridden from the command line
     */
    switch (internal_config.vfio_intr_mode) {
    case RTE_INTR_MODE_MSIX:
        intr_idx = VFIO_PCI_MSIX_IRQ_INDEX;
    case RTE_INTR_MODE_MSI:
        intr_idx = VFIO_PCI_MSI_IRQ_INDEX;
    case RTE_INTR_MODE_LEGACY:
        intr_idx = VFIO_PCI_INTX_IRQ_INDEX;
    /* don't do anything if we want to automatically determine interrupt type */
    case RTE_INTR_MODE_NONE:
        RTE_LOG(ERR, EAL, " unknown default interrupt type!\n");

    /* start from MSI-X interrupt type */
    for (i = VFIO_PCI_MSIX_IRQ_INDEX; i >= 0; i--) {
        struct vfio_irq_info irq = { .argsz = sizeof(irq) };

        /* skip interrupt modes we don't want */
        if (internal_config.vfio_intr_mode != RTE_INTR_MODE_NONE &&

        ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
            RTE_LOG(ERR, EAL, " cannot get IRQ info, "
                    "error %i (%s)\n", errno, strerror(errno));

        /* if this vector cannot be used with eventfd, fail if we explicitly
         * specified interrupt type, otherwise continue */
        if ((irq.flags & VFIO_IRQ_INFO_EVENTFD) == 0) {
            if (internal_config.vfio_intr_mode != RTE_INTR_MODE_NONE) {
                        " interrupt vector does not support eventfd!\n");

        /* set up an eventfd for interrupts */
        fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
            RTE_LOG(ERR, EAL, " cannot set up eventfd, "
                    "error %i (%s)\n", errno, strerror(errno));

        dev->intr_handle.fd = fd;
        dev->intr_handle.vfio_dev_fd = vfio_dev_fd;

        case VFIO_PCI_MSIX_IRQ_INDEX:
            internal_config.vfio_intr_mode = RTE_INTR_MODE_MSIX;
            dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSIX;
        case VFIO_PCI_MSI_IRQ_INDEX:
            internal_config.vfio_intr_mode = RTE_INTR_MODE_MSI;
            dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSI;
        case VFIO_PCI_INTX_IRQ_INDEX:
            internal_config.vfio_intr_mode = RTE_INTR_MODE_LEGACY;
            dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_LEGACY;
            RTE_LOG(ERR, EAL, " unknown interrupt type!\n");

    /* if we're here, we haven't found a suitable interrupt vector */
/* open container fd or get an existing one */
pci_vfio_get_container_fd(void)
    int ret, vfio_container_fd;

    /* if we're in a primary process, try to open the container */
    if (internal_config.process_type == RTE_PROC_PRIMARY) {
        vfio_container_fd = open(VFIO_CONTAINER_PATH, O_RDWR);
        if (vfio_container_fd < 0) {
            RTE_LOG(ERR, EAL, " cannot open VFIO container, "
                    "error %i (%s)\n", errno, strerror(errno));

        /* check VFIO API version */
        ret = ioctl(vfio_container_fd, VFIO_GET_API_VERSION);
        if (ret != VFIO_API_VERSION) {
                RTE_LOG(ERR, EAL, " could not get VFIO API version, "
                        "error %i (%s)\n", errno, strerror(errno));
                RTE_LOG(ERR, EAL, " unsupported VFIO API version!\n");
            close(vfio_container_fd);

        ret = pci_vfio_has_supported_extensions(vfio_container_fd);
            RTE_LOG(ERR, EAL, " no supported IOMMU "
                    "extensions found!\n");

        return vfio_container_fd;

     * if we're in a secondary process, request container fd from the
     * primary process via our socket

    socket_fd = vfio_mp_sync_connect_to_primary();
        RTE_LOG(ERR, EAL, " cannot connect to primary process!\n");

    if (vfio_mp_sync_send_request(socket_fd, SOCKET_REQ_CONTAINER) < 0) {
        RTE_LOG(ERR, EAL, " cannot request container fd!\n");

    vfio_container_fd = vfio_mp_sync_receive_fd(socket_fd);
    if (vfio_container_fd < 0) {
        RTE_LOG(ERR, EAL, " cannot get container fd!\n");

    return vfio_container_fd;
/* open group fd or get an existing one */
pci_vfio_get_group_fd(int iommu_group_no)
    char filename[PATH_MAX];

    /* check if we already have the group descriptor open */
    for (i = 0; i < vfio_cfg.vfio_group_idx; i++)
        if (vfio_cfg.vfio_groups[i].group_no == iommu_group_no)
            return vfio_cfg.vfio_groups[i].fd;

    /* if primary, try to open the group */
    if (internal_config.process_type == RTE_PROC_PRIMARY) {
        /* try regular group format */
        snprintf(filename, sizeof(filename),
                VFIO_GROUP_FMT, iommu_group_no);
        vfio_group_fd = open(filename, O_RDWR);
        if (vfio_group_fd < 0) {
            /* if file not found, it's not an error */
            if (errno != ENOENT) {
                RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename,

            /* special case: try no-IOMMU path as well */
            snprintf(filename, sizeof(filename),
                    VFIO_NOIOMMU_GROUP_FMT, iommu_group_no);
            vfio_group_fd = open(filename, O_RDWR);
            if (vfio_group_fd < 0) {
                if (errno != ENOENT) {
                    RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename,

            /* noiommu group found */

        /* if the fd is valid, create a new group for it */
        if (vfio_cfg.vfio_group_idx == VFIO_MAX_GROUPS) {
            RTE_LOG(ERR, EAL, "Maximum number of VFIO groups reached!\n");
            close(vfio_group_fd);

        vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].group_no = iommu_group_no;
        vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].fd = vfio_group_fd;
        return vfio_group_fd;

    /* if we're in a secondary process, request group fd from the primary
     * process via our socket

    socket_fd = vfio_mp_sync_connect_to_primary();
        RTE_LOG(ERR, EAL, " cannot connect to primary process!\n");

    if (vfio_mp_sync_send_request(socket_fd, SOCKET_REQ_GROUP) < 0) {
        RTE_LOG(ERR, EAL, " cannot request group fd!\n");

    if (vfio_mp_sync_send_request(socket_fd, iommu_group_no) < 0) {
        RTE_LOG(ERR, EAL, " cannot send group number!\n");

    ret = vfio_mp_sync_receive_request(socket_fd);

        vfio_group_fd = vfio_mp_sync_receive_fd(socket_fd);
        /* if we got the fd, return it */
        if (vfio_group_fd > 0) {
            return vfio_group_fd;
        /* fall-through on error */
        RTE_LOG(ERR, EAL, " cannot get group fd!\n");
/* parse IOMMU group number for a PCI device
 * returns 1 on success, -1 for errors, 0 for non-existent group
 */
pci_vfio_get_group_no(const char *pci_addr, int *iommu_group_no)
    char linkname[PATH_MAX];
    char filename[PATH_MAX];
    char *tok[16], *group_tok, *end;

    memset(linkname, 0, sizeof(linkname));
    memset(filename, 0, sizeof(filename));

    /* try to find out IOMMU group for this device */
    snprintf(linkname, sizeof(linkname),
            "%s/%s/iommu_group", pci_get_sysfs_path(), pci_addr);

    ret = readlink(linkname, filename, sizeof(filename));

    /* if the link doesn't exist, no VFIO for us */

    ret = rte_strsplit(filename, sizeof(filename),
            tok, RTE_DIM(tok), '/');
        RTE_LOG(ERR, EAL, " %s cannot get IOMMU group\n", pci_addr);

    /* IOMMU group is always the last token */
    group_tok = tok[ret - 1];

    *iommu_group_no = strtol(group_tok, &end, 10);
    if ((end != group_tok && *end != '\0') || errno != 0) {
        RTE_LOG(ERR, EAL, " %s error parsing IOMMU number!\n", pci_addr);
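/* reset the group list slot that is currently being initialized; called on
 * error paths in pci_vfio_map_resource() below, before vfio_group_idx is
 * incremented */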
clear_current_group(void)
    vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].group_no = 0;
    vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].fd = -1;
 * map the PCI resources of a PCI device in virtual memory (VFIO version).
 * primary and secondary processes follow almost exactly the same path
pci_vfio_map_resource(struct rte_pci_device *dev)
    struct vfio_group_status group_status = {
            .argsz = sizeof(group_status)
    };
    struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
    int vfio_group_fd, vfio_dev_fd;
    char pci_addr[PATH_MAX] = {0};
    struct rte_pci_addr *loc = &dev->addr;
    int i, ret, msix_bar;
    struct mapped_pci_resource *vfio_res = NULL;
    struct mapped_pci_res_list *vfio_res_list =
            RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
    struct pci_map *maps;
    uint32_t msix_table_offset = 0;
    uint32_t msix_table_size = 0;

    dev->intr_handle.fd = -1;
    dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;

    /* store PCI address string */
    snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
            loc->domain, loc->bus, loc->devid, loc->function);

    /* get group number */
    ret = pci_vfio_get_group_no(pci_addr, &iommu_group_no);
        RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",

    /* if negative, something failed */

    /* get the actual group fd */
    vfio_group_fd = pci_vfio_get_group_fd(iommu_group_no);
    if (vfio_group_fd < 0)

    vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].group_no = iommu_group_no;
    vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].fd = vfio_group_fd;

    /* if group_fd == 0, that means the device isn't managed by VFIO */
    if (vfio_group_fd == 0) {
        RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
        /* we store 0 as group fd to distinguish between existing but
         * unbound VFIO groups, and groups that don't exist at all.
         */
        vfio_cfg.vfio_group_idx++;
     * at this point, we know at least one port on this device is bound to VFIO,
     * so we can proceed to try and set this particular port up

    /* check if the group is viable */
    ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &group_status);
        RTE_LOG(ERR, EAL, " %s cannot get group status, "
                "error %i (%s)\n", pci_addr, errno, strerror(errno));
        close(vfio_group_fd);
        clear_current_group();
    } else if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
        RTE_LOG(ERR, EAL, " %s VFIO group is not viable!\n", pci_addr);
        close(vfio_group_fd);
        clear_current_group();

     * at this point, we know that this group is viable (meaning, all devices
     * are either bound to VFIO or not bound to anything)

    /* check if group does not have a container yet */
    if (!(group_status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {

        /* add group to a container */
        ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER,
                &vfio_cfg.vfio_container_fd);
            RTE_LOG(ERR, EAL, " %s cannot add VFIO group to container, "
                    "error %i (%s)\n", pci_addr, errno, strerror(errno));
            close(vfio_group_fd);
            clear_current_group();

         * at this point we know that this group has been successfully
         * initialized, so we increment vfio_group_idx to indicate that we can
        vfio_cfg.vfio_group_idx++;

     * pick an IOMMU type and set up DMA mappings for container
     * needs to be done only once, only when at least one group is assigned to
     * a container and only in primary process
    if (internal_config.process_type == RTE_PROC_PRIMARY &&
            vfio_cfg.vfio_container_has_dma == 0) {
        /* select an IOMMU type which we will be using */
        const struct vfio_iommu_type *t =
                pci_vfio_set_iommu_type(vfio_cfg.vfio_container_fd);
            RTE_LOG(ERR, EAL, " %s failed to select IOMMU type\n", pci_addr);

        ret = t->dma_map_func(vfio_cfg.vfio_container_fd);
            RTE_LOG(ERR, EAL, " %s DMA remapping failed, "
                    "error %i (%s)\n", pci_addr, errno, strerror(errno));

        vfio_cfg.vfio_container_has_dma = 1;
    /* get a file descriptor for the device */
    vfio_dev_fd = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD, pci_addr);
    if (vfio_dev_fd < 0) {
        /* if we cannot get a device fd, this simply means that this
         * particular port is not bound to VFIO
         */
        RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",

    /* test and setup the device */
    ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_INFO, &device_info);
        RTE_LOG(ERR, EAL, " %s cannot get device info, "
                "error %i (%s)\n", pci_addr, errno, strerror(errno));

    /* get MSI-X BAR, if any (we have to know where it is because we can't
     * easily mmap it when using VFIO) */
    ret = pci_vfio_get_msix_bar(vfio_dev_fd, &msix_bar,
            &msix_table_offset, &msix_table_size);
        RTE_LOG(ERR, EAL, " %s cannot get MSI-X BAR number!\n", pci_addr);

    /* if we're in a primary process, allocate vfio_res and get region info */
    if (internal_config.process_type == RTE_PROC_PRIMARY) {
        vfio_res = rte_zmalloc("VFIO_RES", sizeof(*vfio_res), 0);
        if (vfio_res == NULL) {
                    "%s(): cannot store vfio mmap details\n", __func__);

        memcpy(&vfio_res->pci_addr, &dev->addr, sizeof(vfio_res->pci_addr));

        /* get number of registers (up to BAR5) */
        vfio_res->nb_maps = RTE_MIN((int) device_info.num_regions,
                VFIO_PCI_BAR5_REGION_INDEX + 1);

        /* if we're in a secondary process, just find our tailq entry */
        TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
            if (memcmp(&vfio_res->pci_addr, &dev->addr, sizeof(dev->addr)))

        /* if we haven't found our tailq entry, something's wrong */
        if (vfio_res == NULL) {
            RTE_LOG(ERR, EAL, " %s cannot find TAILQ entry for PCI device!\n",

    maps = vfio_res->maps;
    for (i = 0; i < (int) vfio_res->nb_maps; i++) {
        struct vfio_region_info reg = { .argsz = sizeof(reg) };
            unsigned long offset, size;

        ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
            RTE_LOG(ERR, EAL, " %s cannot get device region info "
                    "error %i (%s)\n", pci_addr, errno, strerror(errno));
            if (internal_config.process_type == RTE_PROC_PRIMARY)

        /* check for I/O port region */
        ret = pread64(vfio_dev_fd, &ioport_bar, sizeof(ioport_bar),
                VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX)
                + PCI_BASE_ADDRESS_0 + i*4);
        if (ret != sizeof(ioport_bar)) {
                    "Cannot read command (%x) from config space!\n",
                    PCI_BASE_ADDRESS_0 + i*4);

        if (ioport_bar & PCI_BASE_ADDRESS_SPACE_IO) {
                    "Ignore mapping IO port bar(%d) addr: %x\n",

        /* skip non-mmapable BARs */
        if ((reg.flags & VFIO_REGION_INFO_FLAG_MMAP) == 0)

            /* VFIO will not let us map the MSI-X table,
             * but we can map around it.
             */
            uint32_t table_start = msix_table_offset;
            uint32_t table_end = table_start + msix_table_size;
            table_end = (table_end + ~PAGE_MASK) & PAGE_MASK;
            table_start &= PAGE_MASK;
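            /* table_start was rounded down and table_end rounded up to page
             * boundaries, so the parts of the BAR that do get mapped never
             * share a page with the MSI-X table */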
            if (table_start == 0 && table_end >= reg.size) {
                /* Cannot map this BAR */
                RTE_LOG(DEBUG, EAL, "Skipping BAR %d\n", i);

                memreg[0].offset = reg.offset;
                memreg[0].size = table_start;
                memreg[1].offset = table_end;
                memreg[1].size = reg.size - table_end;

                        "Trying to map BAR %d that contains the MSI-X "
                        "table. Trying offsets: "
                        "0x%04lx:0x%04lx, 0x%04lx:0x%04lx\n", i,
                        memreg[0].offset, memreg[0].size,
                        memreg[1].offset, memreg[1].size);

            memreg[0].offset = reg.offset;
            memreg[0].size = reg.size;

        /* try to figure out an address */
        if (internal_config.process_type == RTE_PROC_PRIMARY) {
            /* try mapping somewhere close to the end of hugepages */
            if (pci_map_addr == NULL)
                pci_map_addr = pci_find_max_end_va();

            bar_addr = pci_map_addr;
            pci_map_addr = RTE_PTR_ADD(bar_addr, (size_t) reg.size);
            bar_addr = maps[i].addr;

        /* reserve the address using an inaccessible mapping */
        bar_addr = mmap(bar_addr, reg.size, 0, MAP_PRIVATE |
                MAP_ANONYMOUS, -1, 0);
        if (bar_addr != MAP_FAILED) {
            void *map_addr = NULL;
            if (memreg[0].size) {
                /* actual map of first part */
                map_addr = pci_map_resource(bar_addr, vfio_dev_fd,

            /* if there's a second part, try to map it */
            if (map_addr != MAP_FAILED
                    && memreg[1].offset && memreg[1].size) {
                void *second_addr = RTE_PTR_ADD(bar_addr, memreg[1].offset);
                map_addr = pci_map_resource(second_addr,
                        vfio_dev_fd, memreg[1].offset,

            if (map_addr == MAP_FAILED || !map_addr) {
                munmap(bar_addr, reg.size);
                bar_addr = MAP_FAILED;

        if (bar_addr == MAP_FAILED ||
                (internal_config.process_type == RTE_PROC_SECONDARY &&
                bar_addr != maps[i].addr)) {
            RTE_LOG(ERR, EAL, " %s mapping BAR%i failed: %s\n", pci_addr, i,
            if (internal_config.process_type == RTE_PROC_PRIMARY)

        maps[i].addr = bar_addr;
        maps[i].offset = reg.offset;
        maps[i].size = reg.size;
        maps[i].path = NULL; /* vfio doesn't have per-resource paths */
        dev->mem_resource[i].addr = bar_addr;
    /* if secondary process, do not set up interrupts */
    if (internal_config.process_type == RTE_PROC_PRIMARY) {
        if (pci_vfio_setup_interrupts(dev, vfio_dev_fd) != 0) {
            RTE_LOG(ERR, EAL, " %s error setting up interrupts!\n", pci_addr);

        /* set bus mastering for the device */
        if (pci_vfio_set_bus_master(vfio_dev_fd)) {
            RTE_LOG(ERR, EAL, " %s cannot set up bus mastering!\n", pci_addr);

        /* Reset the device */
        ioctl(vfio_dev_fd, VFIO_DEVICE_RESET);
    }

    if (internal_config.process_type == RTE_PROC_PRIMARY)
        TAILQ_INSERT_TAIL(vfio_res_list, vfio_res, next);
pci_vfio_ioport_map(struct rte_pci_device *dev, int bar,
        struct rte_pci_ioport *p)
    if (bar < VFIO_PCI_BAR0_REGION_INDEX ||
            bar > VFIO_PCI_BAR5_REGION_INDEX) {
        RTE_LOG(ERR, EAL, "invalid bar (%d)!\n", bar);

    p->base = VFIO_GET_REGION_ADDR(bar);
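/* I/O port BARs are not mmapped under VFIO; the read/write helpers below go
 * through pread64()/pwrite64() on the device fd at the region's offset */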
pci_vfio_ioport_read(struct rte_pci_ioport *p,
        void *data, size_t len, off_t offset)
    const struct rte_intr_handle *intr_handle = &p->dev->intr_handle;

    if (pread64(intr_handle->vfio_dev_fd, data,
            len, p->base + offset) <= 0)
            "Can't read from PCI bar (%" PRIu64 ") : offset (%x)\n",
            VFIO_GET_REGION_IDX(p->base), (int)offset);

pci_vfio_ioport_write(struct rte_pci_ioport *p,
        const void *data, size_t len, off_t offset)
    const struct rte_intr_handle *intr_handle = &p->dev->intr_handle;

    if (pwrite64(intr_handle->vfio_dev_fd, data,
            len, p->base + offset) <= 0)
            "Can't write to PCI bar (%" PRIu64 ") : offset (%x)\n",
            VFIO_GET_REGION_IDX(p->base), (int)offset);

pci_vfio_ioport_unmap(struct rte_pci_ioport *p)
pci_vfio_enable(void)
    /* initialize group list */

    for (i = 0; i < VFIO_MAX_GROUPS; i++) {
        vfio_cfg.vfio_groups[i].fd = -1;
        vfio_cfg.vfio_groups[i].group_no = -1;

    /* inform the user that we are probing for VFIO */
    RTE_LOG(INFO, EAL, "Probing VFIO support...\n");

    /* check if vfio-pci module is loaded */
    vfio_available = rte_eal_check_module("vfio_pci");

    /* return error directly */
    if (vfio_available == -1) {
        RTE_LOG(INFO, EAL, "Could not get loaded module details!\n");

    /* return 0 if VFIO modules not loaded */
    if (vfio_available == 0) {
        RTE_LOG(DEBUG, EAL, "VFIO modules not loaded, "
                "skipping VFIO support...\n");

    vfio_cfg.vfio_container_fd = pci_vfio_get_container_fd();

    /* check if we have VFIO driver enabled */
    if (vfio_cfg.vfio_container_fd != -1) {
        RTE_LOG(NOTICE, EAL, "VFIO support initialized\n");
        vfio_cfg.vfio_enabled = 1;
        RTE_LOG(NOTICE, EAL, "VFIO support could not be initialized\n");
pci_vfio_is_enabled(void)
    return vfio_cfg.vfio_enabled;