1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
11 #include <rte_errno.h>
13 #include <rte_memory.h>
14 #include <rte_eal_memconfig.h>
17 #include "eal_filesystem.h"
19 #include "eal_private.h"
23 #define VFIO_MEM_EVENT_CLB_NAME "vfio_mem_event_clb"
25 /* hot plug/unplug of VFIO groups may cause all DMA maps to be dropped. we can
26 * recreate the mappings for DPDK segments, but we cannot do so for memory that
27 * was registered by the user themselves, so we need to store the user mappings
28 * somewhere, to recreate them later.
30 #define VFIO_MAX_USER_MEM_MAPS 256
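/* a minimal sketch of a single user mem map entry, assuming only the
 * addr/iova/len fields that the code below accesses; the exact definition
 * may differ.
 */
struct user_mem_map {
	uint64_t addr;
	uint64_t iova;
	uint64_t len;
};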
37 struct user_mem_maps {
38 rte_spinlock_recursive_t lock;
40 struct user_mem_map maps[VFIO_MAX_USER_MEM_MAPS];
45 int vfio_container_fd;
46 int vfio_active_groups;
47 const struct vfio_iommu_type *vfio_iommu_type;
48 struct vfio_group vfio_groups[VFIO_MAX_GROUPS];
49 struct user_mem_maps mem_maps;
52 /* per-process VFIO config */
53 static struct vfio_config vfio_cfgs[VFIO_MAX_CONTAINERS];
54 static struct vfio_config *default_vfio_cfg = &vfio_cfgs[0];
56 static int vfio_type1_dma_map(int);
57 static int vfio_type1_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
58 static int vfio_spapr_dma_map(int);
59 static int vfio_spapr_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
60 static int vfio_noiommu_dma_map(int);
61 static int vfio_noiommu_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
62 static int vfio_dma_mem_map(struct vfio_config *vfio_cfg, uint64_t vaddr,
63 uint64_t iova, uint64_t len, int do_map);
65 /* IOMMU types we support */
66 static const struct vfio_iommu_type iommu_types[] = {
67 /* x86 IOMMU, otherwise known as type 1 */
69 .type_id = RTE_VFIO_TYPE1,
71 .dma_map_func = &vfio_type1_dma_map,
72 .dma_user_map_func = &vfio_type1_dma_mem_map
74 /* ppc64 IOMMU, otherwise known as spapr */
76 .type_id = RTE_VFIO_SPAPR,
78 .dma_map_func = &vfio_spapr_dma_map,
79 .dma_user_map_func = &vfio_spapr_dma_mem_map
83 .type_id = RTE_VFIO_NOIOMMU,
85 .dma_map_func = &vfio_noiommu_dma_map,
86 .dma_user_map_func = &vfio_noiommu_dma_mem_map
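/* for reference, the IOMMU type descriptor used above is assumed to look
 * roughly like this (the real declaration lives in a header); field names
 * follow the designated initializers and the t->type_id/t->name accesses
 * in this file:
 *
 *	struct vfio_iommu_type {
 *		int type_id;
 *		const char *name;
 *		int (*dma_map_func)(int vfio_container_fd);
 *		int (*dma_user_map_func)(int vfio_container_fd, uint64_t vaddr,
 *				uint64_t iova, uint64_t len, int do_map);
 *	};
 */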
91 is_null_map(const struct user_mem_map *map)
93 return map->addr == 0 && map->iova == 0 && map->len == 0;
96 /* we may need to merge user mem maps together when the user maps/unmaps
97 * chunks of memory, so we need a comparator function to sort segments.
100 user_mem_map_cmp(const void *a, const void *b)
102 const struct user_mem_map *umm_a = a;
103 const struct user_mem_map *umm_b = b;
105 /* move null entries to end */
106 if (is_null_map(umm_a))
108 if (is_null_map(umm_b))
111 /* sort by iova first */
112 if (umm_a->iova < umm_b->iova)
114 if (umm_a->iova > umm_b->iova)
117 if (umm_a->addr < umm_b->addr)
119 if (umm_a->addr > umm_b->addr)
122 if (umm_a->len < umm_b->len)
124 if (umm_a->len > umm_b->len)
130 /* adjust user map entry. this may result in shortening the existing map, or in
131 * splitting the existing map in two pieces.
134 adjust_map(struct user_mem_map *src, struct user_mem_map *end,
135 uint64_t remove_va_start, uint64_t remove_len)
137 /* if VA start is the same as the map's start address, just move the start */
138 if (remove_va_start == src->addr) {
139 src->addr += remove_len;
140 src->iova += remove_len;
141 src->len -= remove_len;
142 } else if (remove_va_start + remove_len == src->addr + src->len) {
143 /* we're shrinking mapping from the end */
144 src->len -= remove_len;
146 /* we're blowing a hole in the middle */
147 struct user_mem_map tmp;
148 uint64_t total_len = src->len;
150 /* adjust source segment length */
151 src->len = remove_va_start - src->addr;
153 /* create temporary segment in the middle */
154 tmp.addr = src->addr + src->len;
155 tmp.iova = src->iova + src->len;
156 tmp.len = remove_len;
158 /* populate end segment - this one we will be keeping */
159 end->addr = tmp.addr + tmp.len;
160 end->iova = tmp.iova + tmp.len;
161 end->len = total_len - src->len - tmp.len;
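/* for example, removing [0x2000, 0x3000) from a map covering
 * [0x1000, 0x4000) shrinks src to [0x1000, 0x2000) and fills end with
 * [0x3000, 0x4000); the removed middle chunk described by tmp is dropped.
 */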
165 /* try merging two maps into one; return 1 on success */
167 merge_map(struct user_mem_map *left, struct user_mem_map *right)
169 if (left->addr + left->len != right->addr)
171 if (left->iova + left->len != right->iova)
174 left->len += right->len;
176 memset(right, 0, sizeof(*right));
181 static struct user_mem_map *
182 find_user_mem_map(struct user_mem_maps *user_mem_maps, uint64_t addr,
183 uint64_t iova, uint64_t len)
185 uint64_t va_end = addr + len;
186 uint64_t iova_end = iova + len;
189 for (i = 0; i < user_mem_maps->n_maps; i++) {
190 struct user_mem_map *map = &user_mem_maps->maps[i];
191 uint64_t map_va_end = map->addr + map->len;
192 uint64_t map_iova_end = map->iova + map->len;
195 if (addr < map->addr || addr >= map_va_end)
197 /* check if VA end is within boundaries */
198 if (va_end <= map->addr || va_end > map_va_end)
201 /* check start IOVA */
202 if (iova < map->iova || iova >= map_iova_end)
204 /* check if IOVA end is within boundaries */
205 if (iova_end <= map->iova || iova_end > map_iova_end)
208 /* we've found our map */
214 /* this will sort all user maps, and merge/compact any adjacent maps */
216 compact_user_maps(struct user_mem_maps *user_mem_maps)
218 int i, n_merged, cur_idx;
220 qsort(user_mem_maps->maps, user_mem_maps->n_maps,
221 sizeof(user_mem_maps->maps[0]), user_mem_map_cmp);
223 /* we'll go over the list backwards when merging */
225 for (i = user_mem_maps->n_maps - 2; i >= 0; i--) {
226 struct user_mem_map *l, *r;
228 l = &user_mem_maps->maps[i];
229 r = &user_mem_maps->maps[i + 1];
231 if (is_null_map(l) || is_null_map(r))
238 /* the entries are still sorted, but now they have holes in them, so
239 * walk through the list and remove the holes
243 for (i = 0; i < user_mem_maps->n_maps; i++) {
244 if (!is_null_map(&user_mem_maps->maps[i])) {
245 struct user_mem_map *src, *dst;
247 src = &user_mem_maps->maps[i];
248 dst = &user_mem_maps->maps[cur_idx++];
251 memcpy(dst, src, sizeof(*src));
252 memset(src, 0, sizeof(*src));
256 user_mem_maps->n_maps = cur_idx;
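/* for example, two VA- and IOVA-contiguous entries
 * {addr/iova 0x1000, len 0x1000} and {addr/iova 0x2000, len 0x1000} are
 * merged into a single {addr/iova 0x1000, len 0x2000} entry, and the
 * compaction pass lowers n_maps accordingly.
 */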
261 vfio_open_group_fd(int iommu_group_num)
264 char filename[PATH_MAX];
265 struct rte_mp_msg mp_req, *mp_rep;
266 struct rte_mp_reply mp_reply;
267 struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
268 struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
270 /* if primary, try to open the group */
271 if (internal_config.process_type == RTE_PROC_PRIMARY) {
272 /* try regular group format */
273 snprintf(filename, sizeof(filename),
274 VFIO_GROUP_FMT, iommu_group_num);
275 vfio_group_fd = open(filename, O_RDWR);
276 if (vfio_group_fd < 0) {
277 /* if file not found, it's not an error */
278 if (errno != ENOENT) {
279 RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename,
284 /* special case: try no-IOMMU path as well */
285 snprintf(filename, sizeof(filename),
286 VFIO_NOIOMMU_GROUP_FMT,
288 vfio_group_fd = open(filename, O_RDWR);
289 if (vfio_group_fd < 0) {
290 if (errno != ENOENT) {
291 RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename,
297 /* noiommu group found */
300 return vfio_group_fd;
302 /* if we're in a secondary process, request group fd from the primary
303 * process via mp channel.
305 p->req = SOCKET_REQ_GROUP;
306 p->group_num = iommu_group_num;
307 strcpy(mp_req.name, EAL_VFIO_MP);
308 mp_req.len_param = sizeof(*p);
312 if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
313 mp_reply.nb_received == 1) {
314 mp_rep = &mp_reply.msgs[0];
315 p = (struct vfio_mp_param *)mp_rep->param;
316 if (p->result == SOCKET_OK && mp_rep->num_fds == 1) {
317 vfio_group_fd = mp_rep->fds[0];
318 } else if (p->result == SOCKET_NO_FD) {
319 RTE_LOG(ERR, EAL, " bad VFIO group fd\n");
325 if (vfio_group_fd < 0)
326 RTE_LOG(ERR, EAL, " cannot request group fd\n");
327 return vfio_group_fd;
330 static struct vfio_config *
331 get_vfio_cfg_by_group_num(int iommu_group_num)
333 struct vfio_config *vfio_cfg;
336 for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
337 vfio_cfg = &vfio_cfgs[i];
338 for (j = 0; j < VFIO_MAX_GROUPS; j++) {
339 if (vfio_cfg->vfio_groups[j].group_num ==
349 vfio_get_group_fd(struct vfio_config *vfio_cfg,
354 struct vfio_group *cur_grp;
356 /* check if we already have the group descriptor open */
357 for (i = 0; i < VFIO_MAX_GROUPS; i++)
358 if (vfio_cfg->vfio_groups[i].group_num == iommu_group_num)
359 return vfio_cfg->vfio_groups[i].fd;
361 /* Let's first see if there is room for a new group */
362 if (vfio_cfg->vfio_active_groups == VFIO_MAX_GROUPS) {
363 RTE_LOG(ERR, EAL, "Maximum number of VFIO groups reached!\n");
367 /* Now let's get an index for the new group */
368 for (i = 0; i < VFIO_MAX_GROUPS; i++)
369 if (vfio_cfg->vfio_groups[i].group_num == -1) {
370 cur_grp = &vfio_cfg->vfio_groups[i];
374 /* This should not happen */
375 if (i == VFIO_MAX_GROUPS) {
376 RTE_LOG(ERR, EAL, "No VFIO group free slot found\n");
380 vfio_group_fd = vfio_open_group_fd(iommu_group_num);
381 if (vfio_group_fd < 0) {
382 RTE_LOG(ERR, EAL, "Failed to open group %d\n", iommu_group_num);
386 cur_grp->group_num = iommu_group_num;
387 cur_grp->fd = vfio_group_fd;
388 vfio_cfg->vfio_active_groups++;
390 return vfio_group_fd;
393 static struct vfio_config *
394 get_vfio_cfg_by_group_fd(int vfio_group_fd)
396 struct vfio_config *vfio_cfg;
399 for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
400 vfio_cfg = &vfio_cfgs[i];
401 for (j = 0; j < VFIO_MAX_GROUPS; j++)
402 if (vfio_cfg->vfio_groups[j].fd == vfio_group_fd)
409 static struct vfio_config *
410 get_vfio_cfg_by_container_fd(int container_fd)
414 for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
415 if (vfio_cfgs[i].vfio_container_fd == container_fd)
416 return &vfio_cfgs[i];
423 rte_vfio_get_group_fd(int iommu_group_num)
425 struct vfio_config *vfio_cfg;
427 /* get the vfio_config it belongs to */
428 vfio_cfg = get_vfio_cfg_by_group_num(iommu_group_num);
429 vfio_cfg = vfio_cfg ? vfio_cfg : default_vfio_cfg;
431 return vfio_get_group_fd(vfio_cfg, iommu_group_num);
435 get_vfio_group_idx(int vfio_group_fd)
437 struct vfio_config *vfio_cfg;
440 for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
441 vfio_cfg = &vfio_cfgs[i];
442 for (j = 0; j < VFIO_MAX_GROUPS; j++)
443 if (vfio_cfg->vfio_groups[j].fd == vfio_group_fd)
451 vfio_group_device_get(int vfio_group_fd)
453 struct vfio_config *vfio_cfg;
456 vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
457 if (vfio_cfg == NULL) {
458 RTE_LOG(ERR, EAL, " invalid group fd!\n");
462 i = get_vfio_group_idx(vfio_group_fd);
463 if (i < 0 || i > (VFIO_MAX_GROUPS - 1))
464 RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
466 vfio_cfg->vfio_groups[i].devices++;
470 vfio_group_device_put(int vfio_group_fd)
472 struct vfio_config *vfio_cfg;
475 vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
476 if (vfio_cfg == NULL) {
477 RTE_LOG(ERR, EAL, " invalid group fd!\n");
481 i = get_vfio_group_idx(vfio_group_fd);
482 if (i < 0 || i > (VFIO_MAX_GROUPS - 1))
483 RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
485 vfio_cfg->vfio_groups[i].devices--;
489 vfio_group_device_count(int vfio_group_fd)
491 struct vfio_config *vfio_cfg;
494 vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
495 if (vfio_cfg == NULL) {
496 RTE_LOG(ERR, EAL, " invalid group fd!\n");
500 i = get_vfio_group_idx(vfio_group_fd);
501 if (i < 0 || i > (VFIO_MAX_GROUPS - 1)) {
502 RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
506 return vfio_cfg->vfio_groups[i].devices;
510 vfio_mem_event_callback(enum rte_mem_event type, const void *addr, size_t len,
511 void *arg __rte_unused)
513 struct rte_memseg_list *msl;
514 struct rte_memseg *ms;
517 msl = rte_mem_virt2memseg_list(addr);
519 /* in IOVA-as-VA mode, no need to care about IOVA addresses */
520 if (rte_eal_iova_mode() == RTE_IOVA_VA && msl->external == 0) {
521 uint64_t vfio_va = (uint64_t)(uintptr_t)addr;
522 if (type == RTE_MEM_EVENT_ALLOC)
523 vfio_dma_mem_map(default_vfio_cfg, vfio_va, vfio_va,
526 vfio_dma_mem_map(default_vfio_cfg, vfio_va, vfio_va,
531 /* memsegs are contiguous in memory */
532 ms = rte_mem_virt2memseg(addr, msl);
533 while (cur_len < len) {
534 /* some memory segments may have invalid IOVA */
535 if (ms->iova == RTE_BAD_IOVA) {
536 RTE_LOG(DEBUG, EAL, "Memory segment at %p has bad IOVA, skipping\n",
540 if (type == RTE_MEM_EVENT_ALLOC)
541 vfio_dma_mem_map(default_vfio_cfg, ms->addr_64,
542 ms->iova, ms->len, 1);
544 vfio_dma_mem_map(default_vfio_cfg, ms->addr_64,
545 ms->iova, ms->len, 0);
553 vfio_sync_default_container(void)
555 struct rte_mp_msg mp_req, *mp_rep;
556 struct rte_mp_reply mp_reply;
557 struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
558 struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
562 /* cannot be called from primary */
563 if (rte_eal_process_type() != RTE_PROC_SECONDARY)
566 /* default container fd should have been opened in rte_vfio_enable() */
567 if (!default_vfio_cfg->vfio_enabled ||
568 default_vfio_cfg->vfio_container_fd < 0) {
569 RTE_LOG(ERR, EAL, "VFIO support is not initialized\n");
573 /* find default container's IOMMU type */
574 p->req = SOCKET_REQ_IOMMU_TYPE;
575 strcpy(mp_req.name, EAL_VFIO_MP);
576 mp_req.len_param = sizeof(*p);
580 if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
581 mp_reply.nb_received == 1) {
582 mp_rep = &mp_reply.msgs[0];
583 p = (struct vfio_mp_param *)mp_rep->param;
584 if (p->result == SOCKET_OK)
585 iommu_type_id = p->iommu_type_id;
588 if (iommu_type_id < 0) {
589 RTE_LOG(ERR, EAL, "Could not get IOMMU type for default container\n");
593 /* we now have an fd for default container, as well as its IOMMU type.
594 * now, set up default VFIO container config to match.
596 for (i = 0; i < RTE_DIM(iommu_types); i++) {
597 const struct vfio_iommu_type *t = &iommu_types[i];
598 if (t->type_id != iommu_type_id)
601 /* we found our IOMMU type */
602 default_vfio_cfg->vfio_iommu_type = t;
606 RTE_LOG(ERR, EAL, "Could not find IOMMU type id (%i)\n",
612 rte_vfio_clear_group(int vfio_group_fd)
615 struct vfio_config *vfio_cfg;
617 vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
618 if (vfio_cfg == NULL) {
619 RTE_LOG(ERR, EAL, " invalid group fd!\n");
623 i = get_vfio_group_idx(vfio_group_fd);
626 vfio_cfg->vfio_groups[i].group_num = -1;
627 vfio_cfg->vfio_groups[i].fd = -1;
628 vfio_cfg->vfio_groups[i].devices = 0;
629 vfio_cfg->vfio_active_groups--;
635 rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
636 int *vfio_dev_fd, struct vfio_device_info *device_info)
638 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
639 rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock;
640 struct vfio_group_status group_status = {
641 .argsz = sizeof(group_status)
643 struct vfio_config *vfio_cfg;
644 struct user_mem_maps *user_mem_maps;
645 int vfio_container_fd;
650 /* get group number */
651 ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_num);
653 RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
658 /* if negative, something failed */
662 /* get the actual group fd */
663 vfio_group_fd = rte_vfio_get_group_fd(iommu_group_num);
664 if (vfio_group_fd < 0)
667 /* if group_fd == 0, that means the device isn't managed by VFIO */
668 if (vfio_group_fd == 0) {
669 RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
675 * a group is viable only when all devices in it are either bound to VFIO
676 * or not bound to anything
679 /* check if the group is viable */
680 ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &group_status);
682 RTE_LOG(ERR, EAL, " %s cannot get group status, "
683 "error %i (%s)\n", dev_addr, errno, strerror(errno));
684 close(vfio_group_fd);
685 rte_vfio_clear_group(vfio_group_fd);
687 } else if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
688 RTE_LOG(ERR, EAL, " %s VFIO group is not viable!\n", dev_addr);
689 close(vfio_group_fd);
690 rte_vfio_clear_group(vfio_group_fd);
694 /* get the vfio_config it belongs to */
695 vfio_cfg = get_vfio_cfg_by_group_num(iommu_group_num);
696 vfio_cfg = vfio_cfg ? vfio_cfg : default_vfio_cfg;
697 vfio_container_fd = vfio_cfg->vfio_container_fd;
698 user_mem_maps = &vfio_cfg->mem_maps;
700 /* check if group does not have a container yet */
701 if (!(group_status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
703 /* add group to a container */
704 ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER,
707 RTE_LOG(ERR, EAL, " %s cannot add VFIO group to container, "
708 "error %i (%s)\n", dev_addr, errno, strerror(errno));
709 close(vfio_group_fd);
710 rte_vfio_clear_group(vfio_group_fd);
715 * pick an IOMMU type and set up DMA mappings for the container
717 * this needs to be done only once, only when the first group is
718 * assigned to a container and only in the primary process.
719 * Note this can happen several times with the hotplug
722 if (internal_config.process_type == RTE_PROC_PRIMARY &&
723 vfio_cfg->vfio_active_groups == 1 &&
724 vfio_group_device_count(vfio_group_fd) == 0) {
725 const struct vfio_iommu_type *t;
727 /* select an IOMMU type which we will be using */
728 t = vfio_set_iommu_type(vfio_container_fd);
731 " %s failed to select IOMMU type\n",
733 close(vfio_group_fd);
734 rte_vfio_clear_group(vfio_group_fd);
737 /* lock memory hotplug before mapping and release it
738 * after registering callback, to prevent races
740 rte_rwlock_read_lock(mem_lock);
741 if (vfio_cfg == default_vfio_cfg)
742 ret = t->dma_map_func(vfio_container_fd);
747 " %s DMA remapping failed, error %i (%s)\n",
748 dev_addr, errno, strerror(errno));
749 close(vfio_group_fd);
750 rte_vfio_clear_group(vfio_group_fd);
751 rte_rwlock_read_unlock(mem_lock);
755 vfio_cfg->vfio_iommu_type = t;
757 /* re-map all user-mapped segments */
758 rte_spinlock_recursive_lock(&user_mem_maps->lock);
760 /* this IOMMU type may not support DMA mapping, but
761 * if we have mappings in the list - that means we have
762 * previously mapped something successfully, so we can
763 * be sure that DMA mapping is supported.
765 for (i = 0; i < user_mem_maps->n_maps; i++) {
766 struct user_mem_map *map;
767 map = &user_mem_maps->maps[i];
769 ret = t->dma_user_map_func(
771 map->addr, map->iova, map->len,
774 RTE_LOG(ERR, EAL, "Couldn't map user memory for DMA: "
776 "iova: 0x%" PRIx64 " "
777 "len: 0x%" PRIu64 "\n",
778 map->addr, map->iova,
780 rte_spinlock_recursive_unlock(
781 &user_mem_maps->lock);
782 rte_rwlock_read_unlock(mem_lock);
786 rte_spinlock_recursive_unlock(&user_mem_maps->lock);
788 /* register callback for mem events */
789 if (vfio_cfg == default_vfio_cfg)
790 ret = rte_mem_event_callback_register(
791 VFIO_MEM_EVENT_CLB_NAME,
792 vfio_mem_event_callback, NULL);
795 /* unlock memory hotplug */
796 rte_rwlock_read_unlock(mem_lock);
798 if (ret && rte_errno != ENOTSUP) {
799 RTE_LOG(ERR, EAL, "Could not install memory event callback for VFIO\n");
803 RTE_LOG(DEBUG, EAL, "Memory event callbacks not supported\n");
805 RTE_LOG(DEBUG, EAL, "Installed memory event callback for VFIO\n");
807 } else if (rte_eal_process_type() != RTE_PROC_PRIMARY &&
808 vfio_cfg == default_vfio_cfg &&
809 vfio_cfg->vfio_iommu_type == NULL) {
810 /* if we're not a primary process, we do not set up the VFIO
811 * container because it's already been set up by the primary
812 * process. instead, we simply ask the primary which IOMMU type
813 * we are using, and set the VFIO config up appropriately.
815 ret = vfio_sync_default_container();
817 RTE_LOG(ERR, EAL, "Could not sync default VFIO container\n");
818 close(vfio_group_fd);
819 rte_vfio_clear_group(vfio_group_fd);
822 /* we have successfully initialized VFIO, notify user */
823 const struct vfio_iommu_type *t =
824 default_vfio_cfg->vfio_iommu_type;
825 RTE_LOG(NOTICE, EAL, " using IOMMU type %d (%s)\n",
826 t->type_id, t->name);
829 /* get a file descriptor for the device */
830 *vfio_dev_fd = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD, dev_addr);
831 if (*vfio_dev_fd < 0) {
832 /* if we cannot get a device fd, this implies a problem with
833 * the VFIO group, or with the container not having the IOMMU configured.
836 RTE_LOG(WARNING, EAL, "Getting a vfio_dev_fd for %s failed\n",
838 close(vfio_group_fd);
839 rte_vfio_clear_group(vfio_group_fd);
843 /* test and setup the device */
844 ret = ioctl(*vfio_dev_fd, VFIO_DEVICE_GET_INFO, device_info);
846 RTE_LOG(ERR, EAL, " %s cannot get device info, "
847 "error %i (%s)\n", dev_addr, errno,
850 close(vfio_group_fd);
851 rte_vfio_clear_group(vfio_group_fd);
854 vfio_group_device_get(vfio_group_fd);
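/* a hypothetical caller (e.g. a PCI bus driver) would typically use the
 * function above together with rte_vfio_release_device(); the sysfs base
 * and device address below are illustrative only:
 *
 *	struct vfio_device_info info = { .argsz = sizeof(info) };
 *	int dev_fd;
 *
 *	if (rte_vfio_setup_device("/sys/bus/pci/devices", "0000:00:01.0",
 *			&dev_fd, &info) == 0) {
 *		... use dev_fd for VFIO_DEVICE_* ioctls ...
 *		rte_vfio_release_device("/sys/bus/pci/devices",
 *				"0000:00:01.0", dev_fd);
 *	}
 */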
860 rte_vfio_release_device(const char *sysfs_base, const char *dev_addr,
863 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
864 rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock;
865 struct vfio_group_status group_status = {
866 .argsz = sizeof(group_status)
868 struct vfio_config *vfio_cfg;
873 /* we don't want any DMA mapping requests to arrive while we're detaching
874 * the VFIO device, because this might be the last device and we might need
875 * to unregister the callback.
877 rte_rwlock_read_lock(mem_lock);
879 /* get group number */
880 ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_num);
882 RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver\n",
884 /* This is an error at this point. */
889 /* get the actual group fd */
890 vfio_group_fd = rte_vfio_get_group_fd(iommu_group_num);
891 if (vfio_group_fd <= 0) {
892 RTE_LOG(INFO, EAL, "rte_vfio_get_group_fd failed for %s\n",
898 /* get the vfio_config it belongs to */
899 vfio_cfg = get_vfio_cfg_by_group_num(iommu_group_num);
900 vfio_cfg = vfio_cfg ? vfio_cfg : default_vfio_cfg;
902 /* At this point we have an active group. Closing it will detach it from
903 * the container. If this is the last active group, the VFIO kernel
904 * code will unset the container and the IOMMU mappings.
907 /* Closing a device */
908 if (close(vfio_dev_fd) < 0) {
909 RTE_LOG(INFO, EAL, "Error when closing vfio_dev_fd for %s\n",
915 /* A VFIO group can have several devices attached. Only when no devices
916 * remain should the group be closed.
918 vfio_group_device_put(vfio_group_fd);
919 if (!vfio_group_device_count(vfio_group_fd)) {
921 if (close(vfio_group_fd) < 0) {
922 RTE_LOG(INFO, EAL, "Error when closing vfio_group_fd for %s\n",
928 if (rte_vfio_clear_group(vfio_group_fd) < 0) {
929 RTE_LOG(INFO, EAL, "Error when clearing group for %s\n",
936 /* if there are no active device groups, unregister the callback to
937 * avoid spurious attempts to map/unmap memory from VFIO.
939 if (vfio_cfg == default_vfio_cfg && vfio_cfg->vfio_active_groups == 0 &&
940 rte_eal_process_type() != RTE_PROC_SECONDARY)
941 rte_mem_event_callback_unregister(VFIO_MEM_EVENT_CLB_NAME,
948 rte_rwlock_read_unlock(mem_lock);
953 rte_vfio_enable(const char *modname)
955 /* initialize group list */
959 rte_spinlock_recursive_t lock = RTE_SPINLOCK_RECURSIVE_INITIALIZER;
961 for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
962 vfio_cfgs[i].vfio_container_fd = -1;
963 vfio_cfgs[i].vfio_active_groups = 0;
964 vfio_cfgs[i].vfio_iommu_type = NULL;
965 vfio_cfgs[i].mem_maps.lock = lock;
967 for (j = 0; j < VFIO_MAX_GROUPS; j++) {
968 vfio_cfgs[i].vfio_groups[j].fd = -1;
969 vfio_cfgs[i].vfio_groups[j].group_num = -1;
970 vfio_cfgs[i].vfio_groups[j].devices = 0;
974 /* inform the user that we are probing for VFIO */
975 RTE_LOG(INFO, EAL, "Probing VFIO support...\n");
977 /* check if vfio module is loaded */
978 vfio_available = rte_eal_check_module(modname);
980 /* return error directly */
981 if (vfio_available == -1) {
982 RTE_LOG(INFO, EAL, "Could not get loaded module details!\n");
986 /* return 0 if VFIO modules not loaded */
987 if (vfio_available == 0) {
988 RTE_LOG(DEBUG, EAL, "VFIO modules not loaded, "
989 "skipping VFIO support...\n");
993 if (internal_config.process_type == RTE_PROC_PRIMARY) {
994 /* open a new container */
995 default_vfio_cfg->vfio_container_fd =
996 rte_vfio_get_container_fd();
998 /* get the default container from the primary process */
999 default_vfio_cfg->vfio_container_fd =
1000 vfio_get_default_container_fd();
1003 /* check if we have VFIO driver enabled */
1004 if (default_vfio_cfg->vfio_container_fd != -1) {
1005 RTE_LOG(NOTICE, EAL, "VFIO support initialized\n");
1006 default_vfio_cfg->vfio_enabled = 1;
1008 RTE_LOG(NOTICE, EAL, "VFIO support could not be initialized\n");
1015 rte_vfio_is_enabled(const char *modname)
1017 const int mod_available = rte_eal_check_module(modname) > 0;
1018 return default_vfio_cfg->vfio_enabled && mod_available;
1022 vfio_get_default_container_fd(void)
1024 struct rte_mp_msg mp_req, *mp_rep;
1025 struct rte_mp_reply mp_reply;
1026 struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
1027 struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
1029 if (default_vfio_cfg->vfio_enabled)
1030 return default_vfio_cfg->vfio_container_fd;
1032 if (internal_config.process_type == RTE_PROC_PRIMARY) {
1033 /* if we were secondary process we would try requesting
1034 * container fd from the primary, but we're the primary
1035 * process so just exit here
1040 p->req = SOCKET_REQ_DEFAULT_CONTAINER;
1041 strcpy(mp_req.name, EAL_VFIO_MP);
1042 mp_req.len_param = sizeof(*p);
1045 if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
1046 mp_reply.nb_received == 1) {
1047 mp_rep = &mp_reply.msgs[0];
1048 p = (struct vfio_mp_param *)mp_rep->param;
1049 if (p->result == SOCKET_OK && mp_rep->num_fds == 1) {
1050 free(mp_reply.msgs);
1051 return mp_rep->fds[0];
1053 free(mp_reply.msgs);
1056 RTE_LOG(ERR, EAL, " cannot request default container fd\n");
1061 vfio_get_iommu_type(void)
1063 if (default_vfio_cfg->vfio_iommu_type == NULL)
1066 return default_vfio_cfg->vfio_iommu_type->type_id;
1069 const struct vfio_iommu_type *
1070 vfio_set_iommu_type(int vfio_container_fd)
1073 for (idx = 0; idx < RTE_DIM(iommu_types); idx++) {
1074 const struct vfio_iommu_type *t = &iommu_types[idx];
1076 int ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU,
1079 RTE_LOG(NOTICE, EAL, " using IOMMU type %d (%s)\n",
1080 t->type_id, t->name);
1083 /* not an error, there may be more supported IOMMU types */
1084 RTE_LOG(DEBUG, EAL, " set IOMMU type %d (%s) failed, "
1085 "error %i (%s)\n", t->type_id, t->name, errno,
1088 /* if we didn't find a suitable IOMMU type, fail */
1093 vfio_has_supported_extensions(int vfio_container_fd)
1096 unsigned idx, n_extensions = 0;
1097 for (idx = 0; idx < RTE_DIM(iommu_types); idx++) {
1098 const struct vfio_iommu_type *t = &iommu_types[idx];
1100 ret = ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION,
1103 RTE_LOG(ERR, EAL, " could not get IOMMU type, "
1104 "error %i (%s)\n", errno,
1106 close(vfio_container_fd);
1108 } else if (ret == 1) {
1109 /* we found a supported extension */
1112 RTE_LOG(DEBUG, EAL, " IOMMU type %d (%s) is %s\n",
1113 t->type_id, t->name,
1114 ret ? "supported" : "not supported");
1117 /* if we didn't find any supported IOMMU types, fail */
1118 if (!n_extensions) {
1119 close(vfio_container_fd);
1127 rte_vfio_get_container_fd(void)
1129 int ret, vfio_container_fd;
1130 struct rte_mp_msg mp_req, *mp_rep;
1131 struct rte_mp_reply mp_reply;
1132 struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
1133 struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
1136 /* if we're in a primary process, try to open the container */
1137 if (internal_config.process_type == RTE_PROC_PRIMARY) {
1138 vfio_container_fd = open(VFIO_CONTAINER_PATH, O_RDWR);
1139 if (vfio_container_fd < 0) {
1140 RTE_LOG(ERR, EAL, " cannot open VFIO container, "
1141 "error %i (%s)\n", errno, strerror(errno));
1145 /* check VFIO API version */
1146 ret = ioctl(vfio_container_fd, VFIO_GET_API_VERSION);
1147 if (ret != VFIO_API_VERSION) {
1149 RTE_LOG(ERR, EAL, " could not get VFIO API version, "
1150 "error %i (%s)\n", errno, strerror(errno));
1152 RTE_LOG(ERR, EAL, " unsupported VFIO API version!\n");
1153 close(vfio_container_fd);
1157 ret = vfio_has_supported_extensions(vfio_container_fd);
1159 RTE_LOG(ERR, EAL, " no supported IOMMU "
1160 "extensions found!\n");
1164 return vfio_container_fd;
1167 * if we're in a secondary process, request container fd from the
1168 * primary process via mp channel
1170 p->req = SOCKET_REQ_CONTAINER;
1171 strcpy(mp_req.name, EAL_VFIO_MP);
1172 mp_req.len_param = sizeof(*p);
1175 vfio_container_fd = -1;
1176 if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
1177 mp_reply.nb_received == 1) {
1178 mp_rep = &mp_reply.msgs[0];
1179 p = (struct vfio_mp_param *)mp_rep->param;
1180 if (p->result == SOCKET_OK && mp_rep->num_fds == 1) {
1181 vfio_container_fd = mp_rep->fds[0];
1182 free(mp_reply.msgs);
1183 return vfio_container_fd;
1185 free(mp_reply.msgs);
1188 RTE_LOG(ERR, EAL, " cannot request container fd\n");
1193 rte_vfio_get_group_num(const char *sysfs_base,
1194 const char *dev_addr, int *iommu_group_num)
1196 char linkname[PATH_MAX];
1197 char filename[PATH_MAX];
1198 char *tok[16], *group_tok, *end;
1201 memset(linkname, 0, sizeof(linkname));
1202 memset(filename, 0, sizeof(filename));
1204 /* try to find out IOMMU group for this device */
1205 snprintf(linkname, sizeof(linkname),
1206 "%s/%s/iommu_group", sysfs_base, dev_addr);
1208 ret = readlink(linkname, filename, sizeof(filename));
1210 /* if the link doesn't exist, no VFIO for us */
1214 ret = rte_strsplit(filename, sizeof(filename),
1215 tok, RTE_DIM(tok), '/');
1218 RTE_LOG(ERR, EAL, " %s cannot get IOMMU group\n", dev_addr);
1222 /* IOMMU group is always the last token */
1224 group_tok = tok[ret - 1];
1226 *iommu_group_num = strtol(group_tok, &end, 10);
1227 if ((end != group_tok && *end != '\0') || errno != 0) {
1228 RTE_LOG(ERR, EAL, " %s error parsing IOMMU number!\n", dev_addr);
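/* for example, for a PCI device the iommu_group link usually resolves to
 * something like "../../../../kernel/iommu_groups/42", so the last path
 * token ("42") is what gets parsed into *iommu_group_num above.
 */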
1236 type1_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
1239 int *vfio_container_fd = arg;
1244 return vfio_type1_dma_mem_map(*vfio_container_fd, ms->addr_64, ms->iova,
1249 vfio_type1_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
1250 uint64_t len, int do_map)
1252 struct vfio_iommu_type1_dma_map dma_map;
1253 struct vfio_iommu_type1_dma_unmap dma_unmap;
1257 memset(&dma_map, 0, sizeof(dma_map));
1258 dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
1259 dma_map.vaddr = vaddr;
1261 dma_map.iova = iova;
1262 dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
1263 VFIO_DMA_MAP_FLAG_WRITE;
1265 ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
1267 RTE_LOG(ERR, EAL, " cannot set up DMA remapping, error %i (%s)\n",
1268 errno, strerror(errno));
1272 memset(&dma_unmap, 0, sizeof(dma_unmap));
1273 dma_unmap.argsz = sizeof(struct vfio_iommu_type1_dma_unmap);
1274 dma_unmap.size = len;
1275 dma_unmap.iova = iova;
1277 ret = ioctl(vfio_container_fd, VFIO_IOMMU_UNMAP_DMA,
1280 RTE_LOG(ERR, EAL, " cannot clear DMA remapping, error %i (%s)\n",
1281 errno, strerror(errno));
1290 vfio_type1_dma_map(int vfio_container_fd)
1292 return rte_memseg_walk(type1_map, &vfio_container_fd);
1296 vfio_spapr_dma_do_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
1297 uint64_t len, int do_map)
1299 struct vfio_iommu_type1_dma_map dma_map;
1300 struct vfio_iommu_type1_dma_unmap dma_unmap;
1302 struct vfio_iommu_spapr_register_memory reg = {
1303 .argsz = sizeof(reg),
1306 reg.vaddr = (uintptr_t) vaddr;
1310 ret = ioctl(vfio_container_fd,
1311 VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
1313 RTE_LOG(ERR, EAL, " cannot register vaddr for IOMMU, "
1314 "error %i (%s)\n", errno, strerror(errno));
1318 memset(&dma_map, 0, sizeof(dma_map));
1319 dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
1320 dma_map.vaddr = vaddr;
1322 dma_map.iova = iova;
1323 dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
1324 VFIO_DMA_MAP_FLAG_WRITE;
1326 ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
1328 RTE_LOG(ERR, EAL, " cannot set up DMA remapping, error %i (%s)\n",
1329 errno, strerror(errno));
1334 ret = ioctl(vfio_container_fd,
1335 VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
1337 RTE_LOG(ERR, EAL, " cannot unregister vaddr for IOMMU, error %i (%s)\n",
1338 errno, strerror(errno));
1342 memset(&dma_unmap, 0, sizeof(dma_unmap));
1343 dma_unmap.argsz = sizeof(struct vfio_iommu_type1_dma_unmap);
1344 dma_unmap.size = len;
1345 dma_unmap.iova = iova;
1347 ret = ioctl(vfio_container_fd, VFIO_IOMMU_UNMAP_DMA,
1350 RTE_LOG(ERR, EAL, " cannot clear DMA remapping, error %i (%s)\n",
1351 errno, strerror(errno));
1360 vfio_spapr_map_walk(const struct rte_memseg_list *msl,
1361 const struct rte_memseg *ms, void *arg)
1363 int *vfio_container_fd = arg;
1368 return vfio_spapr_dma_do_map(*vfio_container_fd, ms->addr_64, ms->iova,
1372 struct spapr_walk_param {
1373 uint64_t window_size;
1374 uint64_t hugepage_sz;
1377 vfio_spapr_window_size_walk(const struct rte_memseg_list *msl,
1378 const struct rte_memseg *ms, void *arg)
1380 struct spapr_walk_param *param = arg;
1381 uint64_t max = ms->iova + ms->len;
1386 if (max > param->window_size) {
1387 param->hugepage_sz = ms->hugepage_sz;
1388 param->window_size = max;
1395 vfio_spapr_create_new_dma_window(int vfio_container_fd,
1396 struct vfio_iommu_spapr_tce_create *create) {
1397 struct vfio_iommu_spapr_tce_remove remove = {
1398 .argsz = sizeof(remove),
1400 struct vfio_iommu_spapr_tce_info info = {
1401 .argsz = sizeof(info),
1405 /* query spapr iommu info */
1406 ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
1408 RTE_LOG(ERR, EAL, " cannot get iommu info, "
1409 "error %i (%s)\n", errno, strerror(errno));
1413 /* remove the default 32-bit DMA window */
1414 remove.start_addr = info.dma32_window_start;
1415 ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
1417 RTE_LOG(ERR, EAL, " cannot remove default DMA window, "
1418 "error %i (%s)\n", errno, strerror(errno));
1422 /* create new DMA window */
1423 ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, create);
1425 RTE_LOG(ERR, EAL, " cannot create new DMA window, "
1426 "error %i (%s)\n", errno, strerror(errno));
1430 if (create->start_addr != 0) {
1431 RTE_LOG(ERR, EAL, " DMA window start address != 0\n");
1439 vfio_spapr_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
1440 uint64_t len, int do_map)
1442 struct spapr_walk_param param;
1443 struct vfio_iommu_spapr_tce_create create = {
1444 .argsz = sizeof(create),
1446 struct vfio_config *vfio_cfg;
1447 struct user_mem_maps *user_mem_maps;
1450 vfio_cfg = get_vfio_cfg_by_container_fd(vfio_container_fd);
1451 if (vfio_cfg == NULL) {
1452 RTE_LOG(ERR, EAL, " invalid container fd!\n");
1456 user_mem_maps = &vfio_cfg->mem_maps;
1457 rte_spinlock_recursive_lock(&user_mem_maps->lock);
1459 /* check if window size needs to be adjusted */
1460 memset(&param, 0, sizeof(param));
1462 /* we're inside a callback so use thread-unsafe version */
1463 if (rte_memseg_walk_thread_unsafe(vfio_spapr_window_size_walk,
1465 RTE_LOG(ERR, EAL, "Could not get window size\n");
1470 /* also check user maps */
1471 for (i = 0; i < user_mem_maps->n_maps; i++) {
1472 uint64_t max = user_mem_maps->maps[i].iova +
1473 user_mem_maps->maps[i].len;
1474 param.window_size = RTE_MAX(param.window_size, max);
1477 /* sPAPR requires window size to be a power of 2 */
1478 create.window_size = rte_align64pow2(param.window_size);
1479 create.page_shift = __builtin_ctzll(param.hugepage_sz);
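/* for example, with 2 MB hugepages and roughly 3 GB of memory to cover,
 * rte_align64pow2() rounds the window up to 4 GB and page_shift becomes 21
 * (__builtin_ctzll of 0x200000).
 */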
1484 /* re-create window and remap the entire memory */
1485 if (iova > create.window_size) {
1486 if (vfio_spapr_create_new_dma_window(vfio_container_fd,
1488 RTE_LOG(ERR, EAL, "Could not create new DMA window\n");
1492 /* we're inside a callback, so use thread-unsafe version
1494 if (rte_memseg_walk_thread_unsafe(vfio_spapr_map_walk,
1495 &vfio_container_fd) < 0) {
1496 RTE_LOG(ERR, EAL, "Could not recreate DMA maps\n");
1500 /* remap all user maps */
1501 for (i = 0; i < user_mem_maps->n_maps; i++) {
1502 struct user_mem_map *map =
1503 &user_mem_maps->maps[i];
1504 if (vfio_spapr_dma_do_map(vfio_container_fd,
1505 map->addr, map->iova, map->len,
1507 RTE_LOG(ERR, EAL, "Could not recreate user DMA maps\n");
1514 /* now that we've remapped all of the memory that was present
1515 * before, map the segment that we were requested to map.
1517 * however, if we were called by the callback, the memory we
1518 * were called with was already in the memseg list, so the previous
1519 * mapping pass should have mapped that segment already.
1521 * virt2memseg_list is a relatively cheap check, so use that. if
1522 * memory is within any memseg list, it's a memseg, so it's
1525 addr = (void *)(uintptr_t)vaddr;
1526 if (rte_mem_virt2memseg_list(addr) == NULL &&
1527 vfio_spapr_dma_do_map(vfio_container_fd,
1528 vaddr, iova, len, 1) < 0) {
1529 RTE_LOG(ERR, EAL, "Could not map segment\n");
1534 /* for unmap, check if iova within DMA window */
1535 if (iova > create.window_size) {
1536 RTE_LOG(ERR, EAL, "iova beyond DMA window for unmap");
1541 vfio_spapr_dma_do_map(vfio_container_fd, vaddr, iova, len, 0);
1544 rte_spinlock_recursive_unlock(&user_mem_maps->lock);
1549 vfio_spapr_dma_map(int vfio_container_fd)
1551 struct vfio_iommu_spapr_tce_create create = {
1552 .argsz = sizeof(create),
1554 struct spapr_walk_param param;
1556 memset(&param, 0, sizeof(param));
1558 /* create DMA window from 0 to max(phys_addr + len) */
1559 rte_memseg_walk(vfio_spapr_window_size_walk, &param);
1561 /* sPAPR requires window size to be a power of 2 */
1562 create.window_size = rte_align64pow2(param.window_size);
1563 create.page_shift = __builtin_ctzll(param.hugepage_sz);
1566 if (vfio_spapr_create_new_dma_window(vfio_container_fd, &create) < 0) {
1567 RTE_LOG(ERR, EAL, "Could not create new DMA window\n");
1571 /* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */
1572 if (rte_memseg_walk(vfio_spapr_map_walk, &vfio_container_fd) < 0)
1579 vfio_noiommu_dma_map(int __rte_unused vfio_container_fd)
1581 /* No-IOMMU mode does not need DMA mapping */
1586 vfio_noiommu_dma_mem_map(int __rte_unused vfio_container_fd,
1587 uint64_t __rte_unused vaddr,
1588 uint64_t __rte_unused iova, uint64_t __rte_unused len,
1589 int __rte_unused do_map)
1591 /* No-IOMMU mode does not need DMA mapping */
1596 vfio_dma_mem_map(struct vfio_config *vfio_cfg, uint64_t vaddr, uint64_t iova,
1597 uint64_t len, int do_map)
1599 const struct vfio_iommu_type *t = vfio_cfg->vfio_iommu_type;
1602 RTE_LOG(ERR, EAL, " VFIO support not initialized\n");
1607 if (!t->dma_user_map_func) {
1609 " VFIO custom DMA region maping not supported by IOMMU %s\n",
1611 rte_errno = ENOTSUP;
1615 return t->dma_user_map_func(vfio_cfg->vfio_container_fd, vaddr, iova,
1620 container_dma_map(struct vfio_config *vfio_cfg, uint64_t vaddr, uint64_t iova,
1623 struct user_mem_map *new_map;
1624 struct user_mem_maps *user_mem_maps;
1627 user_mem_maps = &vfio_cfg->mem_maps;
1628 rte_spinlock_recursive_lock(&user_mem_maps->lock);
1629 if (user_mem_maps->n_maps == VFIO_MAX_USER_MEM_MAPS) {
1630 RTE_LOG(ERR, EAL, "No more space for user mem maps\n");
1636 if (vfio_dma_mem_map(vfio_cfg, vaddr, iova, len, 1)) {
1637 /* technically, this will fail if there are currently no devices
1638 * plugged in, even though the mapping might have succeeded if a
1639 * device were added later. however, since we cannot verify whether
1640 * the mapping is valid without a device attached, consider it
1641 * unsupported: we can't just store an arbitrary mapping and pollute
1642 * the list of active mappings.
1644 RTE_LOG(ERR, EAL, "Couldn't map new region for DMA\n");
1648 /* create new user mem map entry */
1649 new_map = &user_mem_maps->maps[user_mem_maps->n_maps++];
1650 new_map->addr = vaddr;
1651 new_map->iova = iova;
1654 compact_user_maps(user_mem_maps);
1656 rte_spinlock_recursive_unlock(&user_mem_maps->lock);
1661 container_dma_unmap(struct vfio_config *vfio_cfg, uint64_t vaddr, uint64_t iova,
1664 struct user_mem_map *map, *new_map = NULL;
1665 struct user_mem_maps *user_mem_maps;
1668 user_mem_maps = &vfio_cfg->mem_maps;
1669 rte_spinlock_recursive_lock(&user_mem_maps->lock);
1671 /* find our mapping */
1672 map = find_user_mem_map(user_mem_maps, vaddr, iova, len);
1674 RTE_LOG(ERR, EAL, "Couldn't find previously mapped region\n");
1679 if (map->addr != vaddr || map->iova != iova || map->len != len) {
1680 /* we're partially unmapping a previously mapped region, so we
1681 * need to split the entry in two.
1683 if (user_mem_maps->n_maps == VFIO_MAX_USER_MEM_MAPS) {
1684 RTE_LOG(ERR, EAL, "Not enough space to store partial mapping\n");
1689 new_map = &user_mem_maps->maps[user_mem_maps->n_maps++];
1692 /* unmap the entry */
1693 if (vfio_dma_mem_map(vfio_cfg, vaddr, iova, len, 0)) {
1694 /* there may not be any devices plugged in, so unmapping will
1695 * fail with ENODEV/ENOTSUP rte_errno values, but that doesn't
1696 * stop us from removing the mapping: the assumption is that we
1697 * won't need this memory any more and thus want to
1698 * prevent it from being remapped again on hotplug. so, only
1699 * fail if we indeed failed to unmap (e.g. if the mapping was
1700 * within our mapped range but had invalid alignment).
1702 if (rte_errno != ENODEV && rte_errno != ENOTSUP) {
1703 RTE_LOG(ERR, EAL, "Couldn't unmap region for DMA\n");
1707 RTE_LOG(DEBUG, EAL, "DMA unmapping failed, but removing mappings anyway\n");
1710 /* remove map from the list of active mappings */
1711 if (new_map != NULL) {
1712 adjust_map(map, new_map, vaddr, len);
1714 /* if we've created a new map by splitting, sort everything */
1715 if (!is_null_map(new_map)) {
1716 compact_user_maps(user_mem_maps);
1718 /* we've created a new mapping, but it was unused */
1719 user_mem_maps->n_maps--;
1722 memset(map, 0, sizeof(*map));
1723 compact_user_maps(user_mem_maps);
1724 user_mem_maps->n_maps--;
1728 rte_spinlock_recursive_unlock(&user_mem_maps->lock);
1733 rte_vfio_dma_map(uint64_t vaddr, uint64_t iova, uint64_t len)
1740 return container_dma_map(default_vfio_cfg, vaddr, iova, len);
1744 rte_vfio_dma_unmap(uint64_t vaddr, uint64_t iova, uint64_t len)
1751 return container_dma_unmap(default_vfio_cfg, vaddr, iova, len);
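/* a minimal usage sketch for the two wrappers above, mapping an externally
 * allocated, page-aligned buffer for DMA (the buffer itself is only assumed
 * here and must be set up by the caller):
 *
 *	uint64_t va = (uint64_t)(uintptr_t)buf;
 *
 *	if (rte_vfio_dma_map(va, va, buf_len) == 0) {
 *		... hand the buffer to a device for DMA ...
 *		rte_vfio_dma_unmap(va, va, buf_len);
 *	}
 *
 * using VA as IOVA here mirrors what vfio_mem_event_callback() does in
 * IOVA-as-VA mode.
 */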
1755 rte_vfio_noiommu_is_enabled(void)
1761 fd = open(VFIO_NOIOMMU_MODE, O_RDONLY);
1763 if (errno != ENOENT) {
1764 RTE_LOG(ERR, EAL, " cannot open vfio noiommu file %i (%s)\n",
1765 errno, strerror(errno));
1769 * else the file does not exist,
1770 * i.e. noiommu is not enabled
1775 cnt = read(fd, &c, 1);
1778 RTE_LOG(ERR, EAL, " unable to read from vfio noiommu "
1779 "file %i (%s)\n", errno, strerror(errno));
1787 rte_vfio_container_create(void)
1791 /* Find an empty slot to store new vfio config */
1792 for (i = 1; i < VFIO_MAX_CONTAINERS; i++) {
1793 if (vfio_cfgs[i].vfio_container_fd == -1)
1797 if (i == VFIO_MAX_CONTAINERS) {
1798 RTE_LOG(ERR, EAL, "exceed max vfio container limit\n");
1802 vfio_cfgs[i].vfio_container_fd = rte_vfio_get_container_fd();
1803 if (vfio_cfgs[i].vfio_container_fd < 0) {
1804 RTE_LOG(NOTICE, EAL, "fail to create a new container\n");
1808 return vfio_cfgs[i].vfio_container_fd;
1811 int __rte_experimental
1812 rte_vfio_container_destroy(int container_fd)
1814 struct vfio_config *vfio_cfg;
1817 vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
1818 if (vfio_cfg == NULL) {
1819 RTE_LOG(ERR, EAL, "Invalid container fd\n");
1823 for (i = 0; i < VFIO_MAX_GROUPS; i++)
1824 if (vfio_cfg->vfio_groups[i].group_num != -1)
1825 rte_vfio_container_group_unbind(container_fd,
1826 vfio_cfg->vfio_groups[i].group_num);
1828 close(container_fd);
1829 vfio_cfg->vfio_container_fd = -1;
1830 vfio_cfg->vfio_active_groups = 0;
1831 vfio_cfg->vfio_iommu_type = NULL;
1837 rte_vfio_container_group_bind(int container_fd, int iommu_group_num)
1839 struct vfio_config *vfio_cfg;
1841 vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
1842 if (vfio_cfg == NULL) {
1843 RTE_LOG(ERR, EAL, "Invalid container fd\n");
1847 return vfio_get_group_fd(vfio_cfg, iommu_group_num);
1851 rte_vfio_container_group_unbind(int container_fd, int iommu_group_num)
1853 struct vfio_config *vfio_cfg;
1854 struct vfio_group *cur_grp = NULL;
1857 vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
1858 if (vfio_cfg == NULL) {
1859 RTE_LOG(ERR, EAL, "Invalid container fd\n");
1863 for (i = 0; i < VFIO_MAX_GROUPS; i++) {
1864 if (vfio_cfg->vfio_groups[i].group_num == iommu_group_num) {
1865 cur_grp = &vfio_cfg->vfio_groups[i];
1870 /* This should not happen */
1871 if (i == VFIO_MAX_GROUPS || cur_grp == NULL) {
1872 RTE_LOG(ERR, EAL, "Specified group number not found\n");
1876 if (cur_grp->fd >= 0 && close(cur_grp->fd) < 0) {
1877 RTE_LOG(ERR, EAL, "Error when closing vfio_group_fd for"
1878 " iommu_group_num %d\n", iommu_group_num);
1881 cur_grp->group_num = -1;
1883 cur_grp->devices = 0;
1884 vfio_cfg->vfio_active_groups--;
1890 rte_vfio_container_dma_map(int container_fd, uint64_t vaddr, uint64_t iova,
1893 struct vfio_config *vfio_cfg;
1900 vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
1901 if (vfio_cfg == NULL) {
1902 RTE_LOG(ERR, EAL, "Invalid container fd\n");
1906 return container_dma_map(vfio_cfg, vaddr, iova, len);
1910 rte_vfio_container_dma_unmap(int container_fd, uint64_t vaddr, uint64_t iova,
1913 struct vfio_config *vfio_cfg;
1920 vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
1921 if (vfio_cfg == NULL) {
1922 RTE_LOG(ERR, EAL, "Invalid container fd\n");
1926 return container_dma_unmap(vfio_cfg, vaddr, iova, len);
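/* a hypothetical end-to-end flow for the container API above; the group
 * number, addresses and length are illustrative only:
 *
 *	int cfd = rte_vfio_container_create();
 *	if (cfd >= 0 &&
 *			rte_vfio_container_group_bind(cfd, group_num) >= 0 &&
 *			rte_vfio_container_dma_map(cfd, va, iova, len) == 0) {
 *		... attach devices from the group and run DMA ...
 *		rte_vfio_container_dma_unmap(cfd, va, iova, len);
 *	}
 *	rte_vfio_container_group_unbind(cfd, group_num);
 *	rte_vfio_container_destroy(cfd);
 */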
1932 rte_vfio_dma_map(uint64_t __rte_unused vaddr, __rte_unused uint64_t iova,
1933 __rte_unused uint64_t len)
1939 rte_vfio_dma_unmap(uint64_t __rte_unused vaddr, uint64_t __rte_unused iova,
1940 __rte_unused uint64_t len)
1946 rte_vfio_setup_device(__rte_unused const char *sysfs_base,
1947 __rte_unused const char *dev_addr,
1948 __rte_unused int *vfio_dev_fd,
1949 __rte_unused struct vfio_device_info *device_info)
1955 rte_vfio_release_device(__rte_unused const char *sysfs_base,
1956 __rte_unused const char *dev_addr, __rte_unused int fd)
1962 rte_vfio_enable(__rte_unused const char *modname)
1968 rte_vfio_is_enabled(__rte_unused const char *modname)
1974 rte_vfio_noiommu_is_enabled(void)
1980 rte_vfio_clear_group(__rte_unused int vfio_group_fd)
1986 rte_vfio_get_group_num(__rte_unused const char *sysfs_base,
1987 __rte_unused const char *dev_addr,
1988 __rte_unused int *iommu_group_num)
1994 rte_vfio_get_container_fd(void)
2000 rte_vfio_get_group_fd(__rte_unused int iommu_group_num)
2006 rte_vfio_container_create(void)
2012 rte_vfio_container_destroy(__rte_unused int container_fd)
2018 rte_vfio_container_group_bind(__rte_unused int container_fd,
2019 __rte_unused int iommu_group_num)
2025 rte_vfio_container_group_unbind(__rte_unused int container_fd,
2026 __rte_unused int iommu_group_num)
2032 rte_vfio_container_dma_map(__rte_unused int container_fd,
2033 __rte_unused uint64_t vaddr,
2034 __rte_unused uint64_t iova,
2035 __rte_unused uint64_t len)
2041 rte_vfio_container_dma_unmap(__rte_unused int container_fd,
2042 __rte_unused uint64_t vaddr,
2043 __rte_unused uint64_t iova,
2044 __rte_unused uint64_t len)
2049 #endif /* VFIO_PRESENT */