1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
11 #include <rte_errno.h>
13 #include <rte_memory.h>
14 #include <rte_eal_memconfig.h>
17 #include "eal_filesystem.h"
18 #include "eal_memcfg.h"
20 #include "eal_private.h"
24 #define VFIO_MEM_EVENT_CLB_NAME "vfio_mem_event_clb"
26 /* hot plug/unplug of VFIO groups may cause all DMA maps to be dropped. we can
27 * recreate the mappings for DPDK segments, but we cannot do so for memory that
28 * was registered by the user themselves, so we need to store the user mappings
29 * somewhere, to recreate them later.
31 #define VFIO_MAX_USER_MEM_MAPS 256
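/* each user_mem_map entry records the user-supplied VA, IOVA and length of a
 * registered region (see map->addr/iova/len below); the array is kept sorted
 * and merged by the helpers that follow.
 */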
38 struct user_mem_maps {
39 rte_spinlock_recursive_t lock;
41 struct user_mem_map maps[VFIO_MAX_USER_MEM_MAPS];
46 int vfio_container_fd;
47 int vfio_active_groups;
48 const struct vfio_iommu_type *vfio_iommu_type;
49 struct vfio_group vfio_groups[VFIO_MAX_GROUPS];
50 struct user_mem_maps mem_maps;
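/* together, the fields above describe a single VFIO container: its fd, the
 * number of active groups, the IOMMU type in use, the groups bound to it and
 * the user-registered DMA maps.
 */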
53 /* per-process VFIO config */
54 static struct vfio_config vfio_cfgs[VFIO_MAX_CONTAINERS];
55 static struct vfio_config *default_vfio_cfg = &vfio_cfgs[0];
57 static int vfio_type1_dma_map(int);
58 static int vfio_type1_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
59 static int vfio_spapr_dma_map(int);
60 static int vfio_spapr_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
61 static int vfio_noiommu_dma_map(int);
62 static int vfio_noiommu_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
63 static int vfio_dma_mem_map(struct vfio_config *vfio_cfg, uint64_t vaddr,
64 uint64_t iova, uint64_t len, int do_map);
66 /* IOMMU types we support */
67 static const struct vfio_iommu_type iommu_types[] = {
68 /* x86 IOMMU, otherwise known as type 1 */
70 .type_id = RTE_VFIO_TYPE1,
72 .dma_map_func = &vfio_type1_dma_map,
73 .dma_user_map_func = &vfio_type1_dma_mem_map
75 /* ppc64 IOMMU, otherwise known as spapr */
77 .type_id = RTE_VFIO_SPAPR,
79 .dma_map_func = &vfio_spapr_dma_map,
80 .dma_user_map_func = &vfio_spapr_dma_mem_map
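/* IOMMU-less mode, otherwise known as no-IOMMU */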
84 .type_id = RTE_VFIO_NOIOMMU,
86 .dma_map_func = &vfio_noiommu_dma_map,
87 .dma_user_map_func = &vfio_noiommu_dma_mem_map
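/* note: vfio_set_iommu_type() probes these entries in array order, so type 1
 * is preferred, then sPAPR, then no-IOMMU.
 */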
92 is_null_map(const struct user_mem_map *map)
94 return map->addr == 0 && map->iova == 0 && map->len == 0;
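/* an all-zero entry marks an unused slot; merge_map() and compact_user_maps()
 * rely on this to reclaim space in the maps array.
 */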
97 /* we may need to merge user mem maps together in case of user mapping/unmapping
98 * chunks of memory, so we'll need a comparator function to sort segments.
101 user_mem_map_cmp(const void *a, const void *b)
103 const struct user_mem_map *umm_a = a;
104 const struct user_mem_map *umm_b = b;
106 /* move null entries to end */
107 if (is_null_map(umm_a))
109 if (is_null_map(umm_b))
112 /* sort by iova first */
113 if (umm_a->iova < umm_b->iova)
115 if (umm_a->iova > umm_b->iova)
118 if (umm_a->addr < umm_b->addr)
120 if (umm_a->addr > umm_b->addr)
123 if (umm_a->len < umm_b->len)
125 if (umm_a->len > umm_b->len)
131 /* adjust a user map entry. this may result in shortening the existing map, or in
132 * splitting the existing map into two pieces.
135 adjust_map(struct user_mem_map *src, struct user_mem_map *end,
136 uint64_t remove_va_start, uint64_t remove_len)
138 /* if va start is same as start address, we're simply moving start */
139 if (remove_va_start == src->addr) {
140 src->addr += remove_len;
141 src->iova += remove_len;
142 src->len -= remove_len;
143 } else if (remove_va_start + remove_len == src->addr + src->len) {
144 /* we're shrinking mapping from the end */
145 src->len -= remove_len;
147 /* we're blowing a hole in the middle */
148 struct user_mem_map tmp;
149 uint64_t total_len = src->len;
151 /* adjust source segment length */
152 src->len = remove_va_start - src->addr;
154 /* create temporary segment in the middle */
155 tmp.addr = src->addr + src->len;
156 tmp.iova = src->iova + src->len;
157 tmp.len = remove_len;
159 /* populate end segment - this one we will be keeping */
160 end->addr = tmp.addr + tmp.len;
161 end->iova = tmp.iova + tmp.len;
162 end->len = total_len - src->len - tmp.len;
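/* illustrative example: removing [0x2000, 0x3000) from a map covering
 * [0x1000, 0x5000) shortens src to [0x1000, 0x2000) and leaves end covering
 * [0x3000, 0x5000); the removed middle chunk (tmp) is dropped.
 */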
166 /* try merging two maps into one, return 1 if succeeded */
168 merge_map(struct user_mem_map *left, struct user_mem_map *right)
170 if (left->addr + left->len != right->addr)
172 if (left->iova + left->len != right->iova)
175 left->len += right->len;
177 memset(right, 0, sizeof(*right));
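/* e.g. [0x1000, 0x2000) and [0x2000, 0x3000) with equally contiguous IOVAs
 * collapse into a single [0x1000, 0x3000) entry; the right-hand entry becomes
 * a null map and is later discarded by compact_user_maps().
 */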
182 static struct user_mem_map *
183 find_user_mem_map(struct user_mem_maps *user_mem_maps, uint64_t addr,
184 uint64_t iova, uint64_t len)
186 uint64_t va_end = addr + len;
187 uint64_t iova_end = iova + len;
190 for (i = 0; i < user_mem_maps->n_maps; i++) {
191 struct user_mem_map *map = &user_mem_maps->maps[i];
192 uint64_t map_va_end = map->addr + map->len;
193 uint64_t map_iova_end = map->iova + map->len;
196 if (addr < map->addr || addr >= map_va_end)
198 /* check if VA end is within boundaries */
199 if (va_end <= map->addr || va_end > map_va_end)
202 /* check start IOVA */
203 if (iova < map->iova || iova >= map_iova_end)
205 /* check if IOVA end is within boundaries */
206 if (iova_end <= map->iova || iova_end > map_iova_end)
209 /* we've found our map */
215 /* this will sort all user maps, and merge/compact any adjacent maps */
217 compact_user_maps(struct user_mem_maps *user_mem_maps)
219 int i, n_merged, cur_idx;
221 qsort(user_mem_maps->maps, user_mem_maps->n_maps,
222 sizeof(user_mem_maps->maps[0]), user_mem_map_cmp);
224 /* we'll go over the list backwards when merging */
226 for (i = user_mem_maps->n_maps - 2; i >= 0; i--) {
227 struct user_mem_map *l, *r;
229 l = &user_mem_maps->maps[i];
230 r = &user_mem_maps->maps[i + 1];
232 if (is_null_map(l) || is_null_map(r))
239 /* the entries are still sorted, but now they have holes in them, so
240 * walk through the list and remove the holes
244 for (i = 0; i < user_mem_maps->n_maps; i++) {
245 if (!is_null_map(&user_mem_maps->maps[i])) {
246 struct user_mem_map *src, *dst;
248 src = &user_mem_maps->maps[i];
249 dst = &user_mem_maps->maps[cur_idx++];
252 memcpy(dst, src, sizeof(*src));
253 memset(src, 0, sizeof(*src));
257 user_mem_maps->n_maps = cur_idx;
262 vfio_open_group_fd(int iommu_group_num)
265 char filename[PATH_MAX];
266 struct rte_mp_msg mp_req, *mp_rep;
267 struct rte_mp_reply mp_reply = {0};
268 struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
269 struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
270 const struct internal_config *internal_conf =
271 eal_get_internal_configuration();
273 /* if primary, try to open the group */
274 if (internal_conf->process_type == RTE_PROC_PRIMARY) {
275 /* try regular group format */
276 snprintf(filename, sizeof(filename),
277 VFIO_GROUP_FMT, iommu_group_num);
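/* VFIO_GROUP_FMT presumably expands to the /dev/vfio/<group> device node path
 * (defined elsewhere); the no-IOMMU variant below uses a noiommu- prefix.
 */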
278 vfio_group_fd = open(filename, O_RDWR);
279 if (vfio_group_fd < 0) {
280 /* if file not found, it's not an error */
281 if (errno != ENOENT) {
282 RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename,
287 /* special case: try no-IOMMU path as well */
288 snprintf(filename, sizeof(filename),
289 VFIO_NOIOMMU_GROUP_FMT,
291 vfio_group_fd = open(filename, O_RDWR);
292 if (vfio_group_fd < 0) {
293 if (errno != ENOENT) {
294 RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename,
300 /* noiommu group found */
303 return vfio_group_fd;
305 /* if we're in a secondary process, request group fd from the primary
306 * process via mp channel.
308 p->req = SOCKET_REQ_GROUP;
309 p->group_num = iommu_group_num;
310 strcpy(mp_req.name, EAL_VFIO_MP);
311 mp_req.len_param = sizeof(*p);
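/* ask the primary over the EAL_VFIO_MP channel; on success the reply carries
 * the open group fd attached to the message (mp_rep->fds[0]).
 */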
315 if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
316 mp_reply.nb_received == 1) {
317 mp_rep = &mp_reply.msgs[0];
318 p = (struct vfio_mp_param *)mp_rep->param;
319 if (p->result == SOCKET_OK && mp_rep->num_fds == 1) {
320 vfio_group_fd = mp_rep->fds[0];
321 } else if (p->result == SOCKET_NO_FD) {
322 RTE_LOG(ERR, EAL, " bad VFIO group fd\n");
323 vfio_group_fd = -ENOENT;
328 if (vfio_group_fd < 0 && vfio_group_fd != -ENOENT)
329 RTE_LOG(ERR, EAL, " cannot request group fd\n");
330 return vfio_group_fd;
333 static struct vfio_config *
334 get_vfio_cfg_by_group_num(int iommu_group_num)
336 struct vfio_config *vfio_cfg;
339 for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
340 vfio_cfg = &vfio_cfgs[i];
341 for (j = 0; j < VFIO_MAX_GROUPS; j++) {
342 if (vfio_cfg->vfio_groups[j].group_num ==
352 vfio_get_group_fd(struct vfio_config *vfio_cfg,
357 struct vfio_group *cur_grp;
359 /* check if we already have the group descriptor open */
360 for (i = 0; i < VFIO_MAX_GROUPS; i++)
361 if (vfio_cfg->vfio_groups[i].group_num == iommu_group_num)
362 return vfio_cfg->vfio_groups[i].fd;
364 /* Let's first see whether there is room for a new group */
365 if (vfio_cfg->vfio_active_groups == VFIO_MAX_GROUPS) {
366 RTE_LOG(ERR, EAL, "Maximum number of VFIO groups reached!\n");
370 /* Now let's get an index for the new group */
371 for (i = 0; i < VFIO_MAX_GROUPS; i++)
372 if (vfio_cfg->vfio_groups[i].group_num == -1) {
373 cur_grp = &vfio_cfg->vfio_groups[i];
377 /* This should not happen */
378 if (i == VFIO_MAX_GROUPS) {
379 RTE_LOG(ERR, EAL, "No VFIO group free slot found\n");
383 vfio_group_fd = vfio_open_group_fd(iommu_group_num);
384 if (vfio_group_fd < 0) {
385 RTE_LOG(ERR, EAL, "Failed to open group %d\n", iommu_group_num);
386 return vfio_group_fd;
389 cur_grp->group_num = iommu_group_num;
390 cur_grp->fd = vfio_group_fd;
391 vfio_cfg->vfio_active_groups++;
393 return vfio_group_fd;
396 static struct vfio_config *
397 get_vfio_cfg_by_group_fd(int vfio_group_fd)
399 struct vfio_config *vfio_cfg;
402 for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
403 vfio_cfg = &vfio_cfgs[i];
404 for (j = 0; j < VFIO_MAX_GROUPS; j++)
405 if (vfio_cfg->vfio_groups[j].fd == vfio_group_fd)
412 static struct vfio_config *
413 get_vfio_cfg_by_container_fd(int container_fd)
417 if (container_fd == RTE_VFIO_DEFAULT_CONTAINER_FD)
418 return default_vfio_cfg;
420 for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
421 if (vfio_cfgs[i].vfio_container_fd == container_fd)
422 return &vfio_cfgs[i];
429 rte_vfio_get_group_fd(int iommu_group_num)
431 struct vfio_config *vfio_cfg;
433 /* get the vfio_config it belongs to */
434 vfio_cfg = get_vfio_cfg_by_group_num(iommu_group_num);
435 vfio_cfg = vfio_cfg ? vfio_cfg : default_vfio_cfg;
437 return vfio_get_group_fd(vfio_cfg, iommu_group_num);
441 get_vfio_group_idx(int vfio_group_fd)
443 struct vfio_config *vfio_cfg;
446 for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
447 vfio_cfg = &vfio_cfgs[i];
448 for (j = 0; j < VFIO_MAX_GROUPS; j++)
449 if (vfio_cfg->vfio_groups[j].fd == vfio_group_fd)
457 vfio_group_device_get(int vfio_group_fd)
459 struct vfio_config *vfio_cfg;
462 vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
463 if (vfio_cfg == NULL) {
464 RTE_LOG(ERR, EAL, " invalid group fd!\n");
468 i = get_vfio_group_idx(vfio_group_fd);
469 if (i < 0 || i > (VFIO_MAX_GROUPS - 1))
470 RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
472 vfio_cfg->vfio_groups[i].devices++;
476 vfio_group_device_put(int vfio_group_fd)
478 struct vfio_config *vfio_cfg;
481 vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
482 if (vfio_cfg == NULL) {
483 RTE_LOG(ERR, EAL, " invalid group fd!\n");
487 i = get_vfio_group_idx(vfio_group_fd);
488 if (i < 0 || i > (VFIO_MAX_GROUPS - 1))
489 RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
491 vfio_cfg->vfio_groups[i].devices--;
495 vfio_group_device_count(int vfio_group_fd)
497 struct vfio_config *vfio_cfg;
500 vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
501 if (vfio_cfg == NULL) {
502 RTE_LOG(ERR, EAL, " invalid group fd!\n");
506 i = get_vfio_group_idx(vfio_group_fd);
507 if (i < 0 || i > (VFIO_MAX_GROUPS - 1)) {
508 RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
512 return vfio_cfg->vfio_groups[i].devices;
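/* callback invoked on memory hotplug events: adds or removes DMA mappings in
 * the default container as DPDK memory is allocated or freed.
 */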
516 vfio_mem_event_callback(enum rte_mem_event type, const void *addr, size_t len,
517 void *arg __rte_unused)
519 rte_iova_t iova_start, iova_expected;
520 struct rte_memseg_list *msl;
521 struct rte_memseg *ms;
525 msl = rte_mem_virt2memseg_list(addr);
527 /* in IOVA as VA mode, there is no need to care about IOVA addresses */
528 if (rte_eal_iova_mode() == RTE_IOVA_VA && msl->external == 0) {
529 uint64_t vfio_va = (uint64_t)(uintptr_t)addr;
530 if (type == RTE_MEM_EVENT_ALLOC)
531 vfio_dma_mem_map(default_vfio_cfg, vfio_va, vfio_va,
534 vfio_dma_mem_map(default_vfio_cfg, vfio_va, vfio_va,
539 #ifdef RTE_ARCH_PPC_64
540 ms = rte_mem_virt2memseg(addr, msl);
541 while (cur_len < len) {
542 int idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
544 rte_fbarray_set_free(&msl->memseg_arr, idx);
550 /* memsegs are contiguous in memory */
551 ms = rte_mem_virt2memseg(addr, msl);
554 * This memory is not guaranteed to be contiguous, but it still could
555 * be, or it could have some small contiguous chunks. Since the number
556 * of VFIO mappings is limited, and VFIO does not appear to concatenate
557 * adjacent mappings, we have to do this ourselves.
559 * So, find contiguous chunks, then map them.
561 va_start = ms->addr_64;
562 iova_start = iova_expected = ms->iova;
563 while (cur_len < len) {
564 bool new_contig_area = ms->iova != iova_expected;
565 bool last_seg = (len - cur_len) == ms->len;
566 bool skip_last = false;
568 /* only do mappings when current contiguous area ends */
569 if (new_contig_area) {
570 if (type == RTE_MEM_EVENT_ALLOC)
571 vfio_dma_mem_map(default_vfio_cfg, va_start,
573 iova_expected - iova_start, 1);
575 vfio_dma_mem_map(default_vfio_cfg, va_start,
577 iova_expected - iova_start, 0);
578 va_start = ms->addr_64;
579 iova_start = ms->iova;
581 /* some memory segments may have invalid IOVA */
582 if (ms->iova == RTE_BAD_IOVA) {
583 RTE_LOG(DEBUG, EAL, "Memory segment at %p has bad IOVA, skipping\n",
587 iova_expected = ms->iova + ms->len;
592 * don't count previous segment, and don't attempt to
593 * dereference a potentially invalid pointer.
595 if (skip_last && !last_seg) {
596 iova_expected = iova_start = ms->iova;
597 va_start = ms->addr_64;
598 } else if (!skip_last && last_seg) {
599 /* this is the last segment and we're not skipping */
600 if (type == RTE_MEM_EVENT_ALLOC)
601 vfio_dma_mem_map(default_vfio_cfg, va_start,
603 iova_expected - iova_start, 1);
605 vfio_dma_mem_map(default_vfio_cfg, va_start,
607 iova_expected - iova_start, 0);
610 #ifdef RTE_ARCH_PPC_64
612 ms = rte_mem_virt2memseg(addr, msl);
613 while (cur_len < len) {
614 int idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
616 rte_fbarray_set_used(&msl->memseg_arr, idx);
624 vfio_sync_default_container(void)
626 struct rte_mp_msg mp_req, *mp_rep;
627 struct rte_mp_reply mp_reply = {0};
628 struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
629 struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
633 /* cannot be called from primary */
634 if (rte_eal_process_type() != RTE_PROC_SECONDARY)
637 /* default container fd should have been opened in rte_vfio_enable() */
638 if (!default_vfio_cfg->vfio_enabled ||
639 default_vfio_cfg->vfio_container_fd < 0) {
640 RTE_LOG(ERR, EAL, "VFIO support is not initialized\n");
644 /* find default container's IOMMU type */
645 p->req = SOCKET_REQ_IOMMU_TYPE;
646 strcpy(mp_req.name, EAL_VFIO_MP);
647 mp_req.len_param = sizeof(*p);
651 if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
652 mp_reply.nb_received == 1) {
653 mp_rep = &mp_reply.msgs[0];
654 p = (struct vfio_mp_param *)mp_rep->param;
655 if (p->result == SOCKET_OK)
656 iommu_type_id = p->iommu_type_id;
659 if (iommu_type_id < 0) {
660 RTE_LOG(ERR, EAL, "Could not get IOMMU type for default container\n");
664 /* we now have an fd for default container, as well as its IOMMU type.
665 * now, set up default VFIO container config to match.
667 for (i = 0; i < RTE_DIM(iommu_types); i++) {
668 const struct vfio_iommu_type *t = &iommu_types[i];
669 if (t->type_id != iommu_type_id)
672 /* we found our IOMMU type */
673 default_vfio_cfg->vfio_iommu_type = t;
677 RTE_LOG(ERR, EAL, "Could not find IOMMU type id (%i)\n",
683 rte_vfio_clear_group(int vfio_group_fd)
686 struct vfio_config *vfio_cfg;
688 vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
689 if (vfio_cfg == NULL) {
690 RTE_LOG(ERR, EAL, " invalid group fd!\n");
694 i = get_vfio_group_idx(vfio_group_fd);
697 vfio_cfg->vfio_groups[i].group_num = -1;
698 vfio_cfg->vfio_groups[i].fd = -1;
699 vfio_cfg->vfio_groups[i].devices = 0;
700 vfio_cfg->vfio_active_groups--;
706 rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
707 int *vfio_dev_fd, struct vfio_device_info *device_info)
709 struct vfio_group_status group_status = {
710 .argsz = sizeof(group_status)
712 struct vfio_config *vfio_cfg;
713 struct user_mem_maps *user_mem_maps;
714 int vfio_container_fd;
719 const struct internal_config *internal_conf =
720 eal_get_internal_configuration();
722 /* get group number */
723 ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_num);
725 RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
730 /* if negative, something failed */
734 /* get the actual group fd */
735 vfio_group_fd = rte_vfio_get_group_fd(iommu_group_num);
736 if (vfio_group_fd < 0 && vfio_group_fd != -ENOENT)
740 * if vfio_group_fd == -ENOENT, that means the device
741 * isn't managed by VFIO
743 if (vfio_group_fd == -ENOENT) {
744 RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
750 * at this point, we know that this group is viable (meaning, all devices
751 * are either bound to VFIO or not bound to anything)
754 /* check if the group is viable */
755 ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &group_status);
757 RTE_LOG(ERR, EAL, " %s cannot get group status, "
758 "error %i (%s)\n", dev_addr, errno, strerror(errno));
759 close(vfio_group_fd);
760 rte_vfio_clear_group(vfio_group_fd);
762 } else if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
763 RTE_LOG(ERR, EAL, " %s VFIO group is not viable! "
764 "Not all devices in IOMMU group bound to VFIO or unbound\n",
766 close(vfio_group_fd);
767 rte_vfio_clear_group(vfio_group_fd);
771 /* get the vfio_config it belongs to */
772 vfio_cfg = get_vfio_cfg_by_group_num(iommu_group_num);
773 vfio_cfg = vfio_cfg ? vfio_cfg : default_vfio_cfg;
774 vfio_container_fd = vfio_cfg->vfio_container_fd;
775 user_mem_maps = &vfio_cfg->mem_maps;
777 /* check if group does not have a container yet */
778 if (!(group_status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
780 /* add group to a container */
781 ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER,
784 RTE_LOG(ERR, EAL, " %s cannot add VFIO group to container, "
785 "error %i (%s)\n", dev_addr, errno, strerror(errno));
786 close(vfio_group_fd);
787 rte_vfio_clear_group(vfio_group_fd);
792 * pick an IOMMU type and set up DMA mappings for container
794 * needs to be done only once, only when first group is
795 * assigned to a container and only in primary process.
796 * Note this can happen several times with hotplug.
799 if (internal_conf->process_type == RTE_PROC_PRIMARY &&
800 vfio_cfg->vfio_active_groups == 1 &&
801 vfio_group_device_count(vfio_group_fd) == 0) {
802 const struct vfio_iommu_type *t;
804 /* select an IOMMU type which we will be using */
805 t = vfio_set_iommu_type(vfio_container_fd);
808 " %s failed to select IOMMU type\n",
810 close(vfio_group_fd);
811 rte_vfio_clear_group(vfio_group_fd);
814 /* lock memory hotplug before mapping and release it
815 * after registering callback, to prevent races
817 rte_mcfg_mem_read_lock();
818 if (vfio_cfg == default_vfio_cfg)
819 ret = t->dma_map_func(vfio_container_fd);
824 " %s DMA remapping failed, error %i (%s)\n",
825 dev_addr, errno, strerror(errno));
826 close(vfio_group_fd);
827 rte_vfio_clear_group(vfio_group_fd);
828 rte_mcfg_mem_read_unlock();
832 vfio_cfg->vfio_iommu_type = t;
834 /* re-map all user-mapped segments */
835 rte_spinlock_recursive_lock(&user_mem_maps->lock);
837 /* this IOMMU type may not support DMA mapping, but
838 * if we have mappings in the list - that means we have
839 * previously mapped something successfully, so we can
840 * be sure that DMA mapping is supported.
842 for (i = 0; i < user_mem_maps->n_maps; i++) {
843 struct user_mem_map *map;
844 map = &user_mem_maps->maps[i];
846 ret = t->dma_user_map_func(
848 map->addr, map->iova, map->len,
851 RTE_LOG(ERR, EAL, "Couldn't map user memory for DMA: "
853 "iova: 0x%" PRIx64 " "
854 "len: 0x%" PRIx64 "\n",
855 map->addr, map->iova,
857 rte_spinlock_recursive_unlock(
858 &user_mem_maps->lock);
859 rte_mcfg_mem_read_unlock();
863 rte_spinlock_recursive_unlock(&user_mem_maps->lock);
865 /* register callback for mem events */
866 if (vfio_cfg == default_vfio_cfg)
867 ret = rte_mem_event_callback_register(
868 VFIO_MEM_EVENT_CLB_NAME,
869 vfio_mem_event_callback, NULL);
872 /* unlock memory hotplug */
873 rte_mcfg_mem_read_unlock();
875 if (ret && rte_errno != ENOTSUP) {
876 RTE_LOG(ERR, EAL, "Could not install memory event callback for VFIO\n");
880 RTE_LOG(DEBUG, EAL, "Memory event callbacks not supported\n");
882 RTE_LOG(DEBUG, EAL, "Installed memory event callback for VFIO\n");
884 } else if (rte_eal_process_type() != RTE_PROC_PRIMARY &&
885 vfio_cfg == default_vfio_cfg &&
886 vfio_cfg->vfio_iommu_type == NULL) {
887 /* if we're not a primary process, we do not set up the VFIO
888 * container because it's already been set up by the primary
889 * process. instead, we simply ask the primary about VFIO type
890 * we are using, and set the VFIO config up appropriately.
892 ret = vfio_sync_default_container();
894 RTE_LOG(ERR, EAL, "Could not sync default VFIO container\n");
895 close(vfio_group_fd);
896 rte_vfio_clear_group(vfio_group_fd);
899 /* we have successfully initialized VFIO, notify user */
900 const struct vfio_iommu_type *t =
901 default_vfio_cfg->vfio_iommu_type;
902 RTE_LOG(INFO, EAL, " using IOMMU type %d (%s)\n",
903 t->type_id, t->name);
906 rte_eal_vfio_get_vf_token(vf_token);
908 /* first, try to get a device file descriptor using the VF token */
909 if (!rte_uuid_is_null(vf_token)) {
910 char vf_token_str[RTE_UUID_STRLEN];
913 rte_uuid_unparse(vf_token, vf_token_str, sizeof(vf_token_str));
914 snprintf(dev, sizeof(dev),
915 "%s vf_token=%s", dev_addr, vf_token_str);
917 *vfio_dev_fd = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD,
919 if (*vfio_dev_fd >= 0)
923 /* get a file descriptor for the device */
924 *vfio_dev_fd = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD, dev_addr);
925 if (*vfio_dev_fd < 0) {
926 /* if we cannot get a device fd, this implies a problem with
927 * the VFIO group or the container not having IOMMU configured.
930 RTE_LOG(WARNING, EAL, "Getting a vfio_dev_fd for %s failed\n",
932 close(vfio_group_fd);
933 rte_vfio_clear_group(vfio_group_fd);
937 /* test and setup the device */
939 ret = ioctl(*vfio_dev_fd, VFIO_DEVICE_GET_INFO, device_info);
941 RTE_LOG(ERR, EAL, " %s cannot get device info, "
942 "error %i (%s)\n", dev_addr, errno,
945 close(vfio_group_fd);
946 rte_vfio_clear_group(vfio_group_fd);
949 vfio_group_device_get(vfio_group_fd);
955 rte_vfio_release_device(const char *sysfs_base, const char *dev_addr,
958 struct vfio_config *vfio_cfg;
963 /* we don't want any DMA mapping messages to come while we're detaching
964 * VFIO device, because this might be the last device and we might need
965 * to unregister the callback.
967 rte_mcfg_mem_read_lock();
969 /* get group number */
970 ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_num);
972 RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver\n",
974 /* This is an error at this point. */
979 /* get the actual group fd */
980 vfio_group_fd = rte_vfio_get_group_fd(iommu_group_num);
981 if (vfio_group_fd < 0) {
982 RTE_LOG(INFO, EAL, "rte_vfio_get_group_fd failed for %s\n",
988 /* get the vfio_config it belongs to */
989 vfio_cfg = get_vfio_cfg_by_group_num(iommu_group_num);
990 vfio_cfg = vfio_cfg ? vfio_cfg : default_vfio_cfg;
992 /* At this point we have an active group. Closing it will detach it from
993 * the container. If this is the last active group, the VFIO kernel code
994 * will unset the container and tear down the IOMMU mappings.
997 /* Closing a device */
998 if (close(vfio_dev_fd) < 0) {
999 RTE_LOG(INFO, EAL, "Error when closing vfio_dev_fd for %s\n",
1005 /* A VFIO group can have several devices attached. Only when no devices
1006 * remain should the group be closed.
1008 vfio_group_device_put(vfio_group_fd);
1009 if (!vfio_group_device_count(vfio_group_fd)) {
1011 if (close(vfio_group_fd) < 0) {
1012 RTE_LOG(INFO, EAL, "Error when closing vfio_group_fd for %s\n",
1018 if (rte_vfio_clear_group(vfio_group_fd) < 0) {
1019 RTE_LOG(INFO, EAL, "Error when clearing group for %s\n",
1026 /* if there are no active device groups, unregister the callback to
1027 * avoid spurious attempts to map/unmap memory from VFIO.
1029 if (vfio_cfg == default_vfio_cfg && vfio_cfg->vfio_active_groups == 0 &&
1030 rte_eal_process_type() != RTE_PROC_SECONDARY)
1031 rte_mem_event_callback_unregister(VFIO_MEM_EVENT_CLB_NAME,
1038 rte_mcfg_mem_read_unlock();
1043 rte_vfio_enable(const char *modname)
1045 /* initialize group list */
1048 const struct internal_config *internal_conf =
1049 eal_get_internal_configuration();
1051 rte_spinlock_recursive_t lock = RTE_SPINLOCK_RECURSIVE_INITIALIZER;
1053 for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
1054 vfio_cfgs[i].vfio_container_fd = -1;
1055 vfio_cfgs[i].vfio_active_groups = 0;
1056 vfio_cfgs[i].vfio_iommu_type = NULL;
1057 vfio_cfgs[i].mem_maps.lock = lock;
1059 for (j = 0; j < VFIO_MAX_GROUPS; j++) {
1060 vfio_cfgs[i].vfio_groups[j].fd = -1;
1061 vfio_cfgs[i].vfio_groups[j].group_num = -1;
1062 vfio_cfgs[i].vfio_groups[j].devices = 0;
1066 /* inform the user that we are probing for VFIO */
1067 RTE_LOG(INFO, EAL, "Probing VFIO support...\n");
1069 /* check if vfio module is loaded */
1070 vfio_available = rte_eal_check_module(modname);
1072 /* return error directly */
1073 if (vfio_available == -1) {
1074 RTE_LOG(INFO, EAL, "Could not get loaded module details!\n");
1078 /* return 0 if VFIO modules not loaded */
1079 if (vfio_available == 0) {
1080 RTE_LOG(DEBUG, EAL, "VFIO modules not loaded, "
1081 "skipping VFIO support...\n");
1085 if (internal_conf->process_type == RTE_PROC_PRIMARY) {
1086 /* open a new container */
1087 default_vfio_cfg->vfio_container_fd =
1088 rte_vfio_get_container_fd();
1090 /* get the default container from the primary process */
1091 default_vfio_cfg->vfio_container_fd =
1092 vfio_get_default_container_fd();
1095 /* check if we have VFIO driver enabled */
1096 if (default_vfio_cfg->vfio_container_fd != -1) {
1097 RTE_LOG(INFO, EAL, "VFIO support initialized\n");
1098 default_vfio_cfg->vfio_enabled = 1;
1100 RTE_LOG(NOTICE, EAL, "VFIO support could not be initialized\n");
1107 rte_vfio_is_enabled(const char *modname)
1109 const int mod_available = rte_eal_check_module(modname) > 0;
1110 return default_vfio_cfg->vfio_enabled && mod_available;
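/* usage sketch (module name is illustrative): a PCI driver would typically
 * call rte_vfio_is_enabled("vfio-pci") to decide whether VFIO can be used.
 */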
1114 vfio_get_default_container_fd(void)
1116 struct rte_mp_msg mp_req, *mp_rep;
1117 struct rte_mp_reply mp_reply = {0};
1118 struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
1119 struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
1121 const struct internal_config *internal_conf =
1122 eal_get_internal_configuration();
1124 if (default_vfio_cfg->vfio_enabled)
1125 return default_vfio_cfg->vfio_container_fd;
1127 if (internal_conf->process_type == RTE_PROC_PRIMARY) {
1128 /* if we were a secondary process we would try requesting the
1129 * container fd from the primary, but we are the primary
1130 * process, so just exit here
1135 p->req = SOCKET_REQ_DEFAULT_CONTAINER;
1136 strcpy(mp_req.name, EAL_VFIO_MP);
1137 mp_req.len_param = sizeof(*p);
1140 if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
1141 mp_reply.nb_received == 1) {
1142 mp_rep = &mp_reply.msgs[0];
1143 p = (struct vfio_mp_param *)mp_rep->param;
1144 if (p->result == SOCKET_OK && mp_rep->num_fds == 1) {
1145 container_fd = mp_rep->fds[0];
1146 free(mp_reply.msgs);
1147 return container_fd;
1151 free(mp_reply.msgs);
1152 RTE_LOG(ERR, EAL, " cannot request default container fd\n");
1157 vfio_get_iommu_type(void)
1159 if (default_vfio_cfg->vfio_iommu_type == NULL)
1162 return default_vfio_cfg->vfio_iommu_type->type_id;
1165 const struct vfio_iommu_type *
1166 vfio_set_iommu_type(int vfio_container_fd)
1169 for (idx = 0; idx < RTE_DIM(iommu_types); idx++) {
1170 const struct vfio_iommu_type *t = &iommu_types[idx];
1172 int ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU,
1175 RTE_LOG(INFO, EAL, " using IOMMU type %d (%s)\n",
1176 t->type_id, t->name);
1179 /* not an error, there may be more supported IOMMU types */
1180 RTE_LOG(DEBUG, EAL, " set IOMMU type %d (%s) failed, "
1181 "error %i (%s)\n", t->type_id, t->name, errno,
1184 /* if we didn't find a suitable IOMMU type, fail */
1189 vfio_has_supported_extensions(int vfio_container_fd)
1192 unsigned idx, n_extensions = 0;
1193 for (idx = 0; idx < RTE_DIM(iommu_types); idx++) {
1194 const struct vfio_iommu_type *t = &iommu_types[idx];
1196 ret = ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION,
1199 RTE_LOG(ERR, EAL, " could not get IOMMU type, "
1200 "error %i (%s)\n", errno,
1202 close(vfio_container_fd);
1204 } else if (ret == 1) {
1205 /* we found a supported extension */
1208 RTE_LOG(DEBUG, EAL, " IOMMU type %d (%s) is %s\n",
1209 t->type_id, t->name,
1210 ret ? "supported" : "not supported");
1213 /* if we didn't find any supported IOMMU types, fail */
1214 if (!n_extensions) {
1215 close(vfio_container_fd);
1223 rte_vfio_get_container_fd(void)
1225 int ret, vfio_container_fd;
1226 struct rte_mp_msg mp_req, *mp_rep;
1227 struct rte_mp_reply mp_reply = {0};
1228 struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
1229 struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
1230 const struct internal_config *internal_conf =
1231 eal_get_internal_configuration();
1234 /* if we're in a primary process, try to open the container */
1235 if (internal_conf->process_type == RTE_PROC_PRIMARY) {
1236 vfio_container_fd = open(VFIO_CONTAINER_PATH, O_RDWR);
1237 if (vfio_container_fd < 0) {
1238 RTE_LOG(ERR, EAL, " cannot open VFIO container, "
1239 "error %i (%s)\n", errno, strerror(errno));
1243 /* check VFIO API version */
1244 ret = ioctl(vfio_container_fd, VFIO_GET_API_VERSION);
1245 if (ret != VFIO_API_VERSION) {
1247 RTE_LOG(ERR, EAL, " could not get VFIO API version, "
1248 "error %i (%s)\n", errno, strerror(errno));
1250 RTE_LOG(ERR, EAL, " unsupported VFIO API version!\n");
1251 close(vfio_container_fd);
1255 ret = vfio_has_supported_extensions(vfio_container_fd);
1257 RTE_LOG(ERR, EAL, " no supported IOMMU "
1258 "extensions found!\n");
1262 return vfio_container_fd;
1265 * if we're in a secondary process, request container fd from the
1266 * primary process via mp channel
1268 p->req = SOCKET_REQ_CONTAINER;
1269 strcpy(mp_req.name, EAL_VFIO_MP);
1270 mp_req.len_param = sizeof(*p);
1273 vfio_container_fd = -1;
1274 if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
1275 mp_reply.nb_received == 1) {
1276 mp_rep = &mp_reply.msgs[0];
1277 p = (struct vfio_mp_param *)mp_rep->param;
1278 if (p->result == SOCKET_OK && mp_rep->num_fds == 1) {
1279 vfio_container_fd = mp_rep->fds[0];
1280 free(mp_reply.msgs);
1281 return vfio_container_fd;
1285 free(mp_reply.msgs);
1286 RTE_LOG(ERR, EAL, " cannot request container fd\n");
1291 rte_vfio_get_group_num(const char *sysfs_base,
1292 const char *dev_addr, int *iommu_group_num)
1294 char linkname[PATH_MAX];
1295 char filename[PATH_MAX];
1296 char *tok[16], *group_tok, *end;
1299 memset(linkname, 0, sizeof(linkname));
1300 memset(filename, 0, sizeof(filename));
1302 /* try to find out IOMMU group for this device */
1303 snprintf(linkname, sizeof(linkname),
1304 "%s/%s/iommu_group", sysfs_base, dev_addr);
1306 ret = readlink(linkname, filename, sizeof(filename));
1308 /* if the link doesn't exist, no VFIO for us */
1312 ret = rte_strsplit(filename, sizeof(filename),
1313 tok, RTE_DIM(tok), '/');
1316 RTE_LOG(ERR, EAL, " %s cannot get IOMMU group\n", dev_addr);
1320 /* IOMMU group is always the last token */
1322 group_tok = tok[ret - 1];
1324 *iommu_group_num = strtol(group_tok, &end, 10);
1325 if ((end != group_tok && *end != '\0') || errno != 0) {
1326 RTE_LOG(ERR, EAL, " %s error parsing IOMMU number!\n", dev_addr);
1334 type1_map_contig(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
1335 size_t len, void *arg)
1337 int *vfio_container_fd = arg;
1342 return vfio_type1_dma_mem_map(*vfio_container_fd, ms->addr_64, ms->iova,
1347 type1_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
1350 int *vfio_container_fd = arg;
1352 /* skip external memory that isn't a heap */
1353 if (msl->external && !msl->heap)
1356 /* skip any segments with invalid IOVA addresses */
1357 if (ms->iova == RTE_BAD_IOVA)
1360 /* if IOVA mode is VA, we've already mapped the internal segments */
1361 if (!msl->external && rte_eal_iova_mode() == RTE_IOVA_VA)
1364 return vfio_type1_dma_mem_map(*vfio_container_fd, ms->addr_64, ms->iova,
1369 vfio_type1_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
1370 uint64_t len, int do_map)
1372 struct vfio_iommu_type1_dma_map dma_map;
1373 struct vfio_iommu_type1_dma_unmap dma_unmap;
1377 memset(&dma_map, 0, sizeof(dma_map));
1378 dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
1379 dma_map.vaddr = vaddr;
1381 dma_map.iova = iova;
1382 dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
1383 VFIO_DMA_MAP_FLAG_WRITE;
1385 ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
1388 * if the mapping was already done, EEXIST will be
1389 * returned from the kernel.
1391 if (errno == EEXIST) {
1393 " Memory segment is already mapped,"
1397 " cannot set up DMA remapping,"
1399 errno, strerror(errno));
1404 memset(&dma_unmap, 0, sizeof(dma_unmap));
1405 dma_unmap.argsz = sizeof(struct vfio_iommu_type1_dma_unmap);
1406 dma_unmap.size = len;
1407 dma_unmap.iova = iova;
1409 ret = ioctl(vfio_container_fd, VFIO_IOMMU_UNMAP_DMA,
1412 RTE_LOG(ERR, EAL, " cannot clear DMA remapping, error %i (%s)\n",
1413 errno, strerror(errno));
1422 vfio_type1_dma_map(int vfio_container_fd)
1424 if (rte_eal_iova_mode() == RTE_IOVA_VA) {
1425 /* with IOVA as VA mode, we can get away with mapping contiguous
1426 * chunks rather than going page-by-page.
1428 int ret = rte_memseg_contig_walk(type1_map_contig,
1429 &vfio_container_fd);
1432 /* we have to continue the walk because we've skipped the
1433 * external segments during the config walk.
1436 return rte_memseg_walk(type1_map, &vfio_container_fd);
1440 vfio_spapr_dma_do_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
1441 uint64_t len, int do_map)
1443 struct vfio_iommu_type1_dma_map dma_map;
1444 struct vfio_iommu_type1_dma_unmap dma_unmap;
1446 struct vfio_iommu_spapr_register_memory reg = {
1447 .argsz = sizeof(reg),
1450 reg.vaddr = (uintptr_t) vaddr;
1454 ret = ioctl(vfio_container_fd,
1455 VFIO_IOMMU_SPAPR_REGISTER_MEMORY, ®);
1457 RTE_LOG(ERR, EAL, " cannot register vaddr for IOMMU, "
1458 "error %i (%s)\n", errno, strerror(errno));
1462 memset(&dma_map, 0, sizeof(dma_map));
1463 dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
1464 dma_map.vaddr = vaddr;
1466 dma_map.iova = iova;
1467 dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
1468 VFIO_DMA_MAP_FLAG_WRITE;
1470 ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
1473 * if the mapping was already done, EBUSY will be
1474 * returned from the kernel.
1476 if (errno == EBUSY) {
1478 " Memory segment is already mapped,"
1482 " cannot set up DMA remapping,"
1483 " error %i (%s)\n", errno,
1490 memset(&dma_unmap, 0, sizeof(dma_unmap));
1491 dma_unmap.argsz = sizeof(struct vfio_iommu_type1_dma_unmap);
1492 dma_unmap.size = len;
1493 dma_unmap.iova = iova;
1495 ret = ioctl(vfio_container_fd, VFIO_IOMMU_UNMAP_DMA,
1498 RTE_LOG(ERR, EAL, " cannot clear DMA remapping, error %i (%s)\n",
1499 errno, strerror(errno));
1503 ret = ioctl(vfio_container_fd,
1504 VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, ®);
1506 RTE_LOG(ERR, EAL, " cannot unregister vaddr for IOMMU, error %i (%s)\n",
1507 errno, strerror(errno));
1516 vfio_spapr_map_walk(const struct rte_memseg_list *msl,
1517 const struct rte_memseg *ms, void *arg)
1519 int *vfio_container_fd = arg;
1521 /* skip external memory that isn't a heap */
1522 if (msl->external && !msl->heap)
1525 /* skip any segments with invalid IOVA addresses */
1526 if (ms->iova == RTE_BAD_IOVA)
1529 return vfio_spapr_dma_do_map(*vfio_container_fd, ms->addr_64, ms->iova,
1534 vfio_spapr_unmap_walk(const struct rte_memseg_list *msl,
1535 const struct rte_memseg *ms, void *arg)
1537 int *vfio_container_fd = arg;
1539 /* skip external memory that isn't a heap */
1540 if (msl->external && !msl->heap)
1543 /* skip any segments with invalid IOVA addresses */
1544 if (ms->iova == RTE_BAD_IOVA)
1547 return vfio_spapr_dma_do_map(*vfio_container_fd, ms->addr_64, ms->iova,
1551 struct spapr_walk_param {
1552 uint64_t window_size;
1553 uint64_t hugepage_sz;
1557 vfio_spapr_window_size_walk(const struct rte_memseg_list *msl,
1558 const struct rte_memseg *ms, void *arg)
1560 struct spapr_walk_param *param = arg;
1561 uint64_t max = ms->iova + ms->len;
1563 /* skip external memory that isn't a heap */
1564 if (msl->external && !msl->heap)
1567 /* skip any segments with invalid IOVA addresses */
1568 if (ms->iova == RTE_BAD_IOVA)
1571 if (max > param->window_size) {
1572 param->hugepage_sz = ms->hugepage_sz;
1573 param->window_size = max;
1580 vfio_spapr_create_new_dma_window(int vfio_container_fd,
1581 struct vfio_iommu_spapr_tce_create *create) {
1582 struct vfio_iommu_spapr_tce_remove remove = {
1583 .argsz = sizeof(remove),
1585 struct vfio_iommu_spapr_tce_info info = {
1586 .argsz = sizeof(info),
1590 /* query spapr iommu info */
1591 ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
1593 RTE_LOG(ERR, EAL, " cannot get iommu info, "
1594 "error %i (%s)\n", errno, strerror(errno));
1598 /* remove the default 32-bit DMA window */
1599 remove.start_addr = info.dma32_window_start;
1600 ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
1602 RTE_LOG(ERR, EAL, " cannot remove default DMA window, "
1603 "error %i (%s)\n", errno, strerror(errno));
1607 /* create new DMA window */
1608 ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, create);
1610 #ifdef VFIO_IOMMU_SPAPR_INFO_DDW
1611 /* try possible page_shift and levels for workaround */
1614 for (levels = create->levels + 1;
1615 ret && levels <= info.ddw.levels; levels++) {
1616 create->levels = levels;
1617 ret = ioctl(vfio_container_fd,
1618 VFIO_IOMMU_SPAPR_TCE_CREATE, create);
1622 RTE_LOG(ERR, EAL, " cannot create new DMA window, "
1623 "error %i (%s)\n", errno, strerror(errno));
1628 if (create->start_addr != 0) {
1629 RTE_LOG(ERR, EAL, " DMA window start address != 0\n");
1637 vfio_spapr_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
1638 uint64_t len, int do_map)
1640 struct spapr_walk_param param;
1641 struct vfio_iommu_spapr_tce_create create = {
1642 .argsz = sizeof(create),
1644 struct vfio_config *vfio_cfg;
1645 struct user_mem_maps *user_mem_maps;
1648 vfio_cfg = get_vfio_cfg_by_container_fd(vfio_container_fd);
1649 if (vfio_cfg == NULL) {
1650 RTE_LOG(ERR, EAL, " invalid container fd!\n");
1654 user_mem_maps = &vfio_cfg->mem_maps;
1655 rte_spinlock_recursive_lock(&user_mem_maps->lock);
1657 /* check if window size needs to be adjusted */
1658 memset(¶m, 0, sizeof(param));
1660 /* we're inside a callback so use thread-unsafe version */
1661 if (rte_memseg_walk_thread_unsafe(vfio_spapr_window_size_walk,
1663 RTE_LOG(ERR, EAL, "Could not get window size\n");
1668 /* also check user maps */
1669 for (i = 0; i < user_mem_maps->n_maps; i++) {
1670 uint64_t max = user_mem_maps->maps[i].iova +
1671 user_mem_maps->maps[i].len;
1672 param.window_size = RTE_MAX(param.window_size, max);
1675 /* sPAPR requires window size to be a power of 2 */
1676 create.window_size = rte_align64pow2(param.window_size);
1677 create.page_shift = __builtin_ctzll(param.hugepage_sz);
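/* hugepage_sz is a power of two, so __builtin_ctzll() yields log2(page size),
 * which is what the sPAPR TCE create ioctl expects as page_shift.
 */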
1681 /* re-create window and remap the entire memory */
1682 if (iova + len > create.window_size) {
1683 /* release all maps before recreating the window */
1684 if (rte_memseg_walk_thread_unsafe(vfio_spapr_unmap_walk,
1685 &vfio_container_fd) < 0) {
1686 RTE_LOG(ERR, EAL, "Could not release DMA maps\n");
1690 /* release all user maps */
1691 for (i = 0; i < user_mem_maps->n_maps; i++) {
1692 struct user_mem_map *map =
1693 &user_mem_maps->maps[i];
1694 if (vfio_spapr_dma_do_map(vfio_container_fd,
1695 map->addr, map->iova, map->len,
1697 RTE_LOG(ERR, EAL, "Could not release user DMA maps\n");
1702 create.window_size = rte_align64pow2(iova + len);
1703 if (vfio_spapr_create_new_dma_window(vfio_container_fd,
1705 RTE_LOG(ERR, EAL, "Could not create new DMA window\n");
1709 /* we're inside a callback, so use thread-unsafe version
1711 if (rte_memseg_walk_thread_unsafe(vfio_spapr_map_walk,
1712 &vfio_container_fd) < 0) {
1713 RTE_LOG(ERR, EAL, "Could not recreate DMA maps\n");
1717 /* remap all user maps */
1718 for (i = 0; i < user_mem_maps->n_maps; i++) {
1719 struct user_mem_map *map =
1720 &user_mem_maps->maps[i];
1721 if (vfio_spapr_dma_do_map(vfio_container_fd,
1722 map->addr, map->iova, map->len,
1724 RTE_LOG(ERR, EAL, "Could not recreate user DMA maps\n");
1730 if (vfio_spapr_dma_do_map(vfio_container_fd, vaddr, iova, len, 1)) {
1731 RTE_LOG(ERR, EAL, "Failed to map DMA\n");
1736 /* for unmap, check whether the iova is within the DMA window */
1737 if (iova > create.window_size) {
1738 RTE_LOG(ERR, EAL, "iova beyond DMA window for unmap\n");
1743 vfio_spapr_dma_do_map(vfio_container_fd, vaddr, iova, len, 0);
1746 rte_spinlock_recursive_unlock(&user_mem_maps->lock);
1751 vfio_spapr_dma_map(int vfio_container_fd)
1753 struct vfio_iommu_spapr_tce_create create = {
1754 .argsz = sizeof(create),
1756 struct spapr_walk_param param;
1758 memset(¶m, 0, sizeof(param));
1760 /* create DMA window from 0 to max(phys_addr + len) */
1761 rte_memseg_walk(vfio_spapr_window_size_walk, ¶m);
1763 /* sPAPR requires window size to be a power of 2 */
1764 create.window_size = rte_align64pow2(param.window_size);
1765 create.page_shift = __builtin_ctzll(param.hugepage_sz);
1768 if (vfio_spapr_create_new_dma_window(vfio_container_fd, &create) < 0) {
1769 RTE_LOG(ERR, EAL, "Could not create new DMA window\n");
1773 /* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */
1774 if (rte_memseg_walk(vfio_spapr_map_walk, &vfio_container_fd) < 0)
1781 vfio_noiommu_dma_map(int __rte_unused vfio_container_fd)
1783 /* No-IOMMU mode does not need DMA mapping */
1788 vfio_noiommu_dma_mem_map(int __rte_unused vfio_container_fd,
1789 uint64_t __rte_unused vaddr,
1790 uint64_t __rte_unused iova, uint64_t __rte_unused len,
1791 int __rte_unused do_map)
1793 /* No-IOMMU mode does not need DMA mapping */
1798 vfio_dma_mem_map(struct vfio_config *vfio_cfg, uint64_t vaddr, uint64_t iova,
1799 uint64_t len, int do_map)
1801 const struct vfio_iommu_type *t = vfio_cfg->vfio_iommu_type;
1804 RTE_LOG(ERR, EAL, " VFIO support not initialized\n");
1809 if (!t->dma_user_map_func) {
1811 " VFIO custom DMA region mapping not supported by IOMMU %s\n",
1813 rte_errno = ENOTSUP;
1817 return t->dma_user_map_func(vfio_cfg->vfio_container_fd, vaddr, iova,
1822 container_dma_map(struct vfio_config *vfio_cfg, uint64_t vaddr, uint64_t iova,
1825 struct user_mem_map *new_map;
1826 struct user_mem_maps *user_mem_maps;
1829 user_mem_maps = &vfio_cfg->mem_maps;
1830 rte_spinlock_recursive_lock(&user_mem_maps->lock);
1831 if (user_mem_maps->n_maps == VFIO_MAX_USER_MEM_MAPS) {
1832 RTE_LOG(ERR, EAL, "No more space for user mem maps\n");
1838 if (vfio_dma_mem_map(vfio_cfg, vaddr, iova, len, 1)) {
1839 /* technically, this will fail if there are currently no devices
1840 * plugged in, even though the mapping might have succeeded had a
1841 * device been added later. however, since we cannot verify whether
1842 * the mapping is valid without a device attached, consider it
1843 * unsupported: we cannot just store an arbitrary mapping and
1844 * pollute the list of active mappings.
1846 RTE_LOG(ERR, EAL, "Couldn't map new region for DMA\n");
1850 /* create new user mem map entry */
1851 new_map = &user_mem_maps->maps[user_mem_maps->n_maps++];
1852 new_map->addr = vaddr;
1853 new_map->iova = iova;
1856 compact_user_maps(user_mem_maps);
1858 rte_spinlock_recursive_unlock(&user_mem_maps->lock);
1863 container_dma_unmap(struct vfio_config *vfio_cfg, uint64_t vaddr, uint64_t iova,
1866 struct user_mem_map *map, *new_map = NULL;
1867 struct user_mem_maps *user_mem_maps;
1870 user_mem_maps = &vfio_cfg->mem_maps;
1871 rte_spinlock_recursive_lock(&user_mem_maps->lock);
1873 /* find our mapping */
1874 map = find_user_mem_map(user_mem_maps, vaddr, iova, len);
1876 RTE_LOG(ERR, EAL, "Couldn't find previously mapped region\n");
1881 if (map->addr != vaddr || map->iova != iova || map->len != len) {
1882 /* we're partially unmapping a previously mapped region, so we
1883 * need to split entry into two.
1885 if (user_mem_maps->n_maps == VFIO_MAX_USER_MEM_MAPS) {
1886 RTE_LOG(ERR, EAL, "Not enough space to store partial mapping\n");
1891 new_map = &user_mem_maps->maps[user_mem_maps->n_maps++];
1894 /* unmap the entry */
1895 if (vfio_dma_mem_map(vfio_cfg, vaddr, iova, len, 0)) {
1896 /* there may not be any devices plugged in, so unmapping will
1897 * fail with ENODEV/ENOTSUP rte_errno values, but that doesn't
1898 * stop us from removing the mapping, as the assumption is we
1899 * won't be needing this memory any more and thus will want to
1900 * prevent it from being remapped again on hotplug. so, only
1901 * fail if we indeed failed to unmap (e.g. if the mapping was
1902 * within our mapped range but had invalid alignment).
1904 if (rte_errno != ENODEV && rte_errno != ENOTSUP) {
1905 RTE_LOG(ERR, EAL, "Couldn't unmap region for DMA\n");
1909 RTE_LOG(DEBUG, EAL, "DMA unmapping failed, but removing mappings anyway\n");
1912 /* remove map from the list of active mappings */
1913 if (new_map != NULL) {
1914 adjust_map(map, new_map, vaddr, len);
1916 /* if we've created a new map by splitting, sort everything */
1917 if (!is_null_map(new_map)) {
1918 compact_user_maps(user_mem_maps);
1920 /* we've created a new mapping, but it was unused */
1921 user_mem_maps->n_maps--;
1924 memset(map, 0, sizeof(*map));
1925 compact_user_maps(user_mem_maps);
1926 user_mem_maps->n_maps--;
1930 rte_spinlock_recursive_unlock(&user_mem_maps->lock);
1935 rte_vfio_noiommu_is_enabled(void)
1941 fd = open(VFIO_NOIOMMU_MODE, O_RDONLY);
1943 if (errno != ENOENT) {
1944 RTE_LOG(ERR, EAL, " cannot open vfio noiommu file %i (%s)\n",
1945 errno, strerror(errno));
1949 * otherwise the file does not exist,
1950 * i.e. noiommu is not enabled
1955 cnt = read(fd, &c, 1);
1958 RTE_LOG(ERR, EAL, " unable to read from vfio noiommu "
1959 "file %i (%s)\n", errno, strerror(errno));
1967 rte_vfio_container_create(void)
1971 /* Find an empty slot to store new vfio config */
1972 for (i = 1; i < VFIO_MAX_CONTAINERS; i++) {
1973 if (vfio_cfgs[i].vfio_container_fd == -1)
1977 if (i == VFIO_MAX_CONTAINERS) {
1978 RTE_LOG(ERR, EAL, "Exceeded maximum number of VFIO containers\n");
1982 vfio_cfgs[i].vfio_container_fd = rte_vfio_get_container_fd();
1983 if (vfio_cfgs[i].vfio_container_fd < 0) {
1984 RTE_LOG(NOTICE, EAL, "Failed to create a new container\n");
1988 return vfio_cfgs[i].vfio_container_fd;
1992 rte_vfio_container_destroy(int container_fd)
1994 struct vfio_config *vfio_cfg;
1997 vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
1998 if (vfio_cfg == NULL) {
1999 RTE_LOG(ERR, EAL, "Invalid container fd\n");
2003 for (i = 0; i < VFIO_MAX_GROUPS; i++)
2004 if (vfio_cfg->vfio_groups[i].group_num != -1)
2005 rte_vfio_container_group_unbind(container_fd,
2006 vfio_cfg->vfio_groups[i].group_num);
2008 close(container_fd);
2009 vfio_cfg->vfio_container_fd = -1;
2010 vfio_cfg->vfio_active_groups = 0;
2011 vfio_cfg->vfio_iommu_type = NULL;
2017 rte_vfio_container_group_bind(int container_fd, int iommu_group_num)
2019 struct vfio_config *vfio_cfg;
2021 vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
2022 if (vfio_cfg == NULL) {
2023 RTE_LOG(ERR, EAL, "Invalid container fd\n");
2027 return vfio_get_group_fd(vfio_cfg, iommu_group_num);
2031 rte_vfio_container_group_unbind(int container_fd, int iommu_group_num)
2033 struct vfio_config *vfio_cfg;
2034 struct vfio_group *cur_grp = NULL;
2037 vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
2038 if (vfio_cfg == NULL) {
2039 RTE_LOG(ERR, EAL, "Invalid container fd\n");
2043 for (i = 0; i < VFIO_MAX_GROUPS; i++) {
2044 if (vfio_cfg->vfio_groups[i].group_num == iommu_group_num) {
2045 cur_grp = &vfio_cfg->vfio_groups[i];
2050 /* This should not happen */
2051 if (i == VFIO_MAX_GROUPS || cur_grp == NULL) {
2052 RTE_LOG(ERR, EAL, "Specified group number not found\n");
2056 if (cur_grp->fd >= 0 && close(cur_grp->fd) < 0) {
2057 RTE_LOG(ERR, EAL, "Error when closing vfio_group_fd for"
2058 " iommu_group_num %d\n", iommu_group_num);
2061 cur_grp->group_num = -1;
2063 cur_grp->devices = 0;
2064 vfio_cfg->vfio_active_groups--;
2070 rte_vfio_container_dma_map(int container_fd, uint64_t vaddr, uint64_t iova,
2073 struct vfio_config *vfio_cfg;
2080 vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
2081 if (vfio_cfg == NULL) {
2082 RTE_LOG(ERR, EAL, "Invalid container fd\n");
2086 return container_dma_map(vfio_cfg, vaddr, iova, len);
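/* usage sketch (values are illustrative): to make an externally allocated
 * buffer DMA-capable in the default container, an application could call
 * rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
 *     (uint64_t)(uintptr_t)buf, iova, len);
 * with buf, iova and len describing the user's memory region.
 */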
2090 rte_vfio_container_dma_unmap(int container_fd, uint64_t vaddr, uint64_t iova,
2093 struct vfio_config *vfio_cfg;
2100 vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
2101 if (vfio_cfg == NULL) {
2102 RTE_LOG(ERR, EAL, "Invalid container fd\n");
2106 return container_dma_unmap(vfio_cfg, vaddr, iova, len);
2112 rte_vfio_setup_device(__rte_unused const char *sysfs_base,
2113 __rte_unused const char *dev_addr,
2114 __rte_unused int *vfio_dev_fd,
2115 __rte_unused struct vfio_device_info *device_info)
2121 rte_vfio_release_device(__rte_unused const char *sysfs_base,
2122 __rte_unused const char *dev_addr, __rte_unused int fd)
2128 rte_vfio_enable(__rte_unused const char *modname)
2134 rte_vfio_is_enabled(__rte_unused const char *modname)
2140 rte_vfio_noiommu_is_enabled(void)
2146 rte_vfio_clear_group(__rte_unused int vfio_group_fd)
2152 rte_vfio_get_group_num(__rte_unused const char *sysfs_base,
2153 __rte_unused const char *dev_addr,
2154 __rte_unused int *iommu_group_num)
2160 rte_vfio_get_container_fd(void)
2166 rte_vfio_get_group_fd(__rte_unused int iommu_group_num)
2172 rte_vfio_container_create(void)
2178 rte_vfio_container_destroy(__rte_unused int container_fd)
2184 rte_vfio_container_group_bind(__rte_unused int container_fd,
2185 __rte_unused int iommu_group_num)
2191 rte_vfio_container_group_unbind(__rte_unused int container_fd,
2192 __rte_unused int iommu_group_num)
2198 rte_vfio_container_dma_map(__rte_unused int container_fd,
2199 __rte_unused uint64_t vaddr,
2200 __rte_unused uint64_t iova,
2201 __rte_unused uint64_t len)
2207 rte_vfio_container_dma_unmap(__rte_unused int container_fd,
2208 __rte_unused uint64_t vaddr,
2209 __rte_unused uint64_t iova,
2210 __rte_unused uint64_t len)
2215 #endif /* VFIO_PRESENT */