1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
11 #include <rte_errno.h>
13 #include <rte_memory.h>
14 #include <rte_eal_memconfig.h>
17 #include "eal_filesystem.h"
19 #include "eal_private.h"
23 #define VFIO_MEM_EVENT_CLB_NAME "vfio_mem_event_clb"
25 /* per-process VFIO config */
26 static struct vfio_config vfio_cfg;
28 static int vfio_type1_dma_map(int);
29 static int vfio_type1_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
30 static int vfio_spapr_dma_map(int);
31 static int vfio_spapr_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
32 static int vfio_noiommu_dma_map(int);
33 static int vfio_noiommu_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
34 static int vfio_dma_mem_map(uint64_t vaddr, uint64_t iova, uint64_t len,
37 /* IOMMU types we support */
38 static const struct vfio_iommu_type iommu_types[] = {
39 /* x86 IOMMU, otherwise known as type 1 */
41 .type_id = RTE_VFIO_TYPE1,
43 .dma_map_func = &vfio_type1_dma_map,
44 .dma_user_map_func = &vfio_type1_dma_mem_map
46 /* ppc64 IOMMU, otherwise known as spapr */
48 .type_id = RTE_VFIO_SPAPR,
50 .dma_map_func = &vfio_spapr_dma_map,
51 .dma_user_map_func = &vfio_spapr_dma_mem_map
55 .type_id = RTE_VFIO_NOIOMMU,
57 .dma_map_func = &vfio_noiommu_dma_map,
58 .dma_user_map_func = &vfio_noiommu_dma_mem_map
62 /* hot plug/unplug of VFIO groups may cause all DMA maps to be dropped. we can
63 * recreate the mappings for DPDK segments, but we cannot do so for memory that
64 * was registered by the user themselves, so we need to store the user mappings
65 * somewhere, to recreate them later.
67 #define VFIO_MAX_USER_MEM_MAPS 256
74 rte_spinlock_recursive_t lock;
76 struct user_mem_map maps[VFIO_MAX_USER_MEM_MAPS];
78 .lock = RTE_SPINLOCK_RECURSIVE_INITIALIZER
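/* each entry in maps[] records the VA/IOVA/length triple of one
 * user-registered region, e.g. (illustrative values only):
 *
 *	{ .addr = 0x7f0000000000, .iova = 0x100000000, .len = 0x200000 }
 *
 * so that the region can be replayed against a newly set up container.
 */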
81 /* for sPAPR IOMMU, we will need to walk memseg list, but we cannot use
82 * rte_memseg_walk() because by the time we enter callback we will be holding a
83 write lock, so regular rte_memseg_walk() will deadlock. copying the same
84 iteration code everywhere is not ideal either. so, use a lockless copy of
88 memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg)
90 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
91 int i, ms_idx, ret = 0;
93 for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
94 struct rte_memseg_list *msl = &mcfg->memsegs[i];
95 const struct rte_memseg *ms;
96 struct rte_fbarray *arr;
98 if (msl->memseg_arr.count == 0)
101 arr = &msl->memseg_arr;
103 ms_idx = rte_fbarray_find_next_used(arr, 0);
104 while (ms_idx >= 0) {
105 ms = rte_fbarray_get(arr, ms_idx);
106 ret = func(msl, ms, arg);
111 ms_idx = rte_fbarray_find_next_used(arr, ms_idx + 1);
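/* a minimal sketch of a walk callback as it could be passed to
 * memseg_walk_thread_unsafe() above (hypothetical example, not part of this
 * file - the real users are the sPAPR walk callbacks further down):
 *
 *	static int
 *	count_segs(const struct rte_memseg_list *msl __rte_unused,
 *			const struct rte_memseg *ms __rte_unused, void *arg)
 *	{
 *		int *cnt = arg;
 *		(*cnt)++;
 *		return 0;
 *	}
 *
 * as with rte_memseg_walk(), a non-zero return from the callback stops the
 * walk and is propagated back to the caller.
 */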
118 is_null_map(const struct user_mem_map *map)
120 return map->addr == 0 && map->iova == 0 && map->len == 0;
123 /* we may need to merge user mem maps together in case of user mapping/unmapping
124 * chunks of memory, so we'll need a comparator function to sort segments.
127 user_mem_map_cmp(const void *a, const void *b)
129 const struct user_mem_map *umm_a = a;
130 const struct user_mem_map *umm_b = b;
132 /* move null entries to end */
133 if (is_null_map(umm_a))
135 if (is_null_map(umm_b))
138 /* sort by iova first */
139 if (umm_a->iova < umm_b->iova)
141 if (umm_a->iova > umm_b->iova)
144 if (umm_a->addr < umm_b->addr)
146 if (umm_a->addr > umm_b->addr)
149 if (umm_a->len < umm_b->len)
151 if (umm_a->len > umm_b->len)
157 /* adjust user map entry. this may result in shortening the existing map, or
158 * in splitting it in two pieces.
161 adjust_map(struct user_mem_map *src, struct user_mem_map *end,
162 uint64_t remove_va_start, uint64_t remove_len)
164 /* if remove VA start equals the map's start address, simply move the start */
165 if (remove_va_start == src->addr) {
166 src->addr += remove_len;
167 src->iova += remove_len;
168 src->len -= remove_len;
169 } else if (remove_va_start + remove_len == src->addr + src->len) {
170 /* we're shrinking mapping from the end */
171 src->len -= remove_len;
173 /* we're blowing a hole in the middle */
174 struct user_mem_map tmp;
175 uint64_t total_len = src->len;
177 /* adjust source segment length */
178 src->len = remove_va_start - src->addr;
180 /* create temporary segment in the middle */
181 tmp.addr = src->addr + src->len;
182 tmp.iova = src->iova + src->len;
183 tmp.len = remove_len;
185 /* populate end segment - this one we will be keeping */
186 end->addr = tmp.addr + tmp.len;
187 end->iova = tmp.iova + tmp.len;
188 end->len = total_len - src->len - tmp.len;
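/* worked example (illustrative values only): a map of addr=0x1000,
 * iova=0x1000, len=0x4000, with remove_va_start=0x2000 and remove_len=0x1000,
 * takes the "hole in the middle" path: src shrinks to {0x1000, 0x1000,
 * 0x1000}, the temporary hole covers {0x2000, 0x2000, 0x1000}, and end
 * becomes {0x3000, 0x3000, 0x2000}.
 */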
192 /* try merging two maps into one, return 1 if succeeded */
194 merge_map(struct user_mem_map *left, struct user_mem_map *right)
196 if (left->addr + left->len != right->addr)
198 if (left->iova + left->len != right->iova)
201 left->len += right->len;
203 memset(right, 0, sizeof(*right));
208 static struct user_mem_map *
209 find_user_mem_map(uint64_t addr, uint64_t iova, uint64_t len)
211 uint64_t va_end = addr + len;
212 uint64_t iova_end = iova + len;
215 for (i = 0; i < user_mem_maps.n_maps; i++) {
216 struct user_mem_map *map = &user_mem_maps.maps[i];
217 uint64_t map_va_end = map->addr + map->len;
218 uint64_t map_iova_end = map->iova + map->len;
221 if (addr < map->addr || addr >= map_va_end)
223 /* check if VA end is within boundaries */
224 if (va_end <= map->addr || va_end > map_va_end)
227 /* check start IOVA */
228 if (iova < map->iova || iova >= map_iova_end)
230 /* check if IOVA end is within boundaries */
231 if (iova_end <= map->iova || iova_end > map_iova_end)
234 /* we've found our map */
240 /* this will sort all user maps, and merge/compact any adjacent maps */
242 compact_user_maps(void)
244 int i, n_merged, cur_idx;
246 qsort(user_mem_maps.maps, user_mem_maps.n_maps,
247 sizeof(user_mem_maps.maps[0]), user_mem_map_cmp);
249 /* we'll go over the list backwards when merging */
251 for (i = user_mem_maps.n_maps - 2; i >= 0; i--) {
252 struct user_mem_map *l, *r;
254 l = &user_mem_maps.maps[i];
255 r = &user_mem_maps.maps[i + 1];
257 if (is_null_map(l) || is_null_map(r))
264 /* the entries are still sorted, but now they have holes in them, so
265 * walk through the list and remove the holes
269 for (i = 0; i < user_mem_maps.n_maps; i++) {
270 if (!is_null_map(&user_mem_maps.maps[i])) {
271 struct user_mem_map *src, *dst;
273 src = &user_mem_maps.maps[i];
274 dst = &user_mem_maps.maps[cur_idx++];
277 memcpy(dst, src, sizeof(*src));
278 memset(src, 0, sizeof(*src));
282 user_mem_maps.n_maps = cur_idx;
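/* e.g. if a region at {addr=0x1000, iova=0x1000, len=0x1000} is already in
 * the list and the user then maps the adjacent {addr=0x2000, iova=0x2000,
 * len=0x1000}, this call merges the two into a single entry of len=0x2000
 * and shrinks n_maps accordingly (illustrative values only).
 */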
287 rte_vfio_get_group_fd(int iommu_group_num)
291 char filename[PATH_MAX];
292 struct vfio_group *cur_grp;
293 struct rte_mp_msg mp_req, *mp_rep;
294 struct rte_mp_reply mp_reply;
295 struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
296 struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
298 /* check if we already have the group descriptor open */
299 for (i = 0; i < VFIO_MAX_GROUPS; i++)
300 if (vfio_cfg.vfio_groups[i].group_num == iommu_group_num)
301 return vfio_cfg.vfio_groups[i].fd;
303 /* Let's first see if there is room for a new group */
304 if (vfio_cfg.vfio_active_groups == VFIO_MAX_GROUPS) {
305 RTE_LOG(ERR, EAL, "Maximum number of VFIO groups reached!\n");
309 /* Now let's get an index for the new group */
310 for (i = 0; i < VFIO_MAX_GROUPS; i++)
311 if (vfio_cfg.vfio_groups[i].group_num == -1) {
312 cur_grp = &vfio_cfg.vfio_groups[i];
316 /* This should not happen */
317 if (i == VFIO_MAX_GROUPS) {
318 RTE_LOG(ERR, EAL, "No VFIO group free slot found\n");
321 /* if primary, try to open the group */
322 if (internal_config.process_type == RTE_PROC_PRIMARY) {
323 /* try regular group format */
324 snprintf(filename, sizeof(filename),
325 VFIO_GROUP_FMT, iommu_group_num);
326 vfio_group_fd = open(filename, O_RDWR);
327 if (vfio_group_fd < 0) {
328 /* if file not found, it's not an error */
329 if (errno != ENOENT) {
330 RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename,
335 /* special case: try no-IOMMU path as well */
336 snprintf(filename, sizeof(filename),
337 VFIO_NOIOMMU_GROUP_FMT,
339 vfio_group_fd = open(filename, O_RDWR);
340 if (vfio_group_fd < 0) {
341 if (errno != ENOENT) {
342 RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename,
348 /* noiommu group found */
351 cur_grp->group_num = iommu_group_num;
352 cur_grp->fd = vfio_group_fd;
353 vfio_cfg.vfio_active_groups++;
354 return vfio_group_fd;
356 /* if we're in a secondary process, request group fd from the primary
357 * process via mp channel.
359 p->req = SOCKET_REQ_GROUP;
360 p->group_num = iommu_group_num;
361 strcpy(mp_req.name, EAL_VFIO_MP);
362 mp_req.len_param = sizeof(*p);
366 if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
367 mp_reply.nb_received == 1) {
368 mp_rep = &mp_reply.msgs[0];
369 p = (struct vfio_mp_param *)mp_rep->param;
370 if (p->result == SOCKET_OK && mp_rep->num_fds == 1) {
371 cur_grp->group_num = iommu_group_num;
372 vfio_group_fd = mp_rep->fds[0];
373 cur_grp->fd = vfio_group_fd;
374 vfio_cfg.vfio_active_groups++;
375 } else if (p->result == SOCKET_NO_FD) {
376 RTE_LOG(ERR, EAL, " bad VFIO group fd\n");
382 if (vfio_group_fd < 0)
383 RTE_LOG(ERR, EAL, " cannot request group fd\n");
384 return vfio_group_fd;
389 get_vfio_group_idx(int vfio_group_fd)
392 for (i = 0; i < VFIO_MAX_GROUPS; i++)
393 if (vfio_cfg.vfio_groups[i].fd == vfio_group_fd)
399 vfio_group_device_get(int vfio_group_fd)
403 i = get_vfio_group_idx(vfio_group_fd);
404 if (i < 0 || i > (VFIO_MAX_GROUPS - 1))
405 RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
407 vfio_cfg.vfio_groups[i].devices++;
411 vfio_group_device_put(int vfio_group_fd)
415 i = get_vfio_group_idx(vfio_group_fd);
416 if (i < 0 || i > (VFIO_MAX_GROUPS - 1))
417 RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
419 vfio_cfg.vfio_groups[i].devices--;
423 vfio_group_device_count(int vfio_group_fd)
427 i = get_vfio_group_idx(vfio_group_fd);
428 if (i < 0 || i > (VFIO_MAX_GROUPS - 1)) {
429 RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
433 return vfio_cfg.vfio_groups[i].devices;
437 vfio_mem_event_callback(enum rte_mem_event type, const void *addr, size_t len)
439 struct rte_memseg_list *msl;
440 struct rte_memseg *ms;
443 msl = rte_mem_virt2memseg_list(addr);
445 /* in IOVA-as-VA mode, no need to care about IOVA addresses */
446 if (rte_eal_iova_mode() == RTE_IOVA_VA) {
447 uint64_t vfio_va = (uint64_t)(uintptr_t)addr;
448 if (type == RTE_MEM_EVENT_ALLOC)
449 vfio_dma_mem_map(vfio_va, vfio_va, len, 1);
451 vfio_dma_mem_map(vfio_va, vfio_va, len, 0);
455 /* memsegs are contiguous in memory */
456 ms = rte_mem_virt2memseg(addr, msl);
457 while (cur_len < len) {
458 if (type == RTE_MEM_EVENT_ALLOC)
459 vfio_dma_mem_map(ms->addr_64, ms->iova, ms->len, 1);
461 vfio_dma_mem_map(ms->addr_64, ms->iova, ms->len, 0);
469 rte_vfio_clear_group(int vfio_group_fd)
472 struct rte_mp_msg mp_req, *mp_rep;
473 struct rte_mp_reply mp_reply;
474 struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
475 struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
477 if (internal_config.process_type == RTE_PROC_PRIMARY) {
479 i = get_vfio_group_idx(vfio_group_fd);
482 vfio_cfg.vfio_groups[i].group_num = -1;
483 vfio_cfg.vfio_groups[i].fd = -1;
484 vfio_cfg.vfio_groups[i].devices = 0;
485 vfio_cfg.vfio_active_groups--;
489 p->req = SOCKET_CLR_GROUP;
490 p->group_num = vfio_group_fd;
491 strcpy(mp_req.name, EAL_VFIO_MP);
492 mp_req.len_param = sizeof(*p);
495 if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
496 mp_reply.nb_received == 1) {
497 mp_rep = &mp_reply.msgs[0];
498 p = (struct vfio_mp_param *)mp_rep->param;
499 if (p->result == SOCKET_OK) {
502 } else if (p->result == SOCKET_NO_FD)
503 RTE_LOG(ERR, EAL, " BAD VFIO group fd!\n");
505 RTE_LOG(ERR, EAL, " no such VFIO group fd!\n");
514 rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
515 int *vfio_dev_fd, struct vfio_device_info *device_info)
517 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
518 rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock;
519 struct vfio_group_status group_status = {
520 .argsz = sizeof(group_status)
526 /* get group number */
527 ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_num);
529 RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
534 /* if negative, something failed */
538 /* get the actual group fd */
539 vfio_group_fd = rte_vfio_get_group_fd(iommu_group_num);
540 if (vfio_group_fd < 0)
543 /* if group_fd == 0, that means the device isn't managed by VFIO */
544 if (vfio_group_fd == 0) {
545 RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
551 * at this point, we know that this group is viable (meaning, all devices
552 * are either bound to VFIO or not bound to anything)
555 /* check if the group is viable */
556 ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &group_status);
558 RTE_LOG(ERR, EAL, " %s cannot get group status, "
559 "error %i (%s)\n", dev_addr, errno, strerror(errno));
560 close(vfio_group_fd);
561 rte_vfio_clear_group(vfio_group_fd);
563 } else if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
564 RTE_LOG(ERR, EAL, " %s VFIO group is not viable!\n", dev_addr);
565 close(vfio_group_fd);
566 rte_vfio_clear_group(vfio_group_fd);
570 /* check if group does not have a container yet */
571 if (!(group_status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
573 /* add group to a container */
574 ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER,
575 &vfio_cfg.vfio_container_fd);
577 RTE_LOG(ERR, EAL, " %s cannot add VFIO group to container, "
578 "error %i (%s)\n", dev_addr, errno, strerror(errno));
579 close(vfio_group_fd);
580 rte_vfio_clear_group(vfio_group_fd);
585 * pick an IOMMU type and set up DMA mappings for container
587 * needs to be done only once, only when the first group is
588 * assigned to a container and only in the primary process.
589 * Note this can happen several times with the hotplug
592 if (internal_config.process_type == RTE_PROC_PRIMARY &&
593 vfio_cfg.vfio_active_groups == 1 &&
594 vfio_group_device_count(vfio_group_fd) == 0) {
595 const struct vfio_iommu_type *t;
597 /* select an IOMMU type which we will be using */
598 t = vfio_set_iommu_type(vfio_cfg.vfio_container_fd);
601 " %s failed to select IOMMU type\n",
603 close(vfio_group_fd);
604 rte_vfio_clear_group(vfio_group_fd);
607 /* lock memory hotplug before mapping and release it
608 * after registering callback, to prevent races
610 rte_rwlock_read_lock(mem_lock);
611 ret = t->dma_map_func(vfio_cfg.vfio_container_fd);
614 " %s DMA remapping failed, error %i (%s)\n",
615 dev_addr, errno, strerror(errno));
616 close(vfio_group_fd);
617 rte_vfio_clear_group(vfio_group_fd);
618 rte_rwlock_read_unlock(mem_lock);
622 vfio_cfg.vfio_iommu_type = t;
624 /* re-map all user-mapped segments */
625 rte_spinlock_recursive_lock(&user_mem_maps.lock);
627 /* this IOMMU type may not support DMA mapping, but
628 * if we have mappings in the list - that means we have
629 * previously mapped something successfully, so we can
630 * be sure that DMA mapping is supported.
632 for (i = 0; i < user_mem_maps.n_maps; i++) {
633 struct user_mem_map *map;
634 map = &user_mem_maps.maps[i];
636 ret = t->dma_user_map_func(
637 vfio_cfg.vfio_container_fd,
638 map->addr, map->iova, map->len,
641 RTE_LOG(ERR, EAL, "Couldn't map user memory for DMA: "
643 "iova: 0x%" PRIx64 " "
644 "len: 0x%" PRIu64 "\n",
645 map->addr, map->iova,
647 rte_spinlock_recursive_unlock(
648 &user_mem_maps.lock);
649 rte_rwlock_read_unlock(mem_lock);
653 rte_spinlock_recursive_unlock(&user_mem_maps.lock);
655 /* register callback for mem events */
656 ret = rte_mem_event_callback_register(
657 VFIO_MEM_EVENT_CLB_NAME,
658 vfio_mem_event_callback);
659 /* unlock memory hotplug */
660 rte_rwlock_read_unlock(mem_lock);
662 if (ret && rte_errno != ENOTSUP) {
663 RTE_LOG(ERR, EAL, "Could not install memory event callback for VFIO\n");
667 RTE_LOG(DEBUG, EAL, "Memory event callbacks not supported\n");
669 RTE_LOG(DEBUG, EAL, "Installed memory event callback for VFIO\n");
673 /* get a file descriptor for the device */
674 *vfio_dev_fd = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD, dev_addr);
675 if (*vfio_dev_fd < 0) {
676 /* if we cannot get a device fd, this implies a problem with
677 * the VFIO group or the container not having IOMMU configured.
680 RTE_LOG(WARNING, EAL, "Getting a vfio_dev_fd for %s failed\n",
682 close(vfio_group_fd);
683 rte_vfio_clear_group(vfio_group_fd);
687 /* test and setup the device */
688 ret = ioctl(*vfio_dev_fd, VFIO_DEVICE_GET_INFO, device_info);
690 RTE_LOG(ERR, EAL, " %s cannot get device info, "
691 "error %i (%s)\n", dev_addr, errno,
694 close(vfio_group_fd);
695 rte_vfio_clear_group(vfio_group_fd);
698 vfio_group_device_get(vfio_group_fd);
704 rte_vfio_release_device(const char *sysfs_base, const char *dev_addr,
707 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
708 rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock;
709 struct vfio_group_status group_status = {
710 .argsz = sizeof(group_status)
716 /* we don't want any DMA mapping messages to come while we're detaching
717 * VFIO device, because this might be the last device and we might need
718 * to unregister the callback.
720 rte_rwlock_read_lock(mem_lock);
722 /* get group number */
723 ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_num);
725 RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver\n",
727 /* This is an error at this point. */
732 /* get the actual group fd */
733 vfio_group_fd = rte_vfio_get_group_fd(iommu_group_num);
734 if (vfio_group_fd <= 0) {
735 RTE_LOG(INFO, EAL, "rte_vfio_get_group_fd failed for %s\n",
741 /* At this point we have an active group. Closing it will detach it from
742 * the container. If this is the last active group, the VFIO kernel
743 * code will unset the container and the IOMMU mappings.
746 /* Closing a device */
747 if (close(vfio_dev_fd) < 0) {
748 RTE_LOG(INFO, EAL, "Error when closing vfio_dev_fd for %s\n",
754 /* A VFIO group can have several devices attached. Only when there are
755 * no devices remaining should the group be closed.
757 vfio_group_device_put(vfio_group_fd);
758 if (!vfio_group_device_count(vfio_group_fd)) {
760 if (close(vfio_group_fd) < 0) {
761 RTE_LOG(INFO, EAL, "Error when closing vfio_group_fd for %s\n",
767 if (rte_vfio_clear_group(vfio_group_fd) < 0) {
768 RTE_LOG(INFO, EAL, "Error when clearing group for %s\n",
775 /* if there are no active device groups, unregister the callback to
776 * avoid spurious attempts to map/unmap memory from VFIO.
778 if (vfio_cfg.vfio_active_groups == 0)
779 rte_mem_event_callback_unregister(VFIO_MEM_EVENT_CLB_NAME);
785 rte_rwlock_read_unlock(mem_lock);
790 rte_vfio_enable(const char *modname)
792 /* initialize group list */
796 for (i = 0; i < VFIO_MAX_GROUPS; i++) {
797 vfio_cfg.vfio_groups[i].fd = -1;
798 vfio_cfg.vfio_groups[i].group_num = -1;
799 vfio_cfg.vfio_groups[i].devices = 0;
802 /* inform the user that we are probing for VFIO */
803 RTE_LOG(INFO, EAL, "Probing VFIO support...\n");
805 /* check if vfio module is loaded */
806 vfio_available = rte_eal_check_module(modname);
808 /* return error directly */
809 if (vfio_available == -1) {
810 RTE_LOG(INFO, EAL, "Could not get loaded module details!\n");
814 /* return 0 if VFIO modules not loaded */
815 if (vfio_available == 0) {
816 RTE_LOG(DEBUG, EAL, "VFIO modules not loaded, "
817 "skipping VFIO support...\n");
821 vfio_cfg.vfio_container_fd = rte_vfio_get_container_fd();
823 /* check if we have VFIO driver enabled */
824 if (vfio_cfg.vfio_container_fd != -1) {
825 RTE_LOG(NOTICE, EAL, "VFIO support initialized\n");
826 vfio_cfg.vfio_enabled = 1;
828 RTE_LOG(NOTICE, EAL, "VFIO support could not be initialized\n");
835 rte_vfio_is_enabled(const char *modname)
837 const int mod_available = rte_eal_check_module(modname) > 0;
838 return vfio_cfg.vfio_enabled && mod_available;
841 const struct vfio_iommu_type *
842 vfio_set_iommu_type(int vfio_container_fd)
845 for (idx = 0; idx < RTE_DIM(iommu_types); idx++) {
846 const struct vfio_iommu_type *t = &iommu_types[idx];
848 int ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU,
851 RTE_LOG(NOTICE, EAL, " using IOMMU type %d (%s)\n",
852 t->type_id, t->name);
855 /* not an error, there may be more supported IOMMU types */
856 RTE_LOG(DEBUG, EAL, " set IOMMU type %d (%s) failed, "
857 "error %i (%s)\n", t->type_id, t->name, errno,
860 /* if we didn't find a suitable IOMMU type, fail */
865 vfio_has_supported_extensions(int vfio_container_fd)
868 unsigned idx, n_extensions = 0;
869 for (idx = 0; idx < RTE_DIM(iommu_types); idx++) {
870 const struct vfio_iommu_type *t = &iommu_types[idx];
872 ret = ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION,
875 RTE_LOG(ERR, EAL, " could not get IOMMU type, "
876 "error %i (%s)\n", errno,
878 close(vfio_container_fd);
880 } else if (ret == 1) {
881 /* we found a supported extension */
884 RTE_LOG(DEBUG, EAL, " IOMMU type %d (%s) is %s\n",
886 ret ? "supported" : "not supported");
889 /* if we didn't find any supported IOMMU types, fail */
891 close(vfio_container_fd);
899 rte_vfio_get_container_fd(void)
901 int ret, vfio_container_fd;
902 struct rte_mp_msg mp_req, *mp_rep;
903 struct rte_mp_reply mp_reply;
904 struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
905 struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
908 /* if we're in a primary process, try to open the container */
909 if (internal_config.process_type == RTE_PROC_PRIMARY) {
910 vfio_container_fd = open(VFIO_CONTAINER_PATH, O_RDWR);
911 if (vfio_container_fd < 0) {
912 RTE_LOG(ERR, EAL, " cannot open VFIO container, "
913 "error %i (%s)\n", errno, strerror(errno));
917 /* check VFIO API version */
918 ret = ioctl(vfio_container_fd, VFIO_GET_API_VERSION);
919 if (ret != VFIO_API_VERSION) {
921 RTE_LOG(ERR, EAL, " could not get VFIO API version, "
922 "error %i (%s)\n", errno, strerror(errno));
924 RTE_LOG(ERR, EAL, " unsupported VFIO API version!\n");
925 close(vfio_container_fd);
929 ret = vfio_has_supported_extensions(vfio_container_fd);
931 RTE_LOG(ERR, EAL, " no supported IOMMU "
932 "extensions found!\n");
936 return vfio_container_fd;
939 * if we're in a secondary process, request container fd from the
940 * primary process via mp channel
942 p->req = SOCKET_REQ_CONTAINER;
943 strcpy(mp_req.name, EAL_VFIO_MP);
944 mp_req.len_param = sizeof(*p);
947 vfio_container_fd = -1;
948 if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
949 mp_reply.nb_received == 1) {
950 mp_rep = &mp_reply.msgs[0];
951 p = (struct vfio_mp_param *)mp_rep->param;
952 if (p->result == SOCKET_OK && mp_rep->num_fds == 1) {
954 return mp_rep->fds[0];
959 RTE_LOG(ERR, EAL, " cannot request container fd\n");
964 rte_vfio_get_group_num(const char *sysfs_base,
965 const char *dev_addr, int *iommu_group_num)
967 char linkname[PATH_MAX];
968 char filename[PATH_MAX];
969 char *tok[16], *group_tok, *end;
972 memset(linkname, 0, sizeof(linkname));
973 memset(filename, 0, sizeof(filename));
975 /* try to find out IOMMU group for this device */
976 snprintf(linkname, sizeof(linkname),
977 "%s/%s/iommu_group", sysfs_base, dev_addr);
979 ret = readlink(linkname, filename, sizeof(filename));
981 /* if the link doesn't exist, no VFIO for us */
985 ret = rte_strsplit(filename, sizeof(filename),
986 tok, RTE_DIM(tok), '/');
989 RTE_LOG(ERR, EAL, " %s cannot get IOMMU group\n", dev_addr);
993 /* IOMMU group is always the last token */
995 group_tok = tok[ret - 1];
997 *iommu_group_num = strtol(group_tok, &end, 10);
998 if ((end != group_tok && *end != '\0') || errno != 0) {
999 RTE_LOG(ERR, EAL, " %s error parsing IOMMU number!\n", dev_addr);
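/* for reference, the iommu_group link for a device typically resolves to
 * something like "../../../../kernel/iommu_groups/42" (illustrative path),
 * so the last token is "42" and *iommu_group_num is parsed as 42.
 */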
1007 type1_map(const struct rte_memseg_list *msl __rte_unused,
1008 const struct rte_memseg *ms, void *arg)
1010 int *vfio_container_fd = arg;
1012 return vfio_type1_dma_mem_map(*vfio_container_fd, ms->addr_64, ms->iova,
1017 vfio_type1_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
1018 uint64_t len, int do_map)
1020 struct vfio_iommu_type1_dma_map dma_map;
1021 struct vfio_iommu_type1_dma_unmap dma_unmap;
1025 memset(&dma_map, 0, sizeof(dma_map));
1026 dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
1027 dma_map.vaddr = vaddr;
1029 dma_map.iova = iova;
1030 dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
1031 VFIO_DMA_MAP_FLAG_WRITE;
1033 ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
1035 RTE_LOG(ERR, EAL, " cannot set up DMA remapping, error %i (%s)\n",
1036 errno, strerror(errno));
1040 memset(&dma_unmap, 0, sizeof(dma_unmap));
1041 dma_unmap.argsz = sizeof(struct vfio_iommu_type1_dma_unmap);
1042 dma_unmap.size = len;
1043 dma_unmap.iova = iova;
1045 ret = ioctl(vfio_container_fd, VFIO_IOMMU_UNMAP_DMA,
1048 RTE_LOG(ERR, EAL, " cannot clear DMA remapping, error %i (%s)\n",
1049 errno, strerror(errno));
1058 vfio_type1_dma_map(int vfio_container_fd)
1060 return rte_memseg_walk(type1_map, &vfio_container_fd);
1064 vfio_spapr_dma_do_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
1065 uint64_t len, int do_map)
1067 struct vfio_iommu_type1_dma_map dma_map;
1068 struct vfio_iommu_type1_dma_unmap dma_unmap;
1072 memset(&dma_map, 0, sizeof(dma_map));
1073 dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
1074 dma_map.vaddr = vaddr;
1076 dma_map.iova = iova;
1077 dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
1078 VFIO_DMA_MAP_FLAG_WRITE;
1080 ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
1082 RTE_LOG(ERR, EAL, " cannot set up DMA remapping, error %i (%s)\n",
1083 errno, strerror(errno));
1088 struct vfio_iommu_spapr_register_memory reg = {
1089 .argsz = sizeof(reg),
1092 reg.vaddr = (uintptr_t) vaddr;
1095 ret = ioctl(vfio_container_fd,
1096 VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
1098 RTE_LOG(ERR, EAL, " cannot unregister vaddr for IOMMU, error %i (%s)\n",
1099 errno, strerror(errno));
1103 memset(&dma_unmap, 0, sizeof(dma_unmap));
1104 dma_unmap.argsz = sizeof(struct vfio_iommu_type1_dma_unmap);
1105 dma_unmap.size = len;
1106 dma_unmap.iova = iova;
1108 ret = ioctl(vfio_container_fd, VFIO_IOMMU_UNMAP_DMA,
1111 RTE_LOG(ERR, EAL, " cannot clear DMA remapping, error %i (%s)\n",
1112 errno, strerror(errno));
1121 vfio_spapr_map_walk(const struct rte_memseg_list *msl __rte_unused,
1122 const struct rte_memseg *ms, void *arg)
1124 int *vfio_container_fd = arg;
1126 return vfio_spapr_dma_mem_map(*vfio_container_fd, ms->addr_64, ms->iova,
1130 struct spapr_walk_param {
1131 uint64_t window_size;
1132 uint64_t hugepage_sz;
1135 vfio_spapr_window_size_walk(const struct rte_memseg_list *msl __rte_unused,
1136 const struct rte_memseg *ms, void *arg)
1138 struct spapr_walk_param *param = arg;
1139 uint64_t max = ms->iova + ms->len;
1141 if (max > param->window_size) {
1142 param->hugepage_sz = ms->hugepage_sz;
1143 param->window_size = max;
1150 vfio_spapr_create_new_dma_window(int vfio_container_fd,
1151 struct vfio_iommu_spapr_tce_create *create) {
1152 struct vfio_iommu_spapr_tce_remove remove = {
1153 .argsz = sizeof(remove),
1155 struct vfio_iommu_spapr_tce_info info = {
1156 .argsz = sizeof(info),
1160 /* query spapr iommu info */
1161 ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
1163 RTE_LOG(ERR, EAL, " cannot get iommu info, "
1164 "error %i (%s)\n", errno, strerror(errno));
1168 /* remove the default 32-bit DMA window */
1169 remove.start_addr = info.dma32_window_start;
1170 ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
1172 RTE_LOG(ERR, EAL, " cannot remove default DMA window, "
1173 "error %i (%s)\n", errno, strerror(errno));
1177 /* create new DMA window */
1178 ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, create);
1180 RTE_LOG(ERR, EAL, " cannot create new DMA window, "
1181 "error %i (%s)\n", errno, strerror(errno));
1185 if (create->start_addr != 0) {
1186 RTE_LOG(ERR, EAL, " DMA window start address != 0\n");
1194 vfio_spapr_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
1195 uint64_t len, int do_map)
1197 struct spapr_walk_param param;
1198 struct vfio_iommu_spapr_tce_create create = {
1199 .argsz = sizeof(create),
1203 rte_spinlock_recursive_lock(&user_mem_maps.lock);
1205 /* check if window size needs to be adjusted */
1206 memset(&param, 0, sizeof(param));
1208 if (memseg_walk_thread_unsafe(vfio_spapr_window_size_walk,
1210 RTE_LOG(ERR, EAL, "Could not get window size\n");
1215 /* also check user maps */
1216 for (i = 0; i < user_mem_maps.n_maps; i++) {
1217 uint64_t max = user_mem_maps.maps[i].iova +
1218 user_mem_maps.maps[i].len;
1219 create.window_size = RTE_MAX(create.window_size, max);
1222 /* sPAPR requires window size to be a power of 2 */
1223 create.window_size = rte_align64pow2(RTE_MAX(create.window_size, param.window_size));
1224 create.page_shift = __builtin_ctzll(param.hugepage_sz);
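/* e.g. a 3 GiB window size rounds up to a 4 GiB window, and 2 MiB hugepages
 * give page_shift = 21 (illustrative values only)
 */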
1229 /* re-create window and remap the entire memory */
1230 if (iova > create.window_size) {
1231 if (vfio_spapr_create_new_dma_window(vfio_container_fd,
1233 RTE_LOG(ERR, EAL, "Could not create new DMA window\n");
1237 if (memseg_walk_thread_unsafe(vfio_spapr_map_walk,
1238 &vfio_container_fd) < 0) {
1239 RTE_LOG(ERR, EAL, "Could not recreate DMA maps\n");
1243 /* remap all user maps */
1244 for (i = 0; i < user_mem_maps.n_maps; i++) {
1245 struct user_mem_map *map =
1246 &user_mem_maps.maps[i];
1247 if (vfio_spapr_dma_do_map(vfio_container_fd,
1248 map->addr, map->iova, map->len,
1250 RTE_LOG(ERR, EAL, "Could not recreate user DMA maps\n");
1257 /* now that we've remapped all of the memory that was present
1258 * before, map the segment that we were requested to map.
1260 * however, if we were called by the callback, the memory we
1261 * were called with was already in the memseg list, so previous
1262 * mapping should've mapped that segment already.
1264 * virt2memseg_list is a relatively cheap check, so use that. if
1265 * memory is within any memseg list, it's a memseg, so it's
1268 addr = (void *)(uintptr_t)vaddr;
1269 if (rte_mem_virt2memseg_list(addr) == NULL &&
1270 vfio_spapr_dma_do_map(vfio_container_fd,
1271 vaddr, iova, len, 1) < 0) {
1272 RTE_LOG(ERR, EAL, "Could not map segment\n");
1277 /* for unmap, check if iova within DMA window */
1278 if (iova > create.window_size) {
1279 RTE_LOG(ERR, EAL, "iova beyond DMA window for unmap");
1284 vfio_spapr_dma_do_map(vfio_container_fd, vaddr, iova, len, 0);
1287 rte_spinlock_recursive_unlock(&user_mem_maps.lock);
1292 vfio_spapr_dma_map(int vfio_container_fd)
1294 struct vfio_iommu_spapr_tce_create create = {
1295 .argsz = sizeof(create),
1297 struct spapr_walk_param param;
1299 memset(&param, 0, sizeof(param));
1301 /* create DMA window from 0 to max(phys_addr + len) */
1302 rte_memseg_walk(vfio_spapr_window_size_walk, &param);
1304 /* sPAPR requires window size to be a power of 2 */
1305 create.window_size = rte_align64pow2(param.window_size);
1306 create.page_shift = __builtin_ctzll(param.hugepage_sz);
1309 if (vfio_spapr_create_new_dma_window(vfio_container_fd, &create) < 0) {
1310 RTE_LOG(ERR, EAL, "Could not create new DMA window\n");
1314 /* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */
1315 if (rte_memseg_walk(vfio_spapr_map_walk, &vfio_container_fd) < 0)
1322 vfio_noiommu_dma_map(int __rte_unused vfio_container_fd)
1324 /* No-IOMMU mode does not need DMA mapping */
1329 vfio_noiommu_dma_mem_map(int __rte_unused vfio_container_fd,
1330 uint64_t __rte_unused vaddr,
1331 uint64_t __rte_unused iova, uint64_t __rte_unused len,
1332 int __rte_unused do_map)
1334 /* No-IOMMU mode does not need DMA mapping */
1339 vfio_dma_mem_map(uint64_t vaddr, uint64_t iova, uint64_t len, int do_map)
1341 const struct vfio_iommu_type *t = vfio_cfg.vfio_iommu_type;
1344 RTE_LOG(ERR, EAL, " VFIO support not initialized\n");
1349 if (!t->dma_user_map_func) {
1351 " VFIO custom DMA region maping not supported by IOMMU %s\n",
1353 rte_errno = ENOTSUP;
1357 return t->dma_user_map_func(vfio_cfg.vfio_container_fd, vaddr, iova,
1361 int __rte_experimental
1362 rte_vfio_dma_map(uint64_t vaddr, uint64_t iova, uint64_t len)
1364 struct user_mem_map *new_map;
1372 rte_spinlock_recursive_lock(&user_mem_maps.lock);
1373 if (user_mem_maps.n_maps == VFIO_MAX_USER_MEM_MAPS) {
1374 RTE_LOG(ERR, EAL, "No more space for user mem maps\n");
1380 if (vfio_dma_mem_map(vaddr, iova, len, 1)) {
1381 /* technically, this will fail if there are currently no devices
1382 * plugged in, even though the mapping might have succeeded had a
1383 * device been added later. however, since we cannot verify whether
1384 * this is a valid mapping without having a device attached, consider
1385 * it unsupported, because we can't just store any old mapping and
1386 * pollute the list of active mappings willy-nilly.
1388 RTE_LOG(ERR, EAL, "Couldn't map new region for DMA\n");
1392 /* create new user mem map entry */
1393 new_map = &user_mem_maps.maps[user_mem_maps.n_maps++];
1394 new_map->addr = vaddr;
1395 new_map->iova = iova;
1398 compact_user_maps();
1400 rte_spinlock_recursive_unlock(&user_mem_maps.lock);
1404 int __rte_experimental
1405 rte_vfio_dma_unmap(uint64_t vaddr, uint64_t iova, uint64_t len)
1407 struct user_mem_map *map, *new_map = NULL;
1415 rte_spinlock_recursive_lock(&user_mem_maps.lock);
1417 /* find our mapping */
1418 map = find_user_mem_map(vaddr, iova, len);
1420 RTE_LOG(ERR, EAL, "Couldn't find previously mapped region\n");
1425 if (map->addr != vaddr || map->iova != iova || map->len != len) {
1426 /* we're partially unmapping a previously mapped region, so we
1427 * need to split the entry in two.
1429 if (user_mem_maps.n_maps == VFIO_MAX_USER_MEM_MAPS) {
1430 RTE_LOG(ERR, EAL, "Not enough space to store partial mapping\n");
1435 new_map = &user_mem_maps.maps[user_mem_maps.n_maps++];
1438 /* unmap the entry */
1439 if (vfio_dma_mem_map(vaddr, iova, len, 0)) {
1440 /* there may not be any devices plugged in, so unmapping will
1441 * fail with ENODEV/ENOTSUP rte_errno values, but that doesn't
1442 * stop us from removing the mapping, as the assumption is we
1443 * won't be needing this memory any more and thus will want to
1444 * prevent it from being remapped again on hotplug. so, only
1445 * fail if we indeed failed to unmap (e.g. if the mapping was
1446 * within our mapped range but had invalid alignment).
1448 if (rte_errno != ENODEV && rte_errno != ENOTSUP) {
1449 RTE_LOG(ERR, EAL, "Couldn't unmap region for DMA\n");
1453 RTE_LOG(DEBUG, EAL, "DMA unmapping failed, but removing mappings anyway\n");
1456 /* remove map from the list of active mappings */
1457 if (new_map != NULL) {
1458 adjust_map(map, new_map, vaddr, len);
1460 /* if we've created a new map by splitting, sort everything */
1461 if (!is_null_map(new_map)) {
1462 compact_user_maps();
1464 /* we've created a new mapping, but it was unused */
1465 user_mem_maps.n_maps--;
1468 memset(map, 0, sizeof(*map));
1469 compact_user_maps();
1470 user_mem_maps.n_maps--;
1474 rte_spinlock_recursive_unlock(&user_mem_maps.lock);
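/* hypothetical application-side usage of the two calls above (a sketch only;
 * ext_buf/buf_len are placeholder names, and IOVA selection and error
 * handling are up to the application):
 *
 *	uint64_t va = (uint64_t)(uintptr_t)ext_buf;
 *	uint64_t iova = va;	<- 1:1 mapping, e.g. in IOVA-as-VA mode
 *	if (rte_vfio_dma_map(va, iova, buf_len) < 0)
 *		... check rte_errno (ENODEV/ENOTSUP mean no device/IOMMU yet);
 *	... do DMA to/from the buffer, then ...
 *	rte_vfio_dma_unmap(va, iova, buf_len);
 */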
1479 rte_vfio_noiommu_is_enabled(void)
1485 fd = open(VFIO_NOIOMMU_MODE, O_RDONLY);
1487 if (errno != ENOENT) {
1488 RTE_LOG(ERR, EAL, " cannot open vfio noiommu file %i (%s)\n",
1489 errno, strerror(errno));
1493 * else the file does not exist,
1494 * i.e. noiommu is not enabled
1499 cnt = read(fd, &c, 1);
1502 RTE_LOG(ERR, EAL, " unable to read from vfio noiommu "
1503 "file %i (%s)\n", errno, strerror(errno));
1512 int __rte_experimental
1513 rte_vfio_dma_map(uint64_t __rte_unused vaddr, __rte_unused uint64_t iova,
1514 __rte_unused uint64_t len)
1519 int __rte_experimental
1520 rte_vfio_dma_unmap(uint64_t __rte_unused vaddr, uint64_t __rte_unused iova,
1521 __rte_unused uint64_t len)
1527 rte_vfio_setup_device(__rte_unused const char *sysfs_base,
1528 __rte_unused const char *dev_addr,
1529 __rte_unused int *vfio_dev_fd,
1530 __rte_unused struct vfio_device_info *device_info)
1536 rte_vfio_release_device(__rte_unused const char *sysfs_base,
1537 __rte_unused const char *dev_addr, __rte_unused int fd)
1543 rte_vfio_enable(__rte_unused const char *modname)
1549 rte_vfio_is_enabled(__rte_unused const char *modname)
1555 rte_vfio_noiommu_is_enabled(void)
1561 rte_vfio_clear_group(__rte_unused int vfio_group_fd)
1566 int __rte_experimental
1567 rte_vfio_get_group_num(__rte_unused const char *sysfs_base,
1568 __rte_unused const char *dev_addr,
1569 __rte_unused int *iommu_group_num)
1574 int __rte_experimental
1575 rte_vfio_get_container_fd(void)
1580 int __rte_experimental
1581 rte_vfio_get_group_fd(__rte_unused int iommu_group_num)
1586 #endif /* VFIO_PRESENT */