1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
11 #include <rte_errno.h>
13 #include <rte_memory.h>
14 #include <rte_eal_memconfig.h>
17 #include "eal_filesystem.h"
19 #include "eal_private.h"
23 #define VFIO_MEM_EVENT_CLB_NAME "vfio_mem_event_clb"
25 /* hot plug/unplug of VFIO groups may cause all DMA maps to be dropped. we can
26 * recreate the mappings for DPDK segments, but we cannot do so for memory that
27 * was registered by the user themselves, so we need to store the user mappings
28 * somewhere, to recreate them later.
30 #define VFIO_MAX_USER_MEM_MAPS 256
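/* each stored user mapping is, conceptually, a VA/IOVA/length triple. the
 * full definition is elided here; illustrative sketch only, with field names
 * following how they are used throughout this file:
 *
 *     struct user_mem_map {
 *             uint64_t addr; // process virtual address of the region
 *             uint64_t iova; // IOVA the region was mapped at
 *             uint64_t len;  // length of the region, in bytes
 *     };
 */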
37 struct user_mem_maps {
38 rte_spinlock_recursive_t lock;
40 struct user_mem_map maps[VFIO_MAX_USER_MEM_MAPS];
45 int vfio_container_fd;
46 int vfio_active_groups;
47 const struct vfio_iommu_type *vfio_iommu_type;
48 struct vfio_group vfio_groups[VFIO_MAX_GROUPS];
49 struct user_mem_maps mem_maps;
52 /* per-process VFIO config */
53 static struct vfio_config vfio_cfgs[VFIO_MAX_CONTAINERS];
54 static struct vfio_config *default_vfio_cfg = &vfio_cfgs[0];
56 static int vfio_type1_dma_map(int);
57 static int vfio_type1_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
58 static int vfio_spapr_dma_map(int);
59 static int vfio_spapr_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
60 static int vfio_noiommu_dma_map(int);
61 static int vfio_noiommu_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
62 static int vfio_dma_mem_map(struct vfio_config *vfio_cfg, uint64_t vaddr,
63 uint64_t iova, uint64_t len, int do_map);
65 /* IOMMU types we support */
66 static const struct vfio_iommu_type iommu_types[] = {
67 /* x86 IOMMU, otherwise known as type 1 */
69 .type_id = RTE_VFIO_TYPE1,
71 .dma_map_func = &vfio_type1_dma_map,
72 .dma_user_map_func = &vfio_type1_dma_mem_map
74 /* ppc64 IOMMU, otherwise known as spapr */
76 .type_id = RTE_VFIO_SPAPR,
78 .dma_map_func = &vfio_spapr_dma_map,
79 .dma_user_map_func = &vfio_spapr_dma_mem_map
83 .type_id = RTE_VFIO_NOIOMMU,
85 .dma_map_func = &vfio_noiommu_dma_map,
86 .dma_user_map_func = &vfio_noiommu_dma_mem_map
90 /* for sPAPR IOMMU, we will need to walk the memseg list, but we cannot use
91 * rte_memseg_walk() because by the time we enter callback we will be holding a
92 * write lock, so regular rte_memseg_walk() will deadlock. copying the same
93 * iteration code everywhere is not ideal either. so, use a lockless copy of
97 memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg)
99 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
100 int i, ms_idx, ret = 0;
102 for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
103 struct rte_memseg_list *msl = &mcfg->memsegs[i];
104 const struct rte_memseg *ms;
105 struct rte_fbarray *arr;
107 if (msl->memseg_arr.count == 0)
110 arr = &msl->memseg_arr;
112 ms_idx = rte_fbarray_find_next_used(arr, 0);
113 while (ms_idx >= 0) {
114 ms = rte_fbarray_get(arr, ms_idx);
115 ret = func(msl, ms, arg);
120 ms_idx = rte_fbarray_find_next_used(arr, ms_idx + 1);
127 is_null_map(const struct user_mem_map *map)
129 return map->addr == 0 && map->iova == 0 && map->len == 0;
132 /* we may need to merge user mem maps together in case of user mapping/unmapping
133 * chunks of memory, so we'll need a comparator function to sort segments.
136 user_mem_map_cmp(const void *a, const void *b)
138 const struct user_mem_map *umm_a = a;
139 const struct user_mem_map *umm_b = b;
141 /* move null entries to end */
142 if (is_null_map(umm_a))
144 if (is_null_map(umm_b))
147 /* sort by iova first */
148 if (umm_a->iova < umm_b->iova)
150 if (umm_a->iova > umm_b->iova)
153 if (umm_a->addr < umm_b->addr)
155 if (umm_a->addr > umm_b->addr)
158 if (umm_a->len < umm_b->len)
160 if (umm_a->len > umm_b->len)
166 /* adjust user map entry. this may result in shortening of an existing map, or in
167 * splitting an existing map in two pieces.
170 adjust_map(struct user_mem_map *src, struct user_mem_map *end,
171 uint64_t remove_va_start, uint64_t remove_len)
173 /* if va start is same as start address, we're simply moving start */
174 if (remove_va_start == src->addr) {
175 src->addr += remove_len;
176 src->iova += remove_len;
177 src->len -= remove_len;
178 } else if (remove_va_start + remove_len == src->addr + src->len) {
179 /* we're shrinking mapping from the end */
180 src->len -= remove_len;
182 /* we're blowing a hole in the middle */
183 struct user_mem_map tmp;
184 uint64_t total_len = src->len;
186 /* adjust source segment length */
187 src->len = remove_va_start - src->addr;
189 /* create temporary segment in the middle */
190 tmp.addr = src->addr + src->len;
191 tmp.iova = src->iova + src->len;
192 tmp.len = remove_len;
194 /* populate end segment - this one we will be keeping */
195 end->addr = tmp.addr + tmp.len;
196 end->iova = tmp.iova + tmp.len;
197 end->len = total_len - src->len - tmp.len;
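/* illustrative example (not part of the code): removing VA range
 * [0x2000, 0x3000) from a map covering [0x1000, 0x5000) shortens the original
 * entry to [0x1000, 0x2000) and fills 'end' with [0x3000, 0x5000); the IOVA
 * range is split at the same offsets.
 */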
201 /* try merging two maps into one, return 1 if succeeded */
203 merge_map(struct user_mem_map *left, struct user_mem_map *right)
205 if (left->addr + left->len != right->addr)
207 if (left->iova + left->len != right->iova)
210 left->len += right->len;
212 memset(right, 0, sizeof(*right));
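/* illustrative example (not part of the code): entries {addr=0x1000,
 * iova=0x1000, len=0x1000} and {addr=0x2000, iova=0x2000, len=0x1000} are
 * both VA- and IOVA-contiguous, so they collapse into a single {addr=0x1000,
 * iova=0x1000, len=0x2000} entry and the right-hand entry is zeroed out.
 */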
217 static struct user_mem_map *
218 find_user_mem_map(struct user_mem_maps *user_mem_maps, uint64_t addr,
219 uint64_t iova, uint64_t len)
221 uint64_t va_end = addr + len;
222 uint64_t iova_end = iova + len;
225 for (i = 0; i < user_mem_maps->n_maps; i++) {
226 struct user_mem_map *map = &user_mem_maps->maps[i];
227 uint64_t map_va_end = map->addr + map->len;
228 uint64_t map_iova_end = map->iova + map->len;
231 if (addr < map->addr || addr >= map_va_end)
233 /* check if VA end is within boundaries */
234 if (va_end <= map->addr || va_end > map_va_end)
237 /* check start IOVA */
238 if (iova < map->iova || iova >= map_iova_end)
240 /* check if IOVA end is within boundaries */
241 if (iova_end <= map->iova || iova_end > map_iova_end)
244 /* we've found our map */
250 /* this will sort all user maps, and merge/compact any adjacent maps */
252 compact_user_maps(struct user_mem_maps *user_mem_maps)
254 int i, n_merged, cur_idx;
256 qsort(user_mem_maps->maps, user_mem_maps->n_maps,
257 sizeof(user_mem_maps->maps[0]), user_mem_map_cmp);
259 /* we'll go over the list backwards when merging */
261 for (i = user_mem_maps->n_maps - 2; i >= 0; i--) {
262 struct user_mem_map *l, *r;
264 l = &user_mem_maps->maps[i];
265 r = &user_mem_maps->maps[i + 1];
267 if (is_null_map(l) || is_null_map(r))
274 /* the entries are still sorted, but now they have holes in them, so
275 * walk through the list and remove the holes
279 for (i = 0; i < user_mem_maps->n_maps; i++) {
280 if (!is_null_map(&user_mem_maps->maps[i])) {
281 struct user_mem_map *src, *dst;
283 src = &user_mem_maps->maps[i];
284 dst = &user_mem_maps->maps[cur_idx++];
287 memcpy(dst, src, sizeof(*src));
288 memset(src, 0, sizeof(*src));
292 user_mem_maps->n_maps = cur_idx;
297 vfio_open_group_fd(int iommu_group_num)
300 char filename[PATH_MAX];
301 struct rte_mp_msg mp_req, *mp_rep;
302 struct rte_mp_reply mp_reply;
303 struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
304 struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
306 /* if primary, try to open the group */
307 if (internal_config.process_type == RTE_PROC_PRIMARY) {
308 /* try regular group format */
309 snprintf(filename, sizeof(filename),
310 VFIO_GROUP_FMT, iommu_group_num);
311 vfio_group_fd = open(filename, O_RDWR);
312 if (vfio_group_fd < 0) {
313 /* if file not found, it's not an error */
314 if (errno != ENOENT) {
315 RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename,
320 /* special case: try no-IOMMU path as well */
321 snprintf(filename, sizeof(filename),
322 VFIO_NOIOMMU_GROUP_FMT,
324 vfio_group_fd = open(filename, O_RDWR);
325 if (vfio_group_fd < 0) {
326 if (errno != ENOENT) {
327 RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename,
333 /* noiommu group found */
336 return vfio_group_fd;
338 /* if we're in a secondary process, request group fd from the primary
339 * process via mp channel.
341 p->req = SOCKET_REQ_GROUP;
342 p->group_num = iommu_group_num;
343 strcpy(mp_req.name, EAL_VFIO_MP);
344 mp_req.len_param = sizeof(*p);
348 if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
349 mp_reply.nb_received == 1) {
350 mp_rep = &mp_reply.msgs[0];
351 p = (struct vfio_mp_param *)mp_rep->param;
352 if (p->result == SOCKET_OK && mp_rep->num_fds == 1) {
353 vfio_group_fd = mp_rep->fds[0];
354 } else if (p->result == SOCKET_NO_FD) {
355 RTE_LOG(ERR, EAL, " bad VFIO group fd\n");
361 if (vfio_group_fd < 0)
362 RTE_LOG(ERR, EAL, " cannot request group fd\n");
363 return vfio_group_fd;
366 static struct vfio_config *
367 get_vfio_cfg_by_group_num(int iommu_group_num)
369 struct vfio_config *vfio_cfg;
372 for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
373 vfio_cfg = &vfio_cfgs[i];
374 for (j = 0; j < VFIO_MAX_GROUPS; j++) {
375 if (vfio_cfg->vfio_groups[j].group_num ==
384 static struct vfio_config *
385 get_vfio_cfg_by_group_fd(int vfio_group_fd)
387 struct vfio_config *vfio_cfg;
390 for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
391 vfio_cfg = &vfio_cfgs[i];
392 for (j = 0; j < VFIO_MAX_GROUPS; j++)
393 if (vfio_cfg->vfio_groups[j].fd == vfio_group_fd)
400 static struct vfio_config *
401 get_vfio_cfg_by_container_fd(int container_fd)
405 for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
406 if (vfio_cfgs[i].vfio_container_fd == container_fd)
407 return &vfio_cfgs[i];
414 rte_vfio_get_group_fd(int iommu_group_num)
418 struct vfio_group *cur_grp;
419 struct vfio_config *vfio_cfg;
421 /* get the vfio_config it belongs to */
422 vfio_cfg = get_vfio_cfg_by_group_num(iommu_group_num);
423 vfio_cfg = vfio_cfg ? vfio_cfg : default_vfio_cfg;
425 /* check if we already have the group descriptor open */
426 for (i = 0; i < VFIO_MAX_GROUPS; i++)
427 if (vfio_cfg->vfio_groups[i].group_num == iommu_group_num)
428 return vfio_cfg->vfio_groups[i].fd;
430 /* Let's first see if there is room for a new group */
431 if (vfio_cfg->vfio_active_groups == VFIO_MAX_GROUPS) {
432 RTE_LOG(ERR, EAL, "Maximum number of VFIO groups reached!\n");
436 /* Now let's get an index for the new group */
437 for (i = 0; i < VFIO_MAX_GROUPS; i++)
438 if (vfio_cfg->vfio_groups[i].group_num == -1) {
439 cur_grp = &vfio_cfg->vfio_groups[i];
443 /* This should not happen */
444 if (i == VFIO_MAX_GROUPS) {
445 RTE_LOG(ERR, EAL, "No VFIO group free slot found\n");
449 vfio_group_fd = vfio_open_group_fd(iommu_group_num);
450 if (vfio_group_fd < 0) {
451 RTE_LOG(ERR, EAL, "Failed to open group %d\n", iommu_group_num);
455 cur_grp->group_num = iommu_group_num;
456 cur_grp->fd = vfio_group_fd;
457 vfio_cfg->vfio_active_groups++;
459 return vfio_group_fd;
463 get_vfio_group_idx(int vfio_group_fd)
465 struct vfio_config *vfio_cfg;
468 for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
469 vfio_cfg = &vfio_cfgs[i];
470 for (j = 0; j < VFIO_MAX_GROUPS; j++)
471 if (vfio_cfg->vfio_groups[j].fd == vfio_group_fd)
479 vfio_group_device_get(int vfio_group_fd)
481 struct vfio_config *vfio_cfg;
484 vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
485 if (vfio_cfg == NULL) {
486 RTE_LOG(ERR, EAL, " invalid group fd!\n");
490 i = get_vfio_group_idx(vfio_group_fd);
491 if (i < 0 || i > (VFIO_MAX_GROUPS - 1))
492 RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
494 vfio_cfg->vfio_groups[i].devices++;
498 vfio_group_device_put(int vfio_group_fd)
500 struct vfio_config *vfio_cfg;
503 vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
504 if (vfio_cfg == NULL) {
505 RTE_LOG(ERR, EAL, " invalid group fd!\n");
509 i = get_vfio_group_idx(vfio_group_fd);
510 if (i < 0 || i > (VFIO_MAX_GROUPS - 1))
511 RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
513 vfio_cfg->vfio_groups[i].devices--;
517 vfio_group_device_count(int vfio_group_fd)
519 struct vfio_config *vfio_cfg;
522 vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
523 if (vfio_cfg == NULL) {
524 RTE_LOG(ERR, EAL, " invalid group fd!\n");
528 i = get_vfio_group_idx(vfio_group_fd);
529 if (i < 0 || i > (VFIO_MAX_GROUPS - 1)) {
530 RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
534 return vfio_cfg->vfio_groups[i].devices;
538 vfio_mem_event_callback(enum rte_mem_event type, const void *addr, size_t len,
539 void *arg __rte_unused)
541 struct rte_memseg_list *msl;
542 struct rte_memseg *ms;
545 msl = rte_mem_virt2memseg_list(addr);
547 /* for IOVA as VA mode, no need to care for IOVA addresses */
548 if (rte_eal_iova_mode() == RTE_IOVA_VA) {
549 uint64_t vfio_va = (uint64_t)(uintptr_t)addr;
550 if (type == RTE_MEM_EVENT_ALLOC)
551 vfio_dma_mem_map(default_vfio_cfg, vfio_va, vfio_va,
554 vfio_dma_mem_map(default_vfio_cfg, vfio_va, vfio_va,
559 /* memsegs are contiguous in memory */
560 ms = rte_mem_virt2memseg(addr, msl);
561 while (cur_len < len) {
562 if (type == RTE_MEM_EVENT_ALLOC)
563 vfio_dma_mem_map(default_vfio_cfg, ms->addr_64,
564 ms->iova, ms->len, 1);
566 vfio_dma_mem_map(default_vfio_cfg, ms->addr_64,
567 ms->iova, ms->len, 0);
575 rte_vfio_clear_group(int vfio_group_fd)
578 struct rte_mp_msg mp_req, *mp_rep;
579 struct rte_mp_reply mp_reply;
580 struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
581 struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
582 struct vfio_config *vfio_cfg;
584 vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
585 if (vfio_cfg == NULL) {
586 RTE_LOG(ERR, EAL, " invalid group fd!\n");
590 if (internal_config.process_type == RTE_PROC_PRIMARY) {
592 i = get_vfio_group_idx(vfio_group_fd);
595 vfio_cfg->vfio_groups[i].group_num = -1;
596 vfio_cfg->vfio_groups[i].fd = -1;
597 vfio_cfg->vfio_groups[i].devices = 0;
598 vfio_cfg->vfio_active_groups--;
602 p->req = SOCKET_CLR_GROUP;
603 p->group_num = vfio_group_fd;
604 strcpy(mp_req.name, EAL_VFIO_MP);
605 mp_req.len_param = sizeof(*p);
608 if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
609 mp_reply.nb_received == 1) {
610 mp_rep = &mp_reply.msgs[0];
611 p = (struct vfio_mp_param *)mp_rep->param;
612 if (p->result == SOCKET_OK) {
615 } else if (p->result == SOCKET_NO_FD)
616 RTE_LOG(ERR, EAL, " bad VFIO group fd!\n");
618 RTE_LOG(ERR, EAL, " no such VFIO group fd!\n");
627 rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
628 int *vfio_dev_fd, struct vfio_device_info *device_info)
630 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
631 rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock;
632 struct vfio_group_status group_status = {
633 .argsz = sizeof(group_status)
635 struct vfio_config *vfio_cfg;
636 struct user_mem_maps *user_mem_maps;
637 int vfio_container_fd;
642 /* get group number */
643 ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_num);
645 RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
650 /* if negative, something failed */
654 /* get the actual group fd */
655 vfio_group_fd = rte_vfio_get_group_fd(iommu_group_num);
656 if (vfio_group_fd < 0)
659 /* if group_fd == 0, that means the device isn't managed by VFIO */
660 if (vfio_group_fd == 0) {
661 RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
667 * at this point, we know that this group is viable (meaning, all devices
668 * are either bound to VFIO or not bound to anything)
671 /* check if the group is viable */
672 ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &group_status);
674 RTE_LOG(ERR, EAL, " %s cannot get group status, "
675 "error %i (%s)\n", dev_addr, errno, strerror(errno));
676 close(vfio_group_fd);
677 rte_vfio_clear_group(vfio_group_fd);
679 } else if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
680 RTE_LOG(ERR, EAL, " %s VFIO group is not viable!\n", dev_addr);
681 close(vfio_group_fd);
682 rte_vfio_clear_group(vfio_group_fd);
686 /* get the vfio_config it belongs to */
687 vfio_cfg = get_vfio_cfg_by_group_num(iommu_group_num);
688 vfio_cfg = vfio_cfg ? vfio_cfg : default_vfio_cfg;
689 vfio_container_fd = vfio_cfg->vfio_container_fd;
690 user_mem_maps = &vfio_cfg->mem_maps;
692 /* check if group does not have a container yet */
693 if (!(group_status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
695 /* add group to a container */
696 ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER,
699 RTE_LOG(ERR, EAL, " %s cannot add VFIO group to container, "
700 "error %i (%s)\n", dev_addr, errno, strerror(errno));
701 close(vfio_group_fd);
702 rte_vfio_clear_group(vfio_group_fd);
707 * pick an IOMMU type and set up DMA mappings for container
709 * needs to be done only once, only when first group is
710 * assigned to a container and only in primary process.
711 * Note this can happen several times with the hotplug
714 if (internal_config.process_type == RTE_PROC_PRIMARY &&
715 vfio_cfg->vfio_active_groups == 1 &&
716 vfio_group_device_count(vfio_group_fd) == 0) {
717 const struct vfio_iommu_type *t;
719 /* select an IOMMU type which we will be using */
720 t = vfio_set_iommu_type(vfio_container_fd);
723 " %s failed to select IOMMU type\n",
725 close(vfio_group_fd);
726 rte_vfio_clear_group(vfio_group_fd);
729 /* lock memory hotplug before mapping and release it
730 * after registering callback, to prevent races
732 rte_rwlock_read_lock(mem_lock);
733 if (vfio_cfg == default_vfio_cfg)
734 ret = t->dma_map_func(vfio_container_fd);
739 " %s DMA remapping failed, error %i (%s)\n",
740 dev_addr, errno, strerror(errno));
741 close(vfio_group_fd);
742 rte_vfio_clear_group(vfio_group_fd);
743 rte_rwlock_read_unlock(mem_lock);
747 vfio_cfg->vfio_iommu_type = t;
749 /* re-map all user-mapped segments */
750 rte_spinlock_recursive_lock(&user_mem_maps->lock);
752 /* this IOMMU type may not support DMA mapping, but
753 * if we have mappings in the list - that means we have
754 * previously mapped something successfully, so we can
755 * be sure that DMA mapping is supported.
757 for (i = 0; i < user_mem_maps->n_maps; i++) {
758 struct user_mem_map *map;
759 map = &user_mem_maps->maps[i];
761 ret = t->dma_user_map_func(
763 map->addr, map->iova, map->len,
766 RTE_LOG(ERR, EAL, "Couldn't map user memory for DMA: "
768 "iova: 0x%" PRIx64 " "
769 "len: 0x%" PRIu64 "\n",
770 map->addr, map->iova,
772 rte_spinlock_recursive_unlock(
773 &user_mem_maps->lock);
774 rte_rwlock_read_unlock(mem_lock);
778 rte_spinlock_recursive_unlock(&user_mem_maps->lock);
780 /* register callback for mem events */
781 if (vfio_cfg == default_vfio_cfg)
782 ret = rte_mem_event_callback_register(
783 VFIO_MEM_EVENT_CLB_NAME,
784 vfio_mem_event_callback, NULL);
787 /* unlock memory hotplug */
788 rte_rwlock_read_unlock(mem_lock);
790 if (ret && rte_errno != ENOTSUP) {
791 RTE_LOG(ERR, EAL, "Could not install memory event callback for VFIO\n");
795 RTE_LOG(DEBUG, EAL, "Memory event callbacks not supported\n");
797 RTE_LOG(DEBUG, EAL, "Installed memory event callback for VFIO\n");
801 /* get a file descriptor for the device */
802 *vfio_dev_fd = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD, dev_addr);
803 if (*vfio_dev_fd < 0) {
804 /* if we cannot get a device fd, this implies a problem with
805 * the VFIO group or the container not having IOMMU configured.
808 RTE_LOG(WARNING, EAL, "Getting a vfio_dev_fd for %s failed\n",
810 close(vfio_group_fd);
811 rte_vfio_clear_group(vfio_group_fd);
815 /* test and setup the device */
816 ret = ioctl(*vfio_dev_fd, VFIO_DEVICE_GET_INFO, device_info);
818 RTE_LOG(ERR, EAL, " %s cannot get device info, "
819 "error %i (%s)\n", dev_addr, errno,
822 close(vfio_group_fd);
823 rte_vfio_clear_group(vfio_group_fd);
826 vfio_group_device_get(vfio_group_fd);
832 rte_vfio_release_device(const char *sysfs_base, const char *dev_addr,
835 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
836 rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock;
837 struct vfio_group_status group_status = {
838 .argsz = sizeof(group_status)
840 struct vfio_config *vfio_cfg;
845 /* we don't want any DMA mapping requests to come in while we're detaching
846 * the VFIO device, because this might be the last device and we might need
847 * to unregister the callback.
849 rte_rwlock_read_lock(mem_lock);
851 /* get group number */
852 ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_num);
854 RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver\n",
856 /* This is an error at this point. */
861 /* get the actual group fd */
862 vfio_group_fd = rte_vfio_get_group_fd(iommu_group_num);
863 if (vfio_group_fd <= 0) {
864 RTE_LOG(INFO, EAL, "rte_vfio_get_group_fd failed for %s\n",
870 /* get the vfio_config it belongs to */
871 vfio_cfg = get_vfio_cfg_by_group_num(iommu_group_num);
872 vfio_cfg = vfio_cfg ? vfio_cfg : default_vfio_cfg;
874 /* At this point we have an active group. Closing it will detach it from the
875 * container. If this is the last active group, the VFIO kernel code will
876 * unset the container and the IOMMU mappings.
879 /* Closing a device */
880 if (close(vfio_dev_fd) < 0) {
881 RTE_LOG(INFO, EAL, "Error when closing vfio_dev_fd for %s\n",
887 /* A VFIO group can have several devices attached. Only when there are
888 * no devices remaining should the group be closed.
890 vfio_group_device_put(vfio_group_fd);
891 if (!vfio_group_device_count(vfio_group_fd)) {
893 if (close(vfio_group_fd) < 0) {
894 RTE_LOG(INFO, EAL, "Error when closing vfio_group_fd for %s\n",
900 if (rte_vfio_clear_group(vfio_group_fd) < 0) {
901 RTE_LOG(INFO, EAL, "Error when clearing group for %s\n",
908 /* if there are no active device groups, unregister the callback to
909 * avoid spurious attempts to map/unmap memory from VFIO.
911 if (vfio_cfg == default_vfio_cfg && vfio_cfg->vfio_active_groups == 0)
912 rte_mem_event_callback_unregister(VFIO_MEM_EVENT_CLB_NAME,
919 rte_rwlock_read_unlock(mem_lock);
924 rte_vfio_enable(const char *modname)
926 /* initialize group list */
930 rte_spinlock_recursive_t lock = RTE_SPINLOCK_RECURSIVE_INITIALIZER;
932 for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
933 vfio_cfgs[i].vfio_container_fd = -1;
934 vfio_cfgs[i].vfio_active_groups = 0;
935 vfio_cfgs[i].vfio_iommu_type = NULL;
936 vfio_cfgs[i].mem_maps.lock = lock;
938 for (j = 0; j < VFIO_MAX_GROUPS; j++) {
939 vfio_cfgs[i].vfio_groups[j].fd = -1;
940 vfio_cfgs[i].vfio_groups[j].group_num = -1;
941 vfio_cfgs[i].vfio_groups[j].devices = 0;
945 /* inform the user that we are probing for VFIO */
946 RTE_LOG(INFO, EAL, "Probing VFIO support...\n");
948 /* check if vfio module is loaded */
949 vfio_available = rte_eal_check_module(modname);
951 /* return error directly */
952 if (vfio_available == -1) {
953 RTE_LOG(INFO, EAL, "Could not get loaded module details!\n");
957 /* return 0 if VFIO modules not loaded */
958 if (vfio_available == 0) {
959 RTE_LOG(DEBUG, EAL, "VFIO modules not loaded, "
960 "skipping VFIO support...\n");
964 default_vfio_cfg->vfio_container_fd = rte_vfio_get_container_fd();
966 /* check if we have VFIO driver enabled */
967 if (default_vfio_cfg->vfio_container_fd != -1) {
968 RTE_LOG(NOTICE, EAL, "VFIO support initialized\n");
969 default_vfio_cfg->vfio_enabled = 1;
971 RTE_LOG(NOTICE, EAL, "VFIO support could not be initialized\n");
978 rte_vfio_is_enabled(const char *modname)
980 const int mod_available = rte_eal_check_module(modname) > 0;
981 return default_vfio_cfg->vfio_enabled && mod_available;
984 const struct vfio_iommu_type *
985 vfio_set_iommu_type(int vfio_container_fd)
988 for (idx = 0; idx < RTE_DIM(iommu_types); idx++) {
989 const struct vfio_iommu_type *t = &iommu_types[idx];
991 int ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU,
994 RTE_LOG(NOTICE, EAL, " using IOMMU type %d (%s)\n",
995 t->type_id, t->name);
998 /* not an error, there may be more supported IOMMU types */
999 RTE_LOG(DEBUG, EAL, " set IOMMU type %d (%s) failed, "
1000 "error %i (%s)\n", t->type_id, t->name, errno,
1003 /* if we didn't find a suitable IOMMU type, fail */
1008 vfio_has_supported_extensions(int vfio_container_fd)
1011 unsigned idx, n_extensions = 0;
1012 for (idx = 0; idx < RTE_DIM(iommu_types); idx++) {
1013 const struct vfio_iommu_type *t = &iommu_types[idx];
1015 ret = ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION,
1018 RTE_LOG(ERR, EAL, " could not get IOMMU type, "
1019 "error %i (%s)\n", errno,
1021 close(vfio_container_fd);
1023 } else if (ret == 1) {
1024 /* we found a supported extension */
1027 RTE_LOG(DEBUG, EAL, " IOMMU type %d (%s) is %s\n",
1028 t->type_id, t->name,
1029 ret ? "supported" : "not supported");
1032 /* if we didn't find any supported IOMMU types, fail */
1033 if (!n_extensions) {
1034 close(vfio_container_fd);
1042 rte_vfio_get_container_fd(void)
1044 int ret, vfio_container_fd;
1045 struct rte_mp_msg mp_req, *mp_rep;
1046 struct rte_mp_reply mp_reply;
1047 struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
1048 struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
1051 /* if we're in a primary process, try to open the container */
1052 if (internal_config.process_type == RTE_PROC_PRIMARY) {
1053 vfio_container_fd = open(VFIO_CONTAINER_PATH, O_RDWR);
1054 if (vfio_container_fd < 0) {
1055 RTE_LOG(ERR, EAL, " cannot open VFIO container, "
1056 "error %i (%s)\n", errno, strerror(errno));
1060 /* check VFIO API version */
1061 ret = ioctl(vfio_container_fd, VFIO_GET_API_VERSION);
1062 if (ret != VFIO_API_VERSION) {
1064 RTE_LOG(ERR, EAL, " could not get VFIO API version, "
1065 "error %i (%s)\n", errno, strerror(errno));
1067 RTE_LOG(ERR, EAL, " unsupported VFIO API version!\n");
1068 close(vfio_container_fd);
1072 ret = vfio_has_supported_extensions(vfio_container_fd);
1074 RTE_LOG(ERR, EAL, " no supported IOMMU "
1075 "extensions found!\n");
1079 return vfio_container_fd;
1082 * if we're in a secondary process, request container fd from the
1083 * primary process via mp channel
1085 p->req = SOCKET_REQ_CONTAINER;
1086 strcpy(mp_req.name, EAL_VFIO_MP);
1087 mp_req.len_param = sizeof(*p);
1090 vfio_container_fd = -1;
1091 if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
1092 mp_reply.nb_received == 1) {
1093 mp_rep = &mp_reply.msgs[0];
1094 p = (struct vfio_mp_param *)mp_rep->param;
1095 if (p->result == SOCKET_OK && mp_rep->num_fds == 1) {
1096 free(mp_reply.msgs);
1097 return mp_rep->fds[0];
1099 free(mp_reply.msgs);
1102 RTE_LOG(ERR, EAL, " cannot request container fd\n");
1107 rte_vfio_get_group_num(const char *sysfs_base,
1108 const char *dev_addr, int *iommu_group_num)
1110 char linkname[PATH_MAX];
1111 char filename[PATH_MAX];
1112 char *tok[16], *group_tok, *end;
1115 memset(linkname, 0, sizeof(linkname));
1116 memset(filename, 0, sizeof(filename));
1118 /* try to find out IOMMU group for this device */
1119 snprintf(linkname, sizeof(linkname),
1120 "%s/%s/iommu_group", sysfs_base, dev_addr);
1122 ret = readlink(linkname, filename, sizeof(filename));
1124 /* if the link doesn't exist, no VFIO for us */
1128 ret = rte_strsplit(filename, sizeof(filename),
1129 tok, RTE_DIM(tok), '/');
1132 RTE_LOG(ERR, EAL, " %s cannot get IOMMU group\n", dev_addr);
1136 /* IOMMU group is always the last token */
1138 group_tok = tok[ret - 1];
1140 *iommu_group_num = strtol(group_tok, &end, 10);
1141 if ((end != group_tok && *end != '\0') || errno != 0) {
1142 RTE_LOG(ERR, EAL, " %s error parsing IOMMU number!\n", dev_addr);
1150 type1_map(const struct rte_memseg_list *msl __rte_unused,
1151 const struct rte_memseg *ms, void *arg)
1153 int *vfio_container_fd = arg;
1155 return vfio_type1_dma_mem_map(*vfio_container_fd, ms->addr_64, ms->iova,
1160 vfio_type1_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
1161 uint64_t len, int do_map)
1163 struct vfio_iommu_type1_dma_map dma_map;
1164 struct vfio_iommu_type1_dma_unmap dma_unmap;
1168 memset(&dma_map, 0, sizeof(dma_map));
1169 dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
1170 dma_map.vaddr = vaddr;
1172 dma_map.iova = iova;
1173 dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
1174 VFIO_DMA_MAP_FLAG_WRITE;
1176 ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
1178 RTE_LOG(ERR, EAL, " cannot set up DMA remapping, error %i (%s)\n",
1179 errno, strerror(errno));
1183 memset(&dma_unmap, 0, sizeof(dma_unmap));
1184 dma_unmap.argsz = sizeof(struct vfio_iommu_type1_dma_unmap);
1185 dma_unmap.size = len;
1186 dma_unmap.iova = iova;
1188 ret = ioctl(vfio_container_fd, VFIO_IOMMU_UNMAP_DMA,
1191 RTE_LOG(ERR, EAL, " cannot clear DMA remapping, error %i (%s)\n",
1192 errno, strerror(errno));
1201 vfio_type1_dma_map(int vfio_container_fd)
1203 return rte_memseg_walk(type1_map, &vfio_container_fd);
1207 vfio_spapr_dma_do_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
1208 uint64_t len, int do_map)
1210 struct vfio_iommu_type1_dma_map dma_map;
1211 struct vfio_iommu_type1_dma_unmap dma_unmap;
1215 memset(&dma_map, 0, sizeof(dma_map));
1216 dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
1217 dma_map.vaddr = vaddr;
1219 dma_map.iova = iova;
1220 dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
1221 VFIO_DMA_MAP_FLAG_WRITE;
1223 ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
1225 RTE_LOG(ERR, EAL, " cannot set up DMA remapping, error %i (%s)\n",
1226 errno, strerror(errno));
1231 struct vfio_iommu_spapr_register_memory reg = {
1232 .argsz = sizeof(reg),
1235 reg.vaddr = (uintptr_t) vaddr;
1238 ret = ioctl(vfio_container_fd,
1239 VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
1241 RTE_LOG(ERR, EAL, " cannot unregister vaddr for IOMMU, error %i (%s)\n",
1242 errno, strerror(errno));
1246 memset(&dma_unmap, 0, sizeof(dma_unmap));
1247 dma_unmap.argsz = sizeof(struct vfio_iommu_type1_dma_unmap);
1248 dma_unmap.size = len;
1249 dma_unmap.iova = iova;
1251 ret = ioctl(vfio_container_fd, VFIO_IOMMU_UNMAP_DMA,
1254 RTE_LOG(ERR, EAL, " cannot clear DMA remapping, error %i (%s)\n",
1255 errno, strerror(errno));
1264 vfio_spapr_map_walk(const struct rte_memseg_list *msl __rte_unused,
1265 const struct rte_memseg *ms, void *arg)
1267 int *vfio_container_fd = arg;
1269 return vfio_spapr_dma_mem_map(*vfio_container_fd, ms->addr_64, ms->iova,
1273 struct spapr_walk_param {
1274 uint64_t window_size;
1275 uint64_t hugepage_sz;
1278 vfio_spapr_window_size_walk(const struct rte_memseg_list *msl __rte_unused,
1279 const struct rte_memseg *ms, void *arg)
1281 struct spapr_walk_param *param = arg;
1282 uint64_t max = ms->iova + ms->len;
1284 if (max > param->window_size) {
1285 param->hugepage_sz = ms->hugepage_sz;
1286 param->window_size = max;
1293 vfio_spapr_create_new_dma_window(int vfio_container_fd,
1294 struct vfio_iommu_spapr_tce_create *create) {
1295 struct vfio_iommu_spapr_tce_remove remove = {
1296 .argsz = sizeof(remove),
1298 struct vfio_iommu_spapr_tce_info info = {
1299 .argsz = sizeof(info),
1303 /* query spapr iommu info */
1304 ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
1306 RTE_LOG(ERR, EAL, " cannot get iommu info, "
1307 "error %i (%s)\n", errno, strerror(errno));
1311 /* remove the default 32-bit DMA window */
1312 remove.start_addr = info.dma32_window_start;
1313 ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
1315 RTE_LOG(ERR, EAL, " cannot remove default DMA window, "
1316 "error %i (%s)\n", errno, strerror(errno));
1320 /* create new DMA window */
1321 ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, create);
1323 RTE_LOG(ERR, EAL, " cannot create new DMA window, "
1324 "error %i (%s)\n", errno, strerror(errno));
1328 if (create->start_addr != 0) {
1329 RTE_LOG(ERR, EAL, " DMA window start address != 0\n");
1337 vfio_spapr_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
1338 uint64_t len, int do_map)
1340 struct spapr_walk_param param;
1341 struct vfio_iommu_spapr_tce_create create = {
1342 .argsz = sizeof(create),
1344 struct vfio_config *vfio_cfg;
1345 struct user_mem_maps *user_mem_maps;
1348 vfio_cfg = get_vfio_cfg_by_container_fd(vfio_container_fd);
1349 if (vfio_cfg == NULL) {
1350 RTE_LOG(ERR, EAL, " invalid container fd!\n");
1354 user_mem_maps = &vfio_cfg->mem_maps;
1355 rte_spinlock_recursive_lock(&user_mem_maps->lock);
1357 /* check if window size needs to be adjusted */
1358 memset(&param, 0, sizeof(param));
1360 if (memseg_walk_thread_unsafe(vfio_spapr_window_size_walk,
1362 RTE_LOG(ERR, EAL, "Could not get window size\n");
1367 /* also check user maps */
1368 for (i = 0; i < user_mem_maps->n_maps; i++) {
1369 uint64_t max = user_mem_maps->maps[i].iova +
1370 user_mem_maps->maps[i].len;
1371 param.window_size = RTE_MAX(param.window_size, max);
1374 /* sPAPR requires window size to be a power of 2 */
1375 create.window_size = rte_align64pow2(param.window_size);
1376 create.page_shift = __builtin_ctzll(param.hugepage_sz);
1381 /* re-create window and remap the entire memory */
1382 if (iova > create.window_size) {
1383 if (vfio_spapr_create_new_dma_window(vfio_container_fd,
1385 RTE_LOG(ERR, EAL, "Could not create new DMA window\n");
1389 if (memseg_walk_thread_unsafe(vfio_spapr_map_walk,
1390 &vfio_container_fd) < 0) {
1391 RTE_LOG(ERR, EAL, "Could not recreate DMA maps\n");
1395 /* remap all user maps */
1396 for (i = 0; i < user_mem_maps->n_maps; i++) {
1397 struct user_mem_map *map =
1398 &user_mem_maps->maps[i];
1399 if (vfio_spapr_dma_do_map(vfio_container_fd,
1400 map->addr, map->iova, map->len,
1402 RTE_LOG(ERR, EAL, "Could not recreate user DMA maps\n");
1409 /* now that we've remapped all of the memory that was present
1410 * before, map the segment that we were requested to map.
1412 * however, if we were called by the callback, the memory we
1413 * were called with was already in the memseg list, so previous
1414 * mapping should've mapped that segment already.
1416 * virt2memseg_list is a relatively cheap check, so use that. if
1417 * memory is within any memseg list, it's a memseg, so it's
1420 addr = (void *)(uintptr_t)vaddr;
1421 if (rte_mem_virt2memseg_list(addr) == NULL &&
1422 vfio_spapr_dma_do_map(vfio_container_fd,
1423 vaddr, iova, len, 1) < 0) {
1424 RTE_LOG(ERR, EAL, "Could not map segment\n");
1429 /* for unmap, check if iova within DMA window */
1430 if (iova > create.window_size) {
1431 RTE_LOG(ERR, EAL, "iova beyond DMA window for unmap\n");
1436 vfio_spapr_dma_do_map(vfio_container_fd, vaddr, iova, len, 0);
1439 rte_spinlock_recursive_unlock(&user_mem_maps->lock);
1444 vfio_spapr_dma_map(int vfio_container_fd)
1446 struct vfio_iommu_spapr_tce_create create = {
1447 .argsz = sizeof(create),
1449 struct spapr_walk_param param;
1451 memset(&param, 0, sizeof(param));
1453 /* create DMA window from 0 to max(phys_addr + len) */
1454 rte_memseg_walk(vfio_spapr_window_size_walk, &param);
1456 /* sPAPR requires window size to be a power of 2 */
1457 create.window_size = rte_align64pow2(param.window_size);
1458 create.page_shift = __builtin_ctzll(param.hugepage_sz);
1461 if (vfio_spapr_create_new_dma_window(vfio_container_fd, &create) < 0) {
1462 RTE_LOG(ERR, EAL, "Could not create new DMA window\n");
1466 /* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */
1467 if (rte_memseg_walk(vfio_spapr_map_walk, &vfio_container_fd) < 0)
1474 vfio_noiommu_dma_map(int __rte_unused vfio_container_fd)
1476 /* No-IOMMU mode does not need DMA mapping */
1481 vfio_noiommu_dma_mem_map(int __rte_unused vfio_container_fd,
1482 uint64_t __rte_unused vaddr,
1483 uint64_t __rte_unused iova, uint64_t __rte_unused len,
1484 int __rte_unused do_map)
1486 /* No-IOMMU mode does not need DMA mapping */
1491 vfio_dma_mem_map(struct vfio_config *vfio_cfg, uint64_t vaddr, uint64_t iova,
1492 uint64_t len, int do_map)
1494 const struct vfio_iommu_type *t = vfio_cfg->vfio_iommu_type;
1497 RTE_LOG(ERR, EAL, " VFIO support not initialized\n");
1502 if (!t->dma_user_map_func) {
1504 " VFIO custom DMA region maping not supported by IOMMU %s\n",
1506 rte_errno = ENOTSUP;
1510 return t->dma_user_map_func(vfio_cfg->vfio_container_fd, vaddr, iova,
1515 container_dma_map(struct vfio_config *vfio_cfg, uint64_t vaddr, uint64_t iova,
1518 struct user_mem_map *new_map;
1519 struct user_mem_maps *user_mem_maps;
1522 user_mem_maps = &vfio_cfg->mem_maps;
1523 rte_spinlock_recursive_lock(&user_mem_maps->lock);
1524 if (user_mem_maps->n_maps == VFIO_MAX_USER_MEM_MAPS) {
1525 RTE_LOG(ERR, EAL, "No more space for user mem maps\n");
1531 if (vfio_dma_mem_map(vfio_cfg, vaddr, iova, len, 1)) {
1532 /* technically, this will fail if there are currently no devices
1533 * plugged in, even though this mapping might have succeeded had a device
1534 * been added later. however, since we cannot verify if this
1535 * is a valid mapping without having a device attached, consider
1536 * this to be unsupported, because we can't just store any old
1537 * mapping and pollute list of active mappings willy-nilly.
1539 RTE_LOG(ERR, EAL, "Couldn't map new region for DMA\n");
1543 /* create new user mem map entry */
1544 new_map = &user_mem_maps->maps[user_mem_maps->n_maps++];
1545 new_map->addr = vaddr;
1546 new_map->iova = iova;
1549 compact_user_maps(user_mem_maps);
1551 rte_spinlock_recursive_unlock(&user_mem_maps->lock);
1556 container_dma_unmap(struct vfio_config *vfio_cfg, uint64_t vaddr, uint64_t iova,
1559 struct user_mem_map *map, *new_map = NULL;
1560 struct user_mem_maps *user_mem_maps;
1563 user_mem_maps = &vfio_cfg->mem_maps;
1564 rte_spinlock_recursive_lock(&user_mem_maps->lock);
1566 /* find our mapping */
1567 map = find_user_mem_map(user_mem_maps, vaddr, iova, len);
1569 RTE_LOG(ERR, EAL, "Couldn't find previously mapped region\n");
1574 if (map->addr != vaddr || map->iova != iova || map->len != len) {
1575 /* we're partially unmapping a previously mapped region, so we
1576 * need to split entry into two.
1578 if (user_mem_maps->n_maps == VFIO_MAX_USER_MEM_MAPS) {
1579 RTE_LOG(ERR, EAL, "Not enough space to store partial mapping\n");
1584 new_map = &user_mem_maps->maps[user_mem_maps->n_maps++];
1587 /* unmap the entry */
1588 if (vfio_dma_mem_map(vfio_cfg, vaddr, iova, len, 0)) {
1589 /* there may not be any devices plugged in, so unmapping will
1590 * fail with ENODEV/ENOTSUP rte_errno values, but that doesn't
1591 * stop us from removing the mapping, as the assumption is we
1592 * won't be needing this memory any more and thus will want to
1593 * prevent it from being remapped again on hotplug. so, only
1594 * fail if we indeed failed to unmap (e.g. if the mapping was
1595 * within our mapped range but had invalid alignment).
1597 if (rte_errno != ENODEV && rte_errno != ENOTSUP) {
1598 RTE_LOG(ERR, EAL, "Couldn't unmap region for DMA\n");
1602 RTE_LOG(DEBUG, EAL, "DMA unmapping failed, but removing mappings anyway\n");
1605 /* remove map from the list of active mappings */
1606 if (new_map != NULL) {
1607 adjust_map(map, new_map, vaddr, len);
1609 /* if we've created a new map by splitting, sort everything */
1610 if (!is_null_map(new_map)) {
1611 compact_user_maps(user_mem_maps);
1613 /* we've created a new mapping, but it was unused */
1614 user_mem_maps->n_maps--;
1617 memset(map, 0, sizeof(*map));
1618 compact_user_maps(user_mem_maps);
1619 user_mem_maps->n_maps--;
1623 rte_spinlock_recursive_unlock(&user_mem_maps->lock);
1627 int __rte_experimental
1628 rte_vfio_dma_map(uint64_t vaddr, uint64_t iova, uint64_t len)
1635 return container_dma_map(default_vfio_cfg, vaddr, iova, len);
1638 int __rte_experimental
1639 rte_vfio_dma_unmap(uint64_t vaddr, uint64_t iova, uint64_t len)
1646 return container_dma_unmap(default_vfio_cfg, vaddr, iova, len);
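/* usage sketch (hypothetical caller, not part of this file): an application
 * that wants an externally allocated, IOVA-contiguous buffer usable for DMA
 * with the default container could do something like:
 *
 *     uint64_t va = (uint64_t)(uintptr_t)buf;
 *     uint64_t iova = va; // assumes IOVA-as-VA mode
 *
 *     if (rte_vfio_dma_map(va, iova, buf_len) < 0)
 *             // handle error; rte_errno is set (e.g. ENOTSUP, ENODEV)
 *     ...
 *     rte_vfio_dma_unmap(va, iova, buf_len);
 *
 * 'buf' and 'buf_len' are placeholders for an application-provided buffer.
 */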
1650 rte_vfio_noiommu_is_enabled(void)
1656 fd = open(VFIO_NOIOMMU_MODE, O_RDONLY);
1658 if (errno != ENOENT) {
1659 RTE_LOG(ERR, EAL, " cannot open vfio noiommu file %i (%s)\n",
1660 errno, strerror(errno));
1664 * else the file does not exist,
1665 * i.e. noiommu is not enabled
1670 cnt = read(fd, &c, 1);
1673 RTE_LOG(ERR, EAL, " unable to read from vfio noiommu "
1674 "file %i (%s)\n", errno, strerror(errno));
1681 int __rte_experimental
1682 rte_vfio_container_create(void)
1686 /* Find an empty slot to store new vfio config */
1687 for (i = 1; i < VFIO_MAX_CONTAINERS; i++) {
1688 if (vfio_cfgs[i].vfio_container_fd == -1)
1692 if (i == VFIO_MAX_CONTAINERS) {
1693 RTE_LOG(ERR, EAL, "Exceeded max VFIO container limit\n");
1697 vfio_cfgs[i].vfio_container_fd = rte_vfio_get_container_fd();
1698 if (vfio_cfgs[i].vfio_container_fd < 0) {
1699 RTE_LOG(NOTICE, EAL, "Failed to create a new container\n");
1703 return vfio_cfgs[i].vfio_container_fd;
1706 int __rte_experimental
1707 rte_vfio_container_destroy(int container_fd)
1709 struct vfio_config *vfio_cfg;
1712 vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
1713 if (vfio_cfg == NULL) {
1714 RTE_LOG(ERR, EAL, "Invalid container fd\n");
1718 for (i = 0; i < VFIO_MAX_GROUPS; i++)
1719 if (vfio_cfg->vfio_groups[i].group_num != -1)
1720 rte_vfio_container_group_unbind(container_fd,
1721 vfio_cfg->vfio_groups[i].group_num);
1723 close(container_fd);
1724 vfio_cfg->vfio_container_fd = -1;
1725 vfio_cfg->vfio_active_groups = 0;
1726 vfio_cfg->vfio_iommu_type = NULL;
1731 int __rte_experimental
1732 rte_vfio_container_group_bind(int container_fd, int iommu_group_num)
1734 struct vfio_config *vfio_cfg;
1735 struct vfio_group *cur_grp;
1739 vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
1740 if (vfio_cfg == NULL) {
1741 RTE_LOG(ERR, EAL, "Invalid container fd\n");
1745 /* Check room for new group */
1746 if (vfio_cfg->vfio_active_groups == VFIO_MAX_GROUPS) {
1747 RTE_LOG(ERR, EAL, "Maximum number of VFIO groups reached!\n");
1751 /* Get an index for the new group */
1752 for (i = 0; i < VFIO_MAX_GROUPS; i++)
1753 if (vfio_cfg->vfio_groups[i].group_num == -1) {
1754 cur_grp = &vfio_cfg->vfio_groups[i];
1758 /* This should not happen */
1759 if (i == VFIO_MAX_GROUPS) {
1760 RTE_LOG(ERR, EAL, "No VFIO group free slot found\n");
1764 vfio_group_fd = vfio_open_group_fd(iommu_group_num);
1765 if (vfio_group_fd < 0) {
1766 RTE_LOG(ERR, EAL, "Failed to open group %d\n", iommu_group_num);
1769 cur_grp->group_num = iommu_group_num;
1770 cur_grp->fd = vfio_group_fd;
1771 cur_grp->devices = 0;
1772 vfio_cfg->vfio_active_groups++;
1774 return vfio_group_fd;
1777 int __rte_experimental
1778 rte_vfio_container_group_unbind(int container_fd, int iommu_group_num)
1780 struct vfio_config *vfio_cfg;
1781 struct vfio_group *cur_grp;
1784 vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
1785 if (vfio_cfg == NULL) {
1786 RTE_LOG(ERR, EAL, "Invalid container fd\n");
1790 for (i = 0; i < VFIO_MAX_GROUPS; i++) {
1791 if (vfio_cfg->vfio_groups[i].group_num == iommu_group_num) {
1792 cur_grp = &vfio_cfg->vfio_groups[i];
1797 /* This should not happen */
1798 if (i == VFIO_MAX_GROUPS) {
1799 RTE_LOG(ERR, EAL, "Specified group number not found\n");
1803 if (cur_grp->fd >= 0 && close(cur_grp->fd) < 0) {
1804 RTE_LOG(ERR, EAL, "Error when closing vfio_group_fd for"
1805 " iommu_group_num %d\n", iommu_group_num);
1808 cur_grp->group_num = -1;
1810 cur_grp->devices = 0;
1811 vfio_cfg->vfio_active_groups--;
1816 int __rte_experimental
1817 rte_vfio_container_dma_map(int container_fd, uint64_t vaddr, uint64_t iova,
1820 struct vfio_config *vfio_cfg;
1827 vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
1828 if (vfio_cfg == NULL) {
1829 RTE_LOG(ERR, EAL, "Invalid container fd\n");
1833 return container_dma_map(vfio_cfg, vaddr, iova, len);
1836 int __rte_experimental
1837 rte_vfio_container_dma_unmap(int container_fd, uint64_t vaddr, uint64_t iova,
1840 struct vfio_config *vfio_cfg;
1847 vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
1848 if (vfio_cfg == NULL) {
1849 RTE_LOG(ERR, EAL, "Invalid container fd\n");
1853 return container_dma_unmap(vfio_cfg, vaddr, iova, len);
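/* usage sketch (hypothetical caller, not part of this file): a driver that
 * needs its own container rather than the default one typically chains these
 * experimental calls:
 *
 *     int cfd = rte_vfio_container_create();
 *     if (cfd < 0)
 *             // handle error
 *     if (rte_vfio_container_group_bind(cfd, iommu_group_num) < 0)
 *             // handle error
 *     if (rte_vfio_container_dma_map(cfd, va, iova, len) < 0)
 *             // handle error
 *     ...
 *     rte_vfio_container_dma_unmap(cfd, va, iova, len);
 *     rte_vfio_container_group_unbind(cfd, iommu_group_num);
 *     rte_vfio_container_destroy(cfd);
 *
 * 'iommu_group_num' (e.g. from rte_vfio_get_group_num()), 'va', 'iova' and
 * 'len' are placeholders supplied by the caller.
 */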
1858 int __rte_experimental
1859 rte_vfio_dma_map(uint64_t __rte_unused vaddr, __rte_unused uint64_t iova,
1860 __rte_unused uint64_t len)
1865 int __rte_experimental
1866 rte_vfio_dma_unmap(uint64_t __rte_unused vaddr, uint64_t __rte_unused iova,
1867 __rte_unused uint64_t len)
1873 rte_vfio_setup_device(__rte_unused const char *sysfs_base,
1874 __rte_unused const char *dev_addr,
1875 __rte_unused int *vfio_dev_fd,
1876 __rte_unused struct vfio_device_info *device_info)
1882 rte_vfio_release_device(__rte_unused const char *sysfs_base,
1883 __rte_unused const char *dev_addr, __rte_unused int fd)
1889 rte_vfio_enable(__rte_unused const char *modname)
1895 rte_vfio_is_enabled(__rte_unused const char *modname)
1901 rte_vfio_noiommu_is_enabled(void)
1907 rte_vfio_clear_group(__rte_unused int vfio_group_fd)
1912 int __rte_experimental
1913 rte_vfio_get_group_num(__rte_unused const char *sysfs_base,
1914 __rte_unused const char *dev_addr,
1915 __rte_unused int *iommu_group_num)
1920 int __rte_experimental
1921 rte_vfio_get_container_fd(void)
1926 int __rte_experimental
1927 rte_vfio_get_group_fd(__rte_unused int iommu_group_num)
1932 int __rte_experimental
1933 rte_vfio_container_create(void)
1938 int __rte_experimental
1939 rte_vfio_container_destroy(__rte_unused int container_fd)
1944 int __rte_experimental
1945 rte_vfio_container_group_bind(__rte_unused int container_fd,
1946 __rte_unused int iommu_group_num)
1951 int __rte_experimental
1952 rte_vfio_container_group_unbind(__rte_unused int container_fd,
1953 __rte_unused int iommu_group_num)
1958 int __rte_experimental
1959 rte_vfio_container_dma_map(__rte_unused int container_fd,
1960 __rte_unused uint64_t vaddr,
1961 __rte_unused uint64_t iova,
1962 __rte_unused uint64_t len)
1967 int __rte_experimental
1968 rte_vfio_container_dma_unmap(__rte_unused int container_fd,
1969 __rte_unused uint64_t vaddr,
1970 __rte_unused uint64_t iova,
1971 __rte_unused uint64_t len)
1976 #endif /* VFIO_PRESENT */