1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
11 #include <rte_errno.h>
13 #include <rte_memory.h>
14 #include <rte_eal_memconfig.h>
17 #include "eal_filesystem.h"
18 #include "eal_memcfg.h"
20 #include "eal_private.h"
21 #include "eal_internal_cfg.h"
25 #define VFIO_MEM_EVENT_CLB_NAME "vfio_mem_event_clb"
27 /* hot plug/unplug of VFIO groups may cause all DMA maps to be dropped. we can
28 * recreate the mappings for DPDK segments, but we cannot do so for memory that
29 * was registered by the user themselves, so we need to store the user mappings
30 * somewhere, to recreate them later.
32 #define VFIO_MAX_USER_MEM_MAPS 256
39 struct user_mem_maps {
40 rte_spinlock_recursive_t lock;
42 struct user_mem_map maps[VFIO_MAX_USER_MEM_MAPS];
47 int vfio_container_fd;
48 int vfio_active_groups;
49 const struct vfio_iommu_type *vfio_iommu_type;
50 struct vfio_group vfio_groups[VFIO_MAX_GROUPS];
51 struct user_mem_maps mem_maps;
54 /* per-process VFIO config */
55 static struct vfio_config vfio_cfgs[VFIO_MAX_CONTAINERS];
56 static struct vfio_config *default_vfio_cfg = &vfio_cfgs[0];
58 static int vfio_type1_dma_map(int);
59 static int vfio_type1_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
60 static int vfio_spapr_dma_map(int);
61 static int vfio_spapr_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
62 static int vfio_noiommu_dma_map(int);
63 static int vfio_noiommu_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
64 static int vfio_dma_mem_map(struct vfio_config *vfio_cfg, uint64_t vaddr,
65 uint64_t iova, uint64_t len, int do_map);
67 /* IOMMU types we support */
68 static const struct vfio_iommu_type iommu_types[] = {
69 /* x86 IOMMU, otherwise known as type 1 */
71 .type_id = RTE_VFIO_TYPE1,
73 .dma_map_func = &vfio_type1_dma_map,
74 .dma_user_map_func = &vfio_type1_dma_mem_map
76 /* ppc64 IOMMU, otherwise known as spapr */
78 .type_id = RTE_VFIO_SPAPR,
80 .dma_map_func = &vfio_spapr_dma_map,
81 .dma_user_map_func = &vfio_spapr_dma_mem_map
85 .type_id = RTE_VFIO_NOIOMMU,
87 .dma_map_func = &vfio_noiommu_dma_map,
88 .dma_user_map_func = &vfio_noiommu_dma_mem_map
93 is_null_map(const struct user_mem_map *map)
95 return map->addr == 0 && map->iova == 0 && map->len == 0;
98 /* we may need to merge user mem maps together in case of user mapping/unmapping
99 * chunks of memory, so we'll need a comparator function to sort segments.
102 user_mem_map_cmp(const void *a, const void *b)
104 const struct user_mem_map *umm_a = a;
105 const struct user_mem_map *umm_b = b;
107 /* move null entries to end */
108 if (is_null_map(umm_a))
110 if (is_null_map(umm_b))
113 /* sort by iova first */
114 if (umm_a->iova < umm_b->iova)
116 if (umm_a->iova > umm_b->iova)
119 if (umm_a->addr < umm_b->addr)
121 if (umm_a->addr > umm_b->addr)
124 if (umm_a->len < umm_b->len)
126 if (umm_a->len > umm_b->len)
132 /* adjust user map entry. this may result in shortening of existing map, or in
133 * splitting existing map in two pieces.
136 adjust_map(struct user_mem_map *src, struct user_mem_map *end,
137 uint64_t remove_va_start, uint64_t remove_len)
139 /* if va start is same as start address, we're simply moving start */
140 if (remove_va_start == src->addr) {
141 src->addr += remove_len;
142 src->iova += remove_len;
143 src->len -= remove_len;
144 } else if (remove_va_start + remove_len == src->addr + src->len) {
145 /* we're shrinking mapping from the end */
146 src->len -= remove_len;
148 /* we're blowing a hole in the middle */
149 struct user_mem_map tmp;
150 uint64_t total_len = src->len;
152 /* adjust source segment length */
153 src->len = remove_va_start - src->addr;
155 /* create temporary segment in the middle */
156 tmp.addr = src->addr + src->len;
157 tmp.iova = src->iova + src->len;
158 tmp.len = remove_len;
160 /* populate end segment - this one we will be keeping */
161 end->addr = tmp.addr + tmp.len;
162 end->iova = tmp.iova + tmp.len;
163 end->len = total_len - src->len - tmp.len;
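/* adjust_map() example (illustrative addresses): removing [0x2000, 0x3000)
 * from a map covering [0x1000, 0x5000) shortens the source map to
 * [0x1000, 0x2000) and populates the end map with [0x3000, 0x5000). A removal
 * touching either edge of the map only trims the source and leaves the end
 * map untouched (a null entry).
 */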
167 /* try merging two maps into one, return 1 if succeeded */
169 merge_map(struct user_mem_map *left, struct user_mem_map *right)
171 if (left->addr + left->len != right->addr)
173 if (left->iova + left->len != right->iova)
176 left->len += right->len;
178 memset(right, 0, sizeof(*right));
183 static struct user_mem_map *
184 find_user_mem_map(struct user_mem_maps *user_mem_maps, uint64_t addr,
185 uint64_t iova, uint64_t len)
187 uint64_t va_end = addr + len;
188 uint64_t iova_end = iova + len;
191 for (i = 0; i < user_mem_maps->n_maps; i++) {
192 struct user_mem_map *map = &user_mem_maps->maps[i];
193 uint64_t map_va_end = map->addr + map->len;
194 uint64_t map_iova_end = map->iova + map->len;
197 if (addr < map->addr || addr >= map_va_end)
199 /* check if VA end is within boundaries */
200 if (va_end <= map->addr || va_end > map_va_end)
203 /* check start IOVA */
204 if (iova < map->iova || iova >= map_iova_end)
206 /* check if IOVA end is within boundaries */
207 if (iova_end <= map->iova || iova_end > map_iova_end)
210 /* we've found our map */
216 /* this will sort all user maps, and merge/compact any adjacent maps */
218 compact_user_maps(struct user_mem_maps *user_mem_maps)
220 int i, n_merged, cur_idx;
222 qsort(user_mem_maps->maps, user_mem_maps->n_maps,
223 sizeof(user_mem_maps->maps[0]), user_mem_map_cmp);
225 /* we'll go over the list backwards when merging */
227 for (i = user_mem_maps->n_maps - 2; i >= 0; i--) {
228 struct user_mem_map *l, *r;
230 l = &user_mem_maps->maps[i];
231 r = &user_mem_maps->maps[i + 1];
233 if (is_null_map(l) || is_null_map(r))
240 /* the entries are still sorted, but now they have holes in them, so
241 * walk through the list and remove the holes
245 for (i = 0; i < user_mem_maps->n_maps; i++) {
246 if (!is_null_map(&user_mem_maps->maps[i])) {
247 struct user_mem_map *src, *dst;
249 src = &user_mem_maps->maps[i];
250 dst = &user_mem_maps->maps[cur_idx++];
253 memcpy(dst, src, sizeof(*src));
254 memset(src, 0, sizeof(*src));
258 user_mem_maps->n_maps = cur_idx;
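/* Open a group fd for the given IOMMU group. In the primary process this
 * opens the group device node (VFIO_GROUP_FMT, falling back to the no-IOMMU
 * variant); in a secondary process the fd is requested from the primary over
 * the multi-process channel. Returns the fd, -ENOENT if the group does not
 * exist, or a negative value on error.
 */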
263 vfio_open_group_fd(int iommu_group_num)
266 char filename[PATH_MAX];
267 struct rte_mp_msg mp_req, *mp_rep;
268 struct rte_mp_reply mp_reply = {0};
269 struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
270 struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
271 const struct internal_config *internal_conf =
272 eal_get_internal_configuration();
274 /* if primary, try to open the group */
275 if (internal_conf->process_type == RTE_PROC_PRIMARY) {
276 /* try regular group format */
277 snprintf(filename, sizeof(filename),
278 VFIO_GROUP_FMT, iommu_group_num);
279 vfio_group_fd = open(filename, O_RDWR);
280 if (vfio_group_fd < 0) {
281 /* if file not found, it's not an error */
282 if (errno != ENOENT) {
283 RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename,
288 /* special case: try no-IOMMU path as well */
289 snprintf(filename, sizeof(filename),
290 VFIO_NOIOMMU_GROUP_FMT,
292 vfio_group_fd = open(filename, O_RDWR);
293 if (vfio_group_fd < 0) {
294 if (errno != ENOENT) {
295 RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename,
301 /* noiommu group found */
304 return vfio_group_fd;
306 /* if we're in a secondary process, request group fd from the primary
307 * process via mp channel.
309 p->req = SOCKET_REQ_GROUP;
310 p->group_num = iommu_group_num;
311 strcpy(mp_req.name, EAL_VFIO_MP);
312 mp_req.len_param = sizeof(*p);
316 if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
317 mp_reply.nb_received == 1) {
318 mp_rep = &mp_reply.msgs[0];
319 p = (struct vfio_mp_param *)mp_rep->param;
320 if (p->result == SOCKET_OK && mp_rep->num_fds == 1) {
321 vfio_group_fd = mp_rep->fds[0];
322 } else if (p->result == SOCKET_NO_FD) {
323 RTE_LOG(ERR, EAL, " bad VFIO group fd\n");
324 vfio_group_fd = -ENOENT;
329 if (vfio_group_fd < 0 && vfio_group_fd != -ENOENT)
330 RTE_LOG(ERR, EAL, " cannot request group fd\n");
331 return vfio_group_fd;
334 static struct vfio_config *
335 get_vfio_cfg_by_group_num(int iommu_group_num)
337 struct vfio_config *vfio_cfg;
340 for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
341 vfio_cfg = &vfio_cfgs[i];
342 for (j = 0; j < VFIO_MAX_GROUPS; j++) {
343 if (vfio_cfg->vfio_groups[j].group_num ==
353 vfio_get_group_fd(struct vfio_config *vfio_cfg,
358 struct vfio_group *cur_grp;
360 /* check if we already have the group descriptor open */
361 for (i = 0; i < VFIO_MAX_GROUPS; i++)
362 if (vfio_cfg->vfio_groups[i].group_num == iommu_group_num)
363 return vfio_cfg->vfio_groups[i].fd;
365 /* Let's first see if there is room for a new group */
366 if (vfio_cfg->vfio_active_groups == VFIO_MAX_GROUPS) {
367 RTE_LOG(ERR, EAL, "Maximum number of VFIO groups reached!\n");
371 /* Now let's get an index for the new group */
372 for (i = 0; i < VFIO_MAX_GROUPS; i++)
373 if (vfio_cfg->vfio_groups[i].group_num == -1) {
374 cur_grp = &vfio_cfg->vfio_groups[i];
378 /* This should not happen */
379 if (i == VFIO_MAX_GROUPS) {
380 RTE_LOG(ERR, EAL, "No VFIO group free slot found\n");
384 vfio_group_fd = vfio_open_group_fd(iommu_group_num);
385 if (vfio_group_fd < 0) {
386 RTE_LOG(ERR, EAL, "Failed to open group %d\n", iommu_group_num);
387 return vfio_group_fd;
390 cur_grp->group_num = iommu_group_num;
391 cur_grp->fd = vfio_group_fd;
392 vfio_cfg->vfio_active_groups++;
394 return vfio_group_fd;
397 static struct vfio_config *
398 get_vfio_cfg_by_group_fd(int vfio_group_fd)
400 struct vfio_config *vfio_cfg;
403 for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
404 vfio_cfg = &vfio_cfgs[i];
405 for (j = 0; j < VFIO_MAX_GROUPS; j++)
406 if (vfio_cfg->vfio_groups[j].fd == vfio_group_fd)
413 static struct vfio_config *
414 get_vfio_cfg_by_container_fd(int container_fd)
418 if (container_fd == RTE_VFIO_DEFAULT_CONTAINER_FD)
419 return default_vfio_cfg;
421 for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
422 if (vfio_cfgs[i].vfio_container_fd == container_fd)
423 return &vfio_cfgs[i];
430 rte_vfio_get_group_fd(int iommu_group_num)
432 struct vfio_config *vfio_cfg;
434 /* get the vfio_config it belongs to */
435 vfio_cfg = get_vfio_cfg_by_group_num(iommu_group_num);
436 vfio_cfg = vfio_cfg ? vfio_cfg : default_vfio_cfg;
438 return vfio_get_group_fd(vfio_cfg, iommu_group_num);
442 get_vfio_group_idx(int vfio_group_fd)
444 struct vfio_config *vfio_cfg;
447 for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
448 vfio_cfg = &vfio_cfgs[i];
449 for (j = 0; j < VFIO_MAX_GROUPS; j++)
450 if (vfio_cfg->vfio_groups[j].fd == vfio_group_fd)
458 vfio_group_device_get(int vfio_group_fd)
460 struct vfio_config *vfio_cfg;
463 vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
464 if (vfio_cfg == NULL) {
465 RTE_LOG(ERR, EAL, " invalid group fd!\n");
469 i = get_vfio_group_idx(vfio_group_fd);
470 if (i < 0 || i > (VFIO_MAX_GROUPS - 1))
471 RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
473 vfio_cfg->vfio_groups[i].devices++;
477 vfio_group_device_put(int vfio_group_fd)
479 struct vfio_config *vfio_cfg;
482 vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
483 if (vfio_cfg == NULL) {
484 RTE_LOG(ERR, EAL, " invalid group fd!\n");
488 i = get_vfio_group_idx(vfio_group_fd);
489 if (i < 0 || i > (VFIO_MAX_GROUPS - 1))
490 RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
492 vfio_cfg->vfio_groups[i].devices--;
496 vfio_group_device_count(int vfio_group_fd)
498 struct vfio_config *vfio_cfg;
501 vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
502 if (vfio_cfg == NULL) {
503 RTE_LOG(ERR, EAL, " invalid group fd!\n");
507 i = get_vfio_group_idx(vfio_group_fd);
508 if (i < 0 || i > (VFIO_MAX_GROUPS - 1)) {
509 RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
513 return vfio_cfg->vfio_groups[i].devices;
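/* Memory event callback: DMA-map newly allocated DPDK segments into the
 * default container and unmap freed ones. In IOVA-as-VA mode the event region
 * is mapped in one piece; otherwise the memsegs are scanned and each
 * IOVA-contiguous chunk is mapped separately, since VFIO does not merge
 * adjacent mappings for us.
 */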
517 vfio_mem_event_callback(enum rte_mem_event type, const void *addr, size_t len,
518 void *arg __rte_unused)
520 rte_iova_t iova_start, iova_expected;
521 struct rte_memseg_list *msl;
522 struct rte_memseg *ms;
526 msl = rte_mem_virt2memseg_list(addr);
528 /* for IOVA as VA mode, no need to care for IOVA addresses */
529 if (rte_eal_iova_mode() == RTE_IOVA_VA && msl->external == 0) {
530 uint64_t vfio_va = (uint64_t)(uintptr_t)addr;
531 if (type == RTE_MEM_EVENT_ALLOC)
532 vfio_dma_mem_map(default_vfio_cfg, vfio_va, vfio_va,
535 vfio_dma_mem_map(default_vfio_cfg, vfio_va, vfio_va,
540 /* memsegs are contiguous in memory */
541 ms = rte_mem_virt2memseg(addr, msl);
544 * This memory is not guaranteed to be contiguous, but it still could
545 * be, or it could have some small contiguous chunks. Since the number
546 * of VFIO mappings is limited, and VFIO appears to not concatenate
547 * adjacent mappings, we have to do this ourselves.
549 * So, find contiguous chunks, then map them.
551 va_start = ms->addr_64;
552 iova_start = iova_expected = ms->iova;
553 while (cur_len < len) {
554 bool new_contig_area = ms->iova != iova_expected;
555 bool last_seg = (len - cur_len) == ms->len;
556 bool skip_last = false;
558 /* only do mappings when current contiguous area ends */
559 if (new_contig_area) {
560 if (type == RTE_MEM_EVENT_ALLOC)
561 vfio_dma_mem_map(default_vfio_cfg, va_start,
563 iova_expected - iova_start, 1);
565 vfio_dma_mem_map(default_vfio_cfg, va_start,
567 iova_expected - iova_start, 0);
568 va_start = ms->addr_64;
569 iova_start = ms->iova;
571 /* some memory segments may have invalid IOVA */
572 if (ms->iova == RTE_BAD_IOVA) {
573 RTE_LOG(DEBUG, EAL, "Memory segment at %p has bad IOVA, skipping\n",
577 iova_expected = ms->iova + ms->len;
582 * don't count previous segment, and don't attempt to
583 * dereference a potentially invalid pointer.
585 if (skip_last && !last_seg) {
586 iova_expected = iova_start = ms->iova;
587 va_start = ms->addr_64;
588 } else if (!skip_last && last_seg) {
589 /* this is the last segment and we're not skipping */
590 if (type == RTE_MEM_EVENT_ALLOC)
591 vfio_dma_mem_map(default_vfio_cfg, va_start,
593 iova_expected - iova_start, 1);
595 vfio_dma_mem_map(default_vfio_cfg, va_start,
597 iova_expected - iova_start, 0);
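/* Secondary-process helper: ask the primary (via the mp channel) which IOMMU
 * type the default container was set up with, and point the local default
 * vfio_config at the matching entry in iommu_types[].
 */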
603 vfio_sync_default_container(void)
605 struct rte_mp_msg mp_req, *mp_rep;
606 struct rte_mp_reply mp_reply = {0};
607 struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
608 struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
612 /* cannot be called from primary */
613 if (rte_eal_process_type() != RTE_PROC_SECONDARY)
616 /* default container fd should have been opened in rte_vfio_enable() */
617 if (!default_vfio_cfg->vfio_enabled ||
618 default_vfio_cfg->vfio_container_fd < 0) {
619 RTE_LOG(ERR, EAL, "VFIO support is not initialized\n");
623 /* find default container's IOMMU type */
624 p->req = SOCKET_REQ_IOMMU_TYPE;
625 strcpy(mp_req.name, EAL_VFIO_MP);
626 mp_req.len_param = sizeof(*p);
630 if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
631 mp_reply.nb_received == 1) {
632 mp_rep = &mp_reply.msgs[0];
633 p = (struct vfio_mp_param *)mp_rep->param;
634 if (p->result == SOCKET_OK)
635 iommu_type_id = p->iommu_type_id;
638 if (iommu_type_id < 0) {
639 RTE_LOG(ERR, EAL, "Could not get IOMMU type for default container\n");
643 /* we now have an fd for default container, as well as its IOMMU type.
644 * now, set up default VFIO container config to match.
646 for (i = 0; i < RTE_DIM(iommu_types); i++) {
647 const struct vfio_iommu_type *t = &iommu_types[i];
648 if (t->type_id != iommu_type_id)
651 /* we found our IOMMU type */
652 default_vfio_cfg->vfio_iommu_type = t;
656 RTE_LOG(ERR, EAL, "Could not find IOMMU type id (%i)\n",
662 rte_vfio_clear_group(int vfio_group_fd)
665 struct vfio_config *vfio_cfg;
667 vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
668 if (vfio_cfg == NULL) {
669 RTE_LOG(ERR, EAL, " invalid group fd!\n");
673 i = get_vfio_group_idx(vfio_group_fd);
676 vfio_cfg->vfio_groups[i].group_num = -1;
677 vfio_cfg->vfio_groups[i].fd = -1;
678 vfio_cfg->vfio_groups[i].devices = 0;
679 vfio_cfg->vfio_active_groups--;
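/* Typical usage from a bus driver is sketched below; the sysfs base and
 * device address are illustrative, and error handling is omitted:
 *
 *	struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
 *	int dev_fd;
 *
 *	if (rte_vfio_setup_device("/sys/bus/pci/devices", "0000:00:01.0",
 *			&dev_fd, &device_info) == 0)
 *		... dev_fd can now be used for VFIO device ioctls ...
 */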
685 rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
686 int *vfio_dev_fd, struct vfio_device_info *device_info)
688 struct vfio_group_status group_status = {
689 .argsz = sizeof(group_status)
691 struct vfio_config *vfio_cfg;
692 struct user_mem_maps *user_mem_maps;
693 int vfio_container_fd;
698 const struct internal_config *internal_conf =
699 eal_get_internal_configuration();
701 /* get group number */
702 ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_num);
704 RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
709 /* if negative, something failed */
713 /* get the actual group fd */
714 vfio_group_fd = rte_vfio_get_group_fd(iommu_group_num);
715 if (vfio_group_fd < 0 && vfio_group_fd != -ENOENT)
719 * if vfio_group_fd == -ENOENT, that means the device
720 * isn't managed by VFIO
722 if (vfio_group_fd == -ENOENT) {
723 RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
729 * at this point, we know that this group is viable (meaning, all devices
730 * are either bound to VFIO or not bound to anything)
733 /* check if the group is viable */
734 ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &group_status);
736 RTE_LOG(ERR, EAL, " %s cannot get group status, "
737 "error %i (%s)\n", dev_addr, errno, strerror(errno));
738 close(vfio_group_fd);
739 rte_vfio_clear_group(vfio_group_fd);
741 } else if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
742 RTE_LOG(ERR, EAL, " %s VFIO group is not viable! "
743 "Not all devices in IOMMU group bound to VFIO or unbound\n",
745 close(vfio_group_fd);
746 rte_vfio_clear_group(vfio_group_fd);
750 /* get the vfio_config it belongs to */
751 vfio_cfg = get_vfio_cfg_by_group_num(iommu_group_num);
752 vfio_cfg = vfio_cfg ? vfio_cfg : default_vfio_cfg;
753 vfio_container_fd = vfio_cfg->vfio_container_fd;
754 user_mem_maps = &vfio_cfg->mem_maps;
756 /* check if group does not have a container yet */
757 if (!(group_status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
759 /* add group to a container */
760 ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER,
763 RTE_LOG(ERR, EAL, " %s cannot add VFIO group to container, "
764 "error %i (%s)\n", dev_addr, errno, strerror(errno));
765 close(vfio_group_fd);
766 rte_vfio_clear_group(vfio_group_fd);
771 * pick an IOMMU type and set up DMA mappings for container
773 * needs to be done only once, only when first group is
774 * assigned to a container and only in primary process.
775 * Note this can happen several times with the hotplug
778 if (internal_conf->process_type == RTE_PROC_PRIMARY &&
779 vfio_cfg->vfio_active_groups == 1 &&
780 vfio_group_device_count(vfio_group_fd) == 0) {
781 const struct vfio_iommu_type *t;
783 /* select an IOMMU type which we will be using */
784 t = vfio_set_iommu_type(vfio_container_fd);
787 " %s failed to select IOMMU type\n",
789 close(vfio_group_fd);
790 rte_vfio_clear_group(vfio_group_fd);
793 /* lock memory hotplug before mapping and release it
794 * after registering callback, to prevent races
796 rte_mcfg_mem_read_lock();
797 if (vfio_cfg == default_vfio_cfg)
798 ret = t->dma_map_func(vfio_container_fd);
803 " %s DMA remapping failed, error %i (%s)\n",
804 dev_addr, errno, strerror(errno));
805 close(vfio_group_fd);
806 rte_vfio_clear_group(vfio_group_fd);
807 rte_mcfg_mem_read_unlock();
811 vfio_cfg->vfio_iommu_type = t;
813 /* re-map all user-mapped segments */
814 rte_spinlock_recursive_lock(&user_mem_maps->lock);
816 /* this IOMMU type may not support DMA mapping, but
817 * if we have mappings in the list - that means we have
818 * previously mapped something successfully, so we can
819 * be sure that DMA mapping is supported.
821 for (i = 0; i < user_mem_maps->n_maps; i++) {
822 struct user_mem_map *map;
823 map = &user_mem_maps->maps[i];
825 ret = t->dma_user_map_func(
827 map->addr, map->iova, map->len,
830 RTE_LOG(ERR, EAL, "Couldn't map user memory for DMA: "
832 "iova: 0x%" PRIx64 " "
833 "len: 0x%" PRIu64 "\n",
834 map->addr, map->iova,
836 rte_spinlock_recursive_unlock(
837 &user_mem_maps->lock);
838 rte_mcfg_mem_read_unlock();
842 rte_spinlock_recursive_unlock(&user_mem_maps->lock);
844 /* register callback for mem events */
845 if (vfio_cfg == default_vfio_cfg)
846 ret = rte_mem_event_callback_register(
847 VFIO_MEM_EVENT_CLB_NAME,
848 vfio_mem_event_callback, NULL);
851 /* unlock memory hotplug */
852 rte_mcfg_mem_read_unlock();
854 if (ret && rte_errno != ENOTSUP) {
855 RTE_LOG(ERR, EAL, "Could not install memory event callback for VFIO\n");
859 RTE_LOG(DEBUG, EAL, "Memory event callbacks not supported\n");
861 RTE_LOG(DEBUG, EAL, "Installed memory event callback for VFIO\n");
863 } else if (rte_eal_process_type() != RTE_PROC_PRIMARY &&
864 vfio_cfg == default_vfio_cfg &&
865 vfio_cfg->vfio_iommu_type == NULL) {
866 /* if we're not a primary process, we do not set up the VFIO
867 * container because it's already been set up by the primary
868 * process. instead, we simply ask the primary about VFIO type
869 * we are using, and set the VFIO config up appropriately.
871 ret = vfio_sync_default_container();
873 RTE_LOG(ERR, EAL, "Could not sync default VFIO container\n");
874 close(vfio_group_fd);
875 rte_vfio_clear_group(vfio_group_fd);
878 /* we have successfully initialized VFIO, notify user */
879 const struct vfio_iommu_type *t =
880 default_vfio_cfg->vfio_iommu_type;
881 RTE_LOG(INFO, EAL, " using IOMMU type %d (%s)\n",
882 t->type_id, t->name);
885 rte_eal_vfio_get_vf_token(vf_token);
887 /* try to get a file descriptor for the device using the VF token first */
888 if (!rte_uuid_is_null(vf_token)) {
889 char vf_token_str[RTE_UUID_STRLEN];
892 rte_uuid_unparse(vf_token, vf_token_str, sizeof(vf_token_str));
893 snprintf(dev, sizeof(dev),
894 "%s vf_token=%s", dev_addr, vf_token_str);
896 *vfio_dev_fd = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD,
898 if (*vfio_dev_fd >= 0)
902 /* get a file descriptor for the device */
903 *vfio_dev_fd = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD, dev_addr);
904 if (*vfio_dev_fd < 0) {
905 /* if we cannot get a device fd, this implies a problem with
906 * the VFIO group or the container not having IOMMU configured.
909 RTE_LOG(WARNING, EAL, "Getting a vfio_dev_fd for %s failed\n",
911 close(vfio_group_fd);
912 rte_vfio_clear_group(vfio_group_fd);
916 /* test and setup the device */
918 ret = ioctl(*vfio_dev_fd, VFIO_DEVICE_GET_INFO, device_info);
920 RTE_LOG(ERR, EAL, " %s cannot get device info, "
921 "error %i (%s)\n", dev_addr, errno,
924 close(vfio_group_fd);
925 rte_vfio_clear_group(vfio_group_fd);
928 vfio_group_device_get(vfio_group_fd);
934 rte_vfio_release_device(const char *sysfs_base, const char *dev_addr,
937 struct vfio_config *vfio_cfg;
942 /* we don't want any DMA mapping messages to come while we're detaching
943 * VFIO device, because this might be the last device and we might need
944 * to unregister the callback.
946 rte_mcfg_mem_read_lock();
948 /* get group number */
949 ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_num);
951 RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver\n",
953 /* This is an error at this point. */
958 /* get the actual group fd */
959 vfio_group_fd = rte_vfio_get_group_fd(iommu_group_num);
960 if (vfio_group_fd < 0) {
961 RTE_LOG(INFO, EAL, "rte_vfio_get_group_fd failed for %s\n",
967 /* get the vfio_config it belongs to */
968 vfio_cfg = get_vfio_cfg_by_group_num(iommu_group_num);
969 vfio_cfg = vfio_cfg ? vfio_cfg : default_vfio_cfg;
971 /* At this point we got an active group. Closing it will make the
972 * container detachment. If this is the last active group, VFIO kernel
973 * code will unset the container and the IOMMU mappings.
976 /* Closing a device */
977 if (close(vfio_dev_fd) < 0) {
978 RTE_LOG(INFO, EAL, "Error when closing vfio_dev_fd for %s\n",
984 /* A VFIO group can have several devices attached. The group should be
985 * closed only when no devices remain.
987 vfio_group_device_put(vfio_group_fd);
988 if (!vfio_group_device_count(vfio_group_fd)) {
990 if (close(vfio_group_fd) < 0) {
991 RTE_LOG(INFO, EAL, "Error when closing vfio_group_fd for %s\n",
997 if (rte_vfio_clear_group(vfio_group_fd) < 0) {
998 RTE_LOG(INFO, EAL, "Error when clearing group for %s\n",
1005 /* if there are no active device groups, unregister the callback to
1006 * avoid spurious attempts to map/unmap memory from VFIO.
1008 if (vfio_cfg == default_vfio_cfg && vfio_cfg->vfio_active_groups == 0 &&
1009 rte_eal_process_type() != RTE_PROC_SECONDARY)
1010 rte_mem_event_callback_unregister(VFIO_MEM_EVENT_CLB_NAME,
1017 rte_mcfg_mem_read_unlock();
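/* VFIO support is probed once during EAL initialization, roughly as sketched
 * below (the module names are illustrative):
 *
 *	if (rte_vfio_enable("vfio") < 0)
 *		return -1;
 *	if (rte_vfio_is_enabled("vfio"))
 *		... devices bound to a VFIO driver can be used via VFIO ...
 */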
1022 rte_vfio_enable(const char *modname)
1024 /* initialize group list */
1027 const struct internal_config *internal_conf =
1028 eal_get_internal_configuration();
1030 rte_spinlock_recursive_t lock = RTE_SPINLOCK_RECURSIVE_INITIALIZER;
1032 for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
1033 vfio_cfgs[i].vfio_container_fd = -1;
1034 vfio_cfgs[i].vfio_active_groups = 0;
1035 vfio_cfgs[i].vfio_iommu_type = NULL;
1036 vfio_cfgs[i].mem_maps.lock = lock;
1038 for (j = 0; j < VFIO_MAX_GROUPS; j++) {
1039 vfio_cfgs[i].vfio_groups[j].fd = -1;
1040 vfio_cfgs[i].vfio_groups[j].group_num = -1;
1041 vfio_cfgs[i].vfio_groups[j].devices = 0;
1045 /* inform the user that we are probing for VFIO */
1046 RTE_LOG(INFO, EAL, "Probing VFIO support...\n");
1048 /* check if vfio module is loaded */
1049 vfio_available = rte_eal_check_module(modname);
1051 /* return error directly */
1052 if (vfio_available == -1) {
1053 RTE_LOG(INFO, EAL, "Could not get loaded module details!\n");
1057 /* return 0 if VFIO modules not loaded */
1058 if (vfio_available == 0) {
1059 RTE_LOG(DEBUG, EAL, "VFIO modules not loaded, "
1060 "skipping VFIO support...\n");
1064 if (internal_conf->process_type == RTE_PROC_PRIMARY) {
1065 /* open a new container */
1066 default_vfio_cfg->vfio_container_fd =
1067 rte_vfio_get_container_fd();
1069 /* get the default container from the primary process */
1070 default_vfio_cfg->vfio_container_fd =
1071 vfio_get_default_container_fd();
1074 /* check if we have VFIO driver enabled */
1075 if (default_vfio_cfg->vfio_container_fd != -1) {
1076 RTE_LOG(INFO, EAL, "VFIO support initialized\n");
1077 default_vfio_cfg->vfio_enabled = 1;
1079 RTE_LOG(NOTICE, EAL, "VFIO support could not be initialized\n");
1086 rte_vfio_is_enabled(const char *modname)
1088 const int mod_available = rte_eal_check_module(modname) > 0;
1089 return default_vfio_cfg->vfio_enabled && mod_available;
1093 vfio_get_default_container_fd(void)
1095 struct rte_mp_msg mp_req, *mp_rep;
1096 struct rte_mp_reply mp_reply = {0};
1097 struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
1098 struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
1100 const struct internal_config *internal_conf =
1101 eal_get_internal_configuration();
1103 if (default_vfio_cfg->vfio_enabled)
1104 return default_vfio_cfg->vfio_container_fd;
1106 if (internal_conf->process_type == RTE_PROC_PRIMARY) {
1107 /* if we were secondary process we would try requesting
1108 * container fd from the primary, but we're the primary
1109 * process so just exit here
1114 p->req = SOCKET_REQ_DEFAULT_CONTAINER;
1115 strcpy(mp_req.name, EAL_VFIO_MP);
1116 mp_req.len_param = sizeof(*p);
1119 if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
1120 mp_reply.nb_received == 1) {
1121 mp_rep = &mp_reply.msgs[0];
1122 p = (struct vfio_mp_param *)mp_rep->param;
1123 if (p->result == SOCKET_OK && mp_rep->num_fds == 1) {
1124 container_fd = mp_rep->fds[0];
1125 free(mp_reply.msgs);
1126 return container_fd;
1130 free(mp_reply.msgs);
1131 RTE_LOG(ERR, EAL, " cannot request default container fd\n");
1136 vfio_get_iommu_type(void)
1138 if (default_vfio_cfg->vfio_iommu_type == NULL)
1141 return default_vfio_cfg->vfio_iommu_type->type_id;
1144 const struct vfio_iommu_type *
1145 vfio_set_iommu_type(int vfio_container_fd)
1148 for (idx = 0; idx < RTE_DIM(iommu_types); idx++) {
1149 const struct vfio_iommu_type *t = &iommu_types[idx];
1151 int ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU,
1154 RTE_LOG(INFO, EAL, " using IOMMU type %d (%s)\n",
1155 t->type_id, t->name);
1158 /* not an error, there may be more supported IOMMU types */
1159 RTE_LOG(DEBUG, EAL, " set IOMMU type %d (%s) failed, "
1160 "error %i (%s)\n", t->type_id, t->name, errno,
1163 /* if we didn't find a suitable IOMMU type, fail */
1168 vfio_has_supported_extensions(int vfio_container_fd)
1171 unsigned idx, n_extensions = 0;
1172 for (idx = 0; idx < RTE_DIM(iommu_types); idx++) {
1173 const struct vfio_iommu_type *t = &iommu_types[idx];
1175 ret = ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION,
1178 RTE_LOG(ERR, EAL, " could not get IOMMU type, "
1179 "error %i (%s)\n", errno,
1181 close(vfio_container_fd);
1183 } else if (ret == 1) {
1184 /* we found a supported extension */
1187 RTE_LOG(DEBUG, EAL, " IOMMU type %d (%s) is %s\n",
1188 t->type_id, t->name,
1189 ret ? "supported" : "not supported");
1192 /* if we didn't find any supported IOMMU types, fail */
1193 if (!n_extensions) {
1194 close(vfio_container_fd);
1202 rte_vfio_get_container_fd(void)
1204 int ret, vfio_container_fd;
1205 struct rte_mp_msg mp_req, *mp_rep;
1206 struct rte_mp_reply mp_reply = {0};
1207 struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
1208 struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
1209 const struct internal_config *internal_conf =
1210 eal_get_internal_configuration();
1213 /* if we're in a primary process, try to open the container */
1214 if (internal_conf->process_type == RTE_PROC_PRIMARY) {
1215 vfio_container_fd = open(VFIO_CONTAINER_PATH, O_RDWR);
1216 if (vfio_container_fd < 0) {
1217 RTE_LOG(ERR, EAL, " cannot open VFIO container, "
1218 "error %i (%s)\n", errno, strerror(errno));
1222 /* check VFIO API version */
1223 ret = ioctl(vfio_container_fd, VFIO_GET_API_VERSION);
1224 if (ret != VFIO_API_VERSION) {
1226 RTE_LOG(ERR, EAL, " could not get VFIO API version, "
1227 "error %i (%s)\n", errno, strerror(errno));
1229 RTE_LOG(ERR, EAL, " unsupported VFIO API version!\n");
1230 close(vfio_container_fd);
1234 ret = vfio_has_supported_extensions(vfio_container_fd);
1236 RTE_LOG(ERR, EAL, " no supported IOMMU "
1237 "extensions found!\n");
1241 return vfio_container_fd;
1244 * if we're in a secondary process, request container fd from the
1245 * primary process via mp channel
1247 p->req = SOCKET_REQ_CONTAINER;
1248 strcpy(mp_req.name, EAL_VFIO_MP);
1249 mp_req.len_param = sizeof(*p);
1252 vfio_container_fd = -1;
1253 if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
1254 mp_reply.nb_received == 1) {
1255 mp_rep = &mp_reply.msgs[0];
1256 p = (struct vfio_mp_param *)mp_rep->param;
1257 if (p->result == SOCKET_OK && mp_rep->num_fds == 1) {
1258 vfio_container_fd = mp_rep->fds[0];
1259 free(mp_reply.msgs);
1260 return vfio_container_fd;
1264 free(mp_reply.msgs);
1265 RTE_LOG(ERR, EAL, " cannot request container fd\n");
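/* The IOMMU group number is taken from the last component of the device's
 * iommu_group sysfs link, e.g. (illustrative path)
 * /sys/bus/pci/devices/0000:00:01.0/iommu_group -> .../kernel/iommu_groups/42
 * yields group number 42.
 */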
1270 rte_vfio_get_group_num(const char *sysfs_base,
1271 const char *dev_addr, int *iommu_group_num)
1273 char linkname[PATH_MAX];
1274 char filename[PATH_MAX];
1275 char *tok[16], *group_tok, *end;
1278 memset(linkname, 0, sizeof(linkname));
1279 memset(filename, 0, sizeof(filename));
1281 /* try to find out IOMMU group for this device */
1282 snprintf(linkname, sizeof(linkname),
1283 "%s/%s/iommu_group", sysfs_base, dev_addr);
1285 ret = readlink(linkname, filename, sizeof(filename));
1287 /* if the link doesn't exist, no VFIO for us */
1291 ret = rte_strsplit(filename, sizeof(filename),
1292 tok, RTE_DIM(tok), '/');
1295 RTE_LOG(ERR, EAL, " %s cannot get IOMMU group\n", dev_addr);
1299 /* IOMMU group is always the last token */
1301 group_tok = tok[ret - 1];
1303 *iommu_group_num = strtol(group_tok, &end, 10);
1304 if ((end != group_tok && *end != '\0') || errno != 0) {
1305 RTE_LOG(ERR, EAL, " %s error parsing IOMMU number!\n", dev_addr);
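/* Memseg walk callbacks for setting up a type1 (x86) container: in
 * IOVA-as-VA mode whole contiguous chunks are mapped via type1_map_contig,
 * while type1_map maps individual segments and handles the remaining
 * (external) memory.
 */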
1313 type1_map_contig(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
1314 size_t len, void *arg)
1316 int *vfio_container_fd = arg;
1321 return vfio_type1_dma_mem_map(*vfio_container_fd, ms->addr_64, ms->iova,
1326 type1_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
1329 int *vfio_container_fd = arg;
1331 /* skip external memory that isn't a heap */
1332 if (msl->external && !msl->heap)
1335 /* skip any segments with invalid IOVA addresses */
1336 if (ms->iova == RTE_BAD_IOVA)
1339 /* if IOVA mode is VA, we've already mapped the internal segments */
1340 if (!msl->external && rte_eal_iova_mode() == RTE_IOVA_VA)
1343 return vfio_type1_dma_mem_map(*vfio_container_fd, ms->addr_64, ms->iova,
1348 vfio_type1_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
1349 uint64_t len, int do_map)
1351 struct vfio_iommu_type1_dma_map dma_map;
1352 struct vfio_iommu_type1_dma_unmap dma_unmap;
1356 memset(&dma_map, 0, sizeof(dma_map));
1357 dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
1358 dma_map.vaddr = vaddr;
1360 dma_map.iova = iova;
1361 dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
1362 VFIO_DMA_MAP_FLAG_WRITE;
1364 ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
1367 * In case the mapping was already done EEXIST will be
1368 * returned from kernel.
1370 if (errno == EEXIST) {
1372 " Memory segment is already mapped,"
1376 " cannot set up DMA remapping,"
1378 errno, strerror(errno));
1383 memset(&dma_unmap, 0, sizeof(dma_unmap));
1384 dma_unmap.argsz = sizeof(struct vfio_iommu_type1_dma_unmap);
1385 dma_unmap.size = len;
1386 dma_unmap.iova = iova;
1388 ret = ioctl(vfio_container_fd, VFIO_IOMMU_UNMAP_DMA,
1391 RTE_LOG(ERR, EAL, " cannot clear DMA remapping, error %i (%s)\n",
1392 errno, strerror(errno));
1401 vfio_type1_dma_map(int vfio_container_fd)
1403 if (rte_eal_iova_mode() == RTE_IOVA_VA) {
1404 /* with IOVA as VA mode, we can get away with mapping contiguous
1405 * chunks rather than going page-by-page.
1407 int ret = rte_memseg_contig_walk(type1_map_contig,
1408 &vfio_container_fd);
1411 /* we have to continue the walk because we've skipped the
1412 * external segments during the config walk.
1415 return rte_memseg_walk(type1_map, &vfio_container_fd);
1418 /* Track the size of the statically allocated DMA window for SPAPR */
1419 uint64_t spapr_dma_win_len;
1420 uint64_t spapr_dma_win_page_sz;
1423 vfio_spapr_dma_do_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
1424 uint64_t len, int do_map)
1426 struct vfio_iommu_spapr_register_memory reg = {
1427 .argsz = sizeof(reg),
1428 .vaddr = (uintptr_t) vaddr,
1435 struct vfio_iommu_type1_dma_map dma_map;
1437 if (iova + len > spapr_dma_win_len) {
1438 RTE_LOG(ERR, EAL, " dma map attempt outside DMA window\n");
1442 ret = ioctl(vfio_container_fd,
1443 VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
1445 RTE_LOG(ERR, EAL, " cannot register vaddr for IOMMU, "
1446 "error %i (%s)\n", errno, strerror(errno));
1450 memset(&dma_map, 0, sizeof(dma_map));
1451 dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
1452 dma_map.vaddr = vaddr;
1454 dma_map.iova = iova;
1455 dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
1456 VFIO_DMA_MAP_FLAG_WRITE;
1458 ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
1460 RTE_LOG(ERR, EAL, " cannot map vaddr for IOMMU, error %i (%s)\n",
1461 errno, strerror(errno));
1466 struct vfio_iommu_type1_dma_unmap dma_unmap;
1468 memset(&dma_unmap, 0, sizeof(dma_unmap));
1469 dma_unmap.argsz = sizeof(struct vfio_iommu_type1_dma_unmap);
1470 dma_unmap.size = len;
1471 dma_unmap.iova = iova;
1473 ret = ioctl(vfio_container_fd, VFIO_IOMMU_UNMAP_DMA,
1476 RTE_LOG(ERR, EAL, " cannot unmap vaddr for IOMMU, error %i (%s)\n",
1477 errno, strerror(errno));
1481 ret = ioctl(vfio_container_fd,
1482 VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
1484 RTE_LOG(ERR, EAL, " cannot unregister vaddr for IOMMU, error %i (%s)\n",
1485 errno, strerror(errno));
1494 vfio_spapr_map_walk(const struct rte_memseg_list *msl,
1495 const struct rte_memseg *ms, void *arg)
1497 int *vfio_container_fd = arg;
1499 /* skip external memory that isn't a heap */
1500 if (msl->external && !msl->heap)
1503 /* skip any segments with invalid IOVA addresses */
1504 if (ms->iova == RTE_BAD_IOVA)
1507 return vfio_spapr_dma_do_map(*vfio_container_fd,
1508 ms->addr_64, ms->iova, ms->len, 1);
1511 struct spapr_size_walk_param {
1514 bool is_user_managed;
1518 * In order to set the DMA window size required for the SPAPR IOMMU
1519 * we need to walk the existing virtual memory allocations as well as
1520 * find the hugepage size used.
1523 vfio_spapr_size_walk(const struct rte_memseg_list *msl, void *arg)
1525 struct spapr_size_walk_param *param = arg;
1526 uint64_t max = (uint64_t) msl->base_va + (uint64_t) msl->len;
1528 if (msl->external && !msl->heap) {
1529 /* ignore user managed external memory */
1530 param->is_user_managed = true;
1534 if (max > param->max_va) {
1535 param->page_sz = msl->page_sz;
1536 param->max_va = max;
1543 * Find the highest memory address used in physical or virtual address
1544 * space and use that as the top of the DMA window.
1547 find_highest_mem_addr(struct spapr_size_walk_param *param)
1549 /* find the maximum IOVA address for setting the DMA window size */
1550 if (rte_eal_iova_mode() == RTE_IOVA_PA) {
1551 static const char proc_iomem[] = "/proc/iomem";
1552 static const char str_sysram[] = "System RAM";
1553 uint64_t start, end, max = 0;
1559 * Example "System RAM" in /proc/iomem:
1560 * 00000000-1fffffffff : System RAM
1561 * 200000000000-201fffffffff : System RAM
1563 FILE *fd = fopen(proc_iomem, "r");
1565 RTE_LOG(ERR, EAL, "Cannot open %s\n", proc_iomem);
1568 /* Scan /proc/iomem for the highest PA in the system */
1569 while (getline(&line, &line_len, fd) != -1) {
1570 if (strstr(line, str_sysram) == NULL)
1573 space = strstr(line, " ");
1574 dash = strstr(line, "-");
1576 /* Validate the format of the memory string */
1577 if (space == NULL || dash == NULL || space < dash) {
1578 RTE_LOG(ERR, EAL, "Can't parse line \"%s\" in file %s\n",
1583 start = strtoull(line, NULL, 16);
1584 end = strtoull(dash + 1, NULL, 16);
1585 RTE_LOG(DEBUG, EAL, "Found system RAM from 0x%" PRIx64
1586 " to 0x%" PRIx64 "\n", start, end);
1594 RTE_LOG(ERR, EAL, "Failed to find valid \"System RAM\" "
1595 "entry in file %s\n", proc_iomem);
1599 spapr_dma_win_len = rte_align64pow2(max + 1);
1601 } else if (rte_eal_iova_mode() == RTE_IOVA_VA) {
1602 RTE_LOG(DEBUG, EAL, "Highest VA address in memseg list is 0x%"
1603 PRIx64 "\n", param->max_va);
1604 spapr_dma_win_len = rte_align64pow2(param->max_va);
1608 spapr_dma_win_len = 0;
1609 RTE_LOG(ERR, EAL, "Unsupported IOVA mode\n");
1615 * The SPAPRv2 IOMMU supports 2 DMA windows with starting
1616 * address at 0 or 1<<59. By default, a DMA window is set
1617 * at address 0, 2GB long, with a 4KB page. For DPDK we
1618 * must remove the default window and setup a new DMA window
1619 * based on the hugepage size and memory requirements of
1620 * the application before we can map memory for DMA.
1623 spapr_dma_win_size(void)
1625 struct spapr_size_walk_param param;
1627 /* only create DMA window once */
1628 if (spapr_dma_win_len > 0)
1631 /* walk the memseg list to find the page size/max VA address */
1632 memset(&param, 0, sizeof(param));
1633 if (rte_memseg_list_walk(vfio_spapr_size_walk, &param) < 0) {
1634 RTE_LOG(ERR, EAL, "Failed to walk memseg list for DMA window size\n");
1638 /* we can't be sure if DMA window covers external memory */
1639 if (param.is_user_managed)
1640 RTE_LOG(WARNING, EAL, "Detected user managed external memory which may not be managed by the IOMMU\n");
1642 /* check physical/virtual memory size */
1643 if (find_highest_mem_addr(&param) < 0)
1645 RTE_LOG(DEBUG, EAL, "Setting DMA window size to 0x%" PRIx64 "\n",
1647 spapr_dma_win_page_sz = param.page_sz;
1648 rte_mem_set_dma_mask(__builtin_ctzll(spapr_dma_win_len));
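/* For example, on a machine whose highest "System RAM" range in /proc/iomem
 * ends at 0x1fffffffff, rte_align64pow2(max + 1) yields a DMA window length
 * of 0x2000000000 (128 GiB).
 */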
1653 vfio_spapr_create_dma_window(int vfio_container_fd)
1655 struct vfio_iommu_spapr_tce_create create = {
1656 .argsz = sizeof(create), };
1657 struct vfio_iommu_spapr_tce_remove remove = {
1658 .argsz = sizeof(remove), };
1659 struct vfio_iommu_spapr_tce_info info = {
1660 .argsz = sizeof(info), };
1663 ret = spapr_dma_win_size();
1667 ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
1669 RTE_LOG(ERR, EAL, " can't get iommu info, error %i (%s)\n",
1670 errno, strerror(errno));
1675 * sPAPR v1/v2 IOMMU always has a default 1G DMA window set. The window
1676 * can't be changed for v1 but it can be changed for v2. Since DPDK only
1677 * supports v2, remove the default DMA window so it can be resized.
1679 remove.start_addr = info.dma32_window_start;
1680 ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
1684 /* create a new DMA window (start address is not selectable) */
1685 create.window_size = spapr_dma_win_len;
1686 create.page_shift = __builtin_ctzll(spapr_dma_win_page_sz);
1688 ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
1689 #ifdef VFIO_IOMMU_SPAPR_INFO_DDW
1691 * The vfio_iommu_spapr_tce_info structure was modified in
1692 * Linux kernel 4.2.0 to add support for the
1693 * vfio_iommu_spapr_tce_ddw_info structure needed to try
1694 * multiple table levels. Skip the attempt if running with
1698 /* if at first we don't succeed, try more levels */
1701 for (levels = create.levels + 1;
1702 ret && levels <= info.ddw.levels; levels++) {
1703 create.levels = levels;
1704 ret = ioctl(vfio_container_fd,
1705 VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
1708 #endif /* VFIO_IOMMU_SPAPR_INFO_DDW */
1710 RTE_LOG(ERR, EAL, " cannot create new DMA window, error %i (%s)\n",
1711 errno, strerror(errno));
1712 RTE_LOG(ERR, EAL, " consider using a larger hugepage size "
1713 "if supported by the system\n");
1717 /* verify the start address */
1718 if (create.start_addr != 0) {
1719 RTE_LOG(ERR, EAL, " received unsupported start address 0x%"
1720 PRIx64 "\n", (uint64_t)create.start_addr);
1727 vfio_spapr_dma_mem_map(int vfio_container_fd, uint64_t vaddr,
1728 uint64_t iova, uint64_t len, int do_map)
1733 if (vfio_spapr_dma_do_map(vfio_container_fd,
1734 vaddr, iova, len, 1)) {
1735 RTE_LOG(ERR, EAL, "Failed to map DMA\n");
1739 if (vfio_spapr_dma_do_map(vfio_container_fd,
1740 vaddr, iova, len, 0)) {
1741 RTE_LOG(ERR, EAL, "Failed to unmap DMA\n");
1750 vfio_spapr_dma_map(int vfio_container_fd)
1752 if (vfio_spapr_create_dma_window(vfio_container_fd) < 0) {
1753 RTE_LOG(ERR, EAL, "Could not create new DMA window!\n");
1757 /* map all existing DPDK segments for DMA */
1758 if (rte_memseg_walk(vfio_spapr_map_walk, &vfio_container_fd) < 0)
1765 vfio_noiommu_dma_map(int __rte_unused vfio_container_fd)
1767 /* No-IOMMU mode does not need DMA mapping */
1772 vfio_noiommu_dma_mem_map(int __rte_unused vfio_container_fd,
1773 uint64_t __rte_unused vaddr,
1774 uint64_t __rte_unused iova, uint64_t __rte_unused len,
1775 int __rte_unused do_map)
1777 /* No-IOMMU mode does not need DMA mapping */
1782 vfio_dma_mem_map(struct vfio_config *vfio_cfg, uint64_t vaddr, uint64_t iova,
1783 uint64_t len, int do_map)
1785 const struct vfio_iommu_type *t = vfio_cfg->vfio_iommu_type;
1788 RTE_LOG(ERR, EAL, " VFIO support not initialized\n");
1793 if (!t->dma_user_map_func) {
1795 " VFIO custom DMA region maping not supported by IOMMU %s\n",
1797 rte_errno = ENOTSUP;
1801 return t->dma_user_map_func(vfio_cfg->vfio_container_fd, vaddr, iova,
1806 container_dma_map(struct vfio_config *vfio_cfg, uint64_t vaddr, uint64_t iova,
1809 struct user_mem_map *new_map;
1810 struct user_mem_maps *user_mem_maps;
1813 user_mem_maps = &vfio_cfg->mem_maps;
1814 rte_spinlock_recursive_lock(&user_mem_maps->lock);
1815 if (user_mem_maps->n_maps == VFIO_MAX_USER_MEM_MAPS) {
1816 RTE_LOG(ERR, EAL, "No more space for user mem maps\n");
1822 if (vfio_dma_mem_map(vfio_cfg, vaddr, iova, len, 1)) {
1823 /* technically, this will fail only because there are currently no
1824 * devices plugged in; had a device been added later, the mapping might
1825 * have succeeded. however, since we cannot verify whether a mapping is
1826 * valid without a device attached, consider it unsupported, because we
1827 * can't just store arbitrary mappings and pollute the list of active
1828 * mappings.
1830 RTE_LOG(ERR, EAL, "Couldn't map new region for DMA\n");
1834 /* create new user mem map entry */
1835 new_map = &user_mem_maps->maps[user_mem_maps->n_maps++];
1836 new_map->addr = vaddr;
1837 new_map->iova = iova;
1840 compact_user_maps(user_mem_maps);
1842 rte_spinlock_recursive_unlock(&user_mem_maps->lock);
1847 container_dma_unmap(struct vfio_config *vfio_cfg, uint64_t vaddr, uint64_t iova,
1850 struct user_mem_map *map, *new_map = NULL;
1851 struct user_mem_maps *user_mem_maps;
1854 user_mem_maps = &vfio_cfg->mem_maps;
1855 rte_spinlock_recursive_lock(&user_mem_maps->lock);
1857 /* find our mapping */
1858 map = find_user_mem_map(user_mem_maps, vaddr, iova, len);
1860 RTE_LOG(ERR, EAL, "Couldn't find previously mapped region\n");
1865 if (map->addr != vaddr || map->iova != iova || map->len != len) {
1866 /* we're partially unmapping a previously mapped region, so we
1867 * need to split the entry in two.
1869 if (user_mem_maps->n_maps == VFIO_MAX_USER_MEM_MAPS) {
1870 RTE_LOG(ERR, EAL, "Not enough space to store partial mapping\n");
1875 new_map = &user_mem_maps->maps[user_mem_maps->n_maps++];
1878 /* unmap the entry */
1879 if (vfio_dma_mem_map(vfio_cfg, vaddr, iova, len, 0)) {
1880 /* there may not be any devices plugged in, so unmapping will
1881 * fail with ENODEV/ENOTSUP rte_errno values, but that doesn't
1882 * stop us from removing the mapping, as the assumption is we
1883 * won't be needing this memory any more and thus will want to
1884 * prevent it from being remapped again on hotplug. so, only
1885 * fail if we indeed failed to unmap (e.g. if the mapping was
1886 * within our mapped range but had invalid alignment).
1888 if (rte_errno != ENODEV && rte_errno != ENOTSUP) {
1889 RTE_LOG(ERR, EAL, "Couldn't unmap region for DMA\n");
1893 RTE_LOG(DEBUG, EAL, "DMA unmapping failed, but removing mappings anyway\n");
1896 /* remove map from the list of active mappings */
1897 if (new_map != NULL) {
1898 adjust_map(map, new_map, vaddr, len);
1900 /* if we've created a new map by splitting, sort everything */
1901 if (!is_null_map(new_map)) {
1902 compact_user_maps(user_mem_maps);
1904 /* we've created a new mapping, but it was unused */
1905 user_mem_maps->n_maps--;
1908 memset(map, 0, sizeof(*map));
1909 compact_user_maps(user_mem_maps);
1910 user_mem_maps->n_maps--;
1914 rte_spinlock_recursive_unlock(&user_mem_maps->lock);
1919 rte_vfio_noiommu_is_enabled(void)
1925 fd = open(VFIO_NOIOMMU_MODE, O_RDONLY);
1927 if (errno != ENOENT) {
1928 RTE_LOG(ERR, EAL, " cannot open vfio noiommu file %i (%s)\n",
1929 errno, strerror(errno));
1933 * else the file does not exist,
1934 * i.e. noiommu is not enabled
1939 cnt = read(fd, &c, 1);
1942 RTE_LOG(ERR, EAL, " unable to read from vfio noiommu "
1943 "file %i (%s)\n", errno, strerror(errno));
1951 rte_vfio_container_create(void)
1955 /* Find an empty slot to store new vfio config */
1956 for (i = 1; i < VFIO_MAX_CONTAINERS; i++) {
1957 if (vfio_cfgs[i].vfio_container_fd == -1)
1961 if (i == VFIO_MAX_CONTAINERS) {
1962 RTE_LOG(ERR, EAL, "exceed max vfio container limit\n");
1966 vfio_cfgs[i].vfio_container_fd = rte_vfio_get_container_fd();
1967 if (vfio_cfgs[i].vfio_container_fd < 0) {
1968 RTE_LOG(NOTICE, EAL, "Failed to create a new VFIO container\n");
1972 return vfio_cfgs[i].vfio_container_fd;
1976 rte_vfio_container_destroy(int container_fd)
1978 struct vfio_config *vfio_cfg;
1981 vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
1982 if (vfio_cfg == NULL) {
1983 RTE_LOG(ERR, EAL, "Invalid container fd\n");
1987 for (i = 0; i < VFIO_MAX_GROUPS; i++)
1988 if (vfio_cfg->vfio_groups[i].group_num != -1)
1989 rte_vfio_container_group_unbind(container_fd,
1990 vfio_cfg->vfio_groups[i].group_num);
1992 close(container_fd);
1993 vfio_cfg->vfio_container_fd = -1;
1994 vfio_cfg->vfio_active_groups = 0;
1995 vfio_cfg->vfio_iommu_type = NULL;
2001 rte_vfio_container_group_bind(int container_fd, int iommu_group_num)
2003 struct vfio_config *vfio_cfg;
2005 vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
2006 if (vfio_cfg == NULL) {
2007 RTE_LOG(ERR, EAL, "Invalid container fd\n");
2011 return vfio_get_group_fd(vfio_cfg, iommu_group_num);
2015 rte_vfio_container_group_unbind(int container_fd, int iommu_group_num)
2017 struct vfio_config *vfio_cfg;
2018 struct vfio_group *cur_grp = NULL;
2021 vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
2022 if (vfio_cfg == NULL) {
2023 RTE_LOG(ERR, EAL, "Invalid container fd\n");
2027 for (i = 0; i < VFIO_MAX_GROUPS; i++) {
2028 if (vfio_cfg->vfio_groups[i].group_num == iommu_group_num) {
2029 cur_grp = &vfio_cfg->vfio_groups[i];
2034 /* This should not happen */
2035 if (i == VFIO_MAX_GROUPS || cur_grp == NULL) {
2036 RTE_LOG(ERR, EAL, "Specified group number not found\n");
2040 if (cur_grp->fd >= 0 && close(cur_grp->fd) < 0) {
2041 RTE_LOG(ERR, EAL, "Error when closing vfio_group_fd for"
2042 " iommu_group_num %d\n", iommu_group_num);
2045 cur_grp->group_num = -1;
2047 cur_grp->devices = 0;
2048 vfio_cfg->vfio_active_groups--;
2054 rte_vfio_container_dma_map(int container_fd, uint64_t vaddr, uint64_t iova,
2057 struct vfio_config *vfio_cfg;
2064 vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
2065 if (vfio_cfg == NULL) {
2066 RTE_LOG(ERR, EAL, "Invalid container fd\n");
2070 return container_dma_map(vfio_cfg, vaddr, iova, len);
2074 rte_vfio_container_dma_unmap(int container_fd, uint64_t vaddr, uint64_t iova,
2077 struct vfio_config *vfio_cfg;
2084 vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
2085 if (vfio_cfg == NULL) {
2086 RTE_LOG(ERR, EAL, "Invalid container fd\n");
2090 return container_dma_unmap(vfio_cfg, vaddr, iova, len);
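/* A minimal sketch of the custom-container workflow built from the calls
 * above (the group number, addresses and length are illustrative):
 *
 *	int cfd = rte_vfio_container_create();
 *	rte_vfio_container_group_bind(cfd, 42);
 *	rte_vfio_container_dma_map(cfd, vaddr, iova, len);
 *	...
 *	rte_vfio_container_dma_unmap(cfd, vaddr, iova, len);
 *	rte_vfio_container_destroy(cfd);
 */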
2096 rte_vfio_setup_device(__rte_unused const char *sysfs_base,
2097 __rte_unused const char *dev_addr,
2098 __rte_unused int *vfio_dev_fd,
2099 __rte_unused struct vfio_device_info *device_info)
2105 rte_vfio_release_device(__rte_unused const char *sysfs_base,
2106 __rte_unused const char *dev_addr, __rte_unused int fd)
2112 rte_vfio_enable(__rte_unused const char *modname)
2118 rte_vfio_is_enabled(__rte_unused const char *modname)
2124 rte_vfio_noiommu_is_enabled(void)
2130 rte_vfio_clear_group(__rte_unused int vfio_group_fd)
2136 rte_vfio_get_group_num(__rte_unused const char *sysfs_base,
2137 __rte_unused const char *dev_addr,
2138 __rte_unused int *iommu_group_num)
2144 rte_vfio_get_container_fd(void)
2150 rte_vfio_get_group_fd(__rte_unused int iommu_group_num)
2156 rte_vfio_container_create(void)
2162 rte_vfio_container_destroy(__rte_unused int container_fd)
2168 rte_vfio_container_group_bind(__rte_unused int container_fd,
2169 __rte_unused int iommu_group_num)
2175 rte_vfio_container_group_unbind(__rte_unused int container_fd,
2176 __rte_unused int iommu_group_num)
2182 rte_vfio_container_dma_map(__rte_unused int container_fd,
2183 __rte_unused uint64_t vaddr,
2184 __rte_unused uint64_t iova,
2185 __rte_unused uint64_t len)
2191 rte_vfio_container_dma_unmap(__rte_unused int container_fd,
2192 __rte_unused uint64_t vaddr,
2193 __rte_unused uint64_t iova,
2194 __rte_unused uint64_t len)
2199 #endif /* VFIO_PRESENT */