/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <limits.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#include <rte_errno.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_eal_memconfig.h>
#include <rte_spinlock.h>
#include <rte_vfio.h>

#include "eal_filesystem.h"
#include "eal_vfio.h"
#include "eal_private.h"

#ifdef VFIO_PRESENT

/* per-process VFIO config */
static struct vfio_config vfio_cfg;

static int vfio_type1_dma_map(int);
static int vfio_type1_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
static int vfio_spapr_dma_map(int);
static int vfio_spapr_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
static int vfio_noiommu_dma_map(int);
static int vfio_noiommu_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
static int vfio_dma_mem_map(uint64_t vaddr, uint64_t iova, uint64_t len,
		int do_map);

/* IOMMU types we support */
static const struct vfio_iommu_type iommu_types[] = {
	/* x86 IOMMU, otherwise known as type 1 */
	{
		.type_id = RTE_VFIO_TYPE1,
		.name = "Type 1",
		.dma_map_func = &vfio_type1_dma_map,
		.dma_user_map_func = &vfio_type1_dma_mem_map
	},
	/* ppc64 IOMMU, otherwise known as spapr */
	{
		.type_id = RTE_VFIO_SPAPR,
		.name = "sPAPR",
		.dma_map_func = &vfio_spapr_dma_map,
		.dma_user_map_func = &vfio_spapr_dma_mem_map
	},
	/* IOMMU-less mode */
	{
		.type_id = RTE_VFIO_NOIOMMU,
		.name = "No-IOMMU",
		.dma_map_func = &vfio_noiommu_dma_map,
		.dma_user_map_func = &vfio_noiommu_dma_mem_map
	},
};

/* hot plug/unplug of VFIO groups may cause all DMA maps to be dropped. we can
 * recreate the mappings for DPDK segments, but we cannot do so for memory that
 * was registered by the user themselves, so we need to store the user mappings
 * somewhere, to recreate them later.
 */
#define VFIO_MAX_USER_MEM_MAPS 256
struct user_mem_map {
	uint64_t addr;
	uint64_t iova;
	uint64_t len;
};
static struct {
	rte_spinlock_t lock;
	int n_maps;
	struct user_mem_map maps[VFIO_MAX_USER_MEM_MAPS];
} user_mem_maps = {
	.lock = RTE_SPINLOCK_INITIALIZER
};

static int
is_null_map(const struct user_mem_map *map)
{
	return map->addr == 0 && map->iova == 0 && map->len == 0;
}

/* we may need to merge user mem maps together in case of user mapping/unmapping
 * chunks of memory, so we'll need a comparator function to sort segments.
 */
static int
user_mem_map_cmp(const void *a, const void *b)
{
	const struct user_mem_map *umm_a = a;
	const struct user_mem_map *umm_b = b;

	/* move null entries to end */
	if (is_null_map(umm_a))
		return 1;
	if (is_null_map(umm_b))
		return -1;

	/* sort by iova first */
	if (umm_a->iova < umm_b->iova)
		return -1;
	if (umm_a->iova > umm_b->iova)
		return 1;

	if (umm_a->addr < umm_b->addr)
		return -1;
	if (umm_a->addr > umm_b->addr)
		return 1;

	if (umm_a->len < umm_b->len)
		return -1;
	if (umm_a->len > umm_b->len)
		return 1;

	return 0;
}

/* adjust user map entry. this may result in shortening of existing map, or in
 * splitting existing map in two pieces.
 */
static void
adjust_map(struct user_mem_map *src, struct user_mem_map *end,
		uint64_t remove_va_start, uint64_t remove_len)
{
	/* if va start is same as start address, we're simply moving start */
	if (remove_va_start == src->addr) {
		src->addr += remove_len;
		src->iova += remove_len;
		src->len -= remove_len;
	} else if (remove_va_start + remove_len == src->addr + src->len) {
		/* we're shrinking mapping from the end */
		src->len -= remove_len;
	} else {
		/* we're blowing a hole in the middle */
		struct user_mem_map tmp;
		uint64_t total_len = src->len;

		/* adjust source segment length */
		src->len = remove_va_start - src->addr;

		/* create temporary segment in the middle */
		tmp.addr = src->addr + src->len;
		tmp.iova = src->iova + src->len;
		tmp.len = remove_len;

		/* populate end segment - this one we will be keeping */
		end->addr = tmp.addr + tmp.len;
		end->iova = tmp.iova + tmp.len;
		end->len = total_len - src->len - tmp.len;
	}
}

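/* Worked example (illustrative, not executed): removing the middle chunk
 * 0x4000..0x8000 from a 1:1 map of va/iova 0x0, len 0x10000 takes the third
 * branch above: 'src' shrinks to va 0x0, len 0x4000, and 'end' is populated
 * with va 0x8000, iova 0x8000, len 0x8000, leaving a hole where the removed
 * chunk used to be.
 */
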
/* try merging two maps into one, return 1 if succeeded */
static int
merge_map(struct user_mem_map *left, struct user_mem_map *right)
{
	if (left->addr + left->len != right->addr)
		return 0;
	if (left->iova + left->len != right->iova)
		return 0;

	left->len += right->len;

	memset(right, 0, sizeof(*right));

	return 1;
}

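/* Example (illustrative): {addr 0x0, iova 0x0, len 0x1000} and
 * {addr 0x1000, iova 0x1000, len 0x1000} are both VA- and IOVA-contiguous,
 * so they merge into {addr 0x0, iova 0x0, len 0x2000}; entries contiguous in
 * VA but not in IOVA (or vice versa) are left untouched.
 */
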
static struct user_mem_map *
find_user_mem_map(uint64_t addr, uint64_t iova, uint64_t len)
{
	uint64_t va_end = addr + len;
	uint64_t iova_end = iova + len;
	int i;

	for (i = 0; i < user_mem_maps.n_maps; i++) {
		struct user_mem_map *map = &user_mem_maps.maps[i];
		uint64_t map_va_end = map->addr + map->len;
		uint64_t map_iova_end = map->iova + map->len;

		/* check if VA start is within boundaries */
		if (addr < map->addr || addr >= map_va_end)
			continue;
		/* check if VA end is within boundaries */
		if (va_end <= map->addr || va_end > map_va_end)
			continue;

		/* check if IOVA start is within boundaries */
		if (iova < map->iova || iova >= map_iova_end)
			continue;
		/* check if IOVA end is within boundaries */
		if (iova_end <= map->iova || iova_end > map_iova_end)
			continue;

		/* we've found our map */
		return map;
	}
	return NULL;
}

/* this will sort all user maps, and merge/compact any adjacent maps */
static void
compact_user_maps(void)
{
	int i, n_merged, cur_idx;

	qsort(user_mem_maps.maps, user_mem_maps.n_maps,
			sizeof(user_mem_maps.maps[0]), user_mem_map_cmp);

	/* we'll go over the list backwards when merging */
	n_merged = 0;
	for (i = user_mem_maps.n_maps - 2; i >= 0; i--) {
		struct user_mem_map *l, *r;

		l = &user_mem_maps.maps[i];
		r = &user_mem_maps.maps[i + 1];

		if (is_null_map(l) || is_null_map(r))
			continue;

		if (merge_map(l, r))
			n_merged++;
	}

	/* the entries are still sorted, but now they have holes in them, so
	 * walk through the list and remove the holes
	 */
	if (n_merged > 0) {
		cur_idx = 0;
		for (i = 0; i < user_mem_maps.n_maps; i++) {
			if (!is_null_map(&user_mem_maps.maps[i])) {
				struct user_mem_map *src, *dst;

				src = &user_mem_maps.maps[i];
				dst = &user_mem_maps.maps[cur_idx++];

				if (src != dst) {
					memcpy(dst, src, sizeof(*src));
					memset(src, 0, sizeof(*src));
				}
			}
		}
		user_mem_maps.n_maps = cur_idx;
	}
}

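/* Example (illustrative): entries {va 0x2000, iova 0x2000, len 0x1000} and
 * {va 0x1000, iova 0x1000, len 0x1000} first sort by iova, then merge into a
 * single {va 0x1000, iova 0x1000, len 0x2000} entry; the null entry left
 * behind by merge_map() is compacted out and n_maps drops from 2 to 1.
 */
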
int
vfio_get_group_fd(int iommu_group_no)
{
	int i;
	int vfio_group_fd;
	char filename[PATH_MAX];
	struct vfio_group *cur_grp;

	/* check if we already have the group descriptor open */
	for (i = 0; i < VFIO_MAX_GROUPS; i++)
		if (vfio_cfg.vfio_groups[i].group_no == iommu_group_no)
			return vfio_cfg.vfio_groups[i].fd;

	/* let's first see if there is room for a new group */
	if (vfio_cfg.vfio_active_groups == VFIO_MAX_GROUPS) {
		RTE_LOG(ERR, EAL, "Maximum number of VFIO groups reached!\n");
		return -1;
	}

	/* now let's get an index for the new group */
	for (i = 0; i < VFIO_MAX_GROUPS; i++)
		if (vfio_cfg.vfio_groups[i].group_no == -1) {
			cur_grp = &vfio_cfg.vfio_groups[i];
			break;
		}

	/* This should not happen */
	if (i == VFIO_MAX_GROUPS) {
		RTE_LOG(ERR, EAL, "No VFIO group free slot found\n");
		return -1;
	}

	/* if primary, try to open the group */
	if (internal_config.process_type == RTE_PROC_PRIMARY) {
		/* try regular group format */
		snprintf(filename, sizeof(filename),
				VFIO_GROUP_FMT, iommu_group_no);
		vfio_group_fd = open(filename, O_RDWR);
		if (vfio_group_fd < 0) {
			/* if file not found, it's not an error */
			if (errno != ENOENT) {
				RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename,
						strerror(errno));
				return -1;
			}

			/* special case: try no-IOMMU path as well */
			snprintf(filename, sizeof(filename),
					VFIO_NOIOMMU_GROUP_FMT, iommu_group_no);
			vfio_group_fd = open(filename, O_RDWR);
			if (vfio_group_fd < 0) {
				if (errno != ENOENT) {
					RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename,
							strerror(errno));
					return -1;
				}
				return 0;
			}
			/* noiommu group found */
		}

		cur_grp->group_no = iommu_group_no;
		cur_grp->fd = vfio_group_fd;
		vfio_cfg.vfio_active_groups++;
		return vfio_group_fd;
	}

	/* if we're in a secondary process, request group fd from the primary
	 * process via our socket
	 */
	{
		int socket_fd, ret;

		socket_fd = vfio_mp_sync_connect_to_primary();
		if (socket_fd < 0) {
			RTE_LOG(ERR, EAL, " cannot connect to primary process!\n");
			return -1;
		}
		if (vfio_mp_sync_send_request(socket_fd, SOCKET_REQ_GROUP) < 0) {
			RTE_LOG(ERR, EAL, " cannot request group fd!\n");
			close(socket_fd);
			return -1;
		}
		if (vfio_mp_sync_send_request(socket_fd, iommu_group_no) < 0) {
			RTE_LOG(ERR, EAL, " cannot send group number!\n");
			close(socket_fd);
			return -1;
		}
		ret = vfio_mp_sync_receive_request(socket_fd);
		switch (ret) {
		case SOCKET_NO_FD:
			close(socket_fd);
			return 0;
		case SOCKET_OK:
			vfio_group_fd = vfio_mp_sync_receive_fd(socket_fd);
			/* if we got the fd, store it and return it */
			if (vfio_group_fd > 0) {
				close(socket_fd);
				cur_grp->group_no = iommu_group_no;
				cur_grp->fd = vfio_group_fd;
				vfio_cfg.vfio_active_groups++;
				return vfio_group_fd;
			}
			/* fall-through on error */
		default:
			RTE_LOG(ERR, EAL, " cannot get group fd!\n");
			close(socket_fd);
			return -1;
		}
	}
}

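/* For reference: with VFIO_GROUP_FMT defined as "/dev/vfio/%u" and
 * VFIO_NOIOMMU_GROUP_FMT as "/dev/vfio/noiommu-%u" (see eal_vfio.h), IOMMU
 * group 42 is opened as "/dev/vfio/42", or as "/dev/vfio/noiommu-42" when
 * the kernel runs VFIO in unsafe no-IOMMU mode.
 */
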
static int
get_vfio_group_idx(int vfio_group_fd)
{
	int i;
	for (i = 0; i < VFIO_MAX_GROUPS; i++)
		if (vfio_cfg.vfio_groups[i].fd == vfio_group_fd)
			return i;
	return -1;
}

static void
vfio_group_device_get(int vfio_group_fd)
{
	int i;

	i = get_vfio_group_idx(vfio_group_fd);
	if (i < 0 || i > (VFIO_MAX_GROUPS - 1))
		RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
	else
		vfio_cfg.vfio_groups[i].devices++;
}

static void
vfio_group_device_put(int vfio_group_fd)
{
	int i;

	i = get_vfio_group_idx(vfio_group_fd);
	if (i < 0 || i > (VFIO_MAX_GROUPS - 1))
		RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
	else
		vfio_cfg.vfio_groups[i].devices--;
}

static int
vfio_group_device_count(int vfio_group_fd)
{
	int i;

	i = get_vfio_group_idx(vfio_group_fd);
	if (i < 0 || i > (VFIO_MAX_GROUPS - 1)) {
		RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
		return -1;
	}

	return vfio_cfg.vfio_groups[i].devices;
}

int
rte_vfio_clear_group(int vfio_group_fd)
{
	int i;
	int socket_fd, ret;

	if (internal_config.process_type == RTE_PROC_PRIMARY) {
		i = get_vfio_group_idx(vfio_group_fd);
		if (i < 0)
			return -1;
		vfio_cfg.vfio_groups[i].group_no = -1;
		vfio_cfg.vfio_groups[i].fd = -1;
		vfio_cfg.vfio_groups[i].devices = 0;
		vfio_cfg.vfio_active_groups--;
		return 0;
	}

	/* This is just for SECONDARY processes */
	socket_fd = vfio_mp_sync_connect_to_primary();
	if (socket_fd < 0) {
		RTE_LOG(ERR, EAL, " cannot connect to primary process!\n");
		return -1;
	}

	if (vfio_mp_sync_send_request(socket_fd, SOCKET_CLR_GROUP) < 0) {
		RTE_LOG(ERR, EAL, " cannot request group fd clear!\n");
		close(socket_fd);
		return -1;
	}

	if (vfio_mp_sync_send_request(socket_fd, vfio_group_fd) < 0) {
		RTE_LOG(ERR, EAL, " cannot send group fd!\n");
		close(socket_fd);
		return -1;
	}

	ret = vfio_mp_sync_receive_request(socket_fd);
	switch (ret) {
	case SOCKET_NO_FD:
		RTE_LOG(ERR, EAL, " BAD VFIO group fd!\n");
		close(socket_fd);
		break;
	case SOCKET_OK:
		close(socket_fd);
		return 0;
	case SOCKET_ERR:
		RTE_LOG(ERR, EAL, " Socket error\n");
		close(socket_fd);
		break;
	default:
		RTE_LOG(ERR, EAL, " UNKNOWN reply, %d\n", ret);
		close(socket_fd);
	}

	return -1;
}

int
rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
		int *vfio_dev_fd, struct vfio_device_info *device_info)
{
	struct vfio_group_status group_status = {
			.argsz = sizeof(group_status)
	};
	int vfio_group_fd;
	int iommu_group_no;
	int i, ret;

	/* get group number */
	ret = vfio_get_group_no(sysfs_base, dev_addr, &iommu_group_no);
	if (ret == 0) {
		RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
			dev_addr);
		return 1;
	}

	/* if negative, something failed */
	if (ret < 0)
		return -1;

	/* get the actual group fd */
	vfio_group_fd = vfio_get_group_fd(iommu_group_no);
	if (vfio_group_fd < 0)
		return -1;

	/* if group_fd == 0, that means the device isn't managed by VFIO */
	if (vfio_group_fd == 0) {
		RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
				dev_addr);
		return 1;
	}

	/*
	 * check if the group is viable (meaning, all devices in the group
	 * are either bound to VFIO or not bound to anything)
	 */
	ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &group_status);
	if (ret) {
		RTE_LOG(ERR, EAL, " %s cannot get group status, "
				"error %i (%s)\n", dev_addr, errno, strerror(errno));
		close(vfio_group_fd);
		rte_vfio_clear_group(vfio_group_fd);
		return -1;
	} else if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
		RTE_LOG(ERR, EAL, " %s VFIO group is not viable!\n", dev_addr);
		close(vfio_group_fd);
		rte_vfio_clear_group(vfio_group_fd);
		return -1;
	}

	/* check if group does not have a container yet */
	if (!(group_status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {

		/* add group to a container */
		ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER,
				&vfio_cfg.vfio_container_fd);
		if (ret) {
			RTE_LOG(ERR, EAL, " %s cannot add VFIO group to container, "
					"error %i (%s)\n", dev_addr, errno, strerror(errno));
			close(vfio_group_fd);
			rte_vfio_clear_group(vfio_group_fd);
			return -1;
		}

		/*
		 * pick an IOMMU type and set up DMA mappings for container
		 *
		 * needs to be done only once, only when first group is
		 * assigned to a container and only in primary process.
		 * Note this can happen several times with the hotplug
		 * functionality.
		 */
		if (internal_config.process_type == RTE_PROC_PRIMARY &&
				vfio_cfg.vfio_active_groups == 1) {
			const struct vfio_iommu_type *t;

			/* select an IOMMU type which we will be using */
			t = vfio_set_iommu_type(vfio_cfg.vfio_container_fd);
			if (!t) {
				RTE_LOG(ERR, EAL,
					" %s failed to select IOMMU type\n",
					dev_addr);
				close(vfio_group_fd);
				rte_vfio_clear_group(vfio_group_fd);
				return -1;
			}
			ret = t->dma_map_func(vfio_cfg.vfio_container_fd);
			if (ret) {
				RTE_LOG(ERR, EAL,
					" %s DMA remapping failed, error %i (%s)\n",
					dev_addr, errno, strerror(errno));
				close(vfio_group_fd);
				rte_vfio_clear_group(vfio_group_fd);
				return -1;
			}

			vfio_cfg.vfio_iommu_type = t;

			/* re-map all user-mapped segments */
			rte_spinlock_lock(&user_mem_maps.lock);

			/* this IOMMU type may not support DMA mapping, but
			 * if we have mappings in the list - that means we have
			 * previously mapped something successfully, so we can
			 * be sure that DMA mapping is supported.
			 */
			for (i = 0; i < user_mem_maps.n_maps; i++) {
				struct user_mem_map *map;
				map = &user_mem_maps.maps[i];

				ret = t->dma_user_map_func(
						vfio_cfg.vfio_container_fd,
						map->addr, map->iova, map->len,
						1);
				if (ret) {
					RTE_LOG(ERR, EAL, "Couldn't map user memory for DMA: "
							"va: 0x%" PRIx64 " "
							"iova: 0x%" PRIx64 " "
							"len: 0x%" PRIx64 "\n",
							map->addr, map->iova,
							map->len);
					rte_spinlock_unlock(
							&user_mem_maps.lock);
					return -1;
				}
			}
			rte_spinlock_unlock(&user_mem_maps.lock);
		}
	}

	/* get a file descriptor for the device */
	*vfio_dev_fd = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD, dev_addr);
	if (*vfio_dev_fd < 0) {
		/* if we cannot get a device fd, this implies a problem with
		 * the VFIO group or the container not having IOMMU configured.
		 */
		RTE_LOG(WARNING, EAL, "Getting a vfio_dev_fd for %s failed\n",
				dev_addr);
		close(vfio_group_fd);
		rte_vfio_clear_group(vfio_group_fd);
		return -1;
	}

	/* test and setup the device */
	ret = ioctl(*vfio_dev_fd, VFIO_DEVICE_GET_INFO, device_info);
	if (ret) {
		RTE_LOG(ERR, EAL, " %s cannot get device info, "
				"error %i (%s)\n", dev_addr, errno,
				strerror(errno));
		close(*vfio_dev_fd);
		close(vfio_group_fd);
		rte_vfio_clear_group(vfio_group_fd);
		return -1;
	}
	vfio_group_device_get(vfio_group_fd);

	return 0;
}

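/* Typical usage from a bus driver (sketch; the sysfs base and device address
 * below are placeholders, and error handling is elided):
 *
 *	struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
 *	int dev_fd;
 *	int ret = rte_vfio_setup_device("/sys/bus/pci/devices",
 *			"0000:06:00.0", &dev_fd, &device_info);
 *	if (ret == 0)
 *		;	// dev_fd is usable for VFIO_DEVICE_* ioctls
 *	else if (ret == 1)
 *		;	// device not managed by VFIO, try another driver
 *	else
 *		;	// error
 */
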
int
rte_vfio_release_device(const char *sysfs_base, const char *dev_addr,
		int vfio_dev_fd)
{
	struct vfio_group_status group_status = {
			.argsz = sizeof(group_status)
	};
	int vfio_group_fd;
	int iommu_group_no;
	int ret;

	/* get group number */
	ret = vfio_get_group_no(sysfs_base, dev_addr, &iommu_group_no);
	if (ret <= 0) {
		RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver\n",
			dev_addr);
		/* This is an error at this point. */
		return -1;
	}

	/* get the actual group fd */
	vfio_group_fd = vfio_get_group_fd(iommu_group_no);
	if (vfio_group_fd <= 0) {
		RTE_LOG(INFO, EAL, "vfio_get_group_fd failed for %s\n",
			dev_addr);
		return -1;
	}

	/* At this point we got an active group. Closing it will detach it
	 * from the container. If this is the last active group, VFIO kernel
	 * code will unset the container and the IOMMU mappings.
	 */

	/* Closing a device */
	if (close(vfio_dev_fd) < 0) {
		RTE_LOG(INFO, EAL, "Error when closing vfio_dev_fd for %s\n",
			dev_addr);
		return -1;
	}

	/* A VFIO group can have several devices attached. Only when no
	 * devices remain should the group be closed.
	 */
	vfio_group_device_put(vfio_group_fd);
	if (!vfio_group_device_count(vfio_group_fd)) {

		if (close(vfio_group_fd) < 0) {
			RTE_LOG(INFO, EAL, "Error when closing vfio_group_fd for %s\n",
				dev_addr);
			return -1;
		}

		if (rte_vfio_clear_group(vfio_group_fd) < 0) {
			RTE_LOG(INFO, EAL, "Error when clearing group for %s\n",
				dev_addr);
			return -1;
		}
	}

	return 0;
}

int
rte_vfio_enable(const char *modname)
{
	/* initialize group list */
	int i;
	int vfio_available;

	for (i = 0; i < VFIO_MAX_GROUPS; i++) {
		vfio_cfg.vfio_groups[i].fd = -1;
		vfio_cfg.vfio_groups[i].group_no = -1;
		vfio_cfg.vfio_groups[i].devices = 0;
	}

	/* inform the user that we are probing for VFIO */
	RTE_LOG(INFO, EAL, "Probing VFIO support...\n");

	/* check if vfio module is loaded */
	vfio_available = rte_eal_check_module(modname);

	/* return error directly */
	if (vfio_available == -1) {
		RTE_LOG(INFO, EAL, "Could not get loaded module details!\n");
		return -1;
	}

	/* return 0 if VFIO modules not loaded */
	if (vfio_available == 0) {
		RTE_LOG(DEBUG, EAL, "VFIO modules not loaded, "
			"skipping VFIO support...\n");
		return 0;
	}

	vfio_cfg.vfio_container_fd = vfio_get_container_fd();

	/* check if we have VFIO driver enabled */
	if (vfio_cfg.vfio_container_fd != -1) {
		RTE_LOG(NOTICE, EAL, "VFIO support initialized\n");
		vfio_cfg.vfio_enabled = 1;
	} else {
		RTE_LOG(NOTICE, EAL, "VFIO support could not be initialized\n");
	}

	return 0;
}

int
rte_vfio_is_enabled(const char *modname)
{
	const int mod_available = rte_eal_check_module(modname) > 0;
	return vfio_cfg.vfio_enabled && mod_available;
}

const struct vfio_iommu_type *
vfio_set_iommu_type(int vfio_container_fd)
{
	unsigned int idx;

	for (idx = 0; idx < RTE_DIM(iommu_types); idx++) {
		const struct vfio_iommu_type *t = &iommu_types[idx];

		int ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU,
				t->type_id);
		if (!ret) {
			RTE_LOG(NOTICE, EAL, " using IOMMU type %d (%s)\n",
					t->type_id, t->name);
			return t;
		}
		/* not an error, there may be more supported IOMMU types */
		RTE_LOG(DEBUG, EAL, " set IOMMU type %d (%s) failed, "
				"error %i (%s)\n", t->type_id, t->name, errno,
				strerror(errno));
	}
	/* if we didn't find a suitable IOMMU type, fail */
	return NULL;
}

int
vfio_has_supported_extensions(int vfio_container_fd)
{
	int ret;
	unsigned int idx, n_extensions = 0;

	for (idx = 0; idx < RTE_DIM(iommu_types); idx++) {
		const struct vfio_iommu_type *t = &iommu_types[idx];

		ret = ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION,
				t->type_id);
		if (ret < 0) {
			RTE_LOG(ERR, EAL, " could not get IOMMU type, "
				"error %i (%s)\n", errno,
				strerror(errno));
			close(vfio_container_fd);
			return -1;
		} else if (ret == 1) {
			/* we found a supported extension */
			n_extensions++;
		}
		RTE_LOG(DEBUG, EAL, " IOMMU type %d (%s) is %s\n",
				t->type_id, t->name,
				ret ? "supported" : "not supported");
	}

	/* if we didn't find any supported IOMMU types, fail */
	if (!n_extensions) {
		close(vfio_container_fd);
		return -1;
	}

	return 0;
}

int
vfio_get_container_fd(void)
{
	int ret, vfio_container_fd;

	/* if we're in a primary process, try to open the container */
	if (internal_config.process_type == RTE_PROC_PRIMARY) {
		vfio_container_fd = open(VFIO_CONTAINER_PATH, O_RDWR);
		if (vfio_container_fd < 0) {
			RTE_LOG(ERR, EAL, " cannot open VFIO container, "
					"error %i (%s)\n", errno, strerror(errno));
			return -1;
		}

		/* check VFIO API version */
		ret = ioctl(vfio_container_fd, VFIO_GET_API_VERSION);
		if (ret != VFIO_API_VERSION) {
			if (ret < 0)
				RTE_LOG(ERR, EAL, " could not get VFIO API version, "
						"error %i (%s)\n", errno, strerror(errno));
			else
				RTE_LOG(ERR, EAL, " unsupported VFIO API version!\n");
			close(vfio_container_fd);
			return -1;
		}

		ret = vfio_has_supported_extensions(vfio_container_fd);
		if (ret) {
			RTE_LOG(ERR, EAL, " no supported IOMMU "
					"extensions found!\n");
			return -1;
		}

		return vfio_container_fd;
	}

	/*
	 * if we're in a secondary process, request container fd from the
	 * primary process via our socket
	 */
	{
		int socket_fd;

		socket_fd = vfio_mp_sync_connect_to_primary();
		if (socket_fd < 0) {
			RTE_LOG(ERR, EAL, " cannot connect to primary process!\n");
			return -1;
		}
		if (vfio_mp_sync_send_request(socket_fd, SOCKET_REQ_CONTAINER) < 0) {
			RTE_LOG(ERR, EAL, " cannot request container fd!\n");
			close(socket_fd);
			return -1;
		}
		vfio_container_fd = vfio_mp_sync_receive_fd(socket_fd);
		if (vfio_container_fd < 0) {
			RTE_LOG(ERR, EAL, " cannot get container fd!\n");
			close(socket_fd);
			return -1;
		}

		close(socket_fd);
		return vfio_container_fd;
	}
}

int
vfio_get_group_no(const char *sysfs_base,
		const char *dev_addr, int *iommu_group_no)
{
	char linkname[PATH_MAX];
	char filename[PATH_MAX];
	char *tok[16], *group_tok, *end;
	int ret;

	memset(linkname, 0, sizeof(linkname));
	memset(filename, 0, sizeof(filename));

	/* try to find out IOMMU group for this device */
	snprintf(linkname, sizeof(linkname),
			"%s/%s/iommu_group", sysfs_base, dev_addr);

	ret = readlink(linkname, filename, sizeof(filename));

	/* if the link doesn't exist, no VFIO for us */
	if (ret < 0)
		return 0;

	ret = rte_strsplit(filename, sizeof(filename),
			tok, RTE_DIM(tok), '/');

	if (ret <= 0) {
		RTE_LOG(ERR, EAL, " %s cannot get IOMMU group\n", dev_addr);
		return -1;
	}

	/* IOMMU group is always the last token */
	errno = 0;
	group_tok = tok[ret - 1];
	end = group_tok;
	*iommu_group_no = strtol(group_tok, &end, 10);
	if (end == group_tok || *end != '\0' || errno != 0) {
		RTE_LOG(ERR, EAL, " %s error parsing IOMMU number!\n", dev_addr);
		return -1;
	}

	return 1;
}

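/* For example, for a PCI device the link typically looks like
 * /sys/bus/pci/devices/0000:06:00.0/iommu_group ->
 * ../../../kernel/iommu_groups/42, so tok[ret - 1] is "42" and
 * *iommu_group_no becomes 42.
 */
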
static int
type1_map(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, void *arg)
{
	int *vfio_container_fd = arg;

	return vfio_type1_dma_mem_map(*vfio_container_fd, ms->addr_64, ms->iova,
			ms->len, 1);
}

static int
vfio_type1_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
		uint64_t len, int do_map)
{
	struct vfio_iommu_type1_dma_map dma_map;
	struct vfio_iommu_type1_dma_unmap dma_unmap;
	int ret;

	if (do_map != 0) {
		memset(&dma_map, 0, sizeof(dma_map));
		dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
		dma_map.vaddr = vaddr;
		dma_map.size = len;
		dma_map.iova = iova;
		dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE;

		ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
		if (ret) {
			RTE_LOG(ERR, EAL, " cannot set up DMA remapping, error %i (%s)\n",
				errno, strerror(errno));
			return -1;
		}
	} else {
		memset(&dma_unmap, 0, sizeof(dma_unmap));
		dma_unmap.argsz = sizeof(struct vfio_iommu_type1_dma_unmap);
		dma_unmap.size = len;
		dma_unmap.iova = iova;

		ret = ioctl(vfio_container_fd, VFIO_IOMMU_UNMAP_DMA,
				&dma_unmap);
		if (ret) {
			RTE_LOG(ERR, EAL, " cannot clear DMA remapping, error %i (%s)\n",
					errno, strerror(errno));
			return -1;
		}
	}

	return 0;
}

static int
vfio_type1_dma_map(int vfio_container_fd)
{
	return rte_memseg_walk(type1_map, &vfio_container_fd);
}

static int
vfio_spapr_dma_do_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
		uint64_t len, int do_map)
{
	struct vfio_iommu_type1_dma_map dma_map;
	struct vfio_iommu_type1_dma_unmap dma_unmap;
	int ret;

	if (do_map != 0) {
		memset(&dma_map, 0, sizeof(dma_map));
		dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
		dma_map.vaddr = vaddr;
		dma_map.size = len;
		dma_map.iova = iova;
		dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE;

		ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
		if (ret) {
			RTE_LOG(ERR, EAL, " cannot set up DMA remapping, error %i (%s)\n",
				errno, strerror(errno));
			return -1;
		}
	} else {
		struct vfio_iommu_spapr_register_memory reg = {
			.argsz = sizeof(reg),
			.flags = 0
		};
		reg.vaddr = (uintptr_t) vaddr;
		reg.size = len;

		ret = ioctl(vfio_container_fd,
				VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
		if (ret) {
			RTE_LOG(ERR, EAL, " cannot unregister vaddr for IOMMU, error %i (%s)\n",
					errno, strerror(errno));
			return -1;
		}

		memset(&dma_unmap, 0, sizeof(dma_unmap));
		dma_unmap.argsz = sizeof(struct vfio_iommu_type1_dma_unmap);
		dma_unmap.size = len;
		dma_unmap.iova = iova;

		ret = ioctl(vfio_container_fd, VFIO_IOMMU_UNMAP_DMA,
				&dma_unmap);
		if (ret) {
			RTE_LOG(ERR, EAL, " cannot clear DMA remapping, error %i (%s)\n",
					errno, strerror(errno));
			return -1;
		}
	}

	return 0;
}

static int
vfio_spapr_map_walk(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, void *arg)
{
	int *vfio_container_fd = arg;

	return vfio_spapr_dma_mem_map(*vfio_container_fd, ms->addr_64, ms->iova,
			ms->len, 1);
}

struct spapr_walk_param {
	uint64_t window_size;
	uint64_t hugepage_sz;
};
static int
vfio_spapr_window_size_walk(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, void *arg)
{
	struct spapr_walk_param *param = arg;
	uint64_t max = ms->iova + ms->len;

	if (max > param->window_size) {
		param->hugepage_sz = ms->hugepage_sz;
		param->window_size = max;
	}

	return 0;
}

static int
vfio_spapr_create_new_dma_window(int vfio_container_fd,
		struct vfio_iommu_spapr_tce_create *create)
{
	struct vfio_iommu_spapr_tce_remove remove = {
		.argsz = sizeof(remove),
	};
	struct vfio_iommu_spapr_tce_info info = {
		.argsz = sizeof(info),
	};
	int ret;

	/* query spapr iommu info */
	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
	if (ret) {
		RTE_LOG(ERR, EAL, " cannot get iommu info, "
				"error %i (%s)\n", errno, strerror(errno));
		return -1;
	}

	/* remove default DMA of 32 bit window */
	remove.start_addr = info.dma32_window_start;
	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
	if (ret) {
		RTE_LOG(ERR, EAL, " cannot remove default DMA window, "
				"error %i (%s)\n", errno, strerror(errno));
		return -1;
	}

	/* create new DMA window */
	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, create);
	if (ret) {
		RTE_LOG(ERR, EAL, " cannot create new DMA window, "
				"error %i (%s)\n", errno, strerror(errno));
		return -1;
	}

	if (create->start_addr != 0) {
		RTE_LOG(ERR, EAL, " DMA window start address != 0\n");
		return -1;
	}

	return 0;
}

static int
vfio_spapr_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
		uint64_t len, int do_map)
{
	struct spapr_walk_param param;
	struct vfio_iommu_spapr_tce_create create = {
		.argsz = sizeof(create),
	};
	int i, ret = 0;

	rte_spinlock_lock(&user_mem_maps.lock);

	/* check if window size needs to be adjusted */
	memset(&param, 0, sizeof(param));

	if (rte_memseg_walk(vfio_spapr_window_size_walk, &param) < 0) {
		RTE_LOG(ERR, EAL, "Could not get window size\n");
		ret = -1;
		goto out;
	}

	/* also check user maps */
	for (i = 0; i < user_mem_maps.n_maps; i++) {
		uint64_t max = user_mem_maps.maps[i].iova +
				user_mem_maps.maps[i].len;
		param.window_size = RTE_MAX(param.window_size, max);
	}

	/* sPAPR requires window size to be a power of 2 */
	create.window_size = rte_align64pow2(param.window_size);
	create.page_shift = __builtin_ctzll(param.hugepage_sz);
	create.levels = 1;

	if (do_map) {
		/* re-create window and remap the entire memory */
		if (iova + len > create.window_size) {
			if (vfio_spapr_create_new_dma_window(vfio_container_fd,
					&create) < 0) {
				RTE_LOG(ERR, EAL, "Could not create new DMA window\n");
				ret = -1;
				goto out;
			}
			if (rte_memseg_walk(vfio_spapr_map_walk,
					&vfio_container_fd) < 0) {
				RTE_LOG(ERR, EAL, "Could not recreate DMA maps\n");
				ret = -1;
				goto out;
			}
			/* remap all user maps */
			for (i = 0; i < user_mem_maps.n_maps; i++) {
				struct user_mem_map *map =
						&user_mem_maps.maps[i];
				if (vfio_spapr_dma_do_map(vfio_container_fd,
						map->addr, map->iova, map->len,
						1)) {
					RTE_LOG(ERR, EAL, "Could not recreate user DMA maps\n");
					ret = -1;
					goto out;
				}
			}
		}

		/* now that we've remapped all of the memory that was present
		 * before, map the segment that we were requested to map.
		 */
		if (vfio_spapr_dma_do_map(vfio_container_fd,
				vaddr, iova, len, 1) < 0) {
			RTE_LOG(ERR, EAL, "Could not map segment\n");
			ret = -1;
			goto out;
		}
	} else {
		/* for unmap, check if iova is within DMA window */
		if (iova + len > create.window_size) {
			RTE_LOG(ERR, EAL, "iova beyond DMA window for unmap\n");
			ret = -1;
			goto out;
		}

		vfio_spapr_dma_do_map(vfio_container_fd, vaddr, iova, len, 0);
	}
out:
	rte_spinlock_unlock(&user_mem_maps.lock);
	return ret;
}

static int
vfio_spapr_dma_map(int vfio_container_fd)
{
	struct vfio_iommu_spapr_tce_create create = {
		.argsz = sizeof(create),
	};
	struct spapr_walk_param param;

	memset(&param, 0, sizeof(param));

	/* create DMA window from 0 to max(phys_addr + len) */
	rte_memseg_walk(vfio_spapr_window_size_walk, &param);

	/* sPAPR requires window size to be a power of 2 */
	create.window_size = rte_align64pow2(param.window_size);
	create.page_shift = __builtin_ctzll(param.hugepage_sz);
	create.levels = 1;

	if (vfio_spapr_create_new_dma_window(vfio_container_fd, &create) < 0) {
		RTE_LOG(ERR, EAL, "Could not create new DMA window\n");
		return -1;
	}

	/* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */
	if (rte_memseg_walk(vfio_spapr_map_walk, &vfio_container_fd) < 0)
		return -1;

	return 0;
}

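/* Sizing example (illustrative): if the highest segment ends at IOVA
 * 0x180000000 (6 GB), rte_align64pow2() rounds the window up to 0x200000000
 * (8 GB), and with 16 MB hugepages page_shift is ctzll(0x1000000) = 24.
 */
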
static int
vfio_noiommu_dma_map(int __rte_unused vfio_container_fd)
{
	/* No-IOMMU mode does not need DMA mapping */
	return 0;
}

static int
vfio_noiommu_dma_mem_map(int __rte_unused vfio_container_fd,
		uint64_t __rte_unused vaddr,
		uint64_t __rte_unused iova, uint64_t __rte_unused len,
		int __rte_unused do_map)
{
	/* No-IOMMU mode does not need DMA mapping */
	return 0;
}

static int
vfio_dma_mem_map(uint64_t vaddr, uint64_t iova, uint64_t len, int do_map)
{
	const struct vfio_iommu_type *t = vfio_cfg.vfio_iommu_type;

	if (!t) {
		RTE_LOG(ERR, EAL, " VFIO support not initialized\n");
		rte_errno = ENODEV;
		return -1;
	}

	if (!t->dma_user_map_func) {
		RTE_LOG(ERR, EAL,
			" VFIO custom DMA region mapping not supported by IOMMU %s\n",
			t->name);
		rte_errno = ENOTSUP;
		return -1;
	}

	return t->dma_user_map_func(vfio_cfg.vfio_container_fd, vaddr, iova,
			len, do_map);
}

int __rte_experimental
rte_vfio_dma_map(uint64_t vaddr, uint64_t iova, uint64_t len)
{
	struct user_mem_map *new_map;
	int ret = 0;

	if (len == 0) {
		rte_errno = EINVAL;
		return -1;
	}

	rte_spinlock_lock(&user_mem_maps.lock);
	if (user_mem_maps.n_maps == VFIO_MAX_USER_MEM_MAPS) {
		RTE_LOG(ERR, EAL, "No more space for user mem maps\n");
		rte_errno = ENOMEM;
		ret = -1;
		goto out;
	}
	/* map the entry */
	if (vfio_dma_mem_map(vaddr, iova, len, 1)) {
		/* technically, this will fail if there are currently no
		 * devices plugged in, even though the mapping might have
		 * succeeded if a device were added later. however, since we
		 * cannot verify if this is a valid mapping without having a
		 * device attached, consider this to be unsupported, because
		 * we can't just store any old mapping and pollute the list of
		 * active mappings willy-nilly.
		 */
		RTE_LOG(ERR, EAL, "Couldn't map new region for DMA\n");
		ret = -1;
		goto out;
	}
	/* create new user mem map entry */
	new_map = &user_mem_maps.maps[user_mem_maps.n_maps++];
	new_map->addr = vaddr;
	new_map->iova = iova;
	new_map->len = len;

	compact_user_maps();
out:
	rte_spinlock_unlock(&user_mem_maps.lock);
	return ret;
}

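/* Usage sketch (illustrative; 'buf' is a placeholder and must be valid for
 * the IOMMU in use - e.g. page-aligned, with iova == vaddr in IOVA-as-VA
 * mode):
 *
 *	uint64_t va = (uint64_t)buf;
 *	if (rte_vfio_dma_map(va, va, 2 * 1024 * 1024) < 0)
 *		RTE_LOG(ERR, USER1, "map failed: %s\n",
 *			rte_strerror(rte_errno));
 */
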
int __rte_experimental
rte_vfio_dma_unmap(uint64_t vaddr, uint64_t iova, uint64_t len)
{
	struct user_mem_map *map, *new_map = NULL;
	int ret = 0;

	if (len == 0) {
		rte_errno = EINVAL;
		return -1;
	}

	rte_spinlock_lock(&user_mem_maps.lock);

	/* find our mapping */
	map = find_user_mem_map(vaddr, iova, len);
	if (!map) {
		RTE_LOG(ERR, EAL, "Couldn't find previously mapped region\n");
		rte_errno = EINVAL;
		ret = -1;
		goto out;
	}
	if (map->addr != vaddr || map->iova != iova || map->len != len) {
		/* we're partially unmapping a previously mapped region, so we
		 * need to split entry into two.
		 */
		if (user_mem_maps.n_maps == VFIO_MAX_USER_MEM_MAPS) {
			RTE_LOG(ERR, EAL, "Not enough space to store partial mapping\n");
			rte_errno = ENOMEM;
			ret = -1;
			goto out;
		}
		new_map = &user_mem_maps.maps[user_mem_maps.n_maps++];
	}

	/* unmap the entry */
	if (vfio_dma_mem_map(vaddr, iova, len, 0)) {
		/* there may not be any devices plugged in, so unmapping will
		 * fail with ENODEV/ENOTSUP rte_errno values, but that doesn't
		 * stop us from removing the mapping, as the assumption is we
		 * won't be needing this memory any more and thus will want to
		 * prevent it from being remapped again on hotplug. so, only
		 * fail if we indeed failed to unmap (e.g. if the mapping was
		 * within our mapped range but had invalid alignment).
		 */
		if (rte_errno != ENODEV && rte_errno != ENOTSUP) {
			RTE_LOG(ERR, EAL, "Couldn't unmap region for DMA\n");
			ret = -1;
			goto out;
		}
		RTE_LOG(DEBUG, EAL, "DMA unmapping failed, but removing mappings anyway\n");
	}
	/* remove map from the list of active mappings */
	if (new_map != NULL) {
		adjust_map(map, new_map, vaddr, len);

		/* if we've created a new map by splitting, sort everything */
		if (!is_null_map(new_map)) {
			compact_user_maps();
		} else {
			/* we've created a new mapping, but it was unused */
			user_mem_maps.n_maps--;
		}
	} else {
		memset(map, 0, sizeof(*map));
		compact_user_maps();
		user_mem_maps.n_maps--;
	}
out:
	rte_spinlock_unlock(&user_mem_maps.lock);
	return ret;
}

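/* Partial unmap sketch (illustrative, continuing the example above): after
 * mapping 2 MB at 'va', unmapping the middle 4 KB page splits the stored map
 * into two entries around the hole:
 *
 *	rte_vfio_dma_unmap(va + 0x1000, va + 0x1000, 0x1000);
 *
 * Whether the IOMMU accepts unmapping a subrange of a prior mapping depends
 * on the IOMMU type; type 1, for instance, may refuse it.
 */
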
int
rte_vfio_noiommu_is_enabled(void)
{
	int fd;
	ssize_t cnt;
	char c;

	fd = open(VFIO_NOIOMMU_MODE, O_RDONLY);
	if (fd < 0) {
		if (errno != ENOENT) {
			RTE_LOG(ERR, EAL, " cannot open vfio noiommu file %i (%s)\n",
					errno, strerror(errno));
			return -1;
		}
		/*
		 * else the file does not exist,
		 * i.e. noiommu is not enabled
		 */
		return 0;
	}

	cnt = read(fd, &c, 1);
	close(fd);
	if (cnt != 1) {
		RTE_LOG(ERR, EAL, " unable to read from vfio noiommu "
				"file %i (%s)\n", errno, strerror(errno));
		return -1;
	}

	return c == 'Y';
}

#else /* not VFIO_PRESENT */

int __rte_experimental
rte_vfio_dma_map(uint64_t __rte_unused vaddr, __rte_unused uint64_t iova,
		__rte_unused uint64_t len)
{
	return -1;
}

int __rte_experimental
rte_vfio_dma_unmap(uint64_t __rte_unused vaddr, uint64_t __rte_unused iova,
		__rte_unused uint64_t len)
{
	return -1;
}

#endif /* VFIO_PRESENT */