1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017-2018 Intel Corporation
14 #include <sys/types.h>
16 #include <sys/queue.h>
21 #include <sys/ioctl.h>
25 #ifdef F_ADD_SEALS /* if file sealing is supported, so is memfd */
26 #include <linux/memfd.h>
27 #define MEMFD_SUPPORTED
29 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
33 #include <linux/falloc.h>
34 #include <linux/mman.h> /* for hugetlb-related mmap flags */
36 #include <rte_common.h>
38 #include <rte_eal_memconfig.h>
40 #include <rte_errno.h>
41 #include <rte_memory.h>
42 #include <rte_spinlock.h>
44 #include "eal_filesystem.h"
45 #include "eal_internal_cfg.h"
46 #include "eal_memalloc.h"
47 #include "eal_memcfg.h"
48 #include "eal_private.h"
50 const int anonymous_hugepages_supported =
53 #define RTE_MAP_HUGE_SHIFT MAP_HUGE_SHIFT
56 #define RTE_MAP_HUGE_SHIFT 26
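/* note (added): 26 matches the kernel's MAP_HUGE_SHIFT value, so this local
 * fallback define keeps working on systems whose libc headers predate it.
 */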
60 * we've already checked memfd support at compile-time, but we also need to
61 * check if we can create hugepage files with memfd.
63 * also, this is not a constant, because while we may be *compiled* with memfd
64 * hugetlbfs support, we might not be *running* on a system that supports memfd
65 * and/or memfd with hugetlbfs, so we need to be able to adjust this flag at
66 * runtime, and fall back to anonymous memory.
68 static int memfd_create_supported =
71 #define RTE_MFD_HUGETLB MFD_HUGETLB
74 #define RTE_MFD_HUGETLB 4U
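/* note (added): 4U mirrors the kernel's MFD_HUGETLB flag value, so the flag
 * can still be passed to memfd_create() when the libc headers don't define
 * it; whether the running kernel actually accepts it is probed at init by
 * test_memfd_create() below.
 */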
78 * not all kernel versions support fallocate on hugetlbfs, so fall back to
79 * ftruncate and disallow deallocation if fallocate is not supported.
81 static int fallocate_supported = -1; /* unknown */
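/* note (added): this is a tri-state flag - it stays at -1 until the first
 * fallocate() attempt in resize_hugefile_in_filesystem(), which then sets it
 * to 1 (supported) or 0 (not supported, deallocation disabled).
 */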
84 * we have two modes - single file segments, and file-per-page mode.
86 * for single-file segments, we use memseg_list_fd to store the segment fd,
87 * while the fds[] will not be allocated, and len will be set to 0.
89 * for file-per-page mode, each page will have its own fd, so 'memseg_list_fd'
90 * will be invalid (set to -1), and we'll use 'fds' to keep track of page fd's.
92 * we cannot know how many pages a system will have in advance, but we do know
93 * that they come in lists, and we know the lengths of these lists. so, simply store
94 * a malloc'd array of fd's indexed by list and segment index.
96 * they will be initialized at startup, and filled as we allocate/deallocate
100 int *fds; /**< dynamically allocated array of per-page segment fd's */
101 int memseg_list_fd; /**< memseg list fd */
102 int len; /**< total length of the array */
103 int count; /**< number of entries currently in use */
104 } fd_list[RTE_MAX_MEMSEG_LISTS];
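/* illustrative lookup for the two modes described above (a sketch added for
 * clarity, not part of the original code):
 *
 *   int fd;
 *   if (internal_config.single_file_segments)
 *       fd = fd_list[msl_idx].memseg_list_fd;  // one fd per memseg list
 *   else
 *       fd = fd_list[msl_idx].fds[seg_idx];    // one fd per page
 */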
106 /** local copy of a memory map, used to synchronize memory hotplug in MP */
107 static struct rte_memseg_list local_memsegs[RTE_MAX_MEMSEG_LISTS];
109 static sigjmp_buf huge_jmpenv;
111 static void __rte_unused huge_sigbus_handler(int signo __rte_unused)
113 siglongjmp(huge_jmpenv, 1);
116 /* Put sigsetjmp in a wrapper function to avoid compile errors: any
117 * non-volatile, non-static local variable in the stack frame calling
118 * sigsetjmp might be clobbered by a call to longjmp.
120 static int __rte_unused huge_wrap_sigsetjmp(void)
122 return sigsetjmp(huge_jmpenv, 1);
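/* note (added): callers check the return value - it is 0 on the direct call
 * and nonzero when control comes back via siglongjmp() from the SIGBUS
 * handler; see the huge_wrap_sigsetjmp() check in alloc_seg() below.
 */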
125 static struct sigaction huge_action_old;
126 static int huge_need_recover;
128 static void __rte_unused
129 huge_register_sigbus(void)
132 struct sigaction action;
135 sigaddset(&mask, SIGBUS);
137 action.sa_mask = mask;
138 action.sa_handler = huge_sigbus_handler;
140 huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);
143 static void __rte_unused
144 huge_recover_sigbus(void)
146 if (huge_need_recover) {
147 sigaction(SIGBUS, &huge_action_old, NULL);
148 huge_need_recover = 0;
152 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
157 /* Check if kernel supports NUMA. */
158 if (numa_available() != 0) {
159 RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
166 prepare_numa(int *oldpolicy, struct bitmask *oldmask, int socket_id)
168 RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
169 if (get_mempolicy(oldpolicy, oldmask->maskp,
170 oldmask->size + 1, 0, 0) < 0) {
172 "Failed to get current mempolicy: %s. "
173 "Assuming MPOL_DEFAULT.\n", strerror(errno));
174 *oldpolicy = MPOL_DEFAULT;
177 "Setting policy MPOL_PREFERRED for socket %d\n",
179 numa_set_preferred(socket_id);
183 restore_numa(int *oldpolicy, struct bitmask *oldmask)
186 "Restoring previous memory policy: %d\n", *oldpolicy);
187 if (*oldpolicy == MPOL_DEFAULT) {
188 numa_set_localalloc();
189 } else if (set_mempolicy(*oldpolicy, oldmask->maskp,
190 oldmask->size + 1) < 0) {
191 RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
193 numa_set_localalloc();
195 numa_free_cpumask(oldmask);
200 * uses fstat to report the size of a file on disk
203 get_file_size(int fd)
206 if (fstat(fd, &st) < 0)
212 pagesz_flags(uint64_t page_sz)
214 /* as per mmap() manpage, all page sizes are log2 of page size
215 * shifted by MAP_HUGE_SHIFT
217 int log2 = rte_log2_u64(page_sz);
218 return log2 << RTE_MAP_HUGE_SHIFT;
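/* worked example (added for clarity): for 2 MiB pages,
 * rte_log2_u64(2 * 1024 * 1024) == 21, so the function returns 21 << 26,
 * which is the same value the kernel headers define as MAP_HUGE_2MB.
 */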
221 /* returns 1 on successful lock, 0 on unsuccessful lock, -1 on error */
222 static int lock(int fd, int type)
226 /* flock may be interrupted */
228 ret = flock(fd, type | LOCK_NB);
229 } while (ret && errno == EINTR);
231 if (ret && errno == EWOULDBLOCK) {
235 RTE_LOG(ERR, EAL, "%s(): error calling flock(): %s\n",
236 __func__, strerror(errno));
239 /* lock was successful */
244 get_seg_memfd(struct hugepage_info *hi __rte_unused,
245 unsigned int list_idx __rte_unused,
246 unsigned int seg_idx __rte_unused)
248 #ifdef MEMFD_SUPPORTED
250 char segname[250]; /* as per manpage, limit is 249 bytes plus null */
252 int flags = RTE_MFD_HUGETLB | pagesz_flags(hi->hugepage_sz);
254 if (internal_config.single_file_segments) {
255 fd = fd_list[list_idx].memseg_list_fd;
258 snprintf(segname, sizeof(segname), "seg_%i", list_idx);
259 fd = memfd_create(segname, flags);
261 RTE_LOG(DEBUG, EAL, "%s(): memfd create failed: %s\n",
262 __func__, strerror(errno));
265 fd_list[list_idx].memseg_list_fd = fd;
268 fd = fd_list[list_idx].fds[seg_idx];
271 snprintf(segname, sizeof(segname), "seg_%i-%i",
273 fd = memfd_create(segname, flags);
275 RTE_LOG(DEBUG, EAL, "%s(): memfd create failed: %s\n",
276 __func__, strerror(errno));
279 fd_list[list_idx].fds[seg_idx] = fd;
288 get_seg_fd(char *path, int buflen, struct hugepage_info *hi,
289 unsigned int list_idx, unsigned int seg_idx)
293 /* for in-memory mode, we only make it here when we're sure we support
294 * memfd, and this is a special case.
296 if (internal_config.in_memory)
297 return get_seg_memfd(hi, list_idx, seg_idx);
299 if (internal_config.single_file_segments) {
300 /* create a hugepage file path */
301 eal_get_hugefile_path(path, buflen, hi->hugedir, list_idx);
303 fd = fd_list[list_idx].memseg_list_fd;
306 fd = open(path, O_CREAT | O_RDWR, 0600);
308 RTE_LOG(ERR, EAL, "%s(): open failed: %s\n",
309 __func__, strerror(errno));
312 /* take out a read lock and keep it indefinitely */
313 if (lock(fd, LOCK_SH) < 0) {
314 RTE_LOG(ERR, EAL, "%s(): lock failed: %s\n",
315 __func__, strerror(errno));
319 fd_list[list_idx].memseg_list_fd = fd;
322 /* create a hugepage file path */
323 eal_get_hugefile_path(path, buflen, hi->hugedir,
324 list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);
326 fd = fd_list[list_idx].fds[seg_idx];
329 fd = open(path, O_CREAT | O_RDWR, 0600);
331 RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n",
332 __func__, strerror(errno));
335 /* take out a read lock */
336 if (lock(fd, LOCK_SH) < 0) {
337 RTE_LOG(ERR, EAL, "%s(): lock failed: %s\n",
338 __func__, strerror(errno));
342 fd_list[list_idx].fds[seg_idx] = fd;
349 resize_hugefile_in_memory(int fd, uint64_t fa_offset,
350 uint64_t page_sz, bool grow)
352 int flags = grow ? 0 : FALLOC_FL_PUNCH_HOLE |
356 /* grow or shrink the file */
357 ret = fallocate(fd, flags, fa_offset, page_sz);
360 RTE_LOG(DEBUG, EAL, "%s(): fallocate() failed: %s\n",
369 resize_hugefile_in_filesystem(int fd, uint64_t fa_offset, uint64_t page_sz,
375 if (fallocate_supported == 0) {
376 /* we cannot deallocate memory if fallocate() is not
377 * supported, and hugepage file is already locked at
378 * creation, so no further synchronization needed.
382 RTE_LOG(DEBUG, EAL, "%s(): fallocate not supported, not freeing page back to the system\n",
386 uint64_t new_size = fa_offset + page_sz;
387 uint64_t cur_size = get_file_size(fd);
389 /* fallocate isn't supported, fall back to ftruncate */
390 if (new_size > cur_size &&
391 ftruncate(fd, new_size) < 0) {
392 RTE_LOG(DEBUG, EAL, "%s(): ftruncate() failed: %s\n",
393 __func__, strerror(errno));
397 int flags = grow ? 0 : FALLOC_FL_PUNCH_HOLE |
402 * technically, it is perfectly safe for both primary
403 * and secondary to grow and shrink the page files:
404 * growing the file repeatedly has no effect because
405 * a page can only be allocated once, while mmap ensures
406 * that secondaries hold on to the page even after the
407 * page itself is removed from the filesystem.
409 * however, letting secondaries grow/shrink the files
410 * tends to expose bugs in fd_list page count handling,
411 * so leave this to the primary process just in case.
413 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
416 /* grow or shrink the file */
417 ret = fallocate(fd, flags, fa_offset, page_sz);
420 if (fallocate_supported == -1 &&
422 RTE_LOG(ERR, EAL, "%s(): fallocate() not supported, hugepage deallocation will be disabled\n",
425 fallocate_supported = 0;
427 RTE_LOG(DEBUG, EAL, "%s(): fallocate() failed: %s\n",
433 fallocate_supported = 1;
441 close_hugefile(int fd, char *path, int list_idx)
444 * primary process must unlink the file, but only when not in in-memory
445 * mode (as in that case there is no file to unlink).
447 if (!internal_config.in_memory &&
448 rte_eal_process_type() == RTE_PROC_PRIMARY &&
450 RTE_LOG(ERR, EAL, "%s(): unlinking '%s' failed: %s\n",
451 __func__, path, strerror(errno));
454 fd_list[list_idx].memseg_list_fd = -1;
458 resize_hugefile(int fd, uint64_t fa_offset, uint64_t page_sz, bool grow)
460 /* in-memory mode is a special case, because we can be sure that
461 * fallocate() is supported.
463 if (internal_config.in_memory)
464 return resize_hugefile_in_memory(fd, fa_offset,
467 return resize_hugefile_in_filesystem(fd, fa_offset, page_sz,
472 alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
473 struct hugepage_info *hi, unsigned int list_idx,
474 unsigned int seg_idx)
476 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
477 int cur_socket_id = 0;
489 alloc_sz = hi->hugepage_sz;
491 /* these are checked at init, but code analyzers don't know that */
492 if (internal_config.in_memory && !anonymous_hugepages_supported) {
493 RTE_LOG(ERR, EAL, "Anonymous hugepages not supported, in-memory mode cannot allocate memory\n");
496 if (internal_config.in_memory && !memfd_create_supported &&
497 internal_config.single_file_segments) {
498 RTE_LOG(ERR, EAL, "Single-file segments are not supported without memfd support\n");
502 /* in-memory without memfd is a special case */
505 if (internal_config.in_memory && !memfd_create_supported) {
506 const int in_memory_flags = MAP_HUGETLB | MAP_FIXED |
507 MAP_PRIVATE | MAP_ANONYMOUS;
510 pagesz_flag = pagesz_flags(alloc_sz);
512 mmap_flags = in_memory_flags | pagesz_flag;
514 /* single-file segments codepath will never be active
515 * here because in-memory mode is incompatible with the
516 * fallback path, and it's stopped at EAL initialization
521 /* takes out a read lock on segment or segment list */
522 fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
524 RTE_LOG(ERR, EAL, "Couldn't get fd on hugepage file\n");
528 if (internal_config.single_file_segments) {
529 map_offset = seg_idx * alloc_sz;
530 ret = resize_hugefile(fd, map_offset, alloc_sz, true);
534 fd_list[list_idx].count++;
537 if (ftruncate(fd, alloc_sz) < 0) {
538 RTE_LOG(DEBUG, EAL, "%s(): ftruncate() failed: %s\n",
539 __func__, strerror(errno));
542 if (internal_config.hugepage_unlink &&
543 !internal_config.in_memory) {
545 RTE_LOG(DEBUG, EAL, "%s(): unlink() failed: %s\n",
546 __func__, strerror(errno));
551 mmap_flags = MAP_SHARED | MAP_POPULATE | MAP_FIXED;
555 * map the segment and populate page tables; the kernel fills
556 * this segment with zeros if it's a new page.
558 va = mmap(addr, alloc_sz, PROT_READ | PROT_WRITE, mmap_flags, fd,
561 if (va == MAP_FAILED) {
562 RTE_LOG(DEBUG, EAL, "%s(): mmap() failed: %s\n", __func__,
564 /* mmap failed, but the previous region might have been
565 * unmapped anyway. try to remap it
570 RTE_LOG(DEBUG, EAL, "%s(): wrong mmap() address\n", __func__);
571 munmap(va, alloc_sz);
575 /* In Linux, hugetlb limitations, like cgroups, are
576 * enforced at fault time instead of at mmap(), even
577 * with the MAP_POPULATE option. The kernel will send
578 * a SIGBUS signal. To avoid being killed, save the stack
579 * environment here; if SIGBUS happens, we can jump
582 if (huge_wrap_sigsetjmp()) {
583 RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more hugepages of size %uMB\n",
584 (unsigned int)(alloc_sz >> 20));
588 /* we need to trigger a write to the page to enforce the page fault and
589 * ensure that the page is accessible to us, but we can't overwrite a value
590 * that is already there, so read the old value and write it back.
591 * the kernel populates the page with zeroes initially.
593 *(volatile int *)addr = *(volatile int *)addr;
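/* note (added): the volatile casts keep the compiler from optimizing away
 * this read-then-write, so the access (and thus the page fault) really
 * happens.
 */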
595 iova = rte_mem_virt2iova(addr);
596 if (iova == RTE_BAD_PHYS_ADDR) {
597 RTE_LOG(DEBUG, EAL, "%s(): can't get IOVA addr\n",
602 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
603 ret = get_mempolicy(&cur_socket_id, NULL, 0, addr,
604 MPOL_F_NODE | MPOL_F_ADDR);
606 RTE_LOG(DEBUG, EAL, "%s(): get_mempolicy: %s\n",
607 __func__, strerror(errno));
609 } else if (cur_socket_id != socket_id) {
611 "%s(): allocation happened on wrong socket (wanted %d, got %d)\n",
612 __func__, socket_id, cur_socket_id);
616 if (rte_socket_count() > 1)
617 RTE_LOG(DEBUG, EAL, "%s(): not checking hugepage NUMA node.\n",
622 ms->hugepage_sz = alloc_sz;
624 ms->nchannel = rte_memory_get_nchannel();
625 ms->nrank = rte_memory_get_nrank();
627 ms->socket_id = socket_id;
632 munmap(addr, alloc_sz);
635 new_addr = eal_get_virtual_area(addr, &alloc_sz, alloc_sz, 0, flags);
636 if (new_addr != addr) {
637 if (new_addr != NULL)
638 munmap(new_addr, alloc_sz);
639 /* we're leaving a hole in our virtual address space. if
640 * somebody else maps this hole now, we could accidentally
641 * overwrite it in the future.
643 RTE_LOG(CRIT, EAL, "Can't mmap holes in our virtual address space\n");
645 /* roll back the ref count */
646 if (internal_config.single_file_segments)
647 fd_list[list_idx].count--;
649 /* some codepaths will return negative fd, so exit early */
653 if (internal_config.single_file_segments) {
654 resize_hugefile(fd, map_offset, alloc_sz, false);
655 /* ignore failure, can't make it any worse */
657 /* if refcount is at zero, close the file */
658 if (fd_list[list_idx].count == 0)
659 close_hugefile(fd, path, list_idx);
661 /* only remove file if we can take out a write lock */
662 if (internal_config.hugepage_unlink == 0 &&
663 internal_config.in_memory == 0 &&
664 lock(fd, LOCK_EX) == 1)
667 fd_list[list_idx].fds[seg_idx] = -1;
673 free_seg(struct rte_memseg *ms, struct hugepage_info *hi,
674 unsigned int list_idx, unsigned int seg_idx)
681 /* erase page data */
682 memset(ms->addr, 0, ms->len);
684 if (mmap(ms->addr, ms->len, PROT_READ,
685 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) ==
687 RTE_LOG(DEBUG, EAL, "couldn't unmap page\n");
693 /* if we're using anonymous hugepages, nothing to be done */
694 if (internal_config.in_memory && !memfd_create_supported)
697 /* if we've already unlinked the page, nothing needs to be done */
698 if (!internal_config.in_memory && internal_config.hugepage_unlink)
702 memset(ms, 0, sizeof(*ms));
706 /* if we are not in single file segments mode, we're going to unmap the
707 * segment and thus drop the lock on the original fd, but the hugepage dir
708 * is now locked, so we can take out another lock without races.
710 fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
714 if (internal_config.single_file_segments) {
715 map_offset = seg_idx * ms->len;
716 if (resize_hugefile(fd, map_offset, ms->len, false))
719 if (--(fd_list[list_idx].count) == 0)
720 close_hugefile(fd, path, list_idx);
724 /* if we're able to take out a write lock, we're the last one
725 * holding onto this page.
727 if (!internal_config.in_memory) {
728 ret = lock(fd, LOCK_EX);
730 /* no one else is using this page */
735 /* closing fd will drop the lock */
737 fd_list[list_idx].fds[seg_idx] = -1;
740 memset(ms, 0, sizeof(*ms));
742 return ret < 0 ? -1 : 0;
745 struct alloc_walk_param {
746 struct hugepage_info *hi;
747 struct rte_memseg **ms;
749 unsigned int segs_allocated;
755 alloc_seg_walk(const struct rte_memseg_list *msl, void *arg)
757 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
758 struct alloc_walk_param *wa = arg;
759 struct rte_memseg_list *cur_msl;
761 int cur_idx, start_idx, j, dir_fd = -1;
762 unsigned int msl_idx, need, i;
764 if (msl->page_sz != wa->page_sz)
766 if (msl->socket_id != wa->socket)
769 page_sz = (size_t)msl->page_sz;
771 msl_idx = msl - mcfg->memsegs;
772 cur_msl = &mcfg->memsegs[msl_idx];
776 /* try finding space in memseg list */
778 /* if we require exact number of pages in a list, find them */
779 cur_idx = rte_fbarray_find_next_n_free(&cur_msl->memseg_arr, 0,
787 /* we don't require exact number of pages, so we're going to go
788 * for best-effort allocation. that means finding the biggest
789 * unused block, and going with that.
791 cur_idx = rte_fbarray_find_biggest_free(&cur_msl->memseg_arr,
796 /* adjust the size to possibly be smaller than original
797 * request, but do not allow it to be bigger.
799 cur_len = rte_fbarray_find_contig_free(&cur_msl->memseg_arr,
801 need = RTE_MIN(need, (unsigned int)cur_len);
804 /* do not allow any page allocations during the time we're allocating,
805 * because file creation and locking operations are not atomic,
806 * and we might be the first or the last ones to use a particular page,
807 * so we need to ensure atomicity of every operation.
809 * during init, we already hold a write lock, so don't try to take out
812 if (wa->hi->lock_descriptor == -1 && !internal_config.in_memory) {
813 dir_fd = open(wa->hi->hugedir, O_RDONLY);
815 RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n",
816 __func__, wa->hi->hugedir, strerror(errno));
819 /* blocking writelock */
820 if (flock(dir_fd, LOCK_EX)) {
821 RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n",
822 __func__, wa->hi->hugedir, strerror(errno));
828 for (i = 0; i < need; i++, cur_idx++) {
829 struct rte_memseg *cur;
832 cur = rte_fbarray_get(&cur_msl->memseg_arr, cur_idx);
833 map_addr = RTE_PTR_ADD(cur_msl->base_va,
836 if (alloc_seg(cur, map_addr, wa->socket, wa->hi,
838 RTE_LOG(DEBUG, EAL, "attempted to allocate %i segments, but only %i were allocated\n",
841 /* if exact number wasn't requested, stop */
846 for (j = start_idx; j < cur_idx; j++) {
847 struct rte_memseg *tmp;
848 struct rte_fbarray *arr =
849 &cur_msl->memseg_arr;
851 tmp = rte_fbarray_get(arr, j);
852 rte_fbarray_set_free(arr, j);
854 /* free_seg may attempt to create a file, which
857 if (free_seg(tmp, wa->hi, msl_idx, j))
858 RTE_LOG(DEBUG, EAL, "Cannot free page\n");
862 memset(wa->ms, 0, sizeof(*wa->ms) * wa->n_segs);
871 rte_fbarray_set_used(&cur_msl->memseg_arr, cur_idx);
874 wa->segs_allocated = i;
879 /* if we didn't allocate any segments, move on to the next list */
883 struct free_walk_param {
884 struct hugepage_info *hi;
885 struct rte_memseg *ms;
888 free_seg_walk(const struct rte_memseg_list *msl, void *arg)
890 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
891 struct rte_memseg_list *found_msl;
892 struct free_walk_param *wa = arg;
893 uintptr_t start_addr, end_addr;
894 int msl_idx, seg_idx, ret, dir_fd = -1;
896 start_addr = (uintptr_t) msl->base_va;
897 end_addr = start_addr + msl->len;
899 if ((uintptr_t)wa->ms->addr < start_addr ||
900 (uintptr_t)wa->ms->addr >= end_addr)
903 msl_idx = msl - mcfg->memsegs;
904 seg_idx = RTE_PTR_DIFF(wa->ms->addr, start_addr) / msl->page_sz;
907 found_msl = &mcfg->memsegs[msl_idx];
909 /* do not allow any page allocations during the time we're freeing,
910 * because file creation and locking operations are not atomic,
911 * and we might be the first or the last ones to use a particular page,
912 * so we need to ensure atomicity of every operation.
914 * during init, we already hold a write lock, so don't try to take out
917 if (wa->hi->lock_descriptor == -1 && !internal_config.in_memory) {
918 dir_fd = open(wa->hi->hugedir, O_RDONLY);
920 RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n",
921 __func__, wa->hi->hugedir, strerror(errno));
924 /* blocking writelock */
925 if (flock(dir_fd, LOCK_EX)) {
926 RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n",
927 __func__, wa->hi->hugedir, strerror(errno));
933 found_msl->version++;
935 rte_fbarray_set_free(&found_msl->memseg_arr, seg_idx);
937 ret = free_seg(wa->ms, wa->hi, msl_idx, seg_idx);
949 eal_memalloc_alloc_seg_bulk(struct rte_memseg **ms, int n_segs, size_t page_sz,
950 int socket, bool exact)
953 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
954 bool have_numa = false;
956 struct bitmask *oldmask;
958 struct alloc_walk_param wa;
959 struct hugepage_info *hi = NULL;
961 memset(&wa, 0, sizeof(wa));
963 /* dynamic allocation not supported in legacy mode */
964 if (internal_config.legacy_mem)
967 for (i = 0; i < (int) RTE_DIM(internal_config.hugepage_info); i++) {
969 internal_config.hugepage_info[i].hugepage_sz) {
970 hi = &internal_config.hugepage_info[i];
975 RTE_LOG(ERR, EAL, "%s(): can't find relevant hugepage_info entry\n",
980 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
982 oldmask = numa_allocate_nodemask();
983 prepare_numa(&oldpolicy, oldmask, socket);
992 wa.page_sz = page_sz;
994 wa.segs_allocated = 0;
996 /* memalloc is locked, so it's safe to use thread-unsafe version */
997 ret = rte_memseg_list_walk_thread_unsafe(alloc_seg_walk, &wa);
999 RTE_LOG(ERR, EAL, "%s(): couldn't find suitable memseg_list\n",
1002 } else if (ret > 0) {
1003 ret = (int)wa.segs_allocated;
1006 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
1008 restore_numa(&oldpolicy, oldmask);
1014 eal_memalloc_alloc_seg(size_t page_sz, int socket)
1016 struct rte_memseg *ms;
1017 if (eal_memalloc_alloc_seg_bulk(&ms, 1, page_sz, socket, true) < 0)
1019 /* return pointer to newly allocated memseg */
1024 eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs)
1028 /* dynamic free not supported in legacy mode */
1029 if (internal_config.legacy_mem)
1032 for (seg = 0; seg < n_segs; seg++) {
1033 struct rte_memseg *cur = ms[seg];
1034 struct hugepage_info *hi = NULL;
1035 struct free_walk_param wa;
1038 /* if this page is marked as unfreeable, fail */
1039 if (cur->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE) {
1040 RTE_LOG(DEBUG, EAL, "Page is not allowed to be freed\n");
1045 memset(&wa, 0, sizeof(wa));
1047 for (i = 0; i < (int)RTE_DIM(internal_config.hugepage_info);
1049 hi = &internal_config.hugepage_info[i];
1050 if (cur->hugepage_sz == hi->hugepage_sz)
1053 if (i == (int)RTE_DIM(internal_config.hugepage_info)) {
1054 RTE_LOG(ERR, EAL, "Can't find relevant hugepage_info entry\n");
1062 /* memalloc is locked, so it's safe to use thread-unsafe version
1064 walk_res = rte_memseg_list_walk_thread_unsafe(free_seg_walk,
1069 RTE_LOG(ERR, EAL, "Couldn't find memseg list\n");
1076 eal_memalloc_free_seg(struct rte_memseg *ms)
1078 /* dynamic free not supported in legacy mode */
1079 if (internal_config.legacy_mem)
1082 return eal_memalloc_free_seg_bulk(&ms, 1);
1086 sync_chunk(struct rte_memseg_list *primary_msl,
1087 struct rte_memseg_list *local_msl, struct hugepage_info *hi,
1088 unsigned int msl_idx, bool used, int start, int end)
1090 struct rte_fbarray *l_arr, *p_arr;
1091 int i, ret, chunk_len, diff_len;
1093 l_arr = &local_msl->memseg_arr;
1094 p_arr = &primary_msl->memseg_arr;
1096 /* we need to aggregate allocations/deallocations into bigger chunks,
1097 * as we don't want to spam the user with per-page callbacks.
1099 * to avoid any potential issues, we also want to trigger
1100 * deallocation callbacks *before* we actually deallocate
1101 * memory, so that the user application could wrap up its use
1102 * before it goes away.
1105 chunk_len = end - start;
1107 /* find how many contiguous pages we can map/unmap for this chunk */
1109 rte_fbarray_find_contig_free(l_arr, start) :
1110 rte_fbarray_find_contig_used(l_arr, start);
1112 /* has to be at least one page */
1116 diff_len = RTE_MIN(chunk_len, diff_len);
1118 /* if we are freeing memory, notify the application */
1120 struct rte_memseg *ms;
1122 size_t len, page_sz;
1124 ms = rte_fbarray_get(l_arr, start);
1125 start_va = ms->addr;
1126 page_sz = (size_t)primary_msl->page_sz;
1127 len = page_sz * diff_len;
1129 eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
1133 for (i = 0; i < diff_len; i++) {
1134 struct rte_memseg *p_ms, *l_ms;
1135 int seg_idx = start + i;
1137 l_ms = rte_fbarray_get(l_arr, seg_idx);
1138 p_ms = rte_fbarray_get(p_arr, seg_idx);
1140 if (l_ms == NULL || p_ms == NULL)
1144 ret = alloc_seg(l_ms, p_ms->addr,
1145 p_ms->socket_id, hi,
1149 rte_fbarray_set_used(l_arr, seg_idx);
1151 ret = free_seg(l_ms, hi, msl_idx, seg_idx);
1152 rte_fbarray_set_free(l_arr, seg_idx);
1158 /* if we just allocated memory, notify the application */
1160 struct rte_memseg *ms;
1162 size_t len, page_sz;
1164 ms = rte_fbarray_get(l_arr, start);
1165 start_va = ms->addr;
1166 page_sz = (size_t)primary_msl->page_sz;
1167 len = page_sz * diff_len;
1169 eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC,
1173 /* calculate how much we can advance until next chunk */
1175 rte_fbarray_find_contig_used(l_arr, start) :
1176 rte_fbarray_find_contig_free(l_arr, start);
1177 ret = RTE_MIN(chunk_len, diff_len);
1183 sync_status(struct rte_memseg_list *primary_msl,
1184 struct rte_memseg_list *local_msl, struct hugepage_info *hi,
1185 unsigned int msl_idx, bool used)
1187 struct rte_fbarray *l_arr, *p_arr;
1188 int p_idx, l_chunk_len, p_chunk_len, ret;
1191 /* this is a little bit tricky, but the basic idea is - walk both lists
1192 * and spot any places where there are discrepancies. walking both lists
1193 * and noting discrepancies in a single go is a hard problem, so we do
1194 * it in two passes - first we spot any places where allocated segments
1195 * mismatch (i.e. ensure that everything that's allocated in the primary
1196 * is also allocated in the secondary), and then we do it by looking at
1197 * free segments instead.
1199 * we also need to aggregate changes into chunks, as we have to call
1200 * callbacks per allocation, not per page.
1202 l_arr = &local_msl->memseg_arr;
1203 p_arr = &primary_msl->memseg_arr;
1206 p_idx = rte_fbarray_find_next_used(p_arr, 0);
1208 p_idx = rte_fbarray_find_next_free(p_arr, 0);
1210 while (p_idx >= 0) {
1211 int next_chunk_search_idx;
1214 p_chunk_len = rte_fbarray_find_contig_used(p_arr,
1216 l_chunk_len = rte_fbarray_find_contig_used(l_arr,
1219 p_chunk_len = rte_fbarray_find_contig_free(p_arr,
1221 l_chunk_len = rte_fbarray_find_contig_free(l_arr,
1224 /* best case scenario - no differences (or bigger, which will be
1225 * fixed during next iteration), look for next chunk
1227 if (l_chunk_len >= p_chunk_len) {
1228 next_chunk_search_idx = p_idx + p_chunk_len;
1232 /* if both chunks start at the same point, skip parts we know
1233 * are identical, and sync the rest. each call to sync_chunk
1234 * will only sync contiguous segments, so we need to call this
1235 * until we are sure there are no more differences in this
1238 start = p_idx + l_chunk_len;
1239 end = p_idx + p_chunk_len;
1241 ret = sync_chunk(primary_msl, local_msl, hi, msl_idx,
1244 } while (start < end && ret >= 0);
1245 /* if ret is negative, something went wrong */
1249 next_chunk_search_idx = p_idx + p_chunk_len;
1251 /* skip to end of this chunk */
1253 p_idx = rte_fbarray_find_next_used(p_arr,
1254 next_chunk_search_idx);
1256 p_idx = rte_fbarray_find_next_free(p_arr,
1257 next_chunk_search_idx);
1264 sync_existing(struct rte_memseg_list *primary_msl,
1265 struct rte_memseg_list *local_msl, struct hugepage_info *hi,
1266 unsigned int msl_idx)
1270 /* do not allow any page allocations during the time we're allocating,
1271 * because file creation and locking operations are not atomic,
1272 * and we might be the first or the last ones to use a particular page,
1273 * so we need to ensure atomicity of every operation.
1275 dir_fd = open(hi->hugedir, O_RDONLY);
1277 RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n", __func__,
1278 hi->hugedir, strerror(errno));
1281 /* blocking writelock */
1282 if (flock(dir_fd, LOCK_EX)) {
1283 RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n", __func__,
1284 hi->hugedir, strerror(errno));
1289 /* ensure all allocated space is the same in both lists */
1290 ret = sync_status(primary_msl, local_msl, hi, msl_idx, true);
1294 /* ensure all unallocated space is the same in both lists */
1295 ret = sync_status(primary_msl, local_msl, hi, msl_idx, false);
1299 /* update version number */
1300 local_msl->version = primary_msl->version;
1311 sync_walk(const struct rte_memseg_list *msl, void *arg __rte_unused)
1313 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1314 struct rte_memseg_list *primary_msl, *local_msl;
1315 struct hugepage_info *hi = NULL;
1322 msl_idx = msl - mcfg->memsegs;
1323 primary_msl = &mcfg->memsegs[msl_idx];
1324 local_msl = &local_memsegs[msl_idx];
1326 for (i = 0; i < RTE_DIM(internal_config.hugepage_info); i++) {
1328 internal_config.hugepage_info[i].hugepage_sz;
1329 uint64_t msl_sz = primary_msl->page_sz;
1330 if (msl_sz == cur_sz) {
1331 hi = &internal_config.hugepage_info[i];
1336 RTE_LOG(ERR, EAL, "Can't find relevant hugepage_info entry\n");
1340 /* if versions don't match, synchronize everything */
1341 if (local_msl->version != primary_msl->version &&
1342 sync_existing(primary_msl, local_msl, hi, msl_idx))
1349 eal_memalloc_sync_with_primary(void)
1351 /* nothing to be done in primary */
1352 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1355 /* memalloc is locked, so it's safe to call thread-unsafe version */
1356 if (rte_memseg_list_walk_thread_unsafe(sync_walk, NULL))
1362 secondary_msl_create_walk(const struct rte_memseg_list *msl,
1363 void *arg __rte_unused)
1365 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1366 struct rte_memseg_list *primary_msl, *local_msl;
1367 char name[PATH_MAX];
1373 msl_idx = msl - mcfg->memsegs;
1374 primary_msl = &mcfg->memsegs[msl_idx];
1375 local_msl = &local_memsegs[msl_idx];
1377 /* create distinct fbarrays for each secondary */
1378 snprintf(name, RTE_FBARRAY_NAME_LEN, "%s_%i",
1379 primary_msl->memseg_arr.name, getpid());
1381 ret = rte_fbarray_init(&local_msl->memseg_arr, name,
1382 primary_msl->memseg_arr.len,
1383 primary_msl->memseg_arr.elt_sz);
1385 RTE_LOG(ERR, EAL, "Cannot initialize local memory map\n");
1388 local_msl->base_va = primary_msl->base_va;
1389 local_msl->len = primary_msl->len;
1395 alloc_list(int list_idx, int len)
1400 /* single-file segments mode does not need fd list */
1401 if (!internal_config.single_file_segments) {
1402 /* ensure we have space to store fd per each possible segment */
1403 data = malloc(sizeof(int) * len);
1405 RTE_LOG(ERR, EAL, "Unable to allocate space for file descriptors\n");
1408 /* set all fd's as invalid */
1409 for (i = 0; i < len; i++)
1411 fd_list[list_idx].fds = data;
1412 fd_list[list_idx].len = len;
1414 fd_list[list_idx].fds = NULL;
1415 fd_list[list_idx].len = 0;
1418 fd_list[list_idx].count = 0;
1419 fd_list[list_idx].memseg_list_fd = -1;
1425 fd_list_create_walk(const struct rte_memseg_list *msl,
1426 void *arg __rte_unused)
1428 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1435 msl_idx = msl - mcfg->memsegs;
1436 len = msl->memseg_arr.len;
1438 return alloc_list(msl_idx, len);
1442 eal_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd)
1444 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1446 /* single file segments mode doesn't support individual segment fd's */
1447 if (internal_config.single_file_segments)
1450 /* if list is not allocated, allocate it */
1451 if (fd_list[list_idx].len == 0) {
1452 int len = mcfg->memsegs[list_idx].memseg_arr.len;
1454 if (alloc_list(list_idx, len) < 0)
1457 fd_list[list_idx].fds[seg_idx] = fd;
1463 eal_memalloc_set_seg_list_fd(int list_idx, int fd)
1465 /* non-single file segment mode doesn't support segment list fd's */
1466 if (!internal_config.single_file_segments)
1469 fd_list[list_idx].memseg_list_fd = fd;
1475 eal_memalloc_get_seg_fd(int list_idx, int seg_idx)
1479 if (internal_config.in_memory || internal_config.no_hugetlbfs) {
1480 #ifndef MEMFD_SUPPORTED
1481 /* in in-memory or no-huge mode, we rely on memfd support */
1484 /* memfd supported, but hugetlbfs memfd may not be */
1485 if (!internal_config.no_hugetlbfs && !memfd_create_supported)
1489 if (internal_config.single_file_segments) {
1490 fd = fd_list[list_idx].memseg_list_fd;
1491 } else if (fd_list[list_idx].len == 0) {
1492 /* list not initialized */
1495 fd = fd_list[list_idx].fds[seg_idx];
1503 test_memfd_create(void)
1505 #ifdef MEMFD_SUPPORTED
1507 for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
1508 uint64_t pagesz = internal_config.hugepage_info[i].hugepage_sz;
1509 int pagesz_flag = pagesz_flags(pagesz);
1512 flags = pagesz_flag | RTE_MFD_HUGETLB;
1513 int fd = memfd_create("test", flags);
1515 /* we failed - let memalloc know this isn't working */
1516 if (errno == EINVAL) {
1517 memfd_create_supported = 0;
1518 return 0; /* not supported */
1521 /* we got some other error - something's wrong */
1522 return -1; /* error */
1525 return 1; /* supported */
1528 return 0; /* not supported */
1532 eal_memalloc_get_seg_fd_offset(int list_idx, int seg_idx, size_t *offset)
1534 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1536 if (internal_config.in_memory || internal_config.no_hugetlbfs) {
1537 #ifndef MEMFD_SUPPORTED
1538 /* in in-memory or no-huge mode, we rely on memfd support */
1541 /* memfd supported, but hugetlbfs memfd may not be */
1542 if (!internal_config.no_hugetlbfs && !memfd_create_supported)
1546 if (internal_config.single_file_segments) {
1547 size_t pgsz = mcfg->memsegs[list_idx].page_sz;
1549 /* segment not active? */
1550 if (fd_list[list_idx].memseg_list_fd < 0)
1552 *offset = pgsz * seg_idx;
1554 /* fd_list not initialized? */
1555 if (fd_list[list_idx].len == 0)
1558 /* segment not active? */
1559 if (fd_list[list_idx].fds[seg_idx] < 0)
1567 eal_memalloc_init(void)
1569 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
1570 if (rte_memseg_list_walk(secondary_msl_create_walk, NULL) < 0)
1572 if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
1573 internal_config.in_memory) {
1574 int mfd_res = test_memfd_create();
1577 RTE_LOG(ERR, EAL, "Unable to check if memfd is supported\n");
1581 RTE_LOG(DEBUG, EAL, "Using memfd for anonymous memory\n");
1583 RTE_LOG(INFO, EAL, "Using memfd is not supported, falling back to anonymous hugepages\n");
1585 /* we only support single-file segments mode with in-memory mode
1586 * if we support hugetlbfs with memfd_create. this code will
1589 if (internal_config.single_file_segments &&
1591 RTE_LOG(ERR, EAL, "Single-file segments mode cannot be used without memfd support\n");
1594 /* this cannot ever happen but better safe than sorry */
1595 if (!anonymous_hugepages_supported) {
1596 RTE_LOG(ERR, EAL, "Using anonymous memory is not supported\n");
1601 /* initialize all of the fd lists */
1602 if (rte_memseg_list_walk(fd_list_create_walk, NULL))