1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017-2018 Intel Corporation
14 #include <sys/types.h>
16 #include <sys/queue.h>
21 #include <sys/ioctl.h>
25 #ifdef F_ADD_SEALS /* if file sealing is supported, so is memfd */
26 #include <linux/memfd.h>
27 #define MEMFD_SUPPORTED
29 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
33 #include <linux/falloc.h>
34 #include <linux/mman.h> /* for hugetlb-related mmap flags */
36 #include <rte_common.h>
39 #include <rte_errno.h>
40 #include <rte_memory.h>
41 #include <rte_spinlock.h>
43 #include "eal_filesystem.h"
44 #include "eal_internal_cfg.h"
45 #include "eal_memalloc.h"
46 #include "eal_memcfg.h"
47 #include "eal_private.h"
49 const int anonymous_hugepages_supported =
52 #define RTE_MAP_HUGE_SHIFT MAP_HUGE_SHIFT
55 #define RTE_MAP_HUGE_SHIFT 26
59 * we've already checked memfd support at compile-time, but we also need to
60 * check if we can create hugepage files with memfd.
62 * also, this is not a constant, because while we may be *compiled* with memfd
63 * hugetlbfs support, we might not be *running* on a system that supports memfd
64 * and/or memfd with hugetlbfs, so we need to be able to adjust this flag at
65 * runtime, and fall back to anonymous memory.
67 static int memfd_create_supported =
70 #define RTE_MFD_HUGETLB MFD_HUGETLB
73 #define RTE_MFD_HUGETLB 4U
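/*
 * Note (added for clarity, assumption based on current kernel headers): 4U
 * mirrors the kernel's own MFD_HUGETLB value, so the flag can still be
 * passed to memfd_create() at runtime even when building against headers
 * that predate it.
 */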
77 * not all kernel versions support fallocate on hugetlbfs, so fall back to
78 * ftruncate and disallow deallocation if fallocate is not supported.
80 static int fallocate_supported = -1; /* -1: unknown, 0: not supported, 1: supported */
83 * we have two modes: single-file segments mode and file-per-page mode.
85 * for single-file segments, we use memseg_list_fd to store the segment fd,
86 * while the fds[] will not be allocated, and len will be set to 0.
88 * for file-per-page mode, each page will have its own fd, so 'memseg_list_fd'
89 * will be invalid (set to -1), and we'll use 'fds' to keep track of page fd's.
91 * we cannot know how many pages a system will have in advance, but we do know
92 * that they come in lists, and we know the lengths of these lists. so, simply store
93 * a malloc'd array of fd's indexed by list and segment index.
95 * they will be initialized at startup, and filled as we allocate/deallocate
99 int *fds; /**< dynamically allocated array of segment lock fd's */
100 int memseg_list_fd; /**< memseg list fd */
101 int len; /**< total length of the array */
102 int count; /**< number of entries used in the array */
103 } fd_list[RTE_MAX_MEMSEG_LISTS];
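/*
 * Illustrative sketch, not part of the original code: how an fd is looked
 * up for a given (list_idx, seg_idx) pair under the two modes described
 * above. The helper name is hypothetical; the real lookups are done inline
 * in get_seg_fd() and eal_memalloc_get_seg_fd() below.
 */
static int __rte_unused
example_lookup_seg_fd(unsigned int list_idx, unsigned int seg_idx)
{
	const struct internal_config *internal_conf =
			eal_get_internal_configuration();

	if (internal_conf->single_file_segments)
		/* one fd for the entire memseg list */
		return fd_list[list_idx].memseg_list_fd;
	if (fd_list[list_idx].len == 0)
		/* fd list not allocated for this memseg list yet */
		return -1;
	/* one fd per page */
	return fd_list[list_idx].fds[seg_idx];
}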
105 /** local copy of a memory map, used to synchronize memory hotplug in MP */
106 static struct rte_memseg_list local_memsegs[RTE_MAX_MEMSEG_LISTS];
108 static sigjmp_buf huge_jmpenv;
110 static void huge_sigbus_handler(int signo __rte_unused)
112 siglongjmp(huge_jmpenv, 1);
115 /* Put sigsetjmp into a wrapper function to avoid a compile error. Any non-volatile,
116 * non-static local variable in the stack frame calling sigsetjmp might be
117 * clobbered by a call to longjmp.
119 static int huge_wrap_sigsetjmp(void)
121 return sigsetjmp(huge_jmpenv, 1);
124 static struct sigaction huge_action_old;
125 static int huge_need_recover;
128 huge_register_sigbus(void)
131 struct sigaction action;
134 sigaddset(&mask, SIGBUS);
136 action.sa_mask = mask;
137 action.sa_handler = huge_sigbus_handler;
139 huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);
143 huge_recover_sigbus(void)
145 if (huge_need_recover) {
146 sigaction(SIGBUS, &huge_action_old, NULL);
147 huge_need_recover = 0;
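/*
 * Illustrative sketch, not part of the original code: the intended usage of
 * the SIGBUS machinery above. alloc_seg() below follows this shape when it
 * faults in a freshly mapped hugepage; the function name here is
 * hypothetical.
 */
static int __rte_unused
example_touch_page(volatile int *addr)
{
	huge_register_sigbus();
	if (huge_wrap_sigsetjmp()) {
		/* we arrive here via siglongjmp() from huge_sigbus_handler */
		huge_recover_sigbus();
		return -1;
	}
	/* force a page fault; SIGBUS fires if no hugepage backs the mapping */
	*addr = *addr;
	huge_recover_sigbus();
	return 0;
}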
151 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
156 /* Check if kernel supports NUMA. */
157 if (numa_available() != 0) {
158 RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
165 prepare_numa(int *oldpolicy, struct bitmask *oldmask, int socket_id)
167 RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
168 if (get_mempolicy(oldpolicy, oldmask->maskp,
169 oldmask->size + 1, 0, 0) < 0) {
171 "Failed to get current mempolicy: %s. "
172 "Assuming MPOL_DEFAULT.\n", strerror(errno));
173 *oldpolicy = MPOL_DEFAULT;
176 "Setting policy MPOL_PREFERRED for socket %d\n",
178 numa_set_preferred(socket_id);
182 restore_numa(int *oldpolicy, struct bitmask *oldmask)
185 "Restoring previous memory policy: %d\n", *oldpolicy);
186 if (*oldpolicy == MPOL_DEFAULT) {
187 numa_set_localalloc();
188 } else if (set_mempolicy(*oldpolicy, oldmask->maskp,
189 oldmask->size + 1) < 0) {
190 RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
192 numa_set_localalloc();
194 numa_free_cpumask(oldmask);
199 * uses fstat to report the size of a file on disk
202 get_file_size(int fd)
205 if (fstat(fd, &st) < 0)
211 pagesz_flags(uint64_t page_sz)
213 /* as per the mmap() manpage, the huge page size is encoded as log2(page size)
214 * shifted left by MAP_HUGE_SHIFT
216 int log2 = rte_log2_u64(page_sz);
217 return log2 << RTE_MAP_HUGE_SHIFT;
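/*
 * Worked example (illustrative): a 2 MiB hugepage gives rte_log2_u64() == 21,
 * so pagesz_flags() returns 21 << 26, matching the kernel's MAP_HUGE_2MB /
 * MFD_HUGE_2MB encoding; a 1 GiB page yields 30 << 26 (MAP_HUGE_1GB).
 */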
220 /* returns 1 on successful lock, 0 on unsuccessful lock, -1 on error */
221 static int lock(int fd, int type)
225 /* flock may be interrupted */
227 ret = flock(fd, type | LOCK_NB);
228 } while (ret && errno == EINTR);
230 if (ret && errno == EWOULDBLOCK) {
234 RTE_LOG(ERR, EAL, "%s(): error calling flock(): %s\n",
235 __func__, strerror(errno));
238 /* lock was successful */
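/*
 * Illustrative usage, not part of the original flow here: alloc_seg() and
 * free_seg() below call lock() like this to decide whether a hugepage file
 * can be removed - the exclusive lock succeeds only when no other process
 * still holds a shared lock on the file:
 *
 *	if (lock(fd, LOCK_EX) == 1)
 *		unlink(path);
 */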
243 get_seg_memfd(struct hugepage_info *hi __rte_unused,
244 unsigned int list_idx __rte_unused,
245 unsigned int seg_idx __rte_unused)
247 #ifdef MEMFD_SUPPORTED
249 char segname[250]; /* as per manpage, limit is 249 bytes plus null */
251 int flags = RTE_MFD_HUGETLB | pagesz_flags(hi->hugepage_sz);
252 const struct internal_config *internal_conf =
253 eal_get_internal_configuration();
255 if (internal_conf->single_file_segments) {
256 fd = fd_list[list_idx].memseg_list_fd;
259 snprintf(segname, sizeof(segname), "seg_%i", list_idx);
260 fd = memfd_create(segname, flags);
262 RTE_LOG(DEBUG, EAL, "%s(): memfd create failed: %s\n",
263 __func__, strerror(errno));
266 fd_list[list_idx].memseg_list_fd = fd;
269 fd = fd_list[list_idx].fds[seg_idx];
272 snprintf(segname, sizeof(segname), "seg_%i-%i",
274 fd = memfd_create(segname, flags);
276 RTE_LOG(DEBUG, EAL, "%s(): memfd create failed: %s\n",
277 __func__, strerror(errno));
280 fd_list[list_idx].fds[seg_idx] = fd;
289 get_seg_fd(char *path, int buflen, struct hugepage_info *hi,
290 unsigned int list_idx, unsigned int seg_idx)
293 const struct internal_config *internal_conf =
294 eal_get_internal_configuration();
296 /* for in-memory mode, we only make it here when we're sure we support
297 * memfd, and this is a special case.
299 if (internal_conf->in_memory)
300 return get_seg_memfd(hi, list_idx, seg_idx);
302 if (internal_conf->single_file_segments) {
303 /* create a hugepage file path */
304 eal_get_hugefile_path(path, buflen, hi->hugedir, list_idx);
306 fd = fd_list[list_idx].memseg_list_fd;
309 fd = open(path, O_CREAT | O_RDWR, 0600);
311 RTE_LOG(ERR, EAL, "%s(): open failed: %s\n",
312 __func__, strerror(errno));
315 /* take out a read lock and keep it indefinitely */
316 if (lock(fd, LOCK_SH) < 0) {
317 RTE_LOG(ERR, EAL, "%s(): lock failed: %s\n",
318 __func__, strerror(errno));
322 fd_list[list_idx].memseg_list_fd = fd;
325 /* create a hugepage file path */
326 eal_get_hugefile_path(path, buflen, hi->hugedir,
327 list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);
329 fd = fd_list[list_idx].fds[seg_idx];
332 /* A primary process is the only one creating these
333 * files. If there is a leftover that was not cleaned
334 * by clear_hugedir(), we must *now* make sure to drop
335 * the file or we will remap old stuff while the rest
336 * of the code is built on the assumption that a new
339 if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
340 unlink(path) == -1 &&
342 RTE_LOG(DEBUG, EAL, "%s(): could not remove '%s': %s\n",
343 __func__, path, strerror(errno));
347 fd = open(path, O_CREAT | O_RDWR, 0600);
349 RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n",
350 __func__, strerror(errno));
353 /* take out a read lock */
354 if (lock(fd, LOCK_SH) < 0) {
355 RTE_LOG(ERR, EAL, "%s(): lock failed: %s\n",
356 __func__, strerror(errno));
360 fd_list[list_idx].fds[seg_idx] = fd;
367 resize_hugefile_in_memory(int fd, uint64_t fa_offset,
368 uint64_t page_sz, bool grow)
370 int flags = grow ? 0 : FALLOC_FL_PUNCH_HOLE |
374 /* grow or shrink the file */
375 ret = fallocate(fd, flags, fa_offset, page_sz);
378 RTE_LOG(DEBUG, EAL, "%s(): fallocate() failed: %s\n",
387 resize_hugefile_in_filesystem(int fd, uint64_t fa_offset, uint64_t page_sz,
393 if (fallocate_supported == 0) {
394 /* we cannot deallocate memory if fallocate() is not
395 * supported, and hugepage file is already locked at
396 * creation, so no further synchronization needed.
400 RTE_LOG(DEBUG, EAL, "%s(): fallocate not supported, not freeing page back to the system\n",
404 uint64_t new_size = fa_offset + page_sz;
405 uint64_t cur_size = get_file_size(fd);
407 /* fallocate isn't supported, fall back to ftruncate */
408 if (new_size > cur_size &&
409 ftruncate(fd, new_size) < 0) {
410 RTE_LOG(DEBUG, EAL, "%s(): ftruncate() failed: %s\n",
411 __func__, strerror(errno));
415 int flags = grow ? 0 : FALLOC_FL_PUNCH_HOLE |
420 * technically, it is perfectly safe for both primary
421 * and secondary to grow and shrink the page files:
422 * growing the file repeatedly has no effect because
423 * a page can only be allocated once, while mmap ensures
424 * that secondaries hold on to the page even after the
425 * page itself is removed from the filesystem.
427 * however, leaving growing/shrinking to the primary
428 * tends to expose bugs in fdlist page count handling,
429 * so leave this here just in case.
431 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
434 /* grow or shrink the file */
435 ret = fallocate(fd, flags, fa_offset, page_sz);
438 if (fallocate_supported == -1 &&
440 RTE_LOG(ERR, EAL, "%s(): fallocate() not supported, hugepage deallocation will be disabled\n",
443 fallocate_supported = 0;
445 RTE_LOG(DEBUG, EAL, "%s(): fallocate() failed: %s\n",
451 fallocate_supported = 1;
459 close_hugefile(int fd, char *path, int list_idx)
461 const struct internal_config *internal_conf =
462 eal_get_internal_configuration();
464 * primary process must unlink the file, but only when not in in-memory
465 * mode (as in that case there is no file to unlink).
467 if (!internal_conf->in_memory &&
468 rte_eal_process_type() == RTE_PROC_PRIMARY &&
470 RTE_LOG(ERR, EAL, "%s(): unlinking '%s' failed: %s\n",
471 __func__, path, strerror(errno));
474 fd_list[list_idx].memseg_list_fd = -1;
478 resize_hugefile(int fd, uint64_t fa_offset, uint64_t page_sz, bool grow)
480 /* in-memory mode is a special case, because we can be sure that
481 * fallocate() is supported.
483 const struct internal_config *internal_conf =
484 eal_get_internal_configuration();
486 if (internal_conf->in_memory)
487 return resize_hugefile_in_memory(fd, fa_offset,
490 return resize_hugefile_in_filesystem(fd, fa_offset, page_sz,
495 alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
496 struct hugepage_info *hi, unsigned int list_idx,
497 unsigned int seg_idx)
499 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
500 int cur_socket_id = 0;
511 const struct internal_config *internal_conf =
512 eal_get_internal_configuration();
514 alloc_sz = hi->hugepage_sz;
516 /* these are checked at init, but code analyzers don't know that */
517 if (internal_conf->in_memory && !anonymous_hugepages_supported) {
518 RTE_LOG(ERR, EAL, "Anonymous hugepages not supported, in-memory mode cannot allocate memory\n");
521 if (internal_conf->in_memory && !memfd_create_supported &&
522 internal_conf->single_file_segments) {
523 RTE_LOG(ERR, EAL, "Single-file segments are not supported without memfd support\n");
527 /* in-memory without memfd is a special case */
530 if (internal_conf->in_memory && !memfd_create_supported) {
531 const int in_memory_flags = MAP_HUGETLB | MAP_FIXED |
532 MAP_PRIVATE | MAP_ANONYMOUS;
535 pagesz_flag = pagesz_flags(alloc_sz);
537 mmap_flags = in_memory_flags | pagesz_flag;
539 /* single-file segments codepath will never be active
540 * here because in-memory mode is incompatible with the
541 * fallback path, and it's stopped at EAL initialization
546 /* takes out a read lock on segment or segment list */
547 fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
549 RTE_LOG(ERR, EAL, "Couldn't get fd on hugepage file\n");
553 if (internal_conf->single_file_segments) {
554 map_offset = seg_idx * alloc_sz;
555 ret = resize_hugefile(fd, map_offset, alloc_sz, true);
559 fd_list[list_idx].count++;
562 if (ftruncate(fd, alloc_sz) < 0) {
563 RTE_LOG(DEBUG, EAL, "%s(): ftruncate() failed: %s\n",
564 __func__, strerror(errno));
567 if (internal_conf->hugepage_unlink &&
568 !internal_conf->in_memory) {
570 RTE_LOG(DEBUG, EAL, "%s(): unlink() failed: %s\n",
571 __func__, strerror(errno));
576 mmap_flags = MAP_SHARED | MAP_POPULATE | MAP_FIXED;
579 huge_register_sigbus();
582 * map the segment, and populate page tables, the kernel fills
583 * this segment with zeros if it's a new page.
585 va = mmap(addr, alloc_sz, PROT_READ | PROT_WRITE, mmap_flags, fd,
588 if (va == MAP_FAILED) {
589 RTE_LOG(DEBUG, EAL, "%s(): mmap() failed: %s\n", __func__,
591 /* mmap failed, but the previous region might have been
592 * unmapped anyway. try to remap it
597 RTE_LOG(DEBUG, EAL, "%s(): wrong mmap() address\n", __func__);
598 munmap(va, alloc_sz);
602 /* In linux, hugetlb limitations, like cgroup, are
603 * enforced at fault time instead of mmap(), even
604 * with the option of MAP_POPULATE. Kernel will send
605 * a SIGBUS signal. To avoid to be killed, save stack
606 * environment here, if SIGBUS happens, we can jump
609 if (huge_wrap_sigsetjmp()) {
610 RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more hugepages of size %uMB\n",
611 (unsigned int)(alloc_sz >> 20));
615 /* we need to trigger a write to the page to force a page fault and
616 * ensure that the page is accessible to us, but we can't overwrite the value
617 * that is already there, so read the old value and write it back.
618 * the kernel populates the page with zeroes initially.
620 *(volatile int *)addr = *(volatile int *)addr;
622 iova = rte_mem_virt2iova(addr);
623 if (iova == RTE_BAD_PHYS_ADDR) {
624 RTE_LOG(DEBUG, EAL, "%s(): can't get IOVA addr\n",
629 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
631 * If the kernel has been built without NUMA support, get_mempolicy()
632 * will return an error. If check_numa() returns false, memory
633 * allocation is not NUMA aware and the socket_id should not be
637 ret = get_mempolicy(&cur_socket_id, NULL, 0, addr,
638 MPOL_F_NODE | MPOL_F_ADDR);
640 RTE_LOG(DEBUG, EAL, "%s(): get_mempolicy: %s\n",
641 __func__, strerror(errno));
643 } else if (cur_socket_id != socket_id) {
645 "%s(): allocation happened on wrong socket (wanted %d, got %d)\n",
646 __func__, socket_id, cur_socket_id);
651 if (rte_socket_count() > 1)
652 RTE_LOG(DEBUG, EAL, "%s(): not checking hugepage NUMA node.\n",
656 huge_recover_sigbus();
659 ms->hugepage_sz = alloc_sz;
661 ms->nchannel = rte_memory_get_nchannel();
662 ms->nrank = rte_memory_get_nrank();
664 ms->socket_id = socket_id;
669 munmap(addr, alloc_sz);
671 huge_recover_sigbus();
672 flags = EAL_RESERVE_FORCE_ADDRESS;
673 new_addr = eal_get_virtual_area(addr, &alloc_sz, alloc_sz, 0, flags);
674 if (new_addr != addr) {
675 if (new_addr != NULL)
676 munmap(new_addr, alloc_sz);
677 /* we're leaving a hole in our virtual address space. if
678 * somebody else maps this hole now, we could accidentally
679 * overwrite it in the future.
681 RTE_LOG(CRIT, EAL, "Can't mmap holes in our virtual address space\n");
683 /* roll back the ref count */
684 if (internal_conf->single_file_segments)
685 fd_list[list_idx].count--;
687 /* some codepaths will return negative fd, so exit early */
691 if (internal_conf->single_file_segments) {
692 resize_hugefile(fd, map_offset, alloc_sz, false);
693 /* ignore failure, can't make it any worse */
695 /* if refcount is at zero, close the file */
696 if (fd_list[list_idx].count == 0)
697 close_hugefile(fd, path, list_idx);
699 /* only remove file if we can take out a write lock */
700 if (internal_conf->hugepage_unlink == 0 &&
701 internal_conf->in_memory == 0 &&
702 lock(fd, LOCK_EX) == 1)
705 fd_list[list_idx].fds[seg_idx] = -1;
711 free_seg(struct rte_memseg *ms, struct hugepage_info *hi,
712 unsigned int list_idx, unsigned int seg_idx)
717 const struct internal_config *internal_conf =
718 eal_get_internal_configuration();
720 /* erase page data */
721 memset(ms->addr, 0, ms->len);
723 if (mmap(ms->addr, ms->len, PROT_NONE,
724 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) ==
726 RTE_LOG(DEBUG, EAL, "couldn't unmap page\n");
730 eal_mem_set_dump(ms->addr, ms->len, false);
732 /* if we're using anonymous hugepages, nothing to be done */
733 if (internal_conf->in_memory && !memfd_create_supported) {
734 memset(ms, 0, sizeof(*ms));
738 /* if we are not in single-file segments mode, we're going to unmap the
739 * segment and thus drop the lock on the original fd, but the hugepage dir is
740 * now locked, so we can take out another lock without races.
742 fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
746 if (internal_conf->single_file_segments) {
747 map_offset = seg_idx * ms->len;
748 if (resize_hugefile(fd, map_offset, ms->len, false))
751 if (--(fd_list[list_idx].count) == 0)
752 close_hugefile(fd, path, list_idx);
756 /* if we're able to take out a write lock, we're the last one
757 * holding onto this page.
759 if (!internal_conf->in_memory && !internal_conf->hugepage_unlink) {
760 ret = lock(fd, LOCK_EX);
762 /* no one else is using this page */
767 /* closing fd will drop the lock */
769 fd_list[list_idx].fds[seg_idx] = -1;
772 memset(ms, 0, sizeof(*ms));
774 return ret < 0 ? -1 : 0;
777 struct alloc_walk_param {
778 struct hugepage_info *hi;
779 struct rte_memseg **ms;
781 unsigned int segs_allocated;
787 alloc_seg_walk(const struct rte_memseg_list *msl, void *arg)
789 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
790 struct alloc_walk_param *wa = arg;
791 struct rte_memseg_list *cur_msl;
793 int cur_idx, start_idx, j, dir_fd = -1;
794 unsigned int msl_idx, need, i;
795 const struct internal_config *internal_conf =
796 eal_get_internal_configuration();
798 if (msl->page_sz != wa->page_sz)
800 if (msl->socket_id != wa->socket)
803 page_sz = (size_t)msl->page_sz;
805 msl_idx = msl - mcfg->memsegs;
806 cur_msl = &mcfg->memsegs[msl_idx];
810 /* try finding space in memseg list */
812 /* if we require exact number of pages in a list, find them */
813 cur_idx = rte_fbarray_find_next_n_free(&cur_msl->memseg_arr, 0,
821 /* we don't require exact number of pages, so we're going to go
822 * for best-effort allocation. that means finding the biggest
823 * unused block, and going with that.
825 cur_idx = rte_fbarray_find_biggest_free(&cur_msl->memseg_arr,
830 /* adjust the size to possibly be smaller than original
831 * request, but do not allow it to be bigger.
833 cur_len = rte_fbarray_find_contig_free(&cur_msl->memseg_arr,
835 need = RTE_MIN(need, (unsigned int)cur_len);
838 /* do not allow any page allocations during the time we're allocating,
839 * because file creation and locking operations are not atomic,
840 * and we might be the first or the last ones to use a particular page,
841 * so we need to ensure atomicity of every operation.
843 * during init, we already hold a write lock, so don't try to take out
846 if (wa->hi->lock_descriptor == -1 && !internal_conf->in_memory) {
847 dir_fd = open(wa->hi->hugedir, O_RDONLY);
849 RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n",
850 __func__, wa->hi->hugedir, strerror(errno));
853 /* blocking writelock */
854 if (flock(dir_fd, LOCK_EX)) {
855 RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n",
856 __func__, wa->hi->hugedir, strerror(errno));
862 for (i = 0; i < need; i++, cur_idx++) {
863 struct rte_memseg *cur;
866 cur = rte_fbarray_get(&cur_msl->memseg_arr, cur_idx);
867 map_addr = RTE_PTR_ADD(cur_msl->base_va,
870 if (alloc_seg(cur, map_addr, wa->socket, wa->hi,
872 RTE_LOG(DEBUG, EAL, "attempted to allocate %i segments, but only %i were allocated\n",
875 /* if exact number wasn't requested, stop */
880 for (j = start_idx; j < cur_idx; j++) {
881 struct rte_memseg *tmp;
882 struct rte_fbarray *arr =
883 &cur_msl->memseg_arr;
885 tmp = rte_fbarray_get(arr, j);
886 rte_fbarray_set_free(arr, j);
888 /* free_seg may attempt to create a file, which
891 if (free_seg(tmp, wa->hi, msl_idx, j))
892 RTE_LOG(DEBUG, EAL, "Cannot free page\n");
896 memset(wa->ms, 0, sizeof(*wa->ms) * wa->n_segs);
905 rte_fbarray_set_used(&cur_msl->memseg_arr, cur_idx);
908 wa->segs_allocated = i;
913 /* if we didn't allocate any segments, move on to the next list */
917 struct free_walk_param {
918 struct hugepage_info *hi;
919 struct rte_memseg *ms;
922 free_seg_walk(const struct rte_memseg_list *msl, void *arg)
924 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
925 struct rte_memseg_list *found_msl;
926 struct free_walk_param *wa = arg;
927 uintptr_t start_addr, end_addr;
928 int msl_idx, seg_idx, ret, dir_fd = -1;
929 const struct internal_config *internal_conf =
930 eal_get_internal_configuration();
932 start_addr = (uintptr_t) msl->base_va;
933 end_addr = start_addr + msl->len;
935 if ((uintptr_t)wa->ms->addr < start_addr ||
936 (uintptr_t)wa->ms->addr >= end_addr)
939 msl_idx = msl - mcfg->memsegs;
940 seg_idx = RTE_PTR_DIFF(wa->ms->addr, start_addr) / msl->page_sz;
943 found_msl = &mcfg->memsegs[msl_idx];
945 /* do not allow any page allocations during the time we're freeing,
946 * because file creation and locking operations are not atomic,
947 * and we might be the first or the last ones to use a particular page,
948 * so we need to ensure atomicity of every operation.
950 * during init, we already hold a write lock, so don't try to take out
953 if (wa->hi->lock_descriptor == -1 && !internal_conf->in_memory) {
954 dir_fd = open(wa->hi->hugedir, O_RDONLY);
956 RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n",
957 __func__, wa->hi->hugedir, strerror(errno));
960 /* blocking writelock */
961 if (flock(dir_fd, LOCK_EX)) {
962 RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n",
963 __func__, wa->hi->hugedir, strerror(errno));
969 found_msl->version++;
971 rte_fbarray_set_free(&found_msl->memseg_arr, seg_idx);
973 ret = free_seg(wa->ms, wa->hi, msl_idx, seg_idx);
985 eal_memalloc_alloc_seg_bulk(struct rte_memseg **ms, int n_segs, size_t page_sz,
986 int socket, bool exact)
989 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
990 bool have_numa = false;
992 struct bitmask *oldmask;
994 struct alloc_walk_param wa;
995 struct hugepage_info *hi = NULL;
996 struct internal_config *internal_conf =
997 eal_get_internal_configuration();
999 memset(&wa, 0, sizeof(wa));
1001 /* dynamic allocation not supported in legacy mode */
1002 if (internal_conf->legacy_mem)
1005 for (i = 0; i < (int) RTE_DIM(internal_conf->hugepage_info); i++) {
1007 internal_conf->hugepage_info[i].hugepage_sz) {
1008 hi = &internal_conf->hugepage_info[i];
1013 RTE_LOG(ERR, EAL, "%s(): can't find relevant hugepage_info entry\n",
1018 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
1020 oldmask = numa_allocate_nodemask();
1021 prepare_numa(&oldpolicy, oldmask, socket);
1030 wa.page_sz = page_sz;
1032 wa.segs_allocated = 0;
1034 /* memalloc is locked, so it's safe to use thread-unsafe version */
1035 ret = rte_memseg_list_walk_thread_unsafe(alloc_seg_walk, &wa);
1037 RTE_LOG(ERR, EAL, "%s(): couldn't find suitable memseg_list\n",
1040 } else if (ret > 0) {
1041 ret = (int)wa.segs_allocated;
1044 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
1046 restore_numa(&oldpolicy, oldmask);
1052 eal_memalloc_alloc_seg(size_t page_sz, int socket)
1054 struct rte_memseg *ms;
1055 if (eal_memalloc_alloc_seg_bulk(&ms, 1, page_sz, socket, true) < 0)
1057 /* return pointer to newly allocated memseg */
1062 eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs)
1065 struct internal_config *internal_conf =
1066 eal_get_internal_configuration();
1068 /* dynamic free not supported in legacy mode */
1069 if (internal_conf->legacy_mem)
1072 for (seg = 0; seg < n_segs; seg++) {
1073 struct rte_memseg *cur = ms[seg];
1074 struct hugepage_info *hi = NULL;
1075 struct free_walk_param wa;
1078 /* if this page is marked as unfreeable, fail */
1079 if (cur->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE) {
1080 RTE_LOG(DEBUG, EAL, "Page is not allowed to be freed\n");
1085 memset(&wa, 0, sizeof(wa));
1087 for (i = 0; i < (int)RTE_DIM(internal_conf->hugepage_info);
1089 hi = &internal_conf->hugepage_info[i];
1090 if (cur->hugepage_sz == hi->hugepage_sz)
1093 if (i == (int)RTE_DIM(internal_conf->hugepage_info)) {
1094 RTE_LOG(ERR, EAL, "Can't find relevant hugepage_info entry\n");
1102 /* memalloc is locked, so it's safe to use thread-unsafe version
1104 walk_res = rte_memseg_list_walk_thread_unsafe(free_seg_walk,
1109 RTE_LOG(ERR, EAL, "Couldn't find memseg list\n");
1116 eal_memalloc_free_seg(struct rte_memseg *ms)
1118 const struct internal_config *internal_conf =
1119 eal_get_internal_configuration();
1121 /* dynamic free not supported in legacy mode */
1122 if (internal_conf->legacy_mem)
1125 return eal_memalloc_free_seg_bulk(&ms, 1);
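/*
 * Illustrative usage, assumption for clarity only: allocate a single 2 MiB
 * page on socket 0 and release it again through the wrappers above.
 *
 *	struct rte_memseg *ms = eal_memalloc_alloc_seg(RTE_PGSIZE_2M, 0);
 *	if (ms != NULL)
 *		eal_memalloc_free_seg(ms);
 */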
1129 sync_chunk(struct rte_memseg_list *primary_msl,
1130 struct rte_memseg_list *local_msl, struct hugepage_info *hi,
1131 unsigned int msl_idx, bool used, int start, int end)
1133 struct rte_fbarray *l_arr, *p_arr;
1134 int i, ret, chunk_len, diff_len;
1136 l_arr = &local_msl->memseg_arr;
1137 p_arr = &primary_msl->memseg_arr;
1139 /* we need to aggregate allocations/deallocations into bigger chunks,
1140 * as we don't want to spam the user with per-page callbacks.
1142 * to avoid any potential issues, we also want to trigger
1143 * deallocation callbacks *before* we actually deallocate
1144 * memory, so that the user application can wrap up its use of it
1145 * before it goes away.
1148 chunk_len = end - start;
1150 /* find how many contiguous pages we can map/unmap for this chunk */
1152 rte_fbarray_find_contig_free(l_arr, start) :
1153 rte_fbarray_find_contig_used(l_arr, start);
1155 /* has to be at least one page */
1159 diff_len = RTE_MIN(chunk_len, diff_len);
1161 /* if we are freeing memory, notify the application */
1163 struct rte_memseg *ms;
1165 size_t len, page_sz;
1167 ms = rte_fbarray_get(l_arr, start);
1168 start_va = ms->addr;
1169 page_sz = (size_t)primary_msl->page_sz;
1170 len = page_sz * diff_len;
1172 eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
1176 for (i = 0; i < diff_len; i++) {
1177 struct rte_memseg *p_ms, *l_ms;
1178 int seg_idx = start + i;
1180 l_ms = rte_fbarray_get(l_arr, seg_idx);
1181 p_ms = rte_fbarray_get(p_arr, seg_idx);
1183 if (l_ms == NULL || p_ms == NULL)
1187 ret = alloc_seg(l_ms, p_ms->addr,
1188 p_ms->socket_id, hi,
1192 rte_fbarray_set_used(l_arr, seg_idx);
1194 ret = free_seg(l_ms, hi, msl_idx, seg_idx);
1195 rte_fbarray_set_free(l_arr, seg_idx);
1201 /* if we just allocated memory, notify the application */
1203 struct rte_memseg *ms;
1205 size_t len, page_sz;
1207 ms = rte_fbarray_get(l_arr, start);
1208 start_va = ms->addr;
1209 page_sz = (size_t)primary_msl->page_sz;
1210 len = page_sz * diff_len;
1212 eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC,
1216 /* calculate how much we can advance until next chunk */
1218 rte_fbarray_find_contig_used(l_arr, start) :
1219 rte_fbarray_find_contig_free(l_arr, start);
1220 ret = RTE_MIN(chunk_len, diff_len);
1226 sync_status(struct rte_memseg_list *primary_msl,
1227 struct rte_memseg_list *local_msl, struct hugepage_info *hi,
1228 unsigned int msl_idx, bool used)
1230 struct rte_fbarray *l_arr, *p_arr;
1231 int p_idx, l_chunk_len, p_chunk_len, ret;
1234 /* this is a little bit tricky, but the basic idea is this: walk both lists
1235 * and spot any places where there are discrepancies. walking both lists
1236 * and noting discrepancies in a single go is a hard problem, so we do
1237 * it in two passes - first we spot any places where allocated segments
1238 * mismatch (i.e. ensure that everything that's allocated in the primary
1239 * is also allocated in the secondary), and then we do it by looking at
1240 * free segments instead.
1242 * we also need to aggregate changes into chunks, as we have to call
1243 * callbacks per allocation, not per page.
1245 l_arr = &local_msl->memseg_arr;
1246 p_arr = &primary_msl->memseg_arr;
1249 p_idx = rte_fbarray_find_next_used(p_arr, 0);
1251 p_idx = rte_fbarray_find_next_free(p_arr, 0);
1253 while (p_idx >= 0) {
1254 int next_chunk_search_idx;
1257 p_chunk_len = rte_fbarray_find_contig_used(p_arr,
1259 l_chunk_len = rte_fbarray_find_contig_used(l_arr,
1262 p_chunk_len = rte_fbarray_find_contig_free(p_arr,
1264 l_chunk_len = rte_fbarray_find_contig_free(l_arr,
1267 /* best case scenario - no differences (or the local chunk is bigger, which
1268 * will be fixed during the next iteration); look for the next chunk
1270 if (l_chunk_len >= p_chunk_len) {
1271 next_chunk_search_idx = p_idx + p_chunk_len;
1275 /* if both chunks start at the same point, skip parts we know
1276 * are identical, and sync the rest. each call to sync_chunk
1277 * will only sync contiguous segments, so we need to call this
1278 * until we are sure there are no more differences in this
1281 start = p_idx + l_chunk_len;
1282 end = p_idx + p_chunk_len;
1284 ret = sync_chunk(primary_msl, local_msl, hi, msl_idx,
1287 } while (start < end && ret >= 0);
1288 /* if ret is negative, something went wrong */
1292 next_chunk_search_idx = p_idx + p_chunk_len;
1294 /* skip to end of this chunk */
1296 p_idx = rte_fbarray_find_next_used(p_arr,
1297 next_chunk_search_idx);
1299 p_idx = rte_fbarray_find_next_free(p_arr,
1300 next_chunk_search_idx);
1307 sync_existing(struct rte_memseg_list *primary_msl,
1308 struct rte_memseg_list *local_msl, struct hugepage_info *hi,
1309 unsigned int msl_idx)
1313 /* do not allow any page allocations during the time we're allocating,
1314 * because file creation and locking operations are not atomic,
1315 * and we might be the first or the last ones to use a particular page,
1316 * so we need to ensure atomicity of every operation.
1318 dir_fd = open(hi->hugedir, O_RDONLY);
1320 RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n", __func__,
1321 hi->hugedir, strerror(errno));
1324 /* blocking writelock */
1325 if (flock(dir_fd, LOCK_EX)) {
1326 RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n", __func__,
1327 hi->hugedir, strerror(errno));
1332 /* ensure all allocated space is the same in both lists */
1333 ret = sync_status(primary_msl, local_msl, hi, msl_idx, true);
1337 /* ensure all unallocated space is the same in both lists */
1338 ret = sync_status(primary_msl, local_msl, hi, msl_idx, false);
1342 /* update version number */
1343 local_msl->version = primary_msl->version;
1354 sync_walk(const struct rte_memseg_list *msl, void *arg __rte_unused)
1356 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1357 struct rte_memseg_list *primary_msl, *local_msl;
1358 struct hugepage_info *hi = NULL;
1361 struct internal_config *internal_conf =
1362 eal_get_internal_configuration();
1367 msl_idx = msl - mcfg->memsegs;
1368 primary_msl = &mcfg->memsegs[msl_idx];
1369 local_msl = &local_memsegs[msl_idx];
1371 for (i = 0; i < RTE_DIM(internal_conf->hugepage_info); i++) {
1373 internal_conf->hugepage_info[i].hugepage_sz;
1374 uint64_t msl_sz = primary_msl->page_sz;
1375 if (msl_sz == cur_sz) {
1376 hi = &internal_conf->hugepage_info[i];
1381 RTE_LOG(ERR, EAL, "Can't find relevant hugepage_info entry\n");
1385 /* if versions don't match, synchronize everything */
1386 if (local_msl->version != primary_msl->version &&
1387 sync_existing(primary_msl, local_msl, hi, msl_idx))
1394 eal_memalloc_sync_with_primary(void)
1396 /* nothing to be done in primary */
1397 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1400 /* memalloc is locked, so it's safe to call thread-unsafe version */
1401 if (rte_memseg_list_walk_thread_unsafe(sync_walk, NULL))
1407 secondary_msl_create_walk(const struct rte_memseg_list *msl,
1408 void *arg __rte_unused)
1410 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1411 struct rte_memseg_list *primary_msl, *local_msl;
1412 char name[PATH_MAX];
1418 msl_idx = msl - mcfg->memsegs;
1419 primary_msl = &mcfg->memsegs[msl_idx];
1420 local_msl = &local_memsegs[msl_idx];
1422 /* create distinct fbarrays for each secondary */
1423 snprintf(name, RTE_FBARRAY_NAME_LEN, "%s_%i",
1424 primary_msl->memseg_arr.name, getpid());
1426 ret = rte_fbarray_init(&local_msl->memseg_arr, name,
1427 primary_msl->memseg_arr.len,
1428 primary_msl->memseg_arr.elt_sz);
1430 RTE_LOG(ERR, EAL, "Cannot initialize local memory map\n");
1433 local_msl->base_va = primary_msl->base_va;
1434 local_msl->len = primary_msl->len;
1440 secondary_msl_destroy_walk(const struct rte_memseg_list *msl,
1441 void *arg __rte_unused)
1443 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1444 struct rte_memseg_list *local_msl;
1450 msl_idx = msl - mcfg->memsegs;
1451 local_msl = &local_memsegs[msl_idx];
1453 ret = rte_fbarray_destroy(&local_msl->memseg_arr);
1455 RTE_LOG(ERR, EAL, "Cannot destroy local memory map\n");
1458 local_msl->base_va = NULL;
1465 alloc_list(int list_idx, int len)
1469 const struct internal_config *internal_conf =
1470 eal_get_internal_configuration();
1472 /* single-file segments mode does not need fd list */
1473 if (!internal_conf->single_file_segments) {
1474 /* ensure we have space to store an fd for each possible segment */
1475 data = malloc(sizeof(int) * len);
1477 RTE_LOG(ERR, EAL, "Unable to allocate space for file descriptors\n");
1480 /* set all fd's as invalid */
1481 for (i = 0; i < len; i++)
1483 fd_list[list_idx].fds = data;
1484 fd_list[list_idx].len = len;
1486 fd_list[list_idx].fds = NULL;
1487 fd_list[list_idx].len = 0;
1490 fd_list[list_idx].count = 0;
1491 fd_list[list_idx].memseg_list_fd = -1;
1497 destroy_list(int list_idx)
1499 const struct internal_config *internal_conf =
1500 eal_get_internal_configuration();
1502 /* single-file segments mode does not need fd list */
1503 if (!internal_conf->single_file_segments) {
1504 int *fds = fd_list[list_idx].fds;
1506 /* go through each fd and ensure it's closed */
1507 for (i = 0; i < fd_list[list_idx].len; i++) {
1514 fd_list[list_idx].fds = NULL;
1515 fd_list[list_idx].len = 0;
1516 } else if (fd_list[list_idx].memseg_list_fd >= 0) {
1517 close(fd_list[list_idx].memseg_list_fd);
1518 fd_list[list_idx].count = 0;
1519 fd_list[list_idx].memseg_list_fd = -1;
1525 fd_list_create_walk(const struct rte_memseg_list *msl,
1526 void *arg __rte_unused)
1528 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1535 msl_idx = msl - mcfg->memsegs;
1536 len = msl->memseg_arr.len;
1538 return alloc_list(msl_idx, len);
1542 fd_list_destroy_walk(const struct rte_memseg_list *msl, void *arg __rte_unused)
1544 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1550 msl_idx = msl - mcfg->memsegs;
1552 return destroy_list(msl_idx);
1556 eal_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd)
1558 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1559 const struct internal_config *internal_conf =
1560 eal_get_internal_configuration();
1562 /* single file segments mode doesn't support individual segment fd's */
1563 if (internal_conf->single_file_segments)
1566 /* if list is not allocated, allocate it */
1567 if (fd_list[list_idx].len == 0) {
1568 int len = mcfg->memsegs[list_idx].memseg_arr.len;
1570 if (alloc_list(list_idx, len) < 0)
1573 fd_list[list_idx].fds[seg_idx] = fd;
1579 eal_memalloc_set_seg_list_fd(int list_idx, int fd)
1581 const struct internal_config *internal_conf =
1582 eal_get_internal_configuration();
1584 /* non-single file segment mode doesn't support segment list fd's */
1585 if (!internal_conf->single_file_segments)
1588 fd_list[list_idx].memseg_list_fd = fd;
1594 eal_memalloc_get_seg_fd(int list_idx, int seg_idx)
1597 const struct internal_config *internal_conf =
1598 eal_get_internal_configuration();
1600 if (internal_conf->in_memory || internal_conf->no_hugetlbfs) {
1601 #ifndef MEMFD_SUPPORTED
1602 /* in in-memory or no-huge mode, we rely on memfd support */
1605 /* memfd supported, but hugetlbfs memfd may not be */
1606 if (!internal_conf->no_hugetlbfs && !memfd_create_supported)
1610 if (internal_conf->single_file_segments) {
1611 fd = fd_list[list_idx].memseg_list_fd;
1612 } else if (fd_list[list_idx].len == 0) {
1613 /* list not initialized */
1616 fd = fd_list[list_idx].fds[seg_idx];
1624 test_memfd_create(void)
1626 #ifdef MEMFD_SUPPORTED
1627 const struct internal_config *internal_conf =
1628 eal_get_internal_configuration();
1630 for (i = 0; i < internal_conf->num_hugepage_sizes; i++) {
1631 uint64_t pagesz = internal_conf->hugepage_info[i].hugepage_sz;
1632 int pagesz_flag = pagesz_flags(pagesz);
1635 flags = pagesz_flag | RTE_MFD_HUGETLB;
1636 int fd = memfd_create("test", flags);
1638 /* we failed - let memalloc know this isn't working */
1639 if (errno == EINVAL) {
1640 memfd_create_supported = 0;
1641 return 0; /* not supported */
1644 /* we got some other error - something's wrong */
1645 return -1; /* error */
1648 return 1; /* supported */
1651 return 0; /* not supported */
1655 eal_memalloc_get_seg_fd_offset(int list_idx, int seg_idx, size_t *offset)
1657 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1658 const struct internal_config *internal_conf =
1659 eal_get_internal_configuration();
1661 if (internal_conf->in_memory || internal_conf->no_hugetlbfs) {
1662 #ifndef MEMFD_SUPPORTED
1663 /* in in-memory or no-huge mode, we rely on memfd support */
1666 /* memfd supported, but hugetlbfs memfd may not be */
1667 if (!internal_conf->no_hugetlbfs && !memfd_create_supported)
1671 if (internal_conf->single_file_segments) {
1672 size_t pgsz = mcfg->memsegs[list_idx].page_sz;
1674 /* segment not active? */
1675 if (fd_list[list_idx].memseg_list_fd < 0)
1677 *offset = pgsz * seg_idx;
1679 /* fd_list not initialized? */
1680 if (fd_list[list_idx].len == 0)
1683 /* segment not active? */
1684 if (fd_list[list_idx].fds[seg_idx] < 0)
1692 eal_memalloc_cleanup(void)
1694 /* close all remaining fd's - these are per-process, so it's safe */
1695 if (rte_memseg_list_walk_thread_unsafe(fd_list_destroy_walk, NULL))
1698 /* destroy the shadow page table if we're a secondary process */
1699 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1702 if (rte_memseg_list_walk_thread_unsafe(secondary_msl_destroy_walk,
1710 eal_memalloc_init(void)
1712 const struct internal_config *internal_conf =
1713 eal_get_internal_configuration();
1715 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
1716 if (rte_memseg_list_walk(secondary_msl_create_walk, NULL) < 0)
1718 if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
1719 internal_conf->in_memory) {
1720 int mfd_res = test_memfd_create();
1723 RTE_LOG(ERR, EAL, "Unable to check if memfd is supported\n");
1727 RTE_LOG(DEBUG, EAL, "Using memfd for anonymous memory\n");
1729 RTE_LOG(INFO, EAL, "Using memfd is not supported, falling back to anonymous hugepages\n");
1731 /* we only support single-file segments mode with in-memory mode
1732 * if we support hugetlbfs with memfd_create. this code will
1735 if (internal_conf->single_file_segments &&
1737 RTE_LOG(ERR, EAL, "Single-file segments mode cannot be used without memfd support\n");
1740 /* this cannot ever happen but better safe than sorry */
1741 if (!anonymous_hugepages_supported) {
1742 RTE_LOG(ERR, EAL, "Using anonymous memory is not supported\n");
1747 /* initialize all of the fd lists */
1748 if (rte_memseg_list_walk(fd_list_create_walk, NULL))