/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#define _FILE_OFFSET_BITS 64
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <limits.h>
#include <signal.h>
#include <setjmp.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/ioctl.h>
#ifdef F_ADD_SEALS /* if file sealing is supported, so is memfd */
#include <linux/memfd.h>
#define MEMFD_SUPPORTED
#endif
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
#include <numa.h>
#include <numaif.h>
#endif
#include <linux/falloc.h>
#include <linux/mman.h> /* for hugetlb-related mmap flags */
#include <rte_common.h>
#include <rte_log.h>
#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_spinlock.h>

#include "eal_filesystem.h"
#include "eal_internal_cfg.h"
#include "eal_memalloc.h"
#include "eal_private.h"
const int anonymous_hugepages_supported =
#ifdef MAP_HUGE_SHIFT
		1;
#define RTE_MAP_HUGE_SHIFT MAP_HUGE_SHIFT
#else
		0;
#define RTE_MAP_HUGE_SHIFT 26
#endif
/*
 * we've already checked memfd support at compile-time, but we also need to
 * check if we can create hugepage files with memfd at runtime.
 *
 * this is not a constant because, while we may be *compiled* with memfd
 * hugetlbfs support, we might not be *running* on a system that supports
 * memfd and/or memfd with hugetlbfs, so we need to be able to adjust this
 * flag at runtime, and fall back to anonymous memory.
 */
static int memfd_create_supported =
#ifdef MFD_HUGETLB
		1;
#define RTE_MFD_HUGETLB MFD_HUGETLB
#else
		0;
#define RTE_MFD_HUGETLB 4U
#endif
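
/* the runtime part of this check lives in test_memfd_create() near the
 * bottom of this file: it attempts a real memfd_create() with
 * RTE_MFD_HUGETLB for each configured page size, and clears this flag if
 * the kernel rejects the flags with EINVAL.
 */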
/*
 * not all kernel versions support fallocate() on hugetlbfs, so fall back to
 * ftruncate() and disallow deallocation if fallocate() is not supported.
 */
static int fallocate_supported = -1; /* unknown */
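
/* illustrative sketch of the two paths taken by resize_hugefile() below,
 * for a page of size page_sz at offset fa_offset in a per-list file:
 *
 *	// fallocate() supported: punch a hole to free the page
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *			fa_offset, page_sz);
 *	// fallocate() not supported: we can only ever grow the file
 *	ftruncate(fd, fa_offset + page_sz);
 */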
/*
 * we have two modes - single file segments, and file-per-page mode.
 *
 * for single-file segments, we need some kind of mechanism to keep track of
 * which hugepages can be freed back to the system, and which cannot. we
 * cannot use flock() because it doesn't allow locking parts of a file, and
 * we cannot use fcntl() due to issues with its semantics, so we will have
 * to rely on a bunch of lockfiles, one per page. so, we will use the 'fds'
 * array to keep track of per-page lockfiles, and store the actual segment
 * list fd in the 'memseg_list_fd' field.
 *
 * for file-per-page mode, each page will have its own fd, so 'memseg_list_fd'
 * will be invalid (set to -1), and we'll use 'fds' to keep track of page fd's.
 *
 * we cannot know how many pages a system will have in advance, but we do
 * know that they come in lists, and we know the lengths of these lists. so,
 * simply store a malloc'd array of fd's indexed by list and segment index.
 *
 * they will be initialized at startup, and filled as we allocate/deallocate
 * segments.
 */
static struct {
	int *fds; /**< dynamically allocated array of segment lock fd's */
	int memseg_list_fd; /**< memseg list fd */
	int len; /**< total length of the array */
	int count; /**< entries used in the array */
} fd_list[RTE_MAX_MEMSEG_LISTS];
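
/* illustrative example: in file-per-page mode, the fd backing segment 5 of
 * list 2 is fd_list[2].fds[5]; in single-file mode, all pages of list 2
 * share fd_list[2].memseg_list_fd, and fd_list[2].fds[5] instead holds the
 * fd of that page's lockfile.
 */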
/** local copy of a memory map, used to synchronize memory hotplug in MP */
static struct rte_memseg_list local_memsegs[RTE_MAX_MEMSEG_LISTS];
static sigjmp_buf huge_jmpenv;

static void __rte_unused huge_sigbus_handler(int signo __rte_unused)
{
	siglongjmp(huge_jmpenv, 1);
}
/* Wrap sigsetjmp in its own function to avoid a compiler warning: any
 * non-volatile, non-static local variable in the stack frame calling
 * sigsetjmp might be clobbered by a call to longjmp.
 */
static int __rte_unused huge_wrap_sigsetjmp(void)
{
	return sigsetjmp(huge_jmpenv, 1);
}
static struct sigaction huge_action_old;
static int huge_need_recover;

static void __rte_unused
huge_register_sigbus(void)
{
	sigset_t mask;
	struct sigaction action;

	sigemptyset(&mask);
	sigaddset(&mask, SIGBUS);
	action.sa_flags = 0;
	action.sa_mask = mask;
	action.sa_handler = huge_sigbus_handler;

	huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);
}

static void __rte_unused
huge_recover_sigbus(void)
{
	if (huge_need_recover) {
		sigaction(SIGBUS, &huge_action_old, NULL);
		huge_need_recover = 0;
	}
}
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
static bool
check_numa(void)
{
	bool ret = true;
	/* Check if kernel supports NUMA. */
	if (numa_available() != 0) {
		RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
		ret = false;
	}
	return ret;
}
static void
prepare_numa(int *oldpolicy, struct bitmask *oldmask, int socket_id)
{
	RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
	if (get_mempolicy(oldpolicy, oldmask->maskp,
			oldmask->size + 1, 0, 0) < 0) {
		RTE_LOG(ERR, EAL,
			"Failed to get current mempolicy: %s. "
			"Assuming MPOL_DEFAULT.\n", strerror(errno));
		*oldpolicy = MPOL_DEFAULT;
	}
	RTE_LOG(DEBUG, EAL,
		"Setting policy MPOL_PREFERRED for socket %d\n",
		socket_id);
	numa_set_preferred(socket_id);
}
static void
restore_numa(int *oldpolicy, struct bitmask *oldmask)
{
	RTE_LOG(DEBUG, EAL,
		"Restoring previous memory policy: %d\n", *oldpolicy);
	if (*oldpolicy == MPOL_DEFAULT) {
		numa_set_localalloc();
	} else if (set_mempolicy(*oldpolicy, oldmask->maskp,
			oldmask->size + 1) < 0) {
		RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
			strerror(errno));
		numa_set_localalloc();
	}
	numa_free_cpumask(oldmask);
}
#endif
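
/* note: prepare_numa()/restore_numa() are used as a bracket around page
 * allocation in eal_memalloc_alloc_seg_bulk() below, so that hugepages
 * faulted in during allocation land on the requested socket.
 */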
/*
 * uses fstat to report the size of a file on disk
 */
static off_t
get_file_size(int fd)
{
	struct stat st;

	if (fstat(fd, &st) < 0)
		return 0;
	return st.st_size;
}
static inline uint32_t
log2_u64(uint64_t v)
{
	if (v == 0)
		return 0;
	/* round up to the nearest power of two */
	v = rte_align64pow2(v);
	return rte_bsf64(v);
}
static int
pagesz_flags(uint64_t page_sz)
{
	/* as per mmap() manpage, all page sizes are log2 of page size
	 * shifted by MAP_HUGE_SHIFT
	 */
	int log2 = log2_u64(page_sz);

	return log2 << RTE_MAP_HUGE_SHIFT;
}
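
/* worked example: a 2MB hugepage is 1 << 21 bytes, so log2_u64() yields 21
 * and pagesz_flags() returns 21 << 26, which matches the kernel's
 * MAP_HUGE_2MB and MFD_HUGE_2MB encoding.
 */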
/* returns 1 on successful lock, 0 on unsuccessful lock, -1 on error */
static int lock(int fd, int type)
{
	int ret;

	/* flock() may be interrupted by a signal, so retry on EINTR */
	do {
		ret = flock(fd, type | LOCK_NB);
	} while (ret && errno == EINTR);

	if (ret && errno == EWOULDBLOCK) {
		return 0;
	} else if (ret) {
		RTE_LOG(ERR, EAL, "%s(): error calling flock(): %s\n",
			__func__, strerror(errno));
		return -1;
	}
	/* lock was successful */
	return 1;
}
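
/* usage sketch (illustrative): callers distinguish all three outcomes, e.g.
 *
 *	ret = lock(fd, LOCK_EX);
 *	if (ret == 1)
 *		unlink(path);	// we were the last user of the page
 *	else if (ret == 0)
 *		;		// somebody else still holds a shared lock
 *	else
 *		return -1;	// flock() itself failed
 */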
static int get_segment_lock_fd(int list_idx, int seg_idx)
{
	char path[PATH_MAX] = {0};
	int fd;

	if (list_idx < 0 || list_idx >= (int)RTE_DIM(fd_list))
		return -1;
	if (seg_idx < 0 || seg_idx >= fd_list[list_idx].len)
		return -1;

	fd = fd_list[list_idx].fds[seg_idx];
	/* does this lock already exist? */
	if (fd >= 0)
		return fd;

	eal_get_hugefile_lock_path(path, sizeof(path),
			list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);

	fd = open(path, O_CREAT | O_RDWR, 0660);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error creating lockfile '%s': %s\n",
			__func__, path, strerror(errno));
		return -1;
	}
	/* take out a read lock */
	if (lock(fd, LOCK_SH) != 1) {
		RTE_LOG(ERR, EAL, "%s(): failed to take out a readlock on '%s': %s\n",
			__func__, path, strerror(errno));
		close(fd);
		return -1;
	}
	/* store it for future reference */
	fd_list[list_idx].fds[seg_idx] = fd;
	fd_list[list_idx].count++;
	return fd;
}
static int unlock_segment(int list_idx, int seg_idx)
{
	int fd, ret;

	if (list_idx < 0 || list_idx >= (int)RTE_DIM(fd_list))
		return -1;
	if (seg_idx < 0 || seg_idx >= fd_list[list_idx].len)
		return -1;

	fd = fd_list[list_idx].fds[seg_idx];

	/* upgrade lock to exclusive to see if we can remove the lockfile */
	ret = lock(fd, LOCK_EX);
	if (ret == 1) {
		/* we've succeeded in taking exclusive lock, this lockfile may
		 * be removed.
		 */
		char path[PATH_MAX] = {0};
		eal_get_hugefile_lock_path(path, sizeof(path),
				list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);
		if (unlink(path)) {
			RTE_LOG(ERR, EAL, "%s(): error removing lockfile '%s': %s\n",
				__func__, path, strerror(errno));
		}
	}
	/* we don't want to leak the fd, so even if we fail to lock, close fd
	 * and remove it from list anyway.
	 */
	close(fd);
	fd_list[list_idx].fds[seg_idx] = -1;
	fd_list[list_idx].count--;

	if (ret < 0)
		return -1;
	return 0;
}
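
/* note: together, get_segment_lock_fd() and unlock_segment() implement a
 * cross-process reference count on a page: every user holds a shared
 * flock() on the page's lockfile, and only a process that can upgrade to
 * an exclusive lock (i.e. the last user) may unlink it.
 */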
static int
get_seg_memfd(struct hugepage_info *hi __rte_unused,
		unsigned int list_idx __rte_unused,
		unsigned int seg_idx __rte_unused)
{
#ifdef MEMFD_SUPPORTED
	int fd;
	char segname[250]; /* as per manpage, limit is 249 bytes plus null */
	int flags = RTE_MFD_HUGETLB | pagesz_flags(hi->hugepage_sz);

	if (internal_config.single_file_segments) {
		fd = fd_list[list_idx].memseg_list_fd;
		if (fd < 0) {
			snprintf(segname, sizeof(segname), "seg_%i", list_idx);
			fd = memfd_create(segname, flags);
			if (fd < 0) {
				RTE_LOG(DEBUG, EAL, "%s(): memfd create failed: %s\n",
					__func__, strerror(errno));
				return -1;
			}
			fd_list[list_idx].memseg_list_fd = fd;
		}
	} else {
		fd = fd_list[list_idx].fds[seg_idx];
		if (fd < 0) {
			snprintf(segname, sizeof(segname), "seg_%i-%i",
					list_idx, seg_idx);
			fd = memfd_create(segname, flags);
			if (fd < 0) {
				RTE_LOG(DEBUG, EAL, "%s(): memfd create failed: %s\n",
					__func__, strerror(errno));
				return -1;
			}
			fd_list[list_idx].fds[seg_idx] = fd;
		}
	}
	return fd;
#endif
	return -1;
}
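
/* note: in in-memory mode there is no hugetlbfs mount to create files in,
 * so memfd_create() provides an anonymous, fd-backed hugepage file
 * instead; unlike plain MAP_ANONYMOUS memory, the resulting fd can still
 * be passed to other processes, e.g. over a Unix domain socket.
 */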
static int
get_seg_fd(char *path, int buflen, struct hugepage_info *hi,
		unsigned int list_idx, unsigned int seg_idx)
{
	int fd;

	/* for in-memory mode, we only make it here when we're sure we support
	 * memfd, and this is a special case.
	 */
	if (internal_config.in_memory)
		return get_seg_memfd(hi, list_idx, seg_idx);

	if (internal_config.single_file_segments) {
		/* create a hugepage file path */
		eal_get_hugefile_path(path, buflen, hi->hugedir, list_idx);

		fd = fd_list[list_idx].memseg_list_fd;
		if (fd < 0) {
			fd = open(path, O_CREAT | O_RDWR, 0600);
			if (fd < 0) {
				RTE_LOG(ERR, EAL, "%s(): open failed: %s\n",
					__func__, strerror(errno));
				return -1;
			}
			/* take out a read lock and keep it indefinitely */
			if (lock(fd, LOCK_SH) < 0) {
				RTE_LOG(ERR, EAL, "%s(): lock failed: %s\n",
					__func__, strerror(errno));
				close(fd);
				return -1;
			}
			fd_list[list_idx].memseg_list_fd = fd;
		}
	} else {
		/* create a hugepage file path */
		eal_get_hugefile_path(path, buflen, hi->hugedir,
				list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);

		fd = fd_list[list_idx].fds[seg_idx];
		if (fd < 0) {
			fd = open(path, O_CREAT | O_RDWR, 0600);
			if (fd < 0) {
				RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n",
					__func__, strerror(errno));
				return -1;
			}
			/* take out a read lock */
			if (lock(fd, LOCK_SH) < 0) {
				RTE_LOG(ERR, EAL, "%s(): lock failed: %s\n",
					__func__, strerror(errno));
				close(fd);
				return -1;
			}
			fd_list[list_idx].fds[seg_idx] = fd;
		}
	}
	return fd;
}
static int
resize_hugefile(int fd, char *path, int list_idx, int seg_idx,
		uint64_t fa_offset, uint64_t page_sz, bool grow)
{
	bool again = false;

	/* in-memory mode is a special case, because we don't need to perform
	 * any locking, and we can be sure that fallocate() is supported.
	 */
	if (internal_config.in_memory) {
		int flags = grow ? 0 : FALLOC_FL_PUNCH_HOLE |
				FALLOC_FL_KEEP_SIZE;
		int ret;

		/* grow or shrink the file */
		ret = fallocate(fd, flags, fa_offset, page_sz);
		if (ret < 0) {
			RTE_LOG(DEBUG, EAL, "%s(): fallocate() failed: %s\n",
				__func__, strerror(errno));
			return -1;
		}
		/* increase/decrease total segment count */
		fd_list[list_idx].count += (grow ? 1 : -1);
		if (!grow && fd_list[list_idx].count == 0) {
			close(fd_list[list_idx].memseg_list_fd);
			fd_list[list_idx].memseg_list_fd = -1;
		}
		return 0;
	}

	do {
		if (fallocate_supported == 0) {
			/* we cannot deallocate memory if fallocate() is not
			 * supported, and hugepage file is already locked at
			 * creation, so no further synchronization needed.
			 */
			if (!grow) {
				RTE_LOG(DEBUG, EAL, "%s(): fallocate not supported, not freeing page back to the system\n",
					__func__);
				return -1;
			}
			uint64_t new_size = fa_offset + page_sz;
			uint64_t cur_size = get_file_size(fd);

			/* fallocate isn't supported, fall back to ftruncate */
			if (new_size > cur_size &&
					ftruncate(fd, new_size) < 0) {
				RTE_LOG(DEBUG, EAL, "%s(): ftruncate() failed: %s\n",
					__func__, strerror(errno));
				return -1;
			}
		} else {
			int flags = grow ? 0 : FALLOC_FL_PUNCH_HOLE |
					FALLOC_FL_KEEP_SIZE;
			int ret, lock_fd;

			/* if fallocate() is supported, we need to take out a
			 * read lock on allocate (to prevent other processes
			 * from deallocating this page), and take out a write
			 * lock on deallocate (to ensure nobody else is using
			 * this page).
			 *
			 * read locks on page itself are already taken out at
			 * file creation, in get_seg_fd().
			 *
			 * we cannot rely on simple use of flock() call, because
			 * we need to be able to lock a section of the file,
			 * and we cannot use fcntl() locks, because of numerous
			 * problems with their semantics, so we will use
			 * deterministically named lock files for each section
			 * of the file.
			 *
			 * if we're shrinking the file, we want to upgrade our
			 * lock from shared to exclusive.
			 *
			 * lock_fd is an fd for a lockfile, not for the segment
			 * list.
			 */
			lock_fd = get_segment_lock_fd(list_idx, seg_idx);

			if (!grow) {
				/* we are using this lockfile to determine
				 * whether this particular page is locked, as we
				 * are in single file segments mode and thus
				 * cannot use regular flock() to get this info.
				 *
				 * we want to try and take out an exclusive lock
				 * on the lock file to determine if we're the
				 * last ones using this page, and if not, we
				 * won't be shrinking it, and will instead exit
				 * prematurely.
				 */
				ret = lock(lock_fd, LOCK_EX);

				/* drop the lock on the lockfile, so that even
				 * if we couldn't shrink the file ourselves, we
				 * are signalling to other processes that we're
				 * no longer using this page.
				 */
				if (unlock_segment(list_idx, seg_idx))
					RTE_LOG(ERR, EAL, "Could not unlock segment\n");

				/* additionally, if this was the last lock on
				 * this segment list, we can safely close the
				 * page file fd, so that one of the processes
				 * could then delete the file after shrinking.
				 */
				if (ret < 1 && fd_list[list_idx].count == 0) {
					close(fd);
					fd_list[list_idx].memseg_list_fd = -1;
				}

				if (ret < 0) {
					RTE_LOG(ERR, EAL, "Could not lock segment\n");
					return -1;
				}
				if (ret == 0)
					/* failed to lock, not an error. */
					return 0;
			}

			/* grow or shrink the file */
			ret = fallocate(fd, flags, fa_offset, page_sz);
			if (ret < 0) {
				if (fallocate_supported == -1 &&
						errno == ENOTSUP) {
					RTE_LOG(ERR, EAL, "%s(): fallocate() not supported, hugepage deallocation will be disabled\n",
						__func__);
					again = true;
					fallocate_supported = 0;
				} else {
					RTE_LOG(DEBUG, EAL, "%s(): fallocate() failed: %s\n",
						__func__, strerror(errno));
					return -1;
				}
			} else {
				fallocate_supported = 1;

				/* we've grown/shrunk the file, and we hold an
				 * exclusive lock now. check if there are no
				 * more segments active in this segment list,
				 * and remove the file if there aren't.
				 */
				if (fd_list[list_idx].count == 0) {
					if (unlink(path))
						RTE_LOG(ERR, EAL, "%s(): unlinking '%s' failed: %s\n",
							__func__, path,
							strerror(errno));
					close(fd);
					fd_list[list_idx].memseg_list_fd = -1;
				}
			}
		}
	} while (again);
	return 0;
}
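
/* note: callers pass fa_offset == seg_idx * page_sz (see alloc_seg() and
 * free_seg() below), so each segment occupies a fixed, deterministic
 * window of the per-list file, and pages can be punched in and out
 * independently.
 */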
static int
alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
		struct hugepage_info *hi, unsigned int list_idx,
		unsigned int seg_idx)
{
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	int cur_socket_id = 0;
#endif
	uint64_t map_offset;
	rte_iova_t iova;
	void *va;
	char path[PATH_MAX];
	int ret = 0;
	int fd;
	size_t alloc_sz;
	int flags;
	void *new_addr;

	alloc_sz = hi->hugepage_sz;

	/* these are checked at init, but code analyzers don't know that */
	if (internal_config.in_memory && !anonymous_hugepages_supported) {
		RTE_LOG(ERR, EAL, "Anonymous hugepages not supported, in-memory mode cannot allocate memory\n");
		return -1;
	}
	if (internal_config.in_memory && !memfd_create_supported &&
			internal_config.single_file_segments) {
		RTE_LOG(ERR, EAL, "Single-file segments are not supported without memfd support\n");
		return -1;
	}

	/* in-memory without memfd is a special case */
	int mmap_flags;

	if (internal_config.in_memory && !memfd_create_supported) {
		int pagesz_flag, flags;

		pagesz_flag = pagesz_flags(alloc_sz);
		flags = pagesz_flag | MAP_HUGETLB | MAP_FIXED |
				MAP_PRIVATE | MAP_ANONYMOUS;
		fd = -1;
		mmap_flags = flags;

		/* single-file segments codepath will never be active
		 * here because in-memory mode is incompatible with the
		 * fallback path, and it's stopped at EAL initialization
		 * stage.
		 */
		map_offset = 0;
	} else {
		/* takes out a read lock on segment or segment list */
		fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
		if (fd < 0) {
			RTE_LOG(ERR, EAL, "Couldn't get fd on hugepage file\n");
			return -1;
		}

		if (internal_config.single_file_segments) {
			map_offset = seg_idx * alloc_sz;
			ret = resize_hugefile(fd, path, list_idx, seg_idx,
					map_offset, alloc_sz, true);
			if (ret < 0)
				goto resized;
		} else {
			map_offset = 0;
			if (ftruncate(fd, alloc_sz) < 0) {
				RTE_LOG(DEBUG, EAL, "%s(): ftruncate() failed: %s\n",
					__func__, strerror(errno));
				goto resized;
			}
			if (internal_config.hugepage_unlink &&
					!internal_config.in_memory) {
				if (unlink(path)) {
					RTE_LOG(DEBUG, EAL, "%s(): unlink() failed: %s\n",
						__func__, strerror(errno));
					goto resized;
				}
			}
		}
		mmap_flags = MAP_SHARED | MAP_POPULATE | MAP_FIXED;
	}

	/*
	 * map the segment, and populate page tables, the kernel fills
	 * this segment with zeros if it's a new page.
	 */
	va = mmap(addr, alloc_sz, PROT_READ | PROT_WRITE, mmap_flags, fd,
			map_offset);
	if (va == MAP_FAILED) {
		RTE_LOG(DEBUG, EAL, "%s(): mmap() failed: %s\n", __func__,
			strerror(errno));
		/* mmap failed, but the previous region might have been
		 * unmapped anyway. try to remap it
		 */
		goto unmapped;
	}
	if (va != addr) {
		RTE_LOG(DEBUG, EAL, "%s(): wrong mmap() address\n", __func__);
		munmap(va, alloc_sz);
		goto resized;
	}

	/* On Linux, hugetlb limitations such as cgroup limits are
	 * enforced at fault time instead of mmap(), even with
	 * MAP_POPULATE; the kernel sends a SIGBUS signal instead. To
	 * avoid being killed, save the stack environment here: if
	 * SIGBUS happens, we can jump back to it.
	 */
	if (huge_wrap_sigsetjmp()) {
		RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more hugepages of size %uMB\n",
			(unsigned int)(alloc_sz >> 20));
		goto mapped;
	}

	/* we need to trigger a write to the page to enforce page fault and
	 * ensure that page is accessible to us, but we can't overwrite value
	 * that is already there, so read the old value, and write it back.
	 * kernel populates the page with zeroes initially.
	 */
	*(volatile int *)addr = *(volatile int *)addr;

	iova = rte_mem_virt2iova(addr);
	if (iova == RTE_BAD_PHYS_ADDR) {
		RTE_LOG(DEBUG, EAL, "%s(): can't get IOVA addr\n",
			__func__);
		goto mapped;
	}

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	move_pages(getpid(), 1, &addr, NULL, &cur_socket_id, 0);

	if (cur_socket_id != socket_id) {
		RTE_LOG(DEBUG, EAL,
				"%s(): allocation happened on wrong socket (wanted %d, got %d)\n",
				__func__, socket_id, cur_socket_id);
		goto mapped;
	}
#endif

	ms->addr = addr;
	ms->hugepage_sz = alloc_sz;
	ms->len = alloc_sz;
	ms->nchannel = rte_memory_get_nchannel();
	ms->nrank = rte_memory_get_nrank();
	ms->iova = iova;
	ms->socket_id = socket_id;

	return 0;

mapped:
	munmap(addr, alloc_sz);
unmapped:
	flags = MAP_FIXED;
	new_addr = eal_get_virtual_area(addr, &alloc_sz, alloc_sz, 0, flags);
	if (new_addr != addr) {
		if (new_addr != NULL)
			munmap(new_addr, alloc_sz);
		/* we're leaving a hole in our virtual address space. if
		 * somebody else maps this hole now, we could accidentally
		 * override it in the future.
		 */
		RTE_LOG(CRIT, EAL, "Can't mmap holes in our virtual address space\n");
	}
resized:
	/* some codepaths will return negative fd, so exit early */
	if (fd < 0)
		return -1;

	if (internal_config.single_file_segments) {
		resize_hugefile(fd, path, list_idx, seg_idx, map_offset,
				alloc_sz, false);
		/* ignore failure, can't make it any worse */
	} else {
		/* only remove file if we can take out a write lock */
		if (internal_config.hugepage_unlink == 0 &&
				internal_config.in_memory == 0 &&
				lock(fd, LOCK_EX) == 1)
			unlink(path);
		close(fd);
		fd_list[list_idx].fds[seg_idx] = -1;
	}
	return -1;
}
static int
free_seg(struct rte_memseg *ms, struct hugepage_info *hi,
		unsigned int list_idx, unsigned int seg_idx)
{
	uint64_t map_offset;
	char path[PATH_MAX];
	int fd, ret = 0;
	bool exit_early;

	/* erase page data */
	memset(ms->addr, 0, ms->len);

	if (mmap(ms->addr, ms->len, PROT_READ,
			MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) ==
				MAP_FAILED) {
		RTE_LOG(DEBUG, EAL, "couldn't unmap page\n");
		return -1;
	}

	exit_early = false;

	/* if we're using anonymous hugepages, nothing to be done */
	if (internal_config.in_memory && !memfd_create_supported)
		exit_early = true;

	/* if we've already unlinked the page, nothing needs to be done */
	if (!internal_config.in_memory && internal_config.hugepage_unlink)
		exit_early = true;

	if (exit_early) {
		memset(ms, 0, sizeof(*ms));
		return 0;
	}

	/* if we are not in single file segments mode, we're going to unmap the
	 * segment and thus drop the lock on original fd, but hugepage dir is
	 * now locked so we can take out another one without races.
	 */
	fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
	if (fd < 0)
		return -1;

	if (internal_config.single_file_segments) {
		map_offset = seg_idx * ms->len;
		if (resize_hugefile(fd, path, list_idx, seg_idx, map_offset,
				ms->len, false))
			return -1;
		ret = 0;
	} else {
		/* if we're able to take out a write lock, we're the last one
		 * holding onto this page.
		 */
		if (!internal_config.in_memory) {
			ret = lock(fd, LOCK_EX);
			/* no one else is using this page */
			if (ret == 1)
				unlink(path);
		}
		/* closing fd will drop the lock */
		close(fd);
		fd_list[list_idx].fds[seg_idx] = -1;
	}

	memset(ms, 0, sizeof(*ms));

	return ret < 0 ? -1 : 0;
}
struct alloc_walk_param {
	struct hugepage_info *hi;
	struct rte_memseg **ms;
	size_t page_sz;
	unsigned int segs_allocated;
	unsigned int n_segs;
	int socket;
	bool exact;
};
static int
alloc_seg_walk(const struct rte_memseg_list *msl, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct alloc_walk_param *wa = arg;
	struct rte_memseg_list *cur_msl;
	size_t page_sz;
	int cur_idx, start_idx, j, dir_fd = -1;
	unsigned int msl_idx, need, i;

	if (msl->page_sz != wa->page_sz)
		return 0;
	if (msl->socket_id != wa->socket)
		return 0;

	page_sz = (size_t)msl->page_sz;

	msl_idx = msl - mcfg->memsegs;
	cur_msl = &mcfg->memsegs[msl_idx];

	need = wa->n_segs;

	/* try finding space in memseg list */
	cur_idx = rte_fbarray_find_next_n_free(&cur_msl->memseg_arr, 0, need);
	if (cur_idx < 0)
		return 0;
	start_idx = cur_idx;

	/* do not allow any page allocations during the time we're allocating,
	 * because file creation and locking operations are not atomic,
	 * and we might be the first or the last ones to use a particular page,
	 * so we need to ensure atomicity of every operation.
	 *
	 * during init, we already hold a write lock, so don't try to take out
	 * another one.
	 */
	if (wa->hi->lock_descriptor == -1 && !internal_config.in_memory) {
		dir_fd = open(wa->hi->hugedir, O_RDONLY);
		if (dir_fd < 0) {
			RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n",
				__func__, wa->hi->hugedir, strerror(errno));
			return 0;
		}
		/* blocking writelock */
		if (flock(dir_fd, LOCK_EX)) {
			RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n",
				__func__, wa->hi->hugedir, strerror(errno));
			close(dir_fd);
			return 0;
		}
	}

	for (i = 0; i < need; i++, cur_idx++) {
		struct rte_memseg *cur;
		void *map_addr;

		cur = rte_fbarray_get(&cur_msl->memseg_arr, cur_idx);
		map_addr = RTE_PTR_ADD(cur_msl->base_va,
				cur_idx * page_sz);

		if (alloc_seg(cur, map_addr, wa->socket, wa->hi,
				msl_idx, cur_idx)) {
			RTE_LOG(DEBUG, EAL, "attempted to allocate %i segments, but only %i were allocated\n",
				need, i);

			/* if exact number wasn't requested, stop */
			if (!wa->exact)
				goto out;

			/* clean up */
			for (j = start_idx; j < cur_idx; j++) {
				struct rte_memseg *tmp;
				struct rte_fbarray *arr =
						&cur_msl->memseg_arr;

				tmp = rte_fbarray_get(arr, j);
				rte_fbarray_set_free(arr, j);

				/* free_seg may attempt to create a file, which
				 * may fail.
				 */
				if (free_seg(tmp, wa->hi, msl_idx, j))
					RTE_LOG(DEBUG, EAL, "Cannot free page\n");
			}
			/* clear the list */
			if (wa->ms)
				memset(wa->ms, 0, sizeof(*wa->ms) * wa->n_segs);

			if (dir_fd >= 0)
				close(dir_fd);
			return -1;
		}
		if (wa->ms)
			wa->ms[i] = cur;

		rte_fbarray_set_used(&cur_msl->memseg_arr, cur_idx);
	}
out:
	wa->segs_allocated = i;
	if (i > 0)
		cur_msl->version++;
	if (dir_fd >= 0)
		close(dir_fd);
	return 1;
}
struct free_walk_param {
	struct hugepage_info *hi;
	struct rte_memseg *ms;
};

static int
free_seg_walk(const struct rte_memseg_list *msl, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *found_msl;
	struct free_walk_param *wa = arg;
	uintptr_t start_addr, end_addr;
	int msl_idx, seg_idx, ret, dir_fd = -1;

	start_addr = (uintptr_t) msl->base_va;
	end_addr = start_addr + msl->len;

	if ((uintptr_t)wa->ms->addr < start_addr ||
			(uintptr_t)wa->ms->addr >= end_addr)
		return 0;

	msl_idx = msl - mcfg->memsegs;
	seg_idx = RTE_PTR_DIFF(wa->ms->addr, start_addr) / msl->page_sz;

	/* msl is const, so find the writable copy */
	found_msl = &mcfg->memsegs[msl_idx];

	/* do not allow any page allocations during the time we're freeing,
	 * because file creation and locking operations are not atomic,
	 * and we might be the first or the last ones to use a particular page,
	 * so we need to ensure atomicity of every operation.
	 *
	 * during init, we already hold a write lock, so don't try to take out
	 * another one.
	 */
	if (wa->hi->lock_descriptor == -1 && !internal_config.in_memory) {
		dir_fd = open(wa->hi->hugedir, O_RDONLY);
		if (dir_fd < 0) {
			RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n",
				__func__, wa->hi->hugedir, strerror(errno));
			return 0;
		}
		/* blocking writelock */
		if (flock(dir_fd, LOCK_EX)) {
			RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n",
				__func__, wa->hi->hugedir, strerror(errno));
			close(dir_fd);
			return 0;
		}
	}

	found_msl->version++;

	rte_fbarray_set_free(&found_msl->memseg_arr, seg_idx);

	ret = free_seg(wa->ms, wa->hi, msl_idx, seg_idx);

	if (dir_fd >= 0)
		close(dir_fd);

	if (ret < 0)
		return -1;
	return 1;
}
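
/* usage sketch for the public bulk API below (illustrative; assumes 2MB
 * hugepages are configured and socket 0 exists):
 *
 *	struct rte_memseg *ms[2];
 *
 *	// allocate exactly two 2MB segments on socket 0...
 *	if (eal_memalloc_alloc_seg_bulk(ms, 2, RTE_PGSIZE_2M, 0, true) == 2)
 *		// ...and return them to the system when done
 *		eal_memalloc_free_seg_bulk(ms, 2);
 */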
int
eal_memalloc_alloc_seg_bulk(struct rte_memseg **ms, int n_segs, size_t page_sz,
		int socket, bool exact)
{
	int i, ret = -1;
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	bool have_numa = false;
	int oldpolicy;
	struct bitmask *oldmask;
#endif
	struct alloc_walk_param wa;
	struct hugepage_info *hi = NULL;

	memset(&wa, 0, sizeof(wa));

	/* dynamic allocation not supported in legacy mode */
	if (internal_config.legacy_mem)
		return -1;

	for (i = 0; i < (int) RTE_DIM(internal_config.hugepage_info); i++) {
		if (page_sz ==
				internal_config.hugepage_info[i].hugepage_sz) {
			hi = &internal_config.hugepage_info[i];
			break;
		}
	}
	if (!hi) {
		RTE_LOG(ERR, EAL, "%s(): can't find relevant hugepage_info entry\n",
			__func__);
		return -1;
	}

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	if (check_numa()) {
		oldmask = numa_allocate_nodemask();
		prepare_numa(&oldpolicy, oldmask, socket);
		have_numa = true;
	}
#endif

	wa.exact = exact;
	wa.hi = hi;
	wa.ms = ms;
	wa.n_segs = n_segs;
	wa.page_sz = page_sz;
	wa.socket = socket;
	wa.segs_allocated = 0;

	/* memalloc is locked, so it's safe to use thread-unsafe version */
	ret = rte_memseg_list_walk_thread_unsafe(alloc_seg_walk, &wa);
	if (ret == 0) {
		RTE_LOG(ERR, EAL, "%s(): couldn't find suitable memseg_list\n",
			__func__);
		ret = -1;
	} else if (ret > 0) {
		ret = (int)wa.segs_allocated;
	}

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	if (have_numa)
		restore_numa(&oldpolicy, oldmask);
#endif
	return ret;
}
struct rte_memseg *
eal_memalloc_alloc_seg(size_t page_sz, int socket)
{
	struct rte_memseg *ms;

	if (eal_memalloc_alloc_seg_bulk(&ms, 1, page_sz, socket, true) < 0)
		return NULL;
	/* return pointer to newly allocated memseg */
	return ms;
}
int
eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs)
{
	int seg, ret = 0;

	/* dynamic free not supported in legacy mode */
	if (internal_config.legacy_mem)
		return -1;

	for (seg = 0; seg < n_segs; seg++) {
		struct rte_memseg *cur = ms[seg];
		struct hugepage_info *hi = NULL;
		struct free_walk_param wa;
		int i, walk_res;

		/* if this page is marked as unfreeable, fail */
		if (cur->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE) {
			RTE_LOG(DEBUG, EAL, "Page is not allowed to be freed\n");
			ret = -1;
			continue;
		}

		memset(&wa, 0, sizeof(wa));

		for (i = 0; i < (int)RTE_DIM(internal_config.hugepage_info);
				i++) {
			hi = &internal_config.hugepage_info[i];
			if (cur->hugepage_sz == hi->hugepage_sz)
				break;
		}
		if (i == (int)RTE_DIM(internal_config.hugepage_info)) {
			RTE_LOG(ERR, EAL, "Can't find relevant hugepage_info entry\n");
			ret = -1;
			continue;
		}

		wa.ms = cur;
		wa.hi = hi;

		/* memalloc is locked, so it's safe to use thread-unsafe version
		 */
		walk_res = rte_memseg_list_walk_thread_unsafe(free_seg_walk,
				&wa);
		if (walk_res == 1)
			continue;
		if (walk_res == 0)
			RTE_LOG(ERR, EAL, "Couldn't find memseg list\n");
		ret = -1;
	}
	return ret;
}
int
eal_memalloc_free_seg(struct rte_memseg *ms)
{
	/* dynamic free not supported in legacy mode */
	if (internal_config.legacy_mem)
		return -1;

	return eal_memalloc_free_seg_bulk(&ms, 1);
}
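
/* everything below implements secondary process synchronization: the
 * process that maps or unmaps pages bumps the per-list version number,
 * and secondaries replay the difference into their local_memsegs copy
 * via sync_chunk()/sync_status()/sync_existing().
 */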
static int
sync_chunk(struct rte_memseg_list *primary_msl,
		struct rte_memseg_list *local_msl, struct hugepage_info *hi,
		unsigned int msl_idx, bool used, int start, int end)
{
	struct rte_fbarray *l_arr, *p_arr;
	int i, ret, chunk_len, diff_len;

	l_arr = &local_msl->memseg_arr;
	p_arr = &primary_msl->memseg_arr;

	/* we need to aggregate allocations/deallocations into bigger chunks,
	 * as we don't want to spam the user with per-page callbacks.
	 *
	 * to avoid any potential issues, we also want to trigger
	 * deallocation callbacks *before* we actually deallocate
	 * memory, so that the user application could wrap up its use
	 * before it goes away.
	 */

	chunk_len = end - start;

	/* find how many contiguous pages we can map/unmap for this chunk */
	diff_len = used ?
			rte_fbarray_find_contig_free(l_arr, start) :
			rte_fbarray_find_contig_used(l_arr, start);

	/* has to be at least one page */
	if (diff_len < 1)
		return -1;

	diff_len = RTE_MIN(chunk_len, diff_len);

	/* if we are freeing memory, notify the application */
	if (!used) {
		struct rte_memseg *ms;
		void *start_va;
		size_t len, page_sz;

		ms = rte_fbarray_get(l_arr, start);
		start_va = ms->addr;
		page_sz = (size_t)primary_msl->page_sz;
		len = page_sz * diff_len;

		eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
				start_va, len);
	}

	for (i = 0; i < diff_len; i++) {
		struct rte_memseg *p_ms, *l_ms;
		int seg_idx = start + i;

		l_ms = rte_fbarray_get(l_arr, seg_idx);
		p_ms = rte_fbarray_get(p_arr, seg_idx);

		if (l_ms == NULL || p_ms == NULL)
			return -1;

		if (used) {
			ret = alloc_seg(l_ms, p_ms->addr,
					p_ms->socket_id, hi,
					msl_idx, seg_idx);
			if (ret < 0)
				return -1;
			rte_fbarray_set_used(l_arr, seg_idx);
		} else {
			ret = free_seg(l_ms, hi, msl_idx, seg_idx);
			rte_fbarray_set_free(l_arr, seg_idx);
			if (ret < 0)
				return -1;
		}
	}

	/* if we just allocated memory, notify the application */
	if (used) {
		struct rte_memseg *ms;
		void *start_va;
		size_t len, page_sz;

		ms = rte_fbarray_get(l_arr, start);
		start_va = ms->addr;
		page_sz = (size_t)primary_msl->page_sz;
		len = page_sz * diff_len;

		eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC,
				start_va, len);
	}

	/* calculate how much we can advance until next chunk */
	diff_len = used ?
			rte_fbarray_find_contig_used(l_arr, start) :
			rte_fbarray_find_contig_free(l_arr, start);
	ret = RTE_MIN(chunk_len, diff_len);

	return ret;
}
static int
sync_status(struct rte_memseg_list *primary_msl,
		struct rte_memseg_list *local_msl, struct hugepage_info *hi,
		unsigned int msl_idx, bool used)
{
	struct rte_fbarray *l_arr, *p_arr;
	int p_idx, l_chunk_len, p_chunk_len, ret;
	int start, end;

	/* this is a little bit tricky, but the basic idea is - walk both lists
	 * and spot any places where there are discrepancies. walking both
	 * lists and noting discrepancies in a single go is a hard problem, so
	 * we do it in two passes - first we spot any places where allocated
	 * segments mismatch (i.e. ensure that everything that's allocated in
	 * the primary is also allocated in the secondary), and then we do the
	 * same by looking at free segments instead.
	 *
	 * we also need to aggregate changes into chunks, as we have to call
	 * callbacks per allocation, not per page.
	 */
	l_arr = &local_msl->memseg_arr;
	p_arr = &primary_msl->memseg_arr;

	if (used)
		p_idx = rte_fbarray_find_next_used(p_arr, 0);
	else
		p_idx = rte_fbarray_find_next_free(p_arr, 0);

	while (p_idx >= 0) {
		int next_chunk_search_idx;

		if (used) {
			p_chunk_len = rte_fbarray_find_contig_used(p_arr,
					p_idx);
			l_chunk_len = rte_fbarray_find_contig_used(l_arr,
					p_idx);
		} else {
			p_chunk_len = rte_fbarray_find_contig_free(p_arr,
					p_idx);
			l_chunk_len = rte_fbarray_find_contig_free(l_arr,
					p_idx);
		}
		/* best case scenario - no differences (or bigger, which will
		 * be fixed during next iteration), look for next chunk
		 */
		if (l_chunk_len >= p_chunk_len) {
			next_chunk_search_idx = p_idx + p_chunk_len;
			goto next_chunk;
		}

		/* if both chunks start at the same point, skip parts we know
		 * are identical, and sync the rest. each call to sync_chunk
		 * will only sync contiguous segments, so we need to call this
		 * until we are sure there are no more differences in this
		 * chunk.
		 */
		start = p_idx + l_chunk_len;
		end = p_idx + p_chunk_len;
		do {
			ret = sync_chunk(primary_msl, local_msl, hi, msl_idx,
					used, start, end);
			start += ret;
		} while (start < end && ret >= 0);
		/* if ret is negative, something went wrong */
		if (ret < 0)
			return -1;

		next_chunk_search_idx = p_idx + p_chunk_len;
next_chunk:
		/* skip to end of this chunk */
		if (used) {
			p_idx = rte_fbarray_find_next_used(p_arr,
					next_chunk_search_idx);
		} else {
			p_idx = rte_fbarray_find_next_free(p_arr,
					next_chunk_search_idx);
		}
	}
	return 0;
}
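
/* worked example: if the primary has pages 0-3 marked used but the local
 * copy only has 0-1, the 'used' pass finds p_chunk_len == 4 and
 * l_chunk_len == 2, and calls sync_chunk() over [2, 4), which maps the
 * two missing pages locally and fires one RTE_MEM_EVENT_ALLOC callback.
 */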
static int
sync_existing(struct rte_memseg_list *primary_msl,
		struct rte_memseg_list *local_msl, struct hugepage_info *hi,
		unsigned int msl_idx)
{
	int ret, dir_fd;

	/* do not allow any page allocations during the time we're allocating,
	 * because file creation and locking operations are not atomic,
	 * and we might be the first or the last ones to use a particular page,
	 * so we need to ensure atomicity of every operation.
	 */
	dir_fd = open(hi->hugedir, O_RDONLY);
	if (dir_fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n", __func__,
			hi->hugedir, strerror(errno));
		return -1;
	}
	/* blocking writelock */
	if (flock(dir_fd, LOCK_EX)) {
		RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n", __func__,
			hi->hugedir, strerror(errno));
		close(dir_fd);
		return -1;
	}

	/* ensure all allocated space is the same in both lists */
	ret = sync_status(primary_msl, local_msl, hi, msl_idx, true);
	if (ret < 0)
		goto fail;

	/* ensure all unallocated space is the same in both lists */
	ret = sync_status(primary_msl, local_msl, hi, msl_idx, false);
	if (ret < 0)
		goto fail;

	/* update version number */
	local_msl->version = primary_msl->version;

	close(dir_fd);
	return 0;
fail:
	close(dir_fd);
	return -1;
}
static int
sync_walk(const struct rte_memseg_list *msl, void *arg __rte_unused)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *primary_msl, *local_msl;
	struct hugepage_info *hi = NULL;
	unsigned int i;
	int msl_idx;

	msl_idx = msl - mcfg->memsegs;
	primary_msl = &mcfg->memsegs[msl_idx];
	local_msl = &local_memsegs[msl_idx];

	for (i = 0; i < RTE_DIM(internal_config.hugepage_info); i++) {
		uint64_t cur_sz =
			internal_config.hugepage_info[i].hugepage_sz;
		uint64_t msl_sz = primary_msl->page_sz;

		if (msl_sz == cur_sz) {
			hi = &internal_config.hugepage_info[i];
			break;
		}
	}
	if (!hi) {
		RTE_LOG(ERR, EAL, "Can't find relevant hugepage_info entry\n");
		return -1;
	}

	/* if versions don't match, synchronize everything */
	if (local_msl->version != primary_msl->version &&
			sync_existing(primary_msl, local_msl, hi, msl_idx))
		return -1;
	return 0;
}

int
eal_memalloc_sync_with_primary(void)
{
	/* nothing to be done in primary */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return 0;

	/* memalloc is locked, so it's safe to call thread-unsafe version */
	if (rte_memseg_list_walk_thread_unsafe(sync_walk, NULL))
		return -1;
	return 0;
}
static int
secondary_msl_create_walk(const struct rte_memseg_list *msl,
		void *arg __rte_unused)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *primary_msl, *local_msl;
	char name[PATH_MAX];
	int msl_idx, ret;

	msl_idx = msl - mcfg->memsegs;
	primary_msl = &mcfg->memsegs[msl_idx];
	local_msl = &local_memsegs[msl_idx];

	/* create distinct fbarrays for each secondary */
	snprintf(name, RTE_FBARRAY_NAME_LEN, "%s_%i",
		primary_msl->memseg_arr.name, getpid());

	ret = rte_fbarray_init(&local_msl->memseg_arr, name,
		primary_msl->memseg_arr.len,
		primary_msl->memseg_arr.elt_sz);
	if (ret < 0) {
		RTE_LOG(ERR, EAL, "Cannot initialize local memory map\n");
		return -1;
	}
	local_msl->base_va = primary_msl->base_va;
	local_msl->len = primary_msl->len;

	return 0;
}
static int
alloc_list(int list_idx, int len)
{
	int *data;
	int i;

	/* ensure we have space to store fd per each possible segment */
	data = malloc(sizeof(int) * len);
	if (data == NULL) {
		RTE_LOG(ERR, EAL, "Unable to allocate space for file descriptors\n");
		return -1;
	}
	/* set all fd's as invalid */
	for (i = 0; i < len; i++)
		data[i] = -1;

	fd_list[list_idx].fds = data;
	fd_list[list_idx].len = len;
	fd_list[list_idx].count = 0;
	fd_list[list_idx].memseg_list_fd = -1;

	return 0;
}
static int
fd_list_create_walk(const struct rte_memseg_list *msl,
		void *arg __rte_unused)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	unsigned int len;
	int msl_idx;

	msl_idx = msl - mcfg->memsegs;
	len = msl->memseg_arr.len;

	return alloc_list(msl_idx, len);
}
int
eal_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;

	/* single file segments mode doesn't support individual segment fd's */
	if (internal_config.single_file_segments)
		return -ENOTSUP;

	/* if list is not allocated, allocate it */
	if (fd_list[list_idx].len == 0) {
		int len = mcfg->memsegs[list_idx].memseg_arr.len;

		if (alloc_list(list_idx, len) < 0)
			return -ENOMEM;
	}
	fd_list[list_idx].fds[seg_idx] = fd;

	return 0;
}
int
eal_memalloc_set_seg_list_fd(int list_idx, int fd)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;

	/* non-single file segment mode doesn't support segment list fd's */
	if (!internal_config.single_file_segments)
		return -ENOTSUP;

	/* if list is not allocated, allocate it */
	if (fd_list[list_idx].len == 0) {
		int len = mcfg->memsegs[list_idx].memseg_arr.len;

		if (alloc_list(list_idx, len) < 0)
			return -ENOMEM;
	}

	fd_list[list_idx].memseg_list_fd = fd;

	return 0;
}
int
eal_memalloc_get_seg_fd(int list_idx, int seg_idx)
{
	int fd;

	if (internal_config.in_memory || internal_config.no_hugetlbfs) {
#ifndef MEMFD_SUPPORTED
		/* in in-memory or no-huge mode, we rely on memfd support */
		return -ENOTSUP;
#endif
		/* memfd supported, but hugetlbfs memfd may not be */
		if (!internal_config.no_hugetlbfs && !memfd_create_supported)
			return -ENOTSUP;
	}

	if (internal_config.single_file_segments) {
		fd = fd_list[list_idx].memseg_list_fd;
	} else if (fd_list[list_idx].len == 0) {
		/* list not initialized */
		fd = -1;
	} else {
		fd = fd_list[list_idx].fds[seg_idx];
	}
	if (fd < 0)
		return -ENODEV;
	return fd;
}
static int
test_memfd_create(void)
{
#ifdef MEMFD_SUPPORTED
	unsigned int i;

	for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
		uint64_t pagesz = internal_config.hugepage_info[i].hugepage_sz;
		int pagesz_flag = pagesz_flags(pagesz);
		int flags;

		flags = pagesz_flag | RTE_MFD_HUGETLB;
		int fd = memfd_create("test", flags);
		if (fd < 0) {
			/* we failed - let memalloc know this isn't working */
			if (errno == EINVAL) {
				memfd_create_supported = 0;
				return 0; /* not supported */
			}

			/* we got other error - something's wrong */
			return -1; /* error */
		}
		close(fd);
		return 1; /* supported */
	}
#endif
	return 0; /* not supported */
}
int
eal_memalloc_get_seg_fd_offset(int list_idx, int seg_idx, size_t *offset)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;

	if (internal_config.in_memory || internal_config.no_hugetlbfs) {
#ifndef MEMFD_SUPPORTED
		/* in in-memory or no-huge mode, we rely on memfd support */
		return -ENOTSUP;
#endif
		/* memfd supported, but hugetlbfs memfd may not be */
		if (!internal_config.no_hugetlbfs && !memfd_create_supported)
			return -ENOTSUP;
	}

	/* fd_list not initialized? */
	if (fd_list[list_idx].len == 0)
		return -ENODEV;
	if (internal_config.single_file_segments) {
		size_t pgsz = mcfg->memsegs[list_idx].page_sz;

		/* segment not active? */
		if (fd_list[list_idx].memseg_list_fd < 0)
			return -ENOENT;
		*offset = pgsz * seg_idx;
	} else {
		/* segment not active? */
		if (fd_list[list_idx].fds[seg_idx] < 0)
			return -ENOENT;
		*offset = 0;
	}
	return 0;
}
int
eal_memalloc_init(void)
{
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		if (rte_memseg_list_walk(secondary_msl_create_walk, NULL) < 0)
			return -1;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
			internal_config.in_memory) {
		int mfd_res = test_memfd_create();

		if (mfd_res < 0) {
			RTE_LOG(ERR, EAL, "Unable to check if memfd is supported\n");
			return -1;
		}
		if (mfd_res == 1)
			RTE_LOG(DEBUG, EAL, "Using memfd for anonymous memory\n");
		else
			RTE_LOG(INFO, EAL, "Using memfd is not supported, falling back to anonymous hugepages\n");

		/* we only support single-file segments mode with in-memory
		 * mode if hugetlbfs works with memfd_create, which the check
		 * above has just established.
		 */
		if (internal_config.single_file_segments &&
				mfd_res != 1) {
			RTE_LOG(ERR, EAL, "Single-file segments mode cannot be used without memfd support\n");
			return -1;
		}
		/* this cannot ever happen but better safe than sorry */
		if (!anonymous_hugepages_supported) {
			RTE_LOG(ERR, EAL, "Using anonymous memory is not supported\n");
			return -1;
		}
	}

	/* initialize all of the fd lists */
	if (rte_memseg_list_walk(fd_list_create_walk, NULL))
		return -1;
	return 0;
}