1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017-2018 Intel Corporation
5 #define _FILE_OFFSET_BITS 64
15 #include <sys/types.h>
17 #include <sys/queue.h>
22 #include <sys/ioctl.h>
26 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
30 #include <linux/falloc.h>
31 #include <linux/mman.h> /* for hugetlb-related mmap flags */
33 #include <rte_common.h>
35 #include <rte_eal_memconfig.h>
37 #include <rte_memory.h>
38 #include <rte_spinlock.h>
40 #include "eal_filesystem.h"
41 #include "eal_internal_cfg.h"
42 #include "eal_memalloc.h"
43 #include "eal_private.h"
45 const int anonymous_hugepages_supported =
48 #define RTE_MAP_HUGE_SHIFT MAP_HUGE_SHIFT
51 #define RTE_MAP_HUGE_SHIFT 26
55 * not all kernel versions support fallocate on hugetlbfs, so fall back to
56 * ftruncate and disallow deallocation if fallocate is not supported.
58 static int fallocate_supported = -1; /* unknown */
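/*
 * a minimal sketch of the fallback described above (hypothetical helper, not
 * called anywhere in this file): grow a hugepage file with fallocate() when
 * the kernel supports it on hugetlbfs, otherwise fall back to ftruncate(),
 * which can grow the file but can never punch holes. assumes the includes
 * above provide fallocate(), ftruncate() and errno.
 */
static int __rte_unused
grow_hugefile_sketch(int fd, uint64_t offset, uint64_t page_sz)
{
	if (fallocate_supported != 0) {
		/* mode 0 allocates (grows) the range [offset, offset + page_sz) */
		if (fallocate(fd, 0, offset, page_sz) == 0) {
			fallocate_supported = 1;
			return 0;
		}
		if (errno != ENOTSUP)
			return -1;
		/* remember that hole punching (deallocation) will not work */
		fallocate_supported = 0;
	}
	/* grow-only fallback */
	return ftruncate(fd, offset + page_sz);
}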
61 * we have two modes - single file segments, and file-per-page mode.
63 * for single-file segments, we need some kind of mechanism to keep track of
64 * which hugepages can be freed back to the system, and which cannot. we cannot
65 * use flock() because it doesn't allow locking parts of a file, and we cannot
66 * use fcntl() due to issues with its semantics, so we will have to rely on a
67 * bunch of lockfiles, one for each page. so, we will use the 'fds' array to keep track
68 * of per-page lockfiles. we will store the actual segment list fd in the
69 * 'memseg_list_fd' field.
71 * for file-per-page mode, each page will have its own fd, so 'memseg_list_fd'
72 * will be invalid (set to -1), and we'll use 'fds' to keep track of page fd's.
74 * we cannot know how many pages a system will have in advance, but we do know
75 * that they come in lists, and we know the lengths of these lists. so, simply store
76 * a malloc'd array of fd's indexed by list and segment index.
78 * they will be initialized at startup, and filled as we allocate/deallocate
82 int *fds; /**< dynamically allocated array of segment lock fd's */
83 int memseg_list_fd; /**< memseg list fd */
84 int len; /**< total length of the array */
85 int count; /**< entries used in the array */
86 } fd_list[RTE_MAX_MEMSEG_LISTS];
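/*
 * a minimal sketch of the lockfile idea described above (hypothetical helper,
 * not used below): every process using a page holds LOCK_SH on that page's
 * lockfile, so a page may only be released by whoever manages to upgrade to
 * LOCK_EX - i.e. the last remaining user. assumes flock() is available from
 * the includes above.
 */
static int __rte_unused
page_lockfile_is_last_user(int lock_fd)
{
	/* non-blocking upgrade succeeds only if no other process holds LOCK_SH */
	return flock(lock_fd, LOCK_EX | LOCK_NB) == 0;
}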
88 /** local copy of a memory map, used to synchronize memory hotplug in MP */
89 static struct rte_memseg_list local_memsegs[RTE_MAX_MEMSEG_LISTS];
91 static sigjmp_buf huge_jmpenv;
93 static void __rte_unused huge_sigbus_handler(int signo __rte_unused)
95 siglongjmp(huge_jmpenv, 1);
98 /* Put setjmp into a wrapper function to avoid a compile error. Any non-volatile,
99 * non-static local variable in the stack frame calling sigsetjmp might be
100 * clobbered by a call to longjmp.
102 static int __rte_unused huge_wrap_sigsetjmp(void)
104 return sigsetjmp(huge_jmpenv, 1);
107 static struct sigaction huge_action_old;
108 static int huge_need_recover;
110 static void __rte_unused
111 huge_register_sigbus(void)
114 struct sigaction action;
117 sigaddset(&mask, SIGBUS);
119 action.sa_mask = mask;
120 action.sa_handler = huge_sigbus_handler;
122 huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);
125 static void __rte_unused
126 huge_recover_sigbus(void)
128 if (huge_need_recover) {
129 sigaction(SIGBUS, &huge_action_old, NULL);
130 huge_need_recover = 0;
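/*
 * illustrative sketch (hypothetical helper, not called anywhere) of how the
 * guards above fit together: register the handler, fault the page in within
 * a sigsetjmp() scope (alloc_seg() below uses the same sigsetjmp/fault
 * sequence), then restore the previous handler.
 */
static int __rte_unused
touch_page_guarded_sketch(volatile int *page)
{
	int ok;

	huge_register_sigbus();
	if (huge_wrap_sigsetjmp()) {
		/* SIGBUS fired - the page could not be faulted in */
		ok = 0;
	} else {
		/* read and write back the same value to force the fault */
		*page = *page;
		ok = 1;
	}
	huge_recover_sigbus();
	return ok;
}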
134 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
139 /* Check if kernel supports NUMA. */
140 if (numa_available() != 0) {
141 RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
148 prepare_numa(int *oldpolicy, struct bitmask *oldmask, int socket_id)
150 RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
151 if (get_mempolicy(oldpolicy, oldmask->maskp,
152 oldmask->size + 1, 0, 0) < 0) {
154 "Failed to get current mempolicy: %s. "
155 "Assuming MPOL_DEFAULT.\n", strerror(errno));
156 *oldpolicy = MPOL_DEFAULT;
159 "Setting policy MPOL_PREFERRED for socket %d\n",
161 numa_set_preferred(socket_id);
165 restore_numa(int *oldpolicy, struct bitmask *oldmask)
168 "Restoring previous memory policy: %d\n", *oldpolicy);
169 if (*oldpolicy == MPOL_DEFAULT) {
170 numa_set_localalloc();
171 } else if (set_mempolicy(*oldpolicy, oldmask->maskp,
172 oldmask->size + 1) < 0) {
173 RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
175 numa_set_localalloc();
177 numa_free_cpumask(oldmask);
182 * uses fstat to report the size of a file on disk
185 get_file_size(int fd)
188 if (fstat(fd, &st) < 0)
193 /* returns 1 on successful lock, 0 on unsuccessful lock, -1 on error */
194 static int lock(int fd, int type)
198 /* flock may be interrupted */
200 ret = flock(fd, type | LOCK_NB);
201 } while (ret && errno == EINTR);
203 if (ret && errno == EWOULDBLOCK) {
207 RTE_LOG(ERR, EAL, "%s(): error calling flock(): %s\n",
208 __func__, strerror(errno));
211 /* lock was successful */
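/* typical use of lock() above (illustrative only):
 *
 *	ret = lock(fd, LOCK_EX);
 *	if (ret == 1)  we own the lock - e.g. safe to unlink the file
 *	if (ret == 0)  another process holds it - not an error, leave it alone
 *	if (ret < 0)   flock() itself failed - report and bail out
 */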
215 static int get_segment_lock_fd(int list_idx, int seg_idx)
217 char path[PATH_MAX] = {0};
220 if (list_idx < 0 || list_idx >= (int)RTE_DIM(fd_list))
222 if (seg_idx < 0 || seg_idx >= fd_list[list_idx].len)
225 fd = fd_list[list_idx].fds[seg_idx];
226 /* does this lock already exist? */
230 eal_get_hugefile_lock_path(path, sizeof(path),
231 list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);
233 fd = open(path, O_CREAT | O_RDWR, 0660);
235 RTE_LOG(ERR, EAL, "%s(): error creating lockfile '%s': %s\n",
236 __func__, path, strerror(errno));
239 /* take out a read lock */
240 if (lock(fd, LOCK_SH) != 1) {
241 RTE_LOG(ERR, EAL, "%s(): failed to take out a readlock on '%s': %s\n",
242 __func__, path, strerror(errno));
246 /* store it for future reference */
247 fd_list[list_idx].fds[seg_idx] = fd;
248 fd_list[list_idx].count++;
252 static int unlock_segment(int list_idx, int seg_idx)
256 if (list_idx < 0 || list_idx >= (int)RTE_DIM(fd_list))
258 if (seg_idx < 0 || seg_idx >= fd_list[list_idx].len)
261 fd = fd_list[list_idx].fds[seg_idx];
263 /* upgrade lock to exclusive to see if we can remove the lockfile */
264 ret = lock(fd, LOCK_EX);
266 /* we've succeeded in taking exclusive lock, this lockfile may
269 char path[PATH_MAX] = {0};
270 eal_get_hugefile_lock_path(path, sizeof(path),
271 list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);
273 RTE_LOG(ERR, EAL, "%s(): error removing lockfile '%s': %s\n",
274 __func__, path, strerror(errno));
277 /* we don't want to leak the fd, so even if we fail to lock, close fd
278 * and remove it from list anyway.
281 fd_list[list_idx].fds[seg_idx] = -1;
282 fd_list[list_idx].count--;
290 get_seg_fd(char *path, int buflen, struct hugepage_info *hi,
291 unsigned int list_idx, unsigned int seg_idx)
295 if (internal_config.single_file_segments) {
296 /* create a hugepage file path */
297 eal_get_hugefile_path(path, buflen, hi->hugedir, list_idx);
299 fd = fd_list[list_idx].memseg_list_fd;
302 fd = open(path, O_CREAT | O_RDWR, 0600);
304 RTE_LOG(ERR, EAL, "%s(): open failed: %s\n",
305 __func__, strerror(errno));
308 /* take out a read lock and keep it indefinitely */
309 if (lock(fd, LOCK_SH) < 0) {
310 RTE_LOG(ERR, EAL, "%s(): lock failed: %s\n",
311 __func__, strerror(errno));
315 fd_list[list_idx].memseg_list_fd = fd;
318 /* create a hugepage file path */
319 eal_get_hugefile_path(path, buflen, hi->hugedir,
320 list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);
322 fd = fd_list[list_idx].fds[seg_idx];
325 fd = open(path, O_CREAT | O_RDWR, 0600);
327 RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n",
328 __func__, strerror(errno));
331 /* take out a read lock */
332 if (lock(fd, LOCK_SH) < 0) {
333 RTE_LOG(ERR, EAL, "%s(): lock failed: %s\n",
334 __func__, strerror(errno));
338 fd_list[list_idx].fds[seg_idx] = fd;
345 resize_hugefile(int fd, char *path, int list_idx, int seg_idx,
346 uint64_t fa_offset, uint64_t page_sz, bool grow)
350 if (fallocate_supported == 0) {
351 /* we cannot deallocate memory if fallocate() is not
352 * supported, and hugepage file is already locked at
353 * creation, so no further synchronization needed.
357 RTE_LOG(DEBUG, EAL, "%s(): fallocate not supported, not freeing page back to the system\n",
361 uint64_t new_size = fa_offset + page_sz;
362 uint64_t cur_size = get_file_size(fd);
364 /* fallocate isn't supported, fall back to ftruncate */
365 if (new_size > cur_size &&
366 ftruncate(fd, new_size) < 0) {
367 RTE_LOG(DEBUG, EAL, "%s(): ftruncate() failed: %s\n",
368 __func__, strerror(errno));
372 int flags = grow ? 0 : FALLOC_FL_PUNCH_HOLE |
376 /* if fallocate() is supported, we need to take out a
377 * read lock on allocate (to prevent other processes
378 * from deallocating this page), and take out a write
379 * lock on deallocate (to ensure nobody else is using
382 * read locks on the page itself are already taken out at
383 * file creation, in get_seg_fd().
385 * we cannot rely on simple use of flock() call, because
386 * we need to be able to lock a section of the file,
387 * and we cannot use fcntl() locks, because of numerous
388 * problems with their semantics, so we will use
389 * deterministically named lock files for each section
392 * if we're shrinking the file, we want to upgrade our
393 * lock from shared to exclusive.
395 * lock_fd is an fd for a lockfile, not for the segment
398 lock_fd = get_segment_lock_fd(list_idx, seg_idx);
401 /* we are using this lockfile to determine
402 * whether this particular page is locked, as we
403 * are in single file segments mode and thus
404 * cannot use regular flock() to get this info.
406 * we want to try and take out an exclusive lock
407 * on the lock file to determine if we're the
408 * last ones using this page, and if not, we
409 * won't be shrinking it, and will instead exit
412 ret = lock(lock_fd, LOCK_EX);
414 /* drop the lock on the lockfile, so that even
415 * if we couldn't shrink the file ourselves, we
416 * are signalling to other processes that we're
417 * no longer using this page.
419 if (unlock_segment(list_idx, seg_idx))
420 RTE_LOG(ERR, EAL, "Could not unlock segment\n");
422 /* additionally, if this was the last lock on
423 * this segment list, we can safely close the
424 * page file fd, so that one of the processes
425 * could then delete the file after shrinking.
427 if (ret < 1 && fd_list[list_idx].count == 0) {
429 fd_list[list_idx].memseg_list_fd = -1;
433 RTE_LOG(ERR, EAL, "Could not lock segment\n");
437 /* failed to lock, not an error. */
441 /* grow or shrink the file */
442 ret = fallocate(fd, flags, fa_offset, page_sz);
445 if (fallocate_supported == -1 &&
447 RTE_LOG(ERR, EAL, "%s(): fallocate() not supported, hugepage deallocation will be disabled\n",
450 fallocate_supported = 0;
452 RTE_LOG(DEBUG, EAL, "%s(): fallocate() failed: %s\n",
458 fallocate_supported = 1;
460 * we've grown/shrunk the file, and we hold an
461 * exclusive lock now. check if there are no
462 * more segments active in this segment list,
463 * and remove the file if there aren't.
465 if (fd_list[list_idx].count == 0) {
467 RTE_LOG(ERR, EAL, "%s(): unlinking '%s' failed: %s\n",
471 fd_list[list_idx].memseg_list_fd = -1;
480 alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
481 struct hugepage_info *hi, unsigned int list_idx,
482 unsigned int seg_idx)
484 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
485 int cur_socket_id = 0;
497 alloc_sz = hi->hugepage_sz;
498 if (!internal_config.single_file_segments &&
499 internal_config.in_memory &&
500 anonymous_hugepages_supported) {
503 log2 = rte_log2_u32(alloc_sz);
504 /* as per the mmap() manpage, hugepage sizes are encoded as log2 of the
505 * page size, shifted left by MAP_HUGE_SHIFT
507 flags = (log2 << RTE_MAP_HUGE_SHIFT) | MAP_HUGETLB | MAP_FIXED |
508 MAP_PRIVATE | MAP_ANONYMOUS;
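/* e.g. for a 2 MB page, rte_log2_u32(2 * 1024 * 1024) == 21, so the encoded
 * size is (21 << MAP_HUGE_SHIFT), the same value the kernel headers define
 * as MAP_HUGE_2MB.
 */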
510 va = mmap(addr, alloc_sz, PROT_READ | PROT_WRITE, flags, -1, 0);
512 /* the single-file segments codepath will never be active here, because
513 * in-memory mode is incompatible with it and the combination is rejected
514 * at the EAL initialization stage; however, the compiler doesn't know
515 * that and complains about map_offset being used uninitialized
516 * on failure codepaths when in-memory mode is enabled. so,
517 * assign a value here.
521 /* takes out a read lock on segment or segment list */
522 fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
524 RTE_LOG(ERR, EAL, "Couldn't get fd on hugepage file\n");
528 if (internal_config.single_file_segments) {
529 map_offset = seg_idx * alloc_sz;
530 ret = resize_hugefile(fd, path, list_idx, seg_idx,
531 map_offset, alloc_sz, true);
536 if (ftruncate(fd, alloc_sz) < 0) {
537 RTE_LOG(DEBUG, EAL, "%s(): ftruncate() failed: %s\n",
538 __func__, strerror(errno));
541 if (internal_config.hugepage_unlink) {
543 RTE_LOG(DEBUG, EAL, "%s(): unlink() failed: %s\n",
544 __func__, strerror(errno));
551 * map the segment and populate page tables; the kernel fills
552 * this segment with zeros if it's a new page.
554 va = mmap(addr, alloc_sz, PROT_READ | PROT_WRITE,
555 MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd,
559 if (va == MAP_FAILED) {
560 RTE_LOG(DEBUG, EAL, "%s(): mmap() failed: %s\n", __func__,
562 /* mmap failed, but the previous region might have been
563 * unmapped anyway. try to remap it
568 RTE_LOG(DEBUG, EAL, "%s(): wrong mmap() address\n", __func__);
569 munmap(va, alloc_sz);
573 /* In Linux, hugetlb limits, such as those imposed by cgroups, are
574 * enforced at fault time rather than at mmap() time, even
575 * with MAP_POPULATE. The kernel will then send
576 * a SIGBUS signal. To avoid being killed, save the stack
577 * environment here; if SIGBUS happens, we can jump
580 if (huge_wrap_sigsetjmp()) {
581 RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more hugepages of size %uMB\n",
582 (unsigned int)(alloc_sz >> 20));
586 /* we need to trigger a write to the page to force a page fault and
587 * ensure that the page is accessible to us, but we can't overwrite a value
588 * that is already there, so read the old value, and write it back.
589 * the kernel populates the page with zeroes initially.
591 *(volatile int *)addr = *(volatile int *)addr;
593 iova = rte_mem_virt2iova(addr);
594 if (iova == RTE_BAD_PHYS_ADDR) {
595 RTE_LOG(DEBUG, EAL, "%s(): can't get IOVA addr\n",
600 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
601 move_pages(getpid(), 1, &addr, NULL, &cur_socket_id, 0);
603 if (cur_socket_id != socket_id) {
605 "%s(): allocation happened on wrong socket (wanted %d, got %d)\n",
606 __func__, socket_id, cur_socket_id);
612 ms->hugepage_sz = alloc_sz;
614 ms->nchannel = rte_memory_get_nchannel();
615 ms->nrank = rte_memory_get_nrank();
617 ms->socket_id = socket_id;
622 munmap(addr, alloc_sz);
625 #ifdef RTE_ARCH_PPC_64
626 flags |= MAP_HUGETLB;
628 new_addr = eal_get_virtual_area(addr, &alloc_sz, alloc_sz, 0, flags);
629 if (new_addr != addr) {
630 if (new_addr != NULL)
631 munmap(new_addr, alloc_sz);
632 /* we're leaving a hole in our virtual address space. if
633 * somebody else maps this hole now, we could accidentally
634 * overwrite it in the future.
636 RTE_LOG(CRIT, EAL, "Can't mmap holes in our virtual address space\n");
639 /* some codepaths will return negative fd, so exit early */
643 if (internal_config.single_file_segments) {
644 resize_hugefile(fd, path, list_idx, seg_idx, map_offset,
646 /* ignore failure, can't make it any worse */
648 /* only remove file if we can take out a write lock */
649 if (internal_config.hugepage_unlink == 0 &&
650 internal_config.in_memory == 0 &&
651 lock(fd, LOCK_EX) == 1)
654 fd_list[list_idx].fds[seg_idx] = -1;
660 free_seg(struct rte_memseg *ms, struct hugepage_info *hi,
661 unsigned int list_idx, unsigned int seg_idx)
667 /* erase page data */
668 memset(ms->addr, 0, ms->len);
670 if (mmap(ms->addr, ms->len, PROT_READ,
671 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) ==
673 RTE_LOG(DEBUG, EAL, "couldn't unmap page\n");
677 /* if we've already unlinked the page, nothing needs to be done */
678 if (internal_config.hugepage_unlink) {
679 memset(ms, 0, sizeof(*ms));
683 /* if we are not in single file segments mode, we're going to unmap the
684 * segment and thus drop the lock on the original fd, but the hugepage dir is
685 * now locked, so we can take out another one without races.
687 fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
691 if (internal_config.single_file_segments) {
692 map_offset = seg_idx * ms->len;
693 if (resize_hugefile(fd, path, list_idx, seg_idx, map_offset,
698 /* if we're able to take out a write lock, we're the last one
699 * holding onto this page.
701 ret = lock(fd, LOCK_EX);
703 /* no one else is using this page */
707 /* closing fd will drop the lock */
709 fd_list[list_idx].fds[seg_idx] = -1;
712 memset(ms, 0, sizeof(*ms));
714 return ret < 0 ? -1 : 0;
717 struct alloc_walk_param {
718 struct hugepage_info *hi;
719 struct rte_memseg **ms;
721 unsigned int segs_allocated;
727 alloc_seg_walk(const struct rte_memseg_list *msl, void *arg)
729 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
730 struct alloc_walk_param *wa = arg;
731 struct rte_memseg_list *cur_msl;
733 int cur_idx, start_idx, j, dir_fd = -1;
734 unsigned int msl_idx, need, i;
736 if (msl->page_sz != wa->page_sz)
738 if (msl->socket_id != wa->socket)
741 page_sz = (size_t)msl->page_sz;
743 msl_idx = msl - mcfg->memsegs;
744 cur_msl = &mcfg->memsegs[msl_idx];
748 /* try finding space in memseg list */
749 cur_idx = rte_fbarray_find_next_n_free(&cur_msl->memseg_arr, 0, need);
754 /* do not allow any page allocations during the time we're allocating,
755 * because file creation and locking operations are not atomic,
756 * and we might be the first or the last ones to use a particular page,
757 * so we need to ensure atomicity of every operation.
759 * during init, we already hold a write lock, so don't try to take out
762 if (wa->hi->lock_descriptor == -1 && !internal_config.in_memory) {
763 dir_fd = open(wa->hi->hugedir, O_RDONLY);
765 RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n",
766 __func__, wa->hi->hugedir, strerror(errno));
769 /* blocking writelock */
770 if (flock(dir_fd, LOCK_EX)) {
771 RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n",
772 __func__, wa->hi->hugedir, strerror(errno));
778 for (i = 0; i < need; i++, cur_idx++) {
779 struct rte_memseg *cur;
782 cur = rte_fbarray_get(&cur_msl->memseg_arr, cur_idx);
783 map_addr = RTE_PTR_ADD(cur_msl->base_va,
786 if (alloc_seg(cur, map_addr, wa->socket, wa->hi,
788 RTE_LOG(DEBUG, EAL, "attempted to allocate %i segments, but only %i were allocated\n",
791 /* if exact number wasn't requested, stop */
796 for (j = start_idx; j < cur_idx; j++) {
797 struct rte_memseg *tmp;
798 struct rte_fbarray *arr =
799 &cur_msl->memseg_arr;
801 tmp = rte_fbarray_get(arr, j);
802 rte_fbarray_set_free(arr, j);
804 /* free_seg may attempt to create a file, which
807 if (free_seg(tmp, wa->hi, msl_idx, j))
808 RTE_LOG(DEBUG, EAL, "Cannot free page\n");
812 memset(wa->ms, 0, sizeof(*wa->ms) * wa->n_segs);
821 rte_fbarray_set_used(&cur_msl->memseg_arr, cur_idx);
824 wa->segs_allocated = i;
832 struct free_walk_param {
833 struct hugepage_info *hi;
834 struct rte_memseg *ms;
837 free_seg_walk(const struct rte_memseg_list *msl, void *arg)
839 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
840 struct rte_memseg_list *found_msl;
841 struct free_walk_param *wa = arg;
842 uintptr_t start_addr, end_addr;
843 int msl_idx, seg_idx, ret, dir_fd = -1;
845 start_addr = (uintptr_t) msl->base_va;
846 end_addr = start_addr + msl->memseg_arr.len * (size_t)msl->page_sz;
848 if ((uintptr_t)wa->ms->addr < start_addr ||
849 (uintptr_t)wa->ms->addr >= end_addr)
852 msl_idx = msl - mcfg->memsegs;
853 seg_idx = RTE_PTR_DIFF(wa->ms->addr, start_addr) / msl->page_sz;
856 found_msl = &mcfg->memsegs[msl_idx];
858 /* do not allow any page allocations during the time we're freeing,
859 * because file creation and locking operations are not atomic,
860 * and we might be the first or the last ones to use a particular page,
861 * so we need to ensure atomicity of every operation.
863 * during init, we already hold a write lock, so don't try to take out
866 if (wa->hi->lock_descriptor == -1 && !internal_config.in_memory) {
867 dir_fd = open(wa->hi->hugedir, O_RDONLY);
869 RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n",
870 __func__, wa->hi->hugedir, strerror(errno));
873 /* blocking writelock */
874 if (flock(dir_fd, LOCK_EX)) {
875 RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n",
876 __func__, wa->hi->hugedir, strerror(errno));
882 found_msl->version++;
884 rte_fbarray_set_free(&found_msl->memseg_arr, seg_idx);
886 ret = free_seg(wa->ms, wa->hi, msl_idx, seg_idx);
898 eal_memalloc_alloc_seg_bulk(struct rte_memseg **ms, int n_segs, size_t page_sz,
899 int socket, bool exact)
902 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
903 bool have_numa = false;
905 struct bitmask *oldmask;
907 struct alloc_walk_param wa;
908 struct hugepage_info *hi = NULL;
910 memset(&wa, 0, sizeof(wa));
912 /* dynamic allocation not supported in legacy mode */
913 if (internal_config.legacy_mem)
916 for (i = 0; i < (int) RTE_DIM(internal_config.hugepage_info); i++) {
918 internal_config.hugepage_info[i].hugepage_sz) {
919 hi = &internal_config.hugepage_info[i];
924 RTE_LOG(ERR, EAL, "%s(): can't find relevant hugepage_info entry\n",
929 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
931 oldmask = numa_allocate_nodemask();
932 prepare_numa(&oldpolicy, oldmask, socket);
941 wa.page_sz = page_sz;
943 wa.segs_allocated = 0;
945 /* memalloc is locked, so it's safe to use thread-unsafe version */
946 ret = rte_memseg_list_walk_thread_unsafe(alloc_seg_walk, &wa);
948 RTE_LOG(ERR, EAL, "%s(): couldn't find suitable memseg_list\n",
951 } else if (ret > 0) {
952 ret = (int)wa.segs_allocated;
955 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
957 restore_numa(&oldpolicy, oldmask);
963 eal_memalloc_alloc_seg(size_t page_sz, int socket)
965 struct rte_memseg *ms;
966 if (eal_memalloc_alloc_seg_bulk(&ms, 1, page_sz, socket, true) < 0)
968 /* return pointer to newly allocated memseg */
973 eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs)
977 /* dynamic free not supported in legacy mode */
978 if (internal_config.legacy_mem)
981 for (seg = 0; seg < n_segs; seg++) {
982 struct rte_memseg *cur = ms[seg];
983 struct hugepage_info *hi = NULL;
984 struct free_walk_param wa;
987 /* if this page is marked as unfreeable, fail */
988 if (cur->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE) {
989 RTE_LOG(DEBUG, EAL, "Page is not allowed to be freed\n");
994 memset(&wa, 0, sizeof(wa));
996 for (i = 0; i < (int)RTE_DIM(internal_config.hugepage_info);
998 hi = &internal_config.hugepage_info[i];
999 if (cur->hugepage_sz == hi->hugepage_sz)
1002 if (i == (int)RTE_DIM(internal_config.hugepage_info)) {
1003 RTE_LOG(ERR, EAL, "Can't find relevant hugepage_info entry\n");
1011 /* memalloc is locked, so it's safe to use thread-unsafe version
1013 walk_res = rte_memseg_list_walk_thread_unsafe(free_seg_walk,
1018 RTE_LOG(ERR, EAL, "Couldn't find memseg list\n");
1025 eal_memalloc_free_seg(struct rte_memseg *ms)
1027 /* dynamic free not supported in legacy mode */
1028 if (internal_config.legacy_mem)
1031 return eal_memalloc_free_seg_bulk(&ms, 1);
1035 sync_chunk(struct rte_memseg_list *primary_msl,
1036 struct rte_memseg_list *local_msl, struct hugepage_info *hi,
1037 unsigned int msl_idx, bool used, int start, int end)
1039 struct rte_fbarray *l_arr, *p_arr;
1040 int i, ret, chunk_len, diff_len;
1042 l_arr = &local_msl->memseg_arr;
1043 p_arr = &primary_msl->memseg_arr;
1045 /* we need to aggregate allocations/deallocations into bigger chunks,
1046 * as we don't want to spam the user with per-page callbacks.
1048 * to avoid any potential issues, we also want to trigger
1049 * deallocation callbacks *before* we actually deallocate
1050 * memory, so that the user application could wrap up its use
1051 * before it goes away.
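 *
 * for example (illustrative): if pages 3-10 are freed in the primary, the
 * secondary issues one RTE_MEM_EVENT_FREE notification covering all eight
 * pages rather than eight per-page callbacks.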
1054 chunk_len = end - start;
1056 /* find how many contiguous pages we can map/unmap for this chunk */
1058 rte_fbarray_find_contig_free(l_arr, start) :
1059 rte_fbarray_find_contig_used(l_arr, start);
1061 /* has to be at least one page */
1065 diff_len = RTE_MIN(chunk_len, diff_len);
1067 /* if we are freeing memory, notify the application */
1069 struct rte_memseg *ms;
1071 size_t len, page_sz;
1073 ms = rte_fbarray_get(l_arr, start);
1074 start_va = ms->addr;
1075 page_sz = (size_t)primary_msl->page_sz;
1076 len = page_sz * diff_len;
1078 eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
1082 for (i = 0; i < diff_len; i++) {
1083 struct rte_memseg *p_ms, *l_ms;
1084 int seg_idx = start + i;
1086 l_ms = rte_fbarray_get(l_arr, seg_idx);
1087 p_ms = rte_fbarray_get(p_arr, seg_idx);
1089 if (l_ms == NULL || p_ms == NULL)
1093 ret = alloc_seg(l_ms, p_ms->addr,
1094 p_ms->socket_id, hi,
1098 rte_fbarray_set_used(l_arr, seg_idx);
1100 ret = free_seg(l_ms, hi, msl_idx, seg_idx);
1101 rte_fbarray_set_free(l_arr, seg_idx);
1107 /* if we just allocated memory, notify the application */
1109 struct rte_memseg *ms;
1111 size_t len, page_sz;
1113 ms = rte_fbarray_get(l_arr, start);
1114 start_va = ms->addr;
1115 page_sz = (size_t)primary_msl->page_sz;
1116 len = page_sz * diff_len;
1118 eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC,
1122 /* calculate how much we can advance until next chunk */
1124 rte_fbarray_find_contig_used(l_arr, start) :
1125 rte_fbarray_find_contig_free(l_arr, start);
1126 ret = RTE_MIN(chunk_len, diff_len);
1132 sync_status(struct rte_memseg_list *primary_msl,
1133 struct rte_memseg_list *local_msl, struct hugepage_info *hi,
1134 unsigned int msl_idx, bool used)
1136 struct rte_fbarray *l_arr, *p_arr;
1137 int p_idx, l_chunk_len, p_chunk_len, ret;
1140 /* this is a little bit tricky, but the basic idea is to walk both lists
1141 * and spot any places where there are discrepancies. walking both lists
1142 * and noting discrepancies in a single go is a hard problem, so we do
1143 * it in two passes - first we spot any places where allocated segments
1144 * mismatch (i.e. ensure that everything that's allocated in the primary
1145 * is also allocated in the secondary), and then we do it by looking at
1146 * free segments instead.
1148 * we also need to aggregate changes into chunks, as we have to call
1149 * callbacks per allocation, not per page.
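 *
 * worked example (illustrative): if the primary has pages 0-3 in use and the
 * secondary only has 0-1, the "used" pass finds the mismatch at index 2 and
 * sync_chunk() maps pages 2-3 locally; the "free" pass would likewise unmap
 * anything the primary has since released.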
1151 l_arr = &local_msl->memseg_arr;
1152 p_arr = &primary_msl->memseg_arr;
1155 p_idx = rte_fbarray_find_next_used(p_arr, 0);
1157 p_idx = rte_fbarray_find_next_free(p_arr, 0);
1159 while (p_idx >= 0) {
1160 int next_chunk_search_idx;
1163 p_chunk_len = rte_fbarray_find_contig_used(p_arr,
1165 l_chunk_len = rte_fbarray_find_contig_used(l_arr,
1168 p_chunk_len = rte_fbarray_find_contig_free(p_arr,
1170 l_chunk_len = rte_fbarray_find_contig_free(l_arr,
1173 /* best case scenario - no differences (or the local chunk is bigger,
1174 * which will be fixed during the next iteration); look for the next chunk
1176 if (l_chunk_len >= p_chunk_len) {
1177 next_chunk_search_idx = p_idx + p_chunk_len;
1181 /* if both chunks start at the same point, skip parts we know
1182 * are identical, and sync the rest. each call to sync_chunk
1183 * will only sync contiguous segments, so we need to call this
1184 * until we are sure there are no more differences in this
1187 start = p_idx + l_chunk_len;
1188 end = p_idx + p_chunk_len;
1190 ret = sync_chunk(primary_msl, local_msl, hi, msl_idx,
1193 } while (start < end && ret >= 0);
1194 /* if ret is negative, something went wrong */
1198 next_chunk_search_idx = p_idx + p_chunk_len;
1200 /* skip to end of this chunk */
1202 p_idx = rte_fbarray_find_next_used(p_arr,
1203 next_chunk_search_idx);
1205 p_idx = rte_fbarray_find_next_free(p_arr,
1206 next_chunk_search_idx);
1213 sync_existing(struct rte_memseg_list *primary_msl,
1214 struct rte_memseg_list *local_msl, struct hugepage_info *hi,
1215 unsigned int msl_idx)
1219 /* do not allow any page allocations during the time we're allocating,
1220 * because file creation and locking operations are not atomic,
1221 * and we might be the first or the last ones to use a particular page,
1222 * so we need to ensure atomicity of every operation.
1224 dir_fd = open(hi->hugedir, O_RDONLY);
1226 RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n", __func__,
1227 hi->hugedir, strerror(errno));
1230 /* blocking writelock */
1231 if (flock(dir_fd, LOCK_EX)) {
1232 RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n", __func__,
1233 hi->hugedir, strerror(errno));
1238 /* ensure all allocated space is the same in both lists */
1239 ret = sync_status(primary_msl, local_msl, hi, msl_idx, true);
1243 /* ensure all unallocated space is the same in both lists */
1244 ret = sync_status(primary_msl, local_msl, hi, msl_idx, false);
1248 /* update version number */
1249 local_msl->version = primary_msl->version;
1260 sync_walk(const struct rte_memseg_list *msl, void *arg __rte_unused)
1262 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1263 struct rte_memseg_list *primary_msl, *local_msl;
1264 struct hugepage_info *hi = NULL;
1268 msl_idx = msl - mcfg->memsegs;
1269 primary_msl = &mcfg->memsegs[msl_idx];
1270 local_msl = &local_memsegs[msl_idx];
1272 for (i = 0; i < RTE_DIM(internal_config.hugepage_info); i++) {
1274 internal_config.hugepage_info[i].hugepage_sz;
1275 uint64_t msl_sz = primary_msl->page_sz;
1276 if (msl_sz == cur_sz) {
1277 hi = &internal_config.hugepage_info[i];
1282 RTE_LOG(ERR, EAL, "Can't find relevant hugepage_info entry\n");
1286 /* if versions don't match, synchronize everything */
1287 if (local_msl->version != primary_msl->version &&
1288 sync_existing(primary_msl, local_msl, hi, msl_idx))
1295 eal_memalloc_sync_with_primary(void)
1297 /* nothing to be done in primary */
1298 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1301 /* memalloc is locked, so it's safe to call thread-unsafe version */
1302 if (rte_memseg_list_walk_thread_unsafe(sync_walk, NULL))
1308 secondary_msl_create_walk(const struct rte_memseg_list *msl,
1309 void *arg __rte_unused)
1311 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1312 struct rte_memseg_list *primary_msl, *local_msl;
1313 char name[PATH_MAX];
1316 msl_idx = msl - mcfg->memsegs;
1317 primary_msl = &mcfg->memsegs[msl_idx];
1318 local_msl = &local_memsegs[msl_idx];
1320 /* create distinct fbarrays for each secondary */
1321 snprintf(name, RTE_FBARRAY_NAME_LEN, "%s_%i",
1322 primary_msl->memseg_arr.name, getpid());
1324 ret = rte_fbarray_init(&local_msl->memseg_arr, name,
1325 primary_msl->memseg_arr.len,
1326 primary_msl->memseg_arr.elt_sz);
1328 RTE_LOG(ERR, EAL, "Cannot initialize local memory map\n");
1331 local_msl->base_va = primary_msl->base_va;
1337 alloc_list(int list_idx, int len)
1342 /* ensure we have space to store an fd for each possible segment */
1343 data = malloc(sizeof(int) * len);
1345 RTE_LOG(ERR, EAL, "Unable to allocate space for file descriptors\n");
1348 /* set all fd's as invalid */
1349 for (i = 0; i < len; i++)
1352 fd_list[list_idx].fds = data;
1353 fd_list[list_idx].len = len;
1354 fd_list[list_idx].count = 0;
1355 fd_list[list_idx].memseg_list_fd = -1;
1361 fd_list_create_walk(const struct rte_memseg_list *msl,
1362 void *arg __rte_unused)
1364 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1368 msl_idx = msl - mcfg->memsegs;
1369 len = msl->memseg_arr.len;
1371 return alloc_list(msl_idx, len);
1375 eal_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd)
1377 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1379 /* if list is not allocated, allocate it */
1380 if (fd_list[list_idx].len == 0) {
1381 int len = mcfg->memsegs[list_idx].memseg_arr.len;
1383 if (alloc_list(list_idx, len) < 0)
1386 fd_list[list_idx].fds[seg_idx] = fd;
1392 eal_memalloc_get_seg_fd(int list_idx, int seg_idx)
1394 if (internal_config.single_file_segments)
1395 return fd_list[list_idx].memseg_list_fd;
1396 /* list not initialized */
1397 if (fd_list[list_idx].len == 0)
1399 return fd_list[list_idx].fds[seg_idx];
1403 eal_memalloc_init(void)
1405 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
1406 if (rte_memseg_list_walk(secondary_msl_create_walk, NULL) < 0)
1409 /* initialize all of the fd lists */
1410 if (rte_memseg_list_walk(fd_list_create_walk, NULL))