/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#define _FILE_OFFSET_BITS 64
#include <errno.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/queue.h>
#include <sys/file.h>
#include <unistd.h>
#include <limits.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <signal.h>
#include <setjmp.h>
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
#include <numa.h>
#include <numaif.h>
#endif
#include <linux/falloc.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_eal_memconfig.h>
#include <rte_eal.h>
#include <rte_memory.h>
#include <rte_spinlock.h>

#include "eal_filesystem.h"
#include "eal_internal_cfg.h"
#include "eal_memalloc.h"
#include "eal_private.h"
/*
 * not all kernel versions support fallocate on hugetlbfs, so fall back to
 * ftruncate and disallow deallocation if fallocate is not supported.
 */
static int fallocate_supported = -1; /* unknown */
/* for single-file segments, we need some kind of mechanism to keep track of
 * which hugepages can be freed back to the system, and which cannot. we cannot
 * use flock() because it doesn't allow locking parts of a file, and we cannot
 * use fcntl() due to issues with its semantics, so we will have to rely on a
 * bunch of lockfiles for each page.
 *
 * we cannot know how many pages a system will have in advance, but we do know
 * that they come in lists, and we know lengths of these lists. so, simply store
 * a malloc'd array of fd's indexed by list and segment index.
 *
 * they will be initialized at startup, and filled as we allocate/deallocate
 * segments. also, use this to track memseg list proper fd.
 */
static struct {
	int *fds; /**< dynamically allocated array of segment lock fd's */
	int memseg_list_fd; /**< memseg list fd */
	int len; /**< total length of the array */
	int count; /**< entries used in the array */
} lock_fds[RTE_MAX_MEMSEG_LISTS];
/** local copy of a memory map, used to synchronize memory hotplug in MP */
static struct rte_memseg_list local_memsegs[RTE_MAX_MEMSEG_LISTS];
static sigjmp_buf huge_jmpenv;

static void __rte_unused huge_sigbus_handler(int signo __rte_unused)
{
	siglongjmp(huge_jmpenv, 1);
}

/* Put setjmp into a wrap method to avoid compiling error. Any non-volatile,
 * non-static local variable in the stack frame calling sigsetjmp might be
 * clobbered by a call to longjmp.
 */
static int __rte_unused huge_wrap_sigsetjmp(void)
{
	return sigsetjmp(huge_jmpenv, 1);
}
static struct sigaction huge_action_old;
static int huge_need_recover;

static void __rte_unused
huge_register_sigbus(void)
{
	sigset_t mask;
	struct sigaction action;

	sigemptyset(&mask);
	sigaddset(&mask, SIGBUS);
	action.sa_flags = 0;
	action.sa_mask = mask;
	action.sa_handler = huge_sigbus_handler;

	huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);
}

static void __rte_unused
huge_recover_sigbus(void)
{
	if (huge_need_recover) {
		sigaction(SIGBUS, &huge_action_old, NULL);
		huge_need_recover = 0;
	}
}
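
/* helpers for temporarily switching the memory policy so that pages are
 * faulted in on the requested NUMA socket, and for restoring the previous
 * policy afterwards.
 */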
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
static bool
check_numa(void)
{
	bool ret = true;
	/* Check if kernel supports NUMA. */
	if (numa_available() != 0) {
		RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
		ret = false;
	}
	return ret;
}
static void
prepare_numa(int *oldpolicy, struct bitmask *oldmask, int socket_id)
{
	RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
	if (get_mempolicy(oldpolicy, oldmask->maskp,
			oldmask->size + 1, 0, 0) < 0) {
		RTE_LOG(ERR, EAL,
			"Failed to get current mempolicy: %s. "
			"Assuming MPOL_DEFAULT.\n", strerror(errno));
		*oldpolicy = MPOL_DEFAULT;
	}
	RTE_LOG(DEBUG, EAL,
		"Setting policy MPOL_PREFERRED for socket %d\n",
		socket_id);
	numa_set_preferred(socket_id);
}
static void
restore_numa(int *oldpolicy, struct bitmask *oldmask)
{
	RTE_LOG(DEBUG, EAL,
		"Restoring previous memory policy: %d\n", *oldpolicy);
	if (*oldpolicy == MPOL_DEFAULT) {
		numa_set_localalloc();
	} else if (set_mempolicy(*oldpolicy, oldmask->maskp,
			oldmask->size + 1) < 0) {
		RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
			strerror(errno));
		numa_set_localalloc();
	}
	numa_free_cpumask(oldmask);
}
#endif
/*
 * uses fstat to report the size of a file on disk
 */
static off_t
get_file_size(int fd)
{
	struct stat st;
	if (fstat(fd, &st) < 0)
		return 0;
	return st.st_size;
}
/* we cannot use rte_memseg_list_walk() here because we will be holding a
 * write lock whenever we enter every function in this file, and copying
 * the same iteration code everywhere is not ideal either. so, use a lockless
 * copy of the memseg list walk here.
 */
static int
memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, ret = 0;

	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];

		if (msl->base_va == NULL)
			continue;

		ret = func(msl, arg);
		if (ret < 0)
			return -1;
		if (ret > 0)
			return 1;
	}
	return 0;
}
/* returns 1 on successful lock, 0 on unsuccessful lock, -1 on error */
static int lock(int fd, int type)
{
	int ret;

	/* flock may be interrupted */
	do {
		ret = flock(fd, type | LOCK_NB);
	} while (ret && errno == EINTR);

	if (ret && errno == EWOULDBLOCK) {
		/* couldn't lock */
		return 0;
	} else if (ret) {
		RTE_LOG(ERR, EAL, "%s(): error calling flock(): %s\n",
			__func__, strerror(errno));
		return -1;
	}
	/* lock was successful */
	return 1;
}
static int get_segment_lock_fd(int list_idx, int seg_idx)
{
	char path[PATH_MAX] = {0};
	int fd;

	if (list_idx < 0 || list_idx >= (int)RTE_DIM(lock_fds))
		return -1;
	if (seg_idx < 0 || seg_idx >= lock_fds[list_idx].len)
		return -1;

	fd = lock_fds[list_idx].fds[seg_idx];
	/* does this lock already exist? */
	if (fd >= 0)
		return fd;

	eal_get_hugefile_lock_path(path, sizeof(path),
			list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);

	fd = open(path, O_CREAT | O_RDWR, 0660);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error creating lockfile '%s': %s\n",
			__func__, path, strerror(errno));
		return -1;
	}
	/* take out a read lock */
	if (lock(fd, LOCK_SH) != 1) {
		RTE_LOG(ERR, EAL, "%s(): failed to take out a readlock on '%s': %s\n",
			__func__, path, strerror(errno));
		close(fd);
		return -1;
	}
	/* store it for future reference */
	lock_fds[list_idx].fds[seg_idx] = fd;
	lock_fds[list_idx].count++;
	return fd;
}
static int unlock_segment(int list_idx, int seg_idx)
{
	int fd, ret;

	if (list_idx < 0 || list_idx >= (int)RTE_DIM(lock_fds))
		return -1;
	if (seg_idx < 0 || seg_idx >= lock_fds[list_idx].len)
		return -1;

	fd = lock_fds[list_idx].fds[seg_idx];

	/* upgrade lock to exclusive to see if we can remove the lockfile */
	ret = lock(fd, LOCK_EX);
	if (ret == 1) {
		/* we've succeeded in taking exclusive lock, this lockfile may
		 * be removed.
		 */
		char path[PATH_MAX] = {0};
		eal_get_hugefile_lock_path(path, sizeof(path),
				list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);
		if (unlink(path)) {
			RTE_LOG(ERR, EAL, "%s(): error removing lockfile '%s': %s\n",
					__func__, path, strerror(errno));
		}
	}
	/* we don't want to leak the fd, so even if we fail to lock, close fd
	 * and remove it from list anyway.
	 */
	close(fd);
	lock_fds[list_idx].fds[seg_idx] = -1;
	lock_fds[list_idx].count--;

	if (ret < 0)
		return -1;
	return 0;
}
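
/* open (creating it if necessary) the hugepage file backing the given segment
 * and take out a shared flock() on it. in single-file-segments mode this is
 * the per-list file shared by all segments in the list; otherwise it is a
 * per-segment file. returns the open fd, or -1 on failure.
 */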
static int
get_seg_fd(char *path, int buflen, struct hugepage_info *hi,
		unsigned int list_idx, unsigned int seg_idx)
{
	int fd;

	if (internal_config.single_file_segments) {
		/* create a hugepage file path */
		eal_get_hugefile_path(path, buflen, hi->hugedir, list_idx);
		fd = lock_fds[list_idx].memseg_list_fd;

		if (fd < 0) {
			fd = open(path, O_CREAT | O_RDWR, 0600);
			if (fd < 0) {
				RTE_LOG(ERR, EAL, "%s(): open failed: %s\n",
					__func__, strerror(errno));
				return -1;
			}
			/* take out a read lock and keep it indefinitely */
			if (lock(fd, LOCK_SH) < 0) {
				RTE_LOG(ERR, EAL, "%s(): lock failed: %s\n",
					__func__, strerror(errno));
				close(fd);
				return -1;
			}
			lock_fds[list_idx].memseg_list_fd = fd;
		}
	} else {
		/* create a hugepage file path */
		eal_get_hugefile_path(path, buflen, hi->hugedir,
				list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);
		fd = open(path, O_CREAT | O_RDWR, 0600);
		if (fd < 0) {
			RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
					strerror(errno));
			return -1;
		}
		/* take out a read lock */
		if (lock(fd, LOCK_SH) < 0) {
			RTE_LOG(ERR, EAL, "%s(): lock failed: %s\n",
				__func__, strerror(errno));
			close(fd);
			return -1;
		}
	}
	return fd;
}
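
/* grow or shrink the single-file-segments backing file by one page at the
 * given offset, using fallocate() where supported and falling back to
 * ftruncate() (grow only) where it is not. returns 0 on success, -1 on
 * failure.
 */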
static int
resize_hugefile(int fd, char *path, int list_idx, int seg_idx,
		uint64_t fa_offset, uint64_t page_sz, bool grow)
{
	bool again = false;
	do {
		if (fallocate_supported == 0) {
			/* we cannot deallocate memory if fallocate() is not
			 * supported, and hugepage file is already locked at
			 * creation, so no further synchronization needed.
			 */

			if (!grow) {
				RTE_LOG(DEBUG, EAL, "%s(): fallocate not supported, not freeing page back to the system\n",
					__func__);
				return -1;
			}
			uint64_t new_size = fa_offset + page_sz;
			uint64_t cur_size = get_file_size(fd);

			/* fallocate isn't supported, fall back to ftruncate */
			if (new_size > cur_size &&
					ftruncate(fd, new_size) < 0) {
				RTE_LOG(DEBUG, EAL, "%s(): ftruncate() failed: %s\n",
					__func__, strerror(errno));
				return -1;
			}
		} else {
			int flags = grow ? 0 : FALLOC_FL_PUNCH_HOLE |
					FALLOC_FL_KEEP_SIZE;
			int ret, lock_fd;

			/* if fallocate() is supported, we need to take out a
			 * read lock on allocate (to prevent other processes
			 * from deallocating this page), and take out a write
			 * lock on deallocate (to ensure nobody else is using
			 * this page).
			 *
			 * read locks on page itself are already taken out at
			 * file creation, in get_seg_fd().
			 *
			 * we cannot rely on simple use of flock() call, because
			 * we need to be able to lock a section of the file,
			 * and we cannot use fcntl() locks, because of numerous
			 * problems with their semantics, so we will use
			 * deterministically named lock files for each section
			 * of the file.
			 *
			 * if we're shrinking the file, we want to upgrade our
			 * lock from shared to exclusive.
			 *
			 * lock_fd is an fd for a lockfile, not for the segment
			 * list.
			 */
			lock_fd = get_segment_lock_fd(list_idx, seg_idx);

			if (!grow) {
				/* we are using this lockfile to determine
				 * whether this particular page is locked, as we
				 * are in single file segments mode and thus
				 * cannot use regular flock() to get this info.
				 *
				 * we want to try and take out an exclusive lock
				 * on the lock file to determine if we're the
				 * last ones using this page, and if not, we
				 * won't be shrinking it, and will instead exit
				 * prematurely.
				 */
				ret = lock(lock_fd, LOCK_EX);

				/* drop the lock on the lockfile, so that even
				 * if we couldn't shrink the file ourselves, we
				 * are signalling to other processes that we're
				 * no longer using this page.
				 */
				if (unlock_segment(list_idx, seg_idx))
					RTE_LOG(ERR, EAL, "Could not unlock segment\n");

				/* additionally, if this was the last lock on
				 * this segment list, we can safely close the
				 * page file fd, so that one of the processes
				 * could then delete the file after shrinking.
				 */
				if (ret < 1 && lock_fds[list_idx].count == 0) {
					close(fd);
					lock_fds[list_idx].memseg_list_fd = -1;
				}

				if (ret < 0) {
					RTE_LOG(ERR, EAL, "Could not lock segment\n");
					return -1;
				}
				if (ret == 0)
					/* failed to lock, not an error. */
					return 0;
			}

			/* grow or shrink the file */
			ret = fallocate(fd, flags, fa_offset, page_sz);

			if (ret < 0) {
				if (fallocate_supported == -1 &&
						errno == ENOTSUP) {
					RTE_LOG(ERR, EAL, "%s(): fallocate() not supported, hugepage deallocation will be disabled\n",
						__func__);
					again = true;
					fallocate_supported = 0;
				} else {
					RTE_LOG(DEBUG, EAL, "%s(): fallocate() failed: %s\n",
						__func__,
						strerror(errno));
					return -1;
				}
			} else {
				fallocate_supported = 1;

				/* we've grown/shrunk the file, and we hold an
				 * exclusive lock now. check if there are no
				 * more segments active in this segment list,
				 * and remove the file if there aren't.
				 */
				if (lock_fds[list_idx].count == 0) {
					if (unlink(path))
						RTE_LOG(ERR, EAL, "%s(): unlinking '%s' failed: %s\n",
							__func__, path,
							strerror(errno));
					close(fd);
					lock_fds[list_idx].memseg_list_fd = -1;
				}
			}
		}
	} while (again);
	return 0;
}
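
/* allocate one hugepage segment: grow/create the backing file, mmap() it at
 * the requested address, fault the page in and fill in the memseg metadata.
 * on failure, the virtual area is re-reserved as anonymous memory and the
 * backing file is shrunk or removed.
 */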
static int
alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
		struct hugepage_info *hi, unsigned int list_idx,
		unsigned int seg_idx)
{
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	int cur_socket_id = 0;
#endif
	uint64_t map_offset;
	char path[PATH_MAX];
	int ret = 0;
	int fd;
	size_t alloc_sz;
	int flags;
	void *new_addr;

	/* takes out a read lock on segment or segment list */
	fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "Couldn't get fd on hugepage file\n");
		return -1;
	}

	alloc_sz = hi->hugepage_sz;
	if (internal_config.single_file_segments) {
		map_offset = seg_idx * alloc_sz;
		ret = resize_hugefile(fd, path, list_idx, seg_idx, map_offset,
				alloc_sz, true);
		if (ret < 0)
			goto resized;
	} else {
		map_offset = 0;
		if (ftruncate(fd, alloc_sz) < 0) {
			RTE_LOG(DEBUG, EAL, "%s(): ftruncate() failed: %s\n",
				__func__, strerror(errno));
			goto resized;
		}
	}

	/*
	 * map the segment, and populate page tables, the kernel fills this
	 * segment with zeros if it's a new page.
	 */
	void *va = mmap(addr, alloc_sz, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd, map_offset);

	if (va == MAP_FAILED) {
		RTE_LOG(DEBUG, EAL, "%s(): mmap() failed: %s\n", __func__,
			strerror(errno));
		/* mmap failed, but the previous region might have been
		 * unmapped anyway. try to remap it
		 */
		goto unmapped;
	}
	if (va != addr) {
		RTE_LOG(DEBUG, EAL, "%s(): wrong mmap() address\n", __func__);
		munmap(va, alloc_sz);
		goto resized;
	}

	rte_iova_t iova = rte_mem_virt2iova(addr);
	if (iova == RTE_BAD_PHYS_ADDR) {
		RTE_LOG(DEBUG, EAL, "%s(): can't get IOVA addr\n",
			__func__);
		goto mapped;
	}
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	move_pages(getpid(), 1, &addr, NULL, &cur_socket_id, 0);

	if (cur_socket_id != socket_id) {
		RTE_LOG(DEBUG, EAL,
			"%s(): allocation happened on wrong socket (wanted %d, got %d)\n",
			__func__, socket_id, cur_socket_id);
		goto mapped;
	}
#endif

	/* In linux, hugetlb limitations, like cgroup, are
	 * enforced at fault time instead of mmap(), even
	 * with the option of MAP_POPULATE. Kernel will send
	 * a SIGBUS signal. To avoid to be killed, save stack
	 * environment here, if SIGBUS happens, we can jump
	 * back here.
	 */
	if (huge_wrap_sigsetjmp()) {
		RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more hugepages of size %uMB\n",
			(unsigned int)(alloc_sz >> 20));
		goto mapped;
	}
	/* for non-single file segments, we can close fd here */
	if (!internal_config.single_file_segments)
		close(fd);

	/* we need to trigger a write to the page to enforce page fault and
	 * ensure that page is accessible to us, but we can't overwrite value
	 * that is already there, so read the old value, and write it back.
	 * kernel populates the page with zeroes initially.
	 */
	*(volatile int *)addr = *(volatile int *)addr;

	ms->addr = addr;
	ms->hugepage_sz = alloc_sz;
	ms->len = alloc_sz;
	ms->nchannel = rte_memory_get_nchannel();
	ms->nrank = rte_memory_get_nrank();
	ms->iova = iova;
	ms->socket_id = socket_id;

	return 0;

mapped:
	munmap(addr, alloc_sz);
unmapped:
	flags = MAP_FIXED;
#ifdef RTE_ARCH_PPC_64
	flags |= MAP_HUGETLB;
#endif
	new_addr = eal_get_virtual_area(addr, &alloc_sz, alloc_sz, 0, flags);
	if (new_addr != addr) {
		if (new_addr != NULL)
			munmap(new_addr, alloc_sz);
		/* we're leaving a hole in our virtual address space. if
		 * somebody else maps this hole now, we could accidentally
		 * override it in the future.
		 */
		RTE_LOG(CRIT, EAL, "Can't mmap holes in our virtual address space\n");
	}
resized:
	if (internal_config.single_file_segments) {
		resize_hugefile(fd, path, list_idx, seg_idx, map_offset,
				alloc_sz, false);
		/* ignore failure, can't make it any worse */
	} else {
		/* only remove file if we can take out a write lock */
		if (lock(fd, LOCK_EX) == 1)
			unlink(path);
		close(fd);
	}
	return -1;
}
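
/* return one hugepage segment back to the system: zero out the page, replace
 * the mapping with an anonymous reservation, and shrink or unlink the backing
 * file.
 */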
static int
free_seg(struct rte_memseg *ms, struct hugepage_info *hi,
		unsigned int list_idx, unsigned int seg_idx)
{
	uint64_t map_offset;
	char path[PATH_MAX];
	int fd, ret;

	/* erase page data */
	memset(ms->addr, 0, ms->len);

	if (mmap(ms->addr, ms->len, PROT_READ,
			MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) ==
				MAP_FAILED) {
		RTE_LOG(DEBUG, EAL, "couldn't unmap page\n");
		return -1;
	}

	/* if we are not in single file segments mode, we're going to unmap the
	 * segment and thus drop the lock on original fd, but hugepage dir is
	 * now locked so we can take out another one without races.
	 */
	fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
	if (fd < 0)
		return -1;

	if (internal_config.single_file_segments) {
		map_offset = seg_idx * ms->len;
		if (resize_hugefile(fd, path, list_idx, seg_idx, map_offset,
				ms->len, false))
			return -1;
		ret = 0;
	} else {
		/* if we're able to take out a write lock, we're the last one
		 * holding onto this page.
		 */
		ret = lock(fd, LOCK_EX);
		if (ret >= 0) {
			/* no one else is using this page */
			if (ret == 1)
				unlink(path);
		}
		/* closing fd will drop the lock */
		close(fd);
	}

	memset(ms, 0, sizeof(*ms));

	return ret < 0 ? -1 : 0;
}
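
/* parameters for the per-memseg-list walk callbacks used by the allocation
 * and free paths below.
 */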
struct alloc_walk_param {
	struct hugepage_info *hi;
	struct rte_memseg **ms;
	size_t page_sz;
	unsigned int segs_allocated;
	unsigned int n_segs;
	int socket;
	bool exact;
};
static int
alloc_seg_walk(const struct rte_memseg_list *msl, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct alloc_walk_param *wa = arg;
	struct rte_memseg_list *cur_msl;
	size_t page_sz;
	int cur_idx, start_idx, j, dir_fd = -1;
	unsigned int msl_idx, need, i;

	if (msl->page_sz != wa->page_sz)
		return 0;
	if (msl->socket_id != wa->socket)
		return 0;

	page_sz = (size_t)msl->page_sz;

	msl_idx = msl - mcfg->memsegs;
	cur_msl = &mcfg->memsegs[msl_idx];

	need = wa->n_segs;

	/* try finding space in memseg list */
	cur_idx = rte_fbarray_find_prev_n_free(&cur_msl->memseg_arr,
			cur_msl->memseg_arr.len - 1, need);
	if (cur_idx < 0)
		return 0;
	start_idx = cur_idx;

	/* do not allow any page allocations during the time we're allocating,
	 * because file creation and locking operations are not atomic,
	 * and we might be the first or the last ones to use a particular page,
	 * so we need to ensure atomicity of every operation.
	 *
	 * during init, we already hold a write lock, so don't try to take out
	 * another one.
	 */
	if (wa->hi->lock_descriptor == -1) {
		dir_fd = open(wa->hi->hugedir, O_RDONLY);
		if (dir_fd < 0) {
			RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n",
				__func__, wa->hi->hugedir, strerror(errno));
			return -1;
		}
		/* blocking writelock */
		if (flock(dir_fd, LOCK_EX)) {
			RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n",
				__func__, wa->hi->hugedir, strerror(errno));
			close(dir_fd);
			return -1;
		}
	}

	for (i = 0; i < need; i++, cur_idx++) {
		struct rte_memseg *cur;
		void *map_addr;

		cur = rte_fbarray_get(&cur_msl->memseg_arr, cur_idx);
		map_addr = RTE_PTR_ADD(cur_msl->base_va,
				cur_idx * page_sz);

		if (alloc_seg(cur, map_addr, wa->socket, wa->hi,
				msl_idx, cur_idx)) {
			RTE_LOG(DEBUG, EAL, "attempted to allocate %i segments, but only %i were allocated\n",
				need, i);

			/* if exact number wasn't requested, stop */
			if (!wa->exact)
				goto out;

			/* clean up */
			for (j = start_idx; j < cur_idx; j++) {
				struct rte_memseg *tmp;
				struct rte_fbarray *arr =
						&cur_msl->memseg_arr;

				tmp = rte_fbarray_get(arr, j);
				rte_fbarray_set_free(arr, j);

				/* free_seg may attempt to create a file, which
				 * may fail.
				 */
				if (free_seg(tmp, wa->hi, msl_idx, j))
					RTE_LOG(DEBUG, EAL, "Cannot free page\n");
			}

			if (wa->ms)
				memset(wa->ms, 0, sizeof(*wa->ms) * wa->n_segs);

			if (dir_fd >= 0)
				close(dir_fd);
			return -1;
		}
		if (wa->ms)
			wa->ms[i] = cur;

		rte_fbarray_set_used(&cur_msl->memseg_arr, cur_idx);
	}
out:
	wa->segs_allocated = i;
	if (i > 0)
		cur_msl->version++;
	if (dir_fd >= 0)
		close(dir_fd);
	return 1;
}
struct free_walk_param {
	struct hugepage_info *hi;
	struct rte_memseg *ms;
};
static int
free_seg_walk(const struct rte_memseg_list *msl, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *found_msl;
	struct free_walk_param *wa = arg;
	uintptr_t start_addr, end_addr;
	int msl_idx, seg_idx, ret, dir_fd = -1;

	start_addr = (uintptr_t) msl->base_va;
	end_addr = start_addr + msl->memseg_arr.len * (size_t)msl->page_sz;

	if ((uintptr_t)wa->ms->addr < start_addr ||
			(uintptr_t)wa->ms->addr >= end_addr)
		return 0;

	msl_idx = msl - mcfg->memsegs;
	seg_idx = RTE_PTR_DIFF(wa->ms->addr, start_addr) / msl->page_sz;

	/* msl is const */
	found_msl = &mcfg->memsegs[msl_idx];

	/* do not allow any page allocations during the time we're freeing,
	 * because file creation and locking operations are not atomic,
	 * and we might be the first or the last ones to use a particular page,
	 * so we need to ensure atomicity of every operation.
	 *
	 * during init, we already hold a write lock, so don't try to take out
	 * another one.
	 */
	if (wa->hi->lock_descriptor == -1) {
		dir_fd = open(wa->hi->hugedir, O_RDONLY);
		if (dir_fd < 0) {
			RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n",
				__func__, wa->hi->hugedir, strerror(errno));
			return -1;
		}
		/* blocking writelock */
		if (flock(dir_fd, LOCK_EX)) {
			RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n",
				__func__, wa->hi->hugedir, strerror(errno));
			close(dir_fd);
			return -1;
		}
	}

	found_msl->version++;

	rte_fbarray_set_free(&found_msl->memseg_arr, seg_idx);

	ret = free_seg(wa->ms, wa->hi, msl_idx, seg_idx);

	if (dir_fd >= 0)
		close(dir_fd);

	if (ret < 0)
		return -1;
	return 1;
}
int
eal_memalloc_alloc_seg_bulk(struct rte_memseg **ms, int n_segs, size_t page_sz,
		int socket, bool exact)
{
	int i, ret = -1;
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	bool have_numa = false;
	int oldpolicy;
	struct bitmask *oldmask;
#endif
	struct alloc_walk_param wa;
	struct hugepage_info *hi = NULL;

	memset(&wa, 0, sizeof(wa));

	/* dynamic allocation not supported in legacy mode */
	if (internal_config.legacy_mem)
		return -1;

	for (i = 0; i < (int) RTE_DIM(internal_config.hugepage_info); i++) {
		if (page_sz ==
				internal_config.hugepage_info[i].hugepage_sz) {
			hi = &internal_config.hugepage_info[i];
			break;
		}
	}
	if (!hi) {
		RTE_LOG(ERR, EAL, "%s(): can't find relevant hugepage_info entry\n",
			__func__);
		return -1;
	}

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	if (check_numa()) {
		oldmask = numa_allocate_nodemask();
		prepare_numa(&oldpolicy, oldmask, socket);
		have_numa = true;
	}
#endif

	wa.exact = exact;
	wa.hi = hi;
	wa.ms = ms;
	wa.n_segs = n_segs;
	wa.page_sz = page_sz;
	wa.socket = socket;
	wa.segs_allocated = 0;

	ret = memseg_list_walk_thread_unsafe(alloc_seg_walk, &wa);
	if (ret == 0) {
		RTE_LOG(ERR, EAL, "%s(): couldn't find suitable memseg_list\n",
			__func__);
		ret = -1;
	} else if (ret > 0) {
		ret = (int)wa.segs_allocated;
	}

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	if (have_numa)
		restore_numa(&oldpolicy, oldmask);
#endif
	return ret;
}
struct rte_memseg *
eal_memalloc_alloc_seg(size_t page_sz, int socket)
{
	struct rte_memseg *ms;
	if (eal_memalloc_alloc_seg_bulk(&ms, 1, page_sz, socket, true) < 0)
		return NULL;
	/* return pointer to newly allocated memseg */
	return ms;
}
int
eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs)
{
	int seg, ret = 0;

	/* dynamic free not supported in legacy mode */
	if (internal_config.legacy_mem)
		return -1;

	for (seg = 0; seg < n_segs; seg++) {
		struct rte_memseg *cur = ms[seg];
		struct hugepage_info *hi = NULL;
		struct free_walk_param wa;
		int i, walk_res;

		/* if this page is marked as unfreeable, fail */
		if (cur->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE) {
			RTE_LOG(DEBUG, EAL, "Page is not allowed to be freed\n");
			ret = -1;
			continue;
		}

		memset(&wa, 0, sizeof(wa));

		for (i = 0; i < (int)RTE_DIM(internal_config.hugepage_info);
				i++) {
			hi = &internal_config.hugepage_info[i];
			if (cur->hugepage_sz == hi->hugepage_sz)
				break;
		}
		if (i == (int)RTE_DIM(internal_config.hugepage_info)) {
			RTE_LOG(ERR, EAL, "Can't find relevant hugepage_info entry\n");
			ret = -1;
			continue;
		}

		wa.ms = cur;
		wa.hi = hi;

		walk_res = memseg_list_walk_thread_unsafe(free_seg_walk, &wa);
		if (walk_res == 1)
			continue;
		if (walk_res == 0)
			RTE_LOG(ERR, EAL, "Couldn't find memseg list\n");
		ret = -1;
	}
	return ret;
}
int
eal_memalloc_free_seg(struct rte_memseg *ms)
{
	/* dynamic free not supported in legacy mode */
	if (internal_config.legacy_mem)
		return -1;

	return eal_memalloc_free_seg_bulk(&ms, 1);
}
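
/*
 * illustrative sketch (not part of the upstream sources): how the allocation
 * API above could be driven, e.g. grabbing two pages of a given size on
 * socket 0 and releasing them again; error handling omitted.
 *
 *	struct rte_memseg *segs[2];
 *	int n = eal_memalloc_alloc_seg_bulk(segs, 2, RTE_PGSIZE_2M, 0, false);
 *	if (n > 0)
 *		eal_memalloc_free_seg_bulk(segs, n);
 */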
static int
sync_chunk(struct rte_memseg_list *primary_msl,
		struct rte_memseg_list *local_msl, struct hugepage_info *hi,
		unsigned int msl_idx, bool used, int start, int end)
{
	struct rte_fbarray *l_arr, *p_arr;
	int i, ret, chunk_len, diff_len;

	l_arr = &local_msl->memseg_arr;
	p_arr = &primary_msl->memseg_arr;

	/* we need to aggregate allocations/deallocations into bigger chunks,
	 * as we don't want to spam the user with per-page callbacks.
	 *
	 * to avoid any potential issues, we also want to trigger
	 * deallocation callbacks *before* we actually deallocate
	 * memory, so that the user application could wrap up its use
	 * before it goes away.
	 */

	chunk_len = end - start;

	/* find how many contiguous pages we can map/unmap for this chunk */
	diff_len = used ?
			rte_fbarray_find_contig_free(l_arr, start) :
			rte_fbarray_find_contig_used(l_arr, start);

	/* has to be at least one page */
	if (diff_len < 1)
		return -1;

	diff_len = RTE_MIN(chunk_len, diff_len);

	/* if we are freeing memory, notify the application */
	if (!used) {
		struct rte_memseg *ms;
		void *start_va;
		size_t len, page_sz;

		ms = rte_fbarray_get(l_arr, start);
		start_va = ms->addr;
		page_sz = (size_t)primary_msl->page_sz;
		len = page_sz * diff_len;

		eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
				start_va, len);
	}

	for (i = 0; i < diff_len; i++) {
		struct rte_memseg *p_ms, *l_ms;
		int seg_idx = start + i;

		l_ms = rte_fbarray_get(l_arr, seg_idx);
		p_ms = rte_fbarray_get(p_arr, seg_idx);

		if (l_ms == NULL || p_ms == NULL)
			return -1;

		if (used) {
			ret = alloc_seg(l_ms, p_ms->addr,
					p_ms->socket_id, hi,
					msl_idx, seg_idx);
			if (ret < 0)
				return -1;
			rte_fbarray_set_used(l_arr, seg_idx);
		} else {
			ret = free_seg(l_ms, hi, msl_idx, seg_idx);
			rte_fbarray_set_free(l_arr, seg_idx);
			if (ret < 0)
				return -1;
		}
	}

	/* if we just allocated memory, notify the application */
	if (used) {
		struct rte_memseg *ms;
		void *start_va;
		size_t len, page_sz;

		ms = rte_fbarray_get(l_arr, start);
		start_va = ms->addr;
		page_sz = (size_t)primary_msl->page_sz;
		len = page_sz * diff_len;

		eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC,
				start_va, len);
	}

	/* calculate how much we can advance until next chunk */
	diff_len = used ?
			rte_fbarray_find_contig_used(l_arr, start) :
			rte_fbarray_find_contig_free(l_arr, start);
	ret = RTE_MIN(chunk_len, diff_len);

	return ret;
}
static int
sync_status(struct rte_memseg_list *primary_msl,
		struct rte_memseg_list *local_msl, struct hugepage_info *hi,
		unsigned int msl_idx, bool used)
{
	struct rte_fbarray *l_arr, *p_arr;
	int p_idx, l_chunk_len, p_chunk_len, ret;
	int start, end;

	/* this is a little bit tricky, but the basic idea is - walk both lists
	 * and spot any places where there are discrepancies. walking both lists
	 * and noting discrepancies in a single go is a hard problem, so we do
	 * it in two passes - first we spot any places where allocated segments
	 * mismatch (i.e. ensure that everything that's allocated in the primary
	 * is also allocated in the secondary), and then we do it by looking at
	 * free segments instead.
	 *
	 * we also need to aggregate changes into chunks, as we have to call
	 * callbacks per allocation, not per page.
	 */
	l_arr = &local_msl->memseg_arr;
	p_arr = &primary_msl->memseg_arr;

	if (used)
		p_idx = rte_fbarray_find_next_used(p_arr, 0);
	else
		p_idx = rte_fbarray_find_next_free(p_arr, 0);

	while (p_idx >= 0) {
		int next_chunk_search_idx;

		if (used) {
			p_chunk_len = rte_fbarray_find_contig_used(p_arr,
					p_idx);
			l_chunk_len = rte_fbarray_find_contig_used(l_arr,
					p_idx);
		} else {
			p_chunk_len = rte_fbarray_find_contig_free(p_arr,
					p_idx);
			l_chunk_len = rte_fbarray_find_contig_free(l_arr,
					p_idx);
		}
		/* best case scenario - no differences (or bigger, which will be
		 * fixed during next iteration), look for next chunk
		 */
		if (l_chunk_len >= p_chunk_len) {
			next_chunk_search_idx = p_idx + p_chunk_len;
			goto next_chunk;
		}

		/* if both chunks start at the same point, skip parts we know
		 * are identical, and sync the rest. each call to sync_chunk
		 * will only sync contiguous segments, so we need to call this
		 * until we are sure there are no more differences in this
		 * chunk.
		 */
		start = p_idx + l_chunk_len;
		end = p_idx + p_chunk_len;
		do {
			ret = sync_chunk(primary_msl, local_msl, hi, msl_idx,
					used, start, end);
			start += ret;
		} while (start < end && ret >= 0);
		/* if ret is negative, something went wrong */
		if (ret < 0)
			return -1;

		next_chunk_search_idx = p_idx + p_chunk_len;
next_chunk:
		/* skip to end of this chunk */
		if (used) {
			p_idx = rte_fbarray_find_next_used(p_arr,
					next_chunk_search_idx);
		} else {
			p_idx = rte_fbarray_find_next_free(p_arr,
					next_chunk_search_idx);
		}
	}
	return 0;
}
static int
sync_existing(struct rte_memseg_list *primary_msl,
		struct rte_memseg_list *local_msl, struct hugepage_info *hi,
		unsigned int msl_idx)
{
	int ret, dir_fd;

	/* do not allow any page allocations during the time we're allocating,
	 * because file creation and locking operations are not atomic,
	 * and we might be the first or the last ones to use a particular page,
	 * so we need to ensure atomicity of every operation.
	 */
	dir_fd = open(hi->hugedir, O_RDONLY);
	if (dir_fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n", __func__,
			hi->hugedir, strerror(errno));
		return -1;
	}
	/* blocking writelock */
	if (flock(dir_fd, LOCK_EX)) {
		RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n", __func__,
			hi->hugedir, strerror(errno));
		close(dir_fd);
		return -1;
	}

	/* ensure all allocated space is the same in both lists */
	ret = sync_status(primary_msl, local_msl, hi, msl_idx, true);
	if (ret < 0)
		goto fail;

	/* ensure all unallocated space is the same in both lists */
	ret = sync_status(primary_msl, local_msl, hi, msl_idx, false);
	if (ret < 0)
		goto fail;

	/* update version number */
	local_msl->version = primary_msl->version;

	close(dir_fd);
	return 0;
fail:
	close(dir_fd);
	return -1;
}
static int
sync_walk(const struct rte_memseg_list *msl, void *arg __rte_unused)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *primary_msl, *local_msl;
	struct hugepage_info *hi = NULL;
	unsigned int i;
	int msl_idx;

	msl_idx = msl - mcfg->memsegs;
	primary_msl = &mcfg->memsegs[msl_idx];
	local_msl = &local_memsegs[msl_idx];

	for (i = 0; i < RTE_DIM(internal_config.hugepage_info); i++) {
		uint64_t cur_sz =
			internal_config.hugepage_info[i].hugepage_sz;
		uint64_t msl_sz = primary_msl->page_sz;
		if (msl_sz == cur_sz) {
			hi = &internal_config.hugepage_info[i];
			break;
		}
	}
	if (!hi) {
		RTE_LOG(ERR, EAL, "Can't find relevant hugepage_info entry\n");
		return -1;
	}

	/* if versions don't match, synchronize everything */
	if (local_msl->version != primary_msl->version &&
			sync_existing(primary_msl, local_msl, hi, msl_idx))
		return -1;
	return 0;
}
int
eal_memalloc_sync_with_primary(void)
{
	/* nothing to be done in primary */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return 0;

	if (memseg_list_walk_thread_unsafe(sync_walk, NULL))
		return -1;
	return 0;
}
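
/* in a secondary process, create a local fbarray mirroring the primary's
 * memseg list, so that hotplug events can later be synchronized against it.
 */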
static int
secondary_msl_create_walk(const struct rte_memseg_list *msl,
		void *arg __rte_unused)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *primary_msl, *local_msl;
	char name[PATH_MAX];
	int msl_idx, ret;

	msl_idx = msl - mcfg->memsegs;
	primary_msl = &mcfg->memsegs[msl_idx];
	local_msl = &local_memsegs[msl_idx];

	/* create distinct fbarrays for each secondary */
	snprintf(name, RTE_FBARRAY_NAME_LEN, "%s_%i",
		primary_msl->memseg_arr.name, getpid());

	ret = rte_fbarray_init(&local_msl->memseg_arr, name,
		primary_msl->memseg_arr.len,
		primary_msl->memseg_arr.elt_sz);
	if (ret < 0) {
		RTE_LOG(ERR, EAL, "Cannot initialize local memory map\n");
		return -1;
	}
	local_msl->base_va = primary_msl->base_va;

	return 0;
}
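
/* in single-file-segments mode, allocate and initialize the per-list array
 * of segment lock fd's used by the lockfile mechanism above.
 */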
static int
secondary_lock_list_create_walk(const struct rte_memseg_list *msl,
		void *arg __rte_unused)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	unsigned int i, len;
	int msl_idx;
	int *data;

	msl_idx = msl - mcfg->memsegs;
	len = msl->memseg_arr.len;

	/* ensure we have space to store lock fd per each possible segment */
	data = malloc(sizeof(int) * len);
	if (data == NULL) {
		RTE_LOG(ERR, EAL, "Unable to allocate space for lock descriptors\n");
		return -1;
	}
	/* set all fd's as invalid */
	for (i = 0; i < len; i++)
		data[i] = -1;

	lock_fds[msl_idx].fds = data;
	lock_fds[msl_idx].len = len;
	lock_fds[msl_idx].count = 0;
	lock_fds[msl_idx].memseg_list_fd = -1;

	return 0;
}
int
eal_memalloc_init(void)
{
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		if (rte_memseg_list_walk(secondary_msl_create_walk, NULL) < 0)
			return -1;

	/* initialize all of the lock fd lists */
	if (internal_config.single_file_segments)
		if (rte_memseg_list_walk(secondary_lock_list_create_walk, NULL))
			return -1;
	return 0;
}