/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#define _FILE_OFFSET_BITS 64
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <limits.h>
#include <signal.h>
#include <setjmp.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/ioctl.h>

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
#include <numa.h>
#include <numaif.h>
#endif
#include <linux/falloc.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_eal_memconfig.h>
#include <rte_memory.h>
#include <rte_spinlock.h>

#include "eal_filesystem.h"
#include "eal_internal_cfg.h"
#include "eal_memalloc.h"
/*
 * Not all kernel versions support fallocate() on hugetlbfs, so fall back to
 * ftruncate() and disallow deallocation if fallocate() is not supported.
 */
static int fallocate_supported = -1; /* unknown */
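
/*
 * Illustrative sketch (not part of the allocator proper): the first
 * fallocate() call in resize_hugefile() below doubles as a runtime probe.
 * On kernels without hugetlbfs fallocate() support it fails with ENOTSUP,
 * and the result is latched so the syscall is never attempted again:
 *
 *	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *			offset, page_sz) < 0 && errno == ENOTSUP)
 *		fallocate_supported = 0;	// never retry, use ftruncate()
 *	else
 *		fallocate_supported = 1;
 */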
/*
 * If each page is in a separate file, we can close fd's since we need each fd
 * only once. However, in single file segments mode we use one fd for an
 * entire memseg list, so we need to store the fds somewhere. Each fd is
 * different within each process, so we'll store them in a local tailq.
 */
struct msl_entry {
	TAILQ_ENTRY(msl_entry) next;
	unsigned int msl_idx;
	int fd;
};

/** Doubly-linked list of memseg list fds. */
TAILQ_HEAD(msl_entry_list, msl_entry);

static struct msl_entry_list msl_entry_list =
	TAILQ_HEAD_INITIALIZER(msl_entry_list);
static rte_spinlock_t tailq_lock = RTE_SPINLOCK_INITIALIZER;
static sigjmp_buf huge_jmpenv;

static void __rte_unused huge_sigbus_handler(int signo __rte_unused)
{
	siglongjmp(huge_jmpenv, 1);
}

/* Wrap sigsetjmp in its own function to avoid the "variable might be
 * clobbered by longjmp" problem: any non-volatile, non-static local variable
 * in the stack frame calling sigsetjmp might be clobbered by a call to
 * longjmp.
 */
static int __rte_unused huge_wrap_sigsetjmp(void)
{
	return sigsetjmp(huge_jmpenv, 1);
}
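
/*
 * Illustrative pairing of the SIGBUS helpers (a sketch; alloc_seg() below
 * really does wrap its first write to a new page this way):
 *
 *	huge_register_sigbus();
 *	if (huge_wrap_sigsetjmp()) {
 *		// we got here via siglongjmp() from the handler:
 *		// the page could not be faulted in, back out
 *	} else {
 *		*(volatile int *)page_va;	// touch page, may raise SIGBUS
 *	}
 *	huge_recover_sigbus();
 */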
static struct sigaction huge_action_old;
static int huge_need_recover;

static void __rte_unused
huge_register_sigbus(void)
{
	sigset_t mask;
	struct sigaction action;

	sigemptyset(&mask);
	sigaddset(&mask, SIGBUS);
	action.sa_flags = 0;
	action.sa_mask = mask;
	action.sa_handler = huge_sigbus_handler;

	huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);
}

static void __rte_unused
huge_recover_sigbus(void)
{
	if (huge_need_recover) {
		sigaction(SIGBUS, &huge_action_old, NULL);
		huge_need_recover = 0;
	}
}
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
static bool
check_numa(void)
{
	bool ret = true;
	/* Check if kernel supports NUMA. */
	if (numa_available() != 0) {
		RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
		ret = false;
	}
	return ret;
}

static void
prepare_numa(int *oldpolicy, struct bitmask *oldmask, int socket_id)
{
	RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
	if (get_mempolicy(oldpolicy, oldmask->maskp,
			oldmask->size + 1, 0, 0) < 0) {
		RTE_LOG(ERR, EAL,
			"Failed to get current mempolicy: %s. "
			"Assuming MPOL_DEFAULT.\n", strerror(errno));
		*oldpolicy = MPOL_DEFAULT;
	}
	RTE_LOG(DEBUG, EAL,
		"Setting policy MPOL_PREFERRED for socket %d\n",
		socket_id);
	numa_set_preferred(socket_id);
}
static void
restore_numa(int *oldpolicy, struct bitmask *oldmask)
{
	RTE_LOG(DEBUG, EAL,
		"Restoring previous memory policy: %d\n", *oldpolicy);
	if (*oldpolicy == MPOL_DEFAULT) {
		numa_set_localalloc();
	} else if (set_mempolicy(*oldpolicy, oldmask->maskp,
			oldmask->size + 1) < 0) {
		RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
			strerror(errno));
		numa_set_localalloc();
	}
	numa_free_cpumask(oldmask);
}
#endif
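
/*
 * Sketch of how the prepare/restore pair is used (see
 * eal_memalloc_alloc_seg_bulk() below): save the current policy, allocate
 * under MPOL_PREFERRED for the target socket, then put things back:
 *
 *	int oldpolicy;
 *	struct bitmask *oldmask = numa_allocate_nodemask();
 *	prepare_numa(&oldpolicy, oldmask, socket_id);
 *	// ... allocate hugepages ...
 *	restore_numa(&oldpolicy, oldmask);	// also frees oldmask
 */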
static struct msl_entry *
get_msl_entry_by_idx(unsigned int list_idx)
{
	struct msl_entry *te;

	rte_spinlock_lock(&tailq_lock);

	TAILQ_FOREACH(te, &msl_entry_list, next) {
		if (te->msl_idx == list_idx)
			break;
	}
	if (te == NULL) {
		/* doesn't exist, so create it and set fd to -1 */
		te = malloc(sizeof(*te));
		if (te == NULL) {
			RTE_LOG(ERR, EAL, "%s(): cannot allocate tailq entry for memseg list\n",
				__func__);
			goto unlock;
		}
		te->msl_idx = list_idx;
		te->fd = -1;
		TAILQ_INSERT_TAIL(&msl_entry_list, te, next);
	}
unlock:
	rte_spinlock_unlock(&tailq_lock);
	return te;
}
/*
 * Uses fstat to report the size of a file on disk.
 */
static off_t
get_file_size(int fd)
{
	struct stat st;
	if (fstat(fd, &st) < 0)
		return 0;
	return st.st_size;
}

/*
 * Uses fstat to check if the file on disk is effectively empty. st_size
 * alone is misleading here: fallocate(FALLOC_FL_PUNCH_HOLE) frees blocks
 * without shrinking the file, so check the allocated block count instead.
 */
static bool
is_zero_length(int fd)
{
	struct stat st;
	if (fstat(fd, &st) < 0)
		return false;
	return st.st_blocks == 0;
}
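
/*
 * Example with made-up numbers: after punching out all of a 2 MB hugepage
 * file,
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, sz);
 *	fstat(fd, &st);
 *
 * st.st_size still reports 2 MB, but st.st_blocks drops to 0, which is
 * exactly what is_zero_length() keys off.
 */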
static int
get_seg_fd(char *path, int buflen, struct hugepage_info *hi,
		unsigned int list_idx, unsigned int seg_idx)
{
	int fd;

	if (internal_config.single_file_segments) {
		/* find a tailq entry for this memseg list, or create one */
		struct msl_entry *te = get_msl_entry_by_idx(list_idx);
		if (te == NULL) {
			RTE_LOG(ERR, EAL, "%s(): cannot allocate tailq entry for memseg list\n",
				__func__);
			return -1;
		} else if (te->fd < 0) {
			/* create a hugepage file */
			eal_get_hugefile_path(path, buflen, hi->hugedir,
					list_idx);
			fd = open(path, O_CREAT | O_RDWR, 0600);
			if (fd < 0) {
				RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n",
					__func__, strerror(errno));
				return -1;
			}
			te->fd = fd;
		} else {
			fd = te->fd;
		}
	} else {
		/* one file per page, just create it */
		eal_get_hugefile_path(path, buflen, hi->hugedir,
				list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);
		fd = open(path, O_CREAT | O_RDWR, 0600);
		if (fd < 0) {
			RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
				strerror(errno));
			return -1;
		}
	}
	return fd;
}
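
/*
 * Naming note (assuming the default "rte" hugefile prefix): in single file
 * segments mode the file is shared by a whole memseg list and named after
 * the list index alone (e.g. /mnt/huge/rtemap_0), whereas in file-per-page
 * mode every page gets its own file, indexed by
 * list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx.
 */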
/* returns 1 on successful lock, 0 on unsuccessful lock, -1 on error */
static int lock(int fd, uint64_t offset, uint64_t len, int type)
{
	struct flock lck;
	int ret;

	memset(&lck, 0, sizeof(lck));
	lck.l_type = type;
	lck.l_whence = SEEK_SET;
	lck.l_start = offset;
	lck.l_len = len;

	ret = fcntl(fd, F_SETLK, &lck);
	if (ret && (errno == EAGAIN || errno == EACCES)) {
		/* locked by another process, not an error */
		return 0;
	} else if (ret) {
		RTE_LOG(ERR, EAL, "%s(): error calling fcntl(): %s\n",
			__func__, strerror(errno));
		/* we've encountered an unexpected error */
		return -1;
	}
	return 1;
}
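
/*
 * Callers are expected to branch on all three outcomes, e.g. (sketch):
 *
 *	ret = lock(fd, offset, page_sz, F_WRLCK);
 *	if (ret < 0)
 *		return -1;	// fcntl() itself failed
 *	if (ret == 0)
 *		return 0;	// another process holds a conflicting lock
 *	// ret == 1: lock taken, safe to proceed
 */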
static int
resize_hugefile(int fd, uint64_t fa_offset, uint64_t page_sz,
		bool grow)
{
	bool again;

	do {
		again = false;
		if (fallocate_supported == 0) {
			/* we cannot deallocate memory if fallocate() is not
			 * supported, but locks are still needed to prevent
			 * primary process' initialization from clearing out
			 * huge pages used by this process.
			 */
			if (!grow) {
				RTE_LOG(DEBUG, EAL, "%s(): fallocate not supported, not freeing page back to the system\n",
					__func__);
				return -1;
			}
			uint64_t new_size = fa_offset + page_sz;
			uint64_t cur_size = get_file_size(fd);

			/* fallocate isn't supported, fall back to ftruncate */
			if (new_size > cur_size &&
					ftruncate(fd, new_size) < 0) {
				RTE_LOG(DEBUG, EAL, "%s(): ftruncate() failed: %s\n",
					__func__, strerror(errno));
				return -1;
			}
			/* not being able to take out a read lock is an error */
			if (lock(fd, fa_offset, page_sz, F_RDLCK) != 1)
				return -1;
		} else {
			int flags = grow ? 0 : FALLOC_FL_PUNCH_HOLE |
					FALLOC_FL_KEEP_SIZE;
			int ret;

			/* if fallocate() is supported, we need to take out a
			 * read lock on allocate (to prevent other processes
			 * from deallocating this page), and take out a write
			 * lock on deallocate (to ensure nobody else is using
			 * this page).
			 *
			 * we can't use flock() for this, as we actually need to
			 * lock part of the file, not the entire file.
			 */
			if (!grow) {
				ret = lock(fd, fa_offset, page_sz, F_WRLCK);
				if (ret < 0)
					return -1;
				else if (ret == 0)
					/* failed to lock, not an error */
					return 0;
			}
			if (fallocate(fd, flags, fa_offset, page_sz) < 0) {
				if (fallocate_supported == -1 &&
						errno == ENOTSUP) {
					RTE_LOG(ERR, EAL, "%s(): fallocate() not supported, hugepage deallocation will be disabled\n",
						__func__);
					/* retry once via the ftruncate path */
					again = true;
					fallocate_supported = 0;
				} else {
					RTE_LOG(DEBUG, EAL, "%s(): fallocate() failed: %s\n",
						__func__, strerror(errno));
					return -1;
				}
			} else {
				fallocate_supported = 1;

				if (grow) {
					/* if can't read lock, it's an error */
					if (lock(fd, fa_offset, page_sz,
							F_RDLCK) != 1)
						return -1;
				} else {
					/* if can't unlock, it's an error */
					if (lock(fd, fa_offset, page_sz,
							F_UNLCK) != 1)
						return -1;
				}
			}
		}
	} while (again);
	return 0;
}
static int
alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
		struct hugepage_info *hi, unsigned int list_idx,
		unsigned int seg_idx)
{
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	int cur_socket_id = 0;
#endif
	uint64_t map_offset;
	char path[PATH_MAX];
	int ret = 0;
	int fd;
	size_t alloc_sz;

	fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
	if (fd < 0)
		return -1;

	alloc_sz = hi->hugepage_sz;
	if (internal_config.single_file_segments) {
		map_offset = seg_idx * alloc_sz;
		ret = resize_hugefile(fd, map_offset, alloc_sz, true);
		if (ret < 0)
			goto resized;
	} else {
		map_offset = 0;
		if (ftruncate(fd, alloc_sz) < 0) {
			RTE_LOG(DEBUG, EAL, "%s(): ftruncate() failed: %s\n",
				__func__, strerror(errno));
			goto resized;
		}
		/* we've allocated a page - take out a read lock. we're using
		 * fcntl() locks rather than flock() here because doing that
		 * gives us one huge advantage - fcntl() locks are per-process,
		 * not per-file descriptor, which means that we don't have to
		 * keep the original fd's around to keep a lock on the file.
		 *
		 * this is useful, because when it comes to unmapping pages, we
		 * will have to take out a write lock (to figure out if another
		 * process still has this page mapped), and to do it with
		 * flock() we'd have to use the original fd, as the lock is
		 * associated with that particular fd. with fcntl(), this is
		 * not necessary - we can open a new fd and use fcntl() on that.
		 */
		ret = lock(fd, map_offset, alloc_sz, F_RDLCK);
		/* this should not fail */
		if (ret != 1) {
			RTE_LOG(ERR, EAL, "%s(): error locking file: %s\n",
				__func__, strerror(errno));
			goto resized;
		}
	}

	/*
	 * map the segment and populate its page tables; the kernel fills
	 * this segment with zeros if it's a new page.
	 */
	void *va = mmap(addr, alloc_sz, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd, map_offset);
	/* for non-single file segments, we can close fd here */
	if (!internal_config.single_file_segments)
		close(fd);

	if (va == MAP_FAILED) {
		RTE_LOG(DEBUG, EAL, "%s(): mmap() failed: %s\n", __func__,
			strerror(errno));
		goto resized;
	}
	if (va != addr) {
		RTE_LOG(DEBUG, EAL, "%s(): wrong mmap() address\n", __func__);
		goto mapped;
	}

	rte_iova_t iova = rte_mem_virt2iova(addr);
	if (iova == RTE_BAD_PHYS_ADDR) {
		RTE_LOG(DEBUG, EAL, "%s(): can't get IOVA addr\n",
			__func__);
		goto mapped;
	}
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	/* with a NULL "nodes" argument, move_pages() does not move anything;
	 * it only reports the NUMA node each page currently resides on.
	 */
	move_pages(getpid(), 1, &addr, NULL, &cur_socket_id, 0);

	if (cur_socket_id != socket_id) {
		RTE_LOG(DEBUG, EAL,
			"%s(): allocation happened on wrong socket (wanted %d, got %d)\n",
			__func__, socket_id, cur_socket_id);
		goto mapped;
	}
#endif
	/* In Linux, hugetlb limitations (e.g. cgroup limits) are enforced at
	 * fault time rather than at mmap() time, even with MAP_POPULATE. The
	 * kernel sends a SIGBUS in that case. To avoid being killed, save the
	 * stack environment here; if SIGBUS happens, we can jump back here.
	 */
	if (huge_wrap_sigsetjmp()) {
		RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more hugepages of size %uMB\n",
			(unsigned int)(alloc_sz >> 20));
		goto mapped;
	}
	/* touch the page: forcing the fault here (rather than on first use)
	 * lets us catch the SIGBUS with the handler set up above.
	 */
	*(int *)addr = *(int *)addr;

	ms->addr = addr;
	ms->hugepage_sz = alloc_sz;
	ms->len = alloc_sz;
	ms->nchannel = rte_memory_get_nchannel();
	ms->nrank = rte_memory_get_nrank();
	ms->iova = iova;
	ms->socket_id = socket_id;

	return 0;

mapped:
	munmap(addr, alloc_sz);
resized:
	if (internal_config.single_file_segments) {
		resize_hugefile(fd, map_offset, alloc_sz, false);
		if (is_zero_length(fd)) {
			struct msl_entry *te = get_msl_entry_by_idx(list_idx);
			if (te != NULL && te->fd >= 0) {
				close(te->fd);
				te->fd = -1;
			}
			/* ignore errors, can't make it any worse */
			unlink(path);
		}
	} else {
		close(fd);
		unlink(path);
	}
	return -1;
}
static int
free_seg(struct rte_memseg *ms, struct hugepage_info *hi,
		unsigned int list_idx, unsigned int seg_idx)
{
	uint64_t map_offset;
	char path[PATH_MAX];
	int fd, ret;

	/* erase page data */
	memset(ms->addr, 0, ms->len);

	/* "unmap" by mapping anonymous memory over the page - this releases
	 * the hugepage while keeping the virtual address range reserved.
	 */
	if (mmap(ms->addr, ms->len, PROT_READ,
			MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) ==
				MAP_FAILED) {
		RTE_LOG(DEBUG, EAL, "couldn't unmap page\n");
		return -1;
	}

	fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
	if (fd < 0)
		return -1;

	if (internal_config.single_file_segments) {
		map_offset = seg_idx * ms->len;
		if (resize_hugefile(fd, map_offset, ms->len, false))
			return -1;
		/* if file is zero-length, we've already shrunk it, so it's
		 * safe to remove.
		 */
		if (is_zero_length(fd)) {
			struct msl_entry *te = get_msl_entry_by_idx(list_idx);
			if (te != NULL && te->fd >= 0) {
				close(te->fd);
				te->fd = -1;
			}
			unlink(path);
		}
	} else {
		/* if we're able to take out a write lock, we're the last one
		 * holding onto this page.
		 */
		ret = lock(fd, 0, ms->len, F_WRLCK);
		if (ret >= 0) {
			/* no one else is using this page */
			if (ret == 1)
				unlink(path);
			ret = lock(fd, 0, ms->len, F_UNLCK);
			if (ret != 1)
				RTE_LOG(ERR, EAL, "%s(): unable to unlock file %s\n",
					__func__, path);
		}
		close(fd);
	}

	memset(ms, 0, sizeof(*ms));

	return 0;
}
struct alloc_walk_param {
	struct hugepage_info *hi;
	struct rte_memseg **ms;
	size_t page_sz;
	unsigned int segs_allocated;
	unsigned int n_segs;
	int socket;
	bool exact;
};

static int
alloc_seg_walk(const struct rte_memseg_list *msl, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct alloc_walk_param *wa = arg;
	struct rte_memseg_list *cur_msl;
	size_t page_sz;
	int cur_idx, start_idx, j;
	unsigned int msl_idx, need, i;

	if (msl->page_sz != wa->page_sz)
		return 0;
	if (msl->socket_id != wa->socket)
		return 0;

	page_sz = (size_t)msl->page_sz;

	msl_idx = msl - mcfg->memsegs;
	cur_msl = &mcfg->memsegs[msl_idx];

	need = wa->n_segs;

	/* try finding space in memseg list */
	cur_idx = rte_fbarray_find_next_n_free(&cur_msl->memseg_arr, 0, need);
	if (cur_idx < 0)
		return 0;
	start_idx = cur_idx;

	for (i = 0; i < need; i++, cur_idx++) {
		struct rte_memseg *cur;
		void *map_addr;

		cur = rte_fbarray_get(&cur_msl->memseg_arr, cur_idx);
		map_addr = RTE_PTR_ADD(cur_msl->base_va,
				cur_idx * page_sz);

		if (alloc_seg(cur, map_addr, wa->socket, wa->hi,
				msl_idx, cur_idx)) {
			RTE_LOG(DEBUG, EAL, "attempted to allocate %i segments, but only %i were allocated\n",
				need, i);

			/* if exact number wasn't requested, stop */
			if (!wa->exact)
				goto out;

			/* clean up the segments we've already allocated */
			for (j = start_idx; j < cur_idx; j++) {
				struct rte_memseg *tmp;
				struct rte_fbarray *arr =
						&cur_msl->memseg_arr;

				tmp = rte_fbarray_get(arr, j);
				if (free_seg(tmp, wa->hi, msl_idx, j)) {
					RTE_LOG(ERR, EAL, "Cannot free page\n");
					continue;
				}

				rte_fbarray_set_free(arr, j);
			}
			/* clear the list */
			if (wa->ms)
				memset(wa->ms, 0, sizeof(*wa->ms) * wa->n_segs);
			return -1;
		}
		if (wa->ms)
			wa->ms[i] = cur;

		rte_fbarray_set_used(&cur_msl->memseg_arr, cur_idx);
	}
out:
	wa->segs_allocated = i;
	return 1;
}
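
/*
 * Note on the walk convention: returning 0 from the callback makes
 * rte_memseg_list_walk() move on to the next memseg list, while any
 * non-zero value stops the walk and is propagated to the caller. This
 * file uses 1 for "handled successfully" and negative values for errors.
 */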
struct free_walk_param {
	struct hugepage_info *hi;
	struct rte_memseg *ms;
};

static int
free_seg_walk(const struct rte_memseg_list *msl, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *found_msl;
	struct free_walk_param *wa = arg;
	uintptr_t start_addr, end_addr;
	int msl_idx, seg_idx;

	start_addr = (uintptr_t) msl->base_va;
	end_addr = start_addr + msl->memseg_arr.len * (size_t)msl->page_sz;

	if ((uintptr_t)wa->ms->addr < start_addr ||
			(uintptr_t)wa->ms->addr >= end_addr)
		return 0;

	msl_idx = msl - mcfg->memsegs;
	seg_idx = RTE_PTR_DIFF(wa->ms->addr, start_addr) / msl->page_sz;

	/* the walk gives us a const pointer, so look the list up again */
	found_msl = &mcfg->memsegs[msl_idx];

	rte_fbarray_set_free(&found_msl->memseg_arr, seg_idx);
	if (free_seg(wa->ms, wa->hi, msl_idx, seg_idx))
		return -1;

	return 1;
}
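
/*
 * Worked example with hypothetical numbers: if base_va == 0x140000000,
 * page_sz == 2 MB and wa->ms->addr == 0x140400000, then
 * RTE_PTR_DIFF(0x140400000, 0x140000000) / 0x200000 == 2, i.e. the
 * address belongs to segment index 2 of this list.
 */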
int
eal_memalloc_alloc_seg_bulk(struct rte_memseg **ms, int n_segs, size_t page_sz,
		int socket, bool exact)
{
	int i, ret = -1;
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	bool have_numa = false;
	int oldpolicy;
	struct bitmask *oldmask;
#endif
	struct alloc_walk_param wa;
	struct hugepage_info *hi = NULL;

	memset(&wa, 0, sizeof(wa));

	/* dynamic allocation not supported in legacy mode */
	if (internal_config.legacy_mem)
		return -1;

	for (i = 0; i < (int) RTE_DIM(internal_config.hugepage_info); i++) {
		if (page_sz ==
				internal_config.hugepage_info[i].hugepage_sz) {
			hi = &internal_config.hugepage_info[i];
			break;
		}
	}
	if (!hi) {
		RTE_LOG(ERR, EAL, "%s(): can't find relevant hugepage_info entry\n",
			__func__);
		return -1;
	}

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	if (check_numa()) {
		oldmask = numa_allocate_nodemask();
		prepare_numa(&oldpolicy, oldmask, socket);
		have_numa = true;
	}
#endif

	wa.exact = exact;
	wa.hi = hi;
	wa.ms = ms;
	wa.n_segs = n_segs;
	wa.page_sz = page_sz;
	wa.socket = socket;
	wa.segs_allocated = 0;

	ret = rte_memseg_list_walk(alloc_seg_walk, &wa);
	if (ret == 0) {
		RTE_LOG(ERR, EAL, "%s(): couldn't find suitable memseg_list\n",
			__func__);
		ret = -1;
	} else if (ret > 0) {
		ret = (int)wa.segs_allocated;
	}

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	if (have_numa)
		restore_numa(&oldpolicy, oldmask);
#endif
	return ret;
}
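
/*
 * Illustrative call (a sketch, not from this file): allocate four 2 MB
 * pages on socket 0, accepting a partial result:
 *
 *	struct rte_memseg *pages[4];
 *	int n = eal_memalloc_alloc_seg_bulk(pages, 4, RTE_PGSIZE_2M, 0, false);
 *	// n == number of segments actually allocated; with exact == true,
 *	// anything short of 4 would be rolled back and the call would fail
 */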
struct rte_memseg *
eal_memalloc_alloc_seg(size_t page_sz, int socket)
{
	struct rte_memseg *ms;
	if (eal_memalloc_alloc_seg_bulk(&ms, 1, page_sz, socket, true) < 0)
		return NULL;
	/* return pointer to newly allocated memseg */
	return ms;
}
int
eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs)
{
	int seg, ret = 0;

	/* dynamic free not supported in legacy mode */
	if (internal_config.legacy_mem)
		return -1;

	for (seg = 0; seg < n_segs; seg++) {
		struct rte_memseg *cur = ms[seg];
		struct hugepage_info *hi = NULL;
		struct free_walk_param wa;
		int i, walk_res;

		memset(&wa, 0, sizeof(wa));

		for (i = 0; i < (int)RTE_DIM(internal_config.hugepage_info);
				i++) {
			hi = &internal_config.hugepage_info[i];
			if (cur->hugepage_sz == hi->hugepage_sz)
				break;
		}
		if (i == (int)RTE_DIM(internal_config.hugepage_info)) {
			RTE_LOG(ERR, EAL, "Can't find relevant hugepage_info entry\n");
			ret = -1;
			continue;
		}

		wa.ms = cur;
		wa.hi = hi;

		walk_res = rte_memseg_list_walk(free_seg_walk, &wa);
		if (walk_res == 1)
			continue;
		if (walk_res == 0)
			RTE_LOG(ERR, EAL, "Couldn't find memseg list\n");
		ret = -1;
	}
	return ret;
}
int
eal_memalloc_free_seg(struct rte_memseg *ms)
{
	/* dynamic free not supported in legacy mode */
	if (internal_config.legacy_mem)
		return -1;

	return eal_memalloc_free_seg_bulk(&ms, 1);
}