/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#define _FILE_OFFSET_BITS 64
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/queue.h>
#include <unistd.h>
#include <limits.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <signal.h>
#include <setjmp.h>
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
#include <numa.h>
#include <numaif.h>
#endif

#include <rte_common.h>
#include <rte_log.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_memory.h>
#include <rte_spinlock.h>

#include "eal_filesystem.h"
#include "eal_internal_cfg.h"
#include "eal_memalloc.h"
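
/* Hugepage allocation can fail after a successful mmap(): hugetlb
 * accounting (e.g. cgroup limits) is enforced at fault time, and the
 * kernel reports the failure by delivering SIGBUS. The helpers below
 * install a temporary SIGBUS handler that longjmps back into the
 * allocation path so the failure can be handled gracefully.
 */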

static sigjmp_buf huge_jmpenv;

static void __rte_unused huge_sigbus_handler(int signo __rte_unused)
{
	siglongjmp(huge_jmpenv, 1);
}

/* Put sigsetjmp into its own function to avoid a compiler error: any
 * non-volatile, non-static local variable in the stack frame calling
 * sigsetjmp might be clobbered by a call to longjmp.
 */
static int __rte_unused huge_wrap_sigsetjmp(void)
{
	return sigsetjmp(huge_jmpenv, 1);
}

static struct sigaction huge_action_old;
static int huge_need_recover;

static void __rte_unused
huge_register_sigbus(void)
{
	sigset_t mask;
	struct sigaction action;

	sigemptyset(&mask);
	sigaddset(&mask, SIGBUS);
	action.sa_flags = 0;
	action.sa_mask = mask;
	action.sa_handler = huge_sigbus_handler;

	/* only restore the old handler later if we actually replaced it */
	huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);
}

static void __rte_unused
huge_recover_sigbus(void)
{
	if (huge_need_recover) {
		sigaction(SIGBUS, &huge_action_old, NULL);
		huge_need_recover = 0;
	}
}
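
/* NUMA-aware allocation: before faulting pages in, set the memory policy
 * to prefer the requested socket, and restore the previous policy once
 * allocation is done. Compiled in only with RTE_EAL_NUMA_AWARE_HUGEPAGES.
 */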

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
static bool
check_numa(void)
{
	bool ret = true;

	/* Check if kernel supports NUMA. */
	if (numa_available() != 0) {
		RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
		ret = false;
	}
	return ret;
}

static void
prepare_numa(int *oldpolicy, struct bitmask *oldmask, int socket_id)
{
	RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
	if (get_mempolicy(oldpolicy, oldmask->maskp,
			oldmask->size + 1, 0, 0) < 0) {
		RTE_LOG(ERR, EAL,
			"Failed to get current mempolicy: %s. "
			"Assuming MPOL_DEFAULT.\n", strerror(errno));
		*oldpolicy = MPOL_DEFAULT;
	}
	RTE_LOG(DEBUG, EAL,
		"Setting policy MPOL_PREFERRED for socket %d\n",
		socket_id);
	numa_set_preferred(socket_id);
}

static void
restore_numa(int *oldpolicy, struct bitmask *oldmask)
{
	RTE_LOG(DEBUG, EAL,
		"Restoring previous memory policy: %d\n", *oldpolicy);
	if (*oldpolicy == MPOL_DEFAULT) {
		numa_set_localalloc();
	} else if (set_mempolicy(*oldpolicy, oldmask->maskp,
			oldmask->size + 1) < 0) {
		RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
			strerror(errno));
		numa_set_localalloc();
	}
	numa_free_cpumask(oldmask);
}
#endif
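
/* Build the path to, and open, the hugetlbfs file backing the given
 * segment. The file is created if it does not already exist.
 */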

static int
get_seg_fd(char *path, int buflen, struct hugepage_info *hi,
		unsigned int list_idx, unsigned int seg_idx)
{
	int fd;

	eal_get_hugefile_path(path, buflen, hi->hugedir,
			list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);

	fd = open(path, O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
				strerror(errno));
		return -1;
	}
	return fd;
}

/* returns 1 on successful lock, 0 on unsuccessful lock, -1 on error */
static int lock(int fd, uint64_t offset, uint64_t len, int type)
{
	struct flock lck;
	int ret;

	memset(&lck, 0, sizeof(lck));

	lck.l_type = type;
	lck.l_whence = SEEK_SET;
	lck.l_start = offset;
	lck.l_len = len;

	ret = fcntl(fd, F_SETLK, &lck);

	if (ret && (errno == EAGAIN || errno == EACCES)) {
		/* locked by another process, not an error */
		return 0;
	} else if (ret) {
		RTE_LOG(ERR, EAL, "%s(): error calling fcntl(): %s\n",
				__func__, strerror(errno));
		/* we've encountered an unexpected error */
		return -1;
	}
	return 1;
}
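
/* Locking protocol: each mapped-in page holds a read lock on its backing
 * file; a process that wants to remove the file must first be able to take
 * the write lock, which only succeeds once no reader remains.
 */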

static int
alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
		struct hugepage_info *hi, unsigned int list_idx,
		unsigned int seg_idx)
{
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	int cur_socket_id = 0;
#endif
	uint64_t map_offset;
	char path[PATH_MAX];
	int ret = 0;
	int fd;
	size_t alloc_sz;

	fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
	if (fd < 0)
		return -1;

	alloc_sz = hi->hugepage_sz;

	map_offset = 0;
	if (ftruncate(fd, alloc_sz) < 0) {
		RTE_LOG(DEBUG, EAL, "%s(): ftruncate() failed: %s\n",
			__func__, strerror(errno));
		goto resized;
	}

	/* we've allocated a page - take out a read lock. we're using fcntl()
	 * locks rather than flock() here because fcntl() gives us one huge
	 * advantage - fcntl() locks are per-process, not per-file descriptor,
	 * which means that we don't have to keep the original fd's around to
	 * keep a lock on the file.
	 *
	 * this is useful, because when it comes to unmapping pages, we will
	 * have to take out a write lock (to figure out if another process
	 * still has this page mapped), and to do it with flock() we'd have to
	 * use the original fd, as the lock is associated with that particular
	 * fd. with fcntl(), this is not necessary - we can open a new fd and
	 * use fcntl() on that.
	 */
	ret = lock(fd, map_offset, alloc_sz, F_RDLCK);

	/* this should not fail */
	if (ret != 1) {
		RTE_LOG(ERR, EAL, "%s(): error locking file: %s\n",
			__func__, strerror(errno));
		goto resized;
	}

	/*
	 * map the segment, and populate page tables, the kernel fills this
	 * segment with zeros if it's a new page.
	 */
	void *va = mmap(addr, alloc_sz, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd, map_offset);

	if (va == MAP_FAILED) {
		RTE_LOG(DEBUG, EAL, "%s(): mmap() failed: %s\n", __func__,
			strerror(errno));
		goto resized;
	}
	if (va != addr) {
		RTE_LOG(DEBUG, EAL, "%s(): wrong mmap() address\n", __func__);
		goto mapped;
	}

	rte_iova_t iova = rte_mem_virt2iova(addr);
	if (iova == RTE_BAD_PHYS_ADDR) {
		RTE_LOG(DEBUG, EAL, "%s(): can't get IOVA addr\n",
			__func__);
		goto mapped;
	}

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	/* check which socket the page was actually faulted in on */
	move_pages(getpid(), 1, &addr, NULL, &cur_socket_id, 0);

	if (cur_socket_id != socket_id) {
		RTE_LOG(DEBUG, EAL,
			"%s(): allocation happened on wrong socket (wanted %d, got %d)\n",
			__func__, socket_id, cur_socket_id);
		goto mapped;
	}
#endif

	/* In Linux, hugetlb limitations, like cgroup limits, are
	 * enforced at fault time instead of mmap(), even
	 * with the option of MAP_POPULATE. The kernel will send
	 * a SIGBUS signal. To avoid being killed, save the stack
	 * environment here; if SIGBUS happens, we can jump
	 * back to this point.
	 */
	if (huge_wrap_sigsetjmp()) {
		RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more hugepages of size %uMB\n",
			(unsigned int)(alloc_sz >> 20));
		goto mapped;
	}
	/* touch the page to fault it in now */
	*(int *)addr = *(int *)addr;

	ms->addr = addr;
	ms->hugepage_sz = alloc_sz;
	ms->len = alloc_sz;
	ms->nchannel = rte_memory_get_nchannel();
	ms->nrank = rte_memory_get_nrank();
	ms->iova = iova;
	ms->socket_id = socket_id;

	close(fd);

	return 0;

mapped:
	munmap(addr, alloc_sz);
resized:
	close(fd);
	unlink(path);
	return -1;
}
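
/* Zero out and unmap the segment, then remove its backing file if we are
 * the last process holding a read lock on it.
 */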

static int
free_seg(struct rte_memseg *ms, struct hugepage_info *hi,
		unsigned int list_idx, unsigned int seg_idx)
{
	char path[PATH_MAX];
	int fd, ret;

	/* erase page data */
	memset(ms->addr, 0, ms->len);

	/* unmap the page by mapping anonymous memory over it */
	if (mmap(ms->addr, ms->len, PROT_READ,
			MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) ==
				MAP_FAILED) {
		RTE_LOG(DEBUG, EAL, "couldn't unmap page\n");
		return -1;
	}

	fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
	if (fd < 0)
		return -1;

	/* if we're able to take out a write lock, we're the last one
	 * holding onto this page.
	 */
	ret = lock(fd, 0, ms->len, F_WRLCK);
	if (ret >= 0) {
		/* no one else is using this page */
		if (ret == 1)
			unlink(path);
		ret = lock(fd, 0, ms->len, F_UNLCK);
		if (ret != 1)
			RTE_LOG(ERR, EAL, "%s(): unable to unlock file %s\n",
				__func__, path);
	}
	close(fd);

	memset(ms, 0, sizeof(*ms));

	return ret == 1 ? 0 : -1;
}
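
/* State and callbacks for rte_memseg_list_walk(), which drives both bulk
 * allocation and bulk free across all memseg lists.
 */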

struct alloc_walk_param {
	struct hugepage_info *hi;
	struct rte_memseg **ms;
	size_t page_sz;
	unsigned int segs_allocated;
	unsigned int n_segs;
	int socket;
	bool exact;
};

static int
alloc_seg_walk(const struct rte_memseg_list *msl, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct alloc_walk_param *wa = arg;
	struct rte_memseg_list *cur_msl;
	size_t page_sz;
	int cur_idx, start_idx, j;
	unsigned int msl_idx, need, i;

	if (msl->page_sz != wa->page_sz)
		return 0;
	if (msl->socket_id != wa->socket)
		return 0;

	page_sz = (size_t)msl->page_sz;

	msl_idx = msl - mcfg->memsegs;
	cur_msl = &mcfg->memsegs[msl_idx];

	need = wa->n_segs;

	/* try finding space in memseg list */
	cur_idx = rte_fbarray_find_next_n_free(&cur_msl->memseg_arr, 0, need);
	if (cur_idx < 0)
		return 0;
	start_idx = cur_idx;

	for (i = 0; i < need; i++, cur_idx++) {
		struct rte_memseg *cur;
		void *map_addr;

		cur = rte_fbarray_get(&cur_msl->memseg_arr, cur_idx);
		map_addr = RTE_PTR_ADD(cur_msl->base_va,
				cur_idx * page_sz);

		if (alloc_seg(cur, map_addr, wa->socket, wa->hi,
				msl_idx, cur_idx)) {
			RTE_LOG(DEBUG, EAL, "attempted to allocate %i segments, but only %i were allocated\n",
				need, i);

			/* if exact number wasn't requested, stop */
			if (!wa->exact)
				goto out;

			/* clean up the segments we've allocated so far */
			for (j = start_idx; j < cur_idx; j++) {
				struct rte_memseg *tmp;
				struct rte_fbarray *arr =
						&cur_msl->memseg_arr;

				tmp = rte_fbarray_get(arr, j);
				if (free_seg(tmp, wa->hi, msl_idx, j)) {
					RTE_LOG(ERR, EAL, "Cannot free page\n");
					continue;
				}

				rte_fbarray_set_free(arr, j);
			}
			/* clear the list of allocated segments */
			memset(wa->ms, 0, sizeof(*wa->ms) * wa->n_segs);
			return -1;
		}
		wa->ms[i] = cur;

		rte_fbarray_set_used(&cur_msl->memseg_arr, cur_idx);
	}
out:
	wa->segs_allocated = i;
	return 1;
}

struct free_walk_param {
	struct hugepage_info *hi;
	struct rte_memseg *ms;
};

static int
free_seg_walk(const struct rte_memseg_list *msl, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *found_msl;
	struct free_walk_param *wa = arg;
	uintptr_t start_addr, end_addr;
	int msl_idx, seg_idx;

	start_addr = (uintptr_t) msl->base_va;
	end_addr = start_addr + msl->memseg_arr.len * (size_t)msl->page_sz;

	if ((uintptr_t)wa->ms->addr < start_addr ||
			(uintptr_t)wa->ms->addr >= end_addr)
		return 0;

	msl_idx = msl - mcfg->memsegs;
	seg_idx = RTE_PTR_DIFF(wa->ms->addr, start_addr) / msl->page_sz;

	/* the walk argument is const, so look the list up in mem config */
	found_msl = &mcfg->memsegs[msl_idx];

	rte_fbarray_set_free(&found_msl->memseg_arr, seg_idx);
	if (free_seg(wa->ms, wa->hi, msl_idx, seg_idx))
		return -1;

	return 1;
}
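
/* Public entry points. Bulk allocation walks all memseg lists looking for
 * one that matches the requested page size and socket, and optionally
 * rolls back if the exact requested number of pages cannot be allocated.
 */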

int
eal_memalloc_alloc_seg_bulk(struct rte_memseg **ms, int n_segs, size_t page_sz,
		int socket, bool exact)
{
	int i, ret = -1;
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	bool have_numa = false;
	int oldpolicy;
	struct bitmask *oldmask;
#endif
	struct alloc_walk_param wa;
	struct hugepage_info *hi = NULL;

	memset(&wa, 0, sizeof(wa));

	/* dynamic allocation not supported in legacy mode */
	if (internal_config.legacy_mem)
		return -1;

	for (i = 0; i < (int) RTE_DIM(internal_config.hugepage_info); i++) {
		if (page_sz ==
				internal_config.hugepage_info[i].hugepage_sz) {
			hi = &internal_config.hugepage_info[i];
			break;
		}
	}
	if (!hi) {
		RTE_LOG(ERR, EAL, "%s(): can't find relevant hugepage_info entry\n",
			__func__);
		return -1;
	}

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	if (check_numa()) {
		oldmask = numa_allocate_nodemask();
		prepare_numa(&oldpolicy, oldmask, socket);
		have_numa = true;
	}
#endif

	wa.exact = exact;
	wa.hi = hi;
	wa.ms = ms;
	wa.n_segs = n_segs;
	wa.page_sz = page_sz;
	wa.socket = socket;
	wa.segs_allocated = 0;

	ret = rte_memseg_list_walk(alloc_seg_walk, &wa);
	if (ret == 0) {
		RTE_LOG(ERR, EAL, "%s(): couldn't find suitable memseg_list\n",
			__func__);
		ret = -1;
	} else if (ret > 0) {
		ret = (int)wa.segs_allocated;
	}

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	if (have_numa)
		restore_numa(&oldpolicy, oldmask);
#endif
	return ret;
}

struct rte_memseg *
eal_memalloc_alloc_seg(size_t page_sz, int socket)
{
	struct rte_memseg *ms;

	if (eal_memalloc_alloc_seg_bulk(&ms, 1, page_sz, socket, true) < 0)
		return NULL;
	/* return pointer to newly allocated memseg */
	return ms;
}

int
eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs)
{
	int seg, ret = 0;

	/* dynamic free not supported in legacy mode */
	if (internal_config.legacy_mem)
		return -1;

	for (seg = 0; seg < n_segs; seg++) {
		struct rte_memseg *cur = ms[seg];
		struct hugepage_info *hi = NULL;
		struct free_walk_param wa;
		int i, walk_res;

		memset(&wa, 0, sizeof(wa));

		/* find hugepage info for this segment's page size */
		for (i = 0; i < (int)RTE_DIM(internal_config.hugepage_info);
				i++) {
			hi = &internal_config.hugepage_info[i];
			if (cur->hugepage_sz == hi->hugepage_sz)
				break;
		}
		if (i == (int)RTE_DIM(internal_config.hugepage_info)) {
			RTE_LOG(ERR, EAL, "Can't find relevant hugepage_info entry\n");
			ret = -1;
			continue;
		}

		wa.ms = cur;
		wa.hi = hi;

		walk_res = rte_memseg_list_walk(free_seg_walk, &wa);
		if (walk_res == 1)
			continue;
		if (walk_res == 0)
			RTE_LOG(ERR, EAL, "Couldn't find memseg list\n");
		ret = -1;
	}
	return ret;
}

int
eal_memalloc_free_seg(struct rte_memseg *ms)
{
	/* dynamic free not supported in legacy mode */
	if (internal_config.legacy_mem)
		return -1;

	return eal_memalloc_free_seg_bulk(&ms, 1);
}
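
/* Illustrative usage sketch (not part of the EAL itself): how a caller
 * might allocate and then free a single 2MB page on socket 0 through the
 * bulk API. Assumes a non-legacy-mem EAL; RTE_PGSIZE_2M comes from
 * rte_memory.h. Kept under #if 0 so it is not compiled in.
 */
#if 0
static void
example_alloc_then_free(void)
{
	struct rte_memseg *ms;

	/* returns the number of segments allocated, or -1 on failure */
	if (eal_memalloc_alloc_seg_bulk(&ms, 1, RTE_PGSIZE_2M, 0, true) == 1) {
		/* the page is mapped and zeroed - use ms->addr here */
		eal_memalloc_free_seg_bulk(&ms, 1);
	}
}
#endif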