/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#define _FILE_OFFSET_BITS 64

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/ioctl.h>
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
#include <numa.h>
#include <numaif.h>
#endif
#include <linux/falloc.h>
#include <rte_common.h>
#include <rte_eal_memconfig.h>
#include <rte_memory.h>
#include <rte_spinlock.h>

#include "eal_filesystem.h"
#include "eal_internal_cfg.h"
#include "eal_memalloc.h"
/*
 * Not all kernel versions support fallocate on hugetlbfs, so fall back to
 * ftruncate and disallow deallocation if fallocate is not supported.
 */
static int fallocate_supported = -1; /* unknown */
/*
 * If each page is in a separate file, we can close fd's since we need each fd
 * only once. However, in single file segments mode, we can get away with using
 * a single fd for an entire memseg list, but we need to store the fd's
 * somewhere. Each fd is different within each process, so we'll store them in
 * a local tailq.
 */
struct msl_entry {
	TAILQ_ENTRY(msl_entry) next;
	unsigned int msl_idx;
	int fd;
};

/** Doubly linked list of memseg list fd's. */
TAILQ_HEAD(msl_entry_list, msl_entry);

static struct msl_entry_list msl_entry_list =
		TAILQ_HEAD_INITIALIZER(msl_entry_list);
static rte_spinlock_t tailq_lock = RTE_SPINLOCK_INITIALIZER;
/** local copy of a memory map, used to synchronize memory hotplug in MP */
static struct rte_memseg_list local_memsegs[RTE_MAX_MEMSEG_LISTS];

static sigjmp_buf huge_jmpenv;

static void __rte_unused huge_sigbus_handler(int signo __rte_unused)
{
	siglongjmp(huge_jmpenv, 1);
}
/* Put sigsetjmp into a wrapper function of its own to avoid a compile error:
 * any non-volatile, non-static local variable in the stack frame calling
 * sigsetjmp might be clobbered by a call to longjmp.
 */
static int __rte_unused huge_wrap_sigsetjmp(void)
{
	return sigsetjmp(huge_jmpenv, 1);
}
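/* Illustrative sketch of the hazard the wrapper avoids (not code from this
 * file):
 *
 *	int n = 0;                    // non-volatile, non-static local
 *	if (sigsetjmp(env, 1) == 0) {
 *		n = 1;
 *		raise(SIGBUS);        // handler siglongjmp()s back
 *	}
 *	// n is now indeterminate (C11 7.13.2.1p3)
 *
 * keeping sigsetjmp in a leaf function with no such locals sidesteps the
 * problem entirely.
 */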
static struct sigaction huge_action_old;
static int huge_need_recover;

static void __rte_unused
huge_register_sigbus(void)
{
	sigset_t mask;
	struct sigaction action;

	sigemptyset(&mask);
	sigaddset(&mask, SIGBUS);
	action.sa_flags = 0;
	action.sa_mask = mask;
	action.sa_handler = huge_sigbus_handler;

	huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);
}
static void __rte_unused
huge_recover_sigbus(void)
{
	if (huge_need_recover) {
		sigaction(SIGBUS, &huge_action_old, NULL);
		huge_need_recover = 0;
	}
}
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
static bool
check_numa(void)
{
	bool ret = true;

	/* Check if kernel supports NUMA. */
	if (numa_available() != 0) {
		RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
		ret = false;
	}
	return ret;
}
static void
prepare_numa(int *oldpolicy, struct bitmask *oldmask, int socket_id)
{
	RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
	if (get_mempolicy(oldpolicy, oldmask->maskp,
			oldmask->size + 1, 0, 0) < 0) {
		RTE_LOG(ERR, EAL,
			"Failed to get current mempolicy: %s. "
			"Assuming MPOL_DEFAULT.\n", strerror(errno));
		*oldpolicy = MPOL_DEFAULT;
	}
	RTE_LOG(DEBUG, EAL,
		"Setting policy MPOL_PREFERRED for socket %d\n",
		socket_id);
	numa_set_preferred(socket_id);
}
static void
restore_numa(int *oldpolicy, struct bitmask *oldmask)
{
	RTE_LOG(DEBUG, EAL,
		"Restoring previous memory policy: %d\n", *oldpolicy);
	if (*oldpolicy == MPOL_DEFAULT) {
		numa_set_localalloc();
	} else if (set_mempolicy(*oldpolicy, oldmask->maskp,
			oldmask->size + 1) < 0) {
		RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
			strerror(errno));
		numa_set_localalloc();
	}
	numa_free_cpumask(oldmask);
}
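/* The prepare/restore pair is used bracket-style around allocations whose
 * page faults must land on a specific socket, e.g. (see
 * eal_memalloc_alloc_seg_bulk() below):
 *
 *	prepare_numa(&oldpolicy, oldmask, socket);
 *	... mmap() and fault in the pages ...
 *	restore_numa(&oldpolicy, oldmask);
 */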
static struct msl_entry *
get_msl_entry_by_idx(unsigned int list_idx)
{
	struct msl_entry *te;

	rte_spinlock_lock(&tailq_lock);

	TAILQ_FOREACH(te, &msl_entry_list, next) {
		if (te->msl_idx == list_idx)
			break;
	}
	if (te == NULL) {
		/* doesn't exist, so create it and set fd to -1 */
		te = malloc(sizeof(*te));
		if (te == NULL) {
			RTE_LOG(ERR, EAL, "%s(): cannot allocate tailq entry for memseg list\n",
				__func__);
		} else {
			te->msl_idx = list_idx;
			te->fd = -1;
			TAILQ_INSERT_TAIL(&msl_entry_list, te, next);
		}
	}

	rte_spinlock_unlock(&tailq_lock);
	return te;
}
/* uses fstat to report the size of a file on disk */
static uint64_t
get_file_size(int fd)
{
	struct stat st;
	if (fstat(fd, &st) < 0)
		return 0;
	return st.st_size;
}

/* uses fstat to check if file size on disk is zero (regular fstat won't show
 * true file size due to how fallocate works)
 */
static bool
is_zero_length(int fd)
{
	struct stat st;
	if (fstat(fd, &st) < 0)
		return false;
	return st.st_blocks == 0;
}
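/* Illustrative example of why st_blocks (not st_size) is checked: after
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, len);
 *
 * st_size still reports len, but st_blocks drops to 0 once no backing
 * blocks remain allocated.
 */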
static int
get_seg_fd(char *path, int buflen, struct hugepage_info *hi,
		unsigned int list_idx, unsigned int seg_idx)
{
	int fd;

	if (internal_config.single_file_segments) {
		/*
		 * try to find a tailq entry, for this memseg list, or create
		 * one if it doesn't exist.
		 */
		struct msl_entry *te = get_msl_entry_by_idx(list_idx);
		if (te == NULL) {
			RTE_LOG(ERR, EAL, "%s(): cannot allocate tailq entry for memseg list\n",
				__func__);
			return -1;
		} else if (te->fd < 0) {
			/* create a hugepage file */
			eal_get_hugefile_path(path, buflen, hi->hugedir,
					list_idx);
			fd = open(path, O_CREAT | O_RDWR, 0600);
			if (fd < 0) {
				RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n",
					__func__, strerror(errno));
				return -1;
			}
			te->fd = fd;
		} else {
			fd = te->fd;
		}
	} else {
		/* one file per page, just create it */
		eal_get_hugefile_path(path, buflen, hi->hugedir,
				list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);
		fd = open(path, O_CREAT | O_RDWR, 0600);
		if (fd < 0) {
			RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
				strerror(errno));
			return -1;
		}
	}
	return fd;
}
/* returns 1 on successful lock, 0 on unsuccessful lock, -1 on error */
static int lock(int fd, uint64_t offset, uint64_t len, int type)
{
	struct flock lck;
	int ret;

	memset(&lck, 0, sizeof(lck));

	lck.l_type = type;
	lck.l_whence = SEEK_SET;
	lck.l_start = offset;
	lck.l_len = len;

	ret = fcntl(fd, F_SETLK, &lck);

	if (ret && (errno == EAGAIN || errno == EACCES)) {
		/* locked by another process, not an error */
		return 0;
	} else if (ret) {
		RTE_LOG(ERR, EAL, "%s(): error calling fcntl(): %s\n",
			__func__, strerror(errno));
		/* we've encountered an unexpected error */
		return -1;
	}
	return 1;
}
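/* Usage sketch (illustrative): lock(fd, off, sz, F_RDLCK) == 1 means the
 * range is now share-locked (other readers may hold it too);
 * lock(fd, off, sz, F_WRLCK) == 0 means another process still holds a lock
 * on the range; F_UNLCK releases our locks on that range.
 */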
static int
resize_hugefile(int fd, uint64_t fa_offset, uint64_t page_sz,
		bool grow)
{
	bool again = false;

	do {
		if (fallocate_supported == 0) {
			/* we cannot deallocate memory if fallocate() is not
			 * supported, but locks are still needed to prevent
			 * primary process' initialization from clearing out
			 * huge pages used by this process.
			 */
			if (!grow) {
				RTE_LOG(DEBUG, EAL, "%s(): fallocate not supported, not freeing page back to the system\n",
					__func__);
				return -1;
			}
			uint64_t new_size = fa_offset + page_sz;
			uint64_t cur_size = get_file_size(fd);

			/* fallocate isn't supported, fall back to ftruncate */
			if (new_size > cur_size &&
					ftruncate(fd, new_size) < 0) {
				RTE_LOG(DEBUG, EAL, "%s(): ftruncate() failed: %s\n",
					__func__, strerror(errno));
				return -1;
			}
			/* not being able to take out a read lock is an error */
			if (lock(fd, fa_offset, page_sz, F_RDLCK) != 1)
				return -1;
		} else {
			int flags = grow ? 0 : FALLOC_FL_PUNCH_HOLE |
					FALLOC_FL_KEEP_SIZE;
			int ret;

			/* if fallocate() is supported, we need to take out a
			 * read lock on allocate (to prevent other processes
			 * from deallocating this page), and take out a write
			 * lock on deallocate (to ensure nobody else is using
			 * this page).
			 *
			 * we can't use flock() for this, as we actually need to
			 * lock part of the file, not the entire file.
			 */
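			/* Lock protocol, in short (illustrative):
			 *	grow:	F_RDLCK on [fa_offset, fa_offset +
			 *		page_sz) - many processes may hold
			 *		read locks at once
			 *	shrink:	F_WRLCK on the same range - succeeds
			 *		only if no other process holds any
			 *		lock on it
			 */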
			if (!grow) {
				ret = lock(fd, fa_offset, page_sz, F_WRLCK);
				if (ret < 0)
					return -1;
				else if (ret == 0)
					/* failed to lock, not an error */
					return 0;
			}
			if (fallocate(fd, flags, fa_offset, page_sz) < 0) {
				if (fallocate_supported == -1 &&
						errno == ENOTSUP) {
					RTE_LOG(ERR, EAL, "%s(): fallocate() not supported, hugepage deallocation will be disabled\n",
						__func__);
					again = true;
					fallocate_supported = 0;
				} else {
					RTE_LOG(DEBUG, EAL, "%s(): fallocate() failed: %s\n",
						__func__, strerror(errno));
					return -1;
				}
			} else {
				fallocate_supported = 1;

				if (grow) {
					/* if can't read lock, it's an error */
					if (lock(fd, fa_offset, page_sz,
							F_RDLCK) != 1)
						return -1;
				} else {
					/* if can't unlock, it's an error */
					if (lock(fd, fa_offset, page_sz,
							F_UNLCK) != 1)
						return -1;
				}
			}
		}
	} while (again);
	return 0;
}
static int
alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
		struct hugepage_info *hi, unsigned int list_idx,
		unsigned int seg_idx)
{
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	int cur_socket_id = 0;
#endif
	uint64_t map_offset;
	char path[PATH_MAX];
	int ret = 0;
	int fd;
	size_t alloc_sz;

	fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
	if (fd < 0)
		return -1;

	alloc_sz = hi->hugepage_sz;
	if (internal_config.single_file_segments) {
		map_offset = seg_idx * alloc_sz;
		ret = resize_hugefile(fd, map_offset, alloc_sz, true);
		if (ret < 0)
			goto resized;
	} else {
		map_offset = 0;
		if (ftruncate(fd, alloc_sz) < 0) {
			RTE_LOG(DEBUG, EAL, "%s(): ftruncate() failed: %s\n",
				__func__, strerror(errno));
			goto resized;
		}
		/* we've allocated a page - take out a read lock. we're using
		 * fcntl() locks rather than flock() here because doing that
		 * gives us one huge advantage - fcntl() locks are per-process,
		 * not per-file descriptor, which means that we don't have to
		 * keep the original fd's around to keep a lock on the file.
		 *
		 * this is useful, because when it comes to unmapping pages, we
		 * will have to take out a write lock (to figure out if another
		 * process still has this page mapped), and to do it with flock()
		 * we'll have to use original fd, as lock is associated with
		 * that particular fd. with fcntl(), this is not necessary - we
		 * can open a new fd and use fcntl() on that.
		 */
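		/* Illustrative contrast (not code from this file): with
		 *
		 *	flock(fd1, LOCK_EX);
		 *
		 * the lock belongs to the open file description behind fd1,
		 * so it cannot be examined or released through a fresh
		 * open() of the same file. With fcntl(), the process owns
		 * the lock, so issuing F_SETLK through a newly opened fd
		 * queries and updates the very same lock state.
		 */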
		ret = lock(fd, map_offset, alloc_sz, F_RDLCK);

		/* this should not fail */
		if (ret != 1) {
			RTE_LOG(ERR, EAL, "%s(): error locking file: %s\n",
				__func__, strerror(errno));
			goto resized;
		}
	}

	/*
	 * map the segment, and populate page tables, the kernel fills this
	 * segment with zeros if it's a new page.
	 */
	void *va = mmap(addr, alloc_sz, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd, map_offset);
	/* for non-single file segments, we can close fd here */
	if (!internal_config.single_file_segments)
		close(fd);

	if (va == MAP_FAILED) {
		RTE_LOG(DEBUG, EAL, "%s(): mmap() failed: %s\n", __func__,
			strerror(errno));
		goto resized;
	}
	if (va != addr) {
		RTE_LOG(DEBUG, EAL, "%s(): wrong mmap() address\n", __func__);
		munmap(va, alloc_sz);
		goto resized;
	}

	rte_iova_t iova = rte_mem_virt2iova(addr);
	if (iova == RTE_BAD_PHYS_ADDR) {
		RTE_LOG(DEBUG, EAL, "%s(): can't get IOVA addr\n",
			__func__);
		goto mapped;
	}

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	move_pages(getpid(), 1, &addr, NULL, &cur_socket_id, 0);

	if (cur_socket_id != socket_id) {
		RTE_LOG(DEBUG, EAL,
			"%s(): allocation happened on wrong socket (wanted %d, got %d)\n",
			__func__, socket_id, cur_socket_id);
		goto mapped;
	}
#endif

	/* In linux, hugetlb limitations, like cgroup, are
	 * enforced at fault time instead of mmap(), even
	 * with the option of MAP_POPULATE. Kernel will send
	 * a SIGBUS signal. To avoid being killed, save the
	 * stack environment here; if SIGBUS happens, we can
	 * jump back here.
	 */
	if (huge_wrap_sigsetjmp()) {
		RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more hugepages of size %uMB\n",
			(unsigned int)(alloc_sz >> 20));
		goto mapped;
	}
	*(int *)addr = *(int *)addr;
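	/* the self-assignment above is the page fault trigger: reading the
	 * word and writing the same value back forces a write fault on the
	 * freshly mapped page without altering the zero-fill contents.
	 */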
	ms->addr = addr;
	ms->hugepage_sz = alloc_sz;
	ms->len = alloc_sz;
	ms->nchannel = rte_memory_get_nchannel();
	ms->nrank = rte_memory_get_nrank();
	ms->iova = iova;
	ms->socket_id = socket_id;

	return 0;

mapped:
	munmap(addr, alloc_sz);
resized:
	if (internal_config.single_file_segments) {
		resize_hugefile(fd, map_offset, alloc_sz, false);
		if (is_zero_length(fd)) {
			struct msl_entry *te = get_msl_entry_by_idx(list_idx);
			if (te != NULL && te->fd >= 0) {
				close(te->fd);
				te->fd = -1;
			}
			/* ignore errors, can't make it any worse */
			unlink(path);
		}
	} else {
		close(fd);
		unlink(path);
	}
	return -1;
}
static int
free_seg(struct rte_memseg *ms, struct hugepage_info *hi,
		unsigned int list_idx, unsigned int seg_idx)
{
	uint64_t map_offset;
	char path[PATH_MAX];
	int fd, ret;

	/* erase page data */
	memset(ms->addr, 0, ms->len);

	if (mmap(ms->addr, ms->len, PROT_READ,
			MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) ==
				MAP_FAILED) {
		RTE_LOG(DEBUG, EAL, "couldn't unmap page\n");
		return -1;
	}

	fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
	if (fd < 0)
		return -1;

	if (internal_config.single_file_segments) {
		map_offset = seg_idx * ms->len;
		if (resize_hugefile(fd, map_offset, ms->len, false))
			return -1;
		/* if file is zero-length, we've already shrunk it, so it's
		 * safe to remove.
		 */
		if (is_zero_length(fd)) {
			struct msl_entry *te = get_msl_entry_by_idx(list_idx);
			if (te != NULL && te->fd >= 0) {
				close(te->fd);
				te->fd = -1;
			}
			unlink(path);
		}
		ret = 0;
	} else {
		/* if we're able to take out a write lock, we're the last one
		 * holding onto this page.
		 */
		ret = lock(fd, 0, ms->len, F_WRLCK);
		if (ret >= 0) {
			/* no one else is using this page */
			if (ret == 1)
				unlink(path);
			ret = lock(fd, 0, ms->len, F_UNLCK);
			if (ret != 1)
				RTE_LOG(ERR, EAL, "%s(): unable to unlock file %s\n",
					__func__, path);
		}
		close(fd);
	}

	memset(ms, 0, sizeof(*ms));

	return ret;
}
struct alloc_walk_param {
	struct hugepage_info *hi;
	struct rte_memseg **ms;
	size_t page_sz;
	unsigned int segs_allocated;
	unsigned int n_segs;
	int socket;
	bool exact;
};
static int
alloc_seg_walk(const struct rte_memseg_list *msl, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct alloc_walk_param *wa = arg;
	struct rte_memseg_list *cur_msl;
	size_t page_sz;
	int cur_idx, start_idx, j;
	unsigned int msl_idx, need, i;

	if (msl->page_sz != wa->page_sz)
		return 0;
	if (msl->socket_id != wa->socket)
		return 0;

	page_sz = (size_t)msl->page_sz;

	msl_idx = msl - mcfg->memsegs;
	cur_msl = &mcfg->memsegs[msl_idx];

	need = wa->n_segs;

	/* try finding space in memseg list */
	cur_idx = rte_fbarray_find_next_n_free(&cur_msl->memseg_arr, 0, need);
	if (cur_idx < 0)
		return 0;
	start_idx = cur_idx;

	for (i = 0; i < need; i++, cur_idx++) {
		struct rte_memseg *cur;
		void *map_addr;

		cur = rte_fbarray_get(&cur_msl->memseg_arr, cur_idx);
		map_addr = RTE_PTR_ADD(cur_msl->base_va,
				cur_idx * page_sz);

		if (alloc_seg(cur, map_addr, wa->socket, wa->hi,
				msl_idx, cur_idx)) {
			RTE_LOG(DEBUG, EAL, "attempted to allocate %i segments, but only %i were allocated\n",
				need, i);

			/* if exact number wasn't requested, stop */
			if (!wa->exact)
				goto out;

			/* clean up the rest */
			for (j = start_idx; j < cur_idx; j++) {
				struct rte_memseg *tmp;
				struct rte_fbarray *arr =
						&cur_msl->memseg_arr;

				tmp = rte_fbarray_get(arr, j);
				if (free_seg(tmp, wa->hi, msl_idx,
						j)) {
					RTE_LOG(ERR, EAL, "Cannot free page\n");
					continue;
				}
				rte_fbarray_set_free(arr, j);
			}
			/* clear the list of allocated memsegs */
			memset(wa->ms, 0, sizeof(*wa->ms) * wa->n_segs);
			return -1;
		}
		if (wa->ms)
			wa->ms[i] = cur;

		rte_fbarray_set_used(&cur_msl->memseg_arr, cur_idx);
	}
out:
	wa->segs_allocated = i;
	return 1;
}
struct free_walk_param {
	struct hugepage_info *hi;
	struct rte_memseg *ms;
};
static int
free_seg_walk(const struct rte_memseg_list *msl, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *found_msl;
	struct free_walk_param *wa = arg;
	uintptr_t start_addr, end_addr;
	int msl_idx, seg_idx;

	start_addr = (uintptr_t) msl->base_va;
	end_addr = start_addr + msl->memseg_arr.len * (size_t)msl->page_sz;

	if ((uintptr_t)wa->ms->addr < start_addr ||
			(uintptr_t)wa->ms->addr >= end_addr)
		return 0;

	msl_idx = msl - mcfg->memsegs;
	seg_idx = RTE_PTR_DIFF(wa->ms->addr, start_addr) / msl->page_sz;
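	/* e.g. (illustrative numbers): with base_va == 0x7f0000000000 and
	 * 2MB pages, an address of 0x7f0000600000 yields seg_idx == 3.
	 */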
	found_msl = &mcfg->memsegs[msl_idx];

	found_msl->version++;

	rte_fbarray_set_free(&found_msl->memseg_arr, seg_idx);

	if (free_seg(wa->ms, wa->hi, msl_idx, seg_idx))
		return -1;

	return 1;
}
int
eal_memalloc_alloc_seg_bulk(struct rte_memseg **ms, int n_segs, size_t page_sz,
		int socket, bool exact)
{
	int i, ret = -1;
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	bool have_numa = false;
	int oldpolicy;
	struct bitmask *oldmask;
#endif
	struct alloc_walk_param wa;
	struct hugepage_info *hi = NULL;

	memset(&wa, 0, sizeof(wa));

	/* dynamic allocation not supported in legacy mode */
	if (internal_config.legacy_mem)
		return -1;

	for (i = 0; i < (int) RTE_DIM(internal_config.hugepage_info); i++) {
		if (page_sz ==
				internal_config.hugepage_info[i].hugepage_sz) {
			hi = &internal_config.hugepage_info[i];
			break;
		}
	}
	if (!hi) {
		RTE_LOG(ERR, EAL, "%s(): can't find relevant hugepage_info entry\n",
			__func__);
		return -1;
	}

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	if (check_numa()) {
		oldmask = numa_allocate_nodemask();
		prepare_numa(&oldpolicy, oldmask, socket);
		have_numa = true;
	}
#endif

	wa.exact = exact;
	wa.hi = hi;
	wa.ms = ms;
	wa.n_segs = n_segs;
	wa.page_sz = page_sz;
	wa.socket = socket;
	wa.segs_allocated = 0;

	ret = rte_memseg_list_walk(alloc_seg_walk, &wa);
	if (ret == 0) {
		RTE_LOG(ERR, EAL, "%s(): couldn't find suitable memseg_list\n",
			__func__);
		ret = -1;
	} else if (ret > 0) {
		ret = (int)wa.segs_allocated;
	}

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	if (have_numa)
		restore_numa(&oldpolicy, oldmask);
#endif
	return ret;
}
struct rte_memseg *
eal_memalloc_alloc_seg(size_t page_sz, int socket)
{
	struct rte_memseg *ms;

	if (eal_memalloc_alloc_seg_bulk(&ms, 1, page_sz, socket, true) < 0)
		return NULL;
	/* return pointer to newly allocated memseg */
	return ms;
}
int
eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs)
{
	int seg, ret = 0;

	/* dynamic free not supported in legacy mode */
	if (internal_config.legacy_mem)
		return -1;

	for (seg = 0; seg < n_segs; seg++) {
		struct rte_memseg *cur = ms[seg];
		struct hugepage_info *hi = NULL;
		struct free_walk_param wa;
		int i, walk_res;

		memset(&wa, 0, sizeof(wa));

		for (i = 0; i < (int)RTE_DIM(internal_config.hugepage_info);
				i++) {
			hi = &internal_config.hugepage_info[i];
			if (cur->hugepage_sz == hi->hugepage_sz)
				break;
		}
		if (i == (int)RTE_DIM(internal_config.hugepage_info)) {
			RTE_LOG(ERR, EAL, "Can't find relevant hugepage_info entry\n");
			ret = -1;
			continue;
		}

		wa.ms = cur;
		wa.hi = hi;

		walk_res = rte_memseg_list_walk(free_seg_walk, &wa);
		if (walk_res == 1)
			continue;
		if (walk_res == 0)
			RTE_LOG(ERR, EAL, "Couldn't find memseg list\n");
		ret = -1;
	}
	return ret;
}
int
eal_memalloc_free_seg(struct rte_memseg *ms)
{
	/* dynamic free not supported in legacy mode */
	if (internal_config.legacy_mem)
		return -1;

	return eal_memalloc_free_seg_bulk(&ms, 1);
}
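/* Usage sketch (hypothetical caller; everything outside this file is
 * illustrative):
 *
 *	struct rte_memseg *segs[8];
 *	int n = eal_memalloc_alloc_seg_bulk(segs, 8, RTE_PGSIZE_2M,
 *			rte_socket_id(), false);  // best effort, not exact
 *	... use the segments ...
 *	if (n > 0)
 *		eal_memalloc_free_seg_bulk(segs, n);
 */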
static int
sync_chunk(struct rte_memseg_list *primary_msl,
		struct rte_memseg_list *local_msl, struct hugepage_info *hi,
		unsigned int msl_idx, bool used, int start, int end)
{
	struct rte_fbarray *l_arr, *p_arr;
	int i, ret, chunk_len, diff_len;

	l_arr = &local_msl->memseg_arr;
	p_arr = &primary_msl->memseg_arr;

	/* we need to aggregate allocations/deallocations into bigger chunks,
	 * as we don't want to spam the user with per-page callbacks.
	 *
	 * to avoid any potential issues, we also want to trigger
	 * deallocation callbacks *before* we actually deallocate
	 * memory, so that the user application could wrap up its use
	 * before it goes away.
	 */
	chunk_len = end - start;

	/* find how many contiguous pages we can map/unmap for this chunk */
	diff_len = used ?
			rte_fbarray_find_contig_free(l_arr, start) :
			rte_fbarray_find_contig_used(l_arr, start);

	/* has to be at least one page */
	if (diff_len < 1)
		return -1;

	diff_len = RTE_MIN(chunk_len, diff_len);

	for (i = 0; i < diff_len; i++) {
		struct rte_memseg *p_ms, *l_ms;
		int seg_idx = start + i;

		l_ms = rte_fbarray_get(l_arr, seg_idx);
		p_ms = rte_fbarray_get(p_arr, seg_idx);

		if (l_ms == NULL || p_ms == NULL)
			return -1;

		if (used) {
			ret = alloc_seg(l_ms, p_ms->addr,
					p_ms->socket_id, hi,
					msl_idx, seg_idx);
			if (ret < 0)
				return -1;
			rte_fbarray_set_used(l_arr, seg_idx);
		} else {
			ret = free_seg(l_ms, hi, msl_idx, seg_idx);
			rte_fbarray_set_free(l_arr, seg_idx);
			if (ret < 0)
				return -1;
		}
	}

	/* calculate how much we can advance until next chunk */
	diff_len = used ?
			rte_fbarray_find_contig_used(l_arr, start) :
			rte_fbarray_find_contig_free(l_arr, start);
	ret = RTE_MIN(chunk_len, diff_len);

	return ret;
}
static int
sync_status(struct rte_memseg_list *primary_msl,
		struct rte_memseg_list *local_msl, struct hugepage_info *hi,
		unsigned int msl_idx, bool used)
{
	struct rte_fbarray *l_arr, *p_arr;
	int p_idx, l_chunk_len, p_chunk_len, ret;

	/* this is a little bit tricky, but the basic idea is - walk both lists
	 * and spot any places where there are discrepancies. walking both lists
	 * and noting discrepancies in a single go is a hard problem, so we do
	 * it in two passes - first we spot any places where allocated segments
	 * mismatch (i.e. ensure that everything that's allocated in the primary
	 * is also allocated in the secondary), and then we do it by looking at
	 * free segments instead.
	 *
	 * we also need to aggregate changes into chunks, as we have to call
	 * callbacks per allocation, not per page.
	 */
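	/* Worked example (illustrative): if the primary's used-map is
	 * 11110011 and the local one is 11000011, the "used" pass finds the
	 * 4-long used chunk in the primary, sees only 2 contiguous used
	 * segments locally, and allocates the missing two; the "free" pass
	 * then confirms that the free ranges agree.
	 */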
	l_arr = &local_msl->memseg_arr;
	p_arr = &primary_msl->memseg_arr;

	if (used)
		p_idx = rte_fbarray_find_next_used(p_arr, 0);
	else
		p_idx = rte_fbarray_find_next_free(p_arr, 0);

	while (p_idx >= 0) {
		int next_chunk_search_idx;
		int start, end;

		if (used) {
			p_chunk_len = rte_fbarray_find_contig_used(p_arr,
					p_idx);
			l_chunk_len = rte_fbarray_find_contig_used(l_arr,
					p_idx);
		} else {
			p_chunk_len = rte_fbarray_find_contig_free(p_arr,
					p_idx);
			l_chunk_len = rte_fbarray_find_contig_free(l_arr,
					p_idx);
		}

		/* best case scenario - no differences (or bigger, which will be
		 * fixed during next iteration), look for next chunk
		 */
		if (l_chunk_len >= p_chunk_len) {
			next_chunk_search_idx = p_idx + p_chunk_len;
			goto next_chunk;
		}

		/* if both chunks start at the same point, skip parts we know
		 * are identical, and sync the rest. each call to sync_chunk
		 * will only sync contiguous segments, so we need to call this
		 * until we are sure there are no more differences in this
		 * chunk.
		 */
		start = p_idx + l_chunk_len;
		end = p_idx + p_chunk_len;
		do {
			ret = sync_chunk(primary_msl, local_msl, hi, msl_idx,
					used, start, end);
			start += ret;
		} while (start < end && ret >= 0);
		/* if ret is negative, something went wrong */
		if (ret < 0)
			return -1;

		next_chunk_search_idx = p_idx + p_chunk_len;
next_chunk:
		/* skip to end of this chunk */
		if (used)
			p_idx = rte_fbarray_find_next_used(p_arr,
					next_chunk_search_idx);
		else
			p_idx = rte_fbarray_find_next_free(p_arr,
					next_chunk_search_idx);
	}
	return 0;
}
static int
sync_existing(struct rte_memseg_list *primary_msl,
		struct rte_memseg_list *local_msl, struct hugepage_info *hi,
		unsigned int msl_idx)
{
	int ret;

	/* ensure all allocated space is the same in both lists */
	ret = sync_status(primary_msl, local_msl, hi, msl_idx, true);
	if (ret < 0)
		return -1;

	/* ensure all unallocated space is the same in both lists */
	ret = sync_status(primary_msl, local_msl, hi, msl_idx, false);
	if (ret < 0)
		return -1;

	/* update version number */
	local_msl->version = primary_msl->version;

	return 0;
}
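/* Note on versioning: the primary bumps a list's version on every change
 * (e.g. found_msl->version++ in free_seg_walk() above), so a secondary
 * whose local copy already carries the primary's version number can skip
 * resynchronization for that list entirely.
 */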
static int
sync_walk(const struct rte_memseg_list *msl, void *arg __rte_unused)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *primary_msl, *local_msl;
	struct hugepage_info *hi = NULL;
	unsigned int i;
	int msl_idx;
	bool new_msl = false;

	msl_idx = msl - mcfg->memsegs;
	primary_msl = &mcfg->memsegs[msl_idx];
	local_msl = &local_memsegs[msl_idx];

	/* check if secondary has this memseg list set up */
	if (local_msl->base_va == NULL) {
		char name[PATH_MAX];
		int ret;

		new_msl = true;

		/* create distinct fbarrays for each secondary */
		snprintf(name, RTE_FBARRAY_NAME_LEN, "%s_%i",
			primary_msl->memseg_arr.name, getpid());

		ret = rte_fbarray_init(&local_msl->memseg_arr, name,
			primary_msl->memseg_arr.len,
			primary_msl->memseg_arr.elt_sz);
		if (ret < 0) {
			RTE_LOG(ERR, EAL, "Cannot initialize local memory map\n");
			return -1;
		}

		local_msl->base_va = primary_msl->base_va;
	}
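	/* note: the local fbarray only tracks this process's view of which
	 * segments are mapped; base_va is shared with the primary, so a given
	 * segment index resolves to the same virtual address in both
	 * processes.
	 */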
	for (i = 0; i < RTE_DIM(internal_config.hugepage_info); i++) {
		uint64_t cur_sz =
			internal_config.hugepage_info[i].hugepage_sz;
		uint64_t msl_sz = primary_msl->page_sz;
		if (msl_sz == cur_sz) {
			hi = &internal_config.hugepage_info[i];
			break;
		}
	}
	if (!hi) {
		RTE_LOG(ERR, EAL, "Can't find relevant hugepage_info entry\n");
		return -1;
	}

	/* if versions don't match or if we have just allocated a new
	 * memseg list, synchronize everything
	 */
	if ((new_msl || local_msl->version != primary_msl->version) &&
			sync_existing(primary_msl, local_msl, hi, msl_idx))
		return -1;
	return 0;
}
int
eal_memalloc_sync_with_primary(void)
{
	/* nothing to be done in primary */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		return 0;

	if (rte_memseg_list_walk(sync_walk, NULL))
		return -1;
	return 0;
}