/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <errno.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <inttypes.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/queue.h>

#include <rte_fbarray.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <rte_lcore.h>

#include "eal_memalloc.h"
#include "eal_private.h"
#include "eal_internal_cfg.h"
/*
 * Try to mmap *size bytes of anonymous memory. If successful, return the
 * pointer to the mmap'd area and keep *size unmodified. Else, retry
 * with a smaller zone: decrease *size by page_sz until it reaches
 * 0. In this case, return NULL. Note: this function returns an address
 * which is aligned to the requested page size.
 */
#define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"

static uint64_t baseaddr_offset;
static uint64_t system_page_sz;
void *
eal_get_virtual_area(void *requested_addr, size_t *size,
		size_t page_sz, int flags, int mmap_flags)
{
	bool addr_is_hint, allow_shrink, unmap, no_align;
	size_t map_sz;
	void *mapped_addr, *aligned_addr;

	if (system_page_sz == 0)
		system_page_sz = sysconf(_SC_PAGESIZE);

	mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;

	RTE_LOG(DEBUG, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);

	addr_is_hint = (flags & EAL_VIRTUAL_AREA_ADDR_IS_HINT) > 0;
	allow_shrink = (flags & EAL_VIRTUAL_AREA_ALLOW_SHRINK) > 0;
	unmap = (flags & EAL_VIRTUAL_AREA_UNMAP) > 0;

	if (requested_addr == NULL && internal_config.base_virtaddr != 0) {
		requested_addr = (void *) (internal_config.base_virtaddr +
				(size_t)baseaddr_offset);
		requested_addr = RTE_PTR_ALIGN(requested_addr, page_sz);
		addr_is_hint = true;
	}

	/* if requested address is not aligned by page size, or if requested
	 * address is NULL, add page size to requested length as we may get an
	 * address that's aligned by system page size, which can be smaller than
	 * our requested page size. additionally, we shouldn't try to align if
	 * system page size is the same as requested page size.
	 */
	no_align = (requested_addr != NULL &&
		((uintptr_t)requested_addr & (page_sz - 1)) == 0) ||
		page_sz == system_page_sz;

	do {
		map_sz = no_align ? *size : *size + page_sz;

		mapped_addr = mmap(requested_addr, map_sz, PROT_READ,
				mmap_flags, -1, 0);
		if (mapped_addr == MAP_FAILED && allow_shrink)
			*size -= page_sz;
	} while (allow_shrink && mapped_addr == MAP_FAILED && *size > 0);

	/* align resulting address - if map failed, we will ignore the value
	 * anyway, so no need to add additional checks.
	 */
	aligned_addr = no_align ? mapped_addr :
			RTE_PTR_ALIGN(mapped_addr, page_sz);

	if (*size == 0) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area of any size: %s\n",
			strerror(errno));
		rte_errno = errno;
		return NULL;
	} else if (mapped_addr == MAP_FAILED) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area: %s\n",
			strerror(errno));
		/* pass errno up the call chain */
		rte_errno = errno;
		return NULL;
	} else if (requested_addr != NULL && !addr_is_hint &&
			aligned_addr != requested_addr) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area at requested address: %p (got %p)\n",
			requested_addr, aligned_addr);
		munmap(mapped_addr, map_sz);
		rte_errno = EADDRNOTAVAIL;
		return NULL;
	} else if (requested_addr != NULL && addr_is_hint &&
			aligned_addr != requested_addr) {
		RTE_LOG(WARNING, EAL, "WARNING! Base virtual address hint (%p != %p) not respected!\n",
			requested_addr, aligned_addr);
		RTE_LOG(WARNING, EAL, "   This may cause issues with mapping memory into secondary processes\n");
	}

	if (unmap)
		munmap(mapped_addr, map_sz);

	RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",
		aligned_addr, *size);

	baseaddr_offset += *size;

	return aligned_addr;
}
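/*
 * Usage sketch (illustrative only, not part of EAL): reserve a 2 MB-aligned
 * virtual area and immediately unmap it, so the caller learns a usable
 * address range without holding pages. The guard macro and function name
 * are hypothetical; the flags are the EAL_VIRTUAL_AREA_* flags used above.
 */
#ifdef EAL_MEMORY_USAGE_EXAMPLES
static void * __rte_unused
example_probe_va_range(size_t *sz)
{
	/* on failure, *sz shrinks in page_sz steps until something fits */
	return eal_get_virtual_area(NULL, sz, RTE_PGSIZE_2M,
			EAL_VIRTUAL_AREA_ALLOW_SHRINK |
			EAL_VIRTUAL_AREA_UNMAP, 0);
}
#endif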
static uint64_t
get_mem_amount(uint64_t page_sz, uint64_t max_mem)
{
	uint64_t area_sz, max_pages;

	/* limit to RTE_MAX_MEMSEG_PER_LIST pages or RTE_MAX_MEM_MB_PER_LIST */
	max_pages = RTE_MAX_MEMSEG_PER_LIST;
	max_mem = RTE_MIN((uint64_t)RTE_MAX_MEM_MB_PER_LIST << 20, max_mem);

	area_sz = RTE_MIN(page_sz * max_pages, max_mem);

	/* make sure the list isn't smaller than the page size */
	area_sz = RTE_MAX(area_sz, page_sz);

	return RTE_ALIGN(area_sz, page_sz);
}
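/*
 * Worked example for get_mem_amount() (assuming the default build-time
 * values RTE_MAX_MEMSEG_PER_LIST = 8192 and RTE_MAX_MEM_MB_PER_LIST =
 * 32768): with 2 MB pages, area_sz = min(8192 * 2 MB, 32 GB) = 16 GB, so
 * the segment-count cap wins; with 1 GB pages, area_sz =
 * min(8192 GB, 32 GB) = 32 GB, so the per-list memory cap wins.
 */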
static int
alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
		uint64_t max_mem, int socket_id, int type_msl_idx)
{
	char name[RTE_FBARRAY_NAME_LEN];
	uint64_t mem_amount;
	int max_segs;

	mem_amount = get_mem_amount(page_sz, max_mem);
	max_segs = mem_amount / page_sz;

	snprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,
		 type_msl_idx);
	if (rte_fbarray_init(&msl->memseg_arr, name, max_segs,
			sizeof(struct rte_memseg))) {
		RTE_LOG(ERR, EAL, "Cannot allocate memseg list: %s\n",
			rte_strerror(rte_errno));
		return -1;
	}

	msl->page_sz = page_sz;
	msl->socket_id = socket_id;
	msl->base_va = NULL;

	RTE_LOG(DEBUG, EAL, "Memseg list allocated: 0x%zxkB at socket %i\n",
			(size_t)page_sz >> 10, socket_id);

	return 0;
}
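/*
 * For illustration: with the format above, a list of 2 MB pages on socket 0
 * with type index 0 gets the fbarray name "memseg-2048k-0-0" (the page size
 * is printed in kilobytes).
 */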
static int
alloc_va_space(struct rte_memseg_list *msl)
{
	uint64_t page_sz, mem_sz;
	void *addr;
	int flags = 0;

#ifdef RTE_ARCH_PPC_64
	flags |= MAP_HUGETLB;
#endif

	page_sz = msl->page_sz;
	mem_sz = page_sz * msl->memseg_arr.len;

	addr = eal_get_virtual_area(msl->base_va, &mem_sz, page_sz, 0, flags);
	if (addr == NULL) {
		if (rte_errno == EADDRNOTAVAIL)
			RTE_LOG(ERR, EAL, "Could not mmap %llu bytes at [%p] - please use '--base-virtaddr' option\n",
				(unsigned long long)mem_sz, msl->base_va);
		else
			RTE_LOG(ERR, EAL, "Cannot reserve memory\n");
		return -1;
	}

	msl->base_va = addr;

	return 0;
}
static int __rte_unused
memseg_primary_init_32(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int active_sockets, hpi_idx, msl_idx = 0;
	unsigned int socket_id, i;
	struct rte_memseg_list *msl;
	uint64_t extra_mem_per_socket, total_extra_mem, total_requested_mem;
	uint64_t max_mem;

	/* no-huge does not need this at all */
	if (internal_config.no_hugetlbfs)
		return 0;

	/* this is a giant hack, but desperate times call for desperate
	 * measures. in legacy 32-bit mode, we cannot preallocate VA space,
	 * because having upwards of 2 gigabytes of VA space already mapped will
	 * interfere with our ability to map and sort hugepages.
	 *
	 * therefore, in legacy 32-bit mode, we will be initializing memseg
	 * lists much later - in eal_memory.c, right after we unmap all the
	 * unneeded pages. this will not affect secondary processes, as those
	 * should be able to mmap the space without (too many) problems.
	 */
	if (internal_config.legacy_mem)
		return 0;

	/* 32-bit mode is a very special case. we cannot know in advance where
	 * the user will want to allocate their memory, so we have to do some
	 * heuristics.
	 */
	active_sockets = 0;
	total_requested_mem = 0;
	if (internal_config.force_sockets)
		for (i = 0; i < rte_socket_count(); i++) {
			uint64_t mem;

			socket_id = rte_socket_id_by_idx(i);
			mem = internal_config.socket_mem[socket_id];

			if (mem == 0)
				continue;

			active_sockets++;
			total_requested_mem += mem;
		}
	else
		total_requested_mem = internal_config.memory;

	max_mem = (uint64_t)RTE_MAX_MEM_MB_PER_TYPE << 20;
	if (total_requested_mem > max_mem) {
		RTE_LOG(ERR, EAL, "Invalid parameters: 32-bit process can at most use %uM of memory\n",
				(unsigned int)(max_mem >> 20));
		return -1;
	}
	total_extra_mem = max_mem - total_requested_mem;
	extra_mem_per_socket = active_sockets == 0 ? total_extra_mem :
			total_extra_mem / active_sockets;

	/* the allocation logic is a little bit convoluted, but here's how it
	 * works, in a nutshell:
	 * - if user hasn't specified on which sockets to allocate memory via
	 *   --socket-mem, we allocate all of our memory on master core socket.
	 * - if user has specified sockets to allocate memory on, there may be
	 *   some "unused" memory left (e.g. if user has specified --socket-mem
	 *   such that not all memory adds up to 2 gigabytes), so add it to all
	 *   sockets that are in use equally.
	 *
	 * page sizes are sorted by size in descending order, so we can safely
	 * assume that we dispense with bigger page sizes first.
	 */

	/* create memseg lists */
	for (i = 0; i < rte_socket_count(); i++) {
		int hp_sizes = (int) internal_config.num_hugepage_sizes;
		uint64_t max_socket_mem, cur_socket_mem;
		unsigned int master_lcore_socket;
		struct rte_config *cfg = rte_eal_get_configuration();
		bool skip;

		socket_id = rte_socket_id_by_idx(i);

#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
		if (socket_id > 0)
			break;
#endif

		/* if we didn't specifically request memory on this socket */
		skip = active_sockets != 0 &&
				internal_config.socket_mem[socket_id] == 0;
		/* ...or if we didn't specifically request memory on *any*
		 * socket, and this is not master lcore
		 */
		master_lcore_socket = rte_lcore_to_socket_id(cfg->master_lcore);
		skip |= active_sockets == 0 && socket_id != master_lcore_socket;

		if (skip) {
			RTE_LOG(DEBUG, EAL, "Will not preallocate memory on socket %u\n",
					socket_id);
			continue;
		}

		/* max amount of memory on this socket */
		max_socket_mem = (active_sockets != 0 ?
					internal_config.socket_mem[socket_id] :
					internal_config.memory) +
					extra_mem_per_socket;
		cur_socket_mem = 0;

		for (hpi_idx = 0; hpi_idx < hp_sizes; hpi_idx++) {
			uint64_t max_pagesz_mem, cur_pagesz_mem = 0;
			uint64_t hugepage_sz;
			struct hugepage_info *hpi;
			int type_msl_idx, max_segs, total_segs = 0;

			hpi = &internal_config.hugepage_info[hpi_idx];
			hugepage_sz = hpi->hugepage_sz;

			max_segs = RTE_MAX_MEMSEG_PER_TYPE;
			max_pagesz_mem = max_socket_mem - cur_socket_mem;

			/* make it multiple of page size */
			max_pagesz_mem = RTE_ALIGN_FLOOR(max_pagesz_mem,
					hugepage_sz);

			RTE_LOG(DEBUG, EAL, "Attempting to preallocate "
					"%" PRIu64 "M on socket %i\n",
					max_pagesz_mem >> 20, socket_id);

			type_msl_idx = 0;
			while (cur_pagesz_mem < max_pagesz_mem &&
					total_segs < max_segs) {
				if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
					RTE_LOG(ERR, EAL,
						"No more space in memseg lists, please increase %s\n",
						RTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));
					return -1;
				}

				msl = &mcfg->memsegs[msl_idx++];

				if (alloc_memseg_list(msl, hugepage_sz,
						max_pagesz_mem, socket_id,
						type_msl_idx))
					return -1;

				total_segs += msl->memseg_arr.len;
				cur_pagesz_mem = total_segs * hugepage_sz;
				type_msl_idx++;

				if (alloc_va_space(msl)) {
					RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list\n");
					return -1;
				}
			}
			cur_socket_mem += cur_pagesz_mem;
		}
	}

	return 0;
}
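/*
 * Worked example of the heuristic above (hypothetical numbers): assuming a
 * 2 GB cap and "--socket-mem=512" on a two-socket system, active_sockets is
 * 1 and total_requested_mem is 512 MB, so total_extra_mem is 1.5 GB, all of
 * which lands in socket 0's preallocation budget; socket 1 is skipped
 * because no memory was requested there.
 */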
static int __rte_unused
memseg_primary_init(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, socket_id, hpi_idx, msl_idx = 0;
	struct rte_memseg_list *msl;
	uint64_t max_mem, total_mem;

	/* no-huge does not need this at all */
	if (internal_config.no_hugetlbfs)
		return 0;

	max_mem = (uint64_t)RTE_MAX_MEM_MB << 20;
	total_mem = 0;

	/* create memseg lists */
	for (hpi_idx = 0; hpi_idx < (int) internal_config.num_hugepage_sizes;
			hpi_idx++) {
		struct hugepage_info *hpi;
		uint64_t hugepage_sz;

		hpi = &internal_config.hugepage_info[hpi_idx];
		hugepage_sz = hpi->hugepage_sz;

		for (i = 0; i < (int) rte_socket_count(); i++) {
			uint64_t max_type_mem, total_type_mem = 0;
			int type_msl_idx, max_segs, total_segs = 0;

			socket_id = rte_socket_id_by_idx(i);

#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
			if (socket_id > 0)
				break;
#endif

			max_type_mem = RTE_MIN(max_mem - total_mem,
				(uint64_t)RTE_MAX_MEM_MB_PER_TYPE << 20);
			max_segs = RTE_MAX_MEMSEG_PER_TYPE;

			type_msl_idx = 0;
			while (total_type_mem < max_type_mem &&
					total_segs < max_segs) {
				uint64_t cur_max_mem;
				if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
					RTE_LOG(ERR, EAL,
						"No more space in memseg lists, please increase %s\n",
						RTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));
					return -1;
				}

				msl = &mcfg->memsegs[msl_idx++];

				cur_max_mem = max_type_mem - total_type_mem;
				if (alloc_memseg_list(msl, hugepage_sz,
						cur_max_mem, socket_id,
						type_msl_idx))
					return -1;

				total_segs += msl->memseg_arr.len;
				total_type_mem = total_segs * hugepage_sz;
				type_msl_idx++;

				if (alloc_va_space(msl)) {
					RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list\n");
					return -1;
				}
			}
			total_mem += total_type_mem;
		}
	}

	return 0;
}
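/*
 * Worked example for the budgeting above (assuming the default build-time
 * values RTE_MAX_MEM_MB = 524288 and RTE_MAX_MEM_MB_PER_TYPE = 131072):
 * each (hugepage size, socket) pair is budgeted
 * min(512 GB - memory reserved so far, 128 GB) of VA space, carved into
 * memseg lists until either the per-type segment count or the per-type
 * memory limit is reached.
 */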
static int
memseg_secondary_init(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int msl_idx = 0;
	struct rte_memseg_list *msl;

	for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {

		msl = &mcfg->memsegs[msl_idx];

		/* skip empty memseg lists */
		if (msl->memseg_arr.len == 0)
			continue;

		if (rte_fbarray_attach(&msl->memseg_arr)) {
			RTE_LOG(ERR, EAL, "Cannot attach to primary process memseg lists\n");
			return -1;
		}

		/* preallocate VA space */
		if (alloc_va_space(msl)) {
			RTE_LOG(ERR, EAL, "Cannot preallocate VA space for hugepage memory\n");
			return -1;
		}
	}

	return 0;
}
static struct rte_memseg *
virt2memseg(const void *addr, const struct rte_memseg_list *msl)
{
	const struct rte_fbarray *arr;
	void *start, *end;
	int ms_idx;

	if (msl == NULL)
		return NULL;

	/* a memseg list was specified, check if it's the right one */
	start = msl->base_va;
	end = RTE_PTR_ADD(start, (size_t)msl->page_sz * msl->memseg_arr.len);

	if (addr < start || addr >= end)
		return NULL;

	/* now, calculate index */
	arr = &msl->memseg_arr;
	ms_idx = RTE_PTR_DIFF(addr, msl->base_va) / msl->page_sz;
	return rte_fbarray_get(arr, ms_idx);
}
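/*
 * Worked example (hypothetical addresses): in a list with base_va =
 * 0x100000000 and 2 MB pages, addr = 0x100500000 yields
 * ms_idx = 0x500000 / 0x200000 = 2, i.e. the third memseg in the array.
 */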
static struct rte_memseg_list *
virt2memseg_list(const void *addr)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	int msl_idx;

	for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
		void *start, *end;
		msl = &mcfg->memsegs[msl_idx];

		start = msl->base_va;
		end = RTE_PTR_ADD(start,
				(size_t)msl->page_sz * msl->memseg_arr.len);
		if (addr >= start && addr < end)
			break;
	}
	/* if we didn't find our memseg list */
	if (msl_idx == RTE_MAX_MEMSEG_LISTS)
		return NULL;
	return msl;
}

__rte_experimental struct rte_memseg_list *
rte_mem_virt2memseg_list(const void *addr)
{
	return virt2memseg_list(addr);
}
struct virtiova {
	rte_iova_t iova;
	void *virt;
};

static int
find_virt(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, void *arg)
{
	struct virtiova *vi = arg;
	if (vi->iova >= ms->iova && vi->iova < (ms->iova + ms->len)) {
		size_t offset = vi->iova - ms->iova;
		vi->virt = RTE_PTR_ADD(ms->addr, offset);
		/* stop the walk */
		return 1;
	}
	return 0;
}

static int
find_virt_legacy(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, size_t len, void *arg)
{
	struct virtiova *vi = arg;
	if (vi->iova >= ms->iova && vi->iova < (ms->iova + len)) {
		size_t offset = vi->iova - ms->iova;
		vi->virt = RTE_PTR_ADD(ms->addr, offset);
		/* stop the walk */
		return 1;
	}
	return 0;
}
__rte_experimental void *
rte_mem_iova2virt(rte_iova_t iova)
{
	struct virtiova vi;

	memset(&vi, 0, sizeof(vi));

	vi.iova = iova;
	/* for legacy mem, we can get away with scanning VA-contiguous segments,
	 * as we know they are PA-contiguous as well
	 */
	if (internal_config.legacy_mem)
		rte_memseg_contig_walk(find_virt_legacy, &vi);
	else
		rte_memseg_walk(find_virt, &vi);

	return vi.virt;
}

__rte_experimental struct rte_memseg *
rte_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl)
{
	return virt2memseg(addr, msl != NULL ? msl :
			rte_mem_virt2memseg_list(addr));
}
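/*
 * Usage sketch (illustrative only): translate a VA back to its IOVA via the
 * owning memseg. The guard macro and function name are hypothetical; the
 * lookup APIs are the ones defined above.
 */
#ifdef EAL_MEMORY_USAGE_EXAMPLES
static rte_iova_t __rte_unused
example_virt2iova(const void *addr)
{
	const struct rte_memseg *ms = rte_mem_virt2memseg(addr, NULL);

	if (ms == NULL)
		return RTE_BAD_IOVA;
	/* the offset into the segment is the same in VA and IOVA space */
	return ms->iova + RTE_PTR_DIFF(addr, ms->addr);
}
#endif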
static int
physmem_size(const struct rte_memseg_list *msl, void *arg)
{
	uint64_t *total_len = arg;

	*total_len += msl->memseg_arr.count * msl->page_sz;

	return 0;
}

/* get the total size of memory */
uint64_t
rte_eal_get_physmem_size(void)
{
	uint64_t total_len = 0;

	rte_memseg_list_walk(physmem_size, &total_len);

	return total_len;
}
static int
dump_memseg(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
		void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int msl_idx, ms_idx;
	FILE *f = arg;

	msl_idx = msl - mcfg->memsegs;
	if (msl_idx < 0 || msl_idx >= RTE_MAX_MEMSEG_LISTS)
		return -1;

	ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
	if (ms_idx < 0)
		return -1;

	fprintf(f, "Segment %i-%i: IOVA:0x%"PRIx64", len:%zu, "
			"virt:%p, socket_id:%"PRId32", "
			"hugepage_sz:%"PRIu64", nchannel:%"PRIx32", "
			"nrank:%"PRIx32"\n",
			msl_idx, ms_idx,
			ms->iova,
			ms->len,
			ms->addr,
			ms->socket_id,
			ms->hugepage_sz,
			ms->nchannel,
			ms->nrank);

	return 0;
}
/*
 * Defining here because declared in rte_memory.h, but the actual implementation
 * is in eal_common_memalloc.c, like all other memalloc internals.
 */
int __rte_experimental
rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Registering mem event callbacks not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_event_callback_register(name, clb);
}
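/*
 * Usage sketch (illustrative only): an event callback that logs every
 * allocation and deallocation. Assumes the rte_mem_event_callback_t
 * signature of (event_type, addr, len); the guard macro, function name and
 * registration string are hypothetical.
 */
#ifdef EAL_MEMORY_USAGE_EXAMPLES
static void __rte_unused
example_mem_event_cb(enum rte_mem_event event_type, const void *addr,
		size_t len)
{
	RTE_LOG(DEBUG, EAL, "mem event: %s, %zu bytes at %p\n",
		event_type == RTE_MEM_EVENT_ALLOC ? "alloc" : "free",
		len, addr);
}
/* registered with: rte_mem_event_callback_register("example", example_mem_event_cb); */
#endif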
int __rte_experimental
rte_mem_event_callback_unregister(const char *name)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Unregistering mem event callbacks not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_event_callback_unregister(name);
}
int __rte_experimental
rte_mem_alloc_validator_register(const char *name,
		rte_mem_alloc_validator_t clb, int socket_id, size_t limit)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Registering mem alloc validators not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_alloc_validator_register(name, clb, socket_id,
			limit);
}
int __rte_experimental
rte_mem_alloc_validator_unregister(const char *name, int socket_id)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Unregistering mem alloc validators not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_alloc_validator_unregister(name, socket_id);
}
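/*
 * Usage sketch (illustrative only): a validator that vetoes growth past the
 * limit it was registered with. Assumes rte_mem_alloc_validator_t receives
 * (socket_id, registered limit, prospective new total) and that returning
 * -1 denies the allocation. The guard macro and names are hypothetical.
 */
#ifdef EAL_MEMORY_USAGE_EXAMPLES
static int __rte_unused
example_alloc_validator(int socket_id, size_t limit, size_t new_len)
{
	if (new_len > limit) {
		RTE_LOG(DEBUG, EAL, "Denying allocation of %zu bytes on socket %i\n",
			new_len, socket_id);
		return -1;
	}
	return 0;
}
#endif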
/* Dump the physical memory layout on console */
void
rte_dump_physmem_layout(FILE *f)
{
	rte_memseg_walk(dump_memseg, f);
}

/* return the number of memory channels */
unsigned rte_memory_get_nchannel(void)
{
	return rte_eal_get_configuration()->mem_config->nchannel;
}

/* return the number of memory ranks */
unsigned rte_memory_get_nrank(void)
{
	return rte_eal_get_configuration()->mem_config->nrank;
}
static int
rte_eal_memdevice_init(void)
{
	struct rte_config *config;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	config = rte_eal_get_configuration();
	config->mem_config->nchannel = internal_config.force_nchannel;
	config->mem_config->nrank = internal_config.force_nrank;

	return 0;
}
/* Lock page in physical memory and prevent from swapping. */
int
rte_mem_lock_page(const void *virt)
{
	unsigned long virtual = (unsigned long)virt;
	int page_size = getpagesize();
	unsigned long aligned = (virtual & ~(page_size - 1));
	return mlock((void *)aligned, page_size);
}
int __rte_experimental
rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, ms_idx, ret = 0;

	/* do not allow allocations/frees/init while we iterate */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);

	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];
		const struct rte_memseg *ms;
		struct rte_fbarray *arr;

		if (msl->memseg_arr.count == 0)
			continue;

		arr = &msl->memseg_arr;

		ms_idx = rte_fbarray_find_next_used(arr, 0);
		while (ms_idx >= 0) {
			int n_segs;
			size_t len;

			ms = rte_fbarray_get(arr, ms_idx);

			/* find how many more segments there are, starting with
			 * this one.
			 */
			n_segs = rte_fbarray_find_contig_used(arr, ms_idx);
			len = n_segs * msl->page_sz;

			ret = func(msl, ms, len, arg);
			if (ret < 0) {
				ret = -1;
				goto out;
			} else if (ret > 0) {
				ret = 1;
				goto out;
			}
			ms_idx = rte_fbarray_find_next_used(arr,
					ms_idx + n_segs);
		}
	}
out:
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
	return ret;
}
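/*
 * Usage sketch (illustrative only): a rte_memseg_contig_walk() callback that
 * records the largest VA-contiguous run of segments. The guard macro and
 * function name are hypothetical.
 */
#ifdef EAL_MEMORY_USAGE_EXAMPLES
static int __rte_unused
example_biggest_contig_run(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms __rte_unused, size_t len, void *arg)
{
	size_t *biggest = arg;

	if (len > *biggest)
		*biggest = len;
	return 0; /* zero means: keep walking */
}
#endif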
int __rte_experimental
rte_memseg_walk(rte_memseg_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, ms_idx, ret = 0;

	/* do not allow allocations/frees/init while we iterate */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);

	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];
		const struct rte_memseg *ms;
		struct rte_fbarray *arr;

		if (msl->memseg_arr.count == 0)
			continue;

		arr = &msl->memseg_arr;

		ms_idx = rte_fbarray_find_next_used(arr, 0);
		while (ms_idx >= 0) {
			ms = rte_fbarray_get(arr, ms_idx);
			ret = func(msl, ms, arg);
			if (ret < 0) {
				ret = -1;
				goto out;
			} else if (ret > 0) {
				ret = 1;
				goto out;
			}
			ms_idx = rte_fbarray_find_next_used(arr, ms_idx + 1);
		}
	}
out:
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
	return ret;
}
int __rte_experimental
rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, ret = 0;

	/* do not allow allocations/frees/init while we iterate */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);

	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];

		if (msl->base_va == NULL)
			continue;

		ret = func(msl, arg);
		if (ret < 0) {
			ret = -1;
			goto out;
		}
		if (ret > 0) {
			ret = 1;
			goto out;
		}
	}
out:
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
	return ret;
}
/* init memory subsystem */
int
rte_eal_memory_init(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int retval;

	RTE_LOG(DEBUG, EAL, "Setting up physically contiguous memory...\n");

	if (mcfg == NULL)
		return -1;

	/* lock mem hotplug here, to prevent races while we init */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);

	retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
#ifndef RTE_ARCH_64
			memseg_primary_init_32() :
#else
			memseg_primary_init() :
#endif
			memseg_secondary_init();

	if (retval < 0)
		goto fail;

	retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
			rte_eal_hugepage_init() :
			rte_eal_hugepage_attach();
	if (retval < 0)
		goto fail;

	if (internal_config.no_shconf == 0 && rte_eal_memdevice_init() < 0)
		goto fail;

	return 0;
fail:
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
	return -1;
}