1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
14 #include <sys/queue.h>
16 #include <rte_fbarray.h>
17 #include <rte_memory.h>
19 #include <rte_eal_memconfig.h>
20 #include <rte_errno.h>
23 #include "eal_memalloc.h"
24 #include "eal_private.h"
25 #include "eal_internal_cfg.h"
28 * Try to mmap *size bytes of anonymous memory. If it is successful, return
29 * the pointer to the mapped area and keep *size unmodified. Otherwise, if
30 * shrinking is allowed, retry with a smaller area: decrease *size by page_sz
31 * until it reaches 0, and return NULL in that case. Note: this function
32 * returns an address that is aligned to the requested page size.
35 #define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"
37 static uint64_t baseaddr_offset;
38 static uint64_t system_page_sz;
41 eal_get_virtual_area(void *requested_addr, size_t *size,
42 size_t page_sz, int flags, int mmap_flags)
44 bool addr_is_hint, allow_shrink, unmap, no_align;
46 void *mapped_addr, *aligned_addr;
48 if (system_page_sz == 0)
49 system_page_sz = sysconf(_SC_PAGESIZE);
51 mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;
53 RTE_LOG(DEBUG, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);
55 addr_is_hint = (flags & EAL_VIRTUAL_AREA_ADDR_IS_HINT) > 0;
56 allow_shrink = (flags & EAL_VIRTUAL_AREA_ALLOW_SHRINK) > 0;
57 unmap = (flags & EAL_VIRTUAL_AREA_UNMAP) > 0;
59 if (requested_addr == NULL && internal_config.base_virtaddr != 0) {
60 requested_addr = (void *) (internal_config.base_virtaddr +
61 (size_t)baseaddr_offset);
62 requested_addr = RTE_PTR_ALIGN(requested_addr, page_sz);
66 /* if the requested address is not aligned to the page size, or if the
67 * requested address is NULL, add one page size to the requested length,
68 * since the address we get back may only be aligned to the system page
69 * size, which can be smaller than the requested page size. additionally,
70 * we shouldn't try to align if system and requested page sizes are the same.
72 no_align = (requested_addr != NULL &&
73 ((uintptr_t)requested_addr & (page_sz - 1)) == 0) ||
74 page_sz == system_page_sz;
77 map_sz = no_align ? *size : *size + page_sz;
78 if (map_sz > SIZE_MAX) {
79 RTE_LOG(ERR, EAL, "Map size too big\n");
84 mapped_addr = mmap(requested_addr, (size_t)map_sz, PROT_READ,
85 mmap_flags, -1, 0);
86 if (mapped_addr == MAP_FAILED && allow_shrink)
87 *size -= page_sz;
88 } while (allow_shrink && mapped_addr == MAP_FAILED && *size > 0);
90 /* align resulting address - if map failed, we will ignore the value
91 * anyway, so no need to add additional checks.
93 aligned_addr = no_align ? mapped_addr :
94 RTE_PTR_ALIGN(mapped_addr, page_sz);
97 RTE_LOG(ERR, EAL, "Cannot get a virtual area of any size: %s\n",
101 } else if (mapped_addr == MAP_FAILED) {
102 RTE_LOG(ERR, EAL, "Cannot get a virtual area: %s\n",
104 /* pass errno up the call chain */
107 } else if (requested_addr != NULL && !addr_is_hint &&
108 aligned_addr != requested_addr) {
109 RTE_LOG(ERR, EAL, "Cannot get a virtual area at requested address: %p (got %p)\n",
110 requested_addr, aligned_addr);
111 munmap(mapped_addr, map_sz);
112 rte_errno = EADDRNOTAVAIL;
114 } else if (requested_addr != NULL && addr_is_hint &&
115 aligned_addr != requested_addr) {
116 RTE_LOG(WARNING, EAL, "WARNING! Base virtual address hint (%p != %p) not respected!\n",
117 requested_addr, aligned_addr);
118 RTE_LOG(WARNING, EAL, " This may cause issues with mapping memory into secondary processes\n");
121 RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",
122 aligned_addr, *size);
125 munmap(mapped_addr, map_sz);
126 } else if (!no_align) {
127 void *map_end, *aligned_end;
128 size_t before_len, after_len;
130 /* when we reserve space with alignment, we add alignment to
131 * mapping size. On 32-bit, if 1GB alignment was requested, this
132 * would waste 1GB of address space, which is a luxury we cannot
133 * afford. so, if alignment was performed, check if any unneeded
134 * address space can be unmapped back.
137 map_end = RTE_PTR_ADD(mapped_addr, (size_t)map_sz);
138 aligned_end = RTE_PTR_ADD(aligned_addr, *size);
140 /* unmap space before aligned mmap address */
141 before_len = RTE_PTR_DIFF(aligned_addr, mapped_addr);
143 munmap(mapped_addr, before_len);
145 /* unmap space after aligned end mmap address */
146 after_len = RTE_PTR_DIFF(map_end, aligned_end);
148 munmap(aligned_end, after_len);
151 baseaddr_offset += *size;
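
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * reserving an address-space region with eal_get_virtual_area() above.
 * The example_reserve_va() name and the 2M page size are hypothetical;
 * real callers such as alloc_va_space() below pass the memseg list's own
 * page size and base address.
 */
static void * __rte_unused
example_reserve_va(size_t len)
{
	size_t sz = len;

	/* no address preference, allow shrinking, keep the reservation mapped */
	return eal_get_virtual_area(NULL, &sz, RTE_PGSIZE_2M,
			EAL_VIRTUAL_AREA_ALLOW_SHRINK, 0);
}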
157 get_mem_amount(uint64_t page_sz, uint64_t max_mem)
159 uint64_t area_sz, max_pages;
161 /* limit to RTE_MAX_MEMSEG_PER_LIST pages or RTE_MAX_MEM_MB_PER_LIST */
162 max_pages = RTE_MAX_MEMSEG_PER_LIST;
163 max_mem = RTE_MIN((uint64_t)RTE_MAX_MEM_MB_PER_LIST << 20, max_mem);
165 area_sz = RTE_MIN(page_sz * max_pages, max_mem);
167 /* make sure the list isn't smaller than the page size */
168 area_sz = RTE_MAX(area_sz, page_sz);
170 return RTE_ALIGN(area_sz, page_sz);
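
/*
 * Worked example (editor's addition; values assume the stock build config,
 * RTE_MAX_MEMSEG_PER_LIST = 8192 and RTE_MAX_MEM_MB_PER_LIST = 32768, and
 * that the max_mem argument is not the limiting factor): for 2M pages a
 * list covers min(2M * 8192, 32G) = 16G; for 1G pages the 32G per-list
 * memory cap applies instead.
 */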
174 free_memseg_list(struct rte_memseg_list *msl)
176 if (rte_fbarray_destroy(&msl->memseg_arr)) {
177 RTE_LOG(ERR, EAL, "Cannot destroy memseg list\n");
180 memset(msl, 0, sizeof(*msl));
185 alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
186 uint64_t max_mem, int socket_id, int type_msl_idx)
188 char name[RTE_FBARRAY_NAME_LEN];
192 mem_amount = get_mem_amount(page_sz, max_mem);
193 max_segs = mem_amount / page_sz;
195 snprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,
197 if (rte_fbarray_init(&msl->memseg_arr, name, max_segs,
198 sizeof(struct rte_memseg))) {
199 RTE_LOG(ERR, EAL, "Cannot allocate memseg list: %s\n",
200 rte_strerror(rte_errno));
204 msl->page_sz = page_sz;
205 msl->socket_id = socket_id;
208 RTE_LOG(DEBUG, EAL, "Memseg list allocated: 0x%zxkB at socket %i\n",
209 (size_t)page_sz >> 10, socket_id);
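
/*
 * Example (editor's addition): with MEMSEG_LIST_FMT, a list of 2M pages on
 * socket 0 with type index 0 is backed by an fbarray named "memseg-2048k-0-0".
 */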
215 alloc_va_space(struct rte_memseg_list *msl)
222 #ifdef RTE_ARCH_PPC_64
223 flags |= MAP_HUGETLB;
226 page_sz = msl->page_sz;
227 mem_sz = page_sz * msl->memseg_arr.len;
229 addr = eal_get_virtual_area(msl->base_va, &mem_sz, page_sz, 0, flags);
231 if (rte_errno == EADDRNOTAVAIL)
232 RTE_LOG(ERR, EAL, "Could not mmap %llu bytes at [%p] - please use '--base-virtaddr' option\n",
233 (unsigned long long)mem_sz, msl->base_va);
235 RTE_LOG(ERR, EAL, "Cannot reserve memory\n");
243 static int __rte_unused
244 memseg_primary_init_32(void)
246 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
247 int active_sockets, hpi_idx, msl_idx = 0;
248 unsigned int socket_id, i;
249 struct rte_memseg_list *msl;
250 uint64_t extra_mem_per_socket, total_extra_mem, total_requested_mem;
253 /* no-huge does not need this at all */
254 if (internal_config.no_hugetlbfs)
257 /* this is a giant hack, but desperate times call for desperate
258 * measures. in legacy 32-bit mode, we cannot preallocate VA space,
259 * because having upwards of 2 gigabytes of VA space already mapped will
260 * interfere with our ability to map and sort hugepages.
262 * therefore, in legacy 32-bit mode, we will be initializing memseg
263 * lists much later - in eal_memory.c, right after we unmap all the
264 * unneeded pages. this will not affect secondary processes, as those
265 * should be able to mmap the space without (too many) problems.
267 if (internal_config.legacy_mem)
270 /* 32-bit mode is a very special case. we cannot know in advance where
271 * the user will want to allocate their memory, so we have to do some
275 total_requested_mem = 0;
276 if (internal_config.force_sockets)
277 for (i = 0; i < rte_socket_count(); i++) {
280 socket_id = rte_socket_id_by_idx(i);
281 mem = internal_config.socket_mem[socket_id];
287 total_requested_mem += mem;
290 total_requested_mem = internal_config.memory;
292 max_mem = (uint64_t)RTE_MAX_MEM_MB << 20;
293 if (total_requested_mem > max_mem) {
294 RTE_LOG(ERR, EAL, "Invalid parameters: 32-bit process can at most use %uM of memory\n",
295 (unsigned int)(max_mem >> 20));
298 total_extra_mem = max_mem - total_requested_mem;
299 extra_mem_per_socket = active_sockets == 0 ? total_extra_mem :
300 total_extra_mem / active_sockets;
302 /* the allocation logic is a little bit convoluted, but here's how it
303 * works, in a nutshell:
304 * - if user hasn't specified on which sockets to allocate memory via
305 * --socket-mem, we allocate all of our memory on master core socket.
306 * - if user has specified sockets to allocate memory on, there may be
307 * some "unused" memory left (e.g. if user has specified --socket-mem
308 * such that not all memory adds up to 2 gigabytes), so add it to all
309 * sockets that are in use equally.
311 * page sizes are sorted by size in descending order, so we can safely
312 * assume that we dispense with bigger page sizes first.
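 *
 * worked example (editor's addition; numbers hypothetical, assuming
 * RTE_MAX_MEM_MB is at its 32-bit default of 2048): with --socket-mem=512
 * on socket 0 only, total_requested_mem is 512M, total_extra_mem is 1536M,
 * and with one active socket all of the extra goes to socket 0, so up to
 * 2G of VA may be preallocated there.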
315 /* create memseg lists */
316 for (i = 0; i < rte_socket_count(); i++) {
317 int hp_sizes = (int) internal_config.num_hugepage_sizes;
318 uint64_t max_socket_mem, cur_socket_mem;
319 unsigned int master_lcore_socket;
320 struct rte_config *cfg = rte_eal_get_configuration();
323 socket_id = rte_socket_id_by_idx(i);
325 #ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
330 /* if we didn't specifically request memory on this socket */
331 skip = active_sockets != 0 &&
332 internal_config.socket_mem[socket_id] == 0;
333 /* ...or if we didn't specifically request memory on *any*
334 * socket, and this is not master lcore
336 master_lcore_socket = rte_lcore_to_socket_id(cfg->master_lcore);
337 skip |= active_sockets == 0 && socket_id != master_lcore_socket;
340 RTE_LOG(DEBUG, EAL, "Will not preallocate memory on socket %u\n",
345 /* max amount of memory on this socket */
346 max_socket_mem = (active_sockets != 0 ?
347 internal_config.socket_mem[socket_id] :
348 internal_config.memory) +
349 extra_mem_per_socket;
352 for (hpi_idx = 0; hpi_idx < hp_sizes; hpi_idx++) {
353 uint64_t max_pagesz_mem, cur_pagesz_mem = 0;
354 uint64_t hugepage_sz;
355 struct hugepage_info *hpi;
356 int type_msl_idx, max_segs, total_segs = 0;
358 hpi = &internal_config.hugepage_info[hpi_idx];
359 hugepage_sz = hpi->hugepage_sz;
361 /* check if pages are actually available */
362 if (hpi->num_pages[socket_id] == 0)
365 max_segs = RTE_MAX_MEMSEG_PER_TYPE;
366 max_pagesz_mem = max_socket_mem - cur_socket_mem;
368 /* make it multiple of page size */
369 max_pagesz_mem = RTE_ALIGN_FLOOR(max_pagesz_mem,
372 RTE_LOG(DEBUG, EAL, "Attempting to preallocate "
373 "%" PRIu64 "M on socket %i\n",
374 max_pagesz_mem >> 20, socket_id);
377 while (cur_pagesz_mem < max_pagesz_mem &&
378 total_segs < max_segs) {
379 if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
381 "No more space in memseg lists, please increase %s\n",
382 RTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));
386 msl = &mcfg->memsegs[msl_idx];
388 if (alloc_memseg_list(msl, hugepage_sz,
389 max_pagesz_mem, socket_id,
391 /* failing to allocate a memseg list is
394 RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
398 if (alloc_va_space(msl)) {
399 /* if we couldn't allocate VA space, we
400 * can try with smaller page sizes.
402 RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list, retrying with different page size\n");
403 /* deallocate memseg list */
404 if (free_memseg_list(msl))
409 total_segs += msl->memseg_arr.len;
410 cur_pagesz_mem = total_segs * hugepage_sz;
414 cur_socket_mem += cur_pagesz_mem;
416 if (cur_socket_mem == 0) {
417 RTE_LOG(ERR, EAL, "Cannot allocate VA space on socket %u\n",
426 static int __rte_unused
427 memseg_primary_init(void)
429 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
430 int i, socket_id, hpi_idx, msl_idx = 0;
431 struct rte_memseg_list *msl;
432 uint64_t max_mem, total_mem;
434 /* no-huge does not need this at all */
435 if (internal_config.no_hugetlbfs)
438 max_mem = (uint64_t)RTE_MAX_MEM_MB << 20;
441 /* create memseg lists */
442 for (hpi_idx = 0; hpi_idx < (int) internal_config.num_hugepage_sizes;
444 struct hugepage_info *hpi;
445 uint64_t hugepage_sz;
447 hpi = &internal_config.hugepage_info[hpi_idx];
448 hugepage_sz = hpi->hugepage_sz;
450 for (i = 0; i < (int) rte_socket_count(); i++) {
451 uint64_t max_type_mem, total_type_mem = 0;
452 int type_msl_idx, max_segs, total_segs = 0;
454 socket_id = rte_socket_id_by_idx(i);
456 #ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
461 if (total_mem >= max_mem)
464 max_type_mem = RTE_MIN(max_mem - total_mem,
465 (uint64_t)RTE_MAX_MEM_MB_PER_TYPE << 20);
466 max_segs = RTE_MAX_MEMSEG_PER_TYPE;
469 while (total_type_mem < max_type_mem &&
470 total_segs < max_segs) {
471 uint64_t cur_max_mem;
472 if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
474 "No more space in memseg lists, please increase %s\n",
475 RTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));
479 msl = &mcfg->memsegs[msl_idx++];
481 cur_max_mem = max_type_mem - total_type_mem;
482 if (alloc_memseg_list(msl, hugepage_sz,
483 cur_max_mem, socket_id,
487 total_segs += msl->memseg_arr.len;
488 total_type_mem = total_segs * hugepage_sz;
491 if (alloc_va_space(msl)) {
492 RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list\n");
496 total_mem += total_type_mem;
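
/*
 * Worked example (editor's addition; assumes the stock config values
 * RTE_MAX_MEMSEG_PER_TYPE = 32768 and RTE_MAX_MEM_MB_PER_TYPE = 131072):
 * for 2M pages on one socket the segment cap binds first, giving
 * 32768 * 2M = 64G spread over four 16G memseg lists; for 1G pages the
 * 128G per-type memory cap binds instead, giving four 32G lists.
 */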
503 memseg_secondary_init(void)
505 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
507 struct rte_memseg_list *msl;
509 for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
511 msl = &mcfg->memsegs[msl_idx];
513 /* skip empty memseg lists */
514 if (msl->memseg_arr.len == 0)
517 if (rte_fbarray_attach(&msl->memseg_arr)) {
518 RTE_LOG(ERR, EAL, "Cannot attach to primary process memseg lists\n");
522 /* preallocate VA space */
523 if (alloc_va_space(msl)) {
524 RTE_LOG(ERR, EAL, "Cannot preallocate VA space for hugepage memory\n");
532 static struct rte_memseg *
533 virt2memseg(const void *addr, const struct rte_memseg_list *msl)
535 const struct rte_fbarray *arr;
539 /* a memseg list was specified, check if it's the right one */
540 start = msl->base_va;
541 end = RTE_PTR_ADD(start, (size_t)msl->page_sz * msl->memseg_arr.len);
543 if (addr < start || addr >= end)
546 /* now, calculate index */
547 arr = &msl->memseg_arr;
548 ms_idx = RTE_PTR_DIFF(addr, msl->base_va) / msl->page_sz;
549 return rte_fbarray_get(arr, ms_idx);
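
/*
 * Example (editor's addition, addresses are hypothetical): for a list with
 * base_va 0x100000000 and 2M pages, addr 0x100400000 yields
 * ms_idx = 0x400000 / 0x200000 = 2, i.e. the third memseg in the fbarray.
 */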
552 static struct rte_memseg_list *
553 virt2memseg_list(const void *addr)
555 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
556 struct rte_memseg_list *msl;
559 for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
561 msl = &mcfg->memsegs[msl_idx];
563 start = msl->base_va;
564 end = RTE_PTR_ADD(start,
565 (size_t)msl->page_sz * msl->memseg_arr.len);
566 if (addr >= start && addr < end)
569 /* if we didn't find our memseg list */
570 if (msl_idx == RTE_MAX_MEMSEG_LISTS)
575 __rte_experimental struct rte_memseg_list *
576 rte_mem_virt2memseg_list(const void *addr)
578 return virt2memseg_list(addr);
586 find_virt(const struct rte_memseg_list *msl __rte_unused,
587 const struct rte_memseg *ms, void *arg)
589 struct virtiova *vi = arg;
590 if (vi->iova >= ms->iova && vi->iova < (ms->iova + ms->len)) {
591 size_t offset = vi->iova - ms->iova;
592 vi->virt = RTE_PTR_ADD(ms->addr, offset);
599 find_virt_legacy(const struct rte_memseg_list *msl __rte_unused,
600 const struct rte_memseg *ms, size_t len, void *arg)
602 struct virtiova *vi = arg;
603 if (vi->iova >= ms->iova && vi->iova < (ms->iova + len)) {
604 size_t offset = vi->iova - ms->iova;
605 vi->virt = RTE_PTR_ADD(ms->addr, offset);
612 __rte_experimental void *
613 rte_mem_iova2virt(rte_iova_t iova)
617 memset(&vi, 0, sizeof(vi));
620 /* for legacy mem, we can get away with scanning VA-contiguous segments,
621 * as we know they are PA-contiguous as well
623 if (internal_config.legacy_mem)
624 rte_memseg_contig_walk(find_virt_legacy, &vi);
626 rte_memseg_walk(find_virt, &vi);
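
/*
 * Usage note (editor's addition): rte_mem_iova2virt(iova) returns the
 * process-local virtual address backing that IOVA, or NULL if the IOVA does
 * not fall inside any DPDK memseg.
 */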
631 __rte_experimental struct rte_memseg *
632 rte_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl)
634 return virt2memseg(addr, msl != NULL ? msl :
635 rte_mem_virt2memseg_list(addr));
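
/*
 * Illustrative sketch (editor's addition): deriving a pointer's IOVA from
 * the memseg it falls into. The example_virt2iova() name is hypothetical;
 * this is essentially how addresses resolve when IOVAs are not virtual
 * addresses.
 */
static rte_iova_t __rte_unused
example_virt2iova(const void *addr)
{
	const struct rte_memseg *ms = rte_mem_virt2memseg(addr, NULL);

	if (ms == NULL)
		return RTE_BAD_IOVA;
	/* IOVA of segment start plus offset of addr within the segment */
	return ms->iova + RTE_PTR_DIFF(addr, ms->addr);
}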
639 physmem_size(const struct rte_memseg_list *msl, void *arg)
641 uint64_t *total_len = arg;
643 *total_len += msl->memseg_arr.count * msl->page_sz;
648 /* get the total size of memory */
650 rte_eal_get_physmem_size(void)
652 uint64_t total_len = 0;
654 rte_memseg_list_walk(physmem_size, &total_len);
660 dump_memseg(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
663 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
667 msl_idx = msl - mcfg->memsegs;
668 if (msl_idx < 0 || msl_idx >= RTE_MAX_MEMSEG_LISTS)
671 ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
675 fprintf(f, "Segment %i-%i: IOVA:0x%"PRIx64", len:%zu, "
676 "virt:%p, socket_id:%"PRId32", "
677 "hugepage_sz:%"PRIu64", nchannel:%"PRIx32", "
692 * These are defined here because they are declared in rte_memory.h, but the
693 * actual implementation is in eal_common_memalloc.c, like all other memalloc internals.
695 int __rte_experimental
696 rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb,
699 /* FreeBSD boots with legacy mem enabled by default */
700 if (internal_config.legacy_mem) {
701 RTE_LOG(DEBUG, EAL, "Registering mem event callbacks not supported\n");
705 return eal_memalloc_mem_event_callback_register(name, clb, arg);
708 int __rte_experimental
709 rte_mem_event_callback_unregister(const char *name, void *arg)
711 /* FreeBSD boots with legacy mem enabled by default */
712 if (internal_config.legacy_mem) {
713 RTE_LOG(DEBUG, EAL, "Unregistering mem event callbacks not supported\n");
717 return eal_memalloc_mem_event_callback_unregister(name, arg);
720 int __rte_experimental
721 rte_mem_alloc_validator_register(const char *name,
722 rte_mem_alloc_validator_t clb, int socket_id, size_t limit)
724 /* FreeBSD boots with legacy mem enabled by default */
725 if (internal_config.legacy_mem) {
726 RTE_LOG(DEBUG, EAL, "Registering mem alloc validators not supported\n");
730 return eal_memalloc_mem_alloc_validator_register(name, clb, socket_id,
734 int __rte_experimental
735 rte_mem_alloc_validator_unregister(const char *name, int socket_id)
737 /* FreeBSD boots with legacy mem enabled by default */
738 if (internal_config.legacy_mem) {
739 RTE_LOG(DEBUG, EAL, "Unregistering mem alloc validators not supported\n");
743 return eal_memalloc_mem_alloc_validator_unregister(name, socket_id);
746 /* Dump the physical memory layout on console */
748 rte_dump_physmem_layout(FILE *f)
750 rte_memseg_walk(dump_memseg, f);
753 /* return the number of memory channels */
754 unsigned rte_memory_get_nchannel(void)
756 return rte_eal_get_configuration()->mem_config->nchannel;
759 /* return the number of memory ranks */
760 unsigned rte_memory_get_nrank(void)
762 return rte_eal_get_configuration()->mem_config->nrank;
766 rte_eal_memdevice_init(void)
768 struct rte_config *config;
770 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
773 config = rte_eal_get_configuration();
774 config->mem_config->nchannel = internal_config.force_nchannel;
775 config->mem_config->nrank = internal_config.force_nrank;
780 /* Lock a page in physical memory and prevent it from being swapped out. */
782 rte_mem_lock_page(const void *virt)
784 unsigned long virtual = (unsigned long)virt;
785 int page_size = getpagesize();
786 unsigned long aligned = (virtual & ~(page_size - 1));
787 return mlock((void *)aligned, page_size);
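
/*
 * Usage note (editor's addition): rte_mem_lock_page(buf) pins only the single
 * system page containing buf; larger buffers need one call per page.
 */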
790 int __rte_experimental
791 rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
793 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
794 int i, ms_idx, ret = 0;
796 /* do not allow allocations/frees/init while we iterate */
797 rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
799 for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
800 struct rte_memseg_list *msl = &mcfg->memsegs[i];
801 const struct rte_memseg *ms;
802 struct rte_fbarray *arr;
804 if (msl->memseg_arr.count == 0)
807 arr = &msl->memseg_arr;
809 ms_idx = rte_fbarray_find_next_used(arr, 0);
810 while (ms_idx >= 0) {
814 ms = rte_fbarray_get(arr, ms_idx);
816 /* find how many more segments there are, starting with
819 n_segs = rte_fbarray_find_contig_used(arr, ms_idx);
820 len = n_segs * msl->page_sz;
822 ret = func(msl, ms, len, arg);
826 } else if (ret > 0) {
830 ms_idx = rte_fbarray_find_next_used(arr,
835 rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
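
/*
 * Illustrative sketch (editor's addition): a contig-walk callback that sums
 * the lengths of all VA-contiguous runs. The example_sum_contig() name is
 * hypothetical; invoke it as rte_memseg_contig_walk(example_sum_contig, &total).
 */
static int __rte_unused
example_sum_contig(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms __rte_unused, size_t len, void *arg)
{
	size_t *total = arg;

	*total += len;
	return 0; /* 0 keeps walking; >0 stops the walk; <0 reports an error */
}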
839 int __rte_experimental
840 rte_memseg_walk(rte_memseg_walk_t func, void *arg)
842 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
843 int i, ms_idx, ret = 0;
845 /* do not allow allocations/frees/init while we iterate */
846 rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
848 for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
849 struct rte_memseg_list *msl = &mcfg->memsegs[i];
850 const struct rte_memseg *ms;
851 struct rte_fbarray *arr;
853 if (msl->memseg_arr.count == 0)
856 arr = &msl->memseg_arr;
858 ms_idx = rte_fbarray_find_next_used(arr, 0);
859 while (ms_idx >= 0) {
860 ms = rte_fbarray_get(arr, ms_idx);
861 ret = func(msl, ms, arg);
865 } else if (ret > 0) {
869 ms_idx = rte_fbarray_find_next_used(arr, ms_idx + 1);
873 rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
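
/*
 * Illustrative sketch (editor's addition): counting used memsegs with the
 * walk API above. example_count_segs() and example_n_memsegs() are
 * hypothetical names.
 */
static int __rte_unused
example_count_segs(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms __rte_unused, void *arg)
{
	int *count = arg;

	(*count)++;
	return 0;
}

static int __rte_unused
example_n_memsegs(void)
{
	int n = 0;

	rte_memseg_walk(example_count_segs, &n);
	return n;
}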
877 int __rte_experimental
878 rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg)
880 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
883 /* do not allow allocations/frees/init while we iterate */
884 rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
886 for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
887 struct rte_memseg_list *msl = &mcfg->memsegs[i];
889 if (msl->base_va == NULL)
892 ret = func(msl, arg);
903 rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
907 /* init memory subsystem */
909 rte_eal_memory_init(void)
911 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
913 RTE_LOG(DEBUG, EAL, "Setting up physically contiguous memory...\n");
918 /* lock mem hotplug here, to prevent races while we init */
919 rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
921 retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
922 #ifndef RTE_ARCH_64
923 memseg_primary_init_32() :
924 #else
925 memseg_primary_init() :
926 #endif
927 memseg_secondary_init();
932 if (eal_memalloc_init() < 0)
935 retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
936 rte_eal_hugepage_init() :
937 rte_eal_hugepage_attach();
941 if (internal_config.no_shconf == 0 && rte_eal_memdevice_init() < 0)
946 rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);