/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <errno.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <sys/queue.h>

#include <rte_fbarray.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_eal_paging.h>
#include <rte_errno.h>
#include <rte_log.h>
#ifndef RTE_EXEC_ENV_WINDOWS
#include <rte_telemetry.h>
#endif

#include "eal_memalloc.h"
#include "eal_private.h"
#include "eal_internal_cfg.h"
#include "eal_memcfg.h"
#include "eal_options.h"
#include "malloc_heap.h"
/*
 * Try to reserve *size bytes of virtual address space. If successful,
 * return the pointer to the reserved area and keep *size unmodified.
 * Otherwise, if shrinking is allowed, retry with a smaller area:
 * decrease *size by page_sz until it reaches 0, and return NULL in that
 * case. Note: this function returns an address which is a multiple of
 * the requested page size.
 */

#define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"

static void *next_baseaddr;
static uint64_t system_page_sz;

#define MAX_MMAP_WITH_DEFINED_ADDR_TRIES 5
void *
eal_get_virtual_area(void *requested_addr, size_t *size,
	size_t page_sz, int flags, int reserve_flags)
{
	bool addr_is_hint, allow_shrink, unmap, no_align;
	uint64_t map_sz;
	void *mapped_addr, *aligned_addr;
	uint8_t try = 0;
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	if (system_page_sz == 0)
		system_page_sz = rte_mem_page_size();

	RTE_LOG(DEBUG, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);

	addr_is_hint = (flags & EAL_VIRTUAL_AREA_ADDR_IS_HINT) > 0;
	allow_shrink = (flags & EAL_VIRTUAL_AREA_ALLOW_SHRINK) > 0;
	unmap = (flags & EAL_VIRTUAL_AREA_UNMAP) > 0;

	if (next_baseaddr == NULL && internal_conf->base_virtaddr != 0 &&
			rte_eal_process_type() == RTE_PROC_PRIMARY)
		next_baseaddr = (void *) internal_conf->base_virtaddr;

#ifdef RTE_ARCH_64
	if (next_baseaddr == NULL && internal_conf->base_virtaddr == 0 &&
			rte_eal_process_type() == RTE_PROC_PRIMARY)
		next_baseaddr = (void *) eal_get_baseaddr();
#endif
	if (requested_addr == NULL && next_baseaddr != NULL) {
		requested_addr = next_baseaddr;
		requested_addr = RTE_PTR_ALIGN(requested_addr, page_sz);
		addr_is_hint = true;
	}

	/* we don't need alignment of resulting pointer in the following cases:
	 *
	 * 1. page size is equal to system page size
	 * 2. we have a requested address, and it is page-aligned, and we will
	 *    be discarding the address if we get a different one.
	 *
	 * for all other cases, alignment is potentially necessary.
	 */
	no_align = (requested_addr != NULL &&
		requested_addr == RTE_PTR_ALIGN(requested_addr, page_sz) &&
		!addr_is_hint) ||
		page_sz == system_page_sz;

	do {
		map_sz = no_align ? *size : *size + page_sz;
		if (map_sz > SIZE_MAX) {
			RTE_LOG(ERR, EAL, "Map size too big\n");
			rte_errno = E2BIG;
			return NULL;
		}

		mapped_addr = eal_mem_reserve(
			requested_addr, (size_t)map_sz, reserve_flags);
		if ((mapped_addr == NULL) && allow_shrink)
			*size -= page_sz;

		if ((mapped_addr != NULL) && addr_is_hint &&
				(mapped_addr != requested_addr)) {
			try++;
			next_baseaddr = RTE_PTR_ADD(next_baseaddr, page_sz);
			if (try <= MAX_MMAP_WITH_DEFINED_ADDR_TRIES) {
				/* hint was not used. Try with another offset */
				eal_mem_free(mapped_addr, map_sz);
				mapped_addr = NULL;
				requested_addr = next_baseaddr;
			}
		}
	} while ((allow_shrink || addr_is_hint) &&
		(mapped_addr == NULL) && (*size > 0));

	/* align resulting address - if map failed, we will ignore the value
	 * anyway, so no need to add additional checks.
	 */
	aligned_addr = no_align ? mapped_addr :
		RTE_PTR_ALIGN(mapped_addr, page_sz);

	if (*size == 0) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area of any size: %s\n",
			rte_strerror(rte_errno));
		return NULL;
	} else if (mapped_addr == NULL) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area: %s\n",
			rte_strerror(rte_errno));
		return NULL;
	} else if (requested_addr != NULL && !addr_is_hint &&
			aligned_addr != requested_addr) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area at requested address: %p (got %p)\n",
			requested_addr, aligned_addr);
		eal_mem_free(mapped_addr, map_sz);
		rte_errno = EADDRNOTAVAIL;
		return NULL;
	} else if (requested_addr != NULL && addr_is_hint &&
			aligned_addr != requested_addr) {
		RTE_LOG(WARNING, EAL, "WARNING! Base virtual address hint (%p != %p) not respected!\n",
			requested_addr, aligned_addr);
		RTE_LOG(WARNING, EAL, "   This may cause issues with mapping memory into secondary processes\n");
	} else if (next_baseaddr != NULL) {
		next_baseaddr = RTE_PTR_ADD(aligned_addr, *size);
	}

	RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",
		aligned_addr, *size);

	if (unmap) {
		eal_mem_free(mapped_addr, map_sz);
	} else if (!no_align) {
		void *map_end, *aligned_end;
		size_t before_len, after_len;

		/* when we reserve space with alignment, we add alignment to
		 * mapping size. On 32-bit, if 1GB alignment was requested, this
		 * would waste 1GB of address space, which is a luxury we cannot
		 * afford. so, if alignment was performed, check if any unneeded
		 * address space can be unmapped back.
		 */

		map_end = RTE_PTR_ADD(mapped_addr, (size_t)map_sz);
		aligned_end = RTE_PTR_ADD(aligned_addr, *size);

		/* unmap space before aligned mmap address */
		before_len = RTE_PTR_DIFF(aligned_addr, mapped_addr);
		if (before_len > 0)
			eal_mem_free(mapped_addr, before_len);

		/* unmap space after aligned end mmap address */
		after_len = RTE_PTR_DIFF(map_end, aligned_end);
		if (after_len > 0)
			eal_mem_free(aligned_end, after_len);
	}

	if (!unmap) {
		/* Exclude these pages from a core dump. */
		eal_mem_set_dump(aligned_addr, *size, false);
	}

	return aligned_addr;
}
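/*
 * Usage sketch (editor's illustration, not part of the EAL): reserving a
 * 2 MB-aligned virtual area for later hugepage mapping, shrinking on
 * failure, then releasing it. Flags come from eal_private.h.
 *
 *	size_t sz = 64 * RTE_PGSIZE_2M;
 *	void *va = eal_get_virtual_area(NULL, &sz, RTE_PGSIZE_2M,
 *			EAL_VIRTUAL_AREA_ALLOW_SHRINK, 0);
 *	if (va != NULL) {
 *		... map hugepages into [va, va + sz) ...
 *		eal_mem_free(va, sz);
 *	}
 */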
int
eal_memseg_list_init_named(struct rte_memseg_list *msl, const char *name,
		uint64_t page_sz, int n_segs, int socket_id, bool heap)
{
	if (rte_fbarray_init(&msl->memseg_arr, name, n_segs,
			sizeof(struct rte_memseg))) {
		RTE_LOG(ERR, EAL, "Cannot allocate memseg list: %s\n",
			rte_strerror(rte_errno));
		return -1;
	}

	msl->page_sz = page_sz;
	msl->socket_id = socket_id;
	msl->base_va = NULL;
	msl->heap = heap;

	RTE_LOG(DEBUG, EAL,
		"Memseg list allocated at socket %i, page size 0x%"PRIx64"kB\n",
		socket_id, page_sz >> 10);

	return 0;
}
int
eal_memseg_list_init(struct rte_memseg_list *msl, uint64_t page_sz,
		int n_segs, int socket_id, int type_msl_idx, bool heap)
{
	char name[RTE_FBARRAY_NAME_LEN];

	snprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,
		 type_msl_idx);

	return eal_memseg_list_init_named(
		msl, name, page_sz, n_segs, socket_id, heap);
}
int
eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags)
{
	size_t page_sz, mem_sz;
	void *addr;

	page_sz = msl->page_sz;
	mem_sz = page_sz * msl->memseg_arr.len;

	addr = eal_get_virtual_area(
		msl->base_va, &mem_sz, page_sz, 0, reserve_flags);
	if (addr == NULL) {
#ifndef RTE_EXEC_ENV_WINDOWS
		/* The hint would be misleading on Windows, because address
		 * is by default system-selected (base VA = 0).
		 * However, this function is called from many places,
		 * including common code, so don't duplicate the message.
		 */
		if (rte_errno == EADDRNOTAVAIL)
			RTE_LOG(ERR, EAL, "Cannot reserve %llu bytes at [%p] - "
				"please use '--" OPT_BASE_VIRTADDR "' option\n",
				(unsigned long long)mem_sz, msl->base_va);
#endif
		return -1;
	}

	msl->base_va = addr;
	msl->len = mem_sz;

	RTE_LOG(DEBUG, EAL, "VA reserved for memseg list at %p, size %zx\n",
			addr, mem_sz);

	return 0;
}
void
eal_memseg_list_populate(struct rte_memseg_list *msl, void *addr, int n_segs)
{
	size_t page_sz = msl->page_sz;
	int i;

	for (i = 0; i < n_segs; i++) {
		struct rte_fbarray *arr = &msl->memseg_arr;
		struct rte_memseg *ms = rte_fbarray_get(arr, i);

		if (rte_eal_iova_mode() == RTE_IOVA_VA)
			ms->iova = (uintptr_t)addr;
		else
			ms->iova = RTE_BAD_IOVA;
		ms->addr = addr;
		ms->hugepage_sz = page_sz;
		ms->socket_id = 0;
		ms->len = page_sz;

		rte_fbarray_set_used(arr, i);

		addr = RTE_PTR_ADD(addr, page_sz);
	}
}
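/*
 * Editor's sketch of how the three helpers above compose in the
 * platform-specific init paths, given the shared mem config "mcfg"
 * (names and sizes are illustrative only):
 *
 *	struct rte_memseg_list *msl = &mcfg->memsegs[0];
 *
 *	if (eal_memseg_list_init(msl, RTE_PGSIZE_2M, n_segs,
 *			socket_id, 0, true) < 0)
 *		return -1;
 *	if (eal_memseg_list_alloc(msl, 0) < 0)
 *		return -1;
 *	... in legacy mode, pages are mapped up front and marked used:
 *	eal_memseg_list_populate(msl, msl->base_va, n_segs);
 */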
static struct rte_memseg *
virt2memseg(const void *addr, const struct rte_memseg_list *msl)
{
	const struct rte_fbarray *arr;
	void *start, *end;
	int ms_idx;

	if (msl == NULL)
		return NULL;

	/* a memseg list was specified, check if it's the right one */
	start = msl->base_va;
	end = RTE_PTR_ADD(start, msl->len);

	if (addr < start || addr >= end)
		return NULL;

	/* now, calculate index */
	arr = &msl->memseg_arr;
	ms_idx = RTE_PTR_DIFF(addr, msl->base_va) / msl->page_sz;
	return rte_fbarray_get(arr, ms_idx);
}
static struct rte_memseg_list *
virt2memseg_list(const void *addr)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	int msl_idx;

	for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
		void *start, *end;

		msl = &mcfg->memsegs[msl_idx];

		start = msl->base_va;
		end = RTE_PTR_ADD(start, msl->len);
		if (addr >= start && addr < end)
			break;
	}
	/* if we didn't find our memseg list */
	if (msl_idx == RTE_MAX_MEMSEG_LISTS)
		return NULL;
	return msl;
}

struct rte_memseg_list *
rte_mem_virt2memseg_list(const void *addr)
{
	return virt2memseg_list(addr);
}
struct virtiova {
	rte_iova_t iova;
	void *virt;
};

static int
find_virt(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, void *arg)
{
	struct virtiova *vi = arg;

	if (vi->iova >= ms->iova && vi->iova < (ms->iova + ms->len)) {
		size_t offset = vi->iova - ms->iova;

		vi->virt = RTE_PTR_ADD(ms->addr, offset);
		/* stop the walk */
		return 1;
	}
	return 0;
}

static int
find_virt_legacy(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, size_t len, void *arg)
{
	struct virtiova *vi = arg;

	if (vi->iova >= ms->iova && vi->iova < (ms->iova + len)) {
		size_t offset = vi->iova - ms->iova;

		vi->virt = RTE_PTR_ADD(ms->addr, offset);
		/* stop the walk */
		return 1;
	}
	return 0;
}
void *
rte_mem_iova2virt(rte_iova_t iova)
{
	struct virtiova vi;
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	memset(&vi, 0, sizeof(vi));

	vi.iova = iova;
	/* for legacy mem, we can get away with scanning VA-contiguous segments,
	 * as we know they are PA-contiguous as well
	 */
	if (internal_conf->legacy_mem)
		rte_memseg_contig_walk(find_virt_legacy, &vi);
	else
		rte_memseg_walk(find_virt, &vi);

	return vi.virt;
}
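/*
 * Editor's example: within DPDK-managed memory, rte_mem_virt2iova()
 * (declared in rte_memory.h, implemented per-OS) and rte_mem_iova2virt()
 * invert each other:
 *
 *	void *obj = rte_malloc(NULL, 64, 0);
 *	rte_iova_t iova = rte_mem_virt2iova(obj);
 *	if (iova != RTE_BAD_IOVA && rte_mem_iova2virt(iova) != obj)
 *		... would indicate an inconsistent segment table ...
 *	rte_free(obj);
 */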
struct rte_memseg *
rte_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl)
{
	return virt2memseg(addr, msl != NULL ? msl :
			rte_mem_virt2memseg_list(addr));
}
static int
physmem_size(const struct rte_memseg_list *msl, void *arg)
{
	uint64_t *total_len = arg;

	if (msl->external)
		return 0;

	*total_len += msl->memseg_arr.count * msl->page_sz;

	return 0;
}
/* get the total size of memory */
uint64_t
rte_eal_get_physmem_size(void)
{
	uint64_t total_len = 0;

	rte_memseg_list_walk(physmem_size, &total_len);

	return total_len;
}
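/*
 * Example: report how much memory EAL currently manages (external
 * segments are excluded by the callback above):
 *
 *	printf("EAL-managed memory: %" PRIu64 " bytes\n",
 *		rte_eal_get_physmem_size());
 */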
static int
dump_memseg(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
		void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int msl_idx, ms_idx, fd;
	FILE *f = arg;

	msl_idx = msl - mcfg->memsegs;
	if (msl_idx < 0 || msl_idx >= RTE_MAX_MEMSEG_LISTS)
		return -1;

	ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
	if (ms_idx < 0)
		return -1;

	fd = eal_memalloc_get_seg_fd(msl_idx, ms_idx);
	fprintf(f, "Segment %i-%i: IOVA:0x%"PRIx64", len:%zu, "
			"virt:%p, socket_id:%"PRId32", "
			"hugepage_sz:%"PRIu64", nchannel:%"PRIx32", "
			"nrank:%"PRIx32" fd:%i\n",
			msl_idx, ms_idx,
			ms->iova,
			ms->len,
			ms->addr,
			ms->socket_id,
			ms->hugepage_sz,
			ms->nchannel,
			ms->nrank,
			fd);

	return 0;
}
/*
 * Defining here because declared in rte_memory.h, but the actual implementation
 * is in eal_common_memalloc.c, like all other memalloc internals.
 */
int
rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb,
		void *arg)
{
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_conf->legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Registering mem event callbacks not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_event_callback_register(name, clb, arg);
}
int
rte_mem_event_callback_unregister(const char *name, void *arg)
{
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_conf->legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Unregistering mem event callbacks not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_event_callback_unregister(name, arg);
}
int
rte_mem_alloc_validator_register(const char *name,
		rte_mem_alloc_validator_t clb, int socket_id, size_t limit)
{
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_conf->legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Registering mem alloc validators not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_alloc_validator_register(name, clb, socket_id,
			limit);
}
int
rte_mem_alloc_validator_unregister(const char *name, int socket_id)
{
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_conf->legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Unregistering mem alloc validators not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_alloc_validator_unregister(name, socket_id);
}
/* Dump the physical memory layout on console */
void
rte_dump_physmem_layout(FILE *f)
{
	rte_memseg_walk(dump_memseg, f);
}
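/*
 * Example: applications typically call rte_dump_physmem_layout(stdout)
 * once after rte_eal_init() when debugging memory setup; each used
 * segment is printed in the "Segment <list>-<idx>: IOVA:..." format
 * produced by dump_memseg() above.
 */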
static int
check_iova(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, void *arg)
{
	uint64_t *mask = arg;
	rte_iova_t iova;

	/* higher address within segment */
	iova = (ms->iova + ms->len) - 1;
	if (!(iova & *mask))
		return 0;

	RTE_LOG(DEBUG, EAL, "memseg iova %"PRIx64", len %zx, out of range\n",
			ms->iova, ms->len);
	RTE_LOG(DEBUG, EAL, "\tusing dma mask %"PRIx64"\n", *mask);

	/* stop the walk and report a failing segment */
	return 1;
}
#define MAX_DMA_MASK_BITS 63

/* check memseg iovas are within the required range based on dma mask */
static int
check_dma_mask(uint8_t maskbits, bool thread_unsafe)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	uint64_t mask;
	int ret;

	/* Sanity check: we only handle mask widths that fit in a 64-bit
	 * variable; any higher value is almost certainly wrong.
	 */
	if (maskbits > MAX_DMA_MASK_BITS) {
		RTE_LOG(ERR, EAL, "wrong dma mask size %u (Max: %u)\n",
			maskbits, MAX_DMA_MASK_BITS);
		return -1;
	}

	/* create dma mask */
	mask = ~((1ULL << maskbits) - 1);

	if (thread_unsafe)
		ret = rte_memseg_walk_thread_unsafe(check_iova, &mask);
	else
		ret = rte_memseg_walk(check_iova, &mask);

	if (ret) {
		/*
		 * Dma mask precludes hugepage usage.
		 * This device can not be used and we do not need to keep
		 * the dma mask.
		 */
		return 1;
	}

	/*
	 * we need to keep the more restricted maskbit for checking
	 * potential dynamic memory allocation in the future.
	 */
	mcfg->dma_maskbits = mcfg->dma_maskbits == 0 ? maskbits :
			RTE_MIN(mcfg->dma_maskbits, maskbits);

	return 0;
}
int
rte_mem_check_dma_mask(uint8_t maskbits)
{
	return check_dma_mask(maskbits, false);
}

int
rte_mem_check_dma_mask_thread_unsafe(uint8_t maskbits)
{
	return check_dma_mask(maskbits, true);
}
/*
 * Set dma mask to use when memory initialization is done.
 *
 * This function should ONLY be used by code executed before the memory
 * initialization. PMDs should use rte_mem_check_dma_mask if the device
 * has addressing limitations.
 */
void
rte_mem_set_dma_mask(uint8_t maskbits)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;

	mcfg->dma_maskbits = mcfg->dma_maskbits == 0 ? maskbits :
			RTE_MIN(mcfg->dma_maskbits, maskbits);
}
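/*
 * Editor's example: a driver for a device that can only address 40 bits
 * would verify at probe time that all segment IOVAs fit the mask, and
 * bail out otherwise (logging and error code are the driver's choice):
 *
 *	if (rte_mem_check_dma_mask(40) != 0) {
 *		... log "memory exceeds the 40-bit DMA range" ...
 *		return -ENODEV;
 *	}
 */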
/* return the number of memory channels */
unsigned rte_memory_get_nchannel(void)
{
	return rte_eal_get_configuration()->mem_config->nchannel;
}

/* return the number of memory ranks */
unsigned rte_memory_get_nrank(void)
{
	return rte_eal_get_configuration()->mem_config->nrank;
}
static int
rte_eal_memdevice_init(void)
{
	struct rte_config *config;
	const struct internal_config *internal_conf;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	internal_conf = eal_get_internal_configuration();
	config = rte_eal_get_configuration();
	config->mem_config->nchannel = internal_conf->force_nchannel;
	config->mem_config->nrank = internal_conf->force_nrank;

	return 0;
}
/* Lock page in physical memory and prevent it from being swapped out. */
int
rte_mem_lock_page(const void *virt)
{
	uintptr_t virtual = (uintptr_t)virt;
	size_t page_size = rte_mem_page_size();
	uintptr_t aligned = RTE_PTR_ALIGN_FLOOR(virtual, page_size);

	return rte_mem_lock((void *)aligned, page_size);
}
int
rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, ms_idx, ret = 0;

	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];
		const struct rte_memseg *ms;
		struct rte_fbarray *arr;

		if (msl->memseg_arr.count == 0)
			continue;

		arr = &msl->memseg_arr;

		ms_idx = rte_fbarray_find_next_used(arr, 0);
		while (ms_idx >= 0) {
			int n_segs;
			size_t len;

			ms = rte_fbarray_get(arr, ms_idx);

			/* find how many more segments there are, starting with
			 * this one.
			 */
			n_segs = rte_fbarray_find_contig_used(arr, ms_idx);
			len = n_segs * msl->page_sz;

			ret = func(msl, ms, len, arg);
			if (ret)
				return ret;

			ms_idx = rte_fbarray_find_next_used(arr,
					ms_idx + n_segs);
		}
	}
	return 0;
}
int
rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
{
	int ret;

	/* do not allow allocations/frees/init while we iterate */
	rte_mcfg_mem_read_lock();
	ret = rte_memseg_contig_walk_thread_unsafe(func, arg);
	rte_mcfg_mem_read_unlock();

	return ret;
}
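/*
 * Editor's example of a contig-walk callback (returning nonzero from the
 * callback stops the walk early): summing the lengths of all
 * VA-contiguous runs of used segments.
 *
 *	static int
 *	sum_len(const struct rte_memseg_list *msl __rte_unused,
 *			const struct rte_memseg *ms __rte_unused,
 *			size_t len, void *arg)
 *	{
 *		*(size_t *)arg += len;
 *		return 0;
 *	}
 *
 *	size_t total = 0;
 *	rte_memseg_contig_walk(sum_len, &total);
 */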
int
rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, ms_idx, ret = 0;

	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];
		const struct rte_memseg *ms;
		struct rte_fbarray *arr;

		if (msl->memseg_arr.count == 0)
			continue;

		arr = &msl->memseg_arr;

		ms_idx = rte_fbarray_find_next_used(arr, 0);
		while (ms_idx >= 0) {
			ms = rte_fbarray_get(arr, ms_idx);
			ret = func(msl, ms, arg);
			if (ret)
				return ret;
			ms_idx = rte_fbarray_find_next_used(arr, ms_idx + 1);
		}
	}
	return 0;
}
int
rte_memseg_walk(rte_memseg_walk_t func, void *arg)
{
	int ret;

	/* do not allow allocations/frees/init while we iterate */
	rte_mcfg_mem_read_lock();
	ret = rte_memseg_walk_thread_unsafe(func, arg);
	rte_mcfg_mem_read_unlock();

	return ret;
}
int
rte_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, ret = 0;

	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];

		if (msl->base_va == NULL)
			continue;

		ret = func(msl, arg);
		if (ret)
			return ret;
	}
	return 0;
}
int
rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg)
{
	int ret;

	/* do not allow allocations/frees/init while we iterate */
	rte_mcfg_mem_read_lock();
	ret = rte_memseg_list_walk_thread_unsafe(func, arg);
	rte_mcfg_mem_read_unlock();

	return ret;
}
int
rte_memseg_get_fd_thread_unsafe(const struct rte_memseg *ms)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	struct rte_fbarray *arr;
	int msl_idx, seg_idx, ret;

	if (ms == NULL) {
		rte_errno = EINVAL;
		return -1;
	}

	msl = rte_mem_virt2memseg_list(ms->addr);
	if (msl == NULL) {
		rte_errno = EINVAL;
		return -1;
	}
	arr = &msl->memseg_arr;

	msl_idx = msl - mcfg->memsegs;
	seg_idx = rte_fbarray_find_idx(arr, ms);

	if (!rte_fbarray_is_used(arr, seg_idx)) {
		rte_errno = ENOENT;
		return -1;
	}

	/* segment fd API is not supported for external segments */
	if (msl->external) {
		rte_errno = ENOTSUP;
		return -1;
	}

	ret = eal_memalloc_get_seg_fd(msl_idx, seg_idx);
	if (ret < 0) {
		rte_errno = -ret;
		ret = -1;
	}
	return ret;
}
int
rte_memseg_get_fd(const struct rte_memseg *ms)
{
	int ret;

	rte_mcfg_mem_read_lock();
	ret = rte_memseg_get_fd_thread_unsafe(ms);
	rte_mcfg_mem_read_unlock();

	return ret;
}
int
rte_memseg_get_fd_offset_thread_unsafe(const struct rte_memseg *ms,
		size_t *offset)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	struct rte_fbarray *arr;
	int msl_idx, seg_idx, ret;

	if (ms == NULL || offset == NULL) {
		rte_errno = EINVAL;
		return -1;
	}

	msl = rte_mem_virt2memseg_list(ms->addr);
	if (msl == NULL) {
		rte_errno = EINVAL;
		return -1;
	}
	arr = &msl->memseg_arr;

	msl_idx = msl - mcfg->memsegs;
	seg_idx = rte_fbarray_find_idx(arr, ms);

	if (!rte_fbarray_is_used(arr, seg_idx)) {
		rte_errno = ENOENT;
		return -1;
	}

	/* segment fd API is not supported for external segments */
	if (msl->external) {
		rte_errno = ENOTSUP;
		return -1;
	}

	ret = eal_memalloc_get_seg_fd_offset(msl_idx, seg_idx, offset);
	if (ret < 0) {
		rte_errno = -ret;
		ret = -1;
	}
	return ret;
}
int
rte_memseg_get_fd_offset(const struct rte_memseg *ms, size_t *offset)
{
	int ret;

	rte_mcfg_mem_read_lock();
	ret = rte_memseg_get_fd_offset_thread_unsafe(ms, offset);
	rte_mcfg_mem_read_unlock();

	return ret;
}
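/*
 * Editor's example: exporting a segment to another process by passing
 * its file descriptor and offset (e.g. over a Unix domain socket with
 * SCM_RIGHTS):
 *
 *	struct rte_memseg *ms = rte_mem_virt2memseg(addr, NULL);
 *	size_t offset;
 *	int fd = rte_memseg_get_fd(ms);
 *	if (fd >= 0 && rte_memseg_get_fd_offset(ms, &offset) == 0)
 *		... send fd and offset to the peer ...
 */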
int
rte_extmem_register(void *va_addr, size_t len, rte_iova_t iova_addrs[],
		unsigned int n_pages, size_t page_sz)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	unsigned int socket_id, n;
	int ret = 0;

	if (va_addr == NULL || page_sz == 0 || len == 0 ||
			!rte_is_power_of_2(page_sz) ||
			RTE_ALIGN(len, page_sz) != len ||
			((len / page_sz) != n_pages && iova_addrs != NULL) ||
			!rte_is_aligned(va_addr, page_sz)) {
		rte_errno = EINVAL;
		return -1;
	}
	rte_mcfg_mem_write_lock();

	/* make sure the segment doesn't already exist */
	if (malloc_heap_find_external_seg(va_addr, len) != NULL) {
		rte_errno = EEXIST;
		ret = -1;
		goto unlock;
	}

	/* get next available socket ID */
	socket_id = mcfg->next_socket_id;
	if (socket_id > INT32_MAX) {
		RTE_LOG(ERR, EAL, "Cannot assign new socket IDs\n");
		rte_errno = ENOSPC;
		ret = -1;
		goto unlock;
	}

	/* we can create a new memseg */
	n = len / page_sz;
	if (malloc_heap_create_external_seg(va_addr, iova_addrs, n,
			page_sz, "extmem", socket_id) == NULL) {
		ret = -1;
		goto unlock;
	}

	/* memseg list successfully created - increment next socket ID */
	mcfg->next_socket_id++;
unlock:
	rte_mcfg_mem_write_unlock();

	return ret;
}
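/*
 * Editor's example: registering an anonymous hugepage mapping with EAL
 * so it can later back an external malloc heap (IOVAs unknown, hence
 * NULL; the mapping flags below are illustrative):
 *
 *	size_t len = 16 * RTE_PGSIZE_2M;
 *	void *va = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *	if (va != MAP_FAILED &&
 *			rte_extmem_register(va, len, NULL, 0,
 *				RTE_PGSIZE_2M) == 0)
 *		... memory now has its own socket ID ...
 */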
int
rte_extmem_unregister(void *va_addr, size_t len)
{
	struct rte_memseg_list *msl;
	int ret = 0;

	if (va_addr == NULL || len == 0) {
		rte_errno = EINVAL;
		return -1;
	}
	rte_mcfg_mem_write_lock();

	/* find our segment */
	msl = malloc_heap_find_external_seg(va_addr, len);
	if (msl == NULL) {
		rte_errno = ENOENT;
		ret = -1;
		goto unlock;
	}

	ret = malloc_heap_destroy_external_seg(msl);
unlock:
	rte_mcfg_mem_write_unlock();
	return ret;
}
static int
sync_memory(void *va_addr, size_t len, bool attach)
{
	struct rte_memseg_list *msl;
	int ret = 0;

	if (va_addr == NULL || len == 0) {
		rte_errno = EINVAL;
		return -1;
	}
	rte_mcfg_mem_write_lock();

	/* find our segment */
	msl = malloc_heap_find_external_seg(va_addr, len);
	if (msl == NULL) {
		rte_errno = ENOENT;
		ret = -1;
		goto unlock;
	}
	if (attach)
		ret = rte_fbarray_attach(&msl->memseg_arr);
	else
		ret = rte_fbarray_detach(&msl->memseg_arr);

unlock:
	rte_mcfg_mem_write_unlock();
	return ret;
}

int
rte_extmem_attach(void *va_addr, size_t len)
{
	return sync_memory(va_addr, len, true);
}

int
rte_extmem_detach(void *va_addr, size_t len)
{
	return sync_memory(va_addr, len, false);
}
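/*
 * Editor's sketch of the multi-process flow for external memory: the
 * primary registers the range, the secondary attaches the same range
 * before using it, and teardown happens in reverse order (allocation
 * from the memory goes through the rte_malloc heap APIs):
 *
 *	primary:   rte_extmem_register(va, len, NULL, 0, page_sz);
 *	secondary: rte_extmem_attach(va, len);
 *	...
 *	secondary: rte_extmem_detach(va, len);
 *	primary:   rte_extmem_unregister(va, len);
 */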
/* detach all EAL memory */
int
rte_eal_memory_detach(void)
{
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	size_t page_sz = rte_mem_page_size();
	unsigned int i;

	if (internal_conf->in_memory == 1)
		return 0;

	rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);

	/* detach internal memory subsystem data first */
	if (eal_memalloc_cleanup())
		RTE_LOG(ERR, EAL, "Could not release memory subsystem data\n");

	for (i = 0; i < RTE_DIM(mcfg->memsegs); i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];

		/* skip uninitialized segments */
		if (msl->base_va == NULL)
			continue;
		/*
		 * external segments are supposed to be detached at this point,
		 * but if they aren't, we can't really do anything about it,
		 * because if we skip them here, they'll become invalid after
		 * we unmap the memconfig anyway. however, if this is externally
		 * referenced memory, we have no business unmapping it.
		 */
		if (!msl->external)
			if (rte_mem_unmap(msl->base_va, msl->len) != 0)
				RTE_LOG(ERR, EAL, "Could not unmap memory: %s\n",
						rte_strerror(rte_errno));

		/*
		 * we are detaching the fbarray rather than destroying because
		 * other processes might still reference this fbarray, and we
		 * have no way of knowing if they still do.
		 */
		if (rte_fbarray_detach(&msl->memseg_arr))
			RTE_LOG(ERR, EAL, "Could not detach fbarray: %s\n",
					rte_strerror(rte_errno));
	}
	rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);

	/*
	 * we've detached the memseg lists, so we can unmap the shared mem
	 * config - we can't zero it out because it might still be referenced
	 * by other processes.
	 */
	if (internal_conf->no_shconf == 0 && mcfg->mem_cfg_addr != 0) {
		if (rte_mem_unmap(mcfg, RTE_ALIGN(sizeof(*mcfg), page_sz)) != 0)
			RTE_LOG(ERR, EAL, "Could not unmap shared memory config: %s\n",
					rte_strerror(rte_errno));
	}
	rte_eal_get_configuration()->mem_config = NULL;

	return 0;
}
/* init memory subsystem */
int
rte_eal_memory_init(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();
	int retval;

	RTE_LOG(DEBUG, EAL, "Setting up physically contiguous memory...\n");

	if (mcfg == NULL)
		return -1;

	/* lock mem hotplug here, to prevent races while we init */
	rte_mcfg_mem_read_lock();

	if (rte_eal_memseg_init() < 0)
		goto fail;

	if (eal_memalloc_init() < 0)
		goto fail;

	retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
			rte_eal_hugepage_init() :
			rte_eal_hugepage_attach();
	if (retval < 0)
		goto fail;

	if (internal_conf->no_shconf == 0 && rte_eal_memdevice_init() < 0)
		goto fail;

	return 0;
fail:
	rte_mcfg_mem_read_unlock();
	return -1;
}
#ifndef RTE_EXEC_ENV_WINDOWS
#define EAL_MEMZONE_LIST_REQ "/eal/memzone_list"
#define EAL_MEMZONE_INFO_REQ "/eal/memzone_info"
#define EAL_HEAP_LIST_REQ "/eal/heap_list"
#define EAL_HEAP_INFO_REQ "/eal/heap_info"
/* buffer large enough to hold a pointer rendered with "%p" */
#define ADDR_STR 15
/* Telemetry callback handler to return heap stats for requested heap id. */
static int
handle_eal_heap_info_request(const char *cmd __rte_unused, const char *params,
		struct rte_tel_data *d)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_malloc_socket_stats sock_stats;
	struct malloc_heap *heap;
	unsigned int heap_id;

	if (params == NULL || strlen(params) == 0)
		return -1;

	heap_id = (unsigned int)strtoul(params, NULL, 10);
	if (heap_id >= RTE_MAX_HEAPS)
		return -1;

	/* Get the heap stats of user provided heap id */
	heap = &mcfg->malloc_heaps[heap_id];
	malloc_heap_get_stats(heap, &sock_stats);

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_int(d, "Heap_id", heap_id);
	rte_tel_data_add_dict_string(d, "Name", heap->name);
	rte_tel_data_add_dict_u64(d, "Heap_size",
		sock_stats.heap_totalsz_bytes);
	rte_tel_data_add_dict_u64(d, "Free_size", sock_stats.heap_freesz_bytes);
	rte_tel_data_add_dict_u64(d, "Alloc_size",
		sock_stats.heap_allocsz_bytes);
	rte_tel_data_add_dict_u64(d, "Greatest_free_size",
		sock_stats.greatest_free_size);
	rte_tel_data_add_dict_u64(d, "Alloc_count", sock_stats.alloc_count);
	rte_tel_data_add_dict_u64(d, "Free_count", sock_stats.free_count);

	return 0;
}
/* Telemetry callback handler to list the IDs of all initialised heaps. */
static int
handle_eal_heap_list_request(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_malloc_socket_stats sock_stats;
	unsigned int heap_id;

	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	/* Iterate through all initialised heaps */
	for (heap_id = 0; heap_id < RTE_MAX_HEAPS; heap_id++) {
		struct malloc_heap *heap = &mcfg->malloc_heaps[heap_id];

		malloc_heap_get_stats(heap, &sock_stats);
		if (sock_stats.heap_totalsz_bytes != 0)
			rte_tel_data_add_array_int(d, heap_id);
	}

	return 0;
}
/* Telemetry callback handler to return memzone info for requested index. */
static int
handle_eal_memzone_info_request(const char *cmd __rte_unused,
		const char *params, struct rte_tel_data *d)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl = NULL;
	int ms_idx, ms_count = 0;
	void *cur_addr, *mz_end;
	struct rte_memzone *mz;
	struct rte_memseg *ms;
	char addr[ADDR_STR];
	unsigned int mz_idx;
	size_t page_sz;

	if (params == NULL || strlen(params) == 0)
		return -1;

	mz_idx = strtoul(params, NULL, 10);

	/* Get the memzone handle using index */
	mz = rte_fbarray_get(&mcfg->memzones, mz_idx);
	if (mz == NULL)
		return -1;

	rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_int(d, "Zone", mz_idx);
	rte_tel_data_add_dict_string(d, "Name", mz->name);
	rte_tel_data_add_dict_int(d, "Length", mz->len);
	snprintf(addr, ADDR_STR, "%p", mz->addr);
	rte_tel_data_add_dict_string(d, "Address", addr);
	rte_tel_data_add_dict_int(d, "Socket", mz->socket_id);
	rte_tel_data_add_dict_int(d, "Flags", mz->flags);

	/* go through each page occupied by this memzone */
	msl = rte_mem_virt2memseg_list(mz->addr);
	if (msl == NULL) {
		RTE_LOG(DEBUG, EAL, "Skipping bad memzone\n");
		return -1;
	}
	page_sz = (size_t)mz->hugepage_sz;
	cur_addr = RTE_PTR_ALIGN_FLOOR(mz->addr, page_sz);
	mz_end = RTE_PTR_ADD(cur_addr, mz->len);

	ms_idx = RTE_PTR_DIFF(mz->addr, msl->base_va) / page_sz;
	ms = rte_fbarray_get(&msl->memseg_arr, ms_idx);

	rte_tel_data_add_dict_int(d, "Hugepage_size", page_sz);
	snprintf(addr, ADDR_STR, "%p", ms->addr);
	rte_tel_data_add_dict_string(d, "Hugepage_base", addr);

	do {
		/* advance VA to next page */
		cur_addr = RTE_PTR_ADD(cur_addr, page_sz);

		/* memzones occupy contiguous segments */
		++ms;
		ms_count++;
	} while (cur_addr < mz_end);

	rte_tel_data_add_dict_int(d, "Hugepage_used", ms_count);

	return 0;
}
static void
memzone_list_cb(const struct rte_memzone *mz, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_tel_data *d = arg;
	int mz_idx;

	mz_idx = rte_fbarray_find_idx(&mcfg->memzones, mz);
	rte_tel_data_add_array_int(d, mz_idx);
}
/* Telemetry callback handler to list the reserved memzones. */
static int
handle_eal_memzone_list_request(const char *cmd __rte_unused,
		const char *params __rte_unused,
		struct rte_tel_data *d)
{
	rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
	rte_memzone_walk(memzone_list_cb, d);

	return 0;
}
RTE_INIT(memory_telemetry)
{
	rte_telemetry_register_cmd(
			EAL_MEMZONE_LIST_REQ, handle_eal_memzone_list_request,
			"Returns the list of reserved memzone indexes. Takes no parameters");
	rte_telemetry_register_cmd(
			EAL_MEMZONE_INFO_REQ, handle_eal_memzone_info_request,
			"Returns memzone info. Parameters: int mz_id");
	rte_telemetry_register_cmd(
			EAL_HEAP_LIST_REQ, handle_eal_heap_list_request,
			"Returns the list of initialised heap IDs. Takes no parameters");
	rte_telemetry_register_cmd(
			EAL_HEAP_INFO_REQ, handle_eal_heap_info_request,
			"Returns malloc heap stats. Parameters: int heap_id");
}
#endif /* !RTE_EXEC_ENV_WINDOWS */