1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
14 #include <sys/queue.h>
16 #include <rte_fbarray.h>
17 #include <rte_memory.h>
19 #include <rte_eal_memconfig.h>
20 #include <rte_eal_paging.h>
21 #include <rte_errno.h>
23 #ifndef RTE_EXEC_ENV_WINDOWS
24 #include <rte_telemetry.h>
27 #include "eal_memalloc.h"
28 #include "eal_private.h"
29 #include "eal_internal_cfg.h"
30 #include "eal_memcfg.h"
31 #include "eal_options.h"
32 #include "malloc_heap.h"
 * Try to reserve *size bytes of virtual address space. If successful, return
 * the pointer to the reserved area and keep *size unmodified. Otherwise, if
 * shrinking is allowed, retry with a smaller zone: decrease *size by page_sz
 * until it reaches 0, and in that case return NULL. Note: this function
 * returns an address which is a multiple of the requested page size.
42 #define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"
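/* e.g. the first 2MB-page memseg list on socket 0 is named "memseg-2048k-0-0" */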
44 static void *next_baseaddr;
45 static uint64_t system_page_sz;
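/* number of times to retry a hinted mapping at a bumped-up address before
 * giving up on the hint and accepting whatever address we get
 */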
47 #define MAX_MMAP_WITH_DEFINED_ADDR_TRIES 5
49 eal_get_virtual_area(void *requested_addr, size_t *size,
50 size_t page_sz, int flags, int reserve_flags)
52 bool addr_is_hint, allow_shrink, unmap, no_align;
54 void *mapped_addr, *aligned_addr;
56 struct internal_config *internal_conf =
57 eal_get_internal_configuration();
59 if (system_page_sz == 0)
60 system_page_sz = rte_mem_page_size();
	RTE_LOG(DEBUG, EAL, "Asking for a virtual area of 0x%zx bytes\n", *size);
64 addr_is_hint = (flags & EAL_VIRTUAL_AREA_ADDR_IS_HINT) > 0;
65 allow_shrink = (flags & EAL_VIRTUAL_AREA_ALLOW_SHRINK) > 0;
66 unmap = (flags & EAL_VIRTUAL_AREA_UNMAP) > 0;
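	/* seed the rolling address hint: prefer a user-supplied --base-virtaddr,
	 * otherwise fall back to the platform default base address.
	 */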
68 if (next_baseaddr == NULL && internal_conf->base_virtaddr != 0 &&
69 rte_eal_process_type() == RTE_PROC_PRIMARY)
70 next_baseaddr = (void *) internal_conf->base_virtaddr;
73 if (next_baseaddr == NULL && internal_conf->base_virtaddr == 0 &&
74 rte_eal_process_type() == RTE_PROC_PRIMARY)
75 next_baseaddr = (void *) eal_get_baseaddr();
77 if (requested_addr == NULL && next_baseaddr != NULL) {
78 requested_addr = next_baseaddr;
79 requested_addr = RTE_PTR_ALIGN(requested_addr, page_sz);
	/* we don't need alignment of the resulting pointer in the following cases:
	 * 1. page size is equal to the system page size
	 * 2. we have a requested address, and it is page-aligned, and we will
	 *    be discarding the address if we get a different one.
	 * for all other cases, alignment is potentially necessary.
91 no_align = (requested_addr != NULL &&
92 requested_addr == RTE_PTR_ALIGN(requested_addr, page_sz) &&
94 page_sz == system_page_sz;
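	/* if alignment may be needed, reserve one extra page so that an address
	 * aligned to page_sz is guaranteed to fit inside the reservation.
	 */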
97 map_sz = no_align ? *size : *size + page_sz;
98 if (map_sz > SIZE_MAX) {
99 RTE_LOG(ERR, EAL, "Map size too big\n");
104 mapped_addr = eal_mem_reserve(
105 requested_addr, (size_t)map_sz, reserve_flags);
106 if ((mapped_addr == NULL) && allow_shrink)
109 if ((mapped_addr != NULL) && addr_is_hint &&
110 (mapped_addr != requested_addr)) {
112 next_baseaddr = RTE_PTR_ADD(next_baseaddr, page_sz);
113 if (try <= MAX_MMAP_WITH_DEFINED_ADDR_TRIES) {
114 /* hint was not used. Try with another offset */
115 eal_mem_free(mapped_addr, map_sz);
117 requested_addr = next_baseaddr;
120 } while ((allow_shrink || addr_is_hint) &&
121 (mapped_addr == NULL) && (*size > 0));
123 /* align resulting address - if map failed, we will ignore the value
124 * anyway, so no need to add additional checks.
126 aligned_addr = no_align ? mapped_addr :
127 RTE_PTR_ALIGN(mapped_addr, page_sz);
130 RTE_LOG(ERR, EAL, "Cannot get a virtual area of any size: %s\n",
131 rte_strerror(rte_errno));
133 } else if (mapped_addr == NULL) {
134 RTE_LOG(ERR, EAL, "Cannot get a virtual area: %s\n",
135 rte_strerror(rte_errno));
137 } else if (requested_addr != NULL && !addr_is_hint &&
138 aligned_addr != requested_addr) {
139 RTE_LOG(ERR, EAL, "Cannot get a virtual area at requested address: %p (got %p)\n",
140 requested_addr, aligned_addr);
141 eal_mem_free(mapped_addr, map_sz);
142 rte_errno = EADDRNOTAVAIL;
144 } else if (requested_addr != NULL && addr_is_hint &&
145 aligned_addr != requested_addr) {
147 * demote this warning to debug if we did not explicitly request
148 * a base virtual address.
150 if (internal_conf->base_virtaddr != 0) {
151 RTE_LOG(WARNING, EAL, "WARNING! Base virtual address hint (%p != %p) not respected!\n",
152 requested_addr, aligned_addr);
153 RTE_LOG(WARNING, EAL, " This may cause issues with mapping memory into secondary processes\n");
155 RTE_LOG(DEBUG, EAL, "WARNING! Base virtual address hint (%p != %p) not respected!\n",
156 requested_addr, aligned_addr);
157 RTE_LOG(DEBUG, EAL, " This may cause issues with mapping memory into secondary processes\n");
159 } else if (next_baseaddr != NULL) {
160 next_baseaddr = RTE_PTR_ADD(aligned_addr, *size);
163 RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",
164 aligned_addr, *size);
167 eal_mem_free(mapped_addr, map_sz);
168 } else if (!no_align) {
169 void *map_end, *aligned_end;
170 size_t before_len, after_len;
		/* when we reserve space with alignment, we add the alignment to
		 * the mapping size. On 32-bit, if 1GB alignment was requested,
		 * this would waste 1GB of address space, which is a luxury we
		 * cannot afford. So, if alignment was performed, check whether
		 * any unneeded address space can be unmapped again.
179 map_end = RTE_PTR_ADD(mapped_addr, (size_t)map_sz);
180 aligned_end = RTE_PTR_ADD(aligned_addr, *size);
182 /* unmap space before aligned mmap address */
183 before_len = RTE_PTR_DIFF(aligned_addr, mapped_addr);
185 eal_mem_free(mapped_addr, before_len);
187 /* unmap space after aligned end mmap address */
188 after_len = RTE_PTR_DIFF(map_end, aligned_end);
190 eal_mem_free(aligned_end, after_len);
194 /* Exclude these pages from a core dump. */
195 eal_mem_set_dump(aligned_addr, *size, false);
202 eal_memseg_list_init_named(struct rte_memseg_list *msl, const char *name,
203 uint64_t page_sz, int n_segs, int socket_id, bool heap)
205 if (rte_fbarray_init(&msl->memseg_arr, name, n_segs,
206 sizeof(struct rte_memseg))) {
207 RTE_LOG(ERR, EAL, "Cannot allocate memseg list: %s\n",
208 rte_strerror(rte_errno));
212 msl->page_sz = page_sz;
213 msl->socket_id = socket_id;
218 "Memseg list allocated at socket %i, page size 0x%"PRIx64"kB\n",
219 socket_id, page_sz >> 10);
225 eal_memseg_list_init(struct rte_memseg_list *msl, uint64_t page_sz,
226 int n_segs, int socket_id, int type_msl_idx, bool heap)
228 char name[RTE_FBARRAY_NAME_LEN];
230 snprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,
233 return eal_memseg_list_init_named(
234 msl, name, page_sz, n_segs, socket_id, heap);
238 eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags)
240 size_t page_sz, mem_sz;
243 page_sz = msl->page_sz;
244 mem_sz = page_sz * msl->memseg_arr.len;
246 addr = eal_get_virtual_area(
247 msl->base_va, &mem_sz, page_sz, 0, reserve_flags);
249 #ifndef RTE_EXEC_ENV_WINDOWS
250 /* The hint would be misleading on Windows, because address
251 * is by default system-selected (base VA = 0).
252 * However, this function is called from many places,
253 * including common code, so don't duplicate the message.
255 if (rte_errno == EADDRNOTAVAIL)
256 RTE_LOG(ERR, EAL, "Cannot reserve %llu bytes at [%p] - "
257 "please use '--" OPT_BASE_VIRTADDR "' option\n",
258 (unsigned long long)mem_sz, msl->base_va);
265 RTE_LOG(DEBUG, EAL, "VA reserved for memseg list at %p, size %zx\n",
272 eal_memseg_list_populate(struct rte_memseg_list *msl, void *addr, int n_segs)
274 size_t page_sz = msl->page_sz;
277 for (i = 0; i < n_segs; i++) {
278 struct rte_fbarray *arr = &msl->memseg_arr;
279 struct rte_memseg *ms = rte_fbarray_get(arr, i);
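		/* in IOVA-as-VA mode the IO address is simply the virtual address;
		 * otherwise it is not known here and is marked as RTE_BAD_IOVA.
		 */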
281 if (rte_eal_iova_mode() == RTE_IOVA_VA)
282 ms->iova = (uintptr_t)addr;
284 ms->iova = RTE_BAD_IOVA;
286 ms->hugepage_sz = page_sz;
290 rte_fbarray_set_used(arr, i);
292 addr = RTE_PTR_ADD(addr, page_sz);
296 static struct rte_memseg *
297 virt2memseg(const void *addr, const struct rte_memseg_list *msl)
299 const struct rte_fbarray *arr;
306 /* a memseg list was specified, check if it's the right one */
307 start = msl->base_va;
308 end = RTE_PTR_ADD(start, msl->len);
310 if (addr < start || addr >= end)
313 /* now, calculate index */
314 arr = &msl->memseg_arr;
315 ms_idx = RTE_PTR_DIFF(addr, msl->base_va) / msl->page_sz;
316 return rte_fbarray_get(arr, ms_idx);
319 static struct rte_memseg_list *
320 virt2memseg_list(const void *addr)
322 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
323 struct rte_memseg_list *msl;
326 for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
328 msl = &mcfg->memsegs[msl_idx];
330 start = msl->base_va;
331 end = RTE_PTR_ADD(start, msl->len);
332 if (addr >= start && addr < end)
335 /* if we didn't find our memseg list */
336 if (msl_idx == RTE_MAX_MEMSEG_LISTS)
341 struct rte_memseg_list *
342 rte_mem_virt2memseg_list(const void *addr)
344 return virt2memseg_list(addr);
352 find_virt(const struct rte_memseg_list *msl __rte_unused,
353 const struct rte_memseg *ms, void *arg)
355 struct virtiova *vi = arg;
356 if (vi->iova >= ms->iova && vi->iova < (ms->iova + ms->len)) {
357 size_t offset = vi->iova - ms->iova;
358 vi->virt = RTE_PTR_ADD(ms->addr, offset);
365 find_virt_legacy(const struct rte_memseg_list *msl __rte_unused,
366 const struct rte_memseg *ms, size_t len, void *arg)
368 struct virtiova *vi = arg;
369 if (vi->iova >= ms->iova && vi->iova < (ms->iova + len)) {
370 size_t offset = vi->iova - ms->iova;
371 vi->virt = RTE_PTR_ADD(ms->addr, offset);
379 rte_mem_iova2virt(rte_iova_t iova)
382 const struct internal_config *internal_conf =
383 eal_get_internal_configuration();
385 memset(&vi, 0, sizeof(vi));
388 /* for legacy mem, we can get away with scanning VA-contiguous segments,
389 * as we know they are PA-contiguous as well
391 if (internal_conf->legacy_mem)
392 rte_memseg_contig_walk(find_virt_legacy, &vi);
394 rte_memseg_walk(find_virt, &vi);
400 rte_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl)
402 return virt2memseg(addr, msl != NULL ? msl :
403 rte_mem_virt2memseg_list(addr));
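/*
 * Illustrative sketch, not part of the original file: translate a pointer into
 * DPDK-managed memory to its IO address using the lookup above. The helper
 * name is hypothetical; it assumes the address belongs to a known memseg and
 * returns RTE_BAD_IOVA otherwise. The public rte_mem_virt2iova() API offers
 * similar functionality for the common case.
 */
static __rte_unused rte_iova_t
example_virt2iova(const void *addr)
{
	const struct rte_memseg *ms = rte_mem_virt2memseg(addr, NULL);

	if (ms == NULL || ms->iova == RTE_BAD_IOVA)
		return RTE_BAD_IOVA;
	/* IOVA of the segment start plus the offset within the segment */
	return ms->iova + RTE_PTR_DIFF(addr, ms->addr);
}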
407 physmem_size(const struct rte_memseg_list *msl, void *arg)
409 uint64_t *total_len = arg;
414 *total_len += msl->memseg_arr.count * msl->page_sz;
419 /* get the total size of memory */
421 rte_eal_get_physmem_size(void)
423 uint64_t total_len = 0;
425 rte_memseg_list_walk(physmem_size, &total_len);
431 dump_memseg(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
434 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
435 int msl_idx, ms_idx, fd;
438 msl_idx = msl - mcfg->memsegs;
439 if (msl_idx < 0 || msl_idx >= RTE_MAX_MEMSEG_LISTS)
442 ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
446 fd = eal_memalloc_get_seg_fd(msl_idx, ms_idx);
447 fprintf(f, "Segment %i-%i: IOVA:0x%"PRIx64", len:%zu, "
448 "virt:%p, socket_id:%"PRId32", "
449 "hugepage_sz:%"PRIu64", nchannel:%"PRIx32", "
450 "nrank:%"PRIx32" fd:%i\n",
 * Defined here because it is declared in rte_memory.h, but the actual
 * implementation is in eal_common_memalloc.c, like all other memalloc
 * internals.
469 rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb,
472 const struct internal_config *internal_conf =
473 eal_get_internal_configuration();
475 /* FreeBSD boots with legacy mem enabled by default */
476 if (internal_conf->legacy_mem) {
477 RTE_LOG(DEBUG, EAL, "Registering mem event callbacks not supported\n");
481 return eal_memalloc_mem_event_callback_register(name, clb, arg);
485 rte_mem_event_callback_unregister(const char *name, void *arg)
487 const struct internal_config *internal_conf =
488 eal_get_internal_configuration();
490 /* FreeBSD boots with legacy mem enabled by default */
491 if (internal_conf->legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Unregistering mem event callbacks not supported\n");
496 return eal_memalloc_mem_event_callback_unregister(name, arg);
500 rte_mem_alloc_validator_register(const char *name,
501 rte_mem_alloc_validator_t clb, int socket_id, size_t limit)
503 const struct internal_config *internal_conf =
504 eal_get_internal_configuration();
506 /* FreeBSD boots with legacy mem enabled by default */
507 if (internal_conf->legacy_mem) {
508 RTE_LOG(DEBUG, EAL, "Registering mem alloc validators not supported\n");
512 return eal_memalloc_mem_alloc_validator_register(name, clb, socket_id,
517 rte_mem_alloc_validator_unregister(const char *name, int socket_id)
519 const struct internal_config *internal_conf =
520 eal_get_internal_configuration();
522 /* FreeBSD boots with legacy mem enabled by default */
523 if (internal_conf->legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Unregistering mem alloc validators not supported\n");
528 return eal_memalloc_mem_alloc_validator_unregister(name, socket_id);
531 /* Dump the physical memory layout on console */
533 rte_dump_physmem_layout(FILE *f)
535 rte_memseg_walk(dump_memseg, f);
539 check_iova(const struct rte_memseg_list *msl __rte_unused,
540 const struct rte_memseg *ms, void *arg)
542 uint64_t *mask = arg;
545 /* higher address within segment */
546 iova = (ms->iova + ms->len) - 1;
550 RTE_LOG(DEBUG, EAL, "memseg iova %"PRIx64", len %zx, out of range\n",
553 RTE_LOG(DEBUG, EAL, "\tusing dma mask %"PRIx64"\n", *mask);
557 #define MAX_DMA_MASK_BITS 63
559 /* check memseg iovas are within the required range based on dma mask */
561 check_dma_mask(uint8_t maskbits, bool thread_unsafe)
563 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	/* Sanity check: we only accept mask widths that can be managed with
	 * 64-bit variables. Any higher value is likely wrong anyway. */
569 if (maskbits > MAX_DMA_MASK_BITS) {
570 RTE_LOG(ERR, EAL, "wrong dma mask size %u (Max: %u)\n",
571 maskbits, MAX_DMA_MASK_BITS);
575 /* create dma mask */
576 mask = ~((1ULL << maskbits) - 1);
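	/* e.g. maskbits == 48 yields mask == 0xffff000000000000: any segment IOVA
	 * with one of these upper bits set is outside the addressable range.
	 */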
579 ret = rte_memseg_walk_thread_unsafe(check_iova, &mask);
581 ret = rte_memseg_walk(check_iova, &mask);
		 * DMA mask precludes hugepage usage.
		 * This device cannot be used and we do not need to keep
592 * we need to keep the more restricted maskbit for checking
593 * potential dynamic memory allocation in the future.
595 mcfg->dma_maskbits = mcfg->dma_maskbits == 0 ? maskbits :
596 RTE_MIN(mcfg->dma_maskbits, maskbits);
602 rte_mem_check_dma_mask(uint8_t maskbits)
604 return check_dma_mask(maskbits, false);
608 rte_mem_check_dma_mask_thread_unsafe(uint8_t maskbits)
610 return check_dma_mask(maskbits, true);
 * Set the DMA mask to use when memory initialization is done.
 * This function should ONLY be used by code executed before the memory
 * initialization. PMDs should use rte_mem_check_dma_mask if the device has
 * addressing limitations.
621 rte_mem_set_dma_mask(uint8_t maskbits)
623 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
625 mcfg->dma_maskbits = mcfg->dma_maskbits == 0 ? maskbits :
626 RTE_MIN(mcfg->dma_maskbits, maskbits);
629 /* return the number of memory channels */
630 unsigned rte_memory_get_nchannel(void)
632 return rte_eal_get_configuration()->mem_config->nchannel;
/* return the number of memory ranks */
636 unsigned rte_memory_get_nrank(void)
638 return rte_eal_get_configuration()->mem_config->nrank;
642 rte_eal_memdevice_init(void)
644 struct rte_config *config;
645 const struct internal_config *internal_conf;
647 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
650 internal_conf = eal_get_internal_configuration();
651 config = rte_eal_get_configuration();
652 config->mem_config->nchannel = internal_conf->force_nchannel;
653 config->mem_config->nrank = internal_conf->force_nrank;
/* Lock the page in physical memory and prevent it from being swapped. */
660 rte_mem_lock_page(const void *virt)
662 uintptr_t virtual = (uintptr_t)virt;
663 size_t page_size = rte_mem_page_size();
664 uintptr_t aligned = RTE_PTR_ALIGN_FLOOR(virtual, page_size);
665 return rte_mem_lock((void *)aligned, page_size);
669 rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg)
671 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
672 int i, ms_idx, ret = 0;
674 for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
675 struct rte_memseg_list *msl = &mcfg->memsegs[i];
676 const struct rte_memseg *ms;
677 struct rte_fbarray *arr;
679 if (msl->memseg_arr.count == 0)
682 arr = &msl->memseg_arr;
684 ms_idx = rte_fbarray_find_next_used(arr, 0);
685 while (ms_idx >= 0) {
689 ms = rte_fbarray_get(arr, ms_idx);
691 /* find how many more segments there are, starting with
694 n_segs = rte_fbarray_find_contig_used(arr, ms_idx);
695 len = n_segs * msl->page_sz;
697 ret = func(msl, ms, len, arg);
700 ms_idx = rte_fbarray_find_next_used(arr,
708 rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
712 /* do not allow allocations/frees/init while we iterate */
713 rte_mcfg_mem_read_lock();
714 ret = rte_memseg_contig_walk_thread_unsafe(func, arg);
715 rte_mcfg_mem_read_unlock();
721 rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg)
723 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
724 int i, ms_idx, ret = 0;
726 for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
727 struct rte_memseg_list *msl = &mcfg->memsegs[i];
728 const struct rte_memseg *ms;
729 struct rte_fbarray *arr;
731 if (msl->memseg_arr.count == 0)
734 arr = &msl->memseg_arr;
736 ms_idx = rte_fbarray_find_next_used(arr, 0);
737 while (ms_idx >= 0) {
738 ms = rte_fbarray_get(arr, ms_idx);
739 ret = func(msl, ms, arg);
742 ms_idx = rte_fbarray_find_next_used(arr, ms_idx + 1);
749 rte_memseg_walk(rte_memseg_walk_t func, void *arg)
753 /* do not allow allocations/frees/init while we iterate */
754 rte_mcfg_mem_read_lock();
755 ret = rte_memseg_walk_thread_unsafe(func, arg);
756 rte_mcfg_mem_read_unlock();
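/*
 * Illustrative sketch, not part of the original file: a minimal
 * rte_memseg_walk() callback counting segments backed by pages of at least
 * 1GB. Returning 0 from the callback means "continue walking". The function
 * name is hypothetical.
 *
 * Usage: unsigned int n = 0; rte_memseg_walk(example_count_1g_segs, &n);
 */
static __rte_unused int
example_count_1g_segs(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, void *arg)
{
	unsigned int *count = arg;

	if (ms->hugepage_sz >= RTE_PGSIZE_1G)
		(*count)++;
	return 0;
}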
762 rte_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg)
764 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
767 for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
768 struct rte_memseg_list *msl = &mcfg->memsegs[i];
770 if (msl->base_va == NULL)
773 ret = func(msl, arg);
781 rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg)
785 /* do not allow allocations/frees/init while we iterate */
786 rte_mcfg_mem_read_lock();
787 ret = rte_memseg_list_walk_thread_unsafe(func, arg);
788 rte_mcfg_mem_read_unlock();
794 rte_memseg_get_fd_thread_unsafe(const struct rte_memseg *ms)
796 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
797 struct rte_memseg_list *msl;
798 struct rte_fbarray *arr;
799 int msl_idx, seg_idx, ret;
806 msl = rte_mem_virt2memseg_list(ms->addr);
811 arr = &msl->memseg_arr;
813 msl_idx = msl - mcfg->memsegs;
814 seg_idx = rte_fbarray_find_idx(arr, ms);
816 if (!rte_fbarray_is_used(arr, seg_idx)) {
821 /* segment fd API is not supported for external segments */
827 ret = eal_memalloc_get_seg_fd(msl_idx, seg_idx);
836 rte_memseg_get_fd(const struct rte_memseg *ms)
840 rte_mcfg_mem_read_lock();
841 ret = rte_memseg_get_fd_thread_unsafe(ms);
842 rte_mcfg_mem_read_unlock();
848 rte_memseg_get_fd_offset_thread_unsafe(const struct rte_memseg *ms,
851 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
852 struct rte_memseg_list *msl;
853 struct rte_fbarray *arr;
854 int msl_idx, seg_idx, ret;
856 if (ms == NULL || offset == NULL) {
861 msl = rte_mem_virt2memseg_list(ms->addr);
866 arr = &msl->memseg_arr;
868 msl_idx = msl - mcfg->memsegs;
869 seg_idx = rte_fbarray_find_idx(arr, ms);
871 if (!rte_fbarray_is_used(arr, seg_idx)) {
876 /* segment fd API is not supported for external segments */
882 ret = eal_memalloc_get_seg_fd_offset(msl_idx, seg_idx, offset);
891 rte_memseg_get_fd_offset(const struct rte_memseg *ms, size_t *offset)
895 rte_mcfg_mem_read_lock();
896 ret = rte_memseg_get_fd_offset_thread_unsafe(ms, offset);
897 rte_mcfg_mem_read_unlock();
903 rte_extmem_register(void *va_addr, size_t len, rte_iova_t iova_addrs[],
904 unsigned int n_pages, size_t page_sz)
906 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
907 unsigned int socket_id, n;
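	/* sanity-check the parameters: length and start address must be aligned to
	 * page_sz, and if an IOVA table is supplied it must hold one entry per page.
	 */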
910 if (va_addr == NULL || page_sz == 0 || len == 0 ||
911 !rte_is_power_of_2(page_sz) ||
912 RTE_ALIGN(len, page_sz) != len ||
913 ((len / page_sz) != n_pages && iova_addrs != NULL) ||
914 !rte_is_aligned(va_addr, page_sz)) {
918 rte_mcfg_mem_write_lock();
920 /* make sure the segment doesn't already exist */
921 if (malloc_heap_find_external_seg(va_addr, len) != NULL) {
927 /* get next available socket ID */
928 socket_id = mcfg->next_socket_id;
929 if (socket_id > INT32_MAX) {
		RTE_LOG(ERR, EAL, "Cannot assign new socket IDs\n");
936 /* we can create a new memseg */
938 if (malloc_heap_create_external_seg(va_addr, iova_addrs, n,
939 page_sz, "extmem", socket_id) == NULL) {
944 /* memseg list successfully created - increment next socket ID */
945 mcfg->next_socket_id++;
947 rte_mcfg_mem_write_unlock();
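/*
 * Illustrative sketch, not part of the original file: register an externally
 * allocated, page_sz-aligned buffer with DPDK without an IOVA table
 * (iova_addrs == NULL, so n_pages is ignored). The helper name is
 * hypothetical; DMA-mapping the area for specific devices is a separate step.
 */
static __rte_unused int
example_register_extmem(void *buf, size_t len, size_t page_sz)
{
	if (rte_extmem_register(buf, len, NULL, 0, page_sz) != 0)
		return -rte_errno;
	return 0;
}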
952 rte_extmem_unregister(void *va_addr, size_t len)
954 struct rte_memseg_list *msl;
957 if (va_addr == NULL || len == 0) {
961 rte_mcfg_mem_write_lock();
963 /* find our segment */
964 msl = malloc_heap_find_external_seg(va_addr, len);
971 ret = malloc_heap_destroy_external_seg(msl);
973 rte_mcfg_mem_write_unlock();
978 sync_memory(void *va_addr, size_t len, bool attach)
980 struct rte_memseg_list *msl;
983 if (va_addr == NULL || len == 0) {
987 rte_mcfg_mem_write_lock();
989 /* find our segment */
990 msl = malloc_heap_find_external_seg(va_addr, len);
997 ret = rte_fbarray_attach(&msl->memseg_arr);
999 ret = rte_fbarray_detach(&msl->memseg_arr);
1002 rte_mcfg_mem_write_unlock();
1007 rte_extmem_attach(void *va_addr, size_t len)
1009 return sync_memory(va_addr, len, true);
1013 rte_extmem_detach(void *va_addr, size_t len)
1015 return sync_memory(va_addr, len, false);
1018 /* detach all EAL memory */
1020 rte_eal_memory_detach(void)
1022 const struct internal_config *internal_conf =
1023 eal_get_internal_configuration();
1024 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1025 size_t page_sz = rte_mem_page_size();
1028 if (internal_conf->in_memory == 1)
1031 rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
1033 /* detach internal memory subsystem data first */
1034 if (eal_memalloc_cleanup())
1035 RTE_LOG(ERR, EAL, "Could not release memory subsystem data\n");
1037 for (i = 0; i < RTE_DIM(mcfg->memsegs); i++) {
1038 struct rte_memseg_list *msl = &mcfg->memsegs[i];
1040 /* skip uninitialized segments */
1041 if (msl->base_va == NULL)
1044 * external segments are supposed to be detached at this point,
1045 * but if they aren't, we can't really do anything about it,
1046 * because if we skip them here, they'll become invalid after
1047 * we unmap the memconfig anyway. however, if this is externally
1048 * referenced memory, we have no business unmapping it.
1051 if (rte_mem_unmap(msl->base_va, msl->len) != 0)
1052 RTE_LOG(ERR, EAL, "Could not unmap memory: %s\n",
1053 rte_strerror(rte_errno));
	 * we are detaching the fbarray rather than destroying it because
	 * other processes might still reference this fbarray, and we
	 * have no way of knowing if they still do.
1060 if (rte_fbarray_detach(&msl->memseg_arr))
1061 RTE_LOG(ERR, EAL, "Could not detach fbarray: %s\n",
1062 rte_strerror(rte_errno));
1064 rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
1067 * we've detached the memseg lists, so we can unmap the shared mem
1068 * config - we can't zero it out because it might still be referenced
1069 * by other processes.
1071 if (internal_conf->no_shconf == 0 && mcfg->mem_cfg_addr != 0) {
1072 if (rte_mem_unmap(mcfg, RTE_ALIGN(sizeof(*mcfg), page_sz)) != 0)
1073 RTE_LOG(ERR, EAL, "Could not unmap shared memory config: %s\n",
1074 rte_strerror(rte_errno));
1076 rte_eal_get_configuration()->mem_config = NULL;
1081 /* init memory subsystem */
1083 rte_eal_memory_init(void)
1085 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1086 const struct internal_config *internal_conf =
1087 eal_get_internal_configuration();
1090 RTE_LOG(DEBUG, EAL, "Setting up physically contiguous memory...\n");
1095 /* lock mem hotplug here, to prevent races while we init */
1096 rte_mcfg_mem_read_lock();
1098 if (rte_eal_memseg_init() < 0)
1101 if (eal_memalloc_init() < 0)
1104 retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
1105 rte_eal_hugepage_init() :
1106 rte_eal_hugepage_attach();
1110 if (internal_conf->no_shconf == 0 && rte_eal_memdevice_init() < 0)
1115 rte_mcfg_mem_read_unlock();
1119 #ifndef RTE_EXEC_ENV_WINDOWS
1120 #define EAL_MEMZONE_LIST_REQ "/eal/memzone_list"
1121 #define EAL_MEMZONE_INFO_REQ "/eal/memzone_info"
1122 #define EAL_HEAP_LIST_REQ "/eal/heap_list"
1123 #define EAL_HEAP_INFO_REQ "/eal/heap_info"
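/* These endpoints can be queried at runtime through the telemetry socket,
 * e.g. with usertools/dpdk-telemetry.py: "/eal/heap_info,0" returns the stats
 * dictionary for heap id 0.
 */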
1126 /* Telemetry callback handler to return heap stats for requested heap id. */
1128 handle_eal_heap_info_request(const char *cmd __rte_unused, const char *params,
1129 struct rte_tel_data *d)
1131 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1132 struct rte_malloc_socket_stats sock_stats;
1133 struct malloc_heap *heap;
1134 unsigned int heap_id;
1136 if (params == NULL || strlen(params) == 0)
1139 heap_id = (unsigned int)strtoul(params, NULL, 10);
1141 /* Get the heap stats of user provided heap id */
1142 heap = &mcfg->malloc_heaps[heap_id];
1143 malloc_heap_get_stats(heap, &sock_stats);
1145 rte_tel_data_start_dict(d);
	rte_tel_data_add_dict_int(d, "Heap_id", heap_id);
1147 rte_tel_data_add_dict_string(d, "Name", heap->name);
1148 rte_tel_data_add_dict_u64(d, "Heap_size",
1149 sock_stats.heap_totalsz_bytes);
1150 rte_tel_data_add_dict_u64(d, "Free_size", sock_stats.heap_freesz_bytes);
1151 rte_tel_data_add_dict_u64(d, "Alloc_size",
1152 sock_stats.heap_allocsz_bytes);
1153 rte_tel_data_add_dict_u64(d, "Greatest_free_size",
1154 sock_stats.greatest_free_size);
1155 rte_tel_data_add_dict_u64(d, "Alloc_count", sock_stats.alloc_count);
1156 rte_tel_data_add_dict_u64(d, "Free_count", sock_stats.free_count);
1161 /* Telemetry callback handler to list the heap ids setup. */
1163 handle_eal_heap_list_request(const char *cmd __rte_unused,
1164 const char *params __rte_unused,
1165 struct rte_tel_data *d)
1167 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1168 struct rte_malloc_socket_stats sock_stats;
1169 unsigned int heap_id;
1171 rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1172 /* Iterate through all initialised heaps */
1173 for (heap_id = 0; heap_id < RTE_MAX_HEAPS; heap_id++) {
1174 struct malloc_heap *heap = &mcfg->malloc_heaps[heap_id];
1176 malloc_heap_get_stats(heap, &sock_stats);
1177 if (sock_stats.heap_totalsz_bytes != 0)
1178 rte_tel_data_add_array_int(d, heap_id);
1184 /* Telemetry callback handler to return memzone info for requested index. */
1186 handle_eal_memzone_info_request(const char *cmd __rte_unused,
1187 const char *params, struct rte_tel_data *d)
1189 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1190 struct rte_memseg_list *msl = NULL;
1191 int ms_idx, ms_count = 0;
1192 void *cur_addr, *mz_end;
1193 struct rte_memzone *mz;
1194 struct rte_memseg *ms;
1195 char addr[ADDR_STR];
1196 unsigned int mz_idx;
1199 if (params == NULL || strlen(params) == 0)
1202 mz_idx = strtoul(params, NULL, 10);
1204 /* Get the memzone handle using index */
1205 mz = rte_fbarray_get(&mcfg->memzones, mz_idx);
1207 rte_tel_data_start_dict(d);
1208 rte_tel_data_add_dict_int(d, "Zone", mz_idx);
1209 rte_tel_data_add_dict_string(d, "Name", mz->name);
1210 rte_tel_data_add_dict_int(d, "Length", mz->len);
1211 snprintf(addr, ADDR_STR, "%p", mz->addr);
1212 rte_tel_data_add_dict_string(d, "Address", addr);
1213 rte_tel_data_add_dict_int(d, "Socket", mz->socket_id);
1214 rte_tel_data_add_dict_int(d, "Flags", mz->flags);
1216 /* go through each page occupied by this memzone */
1217 msl = rte_mem_virt2memseg_list(mz->addr);
1219 RTE_LOG(DEBUG, EAL, "Skipping bad memzone\n");
1222 page_sz = (size_t)mz->hugepage_sz;
1223 cur_addr = RTE_PTR_ALIGN_FLOOR(mz->addr, page_sz);
1224 mz_end = RTE_PTR_ADD(cur_addr, mz->len);
1226 ms_idx = RTE_PTR_DIFF(mz->addr, msl->base_va) / page_sz;
1227 ms = rte_fbarray_get(&msl->memseg_arr, ms_idx);
1229 rte_tel_data_add_dict_int(d, "Hugepage_size", page_sz);
1230 snprintf(addr, ADDR_STR, "%p", ms->addr);
1231 rte_tel_data_add_dict_string(d, "Hugepage_base", addr);
1234 /* advance VA to next page */
1235 cur_addr = RTE_PTR_ADD(cur_addr, page_sz);
1237 /* memzones occupy contiguous segments */
1240 } while (cur_addr < mz_end);
1242 rte_tel_data_add_dict_int(d, "Hugepage_used", ms_count);
1248 memzone_list_cb(const struct rte_memzone *mz __rte_unused,
1249 void *arg __rte_unused)
1251 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1252 struct rte_tel_data *d = arg;
1255 mz_idx = rte_fbarray_find_idx(&mcfg->memzones, mz);
1256 rte_tel_data_add_array_int(d, mz_idx);
1260 /* Telemetry callback handler to list the memzones reserved. */
1262 handle_eal_memzone_list_request(const char *cmd __rte_unused,
1263 const char *params __rte_unused,
1264 struct rte_tel_data *d)
1266 rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1267 rte_memzone_walk(memzone_list_cb, d);
1272 RTE_INIT(memory_telemetry)
1274 rte_telemetry_register_cmd(
1275 EAL_MEMZONE_LIST_REQ, handle_eal_memzone_list_request,
1276 "List of memzone index reserved. Takes no parameters");
1277 rte_telemetry_register_cmd(
1278 EAL_MEMZONE_INFO_REQ, handle_eal_memzone_info_request,
1279 "Returns memzone info. Parameters: int mz_id");
1280 rte_telemetry_register_cmd(
1281 EAL_HEAP_LIST_REQ, handle_eal_heap_list_request,
1282 "List of heap index setup. Takes no parameters");
1283 rte_telemetry_register_cmd(
1284 EAL_HEAP_INFO_REQ, handle_eal_heap_info_request,
1285 "Returns malloc heap stats. Parameters: int heap_id");