/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <sys/queue.h>

#include <rte_fbarray.h>
#include <rte_memory.h>
#include <rte_eal_memconfig.h>
#include <rte_errno.h>

#include "eal_memalloc.h"
#include "eal_private.h"
#include "eal_internal_cfg.h"
#include "eal_memcfg.h"
#include "malloc_heap.h"
/*
 * Try to mmap *size bytes of anonymous memory. If it is successful, return the
 * pointer to the mmap'd area and keep *size unmodified. Else, retry
 * with a smaller zone: decrease *size by hugepage_sz until it reaches
 * 0. In this case, return NULL. Note: this function returns an address
 * which is a multiple of hugepage size.
 */

#define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"

static void *next_baseaddr;
static uint64_t system_page_sz;
/*
 * Linux kernel uses a really high address as starting address for serving
 * mmap calls. If there are addressing limitations and IOVA mode is VA,
 * this starting address is likely too high for those devices. However, it
 * is possible to use a lower address in the process virtual address space
 * as with 64 bits there is a lot of available space.
 *
 * Current known limitations are 39 or 40 bits. Setting the starting address
 * at 4GB implies there are 508GB or 1020GB for mapping the available
 * hugepages. This is likely enough for most systems, although a device with
 * addressing limitations should call rte_mem_check_dma_mask to ensure all
 * memory is within the supported range.
 */
static uint64_t baseaddr = 0x100000000;

#define MAX_MMAP_WITH_DEFINED_ADDR_TRIES 5
void *
eal_get_virtual_area(void *requested_addr, size_t *size,
		size_t page_sz, int flags, int mmap_flags)
{
	bool addr_is_hint, allow_shrink, unmap, no_align;
	uint64_t map_sz;
	void *mapped_addr, *aligned_addr;
	int try = 0;

	if (system_page_sz == 0)
		system_page_sz = sysconf(_SC_PAGESIZE);

	mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;

	RTE_LOG(DEBUG, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);

	addr_is_hint = (flags & EAL_VIRTUAL_AREA_ADDR_IS_HINT) > 0;
	allow_shrink = (flags & EAL_VIRTUAL_AREA_ALLOW_SHRINK) > 0;
	unmap = (flags & EAL_VIRTUAL_AREA_UNMAP) > 0;

	if (next_baseaddr == NULL && internal_config.base_virtaddr != 0 &&
			rte_eal_process_type() == RTE_PROC_PRIMARY)
		next_baseaddr = (void *) internal_config.base_virtaddr;

	if (next_baseaddr == NULL && internal_config.base_virtaddr == 0 &&
			rte_eal_process_type() == RTE_PROC_PRIMARY)
		next_baseaddr = (void *) baseaddr;

	if (requested_addr == NULL && next_baseaddr != NULL) {
		requested_addr = next_baseaddr;
		requested_addr = RTE_PTR_ALIGN(requested_addr, page_sz);
	}
	/* we don't need alignment of resulting pointer in the following cases:
	 *
	 * 1. page size is equal to system page size
	 * 2. we have a requested address, and it is page-aligned, and we will
	 *    be discarding the address if we get a different one.
	 *
	 * for all other cases, alignment is potentially necessary.
	 */
	no_align = (requested_addr != NULL &&
		requested_addr == RTE_PTR_ALIGN(requested_addr, page_sz) &&
		!addr_is_hint) ||
		page_sz == system_page_sz;
	do {
		map_sz = no_align ? *size : *size + page_sz;
		if (map_sz > SIZE_MAX) {
			RTE_LOG(ERR, EAL, "Map size too big\n");
			rte_errno = E2BIG;
			return NULL;
		}

		mapped_addr = mmap(requested_addr, (size_t)map_sz, PROT_READ,
				mmap_flags, -1, 0);
		if (mapped_addr == MAP_FAILED && allow_shrink)
			*size -= page_sz;

		if (mapped_addr != MAP_FAILED && addr_is_hint &&
				mapped_addr != requested_addr) {
			try++;
			next_baseaddr = RTE_PTR_ADD(next_baseaddr, page_sz);
			if (try <= MAX_MMAP_WITH_DEFINED_ADDR_TRIES) {
				/* hint was not used. Try with another offset */
				munmap(mapped_addr, map_sz);
				mapped_addr = MAP_FAILED;
				requested_addr = next_baseaddr;
			}
		}
	} while ((allow_shrink || addr_is_hint) &&
		mapped_addr == MAP_FAILED && *size > 0);
	/* align resulting address - if map failed, we will ignore the value
	 * anyway, so no need to add additional checks.
	 */
	aligned_addr = no_align ? mapped_addr :
			RTE_PTR_ALIGN(mapped_addr, page_sz);

	if (*size == 0) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area of any size: %s\n",
			strerror(errno));
		rte_errno = errno;
		return NULL;
	} else if (mapped_addr == MAP_FAILED) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area: %s\n",
			strerror(errno));
		/* pass errno up the call chain */
		rte_errno = errno;
		return NULL;
	} else if (requested_addr != NULL && !addr_is_hint &&
			aligned_addr != requested_addr) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area at requested address: %p (got %p)\n",
			requested_addr, aligned_addr);
		munmap(mapped_addr, map_sz);
		rte_errno = EADDRNOTAVAIL;
		return NULL;
	} else if (requested_addr != NULL && addr_is_hint &&
			aligned_addr != requested_addr) {
		RTE_LOG(WARNING, EAL, "WARNING! Base virtual address hint (%p != %p) not respected!\n",
			requested_addr, aligned_addr);
		RTE_LOG(WARNING, EAL, " This may cause issues with mapping memory into secondary processes\n");
	} else if (next_baseaddr != NULL) {
		next_baseaddr = RTE_PTR_ADD(aligned_addr, *size);
	}

	RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",
		aligned_addr, *size);
	if (unmap) {
		munmap(mapped_addr, map_sz);
	} else if (!no_align) {
		void *map_end, *aligned_end;
		size_t before_len, after_len;

		/* when we reserve space with alignment, we add alignment to
		 * mapping size. On 32-bit, if 1GB alignment was requested, this
		 * would waste 1GB of address space, which is a luxury we cannot
		 * afford. So, if alignment was performed, check if any unneeded
		 * address space can be unmapped back.
		 */
		map_end = RTE_PTR_ADD(mapped_addr, (size_t)map_sz);
		aligned_end = RTE_PTR_ADD(aligned_addr, *size);

		/* unmap space before aligned mmap address */
		before_len = RTE_PTR_DIFF(aligned_addr, mapped_addr);
		if (before_len > 0)
			munmap(mapped_addr, before_len);

		/* unmap space after aligned end mmap address */
		after_len = RTE_PTR_DIFF(map_end, aligned_end);
		if (after_len > 0)
			munmap(aligned_end, after_len);
	}

	return aligned_addr;
}
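
/*
 * Usage sketch (illustrative only, not part of this file): reserving an
 * unbacked, 2 MB-aligned address-space hole and allowing the EAL to shrink
 * the request if the full size cannot be reserved. The page size constants
 * are assumed from rte_memory.h; error handling is elided.
 *
 *	size_t sz = RTE_PGSIZE_1G;
 *	void *va = eal_get_virtual_area(NULL, &sz, RTE_PGSIZE_2M,
 *			EAL_VIRTUAL_AREA_ALLOW_SHRINK, 0);
 *	if (va == NULL)
 *		rte_panic("cannot reserve address space\n");
 */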
static struct rte_memseg *
virt2memseg(const void *addr, const struct rte_memseg_list *msl)
{
	const struct rte_fbarray *arr;
	void *start, *end;
	int ms_idx;

	/* a memseg list was specified, check if it's the right one */
	start = msl->base_va;
	end = RTE_PTR_ADD(start, msl->len);

	if (addr < start || addr >= end)
		return NULL;

	/* now, calculate index */
	arr = &msl->memseg_arr;
	ms_idx = RTE_PTR_DIFF(addr, msl->base_va) / msl->page_sz;
	return rte_fbarray_get(arr, ms_idx);
}
static struct rte_memseg_list *
virt2memseg_list(const void *addr)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	int msl_idx;

	for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
		void *start, *end;

		msl = &mcfg->memsegs[msl_idx];

		start = msl->base_va;
		end = RTE_PTR_ADD(start, msl->len);
		if (addr >= start && addr < end)
			break;
	}
	/* if we didn't find our memseg list */
	if (msl_idx == RTE_MAX_MEMSEG_LISTS)
		return NULL;
	return msl;
}

struct rte_memseg_list *
rte_mem_virt2memseg_list(const void *addr)
{
	return virt2memseg_list(addr);
}
static int
find_virt(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, void *arg)
{
	struct virtiova *vi = arg;
	if (vi->iova >= ms->iova && vi->iova < (ms->iova + ms->len)) {
		size_t offset = vi->iova - ms->iova;
		vi->virt = RTE_PTR_ADD(ms->addr, offset);
		return 1;
	}
	return 0;
}

static int
find_virt_legacy(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, size_t len, void *arg)
{
	struct virtiova *vi = arg;
	if (vi->iova >= ms->iova && vi->iova < (ms->iova + len)) {
		size_t offset = vi->iova - ms->iova;
		vi->virt = RTE_PTR_ADD(ms->addr, offset);
		return 1;
	}
	return 0;
}
void *
rte_mem_iova2virt(rte_iova_t iova)
{
	struct virtiova vi;

	memset(&vi, 0, sizeof(vi));

	/* for legacy mem, we can get away with scanning VA-contiguous segments,
	 * as we know they are PA-contiguous as well
	 */
	if (internal_config.legacy_mem)
		rte_memseg_contig_walk(find_virt_legacy, &vi);
	else
		rte_memseg_walk(find_virt, &vi);

	return vi.virt;
}
struct rte_memseg *
rte_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl)
{
	return virt2memseg(addr, msl != NULL ? msl :
			rte_mem_virt2memseg_list(addr));
}
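
/*
 * Illustrative sketch (hypothetical variable names): translating a pointer
 * into DPDK-managed memory back to its IOVA via the memseg it belongs to.
 *
 *	const struct rte_memseg *ms = rte_mem_virt2memseg(ptr, NULL);
 *	if (ms != NULL)
 *		iova = ms->iova + RTE_PTR_DIFF(ptr, ms->addr);
 */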
static int
physmem_size(const struct rte_memseg_list *msl, void *arg)
{
	uint64_t *total_len = arg;

	*total_len += msl->memseg_arr.count * msl->page_sz;
	return 0;
}

/* get the total size of memory */
uint64_t
rte_eal_get_physmem_size(void)
{
	uint64_t total_len = 0;

	rte_memseg_list_walk(physmem_size, &total_len);
	return total_len;
}
static int
dump_memseg(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
		void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int msl_idx, ms_idx, fd;
	FILE *f = arg;

	msl_idx = msl - mcfg->memsegs;
	if (msl_idx < 0 || msl_idx >= RTE_MAX_MEMSEG_LISTS)
		return -1;

	ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);

	fd = eal_memalloc_get_seg_fd(msl_idx, ms_idx);
	fprintf(f, "Segment %i-%i: IOVA:0x%"PRIx64", len:%zu, "
			"virt:%p, socket_id:%"PRId32", "
			"hugepage_sz:%"PRIu64", nchannel:%"PRIx32", "
			"nrank:%"PRIx32" fd:%i\n",
			msl_idx, ms_idx,
			ms->iova, ms->len, ms->addr,
			ms->socket_id, ms->hugepage_sz,
			ms->nchannel, ms->nrank, fd);

	return 0;
}
/*
 * Defined here because it is declared in rte_memory.h, but the actual
 * implementation is in eal_common_memalloc.c, like all other memalloc
 * internals.
 */
int
rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb,
		void *arg)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Registering mem event callbacks not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_event_callback_register(name, clb, arg);
}
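
/*
 * Illustrative sketch of a memory event callback (the callback name, body and
 * helper are hypothetical); such a callback is notified of hotplug
 * allocations and frees:
 *
 *	static void
 *	my_mem_event_cb(enum rte_mem_event type, const void *addr, size_t len,
 *			void *arg)
 *	{
 *		if (type == RTE_MEM_EVENT_ALLOC)
 *			map_into_device(addr, len);   (hypothetical helper)
 *	}
 *
 *	rte_mem_event_callback_register("my-driver", my_mem_event_cb, NULL);
 */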
int
rte_mem_event_callback_unregister(const char *name, void *arg)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Unregistering mem event callbacks not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_event_callback_unregister(name, arg);
}
int
rte_mem_alloc_validator_register(const char *name,
		rte_mem_alloc_validator_t clb, int socket_id, size_t limit)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Registering mem alloc validators not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_alloc_validator_register(name, clb, socket_id,
			limit);
}
int
rte_mem_alloc_validator_unregister(const char *name, int socket_id)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Unregistering mem alloc validators not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_alloc_validator_unregister(name, socket_id);
}
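
/*
 * Illustrative sketch of an allocation validator (hypothetical names): deny
 * any hotplug allocation that would grow the socket 0 heap past the limit
 * passed at registration time.
 *
 *	static int
 *	my_alloc_validator(int socket_id, size_t cur_limit, size_t new_len)
 *	{
 *		return new_len > cur_limit ? -1 : 0;
 *	}
 *
 *	rte_mem_alloc_validator_register("my-limit", my_alloc_validator, 0,
 *			1ULL << 30);
 */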
/* Dump the physical memory layout on console */
void
rte_dump_physmem_layout(FILE *f)
{
	rte_memseg_walk(dump_memseg, f);
}
static int
check_iova(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, void *arg)
{
	uint64_t *mask = arg;
	rte_iova_t iova;

	/* highest address within segment */
	iova = (ms->iova + ms->len) - 1;
	if (!(iova & *mask))
		return 0;

	RTE_LOG(DEBUG, EAL, "memseg iova %"PRIx64", len %zx, out of range\n",
			ms->iova, ms->len);
	RTE_LOG(DEBUG, EAL, "\tusing dma mask %"PRIx64"\n", *mask);

	return 1;
}
#define MAX_DMA_MASK_BITS 63

/* check memseg iovas are within the required range based on dma mask */
static int
check_dma_mask(uint8_t maskbits, bool thread_unsafe)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	uint64_t mask;
	int ret;

	/* Sanity check: we only check that the width can be managed with
	 * 64-bit variables; any higher value is likely wrong. */
	if (maskbits > MAX_DMA_MASK_BITS) {
		RTE_LOG(ERR, EAL, "wrong dma mask size %u (Max: %u)\n",
				maskbits, MAX_DMA_MASK_BITS);
		return -1;
	}

	/* create dma mask */
	mask = ~((1ULL << maskbits) - 1);

	if (thread_unsafe)
		ret = rte_memseg_walk_thread_unsafe(check_iova, &mask);
	else
		ret = rte_memseg_walk(check_iova, &mask);

	if (ret) {
		/*
		 * DMA mask precludes hugepage usage.
		 * This device cannot be used and we do not need to keep
		 * the DMA mask.
		 */
		return 1;
	}

	/*
	 * we need to keep the more restricted maskbit for checking
	 * potential dynamic memory allocation in the future.
	 */
	mcfg->dma_maskbits = mcfg->dma_maskbits == 0 ? maskbits :
			RTE_MIN(mcfg->dma_maskbits, maskbits);

	return 0;
}
int
rte_mem_check_dma_mask(uint8_t maskbits)
{
	return check_dma_mask(maskbits, false);
}

int
rte_mem_check_dma_mask_thread_unsafe(uint8_t maskbits)
{
	return check_dma_mask(maskbits, true);
}
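
/*
 * Illustrative sketch (not part of this file): a driver limited to, e.g.,
 * 40-bit IOVAs can verify at probe time that all hugepage memory is
 * reachable, as suggested by the comment above the baseaddr default:
 *
 *	if (rte_mem_check_dma_mask(40) != 0)
 *		return -ENODEV;
 */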
/*
 * Set dma mask to use when memory initialization is done.
 *
 * This function should ONLY be used by code executed before the memory
 * initialization. PMDs should use rte_mem_check_dma_mask if the device has
 * addressing limitations.
 */
void
rte_mem_set_dma_mask(uint8_t maskbits)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;

	mcfg->dma_maskbits = mcfg->dma_maskbits == 0 ? maskbits :
			RTE_MIN(mcfg->dma_maskbits, maskbits);
}
/* return the number of memory channels */
unsigned rte_memory_get_nchannel(void)
{
	return rte_eal_get_configuration()->mem_config->nchannel;
}

/* return the number of memory ranks */
unsigned rte_memory_get_nrank(void)
{
	return rte_eal_get_configuration()->mem_config->nrank;
}
static int
rte_eal_memdevice_init(void)
{
	struct rte_config *config;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	config = rte_eal_get_configuration();
	config->mem_config->nchannel = internal_config.force_nchannel;
	config->mem_config->nrank = internal_config.force_nrank;

	return 0;
}
/* Lock page in physical memory and prevent it from being swapped. */
int
rte_mem_lock_page(const void *virt)
{
	unsigned long virtual = (unsigned long)virt;
	int page_size = getpagesize();
	unsigned long aligned = (virtual & ~(page_size - 1));
	return mlock((void *)aligned, page_size);
}
int
rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, ms_idx, ret = 0;

	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];
		const struct rte_memseg *ms;
		struct rte_fbarray *arr;

		if (msl->memseg_arr.count == 0)
			continue;

		arr = &msl->memseg_arr;

		ms_idx = rte_fbarray_find_next_used(arr, 0);
		while (ms_idx >= 0) {
			int n_segs;
			size_t len;

			ms = rte_fbarray_get(arr, ms_idx);

			/* find how many more segments there are, starting with
			 * this one.
			 */
			n_segs = rte_fbarray_find_contig_used(arr, ms_idx);
			len = n_segs * msl->page_sz;

			ret = func(msl, ms, len, arg);
			if (ret)
				return ret;
			ms_idx = rte_fbarray_find_next_used(arr,
					ms_idx + n_segs);
		}
	}
	return 0;
}
int
rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
{
	int ret = 0;

	/* do not allow allocations/frees/init while we iterate */
	rte_mcfg_mem_read_lock();
	ret = rte_memseg_contig_walk_thread_unsafe(func, arg);
	rte_mcfg_mem_read_unlock();

	return ret;
}
int
rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, ms_idx, ret = 0;

	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];
		const struct rte_memseg *ms;
		struct rte_fbarray *arr;

		if (msl->memseg_arr.count == 0)
			continue;

		arr = &msl->memseg_arr;

		ms_idx = rte_fbarray_find_next_used(arr, 0);
		while (ms_idx >= 0) {
			ms = rte_fbarray_get(arr, ms_idx);
			ret = func(msl, ms, arg);
			if (ret)
				return ret;
			ms_idx = rte_fbarray_find_next_used(arr, ms_idx + 1);
		}
	}
	return 0;
}
int
rte_memseg_walk(rte_memseg_walk_t func, void *arg)
{
	int ret = 0;

	/* do not allow allocations/frees/init while we iterate */
	rte_mcfg_mem_read_lock();
	ret = rte_memseg_walk_thread_unsafe(func, arg);
	rte_mcfg_mem_read_unlock();

	return ret;
}
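
/*
 * Illustrative sketch of a walk callback (hypothetical names): counting all
 * segments currently managed by the EAL. Returning 0 keeps the walk going;
 * a non-zero return stops it.
 *
 *	static int
 *	count_segs(const struct rte_memseg_list *msl __rte_unused,
 *			const struct rte_memseg *ms __rte_unused, void *arg)
 *	{
 *		(*(unsigned int *)arg)++;
 *		return 0;
 *	}
 *
 *	unsigned int n = 0;
 *	rte_memseg_walk(count_segs, &n);
 */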
int
rte_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, ret = 0;

	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];

		if (msl->base_va == NULL)
			continue;

		ret = func(msl, arg);
		if (ret)
			return ret;
	}
	return 0;
}
int
rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg)
{
	int ret = 0;

	/* do not allow allocations/frees/init while we iterate */
	rte_mcfg_mem_read_lock();
	ret = rte_memseg_list_walk_thread_unsafe(func, arg);
	rte_mcfg_mem_read_unlock();

	return ret;
}
int
rte_memseg_get_fd_thread_unsafe(const struct rte_memseg *ms)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	struct rte_fbarray *arr;
	int msl_idx, seg_idx, ret;

	msl = rte_mem_virt2memseg_list(ms->addr);

	arr = &msl->memseg_arr;
	msl_idx = msl - mcfg->memsegs;
	seg_idx = rte_fbarray_find_idx(arr, ms);

	if (!rte_fbarray_is_used(arr, seg_idx)) {
		rte_errno = ENOENT;
		return -1;
	}

	/* segment fd API is not supported for external segments */

	ret = eal_memalloc_get_seg_fd(msl_idx, seg_idx);

	return ret;
}

int
rte_memseg_get_fd(const struct rte_memseg *ms)
{
	int ret;

	rte_mcfg_mem_read_lock();
	ret = rte_memseg_get_fd_thread_unsafe(ms);
	rte_mcfg_mem_read_unlock();

	return ret;
}
int
rte_memseg_get_fd_offset_thread_unsafe(const struct rte_memseg *ms,
		size_t *offset)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	struct rte_fbarray *arr;
	int msl_idx, seg_idx, ret;

	if (ms == NULL || offset == NULL) {
		rte_errno = EINVAL;
		return -1;
	}

	msl = rte_mem_virt2memseg_list(ms->addr);

	arr = &msl->memseg_arr;
	msl_idx = msl - mcfg->memsegs;
	seg_idx = rte_fbarray_find_idx(arr, ms);

	if (!rte_fbarray_is_used(arr, seg_idx)) {
		rte_errno = ENOENT;
		return -1;
	}

	/* segment fd API is not supported for external segments */

	ret = eal_memalloc_get_seg_fd_offset(msl_idx, seg_idx, offset);

	return ret;
}

int
rte_memseg_get_fd_offset(const struct rte_memseg *ms, size_t *offset)
{
	int ret;

	rte_mcfg_mem_read_lock();
	ret = rte_memseg_get_fd_offset_thread_unsafe(ms, offset);
	rte_mcfg_mem_read_unlock();

	return ret;
}
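
/*
 * Illustrative sketch (hypothetical names, error handling elided): a process
 * can re-map a segment elsewhere using its backing fd and offset:
 *
 *	size_t off;
 *	int fd = rte_memseg_get_fd(ms);
 *	if (fd >= 0 && rte_memseg_get_fd_offset(ms, &off) == 0)
 *		copy = mmap(NULL, ms->len, PROT_READ | PROT_WRITE,
 *				MAP_SHARED, fd, off);
 */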
int
rte_extmem_register(void *va_addr, size_t len, rte_iova_t iova_addrs[],
		unsigned int n_pages, size_t page_sz)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	unsigned int socket_id, n;
	int ret = 0;

	if (va_addr == NULL || page_sz == 0 || len == 0 ||
			!rte_is_power_of_2(page_sz) ||
			RTE_ALIGN(len, page_sz) != len ||
			((len / page_sz) != n_pages && iova_addrs != NULL) ||
			!rte_is_aligned(va_addr, page_sz)) {
		rte_errno = EINVAL;
		return -1;
	}

	rte_mcfg_mem_write_lock();

	/* make sure the segment doesn't already exist */
	if (malloc_heap_find_external_seg(va_addr, len) != NULL) {
		rte_errno = EEXIST;
		ret = -1;
		goto unlock;
	}

	/* get next available socket ID */
	socket_id = mcfg->next_socket_id;
	if (socket_id > INT32_MAX) {
		RTE_LOG(ERR, EAL, "Cannot assign new socket IDs\n");
		rte_errno = ENOSPC;
		ret = -1;
		goto unlock;
	}

	/* we can create a new memseg */
	n = len / page_sz;
	if (malloc_heap_create_external_seg(va_addr, iova_addrs, n,
			page_sz, "extmem", socket_id) == NULL) {
		ret = -1;
		goto unlock;
	}

	/* memseg list successfully created - increment next socket ID */
	mcfg->next_socket_id++;
unlock:
	rte_mcfg_mem_write_unlock();

	return ret;
}
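
/*
 * Illustrative sketch (hypothetical buffer and sizes): registering an
 * externally allocated, 2 MB-aligned area whose IOVAs are unknown, so that it
 * can later be added to an external heap or DMA-mapped:
 *
 *	if (rte_extmem_register(ext_buf, ext_len, NULL, 0, RTE_PGSIZE_2M) != 0)
 *		RTE_LOG(ERR, EAL, "extmem register failed: %s\n",
 *			rte_strerror(rte_errno));
 */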
int
rte_extmem_unregister(void *va_addr, size_t len)
{
	struct rte_memseg_list *msl;
	int ret = 0;

	if (va_addr == NULL || len == 0) {
		rte_errno = EINVAL;
		return -1;
	}

	rte_mcfg_mem_write_lock();

	/* find our segment */
	msl = malloc_heap_find_external_seg(va_addr, len);

	ret = malloc_heap_destroy_external_seg(msl);

	rte_mcfg_mem_write_unlock();

	return ret;
}
static int
sync_memory(void *va_addr, size_t len, bool attach)
{
	struct rte_memseg_list *msl;
	int ret = 0;

	if (va_addr == NULL || len == 0) {
		rte_errno = EINVAL;
		return -1;
	}

	rte_mcfg_mem_write_lock();

	/* find our segment */
	msl = malloc_heap_find_external_seg(va_addr, len);

	if (attach)
		ret = rte_fbarray_attach(&msl->memseg_arr);
	else
		ret = rte_fbarray_detach(&msl->memseg_arr);

	rte_mcfg_mem_write_unlock();

	return ret;
}

int
rte_extmem_attach(void *va_addr, size_t len)
{
	return sync_memory(va_addr, len, true);
}

int
rte_extmem_detach(void *va_addr, size_t len)
{
	return sync_memory(va_addr, len, false);
}
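
/*
 * Illustrative sketch (hypothetical names): a secondary process must attach
 * to an externally registered area before using it, and detach when done:
 *
 *	if (rte_extmem_attach(ext_buf, ext_len) != 0)
 *		RTE_LOG(ERR, EAL, "cannot attach to external memory\n");
 *	...
 *	rte_extmem_detach(ext_buf, ext_len);
 */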
/* init memory subsystem */
int
rte_eal_memory_init(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int retval;

	RTE_LOG(DEBUG, EAL, "Setting up physically contiguous memory...\n");

	/* lock mem hotplug here, to prevent races while we init */
	rte_mcfg_mem_read_lock();

	if (rte_eal_memseg_init() < 0)
		goto fail;

	if (eal_memalloc_init() < 0)
		goto fail;

	retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
			rte_eal_hugepage_init() :
			rte_eal_hugepage_attach();
	if (retval < 0)
		goto fail;

	if (internal_config.no_shconf == 0 && rte_eal_memdevice_init() < 0)
		goto fail;

	return 0;
fail:
	rte_mcfg_mem_read_unlock();
	return -1;
}