/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <errno.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <rte_fbarray.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_log.h>

#include "eal_memalloc.h"
#include "eal_private.h"
#include "eal_internal_cfg.h"
/*
 * Try to mmap *size bytes of anonymous memory. If successful, return the
 * pointer to the mapped area and keep *size unmodified. Otherwise, retry
 * with a smaller area: decrease *size by page_sz until it reaches 0, in
 * which case return NULL. Note: this function returns an address that is
 * a multiple of the requested page size.
 */
#define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"

static void *next_baseaddr;
static uint64_t system_page_sz;
void *
eal_get_virtual_area(void *requested_addr, size_t *size,
		size_t page_sz, int flags, int mmap_flags)
{
	bool addr_is_hint, allow_shrink, unmap, no_align;
	uint64_t map_sz;
	void *mapped_addr, *aligned_addr;

	if (system_page_sz == 0)
		system_page_sz = sysconf(_SC_PAGESIZE);

	mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;

	RTE_LOG(DEBUG, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);

	addr_is_hint = (flags & EAL_VIRTUAL_AREA_ADDR_IS_HINT) > 0;
	allow_shrink = (flags & EAL_VIRTUAL_AREA_ALLOW_SHRINK) > 0;
	unmap = (flags & EAL_VIRTUAL_AREA_UNMAP) > 0;

	if (next_baseaddr == NULL && internal_config.base_virtaddr != 0 &&
			rte_eal_process_type() == RTE_PROC_PRIMARY)
		next_baseaddr = (void *) internal_config.base_virtaddr;

	if (requested_addr == NULL && next_baseaddr != NULL) {
		requested_addr = next_baseaddr;
		requested_addr = RTE_PTR_ALIGN(requested_addr, page_sz);
		addr_is_hint = true;
	}

	/* we don't need alignment of the resulting pointer in the following
	 * cases:
	 *
	 * 1. page size is equal to system page size
	 * 2. we have a requested address, and it is page-aligned, and we will
	 *    be discarding the address if we get a different one.
	 *
	 * for all other cases, alignment is potentially necessary.
	 */
	no_align = (requested_addr != NULL &&
		requested_addr == RTE_PTR_ALIGN(requested_addr, page_sz) &&
		!addr_is_hint) ||
		page_sz == system_page_sz;

	do {
		map_sz = no_align ? *size : *size + page_sz;
		if (map_sz > SIZE_MAX) {
			RTE_LOG(ERR, EAL, "Map size too big\n");
			rte_errno = E2BIG;
			return NULL;
		}

		mapped_addr = mmap(requested_addr, (size_t)map_sz, PROT_READ,
				mmap_flags, -1, 0);
		if (mapped_addr == MAP_FAILED && allow_shrink)
			*size -= page_sz;
	} while (allow_shrink && mapped_addr == MAP_FAILED && *size > 0);

	/* align resulting address - if map failed, we will ignore the value
	 * anyway, so no need to add additional checks.
	 */
	aligned_addr = no_align ? mapped_addr :
			RTE_PTR_ALIGN(mapped_addr, page_sz);

	if (*size == 0) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area of any size: %s\n",
			strerror(errno));
		rte_errno = errno;
		return NULL;
	} else if (mapped_addr == MAP_FAILED) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area: %s\n",
			strerror(errno));
		/* pass errno up the call chain */
		rte_errno = errno;
		return NULL;
	} else if (requested_addr != NULL && !addr_is_hint &&
			aligned_addr != requested_addr) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area at requested address: %p (got %p)\n",
			requested_addr, aligned_addr);
		munmap(mapped_addr, map_sz);
		rte_errno = EADDRNOTAVAIL;
		return NULL;
	} else if (requested_addr != NULL && addr_is_hint &&
			aligned_addr != requested_addr) {
		RTE_LOG(WARNING, EAL, "WARNING! Base virtual address hint (%p != %p) not respected!\n",
			requested_addr, aligned_addr);
		RTE_LOG(WARNING, EAL, " This may cause issues with mapping memory into secondary processes\n");
	} else if (next_baseaddr != NULL) {
		next_baseaddr = RTE_PTR_ADD(aligned_addr, *size);
	}

	RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",
		aligned_addr, *size);

	if (unmap) {
		munmap(mapped_addr, map_sz);
	} else if (!no_align) {
		void *map_end, *aligned_end;
		size_t before_len, after_len;

		/* when we reserve space with alignment, we add alignment to
		 * the mapping size. On 32-bit, if 1GB alignment was requested,
		 * this would waste 1GB of address space, which is a luxury we
		 * cannot afford. So, if alignment was performed, check whether
		 * any unneeded address space can be unmapped back.
		 */

		map_end = RTE_PTR_ADD(mapped_addr, (size_t)map_sz);
		aligned_end = RTE_PTR_ADD(aligned_addr, *size);

		/* unmap space before aligned mmap address */
		before_len = RTE_PTR_DIFF(aligned_addr, mapped_addr);
		if (before_len > 0)
			munmap(mapped_addr, before_len);

		/* unmap space after aligned end mmap address */
		after_len = RTE_PTR_DIFF(map_end, aligned_end);
		if (after_len > 0)
			munmap(aligned_end, after_len);
	}

	return aligned_addr;
}
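/*
 * Translate a virtual address into its memseg within the given memseg list.
 * Returns NULL if the address does not belong to the list.
 */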
static struct rte_memseg *
virt2memseg(const void *addr, const struct rte_memseg_list *msl)
{
	const struct rte_fbarray *arr;
	void *start, *end;
	int ms_idx;

	if (msl == NULL)
		return NULL;

	/* a memseg list was specified, check if it's the right one */
	start = msl->base_va;
	end = RTE_PTR_ADD(start, msl->len);

	if (addr < start || addr >= end)
		return NULL;

	/* now, calculate index */
	arr = &msl->memseg_arr;
	ms_idx = RTE_PTR_DIFF(addr, msl->base_va) / msl->page_sz;
	return rte_fbarray_get(arr, ms_idx);
}
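/* find the memseg list that a given virtual address belongs to, if any */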
static struct rte_memseg_list *
virt2memseg_list(const void *addr)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	int msl_idx;

	for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
		void *start, *end;

		msl = &mcfg->memsegs[msl_idx];

		start = msl->base_va;
		end = RTE_PTR_ADD(start, msl->len);
		if (addr >= start && addr < end)
			break;
	}
	/* if we didn't find our memseg list */
	if (msl_idx == RTE_MAX_MEMSEG_LISTS)
		return NULL;
	return msl;
}
__rte_experimental struct rte_memseg_list *
rte_mem_virt2memseg_list(const void *addr)
{
	return virt2memseg_list(addr);
}
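/*
 * Helpers for rte_mem_iova2virt(): walk callbacks that look for the segment
 * containing a given IOVA and record the matching virtual address.
 */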
struct virtiova {
	rte_iova_t iova;
	void *virt;
};
static int
find_virt(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, void *arg)
{
	struct virtiova *vi = arg;
	if (vi->iova >= ms->iova && vi->iova < (ms->iova + ms->len)) {
		size_t offset = vi->iova - ms->iova;
		vi->virt = RTE_PTR_ADD(ms->addr, offset);
		/* stop the walk */
		return 1;
	}
	return 0;
}
static int
find_virt_legacy(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, size_t len, void *arg)
{
	struct virtiova *vi = arg;
	if (vi->iova >= ms->iova && vi->iova < (ms->iova + len)) {
		size_t offset = vi->iova - ms->iova;
		vi->virt = RTE_PTR_ADD(ms->addr, offset);
		/* stop the walk */
		return 1;
	}
	return 0;
}
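/* translate an IO address to the virtual address it is mapped at, if any */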
__rte_experimental void *
rte_mem_iova2virt(rte_iova_t iova)
{
	struct virtiova vi;

	memset(&vi, 0, sizeof(vi));

	vi.iova = iova;
	/* for legacy mem, we can get away with scanning VA-contiguous segments,
	 * as we know they are PA-contiguous as well
	 */
	if (internal_config.legacy_mem)
		rte_memseg_contig_walk(find_virt_legacy, &vi);
	else
		rte_memseg_walk(find_virt, &vi);

	return vi.virt;
}
__rte_experimental struct rte_memseg *
rte_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl)
{
	return virt2memseg(addr, msl != NULL ? msl :
			rte_mem_virt2memseg_list(addr));
}
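/* callback for rte_memseg_list_walk(): accumulate the memory used in each list */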
static int
physmem_size(const struct rte_memseg_list *msl, void *arg)
{
	uint64_t *total_len = arg;

	*total_len += msl->memseg_arr.count * msl->page_sz;

	return 0;
}
/* get the total size of memory */
uint64_t
rte_eal_get_physmem_size(void)
{
	uint64_t total_len = 0;

	rte_memseg_list_walk(physmem_size, &total_len);

	return total_len;
}
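/* callback for rte_memseg_walk(): dump one segment to the FILE passed in arg */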
static int
dump_memseg(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
		void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int msl_idx, ms_idx, fd;
	FILE *f = arg;

	msl_idx = msl - mcfg->memsegs;
	if (msl_idx < 0 || msl_idx >= RTE_MAX_MEMSEG_LISTS)
		return -1;

	ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
	if (ms_idx < 0)
		return -1;

	fd = eal_memalloc_get_seg_fd(msl_idx, ms_idx);
	fprintf(f, "Segment %i-%i: IOVA:0x%"PRIx64", len:%zu, "
			"virt:%p, socket_id:%"PRId32", "
			"hugepage_sz:%"PRIu64", nchannel:%"PRIx32", "
			"nrank:%"PRIx32" fd:%i\n",
			msl_idx, ms_idx, ms->iova, ms->len, ms->addr,
			ms->socket_id, ms->hugepage_sz, ms->nchannel,
			ms->nrank, fd);

	return 0;
}
/*
 * Defined here because it is declared in rte_memory.h, but the actual
 * implementation is in eal_common_memalloc.c, like all other memalloc
 * internals.
 */
int __rte_experimental
rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb,
		void *arg)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Registering mem event callbacks not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_event_callback_register(name, clb, arg);
}
int __rte_experimental
rte_mem_event_callback_unregister(const char *name, void *arg)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Unregistering mem event callbacks not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_event_callback_unregister(name, arg);
}
int __rte_experimental
rte_mem_alloc_validator_register(const char *name,
		rte_mem_alloc_validator_t clb, int socket_id, size_t limit)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Registering mem alloc validators not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_alloc_validator_register(name, clb, socket_id,
			limit);
}
int __rte_experimental
rte_mem_alloc_validator_unregister(const char *name, int socket_id)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Unregistering mem alloc validators not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_alloc_validator_unregister(name, socket_id);
}
/* Dump the physical memory layout on console */
void
rte_dump_physmem_layout(FILE *f)
{
	rte_memseg_walk(dump_memseg, f);
}
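/*
 * callback for rte_memseg_walk(): return non-zero (stopping the walk) if the
 * segment's highest IOVA does not fit under the DMA mask passed in arg.
 */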
static int
check_iova(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, void *arg)
{
	uint64_t *mask = arg;
	rte_iova_t iova;

	/* higher address within segment */
	iova = (ms->iova + ms->len) - 1;
	if (!(iova & *mask))
		return 0;

	RTE_LOG(DEBUG, EAL, "memseg iova %"PRIx64", len %zx, out of range\n",
			ms->iova, ms->len);

	RTE_LOG(DEBUG, EAL, "\tusing dma mask %"PRIx64"\n", *mask);

	return 1;
}
#if defined(RTE_ARCH_64)
#define MAX_DMA_MASK_BITS 63
#else
#define MAX_DMA_MASK_BITS 31
#endif
/* check memseg iovas are within the required range based on dma mask */
int __rte_experimental
rte_eal_check_dma_mask(uint8_t maskbits)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	uint64_t mask;

	/* sanity check */
	if (maskbits > MAX_DMA_MASK_BITS) {
		RTE_LOG(ERR, EAL, "wrong dma mask size %u (Max: %u)\n",
				maskbits, MAX_DMA_MASK_BITS);
		return -1;
	}

	/* create dma mask */
	mask = ~((1ULL << maskbits) - 1);

	if (rte_memseg_walk(check_iova, &mask))
		/*
		 * DMA mask precludes hugepage usage.
		 * This device cannot be used and we do not need to keep
		 * the dma mask.
		 */
		return 1;

	/*
	 * we need to keep the more restricted maskbit for checking
	 * potential dynamic memory allocation in the future.
	 */
	mcfg->dma_maskbits = mcfg->dma_maskbits == 0 ? maskbits :
			RTE_MIN(mcfg->dma_maskbits, maskbits);

	return 0;
}
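/*
 * Worked example (illustrative): with maskbits = 48, the mask computed above
 * is ~((1ULL << 48) - 1) == 0xffff000000000000. A segment whose highest IOVA
 * stays below 2^48 has (iova & mask) == 0 and passes check_iova(); a segment
 * reaching into bits 48-63 makes the walk return non-zero, so a device limited
 * to a 48-bit DMA mask cannot address all hugepage memory.
 */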
/* return the number of memory channels */
unsigned rte_memory_get_nchannel(void)
{
	return rte_eal_get_configuration()->mem_config->nchannel;
}

/* return the number of memory ranks */
unsigned rte_memory_get_nrank(void)
{
	return rte_eal_get_configuration()->mem_config->nrank;
}
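/* record forced channel/rank counts in the shared mem config (primary only) */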
static int
rte_eal_memdevice_init(void)
{
	struct rte_config *config;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	config = rte_eal_get_configuration();
	config->mem_config->nchannel = internal_config.force_nchannel;
	config->mem_config->nrank = internal_config.force_nrank;

	return 0;
}
/* Lock page in physical memory and prevent it from being swapped out. */
int
rte_mem_lock_page(const void *virt)
{
	unsigned long virtual = (unsigned long)virt;
	int page_size = getpagesize();
	unsigned long aligned = (virtual & ~(page_size - 1));
	return mlock((void *)aligned, page_size);
}
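/*
 * The walk functions below iterate over all memseg lists and segments. The
 * *_thread_unsafe() variants do not take the memory hotplug lock; the plain
 * variants take a read lock for the duration of the walk, so segments cannot
 * be allocated or freed while iterating.
 */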
int __rte_experimental
rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, ms_idx, ret = 0;

	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];
		const struct rte_memseg *ms;
		struct rte_fbarray *arr;

		if (msl->memseg_arr.count == 0)
			continue;

		arr = &msl->memseg_arr;

		ms_idx = rte_fbarray_find_next_used(arr, 0);
		while (ms_idx >= 0) {
			int n_segs;
			size_t len;

			ms = rte_fbarray_get(arr, ms_idx);

			/* find how many more segments there are, starting with
			 * this one.
			 */
			n_segs = rte_fbarray_find_contig_used(arr, ms_idx);
			len = n_segs * msl->page_sz;

			ret = func(msl, ms, len, arg);
			if (ret)
				return ret;
			ms_idx = rte_fbarray_find_next_used(arr,
					ms_idx + n_segs);
		}
	}
	return 0;
}
int __rte_experimental
rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int ret;

	/* do not allow allocations/frees/init while we iterate */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	ret = rte_memseg_contig_walk_thread_unsafe(func, arg);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}
int __rte_experimental
rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, ms_idx, ret = 0;

	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];
		const struct rte_memseg *ms;
		struct rte_fbarray *arr;

		if (msl->memseg_arr.count == 0)
			continue;

		arr = &msl->memseg_arr;

		ms_idx = rte_fbarray_find_next_used(arr, 0);
		while (ms_idx >= 0) {
			ms = rte_fbarray_get(arr, ms_idx);
			ret = func(msl, ms, arg);
			if (ret)
				return ret;
			ms_idx = rte_fbarray_find_next_used(arr, ms_idx + 1);
		}
	}
	return 0;
}
int __rte_experimental
rte_memseg_walk(rte_memseg_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int ret;

	/* do not allow allocations/frees/init while we iterate */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	ret = rte_memseg_walk_thread_unsafe(func, arg);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}
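/*
 * Example (illustrative): counting all used segments with the walk API. The
 * callback returns 0 to continue the walk and non-zero to stop it.
 *
 *	static int
 *	count_cb(const struct rte_memseg_list *msl __rte_unused,
 *			const struct rte_memseg *ms __rte_unused, void *arg)
 *	{
 *		int *cnt = arg;
 *		(*cnt)++;
 *		return 0;
 *	}
 *
 *	int cnt = 0;
 *	rte_memseg_walk(count_cb, &cnt);
 */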
int __rte_experimental
rte_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, ret = 0;

	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];

		if (msl->base_va == NULL)
			continue;

		ret = func(msl, arg);
		if (ret)
			return ret;
	}
	return 0;
}
int __rte_experimental
rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int ret;

	/* do not allow allocations/frees/init while we iterate */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	ret = rte_memseg_list_walk_thread_unsafe(func, arg);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}
int __rte_experimental
rte_memseg_get_fd_thread_unsafe(const struct rte_memseg *ms)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	struct rte_fbarray *arr;
	int msl_idx, seg_idx, ret;

	if (ms == NULL) {
		rte_errno = EINVAL;
		return -1;
	}

	msl = rte_mem_virt2memseg_list(ms->addr);
	if (msl == NULL) {
		rte_errno = EINVAL;
		return -1;
	}

	arr = &msl->memseg_arr;

	msl_idx = msl - mcfg->memsegs;
	seg_idx = rte_fbarray_find_idx(arr, ms);

	if (!rte_fbarray_is_used(arr, seg_idx)) {
		rte_errno = ENOENT;
		return -1;
	}

	ret = eal_memalloc_get_seg_fd(msl_idx, seg_idx);
	if (ret < 0) {
		rte_errno = -ret;
		ret = -1;
	}
	return ret;
}
int __rte_experimental
rte_memseg_get_fd(const struct rte_memseg *ms)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int ret;

	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	ret = rte_memseg_get_fd_thread_unsafe(ms);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}
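/* same lookup as above, but report the offset into the segment's fd instead */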
int __rte_experimental
rte_memseg_get_fd_offset_thread_unsafe(const struct rte_memseg *ms,
		size_t *offset)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	struct rte_fbarray *arr;
	int msl_idx, seg_idx, ret;

	if (ms == NULL || offset == NULL) {
		rte_errno = EINVAL;
		return -1;
	}

	msl = rte_mem_virt2memseg_list(ms->addr);
	if (msl == NULL) {
		rte_errno = EINVAL;
		return -1;
	}

	arr = &msl->memseg_arr;

	msl_idx = msl - mcfg->memsegs;
	seg_idx = rte_fbarray_find_idx(arr, ms);

	if (!rte_fbarray_is_used(arr, seg_idx)) {
		rte_errno = ENOENT;
		return -1;
	}

	ret = eal_memalloc_get_seg_fd_offset(msl_idx, seg_idx, offset);
	if (ret < 0) {
		rte_errno = -ret;
		ret = -1;
	}
	return ret;
}
int __rte_experimental
rte_memseg_get_fd_offset(const struct rte_memseg *ms, size_t *offset)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int ret;

	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	ret = rte_memseg_get_fd_offset_thread_unsafe(ms, offset);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}
/* init memory subsystem */
int
rte_eal_memory_init(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int retval;

	RTE_LOG(DEBUG, EAL, "Setting up physically contiguous memory...\n");

	if (!mcfg)
		return -1;

	/* lock mem hotplug here, to prevent races while we init */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);

	if (rte_eal_memseg_init() < 0)
		goto fail;

	if (eal_memalloc_init() < 0)
		goto fail;

	retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
			rte_eal_hugepage_init() :
			rte_eal_hugepage_attach();
	if (retval < 0)
		goto fail;

	if (internal_config.no_shconf == 0 && rte_eal_memdevice_init() < 0)
		goto fail;

	return 0;
fail:
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
	return -1;
}