/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <sys/queue.h>

#include <rte_fbarray.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_log.h>

#include "eal_memalloc.h"
#include "eal_private.h"
#include "eal_internal_cfg.h"
/*
 * Try to mmap *size bytes in /dev/zero. If it is successful, return the
 * pointer to the mmap'd area and keep *size unmodified. Else, retry
 * with a smaller zone: decrease *size by hugepage_sz until it reaches
 * 0. In this case, return NULL. Note: this function returns an address
 * which is a multiple of hugepage size.
 */

#define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"
static void *next_baseaddr;
static uint64_t system_page_sz;
void *
eal_get_virtual_area(void *requested_addr, size_t *size,
		size_t page_sz, int flags, int mmap_flags)
{
	bool addr_is_hint, allow_shrink, unmap, no_align;
	uint64_t map_sz;
	void *mapped_addr, *aligned_addr;

	if (system_page_sz == 0)
		system_page_sz = sysconf(_SC_PAGESIZE);

	mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;
	RTE_LOG(DEBUG, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);

	addr_is_hint = (flags & EAL_VIRTUAL_AREA_ADDR_IS_HINT) > 0;
	allow_shrink = (flags & EAL_VIRTUAL_AREA_ALLOW_SHRINK) > 0;
	unmap = (flags & EAL_VIRTUAL_AREA_UNMAP) > 0;

	if (next_baseaddr == NULL && internal_config.base_virtaddr != 0 &&
			rte_eal_process_type() == RTE_PROC_PRIMARY)
		next_baseaddr = (void *) internal_config.base_virtaddr;

	if (requested_addr == NULL && next_baseaddr != NULL) {
		requested_addr = next_baseaddr;
		requested_addr = RTE_PTR_ALIGN(requested_addr, page_sz);
		addr_is_hint = true;
	}
	/* we don't need alignment of resulting pointer in the following cases:
	 *
	 * 1. page size is equal to system size
	 * 2. we have a requested address, and it is page-aligned, and we will
	 *    be discarding the address if we get a different one.
	 *
	 * for all other cases, alignment is potentially necessary.
	 */
	no_align = (requested_addr != NULL &&
			requested_addr == RTE_PTR_ALIGN(requested_addr, page_sz) &&
			!addr_is_hint) ||
			page_sz == system_page_sz;
	do {
		map_sz = no_align ? *size : *size + page_sz;
		if (map_sz > SIZE_MAX) {
			RTE_LOG(ERR, EAL, "Map size too big\n");
			rte_errno = E2BIG;
			return NULL;
		}
		mapped_addr = mmap(requested_addr, (size_t)map_sz, PROT_READ,
				mmap_flags, -1, 0);
		if (mapped_addr == MAP_FAILED && allow_shrink)
			*size -= page_sz;
	} while (allow_shrink && mapped_addr == MAP_FAILED && *size > 0);
	/* align resulting address - if map failed, we will ignore the value
	 * anyway, so no need to add additional checks.
	 */
	aligned_addr = no_align ? mapped_addr :
			RTE_PTR_ALIGN(mapped_addr, page_sz);

	if (*size == 0) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area of any size: %s\n",
			strerror(errno));
		rte_errno = errno;
		return NULL;
	} else if (mapped_addr == MAP_FAILED) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area: %s\n",
			strerror(errno));
		/* pass errno up the call chain */
		rte_errno = errno;
		return NULL;
	} else if (requested_addr != NULL && !addr_is_hint &&
			aligned_addr != requested_addr) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area at requested address: %p (got %p)\n",
			requested_addr, aligned_addr);
		munmap(mapped_addr, map_sz);
		rte_errno = EADDRNOTAVAIL;
		return NULL;
	} else if (requested_addr != NULL && addr_is_hint &&
			aligned_addr != requested_addr) {
		RTE_LOG(WARNING, EAL, "WARNING! Base virtual address hint (%p != %p) not respected!\n",
			requested_addr, aligned_addr);
		RTE_LOG(WARNING, EAL, "   This may cause issues with mapping memory into secondary processes\n");
	} else if (next_baseaddr != NULL) {
		next_baseaddr = RTE_PTR_ADD(aligned_addr, *size);
	}

	RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",
		aligned_addr, *size);
	if (unmap) {
		munmap(mapped_addr, map_sz);
	} else if (!no_align) {
		void *map_end, *aligned_end;
		size_t before_len, after_len;

		/* when we reserve space with alignment, we add alignment to
		 * mapping size. On 32-bit, if 1GB alignment was requested, this
		 * would waste 1GB of address space, which is a luxury we cannot
		 * afford. so, if alignment was performed, check if any unneeded
		 * address space can be unmapped back.
		 */

		map_end = RTE_PTR_ADD(mapped_addr, (size_t)map_sz);
		aligned_end = RTE_PTR_ADD(aligned_addr, *size);

		/* unmap space before aligned mmap address */
		before_len = RTE_PTR_DIFF(aligned_addr, mapped_addr);
		if (before_len > 0)
			munmap(mapped_addr, before_len);

		/* unmap space after aligned end mmap address */
		after_len = RTE_PTR_DIFF(map_end, aligned_end);
		if (after_len > 0)
			munmap(aligned_end, after_len);
	}

	return aligned_addr;
}
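/*
 * Illustrative sketch (not part of the original file): a minimal, standalone
 * demonstration of the "over-reserve and trim" technique used above. It
 * reserves an extra alignment's worth of anonymous memory, picks an aligned
 * address inside the reservation, and gives the unused head and tail back to
 * the kernel. The function name example_reserve_aligned and the constant
 * EXAMPLE_ALIGN are hypothetical; only mmap()/munmap() semantics and the
 * RTE_PTR_* helpers already used in this file are assumed. Compiled out so it
 * does not affect the build.
 */
#if 0
#define EXAMPLE_ALIGN (2UL * 1024 * 1024) /* e.g. a 2MB hugepage alignment */

static void *
example_reserve_aligned(size_t len)
{
	/* reserve len + alignment so an aligned block of len bytes must fit */
	size_t map_sz = len + EXAMPLE_ALIGN;
	void *base, *aligned;
	size_t head, tail;

	base = mmap(NULL, map_sz, PROT_READ,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED)
		return NULL;

	aligned = RTE_PTR_ALIGN(base, EXAMPLE_ALIGN);
	head = RTE_PTR_DIFF(aligned, base);
	tail = map_sz - head - len;

	/* unmap the unused head and tail of the reservation */
	if (head > 0)
		munmap(base, head);
	if (tail > 0)
		munmap(RTE_PTR_ADD(aligned, len), tail);
	return aligned;
}
#endif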
static struct rte_memseg *
virt2memseg(const void *addr, const struct rte_memseg_list *msl)
{
	const struct rte_fbarray *arr;
	void *start, *end;
	int ms_idx;

	if (msl == NULL)
		return NULL;

	/* a memseg list was specified, check if it's the right one */
	start = msl->base_va;
	end = RTE_PTR_ADD(start, msl->len);

	if (addr < start || addr >= end)
		return NULL;

	/* now, calculate index */
	arr = &msl->memseg_arr;
	ms_idx = RTE_PTR_DIFF(addr, msl->base_va) / msl->page_sz;
	return rte_fbarray_get(arr, ms_idx);
}
static struct rte_memseg_list *
virt2memseg_list(const void *addr)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	int msl_idx;

	for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
		void *start, *end;
		msl = &mcfg->memsegs[msl_idx];

		start = msl->base_va;
		end = RTE_PTR_ADD(start, msl->len);
		if (addr >= start && addr < end)
			break;
	}
	/* if we didn't find our memseg list */
	if (msl_idx == RTE_MAX_MEMSEG_LISTS)
		return NULL;
	return msl;
}
__rte_experimental struct rte_memseg_list *
rte_mem_virt2memseg_list(const void *addr)
{
	return virt2memseg_list(addr);
}
struct virtiova {
	rte_iova_t iova;
	void *virt;
};
static int
find_virt(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, void *arg)
{
	struct virtiova *vi = arg;
	if (vi->iova >= ms->iova && vi->iova < (ms->iova + ms->len)) {
		size_t offset = vi->iova - ms->iova;
		vi->virt = RTE_PTR_ADD(ms->addr, offset);
		/* stop the walk */
		return 1;
	}
	return 0;
}
static int
find_virt_legacy(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, size_t len, void *arg)
{
	struct virtiova *vi = arg;
	if (vi->iova >= ms->iova && vi->iova < (ms->iova + len)) {
		size_t offset = vi->iova - ms->iova;
		vi->virt = RTE_PTR_ADD(ms->addr, offset);
		/* stop the walk */
		return 1;
	}
	return 0;
}
__rte_experimental void *
rte_mem_iova2virt(rte_iova_t iova)
{
	struct virtiova vi;

	memset(&vi, 0, sizeof(vi));
	vi.iova = iova;

	/* for legacy mem, we can get away with scanning VA-contiguous segments,
	 * as we know they are PA-contiguous as well
	 */
	if (internal_config.legacy_mem)
		rte_memseg_contig_walk(find_virt_legacy, &vi);
	else
		rte_memseg_walk(find_virt, &vi);

	return vi.virt;
}
__rte_experimental struct rte_memseg *
rte_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl)
{
	return virt2memseg(addr, msl != NULL ? msl :
			rte_mem_virt2memseg_list(addr));
}
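/*
 * Illustrative sketch (not part of the original file): how an application
 * might translate between IOVA and virtual addresses and look up the memseg
 * backing a buffer. The function name example_addr_lookup and its parameters
 * are hypothetical; rte_mem_virt2memseg() and rte_mem_iova2virt() are the
 * APIs defined above. Compiled out so it does not affect the build.
 */
#if 0
static void
example_addr_lookup(const void *buf, rte_iova_t iova)
{
	/* find the memseg backing 'buf' (NULL msl means "search all lists") */
	struct rte_memseg *ms = rte_mem_virt2memseg(buf, NULL);
	if (ms != NULL)
		printf("buf %p lives in a %" PRIu64 "-byte page at IOVA 0x%" PRIx64 "\n",
			buf, ms->hugepage_sz, ms->iova);

	/* reverse lookup: IOVA back to a usable virtual address */
	void *virt = rte_mem_iova2virt(iova);
	if (virt != NULL)
		printf("IOVA 0x%" PRIx64 " maps to VA %p\n", iova, virt);
}
#endif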
static int
physmem_size(const struct rte_memseg_list *msl, void *arg)
{
	uint64_t *total_len = arg;

	*total_len += msl->memseg_arr.count * msl->page_sz;
	return 0;
}

/* get the total size of memory */
uint64_t
rte_eal_get_physmem_size(void)
{
	uint64_t total_len = 0;

	rte_memseg_list_walk(physmem_size, &total_len);
	return total_len;
}
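/*
 * Illustrative sketch (not part of the original file): querying the total
 * amount of memory managed by EAL. The helper name example_report_memory is
 * hypothetical; rte_eal_get_physmem_size() is defined above. Compiled out so
 * it does not affect the build.
 */
#if 0
static void
example_report_memory(void)
{
	uint64_t total = rte_eal_get_physmem_size();

	printf("EAL manages %" PRIu64 " bytes (%" PRIu64 " MB) of memory\n",
		total, total / (1024 * 1024));
}
#endif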
static int
dump_memseg(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
		void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int msl_idx, ms_idx, fd;
	FILE *f = arg;

	msl_idx = msl - mcfg->memsegs;
	if (msl_idx < 0 || msl_idx >= RTE_MAX_MEMSEG_LISTS)
		return -1;

	ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
	if (ms_idx < 0)
		return -1;

	fd = eal_memalloc_get_seg_fd(msl_idx, ms_idx);
	fprintf(f, "Segment %i-%i: IOVA:0x%"PRIx64", len:%zu, "
			"virt:%p, socket_id:%"PRId32", "
			"hugepage_sz:%"PRIu64", nchannel:%"PRIx32", "
			"nrank:%"PRIx32" fd:%i\n",
			msl_idx, ms_idx, ms->iova, ms->len, ms->addr,
			ms->socket_id, ms->hugepage_sz, ms->nchannel,
			ms->nrank, fd);
	return 0;
}
/*
 * Defining here because declared in rte_memory.h, but the actual implementation
 * is in eal_common_memalloc.c, like all other memalloc internals.
 */
int __rte_experimental
rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb,
		void *arg)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Registering mem event callbacks not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_event_callback_register(name, clb, arg);
}
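/*
 * Illustrative sketch (not part of the original file): registering a memory
 * event callback so an application is notified when hugepage memory is
 * allocated or freed at runtime. The callback name example_mem_event_cb is
 * hypothetical; the rte_mem_event_callback_t signature and the
 * RTE_MEM_EVENT_ALLOC/RTE_MEM_EVENT_FREE event types are assumed to match
 * their declarations in rte_memory.h. Compiled out so it does not affect the
 * build.
 */
#if 0
static void
example_mem_event_cb(enum rte_mem_event event_type, const void *addr,
		size_t len, void *arg __rte_unused)
{
	printf("memory %s: %zu bytes at %p\n",
		event_type == RTE_MEM_EVENT_ALLOC ? "allocated" : "freed",
		len, addr);
}

static int
example_register_mem_event_cb(void)
{
	/* the name only identifies the callback for later unregistration */
	return rte_mem_event_callback_register("example-cb",
			example_mem_event_cb, NULL);
}
#endif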
int __rte_experimental
rte_mem_event_callback_unregister(const char *name, void *arg)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Unregistering mem event callbacks not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_event_callback_unregister(name, arg);
}
int __rte_experimental
rte_mem_alloc_validator_register(const char *name,
		rte_mem_alloc_validator_t clb, int socket_id, size_t limit)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Registering mem alloc validators not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_alloc_validator_register(name, clb, socket_id,
			limit);
}
int __rte_experimental
rte_mem_alloc_validator_unregister(const char *name, int socket_id)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Unregistering mem alloc validators not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_alloc_validator_unregister(name, socket_id);
}
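/*
 * Illustrative sketch (not part of the original file): registering an
 * allocation validator that refuses to grow memory on a socket past a soft
 * limit. The callback name example_alloc_validator and the 1GB limit are
 * hypothetical; the rte_mem_alloc_validator_t signature (socket id, current
 * limit, requested new total length) is assumed to match its declaration in
 * rte_memory.h. Compiled out so it does not affect the build.
 */
#if 0
static int
example_alloc_validator(int socket_id, size_t cur_limit, size_t new_len)
{
	/* returning -1 vetoes the allocation, 0 allows it */
	if (new_len > cur_limit) {
		printf("refusing to grow socket %d memory to %zu bytes\n",
			socket_id, new_len);
		return -1;
	}
	return 0;
}

static int
example_register_validator(void)
{
	/* limit socket 0 to 1GB; the name identifies the validator */
	return rte_mem_alloc_validator_register("example-validator",
			example_alloc_validator, 0, (size_t)1 << 30);
}
#endif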
/* Dump the physical memory layout on console */
void
rte_dump_physmem_layout(FILE *f)
{
	rte_memseg_walk(dump_memseg, f);
}
/* return the number of memory channels */
unsigned rte_memory_get_nchannel(void)
{
	return rte_eal_get_configuration()->mem_config->nchannel;
}

/* return the number of memory ranks */
unsigned rte_memory_get_nrank(void)
{
	return rte_eal_get_configuration()->mem_config->nrank;
}
static int
rte_eal_memdevice_init(void)
{
	struct rte_config *config;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	config = rte_eal_get_configuration();
	config->mem_config->nchannel = internal_config.force_nchannel;
	config->mem_config->nrank = internal_config.force_nrank;

	return 0;
}
/* Lock page in physical memory and prevent from swapping. */
int
rte_mem_lock_page(const void *virt)
{
	unsigned long virtual = (unsigned long)virt;
	int page_size = getpagesize();
	unsigned long aligned = (virtual & ~(page_size - 1));
	return mlock((void *)aligned, page_size);
}
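/*
 * Illustrative sketch (not part of the original file): pinning the page that
 * holds a small control structure so it cannot be swapped out. The struct and
 * function names are hypothetical; rte_mem_lock_page() is defined above.
 * Compiled out so it does not affect the build.
 */
#if 0
struct example_ctrl {
	uint64_t counter;
};

static int
example_pin_ctrl(struct example_ctrl *ctrl)
{
	/* locks the whole page containing 'ctrl', not just the struct */
	if (rte_mem_lock_page(ctrl) != 0) {
		RTE_LOG(ERR, EAL, "could not lock page at %p\n", (void *)ctrl);
		return -1;
	}
	return 0;
}
#endif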
int __rte_experimental
rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, ms_idx, ret = 0;

	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];
		const struct rte_memseg *ms;
		struct rte_fbarray *arr;

		if (msl->memseg_arr.count == 0)
			continue;

		arr = &msl->memseg_arr;

		ms_idx = rte_fbarray_find_next_used(arr, 0);
		while (ms_idx >= 0) {
			int n_segs;
			size_t len;

			ms = rte_fbarray_get(arr, ms_idx);

			/* find how many more segments there are, starting with
			 * this one.
			 */
			n_segs = rte_fbarray_find_contig_used(arr, ms_idx);
			len = n_segs * msl->page_sz;

			ret = func(msl, ms, len, arg);
			if (ret)
				return ret;

			ms_idx = rte_fbarray_find_next_used(arr,
					ms_idx + n_segs);
		}
	}
	return 0;
}
int __rte_experimental
rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int ret = 0;

	/* do not allow allocations/frees/init while we iterate */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	ret = rte_memseg_contig_walk_thread_unsafe(func, arg);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}
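/*
 * Illustrative sketch (not part of the original file): using
 * rte_memseg_contig_walk() to report every VA-contiguous run of segments and
 * find the largest one. The callback name example_contig_cb is hypothetical;
 * the callback signature matches rte_memseg_contig_walk_t as invoked above.
 * Compiled out so it does not affect the build.
 */
#if 0
static int
example_contig_cb(const struct rte_memseg_list *msl,
		const struct rte_memseg *ms, size_t len, void *arg)
{
	size_t *largest = arg;

	printf("contiguous run: VA %p, %zu bytes (page size %" PRIu64 ")\n",
		ms->addr, len, msl->page_sz);
	if (len > *largest)
		*largest = len;
	/* returning 0 continues the walk, >0 stops it, <0 aborts with error */
	return 0;
}

static size_t
example_find_largest_contig(void)
{
	size_t largest = 0;

	rte_memseg_contig_walk(example_contig_cb, &largest);
	return largest;
}
#endif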
int __rte_experimental
rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, ms_idx, ret = 0;

	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];
		const struct rte_memseg *ms;
		struct rte_fbarray *arr;

		if (msl->memseg_arr.count == 0)
			continue;

		arr = &msl->memseg_arr;

		ms_idx = rte_fbarray_find_next_used(arr, 0);
		while (ms_idx >= 0) {
			ms = rte_fbarray_get(arr, ms_idx);
			ret = func(msl, ms, arg);
			if (ret)
				return ret;
			ms_idx = rte_fbarray_find_next_used(arr, ms_idx + 1);
		}
	}
	return 0;
}
int __rte_experimental
rte_memseg_walk(rte_memseg_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int ret = 0;

	/* do not allow allocations/frees/init while we iterate */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	ret = rte_memseg_walk_thread_unsafe(func, arg);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}
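/*
 * Illustrative sketch (not part of the original file): counting segments per
 * NUMA socket with rte_memseg_walk(). The callback name example_count_cb and
 * the EXAMPLE_MAX_SOCKETS bound are hypothetical; the callback signature
 * matches rte_memseg_walk_t as invoked above. Compiled out so it does not
 * affect the build.
 */
#if 0
#define EXAMPLE_MAX_SOCKETS 8

static int
example_count_cb(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, void *arg)
{
	int *counts = arg;

	if (ms->socket_id >= 0 && ms->socket_id < EXAMPLE_MAX_SOCKETS)
		counts[ms->socket_id]++;
	return 0;
}

static void
example_count_segments(void)
{
	int counts[EXAMPLE_MAX_SOCKETS] = {0};
	int i;

	rte_memseg_walk(example_count_cb, counts);
	for (i = 0; i < EXAMPLE_MAX_SOCKETS; i++)
		if (counts[i] != 0)
			printf("socket %d: %d segments\n", i, counts[i]);
}
#endif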
int __rte_experimental
rte_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, ret = 0;

	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];

		if (msl->base_va == NULL)
			continue;

		ret = func(msl, arg);
		if (ret)
			return ret;
	}
	return 0;
}
int __rte_experimental
rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int ret = 0;

	/* do not allow allocations/frees/init while we iterate */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	ret = rte_memseg_list_walk_thread_unsafe(func, arg);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}
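/*
 * Illustrative sketch (not part of the original file): summarizing each
 * populated memseg list with rte_memseg_list_walk(). The callback name
 * example_list_cb is hypothetical; the callback signature matches
 * rte_memseg_list_walk_t as invoked above. Compiled out so it does not affect
 * the build.
 */
#if 0
static int
example_list_cb(const struct rte_memseg_list *msl, void *arg __rte_unused)
{
	printf("list at %p: %u segments of %" PRIu64 " bytes each\n",
		msl->base_va, msl->memseg_arr.count, msl->page_sz);
	return 0;
}

static void
example_dump_lists(void)
{
	rte_memseg_list_walk(example_list_cb, NULL);
}
#endif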
int __rte_experimental
rte_memseg_get_fd_thread_unsafe(const struct rte_memseg *ms)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	struct rte_fbarray *arr;
	int msl_idx, seg_idx, ret;

	if (ms == NULL) {
		rte_errno = EINVAL;
		return -1;
	}

	msl = rte_mem_virt2memseg_list(ms->addr);
	if (msl == NULL) {
		rte_errno = EINVAL;
		return -1;
	}
	arr = &msl->memseg_arr;

	msl_idx = msl - mcfg->memsegs;
	seg_idx = rte_fbarray_find_idx(arr, ms);

	if (!rte_fbarray_is_used(arr, seg_idx)) {
		rte_errno = ENOENT;
		return -1;
	}

	ret = eal_memalloc_get_seg_fd(msl_idx, seg_idx);
	if (ret < 0) {
		rte_errno = -ret;
		ret = -1;
	}
	return ret;
}
int __rte_experimental
rte_memseg_get_fd(const struct rte_memseg *ms)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int ret;

	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	ret = rte_memseg_get_fd_thread_unsafe(ms);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}
int __rte_experimental
rte_memseg_get_fd_offset_thread_unsafe(const struct rte_memseg *ms,
		size_t *offset)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	struct rte_fbarray *arr;
	int msl_idx, seg_idx, ret;

	if (ms == NULL || offset == NULL) {
		rte_errno = EINVAL;
		return -1;
	}

	msl = rte_mem_virt2memseg_list(ms->addr);
	if (msl == NULL) {
		rte_errno = EINVAL;
		return -1;
	}
	arr = &msl->memseg_arr;

	msl_idx = msl - mcfg->memsegs;
	seg_idx = rte_fbarray_find_idx(arr, ms);

	if (!rte_fbarray_is_used(arr, seg_idx)) {
		rte_errno = ENOENT;
		return -1;
	}

	ret = eal_memalloc_get_seg_fd_offset(msl_idx, seg_idx, offset);
	if (ret < 0) {
		rte_errno = -ret;
		ret = -1;
	}
	return ret;
}
int __rte_experimental
rte_memseg_get_fd_offset(const struct rte_memseg *ms, size_t *offset)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int ret;

	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	ret = rte_memseg_get_fd_offset_thread_unsafe(ms, offset);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}
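/*
 * Illustrative sketch (not part of the original file): obtaining the hugepage
 * file descriptor and offset backing a buffer, for example to hand the
 * mapping to another process. The function name example_get_backing_fd and
 * its parameters are hypothetical; rte_mem_virt2memseg(), rte_memseg_get_fd()
 * and rte_memseg_get_fd_offset() are defined above. Compiled out so it does
 * not affect the build.
 */
#if 0
static int
example_get_backing_fd(const void *buf, size_t *offset)
{
	const struct rte_memseg *ms = rte_mem_virt2memseg(buf, NULL);
	int fd;

	if (ms == NULL)
		return -1;

	fd = rte_memseg_get_fd(ms);
	if (fd < 0)
		return -1; /* rte_errno is set by the lookup above */

	if (rte_memseg_get_fd_offset(ms, offset) < 0)
		return -1;

	return fd;
}
#endif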
/* init memory subsystem */
int
rte_eal_memory_init(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int retval;

	RTE_LOG(DEBUG, EAL, "Setting up physically contiguous memory...\n");

	if (!mcfg)
		return -1;

	/* lock mem hotplug here, to prevent races while we init */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);

	if (rte_eal_memseg_init() < 0)
		goto fail;

	if (eal_memalloc_init() < 0)
		goto fail;

	retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
			rte_eal_hugepage_init() :
			rte_eal_hugepage_attach();
	if (retval < 0)
		goto fail;

	if (internal_config.no_shconf == 0 && rte_eal_memdevice_init() < 0)
		goto fail;

	return 0;
fail:
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
	return -1;
}