/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_log.h>
#include <rte_fbarray.h>
#include <rte_memory.h>
#include <rte_eal_memconfig.h>
#include <rte_eal_paging.h>
#include <rte_errno.h>

#include "eal_memalloc.h"
#include "eal_private.h"
#include "eal_internal_cfg.h"
#include "eal_memcfg.h"
#include "eal_options.h"
#include "malloc_heap.h"
/*
 * Try to reserve *size bytes of virtual address space. On success, return
 * the pointer to the reserved area and keep *size unmodified. Otherwise,
 * retry with a smaller zone: decrease *size by page_sz until it reaches 0,
 * in which case return NULL. Note: this function returns an address that
 * is a multiple of the requested page size.
 */

#define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"

static void *next_baseaddr;
static uint64_t system_page_sz;

#define MAX_MMAP_WITH_DEFINED_ADDR_TRIES 5
eal_get_virtual_area(void *requested_addr, size_t *size,
        size_t page_sz, int flags, int reserve_flags)
    bool addr_is_hint, allow_shrink, unmap, no_align;
    void *mapped_addr, *aligned_addr;

    if (system_page_sz == 0)
        system_page_sz = rte_mem_page_size();

    RTE_LOG(DEBUG, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);

    addr_is_hint = (flags & EAL_VIRTUAL_AREA_ADDR_IS_HINT) > 0;
    allow_shrink = (flags & EAL_VIRTUAL_AREA_ALLOW_SHRINK) > 0;
    unmap = (flags & EAL_VIRTUAL_AREA_UNMAP) > 0;

    if (next_baseaddr == NULL && internal_config.base_virtaddr != 0 &&
            rte_eal_process_type() == RTE_PROC_PRIMARY)
        next_baseaddr = (void *) internal_config.base_virtaddr;

    if (next_baseaddr == NULL && internal_config.base_virtaddr == 0 &&
            rte_eal_process_type() == RTE_PROC_PRIMARY)
        next_baseaddr = (void *) eal_get_baseaddr();

    if (requested_addr == NULL && next_baseaddr != NULL) {
        requested_addr = next_baseaddr;
        requested_addr = RTE_PTR_ALIGN(requested_addr, page_sz);
    /* we don't need alignment of resulting pointer in the following cases:
     *
     * 1. page size is equal to system page size
     * 2. we have a requested address, and it is page-aligned, and we will
     *    be discarding the address if we get a different one.
     *
     * for all other cases, alignment is potentially necessary.
     */
    no_align = (requested_addr != NULL &&
            requested_addr == RTE_PTR_ALIGN(requested_addr, page_sz) &&
            !addr_is_hint) ||
            page_sz == system_page_sz;
    map_sz = no_align ? *size : *size + page_sz;
    if (map_sz > SIZE_MAX) {
        RTE_LOG(ERR, EAL, "Map size too big\n");

    mapped_addr = eal_mem_reserve(
            requested_addr, (size_t)map_sz, reserve_flags);
    if ((mapped_addr == NULL) && allow_shrink)

    if ((mapped_addr != NULL) && addr_is_hint &&
            (mapped_addr != requested_addr)) {
        next_baseaddr = RTE_PTR_ADD(next_baseaddr, page_sz);
        if (try <= MAX_MMAP_WITH_DEFINED_ADDR_TRIES) {
            /* hint was not used. Try with another offset */
            eal_mem_free(mapped_addr, map_sz);
            requested_addr = next_baseaddr;

    } while ((allow_shrink || addr_is_hint) &&
            (mapped_addr == NULL) && (*size > 0));
    /* align resulting address - if map failed, we will ignore the value
     * anyway, so no need to add additional checks.
     */
    aligned_addr = no_align ? mapped_addr :
            RTE_PTR_ALIGN(mapped_addr, page_sz);
        RTE_LOG(ERR, EAL, "Cannot get a virtual area of any size: %s\n",
                rte_strerror(rte_errno));
    } else if (mapped_addr == NULL) {
        RTE_LOG(ERR, EAL, "Cannot get a virtual area: %s\n",
                rte_strerror(rte_errno));
    } else if (requested_addr != NULL && !addr_is_hint &&
            aligned_addr != requested_addr) {
        RTE_LOG(ERR, EAL, "Cannot get a virtual area at requested address: %p (got %p)\n",
                requested_addr, aligned_addr);
        eal_mem_free(mapped_addr, map_sz);
        rte_errno = EADDRNOTAVAIL;
    } else if (requested_addr != NULL && addr_is_hint &&
            aligned_addr != requested_addr) {
        RTE_LOG(WARNING, EAL, "WARNING! Base virtual address hint (%p != %p) not respected!\n",
                requested_addr, aligned_addr);
        RTE_LOG(WARNING, EAL, " This may cause issues with mapping memory into secondary processes\n");
    } else if (next_baseaddr != NULL) {
        next_baseaddr = RTE_PTR_ADD(aligned_addr, *size);

    RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",
            aligned_addr, *size);

        eal_mem_free(mapped_addr, map_sz);
    } else if (!no_align) {
        void *map_end, *aligned_end;
        size_t before_len, after_len;
        /* when we reserve space with alignment, we add alignment to
         * mapping size. On 32-bit, if 1GB alignment was requested, this
         * would waste 1GB of address space, which is a luxury we cannot
         * afford. So, if alignment was performed, check if any unneeded
         * address space can be unmapped back.
         */
        map_end = RTE_PTR_ADD(mapped_addr, (size_t)map_sz);
        aligned_end = RTE_PTR_ADD(aligned_addr, *size);
        /* unmap space before aligned mmap address */
        before_len = RTE_PTR_DIFF(aligned_addr, mapped_addr);
        eal_mem_free(mapped_addr, before_len);

        /* unmap space after aligned end mmap address */
        after_len = RTE_PTR_DIFF(map_end, aligned_end);
        eal_mem_free(aligned_end, after_len);

    /* Exclude these pages from a core dump. */
    eal_mem_set_dump(aligned_addr, *size, false);
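
/*
 * Usage sketch (illustrative only, not part of EAL). It shows how internal
 * EAL code typically reserves VA space with eal_get_virtual_area(): request
 * a page-aligned, optionally shrinkable reservation, then release it with
 * eal_mem_free(). The helper name, sizes and flags are arbitrary example
 * values.
 *
 *    static int
 *    example_reserve_va(void)
 *    {
 *        size_t size = RTE_PGSIZE_1G;
 *        size_t page_sz = RTE_PGSIZE_2M;
 *        void *va;
 *
 *        va = eal_get_virtual_area(NULL, &size, page_sz,
 *                EAL_VIRTUAL_AREA_ADDR_IS_HINT |
 *                EAL_VIRTUAL_AREA_ALLOW_SHRINK, 0);
 *        if (va == NULL)
 *            return -1;
 *
 *        // ... use [va, va + size); size may have shrunk ...
 *
 *        eal_mem_free(va, size);
 *        return 0;
 *    }
 */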

eal_memseg_list_init_named(struct rte_memseg_list *msl, const char *name,
        uint64_t page_sz, int n_segs, int socket_id, bool heap)
    if (rte_fbarray_init(&msl->memseg_arr, name, n_segs,
            sizeof(struct rte_memseg))) {
        RTE_LOG(ERR, EAL, "Cannot allocate memseg list: %s\n",
                rte_strerror(rte_errno));

    msl->page_sz = page_sz;
    msl->socket_id = socket_id;

    RTE_LOG(DEBUG, EAL,
            "Memseg list allocated at socket %i, page size 0x%"PRIx64"kB\n",
            socket_id, page_sz >> 10);

eal_memseg_list_init(struct rte_memseg_list *msl, uint64_t page_sz,
        int n_segs, int socket_id, int type_msl_idx, bool heap)
    char name[RTE_FBARRAY_NAME_LEN];

    snprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,
            type_msl_idx);

    return eal_memseg_list_init_named(
            msl, name, page_sz, n_segs, socket_id, heap);

eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags)
    size_t page_sz, mem_sz;
    void *addr;

    page_sz = msl->page_sz;
    mem_sz = page_sz * msl->memseg_arr.len;

    addr = eal_get_virtual_area(
            msl->base_va, &mem_sz, page_sz, 0, reserve_flags);
#ifndef RTE_EXEC_ENV_WINDOWS
        /* The hint would be misleading on Windows, because address
         * is by default system-selected (base VA = 0).
         * However, this function is called from many places,
         * including common code, so don't duplicate the message.
         */
        if (rte_errno == EADDRNOTAVAIL)
            RTE_LOG(ERR, EAL, "Cannot reserve %llu bytes at [%p] - "
                    "please use '--" OPT_BASE_VIRTADDR "' option\n",
                    (unsigned long long)mem_sz, msl->base_va);
#endif
250 RTE_LOG(DEBUG, EAL, "VA reserved for memseg list at %p, size %zx\n",
257 eal_memseg_list_populate(struct rte_memseg_list *msl, void *addr, int n_segs)
259 size_t page_sz = msl->page_sz;
262 for (i = 0; i < n_segs; i++) {
263 struct rte_fbarray *arr = &msl->memseg_arr;
264 struct rte_memseg *ms = rte_fbarray_get(arr, i);
266 if (rte_eal_iova_mode() == RTE_IOVA_VA)
267 ms->iova = (uintptr_t)addr;
269 ms->iova = RTE_BAD_IOVA;
271 ms->hugepage_sz = page_sz;
275 rte_fbarray_set_used(arr, i);
277 addr = RTE_PTR_ADD(addr, page_sz);
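
/*
 * Illustrative sketch (not part of EAL): the usual life cycle of a memseg
 * list as driven by the platform-specific memseg init code. The page size,
 * segment count, socket and list index below are arbitrary example values.
 *
 *    static int
 *    example_setup_memseg_list(void)
 *    {
 *        struct rte_mem_config *mcfg =
 *                rte_eal_get_configuration()->mem_config;
 *        struct rte_memseg_list *msl = &mcfg->memsegs[0];
 *
 *        if (eal_memseg_list_init(msl, RTE_PGSIZE_2M, 128, 0, 0, true))
 *            return -1;
 *        if (eal_memseg_list_alloc(msl, 0))
 *            return -1;
 *        // legacy init would additionally mark every segment as used:
 *        eal_memseg_list_populate(msl, msl->base_va, 128);
 *        return 0;
 *    }
 */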

static struct rte_memseg *
virt2memseg(const void *addr, const struct rte_memseg_list *msl)
    const struct rte_fbarray *arr;

    /* a memseg list was specified, check if it's the right one */
    start = msl->base_va;
    end = RTE_PTR_ADD(start, msl->len);

    if (addr < start || addr >= end)

    /* now, calculate index */
    arr = &msl->memseg_arr;
    ms_idx = RTE_PTR_DIFF(addr, msl->base_va) / msl->page_sz;
    return rte_fbarray_get(arr, ms_idx);

static struct rte_memseg_list *
virt2memseg_list(const void *addr)
    struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
    struct rte_memseg_list *msl;

    for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
        msl = &mcfg->memsegs[msl_idx];

        start = msl->base_va;
        end = RTE_PTR_ADD(start, msl->len);
        if (addr >= start && addr < end)

    /* if we didn't find our memseg list */
    if (msl_idx == RTE_MAX_MEMSEG_LISTS)

struct rte_memseg_list *
rte_mem_virt2memseg_list(const void *addr)
    return virt2memseg_list(addr);

struct virtiova {
    rte_iova_t iova;
    void *virt;
};

find_virt(const struct rte_memseg_list *msl __rte_unused,
        const struct rte_memseg *ms, void *arg)
    struct virtiova *vi = arg;
    if (vi->iova >= ms->iova && vi->iova < (ms->iova + ms->len)) {
        size_t offset = vi->iova - ms->iova;
        vi->virt = RTE_PTR_ADD(ms->addr, offset);

find_virt_legacy(const struct rte_memseg_list *msl __rte_unused,
        const struct rte_memseg *ms, size_t len, void *arg)
    struct virtiova *vi = arg;
    if (vi->iova >= ms->iova && vi->iova < (ms->iova + len)) {
        size_t offset = vi->iova - ms->iova;
        vi->virt = RTE_PTR_ADD(ms->addr, offset);

rte_mem_iova2virt(rte_iova_t iova)
    struct virtiova vi;

    memset(&vi, 0, sizeof(vi));
    vi.iova = iova;

    /* for legacy mem, we can get away with scanning VA-contiguous segments,
     * as we know they are PA-contiguous as well
     */
    if (internal_config.legacy_mem)
        rte_memseg_contig_walk(find_virt_legacy, &vi);
    else
        rte_memseg_walk(find_virt, &vi);

    return vi.virt;

rte_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl)
    return virt2memseg(addr, msl != NULL ? msl :
            rte_mem_virt2memseg_list(addr));
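
/*
 * Illustrative sketch (not part of EAL): translating between a virtual
 * address, its memseg and its IOVA for DPDK-managed memory. The helper name
 * and its parameter are examples only.
 *
 *    static void
 *    example_translate(const void *addr)
 *    {
 *        const struct rte_memseg *ms = rte_mem_virt2memseg(addr, NULL);
 *        rte_iova_t iova;
 *
 *        if (ms == NULL)
 *            return;    // not DPDK-managed memory
 *        iova = ms->iova + RTE_PTR_DIFF(addr, ms->addr);
 *        printf("VA %p -> IOVA 0x%" PRIx64 " -> VA %p\n",
 *                addr, iova, rte_mem_iova2virt(iova));
 *    }
 */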

physmem_size(const struct rte_memseg_list *msl, void *arg)
    uint64_t *total_len = arg;

    *total_len += msl->memseg_arr.count * msl->page_sz;

/* get the total size of memory */
rte_eal_get_physmem_size(void)
    uint64_t total_len = 0;

    rte_memseg_list_walk(physmem_size, &total_len);
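
/*
 * Illustrative sketch (not part of EAL): reporting how much memory EAL
 * currently manages, e.g. right after rte_eal_init().
 *
 *    printf("DPDK manages %" PRIu64 " MiB of memory\n",
 *            rte_eal_get_physmem_size() / (1024 * 1024));
 */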

dump_memseg(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
        void *arg)
    struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
    int msl_idx, ms_idx, fd;
    FILE *f = arg;

    msl_idx = msl - mcfg->memsegs;
    if (msl_idx < 0 || msl_idx >= RTE_MAX_MEMSEG_LISTS)

    ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);

    fd = eal_memalloc_get_seg_fd(msl_idx, ms_idx);
    fprintf(f, "Segment %i-%i: IOVA:0x%"PRIx64", len:%zu, "
            "virt:%p, socket_id:%"PRId32", "
            "hugepage_sz:%"PRIu64", nchannel:%"PRIx32", "
            "nrank:%"PRIx32" fd:%i\n",

/*
 * Defined here because it is declared in rte_memory.h, but the actual
 * implementation is in eal_common_memalloc.c, like all other memalloc
 * internals.
 */
rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb,
        void *arg)
    /* FreeBSD boots with legacy mem enabled by default */
    if (internal_config.legacy_mem) {
        RTE_LOG(DEBUG, EAL, "Registering mem event callbacks not supported\n");

    return eal_memalloc_mem_event_callback_register(name, clb, arg);

rte_mem_event_callback_unregister(const char *name, void *arg)
    /* FreeBSD boots with legacy mem enabled by default */
    if (internal_config.legacy_mem) {
        RTE_LOG(DEBUG, EAL, "Unregistering mem event callbacks not supported\n");

    return eal_memalloc_mem_event_callback_unregister(name, arg);

rte_mem_alloc_validator_register(const char *name,
        rte_mem_alloc_validator_t clb, int socket_id, size_t limit)
    /* FreeBSD boots with legacy mem enabled by default */
    if (internal_config.legacy_mem) {
        RTE_LOG(DEBUG, EAL, "Registering mem alloc validators not supported\n");

    return eal_memalloc_mem_alloc_validator_register(name, clb, socket_id,
            limit);

rte_mem_alloc_validator_unregister(const char *name, int socket_id)
    /* FreeBSD boots with legacy mem enabled by default */
    if (internal_config.legacy_mem) {
        RTE_LOG(DEBUG, EAL, "Unregistering mem alloc validators not supported\n");

    return eal_memalloc_mem_alloc_validator_unregister(name, socket_id);

/* Dump the physical memory layout on console */
rte_dump_physmem_layout(FILE *f)
    rte_memseg_walk(dump_memseg, f);
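
/*
 * Illustrative sketch (not part of EAL): an application can print the
 * current segment layout to any stream, typically stdout:
 *
 *    rte_dump_physmem_layout(stdout);
 */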

check_iova(const struct rte_memseg_list *msl __rte_unused,
        const struct rte_memseg *ms, void *arg)
    uint64_t *mask = arg;
    rte_iova_t iova;

    /* higher address within segment */
    iova = (ms->iova + ms->len) - 1;

    RTE_LOG(DEBUG, EAL, "memseg iova %"PRIx64", len %zx, out of range\n",
            ms->iova, ms->len);

    RTE_LOG(DEBUG, EAL, "\tusing dma mask %"PRIx64"\n", *mask);

#define MAX_DMA_MASK_BITS 63

/* check memseg iovas are within the required range based on dma mask */
check_dma_mask(uint8_t maskbits, bool thread_unsafe)
    struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
    uint64_t mask;
    int ret;

    /* Sanity check: we only support mask widths that can be handled with
     * 64-bit variables; any higher value is likely wrong. */
    if (maskbits > MAX_DMA_MASK_BITS) {
        RTE_LOG(ERR, EAL, "wrong dma mask size %u (Max: %u)\n",
                maskbits, MAX_DMA_MASK_BITS);

    /* create dma mask */
    mask = ~((1ULL << maskbits) - 1);

    if (thread_unsafe)
        ret = rte_memseg_walk_thread_unsafe(check_iova, &mask);
    else
        ret = rte_memseg_walk(check_iova, &mask);

    /*
     * Dma mask precludes hugepage usage.
     * This device cannot be used and we do not need to keep
     * the dma mask.
     */

    /*
     * we need to keep the more restricted maskbit for checking
     * potential dynamic memory allocation in the future.
     */
    mcfg->dma_maskbits = mcfg->dma_maskbits == 0 ? maskbits :
            RTE_MIN(mcfg->dma_maskbits, maskbits);

rte_mem_check_dma_mask(uint8_t maskbits)
    return check_dma_mask(maskbits, false);

rte_mem_check_dma_mask_thread_unsafe(uint8_t maskbits)
    return check_dma_mask(maskbits, true);

/*
 * Set dma mask to use when memory initialization is done.
 *
 * This function should ONLY be used by code executed before the memory
 * initialization. PMDs should use rte_mem_check_dma_mask if the device has
 * addressing limitations.
 */
rte_mem_set_dma_mask(uint8_t maskbits)
    struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;

    mcfg->dma_maskbits = mcfg->dma_maskbits == 0 ? maskbits :
            RTE_MIN(mcfg->dma_maskbits, maskbits);
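
/*
 * Illustrative sketch (not part of EAL): a driver for a device that can only
 * address part of the IOVA space verifies that all DPDK memory fits under
 * its mask before using the device. The 40-bit width and helper name are
 * arbitrary example values.
 *
 *    static int
 *    example_check_device_mask(void)
 *    {
 *        if (rte_mem_check_dma_mask(40) != 0)
 *            return -ENODEV;    // some memory is not addressable
 *        return 0;
 *    }
 */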

/* return the number of memory channels */
unsigned rte_memory_get_nchannel(void)
    return rte_eal_get_configuration()->mem_config->nchannel;

/* return the number of memory ranks */
unsigned rte_memory_get_nrank(void)
    return rte_eal_get_configuration()->mem_config->nrank;

rte_eal_memdevice_init(void)
    struct rte_config *config;

    if (rte_eal_process_type() == RTE_PROC_SECONDARY)

    config = rte_eal_get_configuration();
    config->mem_config->nchannel = internal_config.force_nchannel;
    config->mem_config->nrank = internal_config.force_nrank;

/* Lock page in physical memory and prevent it from being swapped out. */
rte_mem_lock_page(const void *virt)
    uintptr_t virtual = (uintptr_t)virt;
    size_t page_size = rte_mem_page_size();
    uintptr_t aligned = RTE_PTR_ALIGN_FLOOR(virtual, page_size);
    return rte_mem_lock((void *)aligned, page_size);
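
/*
 * Illustrative sketch (not part of EAL): pinning the page that holds a
 * control structure so it cannot be swapped out. The structure is a made-up
 * example.
 *
 *    struct example_ctl { uint64_t counter; } ctl;
 *
 *    if (rte_mem_lock_page(&ctl) != 0)
 *        RTE_LOG(WARNING, EAL, "could not lock page: %s\n",
 *                rte_strerror(rte_errno));
 */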

rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg)
    struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
    int i, ms_idx, ret = 0;

    for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
        struct rte_memseg_list *msl = &mcfg->memsegs[i];
        const struct rte_memseg *ms;
        struct rte_fbarray *arr;

        if (msl->memseg_arr.count == 0)

        arr = &msl->memseg_arr;

        ms_idx = rte_fbarray_find_next_used(arr, 0);
        while (ms_idx >= 0) {
            ms = rte_fbarray_get(arr, ms_idx);

            /* find how many more segments there are, starting with
             * this one.
             */
            n_segs = rte_fbarray_find_contig_used(arr, ms_idx);
            len = n_segs * msl->page_sz;

            ret = func(msl, ms, len, arg);

            ms_idx = rte_fbarray_find_next_used(arr,
                    ms_idx + n_segs);

rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
    int ret;

    /* do not allow allocations/frees/init while we iterate */
    rte_mcfg_mem_read_lock();
    ret = rte_memseg_contig_walk_thread_unsafe(func, arg);
    rte_mcfg_mem_read_unlock();

    return ret;

rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg)
    struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
    int i, ms_idx, ret = 0;

    for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
        struct rte_memseg_list *msl = &mcfg->memsegs[i];
        const struct rte_memseg *ms;
        struct rte_fbarray *arr;

        if (msl->memseg_arr.count == 0)

        arr = &msl->memseg_arr;

        ms_idx = rte_fbarray_find_next_used(arr, 0);
        while (ms_idx >= 0) {
            ms = rte_fbarray_get(arr, ms_idx);
            ret = func(msl, ms, arg);

            ms_idx = rte_fbarray_find_next_used(arr, ms_idx + 1);

rte_memseg_walk(rte_memseg_walk_t func, void *arg)
    int ret;

    /* do not allow allocations/frees/init while we iterate */
    rte_mcfg_mem_read_lock();
    ret = rte_memseg_walk_thread_unsafe(func, arg);
    rte_mcfg_mem_read_unlock();

    return ret;
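
/*
 * Illustrative sketch (not part of EAL): a walk callback that counts how
 * many segments are currently allocated. The callback and counter names are
 * examples only; returning non-zero from the callback stops the walk.
 *
 *    static int
 *    example_count_cb(const struct rte_memseg_list *msl __rte_unused,
 *            const struct rte_memseg *ms __rte_unused, void *arg)
 *    {
 *        unsigned int *count = arg;
 *
 *        (*count)++;
 *        return 0;    // keep walking
 *    }
 *
 *    unsigned int count = 0;
 *    rte_memseg_walk(example_count_cb, &count);
 */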

rte_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg)
    struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;

    for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
        struct rte_memseg_list *msl = &mcfg->memsegs[i];

        if (msl->base_va == NULL)

        ret = func(msl, arg);

rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg)
    int ret;

    /* do not allow allocations/frees/init while we iterate */
    rte_mcfg_mem_read_lock();
    ret = rte_memseg_list_walk_thread_unsafe(func, arg);
    rte_mcfg_mem_read_unlock();

    return ret;

rte_memseg_get_fd_thread_unsafe(const struct rte_memseg *ms)
    struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
    struct rte_memseg_list *msl;
    struct rte_fbarray *arr;
    int msl_idx, seg_idx, ret;

    msl = rte_mem_virt2memseg_list(ms->addr);

    arr = &msl->memseg_arr;

    msl_idx = msl - mcfg->memsegs;
    seg_idx = rte_fbarray_find_idx(arr, ms);

    if (!rte_fbarray_is_used(arr, seg_idx)) {

    /* segment fd API is not supported for external segments */

    ret = eal_memalloc_get_seg_fd(msl_idx, seg_idx);

rte_memseg_get_fd(const struct rte_memseg *ms)
    int ret;

    rte_mcfg_mem_read_lock();
    ret = rte_memseg_get_fd_thread_unsafe(ms);
    rte_mcfg_mem_read_unlock();

    return ret;

rte_memseg_get_fd_offset_thread_unsafe(const struct rte_memseg *ms,
        size_t *offset)
    struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
    struct rte_memseg_list *msl;
    struct rte_fbarray *arr;
    int msl_idx, seg_idx, ret;

    if (ms == NULL || offset == NULL) {

    msl = rte_mem_virt2memseg_list(ms->addr);

    arr = &msl->memseg_arr;

    msl_idx = msl - mcfg->memsegs;
    seg_idx = rte_fbarray_find_idx(arr, ms);

    if (!rte_fbarray_is_used(arr, seg_idx)) {

    /* segment fd API is not supported for external segments */

    ret = eal_memalloc_get_seg_fd_offset(msl_idx, seg_idx, offset);

rte_memseg_get_fd_offset(const struct rte_memseg *ms, size_t *offset)
    int ret;

    rte_mcfg_mem_read_lock();
    ret = rte_memseg_get_fd_offset_thread_unsafe(ms, offset);
    rte_mcfg_mem_read_unlock();

    return ret;
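
/*
 * Illustrative sketch (not part of EAL): retrieving the file descriptor and
 * offset that back the segment containing a given virtual address, e.g. to
 * share hugepage memory with another process. The helper name and 'addr'
 * parameter are examples only.
 *
 *    static int
 *    example_get_backing_fd(const void *addr, size_t *offset)
 *    {
 *        const struct rte_memseg *ms = rte_mem_virt2memseg(addr, NULL);
 *        int fd;
 *
 *        if (ms == NULL)
 *            return -1;
 *        fd = rte_memseg_get_fd(ms);
 *        if (fd < 0 || rte_memseg_get_fd_offset(ms, offset) < 0)
 *            return -1;
 *        return fd;
 *    }
 */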

rte_extmem_register(void *va_addr, size_t len, rte_iova_t iova_addrs[],
        unsigned int n_pages, size_t page_sz)
    struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
    unsigned int socket_id, n;

    if (va_addr == NULL || page_sz == 0 || len == 0 ||
            !rte_is_power_of_2(page_sz) ||
            RTE_ALIGN(len, page_sz) != len ||
            ((len / page_sz) != n_pages && iova_addrs != NULL) ||
            !rte_is_aligned(va_addr, page_sz)) {

    rte_mcfg_mem_write_lock();

    /* make sure the segment doesn't already exist */
    if (malloc_heap_find_external_seg(va_addr, len) != NULL) {

    /* get next available socket ID */
    socket_id = mcfg->next_socket_id;
    if (socket_id > INT32_MAX) {
899 RTE_LOG(ERR, EAL, "Cannot assign new socket ID's\n");
905 /* we can create a new memseg */
907 if (malloc_heap_create_external_seg(va_addr, iova_addrs, n,
908 page_sz, "extmem", socket_id) == NULL) {
913 /* memseg list successfully created - increment next socket ID */
914 mcfg->next_socket_id++;
916 rte_mcfg_mem_write_unlock();

rte_extmem_unregister(void *va_addr, size_t len)
    struct rte_memseg_list *msl;

    if (va_addr == NULL || len == 0) {

    rte_mcfg_mem_write_lock();

    /* find our segment */
    msl = malloc_heap_find_external_seg(va_addr, len);

    ret = malloc_heap_destroy_external_seg(msl);

    rte_mcfg_mem_write_unlock();

sync_memory(void *va_addr, size_t len, bool attach)
    struct rte_memseg_list *msl;

    if (va_addr == NULL || len == 0) {

    rte_mcfg_mem_write_lock();

    /* find our segment */
    msl = malloc_heap_find_external_seg(va_addr, len);

    if (attach)
        ret = rte_fbarray_attach(&msl->memseg_arr);
    else
        ret = rte_fbarray_detach(&msl->memseg_arr);

    rte_mcfg_mem_write_unlock();

rte_extmem_attach(void *va_addr, size_t len)
    return sync_memory(va_addr, len, true);

rte_extmem_detach(void *va_addr, size_t len)
    return sync_memory(va_addr, len, false);
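
/*
 * Illustrative sketch (not part of EAL): a minimal external-memory flow in
 * the primary process, assuming an anonymous mapping with unknown IOVAs
 * (iova_addrs == NULL). The length is an arbitrary example value and mmap
 * needs <sys/mman.h>.
 *
 *    size_t len = 2 * 1024 * 1024;
 *    void *va = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *    if (va == MAP_FAILED)
 *        return -1;
 *    if (rte_extmem_register(va, len, NULL, 0, rte_mem_page_size()) != 0) {
 *        munmap(va, len);
 *        return -1;
 *    }
 *    // secondary processes can now map it via rte_extmem_attach()
 *    rte_extmem_unregister(va, len);
 *    munmap(va, len);
 */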

/* init memory subsystem */
rte_eal_memory_init(void)
    struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;

    RTE_LOG(DEBUG, EAL, "Setting up physically contiguous memory...\n");

    /* lock mem hotplug here, to prevent races while we init */
    rte_mcfg_mem_read_lock();

    if (rte_eal_memseg_init() < 0)

    if (eal_memalloc_init() < 0)

    retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
            rte_eal_hugepage_init() :
            rte_eal_hugepage_attach();

    if (internal_config.no_shconf == 0 && rte_eal_memdevice_init() < 0)

    rte_mcfg_mem_read_unlock();