/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/queue.h>

#include <rte_fbarray.h>
#include <rte_memory.h>
#include <rte_log.h>
#include <rte_eal_memconfig.h>
#include <rte_errno.h>

#include "eal_memalloc.h"
#include "eal_private.h"
#include "eal_internal_cfg.h"
#include "malloc_heap.h"
/*
 * Try to mmap *size bytes of anonymous memory. If it is successful, return
 * the pointer to the mmap'd area and keep *size unmodified. Else, retry
 * with a smaller zone: decrease *size by page_sz until it reaches 0. In
 * this case, return NULL. Note: this function returns an address which is
 * aligned to the requested page size.
 */

#define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"
static void *next_baseaddr;
static uint64_t system_page_sz;
/*
 * The Linux kernel uses a really high address as the starting address for
 * serving mmap calls. If there are addressing limitations and IOVA mode is
 * VA, this starting address is likely too high for those devices. However,
 * it is possible to use a lower address in the process virtual address
 * space, as with 64 bits there is a lot of available space.
 *
 * Currently known limitations are 39 or 40 bits. Setting the starting
 * address at 4GB implies there are 508GB or 1020GB for mapping the
 * available hugepages. This is likely enough for most systems, although
 * a device with addressing limitations should call rte_mem_check_dma_mask
 * to ensure all memory is within the supported range.
 */
static uint64_t baseaddr = 0x100000000;
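/*
 * Worked example of the figures above: a 39-bit limit gives an addressable
 * range of 2^39 bytes = 512GB; starting mappings at 4GB leaves
 * 512GB - 4GB = 508GB. Likewise, 40 bits gives 1024GB - 4GB = 1020GB.
 */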
void *
eal_get_virtual_area(void *requested_addr, size_t *size,
		size_t page_sz, int flags, int mmap_flags)
{
	bool addr_is_hint, allow_shrink, unmap, no_align;
	uint64_t map_sz;
	void *mapped_addr, *aligned_addr;

	if (system_page_sz == 0)
		system_page_sz = sysconf(_SC_PAGESIZE);

	mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;

	RTE_LOG(DEBUG, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);

	addr_is_hint = (flags & EAL_VIRTUAL_AREA_ADDR_IS_HINT) > 0;
	allow_shrink = (flags & EAL_VIRTUAL_AREA_ALLOW_SHRINK) > 0;
	unmap = (flags & EAL_VIRTUAL_AREA_UNMAP) > 0;
	if (next_baseaddr == NULL && internal_config.base_virtaddr != 0 &&
			rte_eal_process_type() == RTE_PROC_PRIMARY)
		next_baseaddr = (void *) internal_config.base_virtaddr;

	if (next_baseaddr == NULL && internal_config.base_virtaddr == 0 &&
			rte_eal_process_type() == RTE_PROC_PRIMARY)
		next_baseaddr = (void *) baseaddr;

	if (requested_addr == NULL && next_baseaddr != NULL) {
		requested_addr = next_baseaddr;
		requested_addr = RTE_PTR_ALIGN(requested_addr, page_sz);
	}
	/* we don't need alignment of the resulting pointer in the following
	 * cases:
	 *
	 * 1. page size is equal to system page size
	 * 2. we have a requested address, and it is page-aligned, and we will
	 *    be discarding the address if we get a different one.
	 *
	 * for all other cases, alignment is potentially necessary.
	 */
	no_align = (requested_addr != NULL &&
		requested_addr == RTE_PTR_ALIGN(requested_addr, page_sz) &&
		!addr_is_hint) ||
		page_sz == system_page_sz;

	do {
		map_sz = no_align ? *size : *size + page_sz;
		if (map_sz > SIZE_MAX) {
			RTE_LOG(ERR, EAL, "Map size too big\n");
			return NULL;
		}

		mapped_addr = mmap(requested_addr, (size_t)map_sz, PROT_READ,
				mmap_flags, -1, 0);
		if (mapped_addr == MAP_FAILED && allow_shrink)
			*size -= page_sz;

		if (mapped_addr != MAP_FAILED && addr_is_hint &&
				mapped_addr != requested_addr) {
			/* hint was not used. Try with another offset */
			munmap(mapped_addr, map_sz);
			mapped_addr = MAP_FAILED;
			next_baseaddr = RTE_PTR_ADD(next_baseaddr, page_sz);
			requested_addr = next_baseaddr;
		}
	} while ((allow_shrink || addr_is_hint) &&
		mapped_addr == MAP_FAILED && *size > 0);
	/* align resulting address - if map failed, we will ignore the value
	 * anyway, so no need to add additional checks.
	 */
	aligned_addr = no_align ? mapped_addr :
			RTE_PTR_ALIGN(mapped_addr, page_sz);

	if (*size == 0) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area of any size: %s\n",
			strerror(errno));
		return NULL;
	} else if (mapped_addr == MAP_FAILED) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area: %s\n",
			strerror(errno));
		/* pass errno up the call chain */
		rte_errno = errno;
		return NULL;
	} else if (requested_addr != NULL && !addr_is_hint &&
			aligned_addr != requested_addr) {
		RTE_LOG(ERR, EAL, "Cannot get a virtual area at requested address: %p (got %p)\n",
			requested_addr, aligned_addr);
		munmap(mapped_addr, map_sz);
		rte_errno = EADDRNOTAVAIL;
		return NULL;
	} else if (requested_addr != NULL && addr_is_hint &&
			aligned_addr != requested_addr) {
		RTE_LOG(WARNING, EAL, "WARNING! Base virtual address hint (%p != %p) not respected!\n",
			requested_addr, aligned_addr);
		RTE_LOG(WARNING, EAL, "   This may cause issues with mapping memory into secondary processes\n");
	} else if (next_baseaddr != NULL) {
		next_baseaddr = RTE_PTR_ADD(aligned_addr, *size);
	}

	RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",
		aligned_addr, *size);
	if (unmap) {
		munmap(mapped_addr, map_sz);
	} else if (!no_align) {
		void *map_end, *aligned_end;
		size_t before_len, after_len;

		/* when we reserve space with alignment, we add alignment to
		 * mapping size. On 32-bit, if 1GB alignment was requested,
		 * this would waste 1GB of address space, which is a luxury we
		 * cannot afford. so, if alignment was performed, check if any
		 * unneeded address space can be unmapped.
		 */
		map_end = RTE_PTR_ADD(mapped_addr, (size_t)map_sz);
		aligned_end = RTE_PTR_ADD(aligned_addr, *size);

		/* unmap space before aligned mmap address */
		before_len = RTE_PTR_DIFF(aligned_addr, mapped_addr);
		if (before_len > 0)
			munmap(mapped_addr, before_len);

		/* unmap space after aligned end mmap address */
		after_len = RTE_PTR_DIFF(map_end, aligned_end);
		if (after_len > 0)
			munmap(aligned_end, after_len);
	}

	return aligned_addr;
}
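/*
 * Usage sketch (hypothetical caller, for illustration only): reserve a 1GB
 * region of address space for later page-by-page mapping, treating the base
 * address only as a hint and allowing the reservation to shrink on failure.
 *
 *	size_t mem_sz = RTE_PGSIZE_1G;
 *	void *va = eal_get_virtual_area(NULL, &mem_sz, RTE_PGSIZE_2M,
 *			EAL_VIRTUAL_AREA_ADDR_IS_HINT |
 *			EAL_VIRTUAL_AREA_ALLOW_SHRINK |
 *			EAL_VIRTUAL_AREA_UNMAP, 0);
 *	if (va == NULL)
 *		return -1;
 *	(on success, mem_sz holds the size actually reserved)
 */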
static struct rte_memseg *
virt2memseg(const void *addr, const struct rte_memseg_list *msl)
{
	const struct rte_fbarray *arr;
	void *start, *end;
	int ms_idx;

	if (msl == NULL)
		return NULL;

	/* a memseg list was specified, check if it's the right one */
	start = msl->base_va;
	end = RTE_PTR_ADD(start, msl->len);

	if (addr < start || addr >= end)
		return NULL;

	/* now, calculate index */
	arr = &msl->memseg_arr;
	ms_idx = RTE_PTR_DIFF(addr, msl->base_va) / msl->page_sz;
	return rte_fbarray_get(arr, ms_idx);
}
static struct rte_memseg_list *
virt2memseg_list(const void *addr)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	int msl_idx;

	for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
		void *start, *end;

		msl = &mcfg->memsegs[msl_idx];

		start = msl->base_va;
		end = RTE_PTR_ADD(start, msl->len);
		if (addr >= start && addr < end)
			break;
	}
	/* if we didn't find our memseg list */
	if (msl_idx == RTE_MAX_MEMSEG_LISTS)
		return NULL;
	return msl;
}
__rte_experimental struct rte_memseg_list *
rte_mem_virt2memseg_list(const void *addr)
{
	return virt2memseg_list(addr);
}
struct virtiova {
	rte_iova_t iova;
	void *virt;
};
static int
find_virt(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, void *arg)
{
	struct virtiova *vi = arg;
	if (vi->iova >= ms->iova && vi->iova < (ms->iova + ms->len)) {
		size_t offset = vi->iova - ms->iova;
		vi->virt = RTE_PTR_ADD(ms->addr, offset);
		return 1;
	}
	return 0;
}

static int
find_virt_legacy(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, size_t len, void *arg)
{
	struct virtiova *vi = arg;
	if (vi->iova >= ms->iova && vi->iova < (ms->iova + len)) {
		size_t offset = vi->iova - ms->iova;
		vi->virt = RTE_PTR_ADD(ms->addr, offset);
		return 1;
	}
	return 0;
}
__rte_experimental void *
rte_mem_iova2virt(rte_iova_t iova)
{
	struct virtiova vi;

	memset(&vi, 0, sizeof(vi));
	vi.iova = iova;

	/* for legacy mem, we can get away with scanning VA-contiguous segments,
	 * as we know they are PA-contiguous as well
	 */
	if (internal_config.legacy_mem)
		rte_memseg_contig_walk(find_virt_legacy, &vi);
	else
		rte_memseg_walk(find_virt, &vi);

	return vi.virt;
}
__rte_experimental struct rte_memseg *
rte_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl)
{
	return virt2memseg(addr, msl != NULL ? msl :
			rte_mem_virt2memseg_list(addr));
}
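/*
 * Usage sketch (hypothetical caller, for illustration only): translate a
 * virtual address to its IOVA via its memseg, and an IOVA back to a virtual
 * address.
 *
 *	struct rte_memseg *ms = rte_mem_virt2memseg(addr, NULL);
 *	if (ms != NULL) {
 *		rte_iova_t iova = ms->iova + RTE_PTR_DIFF(addr, ms->addr);
 *		void *virt = rte_mem_iova2virt(iova);   (virt == addr here)
 *	}
 */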
static int
physmem_size(const struct rte_memseg_list *msl, void *arg)
{
	uint64_t *total_len = arg;

	*total_len += msl->memseg_arr.count * msl->page_sz;
	return 0;
}

/* get the total size of memory */
uint64_t
rte_eal_get_physmem_size(void)
{
	uint64_t total_len = 0;

	rte_memseg_list_walk(physmem_size, &total_len);
	return total_len;
}
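/*
 * Usage sketch (hypothetical caller, for illustration only):
 *
 *	printf("EAL-managed memory: %" PRIu64 " bytes\n",
 *		rte_eal_get_physmem_size());
 */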
static int
dump_memseg(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
		void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int msl_idx, ms_idx, fd;
	FILE *f = arg;

	msl_idx = msl - mcfg->memsegs;
	if (msl_idx < 0 || msl_idx >= RTE_MAX_MEMSEG_LISTS)
		return -1;

	ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
	fd = eal_memalloc_get_seg_fd(msl_idx, ms_idx);
	fprintf(f, "Segment %i-%i: IOVA:0x%"PRIx64", len:%zu, "
			"virt:%p, socket_id:%"PRId32", "
			"hugepage_sz:%"PRIu64", nchannel:%"PRIx32", "
			"nrank:%"PRIx32" fd:%i\n",
			msl_idx, ms_idx,
			ms->iova, ms->len, ms->addr, ms->socket_id,
			ms->hugepage_sz, mcfg->nchannel, mcfg->nrank, fd);
	return 0;
}
/*
 * Defined here because it is declared in rte_memory.h, but the actual
 * implementation is in eal_common_memalloc.c, like all other memalloc
 * internals.
 */
int __rte_experimental
rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb,
		void *arg)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Registering mem event callbacks not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_event_callback_register(name, clb, arg);
}
int __rte_experimental
rte_mem_event_callback_unregister(const char *name, void *arg)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Unregistering mem event callbacks not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_event_callback_unregister(name, arg);
}
int __rte_experimental
rte_mem_alloc_validator_register(const char *name,
		rte_mem_alloc_validator_t clb, int socket_id, size_t limit)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Registering mem alloc validators not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_alloc_validator_register(name, clb, socket_id,
			limit);
}
int __rte_experimental
rte_mem_alloc_validator_unregister(const char *name, int socket_id)
{
	/* FreeBSD boots with legacy mem enabled by default */
	if (internal_config.legacy_mem) {
		RTE_LOG(DEBUG, EAL, "Unregistering mem alloc validators not supported\n");
		rte_errno = ENOTSUP;
		return -1;
	}
	return eal_memalloc_mem_alloc_validator_unregister(name, socket_id);
}
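/*
 * Usage sketch (hypothetical driver code, for illustration only): get
 * notified whenever EAL maps or unmaps hugepage memory, e.g. to refresh
 * DMA mappings. The callback signature follows rte_mem_event_callback_t
 * from rte_memory.h.
 *
 *	static void
 *	my_mem_event_cb(enum rte_mem_event event_type, const void *addr,
 *			size_t len, void *arg __rte_unused)
 *	{
 *		if (event_type == RTE_MEM_EVENT_ALLOC)
 *			RTE_LOG(DEBUG, EAL, "mapped %zu bytes at %p\n", len, addr);
 *	}
 *
 *	rte_mem_event_callback_register("my-driver", my_mem_event_cb, NULL);
 */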
/* Dump the physical memory layout on console */
void
rte_dump_physmem_layout(FILE *f)
{
	rte_memseg_walk(dump_memseg, f);
}
static int
check_iova(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, void *arg)
{
	uint64_t *mask = arg;
	uint64_t iova;

	/* higher address within segment */
	iova = (ms->iova + ms->len) - 1;
	if (!(iova & *mask))
		return 0;

	RTE_LOG(DEBUG, EAL, "memseg iova %"PRIx64", len %zx, out of range\n",
			ms->iova, ms->len);
	RTE_LOG(DEBUG, EAL, "\tusing dma mask %"PRIx64"\n", *mask);
	return 1;
}
#define MAX_DMA_MASK_BITS 63

/* check memseg iovas are within the required range based on dma mask */
static int __rte_experimental
check_dma_mask(uint8_t maskbits, bool thread_unsafe)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	uint64_t mask;
	int ret;

	/* Sanity check: we only support mask widths that can be handled with
	 * 64-bit variables. Any higher value is likely wrong. */
	if (maskbits > MAX_DMA_MASK_BITS) {
		RTE_LOG(ERR, EAL, "wrong dma mask size %u (Max: %u)\n",
				maskbits, MAX_DMA_MASK_BITS);
		return -1;
	}

	/* create dma mask */
	mask = ~((1ULL << maskbits) - 1);

	if (thread_unsafe)
		ret = rte_memseg_walk_thread_unsafe(check_iova, &mask);
	else
		ret = rte_memseg_walk(check_iova, &mask);

	if (ret) {
		/*
		 * The DMA mask precludes hugepage usage.
		 * This device cannot be used and we do not need to keep
		 * the mask.
		 */
		return -1;
	}

	/* we need to keep the more restricted maskbit for checking
	 * potential dynamic memory allocation in the future.
	 */
	mcfg->dma_maskbits = mcfg->dma_maskbits == 0 ? maskbits :
			RTE_MIN(mcfg->dma_maskbits, maskbits);
	return 0;
}
int __rte_experimental
rte_mem_check_dma_mask(uint8_t maskbits)
{
	return check_dma_mask(maskbits, false);
}

int __rte_experimental
rte_mem_check_dma_mask_thread_unsafe(uint8_t maskbits)
{
	return check_dma_mask(maskbits, true);
}
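/*
 * Usage sketch (hypothetical PMD code, for illustration only): a device
 * limited to 40-bit DMA addressing verifies that all hugepage memory is
 * reachable before completing initialization.
 *
 *	if (rte_mem_check_dma_mask(40) != 0) {
 *		RTE_LOG(ERR, EAL, "memory exceeds the device's 40-bit DMA mask\n");
 *		return -ENODEV;
 *	}
 */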
/*
 * Set dma mask to use when memory initialization is done.
 *
 * This function should ONLY be used by code executed before the memory
 * initialization. PMDs should use rte_mem_check_dma_mask if the device has
 * addressing limitations.
 */
void __rte_experimental
rte_mem_set_dma_mask(uint8_t maskbits)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;

	mcfg->dma_maskbits = mcfg->dma_maskbits == 0 ? maskbits :
			RTE_MIN(mcfg->dma_maskbits, maskbits);
}
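/*
 * Usage sketch (hypothetical bus code running before memory initialization,
 * for illustration only): record a 39-bit addressing limit so that memory
 * initialization takes it into account.
 *
 *	rte_mem_set_dma_mask(39);
 */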
/* return the number of memory channels */
unsigned rte_memory_get_nchannel(void)
{
	return rte_eal_get_configuration()->mem_config->nchannel;
}

/* return the number of memory ranks */
unsigned rte_memory_get_nrank(void)
{
	return rte_eal_get_configuration()->mem_config->nrank;
}
static int
rte_eal_memdevice_init(void)
{
	struct rte_config *config;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	config = rte_eal_get_configuration();
	config->mem_config->nchannel = internal_config.force_nchannel;
	config->mem_config->nrank = internal_config.force_nrank;
	return 0;
}
/* Lock a page in physical memory and prevent it from being swapped. */
int
rte_mem_lock_page(const void *virt)
{
	unsigned long virtual = (unsigned long)virt;
	int page_size = getpagesize();
	unsigned long aligned = (virtual & ~(page_size - 1));
	return mlock((void *)aligned, page_size);
}
int __rte_experimental
rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, ms_idx, ret = 0;

	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];
		const struct rte_memseg *ms;
		struct rte_fbarray *arr;

		if (msl->memseg_arr.count == 0)
			continue;

		arr = &msl->memseg_arr;

		ms_idx = rte_fbarray_find_next_used(arr, 0);
		while (ms_idx >= 0) {
			int n_segs;
			size_t len;

			ms = rte_fbarray_get(arr, ms_idx);

			/* find how many more segments there are, starting with
			 * this one.
			 */
			n_segs = rte_fbarray_find_contig_used(arr, ms_idx);
			len = n_segs * msl->page_sz;

			ret = func(msl, ms, len, arg);
			if (ret)
				return ret;
			ms_idx = rte_fbarray_find_next_used(arr,
					ms_idx + n_segs);
		}
	}
	return 0;
}
int __rte_experimental
rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int ret;

	/* do not allow allocations/frees/init while we iterate */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	ret = rte_memseg_contig_walk_thread_unsafe(func, arg);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}
int __rte_experimental
rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, ms_idx, ret = 0;

	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];
		const struct rte_memseg *ms;
		struct rte_fbarray *arr;

		if (msl->memseg_arr.count == 0)
			continue;

		arr = &msl->memseg_arr;

		ms_idx = rte_fbarray_find_next_used(arr, 0);
		while (ms_idx >= 0) {
			ms = rte_fbarray_get(arr, ms_idx);
			ret = func(msl, ms, arg);
			if (ret)
				return ret;
			ms_idx = rte_fbarray_find_next_used(arr, ms_idx + 1);
		}
	}
	return 0;
}
int __rte_experimental
rte_memseg_walk(rte_memseg_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int ret;

	/* do not allow allocations/frees/init while we iterate */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	ret = rte_memseg_walk_thread_unsafe(func, arg);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}
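/*
 * Usage sketch (hypothetical caller, for illustration only): count all used
 * memsegs. The callback matches rte_memseg_walk_t; returning 0 continues
 * the walk, while a non-zero value stops it.
 *
 *	static int
 *	count_cb(const struct rte_memseg_list *msl __rte_unused,
 *			const struct rte_memseg *ms __rte_unused, void *arg)
 *	{
 *		(*(unsigned int *)arg)++;
 *		return 0;
 *	}
 *
 *	unsigned int n_segs = 0;
 *	rte_memseg_walk(count_cb, &n_segs);
 */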
int __rte_experimental
rte_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, ret = 0;

	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];

		if (msl->base_va == NULL)
			continue;

		ret = func(msl, arg);
		if (ret)
			return ret;
	}
	return 0;
}
int __rte_experimental
rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int ret;

	/* do not allow allocations/frees/init while we iterate */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	ret = rte_memseg_list_walk_thread_unsafe(func, arg);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);

	return ret;
}
int __rte_experimental
rte_memseg_get_fd_thread_unsafe(const struct rte_memseg *ms)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	struct rte_fbarray *arr;
	int msl_idx, seg_idx, ret;

	msl = rte_mem_virt2memseg_list(ms->addr);
	if (msl == NULL) {
		rte_errno = EINVAL;
		return -1;
	}
	arr = &msl->memseg_arr;

	msl_idx = msl - mcfg->memsegs;
	seg_idx = rte_fbarray_find_idx(arr, ms);

	if (!rte_fbarray_is_used(arr, seg_idx)) {
		rte_errno = ENOENT;
		return -1;
	}

	ret = eal_memalloc_get_seg_fd(msl_idx, seg_idx);
	return ret;
}
int __rte_experimental
rte_memseg_get_fd(const struct rte_memseg *ms)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int ret;

	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	ret = rte_memseg_get_fd_thread_unsafe(ms);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
	return ret;
}
int __rte_experimental
rte_memseg_get_fd_offset_thread_unsafe(const struct rte_memseg *ms,
		size_t *offset)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	struct rte_fbarray *arr;
	int msl_idx, seg_idx, ret;

	if (ms == NULL || offset == NULL) {
		rte_errno = EINVAL;
		return -1;
	}

	msl = rte_mem_virt2memseg_list(ms->addr);
	arr = &msl->memseg_arr;

	msl_idx = msl - mcfg->memsegs;
	seg_idx = rte_fbarray_find_idx(arr, ms);

	if (!rte_fbarray_is_used(arr, seg_idx)) {
		rte_errno = ENOENT;
		return -1;
	}

	ret = eal_memalloc_get_seg_fd_offset(msl_idx, seg_idx, offset);
	return ret;
}
int __rte_experimental
rte_memseg_get_fd_offset(const struct rte_memseg *ms, size_t *offset)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int ret;

	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
	ret = rte_memseg_get_fd_offset_thread_unsafe(ms, offset);
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
	return ret;
}
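/*
 * Usage sketch (hypothetical caller, for illustration only): retrieve the
 * file descriptor backing a segment and the segment's offset within that
 * file, e.g. to share the mapping with another process.
 *
 *	const struct rte_memseg *ms = rte_mem_virt2memseg(addr, NULL);
 *	size_t offset;
 *	int fd = rte_memseg_get_fd(ms);
 *
 *	if (fd >= 0 && rte_memseg_get_fd_offset(ms, &offset) == 0)
 *		RTE_LOG(DEBUG, EAL, "fd %d, offset %zu\n", fd, offset);
 */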
int __rte_experimental
rte_extmem_register(void *va_addr, size_t len, rte_iova_t iova_addrs[],
		unsigned int n_pages, size_t page_sz)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	unsigned int socket_id, n;
	int ret = 0;

	if (va_addr == NULL || page_sz == 0 || len == 0 ||
			!rte_is_power_of_2(page_sz) ||
			RTE_ALIGN(len, page_sz) != len ||
			((len / page_sz) != n_pages && iova_addrs != NULL) ||
			!rte_is_aligned(va_addr, page_sz)) {
		rte_errno = EINVAL;
		return -1;
	}
	rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);

	/* make sure the segment doesn't already exist */
	if (malloc_heap_find_external_seg(va_addr, len) != NULL) {
		rte_errno = EEXIST;
		ret = -1;
		goto unlock;
	}

	/* get next available socket ID */
	socket_id = mcfg->next_socket_id;
	if (socket_id > INT32_MAX) {
		RTE_LOG(ERR, EAL, "Cannot assign new socket IDs\n");
		rte_errno = ENOSPC;
		ret = -1;
		goto unlock;
	}

	/* we can create a new memseg */
	n = len / page_sz;
	if (malloc_heap_create_external_seg(va_addr, iova_addrs, n,
			page_sz, "extmem", socket_id) == NULL) {
		ret = -1;
		goto unlock;
	}

	/* memseg list successfully created - increment next socket ID */
	mcfg->next_socket_id++;
unlock:
	rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
	return ret;
}
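/*
 * Usage sketch (hypothetical caller, for illustration only): make EAL aware
 * of a 16MB externally allocated, 2MB-page-backed buffer. Passing NULL for
 * iova_addrs is accepted by the validation above when no IOVA table is
 * supplied; the registration is later undone with rte_extmem_unregister().
 *
 *	size_t len = 16 * RTE_PGSIZE_2M;
 *	void *ext_mem = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *	if (ext_mem != MAP_FAILED &&
 *			rte_extmem_register(ext_mem, len, NULL, 0,
 *				RTE_PGSIZE_2M) == 0)
 *		RTE_LOG(DEBUG, EAL, "registered external memory at %p\n", ext_mem);
 */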
int __rte_experimental
rte_extmem_unregister(void *va_addr, size_t len)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	int ret = 0;

	if (va_addr == NULL || len == 0) {
		rte_errno = EINVAL;
		return -1;
	}
	rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);

	/* find our segment */
	msl = malloc_heap_find_external_seg(va_addr, len);
	if (msl == NULL) {
		rte_errno = ENOENT;
		ret = -1;
		goto unlock;
	}

	ret = malloc_heap_destroy_external_seg(msl);
unlock:
	rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
	return ret;
}
/* init memory subsystem */
int
rte_eal_memory_init(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int retval;

	RTE_LOG(DEBUG, EAL, "Setting up physically contiguous memory...\n");

	/* lock mem hotplug here, to prevent races while we init */
	rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);

	if (rte_eal_memseg_init() < 0)
		goto fail;
	if (eal_memalloc_init() < 0)
		goto fail;

	retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
			rte_eal_hugepage_init() :
			rte_eal_hugepage_attach();
	if (retval < 0)
		goto fail;

	if (internal_config.no_shconf == 0 && rte_eal_memdevice_init() < 0)
		goto fail;

	return 0;
fail:
	rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
	return -1;
}