1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation.
3 * Copyright(c) 2013 6WIND S.A.
16 #include <sys/types.h>
18 #include <sys/queue.h>
20 #include <sys/resource.h>
23 #include <sys/ioctl.h>
27 #ifdef F_ADD_SEALS /* if file sealing is supported, so is memfd */
28 #include <linux/memfd.h>
29 #define MEMFD_SUPPORTED
31 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
36 #include <rte_errno.h>
38 #include <rte_memory.h>
39 #include <rte_launch.h>
41 #include <rte_per_lcore.h>
42 #include <rte_lcore.h>
43 #include <rte_common.h>
44 #include <rte_string_fns.h>
46 #include "eal_private.h"
47 #include "eal_memalloc.h"
48 #include "eal_memcfg.h"
49 #include "eal_internal_cfg.h"
50 #include "eal_filesystem.h"
51 #include "eal_hugepages.h"
52 #include "eal_options.h"
54 #define PFN_MASK_SIZE 8
58 * Huge page mapping under Linux
60 * To reserve a large contiguous amount of memory, we use the hugepage
61 * feature of Linux. For that, we need to have hugetlbfs mounted. This
62 * code will create many files in the hugetlbfs directory (one per page)
63 * and map them in virtual memory. For each page, we will retrieve its
64 * physical address and remap it in order to obtain a virtually
65 * contiguous zone as well as a physically contiguous zone.
68 static int phys_addrs_available = -1;
70 #define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"
72 uint64_t eal_get_baseaddr(void)
75 * The Linux kernel uses a really high address as the starting address
76 * for serving mmap calls. If there are addressing limitations and IOVA
77 * mode is VA, this starting address is likely too high for those
78 * devices. However, it is possible to use a lower address in the
79 * process virtual address space, as with 64 bits there is a lot of
82 * Currently known limitations are 39 or 40 bits. Setting the starting
83 * address at 4GB implies there are 508GB or 1020GB for mapping the
84 * available hugepages. This is likely enough for most systems, although
85 * a device with addressing limitations should call
86 * rte_mem_check_dma_mask to ensure all memory is within the supported
89 return 0x100000000ULL;
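/*
 * For illustration of the figures above: a 39-bit IOVA limit covers
 * 2^39 B = 512 GB and a 40-bit limit covers 2^40 B = 1024 GB. With the
 * base address fixed at 4 GB (0x100000000), the room left for hugepage
 * mappings is 512 - 4 = 508 GB or 1024 - 4 = 1020 GB respectively.
 */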
93 * Get physical address of any mapped virtual address in the current process.
96 rte_mem_virt2phy(const void *virtaddr)
99 uint64_t page, physaddr;
100 unsigned long virt_pfn;
104 if (phys_addrs_available == 0)
107 /* standard page size */
108 page_size = getpagesize();
110 fd = open("/proc/self/pagemap", O_RDONLY);
112 RTE_LOG(INFO, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
113 __func__, strerror(errno));
117 virt_pfn = (unsigned long)virtaddr / page_size;
118 offset = sizeof(uint64_t) * virt_pfn;
119 if (lseek(fd, offset, SEEK_SET) == (off_t) -1) {
120 RTE_LOG(INFO, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
121 __func__, strerror(errno));
126 retval = read(fd, &page, PFN_MASK_SIZE);
129 RTE_LOG(INFO, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
130 __func__, strerror(errno));
132 } else if (retval != PFN_MASK_SIZE) {
133 RTE_LOG(INFO, EAL, "%s(): read %d bytes from /proc/self/pagemap "
134 "but expected %d:\n",
135 __func__, retval, PFN_MASK_SIZE);
140 * the pfn (page frame number) is in bits 0-54 (see
141 * pagemap.txt in the Linux Documentation)
143 if ((page & 0x7fffffffffffffULL) == 0)
146 physaddr = ((page & 0x7fffffffffffffULL) * page_size)
147 + ((unsigned long)virtaddr % page_size);
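/*
 * Worked example (a sketch; the address and pfn are hypothetical, a
 * 4 KiB page size is assumed): for virtaddr 0x7f0000003456,
 * virt_pfn = 0x7f0000003456 / 4096 = 0x7f0000003, so the 8-byte pagemap
 * entry is read at offset 8 * 0x7f0000003. If that entry holds
 * pfn 0x1a2b3c, then
 * physaddr = 0x1a2b3c * 4096 + (0x7f0000003456 % 4096) = 0x1a2b3c456.
 */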
153 rte_mem_virt2iova(const void *virtaddr)
155 if (rte_eal_iova_mode() == RTE_IOVA_VA)
156 return (uintptr_t)virtaddr;
157 return rte_mem_virt2phy(virtaddr);
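/*
 * Minimal usage sketch (illustration only; "buf" is a hypothetical
 * allocation):
 *
 *	void *buf = rte_malloc(NULL, 4096, 0);
 *	rte_iova_t iova = rte_mem_virt2iova(buf);
 *	if (iova == RTE_BAD_IOVA)
 *		report the lookup failure;
 *
 * In IOVA-as-VA mode this simply returns the virtual address, otherwise
 * it falls back to the pagemap-based physical address lookup above.
 */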
161 * For each hugepage in hugepg_tbl, fill the physaddr value. We find
162 * it by browsing the /proc/self/pagemap special file.
165 find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
170 for (i = 0; i < hpi->num_pages[0]; i++) {
171 addr = rte_mem_virt2phy(hugepg_tbl[i].orig_va);
172 if (addr == RTE_BAD_PHYS_ADDR)
174 hugepg_tbl[i].physaddr = addr;
180 * For each hugepage in hugepg_tbl, fill the physaddr value sequentially.
183 set_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
186 static phys_addr_t addr;
188 for (i = 0; i < hpi->num_pages[0]; i++) {
189 hugepg_tbl[i].physaddr = addr;
190 addr += hugepg_tbl[i].size;
196 * Check whether address-space layout randomization is enabled in
197 * the kernel. This is important for multi-process, as it can prevent
198 * two processes from mapping data to the same virtual address.
200 * 0 - address space randomization disabled
201 * 1/2 - address space randomization enabled
202 * negative error code on error
208 int retval, fd = open(RANDOMIZE_VA_SPACE_FILE, O_RDONLY);
211 retval = read(fd, &c, 1);
221 default: return -EINVAL;
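/*
 * For reference, the value read from /proc/sys/kernel/randomize_va_space
 * means: 0 - ASLR disabled, 1 - conservative randomization, 2 - full
 * randomization (usually the kernel default). For multi-process testing,
 * ASLR can typically be disabled (as root) with
 * "sysctl -w kernel.randomize_va_space=0".
 */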
225 static sigjmp_buf huge_jmpenv;
227 static void huge_sigbus_handler(int signo __rte_unused)
229 siglongjmp(huge_jmpenv, 1);
232 /* Put sigsetjmp into a wrapper function to avoid a compiler error: any
233 * non-volatile, non-static local variable in the stack frame calling
234 * sigsetjmp might be clobbered by a call to longjmp.
236 static int huge_wrap_sigsetjmp(void)
238 return sigsetjmp(huge_jmpenv, 1);
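/*
 * Sketch of how this pair is meant to be used (see map_all_hugepages()
 * below): after installing huge_sigbus_handler for SIGBUS,
 *
 *	if (huge_wrap_sigsetjmp()) {
 *		SIGBUS arrived while touching the page:
 *		unmap it and account for the failure
 *	}
 *	*(int *)virtaddr = 0;	first write faults the hugepage in
 */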
241 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
242 /* Callback for numa library. */
243 void numa_error(char *where)
245 RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
250 * Mmap all hugepages of the hugepage table: it first opens a file in
251 * hugetlbfs, then mmap()s hugepage_sz bytes of data in it. If orig is set,
252 * the virtual address is stored in hugepg_tbl[i].orig_va, else it is stored
253 * in hugepg_tbl[i].final_va. The second mapping (when orig is 0) tries to
254 * map contiguous physical blocks in contiguous virtual blocks.
257 map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
258 uint64_t *essential_memory __rte_unused)
263 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
265 int essential_prev = 0;
267 struct bitmask *oldmask = NULL;
268 bool have_numa = true;
269 unsigned long maxnode = 0;
270 const struct internal_config *internal_conf =
271 eal_get_internal_configuration();
273 /* Check if kernel supports NUMA. */
274 if (numa_available() != 0) {
275 RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
280 RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
281 oldmask = numa_allocate_nodemask();
282 if (get_mempolicy(&oldpolicy, oldmask->maskp,
283 oldmask->size + 1, 0, 0) < 0) {
285 "Failed to get current mempolicy: %s. "
286 "Assuming MPOL_DEFAULT.\n", strerror(errno));
287 oldpolicy = MPOL_DEFAULT;
289 for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
290 if (internal_conf->socket_mem[i])
295 for (i = 0; i < hpi->num_pages[0]; i++) {
296 struct hugepage_file *hf = &hugepg_tbl[i];
297 uint64_t hugepage_sz = hpi->hugepage_sz;
299 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
303 for (j = 0; j < maxnode; j++)
304 if (essential_memory[j])
308 node_id = (node_id + 1) % maxnode;
309 while (!internal_conf->socket_mem[node_id]) {
316 essential_prev = essential_memory[j];
318 if (essential_memory[j] < hugepage_sz)
319 essential_memory[j] = 0;
321 essential_memory[j] -= hugepage_sz;
325 "Setting policy MPOL_PREFERRED for socket %d\n",
327 numa_set_preferred(node_id);
332 hf->size = hugepage_sz;
333 eal_get_hugefile_path(hf->filepath, sizeof(hf->filepath),
334 hpi->hugedir, hf->file_id);
335 hf->filepath[sizeof(hf->filepath) - 1] = '\0';
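/*
 * For example, with the default file prefix the generated paths look
 * like "<hugedir>/rtemap_0", "<hugedir>/rtemap_1", and so on, where
 * <hugedir> is the hugetlbfs mount point discovered earlier.
 */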
337 /* try to create hugepage file */
338 fd = open(hf->filepath, O_CREAT | O_RDWR, 0600);
340 RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
345 /* Map the segment and populate the page tables; the kernel fills
346 * this segment with zeros. We don't care where this gets mapped -
347 * we already have contiguous memory areas ready for us to map
348 * into.
350 virtaddr = mmap(NULL, hugepage_sz, PROT_READ | PROT_WRITE,
351 MAP_SHARED | MAP_POPULATE, fd, 0);
352 if (virtaddr == MAP_FAILED) {
353 RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
359 hf->orig_va = virtaddr;
361 /* In Linux, hugetlb limitations, like cgroup, are
362 * enforced at fault time instead of mmap(), even
363 * with the option of MAP_POPULATE. The kernel will send
364 * a SIGBUS signal. To avoid being killed, save the stack
365 * environment here; if SIGBUS happens, we can jump
368 if (huge_wrap_sigsetjmp()) {
369 RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more "
370 "hugepages of size %u MB\n",
371 (unsigned int)(hugepage_sz / 0x100000));
372 munmap(virtaddr, hugepage_sz);
374 unlink(hugepg_tbl[i].filepath);
375 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
377 essential_memory[node_id] =
382 *(int *)virtaddr = 0;
384 /* set shared lock on the file. */
385 if (flock(fd, LOCK_SH) < 0) {
386 RTE_LOG(DEBUG, EAL, "%s(): Locking file failed: %s\n",
387 __func__, strerror(errno));
396 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
399 "Restoring previous memory policy: %d\n", oldpolicy);
400 if (oldpolicy == MPOL_DEFAULT) {
401 numa_set_localalloc();
402 } else if (set_mempolicy(oldpolicy, oldmask->maskp,
403 oldmask->size + 1) < 0) {
404 RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
406 numa_set_localalloc();
410 numa_free_cpumask(oldmask);
416 * Parse /proc/self/numa_maps to get the NUMA socket ID for each huge
420 find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
424 unsigned i, hp_count = 0;
427 char hugedir_str[PATH_MAX];
430 f = fopen("/proc/self/numa_maps", "r");
432 RTE_LOG(NOTICE, EAL, "NUMA support not available;"
433 " assuming all memory is in socket_id 0\n");
437 snprintf(hugedir_str, sizeof(hugedir_str),
438 "%s/%s", hpi->hugedir, eal_get_hugefile_prefix());
441 while (fgets(buf, sizeof(buf), f) != NULL) {
443 /* ignore non huge page */
444 if (strstr(buf, " huge ") == NULL &&
445 strstr(buf, hugedir_str) == NULL)
449 virt_addr = strtoull(buf, &end, 16);
450 if (virt_addr == 0 || end == buf) {
451 RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
455 /* get node id (socket id) */
456 nodestr = strstr(buf, " N");
457 if (nodestr == NULL) {
458 RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
462 end = strstr(nodestr, "=");
464 RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
470 socket_id = strtoul(nodestr, &end, 0);
471 if ((nodestr[0] == '\0') || (end == NULL) || (*end != '\0')) {
472 RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
476 /* if we find this page in our mappings, set socket_id */
477 for (i = 0; i < hpi->num_pages[0]; i++) {
478 void *va = (void *)(unsigned long)virt_addr;
479 if (hugepg_tbl[i].orig_va == va) {
480 hugepg_tbl[i].socket_id = socket_id;
482 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
484 "Hugepage %s is on socket %d\n",
485 hugepg_tbl[i].filepath, socket_id);
491 if (hp_count < hpi->num_pages[0])
503 cmp_physaddr(const void *a, const void *b)
505 #ifndef RTE_ARCH_PPC_64
506 const struct hugepage_file *p1 = a;
507 const struct hugepage_file *p2 = b;
509 /* PowerPC needs memory sorted in reverse order from x86 */
510 const struct hugepage_file *p1 = b;
511 const struct hugepage_file *p2 = a;
513 if (p1->physaddr < p2->physaddr)
515 else if (p1->physaddr > p2->physaddr)
522 * Uses mmap to create a shared memory area for storage of data.
523 * Used in this file to store the hugepage file map on disk.
526 create_shared_memory(const char *filename, const size_t mem_size)
530 const struct internal_config *internal_conf =
531 eal_get_internal_configuration();
533 /* if no shared files mode is used, create anonymous memory instead */
534 if (internal_conf->no_shconf) {
535 retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE,
536 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
537 if (retval == MAP_FAILED)
542 fd = open(filename, O_CREAT | O_RDWR, 0600);
545 if (ftruncate(fd, mem_size) < 0) {
549 retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
551 if (retval == MAP_FAILED)
557 * This copies *active* hugepages from one hugepage table to another.
558 * The destination is typically the shared memory.
561 copy_hugepages_to_shared_mem(struct hugepage_file * dst, int dest_size,
562 const struct hugepage_file * src, int src_size)
564 int src_pos, dst_pos = 0;
566 for (src_pos = 0; src_pos < src_size; src_pos++) {
567 if (src[src_pos].orig_va != NULL) {
568 /* error on overflow attempt */
569 if (dst_pos == dest_size)
571 memcpy(&dst[dst_pos], &src[src_pos], sizeof(struct hugepage_file));
579 unlink_hugepage_files(struct hugepage_file *hugepg_tbl,
580 unsigned num_hp_info)
582 unsigned socket, size;
583 int page, nrpages = 0;
584 const struct internal_config *internal_conf =
585 eal_get_internal_configuration();
587 /* get total number of hugepages */
588 for (size = 0; size < num_hp_info; size++)
589 for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
591 internal_conf->hugepage_info[size].num_pages[socket];
593 for (page = 0; page < nrpages; page++) {
594 struct hugepage_file *hp = &hugepg_tbl[page];
596 if (hp->orig_va != NULL && unlink(hp->filepath)) {
597 RTE_LOG(WARNING, EAL, "%s(): Removing %s failed: %s\n",
598 __func__, hp->filepath, strerror(errno));
605 * unmaps hugepages that are not going to be used. since we originally allocate
606 * ALL hugepages (not just those we need), additional unmapping needs to be done.
609 unmap_unneeded_hugepages(struct hugepage_file *hugepg_tbl,
610 struct hugepage_info *hpi,
611 unsigned num_hp_info)
613 unsigned socket, size;
614 int page, nrpages = 0;
615 const struct internal_config *internal_conf =
616 eal_get_internal_configuration();
618 /* get total number of hugepages */
619 for (size = 0; size < num_hp_info; size++)
620 for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
621 nrpages += internal_conf->hugepage_info[size].num_pages[socket];
623 for (size = 0; size < num_hp_info; size++) {
624 for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
625 unsigned pages_found = 0;
627 /* traverse until we have unmapped all the unused pages */
628 for (page = 0; page < nrpages; page++) {
629 struct hugepage_file *hp = &hugepg_tbl[page];
631 /* find a page that matches the criteria */
632 if ((hp->size == hpi[size].hugepage_sz) &&
633 (hp->socket_id == (int) socket)) {
635 /* once we have found as many pages as we need, unmap the rest */
636 if (pages_found == hpi[size].num_pages[socket]) {
639 unmap_len = hp->size;
641 /* get start addr and len of the remaining segment */
646 if (unlink(hp->filepath) == -1) {
647 RTE_LOG(ERR, EAL, "%s(): Removing %s failed: %s\n",
648 __func__, hp->filepath, strerror(errno));
652 /* lock the page and skip */
658 } /* foreach socket */
659 } /* foreach pagesize */
665 remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end)
667 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
668 struct rte_memseg_list *msl;
669 struct rte_fbarray *arr;
670 int cur_page, seg_len;
671 unsigned int msl_idx;
677 const struct internal_config *internal_conf =
678 eal_get_internal_configuration();
680 page_sz = hugepages[seg_start].size;
681 socket_id = hugepages[seg_start].socket_id;
682 seg_len = seg_end - seg_start;
684 RTE_LOG(DEBUG, EAL, "Attempting to map %" PRIu64 "M on socket %i\n",
685 (seg_len * page_sz) >> 20ULL, socket_id);
687 /* find free space in memseg lists */
688 for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
690 msl = &mcfg->memsegs[msl_idx];
691 arr = &msl->memseg_arr;
693 if (msl->page_sz != page_sz)
695 if (msl->socket_id != socket_id)
698 /* leave space for a hole if array is not empty */
699 empty = arr->count == 0;
700 ms_idx = rte_fbarray_find_next_n_free(arr, 0,
701 seg_len + (empty ? 0 : 1));
703 /* memseg list is full? */
707 /* leave some space between memsegs, they are not IOVA
708 * contiguous, so they shouldn't be VA contiguous either.
714 if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
715 RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase %s and/or %s in configuration.\n",
716 RTE_STR(RTE_MAX_MEMSEG_PER_TYPE),
717 RTE_STR(RTE_MAX_MEM_MB_PER_TYPE));
721 #ifdef RTE_ARCH_PPC_64
722 /* for PPC64 we go through the list backwards */
723 for (cur_page = seg_end - 1; cur_page >= seg_start;
724 cur_page--, ms_idx++) {
726 for (cur_page = seg_start; cur_page < seg_end; cur_page++, ms_idx++) {
728 struct hugepage_file *hfile = &hugepages[cur_page];
729 struct rte_memseg *ms = rte_fbarray_get(arr, ms_idx);
733 fd = open(hfile->filepath, O_RDWR);
735 RTE_LOG(ERR, EAL, "Could not open '%s': %s\n",
736 hfile->filepath, strerror(errno));
739 /* set shared lock on the file. */
740 if (flock(fd, LOCK_SH) < 0) {
741 RTE_LOG(DEBUG, EAL, "Could not lock '%s': %s\n",
742 hfile->filepath, strerror(errno));
746 memseg_len = (size_t)page_sz;
747 addr = RTE_PTR_ADD(msl->base_va, ms_idx * memseg_len);
749 /* we know this address is already mmapped by memseg list, so
750 * using MAP_FIXED here is safe
752 addr = mmap(addr, page_sz, PROT_READ | PROT_WRITE,
753 MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd, 0);
754 if (addr == MAP_FAILED) {
755 RTE_LOG(ERR, EAL, "Couldn't remap '%s': %s\n",
756 hfile->filepath, strerror(errno));
761 /* we have a new address, so unmap previous one */
763 /* in 32-bit legacy mode, we have already unmapped the page */
764 if (!internal_conf->legacy_mem)
765 munmap(hfile->orig_va, page_sz);
767 munmap(hfile->orig_va, page_sz);
770 hfile->orig_va = NULL;
771 hfile->final_va = addr;
773 /* rewrite physical addresses in IOVA as VA mode */
774 if (rte_eal_iova_mode() == RTE_IOVA_VA)
775 hfile->physaddr = (uintptr_t)addr;
777 /* set up memseg data */
779 ms->hugepage_sz = page_sz;
780 ms->len = memseg_len;
781 ms->iova = hfile->physaddr;
782 ms->socket_id = hfile->socket_id;
783 ms->nchannel = rte_memory_get_nchannel();
784 ms->nrank = rte_memory_get_nrank();
786 rte_fbarray_set_used(arr, ms_idx);
788 /* store segment fd internally */
789 if (eal_memalloc_set_seg_fd(msl_idx, ms_idx, fd) < 0)
790 RTE_LOG(ERR, EAL, "Could not store segment fd: %s\n",
791 rte_strerror(rte_errno));
793 RTE_LOG(DEBUG, EAL, "Allocated %" PRIu64 "M on socket %i\n",
794 (seg_len * page_sz) >> 20, socket_id);
799 get_mem_amount(uint64_t page_sz, uint64_t max_mem)
801 uint64_t area_sz, max_pages;
803 /* limit to RTE_MAX_MEMSEG_PER_LIST pages or RTE_MAX_MEM_MB_PER_LIST */
804 max_pages = RTE_MAX_MEMSEG_PER_LIST;
805 max_mem = RTE_MIN((uint64_t)RTE_MAX_MEM_MB_PER_LIST << 20, max_mem);
807 area_sz = RTE_MIN(page_sz * max_pages, max_mem);
809 /* make sure the list isn't smaller than the page size */
810 area_sz = RTE_MAX(area_sz, page_sz);
812 return RTE_ALIGN(area_sz, page_sz);
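/*
 * For illustration (the real limits come from the build configuration):
 * if RTE_MAX_MEMSEG_PER_LIST were 8192 and the page size 2 MB, a list
 * could hold up to 8192 * 2 MB = 16 GB, which is then capped by
 * RTE_MAX_MEM_MB_PER_LIST and by max_mem, and finally rounded up to a
 * multiple of the page size.
 */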
816 memseg_list_free(struct rte_memseg_list *msl)
818 if (rte_fbarray_destroy(&msl->memseg_arr)) {
819 RTE_LOG(ERR, EAL, "Cannot destroy memseg list\n");
822 memset(msl, 0, sizeof(*msl));
827 * Our VA space is not preallocated yet, so preallocate it here. We need to know
828 * how many segments there are in order to map all pages into one address space,
829 * and leave appropriate holes between segments so that rte_malloc does not
830 * concatenate them into one big segment.
832 * We also need to unmap the original pages to free up address space.
834 static int __rte_unused
835 prealloc_segments(struct hugepage_file *hugepages, int n_pages)
837 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
838 int cur_page, seg_start_page, end_seg, new_memseg;
839 unsigned int hpi_idx, socket, i;
840 int n_contig_segs, n_segs;
842 const struct internal_config *internal_conf =
843 eal_get_internal_configuration();
845 /* before we preallocate segments, we need to free up our VA space.
846 * we're not removing files, and we already have information about
847 * PA-contiguousness, so it is safe to unmap everything.
849 for (cur_page = 0; cur_page < n_pages; cur_page++) {
850 struct hugepage_file *hpi = &hugepages[cur_page];
851 munmap(hpi->orig_va, hpi->size);
855 /* we do not know in advance which page sizes and sockets have pages, so
856 * loop over all of them
858 for (hpi_idx = 0; hpi_idx < internal_conf->num_hugepage_sizes;
861 internal_conf->hugepage_info[hpi_idx].hugepage_sz;
863 for (i = 0; i < rte_socket_count(); i++) {
864 struct rte_memseg_list *msl;
866 socket = rte_socket_id_by_idx(i);
871 for (cur_page = 0; cur_page < n_pages; cur_page++) {
872 struct hugepage_file *prev, *cur;
873 int prev_seg_start_page = -1;
875 cur = &hugepages[cur_page];
876 prev = cur_page == 0 ? NULL :
877 &hugepages[cur_page - 1];
884 else if (cur->socket_id != (int) socket)
886 else if (cur->size != page_sz)
888 else if (cur_page == 0)
890 #ifdef RTE_ARCH_PPC_64
891 /* On the PPC64 architecture, mmap always starts
892 * from higher addresses and moves to lower ones. Here,
893 * physical addresses are in descending order.
895 else if ((prev->physaddr - cur->physaddr) !=
899 else if ((cur->physaddr - prev->physaddr) !=
904 /* if we're already inside a segment,
905 * new segment means end of current one
907 if (seg_start_page != -1) {
909 prev_seg_start_page =
912 seg_start_page = cur_page;
916 if (prev_seg_start_page != -1) {
917 /* we've found a new segment */
921 } else if (seg_start_page != -1) {
922 /* we didn't find new segment,
923 * but did end current one
931 /* we're skipping this page */
935 /* segment continues */
937 /* check if we missed last segment */
938 if (seg_start_page != -1) {
940 n_segs += cur_page - seg_start_page;
943 /* if no segments were found, do not preallocate */
947 /* we now have total number of pages that we will
948 * allocate for this segment list. add separator pages
949 * to the total count, and preallocate VA space.
951 n_segs += n_contig_segs - 1;
953 /* now, preallocate VA space for these segments */
955 /* first, find suitable memseg list for this */
956 for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS;
958 msl = &mcfg->memsegs[msl_idx];
960 if (msl->base_va != NULL)
964 if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
965 RTE_LOG(ERR, EAL, "Not enough space in memseg lists, please increase %s\n",
966 RTE_STR(RTE_MAX_MEMSEG_LISTS));
970 /* now, allocate fbarray itself */
971 if (eal_memseg_list_init(msl, page_sz, n_segs,
972 socket, msl_idx, true) < 0)
975 /* finally, allocate VA space */
976 if (eal_memseg_list_alloc(msl, 0) < 0) {
977 RTE_LOG(ERR, EAL, "Cannot preallocate 0x%"PRIx64"kB hugepages\n",
987 * We cannot reallocate memseg lists on the fly because PPC64 stores pages
988 * backwards; therefore, we have to process the entire memseg first before
989 * remapping it into memseg list VA space.
992 remap_needed_hugepages(struct hugepage_file *hugepages, int n_pages)
994 int cur_page, seg_start_page, new_memseg, ret;
997 for (cur_page = 0; cur_page < n_pages; cur_page++) {
998 struct hugepage_file *prev, *cur;
1002 cur = &hugepages[cur_page];
1003 prev = cur_page == 0 ? NULL : &hugepages[cur_page - 1];
1005 /* if size is zero, no more pages left */
1011 else if (cur->socket_id != prev->socket_id)
1013 else if (cur->size != prev->size)
1015 #ifdef RTE_ARCH_PPC_64
1016 /* On the PPC64 architecture, mmap always starts from higher
1017 * addresses and moves to lower ones. Here, physical addresses are in
1020 else if ((prev->physaddr - cur->physaddr) != cur->size)
1023 else if ((cur->physaddr - prev->physaddr) != cur->size)
1028 /* if this isn't the first time, remap segment */
1029 if (cur_page != 0) {
1030 ret = remap_segment(hugepages, seg_start_page,
1035 /* remember where we started */
1036 seg_start_page = cur_page;
1038 /* continuation of previous memseg */
1040 /* we were stopped, but we didn't remap the last segment, do it now */
1041 if (cur_page != 0) {
1042 ret = remap_segment(hugepages, seg_start_page,
1050 static inline size_t
1051 eal_get_hugepage_mem_size(void)
1055 struct internal_config *internal_conf =
1056 eal_get_internal_configuration();
1058 for (i = 0; i < internal_conf->num_hugepage_sizes; i++) {
1059 struct hugepage_info *hpi = &internal_conf->hugepage_info[i];
1060 if (strnlen(hpi->hugedir, sizeof(hpi->hugedir)) != 0) {
1061 for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
1062 size += hpi->hugepage_sz * hpi->num_pages[j];
1067 return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
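/*
 * For example (hypothetical counts): 512 pages of 2 MB on socket 0 plus
 * 4 pages of 1 GB on socket 1 add up to 512 * 2 MB + 4 * 1 GB = 5 GB of
 * hugepage memory.
 */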
1070 static struct sigaction huge_action_old;
1071 static int huge_need_recover;
1074 huge_register_sigbus(void)
1077 struct sigaction action;
1080 sigaddset(&mask, SIGBUS);
1081 action.sa_flags = 0;
1082 action.sa_mask = mask;
1083 action.sa_handler = huge_sigbus_handler;
1085 huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);
1089 huge_recover_sigbus(void)
1091 if (huge_need_recover) {
1092 sigaction(SIGBUS, &huge_action_old, NULL);
1093 huge_need_recover = 0;
1098 * Prepare physical memory mapping: fill the configuration structure with
1099 * this information and return 0 on success.
1100 * 1. map N huge pages in separate files in hugetlbfs
1101 * 2. find associated physical addr
1102 * 3. find associated NUMA socket ID
1103 * 4. sort all huge pages by physical address
1104 * 5. remap these N huge pages in the correct order
1105 * 6. unmap the first mapping
1106 * 7. fill memsegs in configuration with contiguous zones
1109 eal_legacy_hugepage_init(void)
1111 struct rte_mem_config *mcfg;
1112 struct hugepage_file *hugepage = NULL, *tmp_hp = NULL;
1113 struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
1114 struct internal_config *internal_conf =
1115 eal_get_internal_configuration();
1117 uint64_t memory[RTE_MAX_NUMA_NODES];
1121 int nr_hugefiles, nr_hugepages = 0;
1124 memset(used_hp, 0, sizeof(used_hp));
1126 /* get pointer to global configuration */
1127 mcfg = rte_eal_get_configuration()->mem_config;
1129 /* hugetlbfs can be disabled */
1130 if (internal_conf->no_hugetlbfs) {
1131 void *prealloc_addr;
1133 struct rte_memseg_list *msl;
1134 int n_segs, fd, flags;
1135 #ifdef MEMFD_SUPPORTED
1140 /* nohuge mode is legacy mode */
1141 internal_conf->legacy_mem = 1;
1143 /* nohuge mode is single-file segments mode */
1144 internal_conf->single_file_segments = 1;
1146 /* create a memseg list */
1147 msl = &mcfg->memsegs[0];
1149 mem_sz = internal_conf->memory;
1150 page_sz = RTE_PGSIZE_4K;
1151 n_segs = mem_sz / page_sz;
1153 if (eal_memseg_list_init_named(
1154 msl, "nohugemem", page_sz, n_segs, 0, true)) {
1158 /* set up parameters for anonymous mmap */
1160 flags = MAP_PRIVATE | MAP_ANONYMOUS;
1162 #ifdef MEMFD_SUPPORTED
1163 /* create a memfd and store it in the segment fd table */
1164 memfd = memfd_create("nohuge", 0);
1166 RTE_LOG(DEBUG, EAL, "Cannot create memfd: %s\n",
1168 RTE_LOG(DEBUG, EAL, "Falling back to anonymous map\n");
1170 /* we got an fd - now resize it */
1171 if (ftruncate(memfd, internal_conf->memory) < 0) {
1172 RTE_LOG(ERR, EAL, "Cannot resize memfd: %s\n",
1174 RTE_LOG(ERR, EAL, "Falling back to anonymous map\n");
1177 /* creating memfd-backed file was successful.
1178 * we want changes to memfd to be visible to
1179 * other processes (such as vhost backend), so
1180 * map it as shared memory.
1182 RTE_LOG(DEBUG, EAL, "Using memfd for anonymous memory\n");
1188 /* preallocate address space for the memory, so that it can
1189 * fit into the DMA mask.
1191 if (eal_memseg_list_alloc(msl, 0)) {
1192 RTE_LOG(ERR, EAL, "Cannot preallocate VA space for hugepage memory\n");
1196 prealloc_addr = msl->base_va;
1197 addr = mmap(prealloc_addr, mem_sz, PROT_READ | PROT_WRITE,
1198 flags | MAP_FIXED, fd, 0);
1199 if (addr == MAP_FAILED || addr != prealloc_addr) {
1200 RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
1202 munmap(prealloc_addr, mem_sz);
1206 /* we're in single-file segments mode, so only the segment list
1207 * fd needs to be set up.
1210 if (eal_memalloc_set_seg_list_fd(0, fd) < 0) {
1211 RTE_LOG(ERR, EAL, "Cannot set up segment list fd\n");
1212 /* not a serious error, proceed */
1216 eal_memseg_list_populate(msl, addr, n_segs);
1218 if (mcfg->dma_maskbits &&
1219 rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
1221 "%s(): couldn't allocate memory due to IOVA exceeding limits of current DMA mask.\n",
1223 if (rte_eal_iova_mode() == RTE_IOVA_VA &&
1224 rte_eal_using_phys_addrs())
1226 "%s(): Please try initializing EAL with --iova-mode=pa parameter.\n",
1233 /* calculate total number of hugepages available. at this point we haven't
1234 * yet started sorting them so they all are on socket 0 */
1235 for (i = 0; i < (int) internal_conf->num_hugepage_sizes; i++) {
1236 /* meanwhile, also initialize used_hp hugepage sizes in used_hp */
1237 used_hp[i].hugepage_sz = internal_conf->hugepage_info[i].hugepage_sz;
1239 nr_hugepages += internal_conf->hugepage_info[i].num_pages[0];
1243 * allocate a memory area for the hugepage table.
1244 * this isn't shared memory yet. Because we need some
1245 * processing done on these pages, shared memory will be created
1248 tmp_hp = malloc(nr_hugepages * sizeof(struct hugepage_file));
1252 memset(tmp_hp, 0, nr_hugepages * sizeof(struct hugepage_file));
1254 hp_offset = 0; /* where we start the current page size entries */
1256 huge_register_sigbus();
1258 /* make a copy of socket_mem, needed for balanced allocation. */
1259 for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
1260 memory[i] = internal_conf->socket_mem[i];
1262 /* map all hugepages and sort them */
1263 for (i = 0; i < (int)internal_conf->num_hugepage_sizes; i++) {
1264 unsigned pages_old, pages_new;
1265 struct hugepage_info *hpi;
1268 * we don't yet mark hugepages as used at this stage, so
1269 * we just map all hugepages available to the system;
1270 * all hugepages are still located on socket 0
1272 hpi = &internal_conf->hugepage_info[i];
1274 if (hpi->num_pages[0] == 0)
1277 /* map all hugepages available */
1278 pages_old = hpi->num_pages[0];
1279 pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, memory);
1280 if (pages_new < pages_old) {
1282 "%d not %d hugepages of size %u MB allocated\n",
1283 pages_new, pages_old,
1284 (unsigned)(hpi->hugepage_sz / 0x100000));
1286 int pages = pages_old - pages_new;
1288 nr_hugepages -= pages;
1289 hpi->num_pages[0] = pages_new;
1294 if (rte_eal_using_phys_addrs() &&
1295 rte_eal_iova_mode() != RTE_IOVA_VA) {
1296 /* find physical addresses for each hugepage */
1297 if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
1298 RTE_LOG(DEBUG, EAL, "Failed to find phys addr "
1299 "for %u MB pages\n",
1300 (unsigned int)(hpi->hugepage_sz / 0x100000));
1304 /* set physical addresses for each hugepage */
1305 if (set_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
1306 RTE_LOG(DEBUG, EAL, "Failed to set phys addr "
1307 "for %u MB pages\n",
1308 (unsigned int)(hpi->hugepage_sz / 0x100000));
1313 if (find_numasocket(&tmp_hp[hp_offset], hpi) < 0){
1314 RTE_LOG(DEBUG, EAL, "Failed to find NUMA socket for %u MB pages\n",
1315 (unsigned)(hpi->hugepage_sz / 0x100000));
1319 qsort(&tmp_hp[hp_offset], hpi->num_pages[0],
1320 sizeof(struct hugepage_file), cmp_physaddr);
1322 /* we have processed a number of hugepages of this size, so increment the offset */
1323 hp_offset += hpi->num_pages[0];
1326 huge_recover_sigbus();
1328 if (internal_conf->memory == 0 && internal_conf->force_sockets == 0)
1329 internal_conf->memory = eal_get_hugepage_mem_size();
1331 nr_hugefiles = nr_hugepages;
1334 /* clean out the numbers of pages */
1335 for (i = 0; i < (int) internal_conf->num_hugepage_sizes; i++)
1336 for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
1337 internal_conf->hugepage_info[i].num_pages[j] = 0;
1339 /* get hugepages for each socket */
1340 for (i = 0; i < nr_hugefiles; i++) {
1341 int socket = tmp_hp[i].socket_id;
1343 /* find a hugepage info with right size and increment num_pages */
1344 const int nb_hpsizes = RTE_MIN(MAX_HUGEPAGE_SIZES,
1345 (int)internal_conf->num_hugepage_sizes);
1346 for (j = 0; j < nb_hpsizes; j++) {
1347 if (tmp_hp[i].size ==
1348 internal_conf->hugepage_info[j].hugepage_sz) {
1349 internal_conf->hugepage_info[j].num_pages[socket]++;
1354 /* make a copy of socket_mem, needed for number of pages calculation */
1355 for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
1356 memory[i] = internal_conf->socket_mem[i];
1358 /* calculate final number of pages */
1359 nr_hugepages = eal_dynmem_calc_num_pages_per_socket(memory,
1360 internal_conf->hugepage_info, used_hp,
1361 internal_conf->num_hugepage_sizes);
1363 /* error if not enough memory available */
1364 if (nr_hugepages < 0)
1368 for (i = 0; i < (int) internal_conf->num_hugepage_sizes; i++) {
1369 for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
1370 if (used_hp[i].num_pages[j] > 0) {
1372 "Requesting %u pages of size %uMB"
1373 " from socket %i\n",
1374 used_hp[i].num_pages[j],
1376 (used_hp[i].hugepage_sz / 0x100000),
1382 /* create shared memory */
1383 hugepage = create_shared_memory(eal_hugepage_data_path(),
1384 nr_hugefiles * sizeof(struct hugepage_file));
1386 if (hugepage == NULL) {
1387 RTE_LOG(ERR, EAL, "Failed to create shared memory!\n");
1390 memset(hugepage, 0, nr_hugefiles * sizeof(struct hugepage_file));
1393 * unmap pages that we won't need (looks at used_hp).
1394 * also, sets final_va to NULL on pages that were unmapped.
1396 if (unmap_unneeded_hugepages(tmp_hp, used_hp,
1397 internal_conf->num_hugepage_sizes) < 0) {
1398 RTE_LOG(ERR, EAL, "Unmapping and locking hugepages failed!\n");
1403 * copy the hugepage data from the malloc'd table to the actual shared memory.
1404 * this procedure only copies those hugepages that have a non-NULL
1405 * orig_va, and has overflow protection.
1407 if (copy_hugepages_to_shared_mem(hugepage, nr_hugefiles,
1408 tmp_hp, nr_hugefiles) < 0) {
1409 RTE_LOG(ERR, EAL, "Copying tables to shared memory failed!\n");
1414 /* for legacy 32-bit mode, we did not preallocate VA space, so do it */
1415 if (internal_conf->legacy_mem &&
1416 prealloc_segments(hugepage, nr_hugefiles)) {
1417 RTE_LOG(ERR, EAL, "Could not preallocate VA space for hugepages\n");
1422 /* remap all pages we do need into memseg list VA space, so that those
1423 * pages become first-class citizens in DPDK memory subsystem
1425 if (remap_needed_hugepages(hugepage, nr_hugefiles)) {
1426 RTE_LOG(ERR, EAL, "Couldn't remap hugepage files into memseg lists\n");
1430 /* free the hugepage backing files */
1431 if (internal_conf->hugepage_unlink &&
1432 unlink_hugepage_files(tmp_hp, internal_conf->num_hugepage_sizes) < 0) {
1433 RTE_LOG(ERR, EAL, "Unlinking hugepage files failed!\n");
1437 /* free the temporary hugepage table */
1441 munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));
1444 /* we're not going to allocate more pages, so release VA space for
1445 * unused memseg lists
1447 for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
1448 struct rte_memseg_list *msl = &mcfg->memsegs[i];
1451 /* skip inactive lists */
1452 if (msl->base_va == NULL)
1454 /* skip lists where there is at least one page allocated */
1455 if (msl->memseg_arr.count > 0)
1457 /* this is an unused list, deallocate it */
1459 munmap(msl->base_va, mem_sz);
1460 msl->base_va = NULL;
1463 /* destroy backing fbarray */
1464 rte_fbarray_destroy(&msl->memseg_arr);
1467 if (mcfg->dma_maskbits &&
1468 rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
1470 "%s(): couldn't allocate memory due to IOVA exceeding limits of current DMA mask.\n",
1478 huge_recover_sigbus();
1480 if (hugepage != NULL)
1481 munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));
1487 * uses fstat to report the size of a file on disk
1493 if (fstat(fd, &st) < 0)
1499 * This creates the memory mappings in the secondary process to match those of
1500 * the primary process. It goes through each memory segment in the DPDK runtime
1501 * configuration and finds the hugepages which form that segment, mapping them
1502 * in order to form a contiguous block in the virtual memory space.
1505 eal_legacy_hugepage_attach(void)
1507 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1508 struct hugepage_file *hp = NULL;
1509 unsigned int num_hp = 0;
1511 unsigned int cur_seg;
1513 int fd, fd_hugepage = -1;
1515 if (aslr_enabled() > 0) {
1516 RTE_LOG(WARNING, EAL, "WARNING: Address Space Layout Randomization "
1517 "(ASLR) is enabled in the kernel.\n");
1518 RTE_LOG(WARNING, EAL, " This may cause issues with mapping memory "
1519 "into secondary processes\n");
1522 fd_hugepage = open(eal_hugepage_data_path(), O_RDONLY);
1523 if (fd_hugepage < 0) {
1524 RTE_LOG(ERR, EAL, "Could not open %s\n",
1525 eal_hugepage_data_path());
1529 size = getFileSize(fd_hugepage);
1530 hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0);
1531 if (hp == MAP_FAILED) {
1532 RTE_LOG(ERR, EAL, "Could not mmap %s\n",
1533 eal_hugepage_data_path());
1537 num_hp = size / sizeof(struct hugepage_file);
1538 RTE_LOG(DEBUG, EAL, "Analysing %u files\n", num_hp);
1540 /* map all segments into memory to make sure we get the addresses. The
1541 * segments themselves are already in the memseg list (which is shared and
1542 * has its VA space already preallocated), so we just need to map
1543 * everything at the correct addresses.
1545 for (i = 0; i < num_hp; i++) {
1546 struct hugepage_file *hf = &hp[i];
1547 size_t map_sz = hf->size;
1548 void *map_addr = hf->final_va;
1549 int msl_idx, ms_idx;
1550 struct rte_memseg_list *msl;
1551 struct rte_memseg *ms;
1553 /* if size is zero, no more pages left */
1557 fd = open(hf->filepath, O_RDWR);
1559 RTE_LOG(ERR, EAL, "Could not open %s: %s\n",
1560 hf->filepath, strerror(errno));
1564 map_addr = mmap(map_addr, map_sz, PROT_READ | PROT_WRITE,
1565 MAP_SHARED | MAP_FIXED, fd, 0);
1566 if (map_addr == MAP_FAILED) {
1567 RTE_LOG(ERR, EAL, "Could not map %s: %s\n",
1568 hf->filepath, strerror(errno));
1572 /* set shared lock on the file. */
1573 if (flock(fd, LOCK_SH) < 0) {
1574 RTE_LOG(DEBUG, EAL, "%s(): Locking file failed: %s\n",
1575 __func__, strerror(errno));
1579 /* find segment data */
1580 msl = rte_mem_virt2memseg_list(map_addr);
1582 RTE_LOG(DEBUG, EAL, "%s(): Cannot find memseg list\n",
1586 ms = rte_mem_virt2memseg(map_addr, msl);
1588 RTE_LOG(DEBUG, EAL, "%s(): Cannot find memseg\n",
1593 msl_idx = msl - mcfg->memsegs;
1594 ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
1596 RTE_LOG(DEBUG, EAL, "%s(): Cannot find memseg idx\n",
1601 /* store segment fd internally */
1602 if (eal_memalloc_set_seg_fd(msl_idx, ms_idx, fd) < 0)
1603 RTE_LOG(ERR, EAL, "Could not store segment fd: %s\n",
1604 rte_strerror(rte_errno));
1606 /* unmap the hugepage config file, since we are done using it */
1612 munmap(hp[i].final_va, hp[i].size);
1616 /* unwind mmap's done so far */
1617 for (cur_seg = 0; cur_seg < i; cur_seg++)
1618 munmap(hp[cur_seg].final_va, hp[cur_seg].size);
1620 if (hp != NULL && hp != MAP_FAILED)
1622 if (fd_hugepage >= 0)
1628 eal_hugepage_attach(void)
1630 if (eal_memalloc_sync_with_primary()) {
1631 RTE_LOG(ERR, EAL, "Could not map memory from primary process\n");
1632 if (aslr_enabled() > 0)
1633 RTE_LOG(ERR, EAL, "It is recommended to disable ASLR in the kernel and retry running both primary and secondary processes\n");
1640 rte_eal_hugepage_init(void)
1642 const struct internal_config *internal_conf =
1643 eal_get_internal_configuration();
1645 return internal_conf->legacy_mem ?
1646 eal_legacy_hugepage_init() :
1647 eal_dynmem_hugepage_init();
1651 rte_eal_hugepage_attach(void)
1653 const struct internal_config *internal_conf =
1654 eal_get_internal_configuration();
1656 return internal_conf->legacy_mem ?
1657 eal_legacy_hugepage_attach() :
1658 eal_hugepage_attach();
1662 rte_eal_using_phys_addrs(void)
1664 if (phys_addrs_available == -1) {
1667 if (rte_eal_has_hugepages() != 0 &&
1668 rte_mem_virt2phy(&tmp) != RTE_BAD_PHYS_ADDR)
1669 phys_addrs_available = 1;
1671 phys_addrs_available = 0;
1673 return phys_addrs_available;
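/*
 * Note: on recent kernels an unprivileged process typically reads zeroed
 * PFNs from /proc/self/pagemap, in which case rte_mem_virt2phy() returns
 * RTE_BAD_PHYS_ADDR and physical addresses are reported as unavailable.
 */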
1676 static int __rte_unused
1677 memseg_primary_init_32(void)
1679 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1680 int active_sockets, hpi_idx, msl_idx = 0;
1681 unsigned int socket_id, i;
1682 struct rte_memseg_list *msl;
1683 uint64_t extra_mem_per_socket, total_extra_mem, total_requested_mem;
1685 struct internal_config *internal_conf =
1686 eal_get_internal_configuration();
1688 /* no-huge does not need this at all */
1689 if (internal_conf->no_hugetlbfs)
1692 /* this is a giant hack, but desperate times call for desperate
1693 * measures. in legacy 32-bit mode, we cannot preallocate VA space,
1694 * because having upwards of 2 gigabytes of VA space already mapped will
1695 * interfere with our ability to map and sort hugepages.
1697 * therefore, in legacy 32-bit mode, we will be initializing memseg
1698 * lists much later - in eal_memory.c, right after we unmap all the
1699 * unneeded pages. this will not affect secondary processes, as those
1700 * should be able to mmap the space without (too many) problems.
1702 if (internal_conf->legacy_mem)
1705 /* 32-bit mode is a very special case. we cannot know in advance where
1706 * the user will want to allocate their memory, so we have to do some
1710 total_requested_mem = 0;
1711 if (internal_conf->force_sockets)
1712 for (i = 0; i < rte_socket_count(); i++) {
1715 socket_id = rte_socket_id_by_idx(i);
1716 mem = internal_conf->socket_mem[socket_id];
1722 total_requested_mem += mem;
1725 total_requested_mem = internal_conf->memory;
1727 max_mem = (uint64_t)RTE_MAX_MEM_MB << 20;
1728 if (total_requested_mem > max_mem) {
1729 RTE_LOG(ERR, EAL, "Invalid parameters: 32-bit process can at most use %uM of memory\n",
1730 (unsigned int)(max_mem >> 20));
1733 total_extra_mem = max_mem - total_requested_mem;
1734 extra_mem_per_socket = active_sockets == 0 ? total_extra_mem :
1735 total_extra_mem / active_sockets;
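/*
 * Worked example (numbers are hypothetical): if the 32-bit cap is 2 GB
 * and the user requested 512 MB via --socket-mem on one active socket,
 * then total_extra_mem = 2048 - 512 = 1536 MB and extra_mem_per_socket =
 * 1536 MB; that socket may therefore preallocate up to
 * 512 + 1536 = 2048 MB of VA space.
 */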
1737 /* the allocation logic is a little bit convoluted, but here's how it
1738 * works, in a nutshell:
1739 * - if user hasn't specified on which sockets to allocate memory via
1740 * --socket-mem, we allocate all of our memory on main core socket.
1741 * - if user has specified sockets to allocate memory on, there may be
1742 * some "unused" memory left (e.g. if user has specified --socket-mem
1743 * such that not all memory adds up to 2 gigabytes), so add it to all
1744 * sockets that are in use equally.
1746 * page sizes are sorted by size in descending order, so we can safely
1747 * assume that we dispense with bigger page sizes first.
1750 /* create memseg lists */
1751 for (i = 0; i < rte_socket_count(); i++) {
1752 int hp_sizes = (int) internal_conf->num_hugepage_sizes;
1753 uint64_t max_socket_mem, cur_socket_mem;
1754 unsigned int main_lcore_socket;
1755 struct rte_config *cfg = rte_eal_get_configuration();
1758 socket_id = rte_socket_id_by_idx(i);
1760 #ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
1761 /* we can still sort pages by socket in legacy mode */
1762 if (!internal_conf->legacy_mem && socket_id > 0)
1766 /* if we didn't specifically request memory on this socket */
1767 skip = active_sockets != 0 &&
1768 internal_conf->socket_mem[socket_id] == 0;
1769 /* ...or if we didn't specifically request memory on *any*
1770 * socket, and this is not the main lcore's socket
1772 main_lcore_socket = rte_lcore_to_socket_id(cfg->main_lcore);
1773 skip |= active_sockets == 0 && socket_id != main_lcore_socket;
1776 RTE_LOG(DEBUG, EAL, "Will not preallocate memory on socket %u\n",
1781 /* max amount of memory on this socket */
1782 max_socket_mem = (active_sockets != 0 ?
1783 internal_conf->socket_mem[socket_id] :
1784 internal_conf->memory) +
1785 extra_mem_per_socket;
1788 for (hpi_idx = 0; hpi_idx < hp_sizes; hpi_idx++) {
1789 uint64_t max_pagesz_mem, cur_pagesz_mem = 0;
1790 uint64_t hugepage_sz;
1791 struct hugepage_info *hpi;
1792 int type_msl_idx, max_segs, total_segs = 0;
1794 hpi = &internal_conf->hugepage_info[hpi_idx];
1795 hugepage_sz = hpi->hugepage_sz;
1797 /* check if pages are actually available */
1798 if (hpi->num_pages[socket_id] == 0)
1801 max_segs = RTE_MAX_MEMSEG_PER_TYPE;
1802 max_pagesz_mem = max_socket_mem - cur_socket_mem;
1804 /* make it multiple of page size */
1805 max_pagesz_mem = RTE_ALIGN_FLOOR(max_pagesz_mem,
1808 RTE_LOG(DEBUG, EAL, "Attempting to preallocate "
1809 "%" PRIu64 "M on socket %i\n",
1810 max_pagesz_mem >> 20, socket_id);
1813 while (cur_pagesz_mem < max_pagesz_mem &&
1814 total_segs < max_segs) {
1816 unsigned int n_segs;
1818 if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
1820 "No more space in memseg lists, please increase %s\n",
1821 RTE_STR(RTE_MAX_MEMSEG_LISTS));
1825 msl = &mcfg->memsegs[msl_idx];
1827 cur_mem = get_mem_amount(hugepage_sz,
1829 n_segs = cur_mem / hugepage_sz;
1831 if (eal_memseg_list_init(msl, hugepage_sz,
1832 n_segs, socket_id, type_msl_idx,
1834 /* failing to allocate a memseg list is
1837 RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
1841 if (eal_memseg_list_alloc(msl, 0)) {
1842 /* if we couldn't allocate VA space, we
1843 * can try with smaller page sizes.
1845 RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list, retrying with different page size\n");
1846 /* deallocate memseg list */
1847 if (memseg_list_free(msl))
1852 total_segs += msl->memseg_arr.len;
1853 cur_pagesz_mem = total_segs * hugepage_sz;
1857 cur_socket_mem += cur_pagesz_mem;
1859 if (cur_socket_mem == 0) {
1860 RTE_LOG(ERR, EAL, "Cannot allocate VA space on socket %u\n",
1869 static int __rte_unused
1870 memseg_primary_init(void)
1872 return eal_dynmem_memseg_lists_init();
1876 memseg_secondary_init(void)
1878 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1880 struct rte_memseg_list *msl;
1882 for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
1884 msl = &mcfg->memsegs[msl_idx];
1886 /* skip empty memseg lists */
1887 if (msl->memseg_arr.len == 0)
1890 if (rte_fbarray_attach(&msl->memseg_arr)) {
1891 RTE_LOG(ERR, EAL, "Cannot attach to primary process memseg lists\n");
1895 /* preallocate VA space */
1896 if (eal_memseg_list_alloc(msl, 0)) {
1897 RTE_LOG(ERR, EAL, "Cannot preallocate VA space for hugepage memory\n");
1906 rte_eal_memseg_init(void)
1908 /* increase rlimit to maximum */
1911 #ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
1912 const struct internal_config *internal_conf =
1913 eal_get_internal_configuration();
1915 if (getrlimit(RLIMIT_NOFILE, &lim) == 0) {
1916 /* set limit to maximum */
1917 lim.rlim_cur = lim.rlim_max;
1919 if (setrlimit(RLIMIT_NOFILE, &lim) < 0) {
1920 RTE_LOG(DEBUG, EAL, "Setting maximum number of open files failed: %s\n",
1923 RTE_LOG(DEBUG, EAL, "Setting maximum number of open files to %"
1925 (uint64_t)lim.rlim_cur);
1928 RTE_LOG(ERR, EAL, "Cannot get current resource limits\n");
1930 #ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
1931 if (!internal_conf->legacy_mem && rte_socket_count() > 1) {
1932 RTE_LOG(WARNING, EAL, "DPDK is running on a NUMA system, but is compiled without NUMA support.\n");
1933 RTE_LOG(WARNING, EAL, "This will have adverse consequences for performance and usability.\n");
1934 RTE_LOG(WARNING, EAL, "Please use --"OPT_LEGACY_MEM" option, or recompile with NUMA support.\n");
1938 return rte_eal_process_type() == RTE_PROC_PRIMARY ?
1940 memseg_primary_init_32() :
1942 memseg_primary_init() :
1944 memseg_secondary_init();