/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright(c) 2013 6WIND S.A.
 */

#define _FILE_OFFSET_BITS 64

#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <limits.h>
#include <signal.h>
#include <setjmp.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/queue.h>
#include <sys/file.h>
#include <sys/resource.h>
#include <sys/ioctl.h>

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
#include <numa.h>
#include <numaif.h>
#endif

#include <rte_errno.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_string_fns.h>

#include "eal_private.h"
#include "eal_memalloc.h"
#include "eal_internal_cfg.h"
#include "eal_filesystem.h"
#include "eal_hugepages.h"

#define PFN_MASK_SIZE	8
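/* Each /proc/self/pagemap entry is one 64-bit record per virtual page,
 * hence reads below are done in 8-byte units.
 */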
/*
 * Huge page mapping under Linux
 *
 * To reserve a big contiguous amount of memory, we use the hugepage
 * feature of Linux. For that, we need to have hugetlbfs mounted. This
 * code will create many files in this directory (one per page) and
 * map them in virtual memory. For each page, we will retrieve its
 * physical address and remap it in order to have a virtual contiguous
 * zone as well as a physical contiguous zone.
 */
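/* Note: the physical address of a mapped page is recovered from
 * /proc/self/pagemap (see rte_mem_virt2phy() below), essentially:
 *     pa = pfn(va / page_size) * page_size + (va % page_size)
 */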
static bool phys_addrs_available = true;

#define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"
static void
test_phys_addrs_available(void)
{
	uint64_t tmp = 0;
	phys_addr_t physaddr;

	if (!rte_eal_has_hugepages()) {
		RTE_LOG(ERR, EAL,
			"Started without hugepages support, physical addresses not available\n");
		phys_addrs_available = false;
		return;
	}

	physaddr = rte_mem_virt2phy(&tmp);
	if (physaddr == RTE_BAD_PHYS_ADDR) {
		if (rte_eal_iova_mode() == RTE_IOVA_PA)
			RTE_LOG(ERR, EAL,
				"Cannot obtain physical addresses: %s. "
				"Only vfio will function.\n",
				strerror(errno));
		phys_addrs_available = false;
	}
}
/*
 * Get physical address of any mapped virtual address in the current process.
 */
phys_addr_t
rte_mem_virt2phy(const void *virtaddr)
{
	int fd, retval;
	uint64_t page, physaddr;
	unsigned long virt_pfn;
	int page_size;
	off_t offset;

	/* Cannot parse /proc/self/pagemap, no need to log errors everywhere */
	if (!phys_addrs_available)
		return RTE_BAD_IOVA;

	/* standard page size */
	page_size = getpagesize();

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
			__func__, strerror(errno));
		return RTE_BAD_IOVA;
	}

	virt_pfn = (unsigned long)virtaddr / page_size;
	offset = sizeof(uint64_t) * virt_pfn;
	if (lseek(fd, offset, SEEK_SET) == (off_t) -1) {
		RTE_LOG(ERR, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
			__func__, strerror(errno));
		close(fd);
		return RTE_BAD_IOVA;
	}

	retval = read(fd, &page, PFN_MASK_SIZE);
	close(fd);
	if (retval < 0) {
		RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
			__func__, strerror(errno));
		return RTE_BAD_IOVA;
	} else if (retval != PFN_MASK_SIZE) {
		RTE_LOG(ERR, EAL, "%s(): read %d bytes from /proc/self/pagemap "
			"but expected %d:\n",
			__func__, retval, PFN_MASK_SIZE);
		return RTE_BAD_IOVA;
	}

	/*
	 * the pfn (page frame number) are bits 0-54 (see
	 * pagemap.txt in Linux Documentation)
	 */
	if ((page & 0x7fffffffffffffULL) == 0)
		return RTE_BAD_IOVA;

	physaddr = ((page & 0x7fffffffffffffULL) * page_size)
		+ ((unsigned long)virtaddr % page_size);

	return physaddr;
}
rte_iova_t
rte_mem_virt2iova(const void *virtaddr)
{
	if (rte_eal_iova_mode() == RTE_IOVA_VA)
		return (uintptr_t)virtaddr;
	return rte_mem_virt2phy(virtaddr);
}
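/* In IOVA-as-VA mode, a buffer's IOVA is simply its virtual address, so no
 * pagemap lookup is needed; IOVA-as-PA mode falls back to the physical
 * address lookup above.
 */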
/*
 * For each hugepage in hugepg_tbl, fill the physaddr value. We find
 * it by browsing the /proc/self/pagemap special file.
 */
static int
find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
	unsigned int i;
	phys_addr_t addr;

	for (i = 0; i < hpi->num_pages[0]; i++) {
		addr = rte_mem_virt2phy(hugepg_tbl[i].orig_va);
		if (addr == RTE_BAD_PHYS_ADDR)
			return -1;
		hugepg_tbl[i].physaddr = addr;
	}
	return 0;
}
/*
 * For each hugepage in hugepg_tbl, fill the physaddr value sequentially.
 */
static int
set_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
	unsigned int i;
	static phys_addr_t addr;

	for (i = 0; i < hpi->num_pages[0]; i++) {
		hugepg_tbl[i].physaddr = addr;
		addr += hugepg_tbl[i].size;
	}
	return 0;
}
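/* The sequential values assigned above are placeholders used when real
 * physical addresses cannot be obtained (see the phys_addrs_available
 * check in eal_legacy_hugepage_init()); they keep later sorting and
 * remapping uniform, but are not real bus addresses.
 */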
/*
 * Check whether address-space layout randomization is enabled in
 * the kernel. This is important for multi-process as it can prevent
 * two processes from mapping data to the same virtual address.
 * Returns:
 *    0 - address space randomization disabled
 *    1/2 - address space randomization enabled
 *    negative error code on error
 */
static int
aslr_enabled(void)
{
	char c;
	int retval, fd = open(RANDOMIZE_VA_SPACE_FILE, O_RDONLY);

	if (fd < 0)
		return -errno;
	retval = read(fd, &c, 1);
	close(fd);
	if (retval < 0)
		return -errno;
	else if (retval == 0)
		return -EIO;
	switch (c) {
	case '0': return 0;
	case '1': return 1;
	case '2': return 2;
	default: return -EINVAL;
	}
}
static sigjmp_buf huge_jmpenv;

static void huge_sigbus_handler(int signo __rte_unused)
{
	siglongjmp(huge_jmpenv, 1);
}

/* Put sigsetjmp into a wrapper function to avoid a compiler error. Any
 * non-volatile, non-static local variable in the stack frame calling
 * sigsetjmp might be clobbered by a call to longjmp.
 */
static int huge_wrap_sigsetjmp(void)
{
	return sigsetjmp(huge_jmpenv, 1);
}
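/* Pairing: huge_wrap_sigsetjmp() saves the stack context right before a
 * hugepage is faulted in; if the kernel delivers SIGBUS (e.g. a hugetlb
 * cgroup limit is hit), huge_sigbus_handler() jumps back so mapping can
 * stop gracefully instead of the process being killed.
 */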
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
/* Callback for numa library. */
void numa_error(char *where)
{
	RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
}
#endif
/*
 * Mmap all hugepages of hugepage table: it first opens a file in
 * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
 * virtual address is stored in hugepg_tbl[i].orig_va, else it is stored
 * in hugepg_tbl[i].final_va. The second mapping (when orig is 0) tries to
 * map contiguous physical blocks in contiguous virtual blocks.
 */
static unsigned
map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
		uint64_t *essential_memory __rte_unused)
{
	int fd;
	unsigned i;
	void *virtaddr;
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	int node_id = -1;
	int essential_prev = 0;
	int oldpolicy;
	struct bitmask *oldmask = numa_allocate_nodemask();
	bool have_numa = true;
	unsigned long maxnode = 0;

	/* Check if kernel supports NUMA. */
	if (numa_available() != 0) {
		RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
		have_numa = false;
	}

	if (have_numa) {
		RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
		if (get_mempolicy(&oldpolicy, oldmask->maskp,
				oldmask->size + 1, 0, 0) < 0) {
			RTE_LOG(ERR, EAL,
				"Failed to get current mempolicy: %s. "
				"Assuming MPOL_DEFAULT.\n", strerror(errno));
			oldpolicy = MPOL_DEFAULT;
		}
		for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
			if (internal_config.socket_mem[i])
				maxnode = i + 1;
	}
#endif
	for (i = 0; i < hpi->num_pages[0]; i++) {
		struct hugepage_file *hf = &hugepg_tbl[i];
		uint64_t hugepage_sz = hpi->hugepage_sz;

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
		if (maxnode) {
			unsigned int j;

			for (j = 0; j < maxnode; j++)
				if (essential_memory[j])
					break;

			if (j == maxnode) {
				node_id = (node_id + 1) % maxnode;
				while (!internal_config.socket_mem[node_id]) {
					node_id++;
					node_id %= maxnode;
				}
				essential_prev = 0;
			} else {
				node_id = j;
				essential_prev = essential_memory[j];

				if (essential_memory[j] < hugepage_sz)
					essential_memory[j] = 0;
				else
					essential_memory[j] -= hugepage_sz;
			}

			RTE_LOG(DEBUG, EAL,
				"Setting policy MPOL_PREFERRED for socket %d\n",
				node_id);
			numa_set_preferred(node_id);
		}
#endif

		hf->file_id = i;
		hf->size = hugepage_sz;
		eal_get_hugefile_path(hf->filepath, sizeof(hf->filepath),
				hpi->hugedir, hf->file_id);
		hf->filepath[sizeof(hf->filepath) - 1] = '\0';
		/* try to create hugepage file */
		fd = open(hf->filepath, O_CREAT | O_RDWR, 0600);
		if (fd < 0) {
			RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
					strerror(errno));
			goto out;
		}

		/* map the segment, and populate page tables,
		 * the kernel fills this segment with zeros. we don't care where
		 * this gets mapped - we already have contiguous memory areas
		 * ready for us to map into.
		 */
		virtaddr = mmap(NULL, hugepage_sz, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_POPULATE, fd, 0);
		if (virtaddr == MAP_FAILED) {
			RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
					strerror(errno));
			close(fd);
			goto out;
		}

		hf->orig_va = virtaddr;
		/* In Linux, hugetlb limitations, like cgroup, are
		 * enforced at fault time instead of mmap(), even
		 * with the option of MAP_POPULATE. The kernel will send
		 * a SIGBUS signal. To avoid being killed, save the stack
		 * environment here; if SIGBUS happens, we can jump
		 * back to it.
		 */
		if (huge_wrap_sigsetjmp()) {
			RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more "
				"hugepages of size %u MB\n",
				(unsigned int)(hugepage_sz / 0x100000));
			munmap(virtaddr, hugepage_sz);
			close(fd);
			unlink(hugepg_tbl[i].filepath);
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
			if (maxnode)
				essential_memory[node_id] =
					essential_prev;
#endif
			goto out;
		}
		*(int *)virtaddr = 0;

		/* set shared lock on the file. */
		if (flock(fd, LOCK_SH) < 0) {
			RTE_LOG(DEBUG, EAL, "%s(): Locking file failed: %s\n",
				__func__, strerror(errno));
			close(fd);
			goto out;
		}

		close(fd);
	}
out:
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	if (maxnode) {
		RTE_LOG(DEBUG, EAL,
			"Restoring previous memory policy: %d\n", oldpolicy);
		if (oldpolicy == MPOL_DEFAULT) {
			numa_set_localalloc();
		} else if (set_mempolicy(oldpolicy, oldmask->maskp,
				oldmask->size + 1) < 0) {
			RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
				strerror(errno));
			numa_set_localalloc();
		}
	}
	numa_free_cpumask(oldmask);
#endif
	return i;
}
/*
 * Parse /proc/self/numa_maps to get the NUMA socket ID for each huge
 * page.
 */
static int
find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
	int socket_id;
	char *end, *nodestr;
	unsigned i, hp_count = 0;
	uint64_t virt_addr;
	char buf[BUFSIZ];
	char hugedir_str[PATH_MAX];
	FILE *f;

	f = fopen("/proc/self/numa_maps", "r");
	if (f == NULL) {
		RTE_LOG(NOTICE, EAL, "NUMA support not available,"
			" consider that all memory is in socket_id 0\n");
		return 0;
	}

	snprintf(hugedir_str, sizeof(hugedir_str),
			"%s/%s", hpi->hugedir, internal_config.hugefile_prefix);
	/* parse numa map */
	while (fgets(buf, sizeof(buf), f) != NULL) {

		/* ignore non huge page */
		if (strstr(buf, " huge ") == NULL &&
				strstr(buf, hugedir_str) == NULL)
			continue;

		/* get zone addr */
		virt_addr = strtoull(buf, &end, 16);
		if (virt_addr == 0 || end == buf) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}

		/* get node id (socket id) */
		nodestr = strstr(buf, " N");
		if (nodestr == NULL) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}

		/* skip the " N" */
		nodestr += 2;
		end = strstr(nodestr, "=");
		if (end == NULL) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}

		/* get node id (socket id) */
		*end = '\0';
		end = NULL;
		socket_id = strtoul(nodestr, &end, 0);
		if ((nodestr[0] == '\0') || (end == NULL) || (*end != '\0')) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}

		/* if we find this page in our mappings, set socket_id */
		for (i = 0; i < hpi->num_pages[0]; i++) {
			void *va = (void *)(unsigned long)virt_addr;
			if (hugepg_tbl[i].orig_va == va) {
				hugepg_tbl[i].socket_id = socket_id;
				hp_count++;
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
				RTE_LOG(DEBUG, EAL,
					"Hugepage %s is on socket %d\n",
					hugepg_tbl[i].filepath, socket_id);
#endif
			}
		}
	}

	if (hp_count < hpi->num_pages[0])
		goto error;

	fclose(f);
	return 0;

error:
	fclose(f);
	return -1;
}
static int
cmp_physaddr(const void *a, const void *b)
{
#ifndef RTE_ARCH_PPC_64
	const struct hugepage_file *p1 = a;
	const struct hugepage_file *p2 = b;
#else
	/* PowerPC needs memory sorted in reverse order from x86 */
	const struct hugepage_file *p1 = b;
	const struct hugepage_file *p2 = a;
#endif
	if (p1->physaddr < p2->physaddr)
		return -1;
	else if (p1->physaddr > p2->physaddr)
		return 1;
	else
		return 0;
}
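/* This comparator is handed to qsort() during legacy init so hugepages end
 * up ordered by physical address, letting physically contiguous runs be
 * detected and remapped into contiguous virtual ranges.
 */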
/*
 * Uses mmap to create a shared memory area for storage of data
 * Used in this file to store the hugepage file map on disk
 */
static void *
create_shared_memory(const char *filename, const size_t mem_size)
{
	void *retval;
	int fd;

	/* if no shared files mode is used, create anonymous memory instead */
	if (internal_config.no_shconf) {
		retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (retval == MAP_FAILED)
			return NULL;
		return retval;
	}

	fd = open(filename, O_CREAT | O_RDWR, 0666);
	if (fd < 0)
		return NULL;
	if (ftruncate(fd, mem_size) < 0) {
		close(fd);
		return NULL;
	}
	retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);
	if (retval == MAP_FAILED)
		return NULL;
	return retval;
}
/*
 * This copies *active* hugepages from one hugepage table to another.
 * destination is typically the shared memory.
 */
static int
copy_hugepages_to_shared_mem(struct hugepage_file *dst, int dest_size,
		const struct hugepage_file *src, int src_size)
{
	int src_pos, dst_pos = 0;

	for (src_pos = 0; src_pos < src_size; src_pos++) {
		if (src[src_pos].orig_va != NULL) {
			/* error on overflow attempt */
			if (dst_pos == dest_size)
				return -1;
			memcpy(&dst[dst_pos], &src[src_pos], sizeof(struct hugepage_file));
			dst_pos++;
		}
	}
	return 0;
}
static int
unlink_hugepage_files(struct hugepage_file *hugepg_tbl,
		unsigned num_hp_info)
{
	unsigned socket, size;
	int page, nrpages = 0;

	/* get total number of hugepages */
	for (size = 0; size < num_hp_info; size++)
		for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
			nrpages +=
			internal_config.hugepage_info[size].num_pages[socket];

	for (page = 0; page < nrpages; page++) {
		struct hugepage_file *hp = &hugepg_tbl[page];

		if (hp->final_va != NULL && unlink(hp->filepath)) {
			RTE_LOG(WARNING, EAL, "%s(): Removing %s failed: %s\n",
				__func__, hp->filepath, strerror(errno));
		}
	}
	return 0;
}
/*
 * unmaps hugepages that are not going to be used. since we originally allocate
 * ALL hugepages (not just those we need), additional unmapping needs to be done.
 */
static int
unmap_unneeded_hugepages(struct hugepage_file *hugepg_tbl,
		struct hugepage_info *hpi,
		unsigned num_hp_info)
{
	unsigned socket, size;
	int page, nrpages = 0;

	/* get total number of hugepages */
	for (size = 0; size < num_hp_info; size++)
		for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
			nrpages += internal_config.hugepage_info[size].num_pages[socket];

	for (size = 0; size < num_hp_info; size++) {
		for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
			unsigned pages_found = 0;

			/* traverse until we have unmapped all the unused pages */
			for (page = 0; page < nrpages; page++) {
				struct hugepage_file *hp = &hugepg_tbl[page];

				/* find a page that matches the criteria */
				if ((hp->size == hpi[size].hugepage_sz) &&
						(hp->socket_id == (int) socket)) {

					/* if we skipped enough pages, unmap the rest */
					if (pages_found == hpi[size].num_pages[socket]) {
						uint64_t unmap_len;

						unmap_len = hp->size;

						/* get start addr and len of the remaining segment */
						munmap(hp->orig_va,
							(size_t)unmap_len);

						hp->orig_va = NULL;
						if (unlink(hp->filepath) == -1) {
							RTE_LOG(ERR, EAL, "%s(): Removing %s failed: %s\n",
									__func__, hp->filepath, strerror(errno));
							return -1;
						}
					} else {
						/* lock the page and skip */
						pages_found++;
					}
				} /* match page */
			} /* foreach page */
		} /* foreach socket */
	} /* foreach pagesize */

	return 0;
}
static int
remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl;
	struct rte_fbarray *arr;
	int cur_page, seg_len;
	unsigned int msl_idx;
	int ms_idx;
	uint64_t page_sz;
	size_t memseg_len;
	int socket_id;

	page_sz = hugepages[seg_start].size;
	socket_id = hugepages[seg_start].socket_id;
	seg_len = seg_end - seg_start;

	RTE_LOG(DEBUG, EAL, "Attempting to map %" PRIu64 "M on socket %i\n",
			(seg_len * page_sz) >> 20ULL, socket_id);

	/* find free space in memseg lists */
	for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
		bool empty;

		msl = &mcfg->memsegs[msl_idx];
		arr = &msl->memseg_arr;

		if (msl->page_sz != page_sz)
			continue;
		if (msl->socket_id != socket_id)
			continue;

		/* leave space for a hole if array is not empty */
		empty = arr->count == 0;
		ms_idx = rte_fbarray_find_next_n_free(arr, 0,
				seg_len + (empty ? 0 : 1));

		/* memseg list is full? */
		if (ms_idx < 0)
			continue;

		/* leave some space between memsegs, they are not IOVA
		 * contiguous, so they shouldn't be VA contiguous either.
		 */
		if (!empty)
			ms_idx++;
		break;
	}
	if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
		RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase %s and/or %s in configuration.\n",
				RTE_STR(CONFIG_RTE_MAX_MEMSEG_PER_TYPE),
				RTE_STR(CONFIG_RTE_MAX_MEM_PER_TYPE));
		return -1;
	}

#ifdef RTE_ARCH_PPC64
	/* for PPC64 we go through the list backwards */
	for (cur_page = seg_end - 1; cur_page >= seg_start;
			cur_page--, ms_idx++) {
#else
	for (cur_page = seg_start; cur_page < seg_end; cur_page++, ms_idx++) {
#endif
		struct hugepage_file *hfile = &hugepages[cur_page];
		struct rte_memseg *ms = rte_fbarray_get(arr, ms_idx);
		void *addr;
		int fd;

		fd = open(hfile->filepath, O_RDWR);
		if (fd < 0) {
			RTE_LOG(ERR, EAL, "Could not open '%s': %s\n",
					hfile->filepath, strerror(errno));
			return -1;
		}
		/* set shared lock on the file. */
		if (flock(fd, LOCK_SH) < 0) {
			RTE_LOG(DEBUG, EAL, "Could not lock '%s': %s\n",
					hfile->filepath, strerror(errno));
			close(fd);
			return -1;
		}
		memseg_len = (size_t)page_sz;
		addr = RTE_PTR_ADD(msl->base_va, ms_idx * memseg_len);

		/* we know this address is already mmapped by memseg list, so
		 * using MAP_FIXED here is safe
		 */
		addr = mmap(addr, page_sz, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd, 0);
		if (addr == MAP_FAILED) {
			RTE_LOG(ERR, EAL, "Couldn't remap '%s': %s\n",
					hfile->filepath, strerror(errno));
			close(fd);
			return -1;
		}

		/* we have a new address, so unmap previous one */
#ifndef RTE_ARCH_64
		/* in 32-bit legacy mode, we have already unmapped the page */
		if (!internal_config.legacy_mem)
			munmap(hfile->orig_va, page_sz);
#else
		munmap(hfile->orig_va, page_sz);
#endif

		hfile->orig_va = NULL;
		hfile->final_va = addr;

		/* rewrite physical addresses in IOVA as VA mode */
		if (rte_eal_iova_mode() == RTE_IOVA_VA)
			hfile->physaddr = (uintptr_t)addr;

		/* set up memseg data */
		ms->addr = addr;
		ms->hugepage_sz = page_sz;
		ms->len = memseg_len;
		ms->iova = hfile->physaddr;
		ms->socket_id = hfile->socket_id;
		ms->nchannel = rte_memory_get_nchannel();
		ms->nrank = rte_memory_get_nrank();

		rte_fbarray_set_used(arr, ms_idx);

		close(fd);
	}
	RTE_LOG(DEBUG, EAL, "Allocated %" PRIu64 "M on socket %i\n",
			(seg_len * page_sz) >> 20, socket_id);
	return 0;
}
static uint64_t
get_mem_amount(uint64_t page_sz, uint64_t max_mem)
{
	uint64_t area_sz, max_pages;

	/* limit to RTE_MAX_MEMSEG_PER_LIST pages or RTE_MAX_MEM_MB_PER_LIST */
	max_pages = RTE_MAX_MEMSEG_PER_LIST;
	max_mem = RTE_MIN((uint64_t)RTE_MAX_MEM_MB_PER_LIST << 20, max_mem);

	area_sz = RTE_MIN(page_sz * max_pages, max_mem);

	/* make sure the list isn't smaller than the page size */
	area_sz = RTE_MAX(area_sz, page_sz);

	return RTE_ALIGN(area_sz, page_sz);
}
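/* Worked example (using common build-time defaults, which may differ):
 * with 2 MB pages, RTE_MAX_MEMSEG_PER_LIST = 8192 and
 * RTE_MAX_MEM_MB_PER_LIST = 32768, the page-count cap allows 16 GB and the
 * memory cap 32 GB, so one list covers min(16 GB, 32 GB) = 16 GB.
 */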
static int
free_memseg_list(struct rte_memseg_list *msl)
{
	if (rte_fbarray_destroy(&msl->memseg_arr)) {
		RTE_LOG(ERR, EAL, "Cannot destroy memseg list\n");
		return -1;
	}
	memset(msl, 0, sizeof(*msl));
	return 0;
}

#define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"
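/* The fbarray name encodes page size in kB, socket ID and per-type list
 * index, e.g. "memseg-2048k-0-0" for the first 2 MB list on socket 0.
 */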
static int
alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
		int n_segs, int socket_id, int type_msl_idx)
{
	char name[RTE_FBARRAY_NAME_LEN];

	snprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,
		 type_msl_idx);
	if (rte_fbarray_init(&msl->memseg_arr, name, n_segs,
			sizeof(struct rte_memseg))) {
		RTE_LOG(ERR, EAL, "Cannot allocate memseg list: %s\n",
			rte_strerror(rte_errno));
		return -1;
	}

	msl->page_sz = page_sz;
	msl->socket_id = socket_id;
	msl->base_va = NULL;

	RTE_LOG(DEBUG, EAL, "Memseg list allocated: 0x%zxkB at socket %i\n",
			(size_t)page_sz >> 10, socket_id);

	return 0;
}
static int
alloc_va_space(struct rte_memseg_list *msl)
{
	uint64_t page_sz, mem_sz;
	void *addr;
	int flags = 0;

#ifdef RTE_ARCH_PPC_64
	flags |= MAP_HUGETLB;
#endif

	page_sz = msl->page_sz;
	mem_sz = page_sz * msl->memseg_arr.len;

	addr = eal_get_virtual_area(msl->base_va, &mem_sz, page_sz, 0, flags);
	if (addr == NULL) {
		if (rte_errno == EADDRNOTAVAIL)
			RTE_LOG(ERR, EAL, "Could not mmap %llu bytes at [%p] - please use '--base-virtaddr' option\n",
				(unsigned long long)mem_sz, msl->base_va);
		else
			RTE_LOG(ERR, EAL, "Cannot reserve memory\n");
		return -1;
	}

	msl->base_va = addr;

	return 0;
}
/*
 * Our VA space is not preallocated yet, so preallocate it here. We need to
 * know how many segments there are in order to map all pages into one
 * address space, and leave appropriate holes between segments so that
 * rte_malloc does not concatenate them into one big segment.
 *
 * We also need to unmap original pages to free up address space.
 */
static int __rte_unused
prealloc_segments(struct hugepage_file *hugepages, int n_pages)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int cur_page, seg_start_page, end_seg, new_memseg;
	unsigned int hpi_idx, socket, i;
	int n_contig_segs, n_segs;

	/* before we preallocate segments, we need to free up our VA space.
	 * we're not removing files, and we already have information about
	 * PA-contiguousness, so it is safe to unmap everything.
	 */
	for (cur_page = 0; cur_page < n_pages; cur_page++) {
		struct hugepage_file *hpi = &hugepages[cur_page];
		munmap(hpi->orig_va, hpi->size);
		hpi->orig_va = NULL;
	}

	/* we cannot know how many page sizes and sockets we have discovered, so
	 * loop over all of them
	 */
	for (hpi_idx = 0; hpi_idx < internal_config.num_hugepage_sizes;
			hpi_idx++) {
		uint64_t page_sz =
			internal_config.hugepage_info[hpi_idx].hugepage_sz;

		for (i = 0; i < rte_socket_count(); i++) {
			struct rte_memseg_list *msl;
			int msl_idx;

			socket = rte_socket_id_by_idx(i);
			n_contig_segs = 0;
			n_segs = 0;
			seg_start_page = -1;

			for (cur_page = 0; cur_page < n_pages; cur_page++) {
				struct hugepage_file *prev, *cur;
				int prev_seg_start_page = -1;

				cur = &hugepages[cur_page];
				prev = cur_page == 0 ? NULL :
						&hugepages[cur_page - 1];

				new_memseg = 0;
				end_seg = 0;

				if (cur->size == 0)
					end_seg = 1;
				else if (cur->socket_id != (int) socket)
					end_seg = 1;
				else if (cur->size != page_sz)
					end_seg = 1;
				else if (cur_page == 0)
					new_memseg = 1;
#ifdef RTE_ARCH_PPC_64
				/* On PPC64 architecture, the mmap always start
				 * from higher address to lower address. Here,
				 * physical addresses are in descending order.
				 */
				else if ((prev->physaddr - cur->physaddr) !=
						cur->size)
					new_memseg = 1;
#else
				else if ((cur->physaddr - prev->physaddr) !=
						cur->size)
					new_memseg = 1;
#endif

				if (new_memseg) {
					/* if we're already inside a segment,
					 * new segment means end of current one
					 */
					if (seg_start_page != -1) {
						end_seg = 1;
						prev_seg_start_page =
								seg_start_page;
					}
					seg_start_page = cur_page;
				}

				if (end_seg) {
					if (prev_seg_start_page != -1) {
						/* we've found a new segment */
						n_contig_segs++;
						n_segs += cur_page -
								prev_seg_start_page;
					} else if (seg_start_page != -1) {
						/* we didn't find new segment,
						 * but did end current one
						 */
						n_contig_segs++;
						n_segs += cur_page -
								seg_start_page;
						seg_start_page = -1;
						continue;
					} else {
						/* we're skipping this page */
						continue;
					}
				}
				/* segment continues */
			}
			/* check if we missed last segment */
			if (seg_start_page != -1) {
				n_contig_segs++;
				n_segs += cur_page - seg_start_page;
			}

			/* if no segments were found, do not preallocate */
			if (n_segs == 0)
				continue;

			/* we now have total number of pages that we will
			 * allocate for this segment list. add separator pages
			 * to the total count, and preallocate VA space.
			 */
			n_segs += n_contig_segs - 1;

			/* now, preallocate VA space for these segments */

			/* first, find suitable memseg list for this */
			for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS;
					msl_idx++) {
				msl = &mcfg->memsegs[msl_idx];

				if (msl->base_va != NULL)
					continue;
				break;
			}
			if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
				RTE_LOG(ERR, EAL, "Not enough space in memseg lists, please increase %s\n",
					RTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));
				return -1;
			}

			/* now, allocate fbarray itself */
			if (alloc_memseg_list(msl, page_sz, n_segs, socket,
					msl_idx) < 0)
				return -1;

			/* finally, allocate VA space */
			if (alloc_va_space(msl) < 0)
				return -1;
		}
	}
	return 0;
}
/*
 * We cannot reallocate memseg lists on the fly because PPC64 stores pages
 * backwards, therefore we have to process the entire memseg first before
 * remapping it into memseg list VA space.
 */
static int
remap_needed_hugepages(struct hugepage_file *hugepages, int n_pages)
{
	int cur_page, seg_start_page, new_memseg, ret;

	seg_start_page = 0;
	for (cur_page = 0; cur_page < n_pages; cur_page++) {
		struct hugepage_file *prev, *cur;

		new_memseg = 0;

		cur = &hugepages[cur_page];
		prev = cur_page == 0 ? NULL : &hugepages[cur_page - 1];

		/* if size is zero, no more pages left */
		if (cur->size == 0)
			break;

		if (cur_page == 0)
			new_memseg = 1;
		else if (cur->socket_id != prev->socket_id)
			new_memseg = 1;
		else if (cur->size != prev->size)
			new_memseg = 1;
#ifdef RTE_ARCH_PPC_64
		/* On PPC64 architecture, the mmap always start from higher
		 * address to lower address. Here, physical addresses are in
		 * descending order.
		 */
		else if ((prev->physaddr - cur->physaddr) != cur->size)
			new_memseg = 1;
#else
		else if ((cur->physaddr - prev->physaddr) != cur->size)
			new_memseg = 1;
#endif

		if (new_memseg) {
			/* if this isn't the first time, remap segment */
			if (cur_page != 0) {
				ret = remap_segment(hugepages, seg_start_page,
						cur_page);
				if (ret != 0)
					return -1;
			}
			/* remember where we started */
			seg_start_page = cur_page;
		}
		/* continuation of previous memseg */
	}
	/* we were stopped, but we didn't remap the last segment, do it now */
	if (cur_page != 0) {
		ret = remap_segment(hugepages, seg_start_page,
				cur_page);
		if (ret != 0)
			return -1;
	}
	return 0;
}
static inline uint64_t
get_socket_mem_size(int socket)
{
	uint64_t size = 0;
	unsigned i;

	for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
		struct hugepage_info *hpi = &internal_config.hugepage_info[i];
		size += hpi->hugepage_sz * hpi->num_pages[socket];
	}

	return size;
}
/*
 * This function is a NUMA-aware equivalent of calc_num_pages.
 * It takes in the list of hugepage sizes and the
 * number of pages thereof, and calculates the best number of
 * pages of each size to fulfill the request for <memory> RAM
 */
static int
calc_num_pages_per_socket(uint64_t *memory,
		struct hugepage_info *hp_info,
		struct hugepage_info *hp_used,
		unsigned num_hp_info)
{
	unsigned socket, j, i = 0;
	unsigned requested, available;
	int total_num_pages = 0;
	uint64_t remaining_mem, cur_mem;
	uint64_t total_mem = internal_config.memory;

	if (num_hp_info == 0)
		return -1;

	/* if specific memory amounts per socket weren't requested */
	if (internal_config.force_sockets == 0) {
		size_t total_size;
#ifdef RTE_ARCH_64
		int cpu_per_socket[RTE_MAX_NUMA_NODES];
		size_t default_size;
		unsigned lcore_id;

		/* Compute number of cores per socket */
		memset(cpu_per_socket, 0, sizeof(cpu_per_socket));
		RTE_LCORE_FOREACH(lcore_id) {
			cpu_per_socket[rte_lcore_to_socket_id(lcore_id)]++;
		}

		/*
		 * Automatically spread requested memory amongst detected
		 * sockets according to number of cores from cpu mask present
		 * on each socket
		 */
		total_size = internal_config.memory;
		for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {

			/* Set memory amount per socket */
			default_size = (internal_config.memory * cpu_per_socket[socket])
					/ rte_lcore_count();

			/* Limit to maximum available memory on socket */
			default_size = RTE_MIN(default_size, get_socket_mem_size(socket));

			/* Update sizes */
			memory[socket] = default_size;
			total_size -= default_size;
		}

		/*
		 * If some memory is remaining, try to allocate it by getting
		 * all available memory from sockets, one after the other
		 */
		for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {
			/* take whatever is available */
			default_size = RTE_MIN(get_socket_mem_size(socket) - memory[socket],
					       total_size);

			/* Update sizes */
			memory[socket] += default_size;
			total_size -= default_size;
		}
#else
		/* in 32-bit mode, allocate all of the memory only on master
		 * lcore socket
		 */
		total_size = internal_config.memory;
		for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0;
				socket++) {
			struct rte_config *cfg = rte_eal_get_configuration();
			unsigned int master_lcore_socket;

			master_lcore_socket =
				rte_lcore_to_socket_id(cfg->master_lcore);

			if (master_lcore_socket != socket)
				continue;

			/* Update sizes */
			memory[socket] = total_size;
			total_size = 0;
		}
#endif
	}

	for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_mem != 0; socket++) {
		/* skips if the memory on specific socket wasn't requested */
		for (i = 0; i < num_hp_info && memory[socket] != 0; i++) {
			strlcpy(hp_used[i].hugedir, hp_info[i].hugedir,
				sizeof(hp_used[i].hugedir));
			hp_used[i].num_pages[socket] = RTE_MIN(
					memory[socket] / hp_info[i].hugepage_sz,
					hp_info[i].num_pages[socket]);

			cur_mem = hp_used[i].num_pages[socket] *
					hp_used[i].hugepage_sz;

			memory[socket] -= cur_mem;
			total_mem -= cur_mem;

			total_num_pages += hp_used[i].num_pages[socket];

			/* check if we have met all memory requests */
			if (memory[socket] == 0)
				break;

			/* check if we have any more pages left at this size,
			 * if so move on to next size */
			if (hp_used[i].num_pages[socket] == hp_info[i].num_pages[socket])
				continue;
			/* At this point we know that there are more pages
			 * available that are bigger than the memory we want,
			 * so let's see if we can get enough from other page
			 * sizes.
			 */
			remaining_mem = 0;
			for (j = i+1; j < num_hp_info; j++)
				remaining_mem += hp_info[j].hugepage_sz *
				hp_info[j].num_pages[socket];

			/* is there enough other memory, if not allocate another page and quit */
			if (remaining_mem < memory[socket]) {
				cur_mem = RTE_MIN(memory[socket],
						hp_info[i].hugepage_sz);
				memory[socket] -= cur_mem;
				total_mem -= cur_mem;
				hp_used[i].num_pages[socket]++;
				total_num_pages++;
				break; /* we are done with this socket */
			}
		}
		/* if we didn't satisfy all memory requirements per socket */
		if (memory[socket] > 0 &&
				internal_config.socket_mem[socket] != 0) {
			/* to prevent icc errors */
			requested = (unsigned) (internal_config.socket_mem[socket] /
					0x100000);
			available = requested -
					((unsigned) (memory[socket] / 0x100000));
			RTE_LOG(ERR, EAL, "Not enough memory available on socket %u! "
					"Requested: %uMB, available: %uMB\n", socket,
					requested, available);
			return -1;
		}
	}

	/* if we didn't satisfy total memory requirements */
	if (total_mem > 0) {
		requested = (unsigned) (internal_config.memory / 0x100000);
		available = requested - (unsigned) (total_mem / 0x100000);
		RTE_LOG(ERR, EAL, "Not enough memory available! Requested: %uMB,"
				" available: %uMB\n", requested, available);
		return -1;
	}
	return total_num_pages;
}
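/* Hypothetical example: with --socket-mem=1024,1024 and only 2 MB pages
 * available, this reserves 512 pages on each of sockets 0 and 1 and
 * returns 1024.
 */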
static inline size_t
eal_get_hugepage_mem_size(void)
{
	uint64_t size = 0;
	unsigned i, j;

	for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
		struct hugepage_info *hpi = &internal_config.hugepage_info[i];
		if (strnlen(hpi->hugedir, sizeof(hpi->hugedir)) != 0) {
			for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
				size += hpi->hugepage_sz * hpi->num_pages[j];
			}
		}
	}

	return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
}
static struct sigaction huge_action_old;
static int huge_need_recover;

static void
huge_register_sigbus(void)
{
	sigset_t mask;
	struct sigaction action;

	sigemptyset(&mask);
	sigaddset(&mask, SIGBUS);
	action.sa_flags = 0;
	action.sa_mask = mask;
	action.sa_handler = huge_sigbus_handler;

	huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);
}

static void
huge_recover_sigbus(void)
{
	if (huge_need_recover) {
		sigaction(SIGBUS, &huge_action_old, NULL);
		huge_need_recover = 0;
	}
}
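/* huge_register_sigbus()/huge_recover_sigbus() bracket the mapping loop in
 * eal_legacy_hugepage_init(), so the temporary SIGBUS handler is installed
 * only while hugepages are being faulted in.
 */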
/*
 * Prepare physical memory mapping: fill the configuration structure with
 * this info, return 0 on success.
 *  1. map N huge pages in separate files in hugetlbfs
 *  2. find associated physical addr
 *  3. find associated NUMA socket ID
 *  4. sort all huge pages by physical address
 *  5. remap these N huge pages in the correct order
 *  6. unmap the first mapping
 *  7. fill memsegs in configuration with contiguous zones
 */
static int
eal_legacy_hugepage_init(void)
{
	struct rte_mem_config *mcfg;
	struct hugepage_file *hugepage = NULL, *tmp_hp = NULL;
	struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
	struct rte_fbarray *arr;
	struct rte_memseg *ms;

	uint64_t memory[RTE_MAX_NUMA_NODES];

	unsigned hp_offset;
	int i, j;
	int nr_hugefiles, nr_hugepages = 0;
	void *addr;

	test_phys_addrs_available();

	memset(used_hp, 0, sizeof(used_hp));

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* hugetlbfs can be disabled */
	if (internal_config.no_hugetlbfs) {
		struct rte_memseg_list *msl;
		uint64_t page_sz;
		int n_segs, cur_seg;

		/* nohuge mode is legacy mode */
		internal_config.legacy_mem = 1;

		/* create a memseg list */
		msl = &mcfg->memsegs[0];

		page_sz = RTE_PGSIZE_4K;
		n_segs = internal_config.memory / page_sz;

		if (rte_fbarray_init(&msl->memseg_arr, "nohugemem", n_segs,
					sizeof(struct rte_memseg))) {
			RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
			return -1;
		}

		addr = mmap(NULL, internal_config.memory, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (addr == MAP_FAILED) {
			RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
					strerror(errno));
			return -1;
		}
		msl->base_va = addr;
		msl->page_sz = page_sz;
		msl->socket_id = 0;

		/* populate memsegs. each memseg is one page long */
		for (cur_seg = 0; cur_seg < n_segs; cur_seg++) {
			arr = &msl->memseg_arr;

			ms = rte_fbarray_get(arr, cur_seg);
			if (rte_eal_iova_mode() == RTE_IOVA_VA)
				ms->iova = (uintptr_t)addr;
			else
				ms->iova = RTE_BAD_IOVA;
			ms->addr = addr;
			ms->hugepage_sz = page_sz;
			ms->socket_id = 0;
			ms->len = page_sz;

			rte_fbarray_set_used(arr, cur_seg);

			addr = RTE_PTR_ADD(addr, (size_t)page_sz);
		}
		return 0;
	}
	/* calculate total number of hugepages available. at this point we haven't
	 * yet started sorting them so they all are on socket 0 */
	for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
		/* meanwhile, also initialize used_hp hugepage sizes in used_hp */
		used_hp[i].hugepage_sz = internal_config.hugepage_info[i].hugepage_sz;

		nr_hugepages += internal_config.hugepage_info[i].num_pages[0];
	}

	/*
	 * allocate a memory area for hugepage table.
	 * this isn't shared memory yet. due to the fact that we need some
	 * processing done on these pages, shared memory will be created
	 * at a later stage.
	 */
	tmp_hp = malloc(nr_hugepages * sizeof(struct hugepage_file));
	if (tmp_hp == NULL)
		goto fail;

	memset(tmp_hp, 0, nr_hugepages * sizeof(struct hugepage_file));

	hp_offset = 0; /* where we start the current page size entries */

	huge_register_sigbus();

	/* make a copy of socket_mem, needed for balanced allocation. */
	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
		memory[i] = internal_config.socket_mem[i];

	/* map all hugepages and sort them */
	for (i = 0; i < (int)internal_config.num_hugepage_sizes; i++) {
		unsigned pages_old, pages_new;
		struct hugepage_info *hpi;

		/*
		 * we don't yet mark hugepages as used at this stage, so
		 * we just map all hugepages available to the system
		 * all hugepages are still located on socket 0
		 */
		hpi = &internal_config.hugepage_info[i];

		if (hpi->num_pages[0] == 0)
			continue;

		/* map all hugepages available */
		pages_old = hpi->num_pages[0];
		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, memory);
		if (pages_new < pages_old) {
			RTE_LOG(DEBUG, EAL,
				"%d not %d hugepages of size %u MB allocated\n",
				pages_new, pages_old,
				(unsigned)(hpi->hugepage_sz / 0x100000));

			int pages = pages_old - pages_new;

			nr_hugepages -= pages;
			hpi->num_pages[0] = pages_new;
			if (pages_new == 0)
				continue;
		}

		if (phys_addrs_available &&
				rte_eal_iova_mode() != RTE_IOVA_VA) {
			/* find physical addresses for each hugepage */
			if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
				RTE_LOG(DEBUG, EAL, "Failed to find phys addr "
					"for %u MB pages\n",
					(unsigned int)(hpi->hugepage_sz / 0x100000));
				goto fail;
			}
		} else {
			/* set physical addresses for each hugepage */
			if (set_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
				RTE_LOG(DEBUG, EAL, "Failed to set phys addr "
					"for %u MB pages\n",
					(unsigned int)(hpi->hugepage_sz / 0x100000));
				goto fail;
			}
		}

		if (find_numasocket(&tmp_hp[hp_offset], hpi) < 0) {
			RTE_LOG(DEBUG, EAL, "Failed to find NUMA socket for %u MB pages\n",
					(unsigned)(hpi->hugepage_sz / 0x100000));
			goto fail;
		}

		qsort(&tmp_hp[hp_offset], hpi->num_pages[0],
		      sizeof(struct hugepage_file), cmp_physaddr);

		/* we have processed a num of hugepages of this size, so inc offset */
		hp_offset += hpi->num_pages[0];
	}

	huge_recover_sigbus();

	if (internal_config.memory == 0 && internal_config.force_sockets == 0)
		internal_config.memory = eal_get_hugepage_mem_size();

	nr_hugefiles = nr_hugepages;
1489 nr_hugefiles = nr_hugepages;
1492 /* clean out the numbers of pages */
1493 for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++)
1494 for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
1495 internal_config.hugepage_info[i].num_pages[j] = 0;
1497 /* get hugepages for each socket */
1498 for (i = 0; i < nr_hugefiles; i++) {
1499 int socket = tmp_hp[i].socket_id;
1501 /* find a hugepage info with right size and increment num_pages */
1502 const int nb_hpsizes = RTE_MIN(MAX_HUGEPAGE_SIZES,
1503 (int)internal_config.num_hugepage_sizes);
1504 for (j = 0; j < nb_hpsizes; j++) {
1505 if (tmp_hp[i].size ==
1506 internal_config.hugepage_info[j].hugepage_sz) {
1507 internal_config.hugepage_info[j].num_pages[socket]++;
1512 /* make a copy of socket_mem, needed for number of pages calculation */
1513 for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
1514 memory[i] = internal_config.socket_mem[i];
1516 /* calculate final number of pages */
1517 nr_hugepages = calc_num_pages_per_socket(memory,
1518 internal_config.hugepage_info, used_hp,
1519 internal_config.num_hugepage_sizes);
1521 /* error if not enough memory available */
1522 if (nr_hugepages < 0)
1526 for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
1527 for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
1528 if (used_hp[i].num_pages[j] > 0) {
1530 "Requesting %u pages of size %uMB"
1531 " from socket %i\n",
1532 used_hp[i].num_pages[j],
1534 (used_hp[i].hugepage_sz / 0x100000),
	/* create shared memory */
	hugepage = create_shared_memory(eal_hugepage_data_path(),
			nr_hugefiles * sizeof(struct hugepage_file));

	if (hugepage == NULL) {
		RTE_LOG(ERR, EAL, "Failed to create shared memory!\n");
		goto fail;
	}
	memset(hugepage, 0, nr_hugefiles * sizeof(struct hugepage_file));

	/*
	 * unmap pages that we won't need (looks at used_hp).
	 * also, sets final_va to NULL on pages that were unmapped.
	 */
	if (unmap_unneeded_hugepages(tmp_hp, used_hp,
			internal_config.num_hugepage_sizes) < 0) {
		RTE_LOG(ERR, EAL, "Unmapping and locking hugepages failed!\n");
		goto fail;
	}

	/*
	 * copy stuff from malloc'd hugepage* to the actual shared memory.
	 * this procedure only copies those hugepages that have orig_va
	 * not NULL. has overflow protection.
	 */
	if (copy_hugepages_to_shared_mem(hugepage, nr_hugefiles,
			tmp_hp, nr_hugefiles) < 0) {
		RTE_LOG(ERR, EAL, "Copying tables to shared memory failed!\n");
		goto fail;
	}

#ifndef RTE_ARCH_64
	/* for legacy 32-bit mode, we did not preallocate VA space, so do it */
	if (internal_config.legacy_mem &&
			prealloc_segments(hugepage, nr_hugefiles)) {
		RTE_LOG(ERR, EAL, "Could not preallocate VA space for hugepages\n");
		goto fail;
	}
#endif

	/* remap all pages we do need into memseg list VA space, so that those
	 * pages become first-class citizens in DPDK memory subsystem
	 */
	if (remap_needed_hugepages(hugepage, nr_hugefiles)) {
		RTE_LOG(ERR, EAL, "Couldn't remap hugepage files into memseg lists\n");
		goto fail;
	}

	/* free the hugepage backing files */
	if (internal_config.hugepage_unlink &&
		unlink_hugepage_files(tmp_hp, internal_config.num_hugepage_sizes) < 0) {
		RTE_LOG(ERR, EAL, "Unlinking hugepage files failed!\n");
		goto fail;
	}

	/* free the temporary hugepage table */
	free(tmp_hp);
	tmp_hp = NULL;

	munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));
	hugepage = NULL;

	/* we're not going to allocate more pages, so release VA space for
	 * unused memseg lists
	 */
	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
		struct rte_memseg_list *msl = &mcfg->memsegs[i];
		size_t mem_sz;

		/* skip inactive lists */
		if (msl->base_va == NULL)
			continue;
		/* skip lists where there is at least one page allocated */
		if (msl->memseg_arr.count > 0)
			continue;
		/* this is an unused list, deallocate it */
		mem_sz = (size_t)msl->page_sz * msl->memseg_arr.len;
		munmap(msl->base_va, mem_sz);
		msl->base_va = NULL;

		/* destroy backing fbarray */
		rte_fbarray_destroy(&msl->memseg_arr);
	}

	return 0;

fail:
	huge_recover_sigbus();
	free(tmp_hp);
	if (hugepage != NULL)
		munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));

	return -1;
}
static int __rte_unused
hugepage_count_walk(const struct rte_memseg_list *msl, void *arg)
{
	struct hugepage_info *hpi = arg;

	if (msl->page_sz != hpi->hugepage_sz)
		return 0;

	hpi->num_pages[msl->socket_id] += msl->memseg_arr.len;
	return 0;
}
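/* Used by eal_hugepage_init() on 32-bit builds to cap the per-socket page
 * counts to what was actually preallocated, since no more VA space can be
 * reserved at that point.
 */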
static int
limits_callback(int socket_id, size_t cur_limit, size_t new_len)
{
	RTE_SET_USED(socket_id);
	RTE_SET_USED(cur_limit);
	RTE_SET_USED(new_len);
	return -1;
}
static int
eal_hugepage_init(void)
{
	struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
	uint64_t memory[RTE_MAX_NUMA_NODES];
	int hp_sz_idx, socket_id;

	test_phys_addrs_available();

	memset(used_hp, 0, sizeof(used_hp));

	for (hp_sz_idx = 0;
			hp_sz_idx < (int) internal_config.num_hugepage_sizes;
			hp_sz_idx++) {
#ifndef RTE_ARCH_64
		struct hugepage_info dummy;
		unsigned int i;
#endif
		/* also initialize used_hp hugepage sizes in used_hp */
		struct hugepage_info *hpi;
		hpi = &internal_config.hugepage_info[hp_sz_idx];
		used_hp[hp_sz_idx].hugepage_sz = hpi->hugepage_sz;

#ifndef RTE_ARCH_64
		/* for 32-bit, limit number of pages on socket to whatever we've
		 * preallocated, as we cannot allocate more.
		 */
		memset(&dummy, 0, sizeof(dummy));
		dummy.hugepage_sz = hpi->hugepage_sz;
		if (rte_memseg_list_walk(hugepage_count_walk, &dummy) < 0)
			return -1;

		for (i = 0; i < RTE_DIM(dummy.num_pages); i++) {
			hpi->num_pages[i] = RTE_MIN(hpi->num_pages[i],
					dummy.num_pages[i]);
		}
#endif
	}

	/* make a copy of socket_mem, needed for balanced allocation. */
	for (hp_sz_idx = 0; hp_sz_idx < RTE_MAX_NUMA_NODES; hp_sz_idx++)
		memory[hp_sz_idx] = internal_config.socket_mem[hp_sz_idx];

	/* calculate final number of pages */
	if (calc_num_pages_per_socket(memory,
			internal_config.hugepage_info, used_hp,
			internal_config.num_hugepage_sizes) < 0)
		return -1;

	for (hp_sz_idx = 0;
			hp_sz_idx < (int)internal_config.num_hugepage_sizes;
			hp_sz_idx++) {
		for (socket_id = 0; socket_id < RTE_MAX_NUMA_NODES;
				socket_id++) {
			struct rte_memseg **pages;
			struct hugepage_info *hpi = &used_hp[hp_sz_idx];
			unsigned int num_pages = hpi->num_pages[socket_id];
			int num_pages_alloc, i;

			if (num_pages == 0)
				continue;

			pages = malloc(sizeof(*pages) * num_pages);
			if (pages == NULL)
				return -1;

			RTE_LOG(DEBUG, EAL, "Allocating %u pages of size %" PRIu64 "M on socket %i\n",
				num_pages, hpi->hugepage_sz >> 20, socket_id);

			num_pages_alloc = eal_memalloc_alloc_seg_bulk(pages,
					num_pages, hpi->hugepage_sz,
					socket_id, true);
			if (num_pages_alloc < 0) {
				free(pages);
				return -1;
			}

			/* mark preallocated pages as unfreeable */
			for (i = 0; i < num_pages_alloc; i++) {
				struct rte_memseg *ms = pages[i];
				ms->flags |= RTE_MEMSEG_FLAG_DO_NOT_FREE;
			}
			free(pages);
		}
	}

	/* if socket limits were specified, set them */
	if (internal_config.force_socket_limits) {
		unsigned int i;
		for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
			uint64_t limit = internal_config.socket_limit[i];
			if (limit == 0)
				continue;
			if (rte_mem_alloc_validator_register("socket-limit",
					limits_callback, i, limit))
				RTE_LOG(ERR, EAL, "Failed to register socket limits validator callback\n");
		}
	}
	return 0;
}
/*
 * uses fstat to report the size of a file on disk
 */
static off_t
getFileSize(int fd)
{
	struct stat st;
	if (fstat(fd, &st) < 0)
		return 0;
	return st.st_size;
}
/*
 * This creates the memory mappings in the secondary process to match that of
 * the server process. It goes through each memory segment in the DPDK runtime
 * configuration and finds the hugepages which form that segment, mapping them
 * in order to form a contiguous block in the virtual memory space
 */
static int
eal_legacy_hugepage_attach(void)
{
	struct hugepage_file *hp = NULL;
	unsigned int num_hp = 0;
	unsigned int i = 0;
	unsigned int cur_seg;
	off_t size = 0;
	int fd, fd_hugepage = -1;

	if (aslr_enabled() > 0) {
		RTE_LOG(WARNING, EAL, "WARNING: Address Space Layout Randomization "
				"(ASLR) is enabled in the kernel.\n");
		RTE_LOG(WARNING, EAL, "   This may cause issues with mapping memory "
				"into secondary processes\n");
	}

	test_phys_addrs_available();

	fd_hugepage = open(eal_hugepage_data_path(), O_RDONLY);
	if (fd_hugepage < 0) {
		RTE_LOG(ERR, EAL, "Could not open %s\n",
				eal_hugepage_data_path());
		goto error;
	}

	size = getFileSize(fd_hugepage);
	hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0);
	if (hp == MAP_FAILED) {
		RTE_LOG(ERR, EAL, "Could not mmap %s\n",
				eal_hugepage_data_path());
		goto error;
	}

	num_hp = size / sizeof(struct hugepage_file);
	RTE_LOG(DEBUG, EAL, "Analysing %u files\n", num_hp);

	/* map all segments into memory to make sure we get the addrs. the
	 * segments themselves are already in memseg list (which is shared and
	 * has its VA space already preallocated), so we just need to map
	 * everything into correct addresses.
	 */
	for (i = 0; i < num_hp; i++) {
		struct hugepage_file *hf = &hp[i];
		size_t map_sz = hf->size;
		void *map_addr = hf->final_va;

		/* if size is zero, no more pages left */
		if (map_sz == 0)
			break;

		fd = open(hf->filepath, O_RDWR);
		if (fd < 0) {
			RTE_LOG(ERR, EAL, "Could not open %s: %s\n",
				hf->filepath, strerror(errno));
			goto error;
		}

		map_addr = mmap(map_addr, map_sz, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_FIXED, fd, 0);
		if (map_addr == MAP_FAILED) {
			RTE_LOG(ERR, EAL, "Could not map %s: %s\n",
				hf->filepath, strerror(errno));
			close(fd);
			goto error;
		}

		/* set shared lock on the file. */
		if (flock(fd, LOCK_SH) < 0) {
			RTE_LOG(DEBUG, EAL, "%s(): Locking file failed: %s\n",
				__func__, strerror(errno));
			close(fd);
			goto error;
		}

		close(fd);
	}
	/* unmap the hugepage config file, since we are done using it */
	munmap(hp, size);
	close(fd_hugepage);
	return 0;

error:
	/* unmap whatever we mapped so far */
	for (cur_seg = 0; cur_seg < i; cur_seg++) {
		struct hugepage_file *hf = &hp[cur_seg];
		size_t map_sz = hf->size;
		void *map_addr = hf->final_va;

		munmap(map_addr, map_sz);
	}
	if (hp != NULL && hp != MAP_FAILED)
		munmap(hp, size);
	if (fd_hugepage >= 0)
		close(fd_hugepage);
	return -1;
}
static int
eal_hugepage_attach(void)
{
	if (eal_memalloc_sync_with_primary()) {
		RTE_LOG(ERR, EAL, "Could not map memory from primary process\n");
		if (aslr_enabled() > 0)
			RTE_LOG(ERR, EAL, "It is recommended to disable ASLR in the kernel and retry running both primary and secondary processes\n");
		return -1;
	}
	return 0;
}
int
rte_eal_hugepage_init(void)
{
	return internal_config.legacy_mem ?
			eal_legacy_hugepage_init() :
			eal_hugepage_init();
}

int
rte_eal_hugepage_attach(void)
{
	return internal_config.legacy_mem ?
			eal_legacy_hugepage_attach() :
			eal_hugepage_attach();
}

int
rte_eal_using_phys_addrs(void)
{
	return phys_addrs_available;
}
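/* phys_addrs_available is set once in test_phys_addrs_available(): it ends
 * up false when running without hugepages or when /proc/self/pagemap cannot
 * be used to resolve physical addresses.
 */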
static int __rte_unused
memseg_primary_init_32(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int active_sockets, hpi_idx, msl_idx = 0;
	unsigned int socket_id, i;
	struct rte_memseg_list *msl;
	uint64_t extra_mem_per_socket, total_extra_mem, total_requested_mem;
	uint64_t max_mem;

	/* no-huge does not need this at all */
	if (internal_config.no_hugetlbfs)
		return 0;

	/* this is a giant hack, but desperate times call for desperate
	 * measures. in legacy 32-bit mode, we cannot preallocate VA space,
	 * because having upwards of 2 gigabytes of VA space already mapped will
	 * interfere with our ability to map and sort hugepages.
	 *
	 * therefore, in legacy 32-bit mode, we will be initializing memseg
	 * lists much later - in eal_memory.c, right after we unmap all the
	 * unneeded pages. this will not affect secondary processes, as those
	 * should be able to mmap the space without (too many) problems.
	 */
	if (internal_config.legacy_mem)
		return 0;

	/* 32-bit mode is a very special case. we cannot know in advance where
	 * the user will want to allocate their memory, so we have to do some
	 * heuristics.
	 */
	active_sockets = 0;
	total_requested_mem = 0;
	if (internal_config.force_sockets)
		for (i = 0; i < rte_socket_count(); i++) {
			uint64_t mem;

			socket_id = rte_socket_id_by_idx(i);
			mem = internal_config.socket_mem[socket_id];

			if (mem == 0)
				continue;

			active_sockets++;
			total_requested_mem += mem;
		}
	else
		total_requested_mem = internal_config.memory;

	max_mem = (uint64_t)RTE_MAX_MEM_MB << 20;
	if (total_requested_mem > max_mem) {
		RTE_LOG(ERR, EAL, "Invalid parameters: 32-bit process can at most use %uM of memory\n",
				(unsigned int)(max_mem >> 20));
		return -1;
	}
	total_extra_mem = max_mem - total_requested_mem;
	extra_mem_per_socket = active_sockets == 0 ? total_extra_mem :
			total_extra_mem / active_sockets;

	/* the allocation logic is a little bit convoluted, but here's how it
	 * works, in a nutshell:
	 * - if user hasn't specified on which sockets to allocate memory via
	 *   --socket-mem, we allocate all of our memory on master core socket.
	 * - if user has specified sockets to allocate memory on, there may be
	 *   some "unused" memory left (e.g. if user has specified --socket-mem
	 *   such that not all memory adds up to 2 gigabytes), so add it to all
	 *   sockets that are in use equally.
	 *
	 * page sizes are sorted by size in descending order, so we can safely
	 * assume that we dispense with bigger page sizes first.
	 */

	/* create memseg lists */
	for (i = 0; i < rte_socket_count(); i++) {
		int hp_sizes = (int) internal_config.num_hugepage_sizes;
		uint64_t max_socket_mem, cur_socket_mem;
		unsigned int master_lcore_socket;
		struct rte_config *cfg = rte_eal_get_configuration();
		bool skip;

		socket_id = rte_socket_id_by_idx(i);

#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
		if (socket_id > 0)
			break;
#endif

		/* if we didn't specifically request memory on this socket */
		skip = active_sockets != 0 &&
				internal_config.socket_mem[socket_id] == 0;
		/* ...or if we didn't specifically request memory on *any*
		 * socket, and this is not master lcore
		 */
		master_lcore_socket = rte_lcore_to_socket_id(cfg->master_lcore);
		skip |= active_sockets == 0 && socket_id != master_lcore_socket;

		if (skip) {
			RTE_LOG(DEBUG, EAL, "Will not preallocate memory on socket %u\n",
					socket_id);
			continue;
		}

		/* max amount of memory on this socket */
		max_socket_mem = (active_sockets != 0 ?
					internal_config.socket_mem[socket_id] :
					internal_config.memory) +
					extra_mem_per_socket;
		cur_socket_mem = 0;

		for (hpi_idx = 0; hpi_idx < hp_sizes; hpi_idx++) {
			uint64_t max_pagesz_mem, cur_pagesz_mem = 0;
			uint64_t hugepage_sz;
			struct hugepage_info *hpi;
			int type_msl_idx, max_segs, total_segs = 0;

			hpi = &internal_config.hugepage_info[hpi_idx];
			hugepage_sz = hpi->hugepage_sz;

			/* check if pages are actually available */
			if (hpi->num_pages[socket_id] == 0)
				continue;

			max_segs = RTE_MAX_MEMSEG_PER_TYPE;
			max_pagesz_mem = max_socket_mem - cur_socket_mem;

			/* make it multiple of page size */
			max_pagesz_mem = RTE_ALIGN_FLOOR(max_pagesz_mem,
					hugepage_sz);

			RTE_LOG(DEBUG, EAL, "Attempting to preallocate "
					"%" PRIu64 "M on socket %i\n",
					max_pagesz_mem >> 20, socket_id);

			type_msl_idx = 0;
			while (cur_pagesz_mem < max_pagesz_mem &&
					total_segs < max_segs) {
				uint64_t cur_mem;
				unsigned int n_segs;

				if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
					RTE_LOG(ERR, EAL,
						"No more space in memseg lists, please increase %s\n",
						RTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));
					return -1;
				}

				msl = &mcfg->memsegs[msl_idx];

				cur_mem = get_mem_amount(hugepage_sz,
						max_pagesz_mem - cur_pagesz_mem);
				n_segs = cur_mem / hugepage_sz;

				if (alloc_memseg_list(msl, hugepage_sz, n_segs,
						socket_id, type_msl_idx)) {
					/* failing to allocate a memseg list is
					 * a serious error.
					 */
					RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
					return -1;
				}

				if (alloc_va_space(msl)) {
					/* if we couldn't allocate VA space, we
					 * can try with smaller page sizes.
					 */
					RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list, retrying with different page size\n");
					/* deallocate memseg list */
					if (free_memseg_list(msl))
						return -1;
					break;
				}

				total_segs += msl->memseg_arr.len;
				cur_pagesz_mem = total_segs * hugepage_sz;
				type_msl_idx++;
				msl_idx++;
			}
			cur_socket_mem += cur_pagesz_mem;
		}
		if (cur_socket_mem == 0) {
			RTE_LOG(ERR, EAL, "Cannot allocate VA space on socket %u\n",
				socket_id);
			return -1;
		}
	}

	return 0;
}
static int __rte_unused
memseg_primary_init(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int i, socket_id, hpi_idx, msl_idx = 0;
	struct rte_memseg_list *msl;
	uint64_t max_mem, total_mem;

	/* no-huge does not need this at all */
	if (internal_config.no_hugetlbfs)
		return 0;

	max_mem = (uint64_t)RTE_MAX_MEM_MB << 20;
	total_mem = 0;

	/* create memseg lists */
	for (hpi_idx = 0; hpi_idx < (int) internal_config.num_hugepage_sizes;
			hpi_idx++) {
		struct hugepage_info *hpi;
		uint64_t hugepage_sz;

		hpi = &internal_config.hugepage_info[hpi_idx];
		hugepage_sz = hpi->hugepage_sz;

		for (i = 0; i < (int) rte_socket_count(); i++) {
			uint64_t max_type_mem, total_type_mem = 0;
			int type_msl_idx, max_segs, total_segs = 0;

			socket_id = rte_socket_id_by_idx(i);

#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
			if (socket_id > 0)
				break;
#endif

			if (total_mem >= max_mem)
				break;

			max_type_mem = RTE_MIN(max_mem - total_mem,
				(uint64_t)RTE_MAX_MEM_MB_PER_TYPE << 20);
			max_segs = RTE_MAX_MEMSEG_PER_TYPE;

			type_msl_idx = 0;
			while (total_type_mem < max_type_mem &&
					total_segs < max_segs) {
				uint64_t cur_max_mem, cur_mem;
				unsigned int n_segs;

				if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
					RTE_LOG(ERR, EAL,
						"No more space in memseg lists, please increase %s\n",
						RTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));
					return -1;
				}

				msl = &mcfg->memsegs[msl_idx++];

				cur_max_mem = max_type_mem - total_type_mem;

				cur_mem = get_mem_amount(hugepage_sz,
						cur_max_mem);
				n_segs = cur_mem / hugepage_sz;

				if (alloc_memseg_list(msl, hugepage_sz, n_segs,
						socket_id, type_msl_idx))
					return -1;

				total_segs += msl->memseg_arr.len;
				total_type_mem = total_segs * hugepage_sz;
				type_msl_idx++;

				if (alloc_va_space(msl)) {
					RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list\n");
					return -1;
				}
			}
			total_mem += total_type_mem;
		}
	}
	return 0;
}
static int
memseg_secondary_init(void)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	int msl_idx = 0;
	struct rte_memseg_list *msl;

	for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {

		msl = &mcfg->memsegs[msl_idx];

		/* skip empty memseg lists */
		if (msl->memseg_arr.len == 0)
			continue;

		if (rte_fbarray_attach(&msl->memseg_arr)) {
			RTE_LOG(ERR, EAL, "Cannot attach to primary process memseg lists\n");
			return -1;
		}

		/* preallocate VA space */
		if (alloc_va_space(msl)) {
			RTE_LOG(ERR, EAL, "Cannot preallocate VA space for hugepage memory\n");
			return -1;
		}
	}

	return 0;
}
int
rte_eal_memseg_init(void)
{
	/* increase rlimit to maximum */
	struct rlimit lim;

	if (getrlimit(RLIMIT_NOFILE, &lim) == 0) {
		/* set limit to maximum */
		lim.rlim_cur = lim.rlim_max;

		if (setrlimit(RLIMIT_NOFILE, &lim) < 0) {
			RTE_LOG(DEBUG, EAL, "Setting maximum number of open files failed: %s\n",
					strerror(errno));
		} else {
			RTE_LOG(DEBUG, EAL, "Setting maximum number of open files to %"
					PRIu64 "\n",
					(uint64_t)lim.rlim_cur);
		}
	} else {
		RTE_LOG(ERR, EAL, "Cannot get current resource limits\n");
	}

	return rte_eal_process_type() == RTE_PROC_PRIMARY ?
#ifndef RTE_ARCH_64
			memseg_primary_init_32() :
#else
			memseg_primary_init() :
#endif
			memseg_secondary_init();
}