1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation.
3 * Copyright(c) 2013 6WIND S.A.
16 #include <sys/types.h>
18 #include <sys/queue.h>
20 #include <sys/resource.h>
23 #include <sys/ioctl.h>
27 #ifdef F_ADD_SEALS /* if file sealing is supported, so is memfd */
28 #include <linux/memfd.h>
29 #define MEMFD_SUPPORTED
31 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
36 #include <rte_errno.h>
38 #include <rte_memory.h>
39 #include <rte_launch.h>
41 #include <rte_eal_memconfig.h>
42 #include <rte_per_lcore.h>
43 #include <rte_lcore.h>
44 #include <rte_common.h>
45 #include <rte_string_fns.h>
47 #include "eal_private.h"
48 #include "eal_memalloc.h"
49 #include "eal_memcfg.h"
50 #include "eal_internal_cfg.h"
51 #include "eal_filesystem.h"
52 #include "eal_hugepages.h"
53 #include "eal_options.h"
55 #define PFN_MASK_SIZE 8
59 * Huge page mapping under Linux
61 * To reserve a large contiguous amount of memory, we use the hugepage
62 * feature of Linux. For that, we need hugetlbfs mounted. This
63 * code will create many files in the hugetlbfs mount point (one per page)
64 * and map them into virtual memory. For each page, we will retrieve its
65 * physical address and remap it in order to obtain a zone that is
66 * contiguous both virtually and physically.
69 static int phys_addrs_available = -1;
71 #define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"
74 * Get physical address of any mapped virtual address in the current process.
77 rte_mem_virt2phy(const void *virtaddr)
80 uint64_t page, physaddr;
81 unsigned long virt_pfn;
85 if (phys_addrs_available == 0)
88 /* standard page size */
89 page_size = getpagesize();
91 fd = open("/proc/self/pagemap", O_RDONLY);
93 RTE_LOG(INFO, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
94 __func__, strerror(errno));
98 virt_pfn = (unsigned long)virtaddr / page_size;
99 offset = sizeof(uint64_t) * virt_pfn;
100 if (lseek(fd, offset, SEEK_SET) == (off_t) -1) {
101 RTE_LOG(INFO, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
102 __func__, strerror(errno));
107 retval = read(fd, &page, PFN_MASK_SIZE);
110 RTE_LOG(INFO, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
111 __func__, strerror(errno));
113 } else if (retval != PFN_MASK_SIZE) {
114 RTE_LOG(INFO, EAL, "%s(): read %d bytes from /proc/self/pagemap "
115 "but expected %d:\n",
116 __func__, retval, PFN_MASK_SIZE);
121 * the PFN (page frame number) is stored in bits 0-54 (see
122 * pagemap.txt in the Linux kernel Documentation)
124 if ((page & 0x7fffffffffffffULL) == 0)
127 physaddr = ((page & 0x7fffffffffffffULL) * page_size)
128 + ((unsigned long)virtaddr % page_size);
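/*
 * For illustration: each 8-byte pagemap entry stores the PFN in bits 0-54,
 * so with 4 KiB pages, a PFN of 0x1a2b3 and a virtual address whose offset
 * within the page is 0x89a yield
 * physaddr = 0x1a2b3 * 0x1000 + 0x89a = 0x1a2b389a.
 */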
134 rte_mem_virt2iova(const void *virtaddr)
136 if (rte_eal_iova_mode() == RTE_IOVA_VA)
137 return (uintptr_t)virtaddr;
138 return rte_mem_virt2phy(virtaddr);
142 * For each hugepage in hugepg_tbl, fill the physaddr value. We find
143 * it by browsing the /proc/self/pagemap special file.
146 find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
151 for (i = 0; i < hpi->num_pages[0]; i++) {
152 addr = rte_mem_virt2phy(hugepg_tbl[i].orig_va);
153 if (addr == RTE_BAD_PHYS_ADDR)
155 hugepg_tbl[i].physaddr = addr;
161 * For each hugepage in hugepg_tbl, fill the physaddr value sequentially.
164 set_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
167 static phys_addr_t addr;
169 for (i = 0; i < hpi->num_pages[0]; i++) {
170 hugepg_tbl[i].physaddr = addr;
171 addr += hugepg_tbl[i].size;
177 * Check whether address-space layout randomization is enabled in
178 * the kernel. This is important for multi-process setups, as ASLR can
179 * prevent two processes from mapping data at the same virtual address.
181 * 0 - address space randomization disabled
182 * 1/2 - address space randomization enabled
183 * negative error code on error
189 int retval, fd = open(RANDOMIZE_VA_SPACE_FILE, O_RDONLY);
192 retval = read(fd, &c, 1);
202 default: return -EINVAL;
206 static sigjmp_buf huge_jmpenv;
208 static void huge_sigbus_handler(int signo __rte_unused)
210 siglongjmp(huge_jmpenv, 1);
213 /* Wrap sigsetjmp in its own function to avoid a compiler error: any
214 * non-volatile, non-static local variable in the stack frame calling
215 * sigsetjmp might be clobbered by a call to longjmp.
217 static int huge_wrap_sigsetjmp(void)
219 return sigsetjmp(huge_jmpenv, 1);
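/*
 * Intended usage, as seen in map_all_hugepages() below: register the SIGBUS
 * handler with huge_register_sigbus(), then guard each page-touching write
 * with huge_wrap_sigsetjmp(). A non-zero return means SIGBUS was raised
 * while the page was being touched and we jumped back here instead of the
 * process being killed.
 */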
222 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
223 /* Callback for numa library. */
224 void numa_error(char *where)
226 RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
231 * Mmap all hugepages of the hugepage table: for each page it first opens
232 * a file in hugetlbfs, then mmap()s hugepage_sz bytes of it. The virtual
233 * address of each mapping is stored in hugepg_tbl[i].orig_va. The pages
234 * are remapped into their final, contiguous VA space later by
235 * remap_segment(), which fills in hugepg_tbl[i].final_va.
238 map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
239 uint64_t *essential_memory __rte_unused)
244 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
246 int essential_prev = 0;
248 struct bitmask *oldmask = NULL;
249 bool have_numa = true;
250 unsigned long maxnode = 0;
252 /* Check if kernel supports NUMA. */
253 if (numa_available() != 0) {
254 RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
259 RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
260 oldmask = numa_allocate_nodemask();
261 if (get_mempolicy(&oldpolicy, oldmask->maskp,
262 oldmask->size + 1, 0, 0) < 0) {
264 "Failed to get current mempolicy: %s. "
265 "Assuming MPOL_DEFAULT.\n", strerror(errno));
266 oldpolicy = MPOL_DEFAULT;
268 for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
269 if (internal_config.socket_mem[i])
274 for (i = 0; i < hpi->num_pages[0]; i++) {
275 struct hugepage_file *hf = &hugepg_tbl[i];
276 uint64_t hugepage_sz = hpi->hugepage_sz;
278 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
282 for (j = 0; j < maxnode; j++)
283 if (essential_memory[j])
287 node_id = (node_id + 1) % maxnode;
288 while (!internal_config.socket_mem[node_id]) {
295 essential_prev = essential_memory[j];
297 if (essential_memory[j] < hugepage_sz)
298 essential_memory[j] = 0;
300 essential_memory[j] -= hugepage_sz;
304 "Setting policy MPOL_PREFERRED for socket %d\n",
306 numa_set_preferred(node_id);
311 hf->size = hugepage_sz;
312 eal_get_hugefile_path(hf->filepath, sizeof(hf->filepath),
313 hpi->hugedir, hf->file_id);
314 hf->filepath[sizeof(hf->filepath) - 1] = '\0';
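/*
 * With the default file prefix this typically yields paths such as
 * "<hugedir>/rtemap_0", "<hugedir>/rtemap_1", and so on (one file per
 * page); the exact name comes from eal_get_hugefile_path().
 */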
316 /* try to create hugepage file */
317 fd = open(hf->filepath, O_CREAT | O_RDWR, 0600);
319 RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
324 /* map the segment, and populate page tables,
325 * the kernel fills this segment with zeros. we don't care where
326 * this gets mapped - we already have contiguous memory areas
327 * ready for us to map into.
329 virtaddr = mmap(NULL, hugepage_sz, PROT_READ | PROT_WRITE,
330 MAP_SHARED | MAP_POPULATE, fd, 0);
331 if (virtaddr == MAP_FAILED) {
332 RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
338 hf->orig_va = virtaddr;
340 /* In Linux, hugetlb limitations (such as cgroup limits) are
341 * enforced at fault time instead of at mmap() time, even
342 * with MAP_POPULATE. The kernel will then send
343 * a SIGBUS signal. To avoid being killed, save the stack
344 * environment here; if SIGBUS happens, we can jump
347 if (huge_wrap_sigsetjmp()) {
348 RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more "
349 "hugepages of size %u MB\n",
350 (unsigned int)(hugepage_sz / 0x100000));
351 munmap(virtaddr, hugepage_sz);
353 unlink(hugepg_tbl[i].filepath);
354 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
356 essential_memory[node_id] =
361 *(int *)virtaddr = 0;
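/* the write above faults the page in; if the kernel cannot back it
 * (e.g. a hugetlb cgroup limit is hit), SIGBUS is raised and control
 * returns to the huge_wrap_sigsetjmp() check above.
 */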
363 /* set shared lock on the file. */
364 if (flock(fd, LOCK_SH) < 0) {
365 RTE_LOG(DEBUG, EAL, "%s(): Locking file failed: %s\n",
366 __func__, strerror(errno));
375 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
378 "Restoring previous memory policy: %d\n", oldpolicy);
379 if (oldpolicy == MPOL_DEFAULT) {
380 numa_set_localalloc();
381 } else if (set_mempolicy(oldpolicy, oldmask->maskp,
382 oldmask->size + 1) < 0) {
383 RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
385 numa_set_localalloc();
389 numa_free_cpumask(oldmask);
395 * Parse /proc/self/numa_maps to get the NUMA socket ID for each hugepage.
399 find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
403 unsigned i, hp_count = 0;
406 char hugedir_str[PATH_MAX];
409 f = fopen("/proc/self/numa_maps", "r");
411 RTE_LOG(NOTICE, EAL, "NUMA support not available;"
412 " assuming all memory is in socket_id 0\n");
416 snprintf(hugedir_str, sizeof(hugedir_str),
417 "%s/%s", hpi->hugedir, eal_get_hugefile_prefix());
420 while (fgets(buf, sizeof(buf), f) != NULL) {
422 /* ignore non-hugepage mappings */
423 if (strstr(buf, " huge ") == NULL &&
424 strstr(buf, hugedir_str) == NULL)
428 virt_addr = strtoull(buf, &end, 16);
429 if (virt_addr == 0 || end == buf) {
430 RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
434 /* get node id (socket id) */
435 nodestr = strstr(buf, " N");
436 if (nodestr == NULL) {
437 RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
441 end = strstr(nodestr, "=");
443 RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
449 socket_id = strtoul(nodestr, &end, 0);
450 if ((nodestr[0] == '\0') || (end == NULL) || (*end != '\0')) {
451 RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
455 /* if we find this page in our mappings, set socket_id */
456 for (i = 0; i < hpi->num_pages[0]; i++) {
457 void *va = (void *)(unsigned long)virt_addr;
458 if (hugepg_tbl[i].orig_va == va) {
459 hugepg_tbl[i].socket_id = socket_id;
461 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
463 "Hugepage %s is on socket %d\n",
464 hugepg_tbl[i].filepath, socket_id);
470 if (hp_count < hpi->num_pages[0])
482 cmp_physaddr(const void *a, const void *b)
484 #ifndef RTE_ARCH_PPC_64
485 const struct hugepage_file *p1 = a;
486 const struct hugepage_file *p2 = b;
488 /* PowerPC needs memory sorted in reverse order from x86 */
489 const struct hugepage_file *p1 = b;
490 const struct hugepage_file *p2 = a;
492 if (p1->physaddr < p2->physaddr)
494 else if (p1->physaddr > p2->physaddr)
501 * Uses mmap to create a shared memory area for storage of data
502 * Used in this file to store the hugepage file map on disk
505 create_shared_memory(const char *filename, const size_t mem_size)
510 /* in no-shconf mode (no shared files), create anonymous memory instead */
511 if (internal_config.no_shconf) {
512 retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE,
513 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
514 if (retval == MAP_FAILED)
519 fd = open(filename, O_CREAT | O_RDWR, 0600);
522 if (ftruncate(fd, mem_size) < 0) {
526 retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
528 if (retval == MAP_FAILED)
534 * this copies *active* hugepages from one hugepage table to another.
535 * destination is typically the shared memory.
538 copy_hugepages_to_shared_mem(struct hugepage_file * dst, int dest_size,
539 const struct hugepage_file * src, int src_size)
541 int src_pos, dst_pos = 0;
543 for (src_pos = 0; src_pos < src_size; src_pos++) {
544 if (src[src_pos].orig_va != NULL) {
545 /* error on overflow attempt */
546 if (dst_pos == dest_size)
548 memcpy(&dst[dst_pos], &src[src_pos], sizeof(struct hugepage_file));
556 unlink_hugepage_files(struct hugepage_file *hugepg_tbl,
557 unsigned num_hp_info)
559 unsigned socket, size;
560 int page, nrpages = 0;
562 /* get total number of hugepages */
563 for (size = 0; size < num_hp_info; size++)
564 for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
566 internal_config.hugepage_info[size].num_pages[socket];
568 for (page = 0; page < nrpages; page++) {
569 struct hugepage_file *hp = &hugepg_tbl[page];
571 if (hp->orig_va != NULL && unlink(hp->filepath)) {
572 RTE_LOG(WARNING, EAL, "%s(): Removing %s failed: %s\n",
573 __func__, hp->filepath, strerror(errno));
580 * unmaps hugepages that are not going to be used. since we originally allocate
581 * ALL hugepages (not just those we need), additional unmapping needs to be done.
584 unmap_unneeded_hugepages(struct hugepage_file *hugepg_tbl,
585 struct hugepage_info *hpi,
586 unsigned num_hp_info)
588 unsigned socket, size;
589 int page, nrpages = 0;
591 /* get total number of hugepages */
592 for (size = 0; size < num_hp_info; size++)
593 for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
594 nrpages += internal_config.hugepage_info[size].num_pages[socket];
596 for (size = 0; size < num_hp_info; size++) {
597 for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
598 unsigned pages_found = 0;
600 /* traverse until we have unmapped all the unused pages */
601 for (page = 0; page < nrpages; page++) {
602 struct hugepage_file *hp = &hugepg_tbl[page];
604 /* find a page that matches the criteria */
605 if ((hp->size == hpi[size].hugepage_sz) &&
606 (hp->socket_id == (int) socket)) {
608 /* if we skipped enough pages, unmap the rest */
609 if (pages_found == hpi[size].num_pages[socket]) {
612 unmap_len = hp->size;
614 /* get start addr and len of the remaining segment */
619 if (unlink(hp->filepath) == -1) {
620 RTE_LOG(ERR, EAL, "%s(): Removing %s failed: %s\n",
621 __func__, hp->filepath, strerror(errno));
625 /* lock the page and skip */
631 } /* foreach socket */
632 } /* foreach pagesize */
638 remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end)
640 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
641 struct rte_memseg_list *msl;
642 struct rte_fbarray *arr;
643 int cur_page, seg_len;
644 unsigned int msl_idx;
650 page_sz = hugepages[seg_start].size;
651 socket_id = hugepages[seg_start].socket_id;
652 seg_len = seg_end - seg_start;
654 RTE_LOG(DEBUG, EAL, "Attempting to map %" PRIu64 "M on socket %i\n",
655 (seg_len * page_sz) >> 20ULL, socket_id);
657 /* find free space in memseg lists */
658 for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
660 msl = &mcfg->memsegs[msl_idx];
661 arr = &msl->memseg_arr;
663 if (msl->page_sz != page_sz)
665 if (msl->socket_id != socket_id)
668 /* leave space for a hole if array is not empty */
669 empty = arr->count == 0;
670 ms_idx = rte_fbarray_find_next_n_free(arr, 0,
671 seg_len + (empty ? 0 : 1));
673 /* memseg list is full? */
677 /* leave some space between memsegs, they are not IOVA
678 * contiguous, so they shouldn't be VA contiguous either.
684 if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
685 RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase %s and/or %s in configuration.\n",
686 RTE_STR(CONFIG_RTE_MAX_MEMSEG_PER_TYPE),
687 RTE_STR(CONFIG_RTE_MAX_MEM_MB_PER_TYPE));
691 #ifdef RTE_ARCH_PPC_64
692 /* for PPC64 we go through the list backwards */
693 for (cur_page = seg_end - 1; cur_page >= seg_start;
694 cur_page--, ms_idx++) {
696 for (cur_page = seg_start; cur_page < seg_end; cur_page++, ms_idx++) {
698 struct hugepage_file *hfile = &hugepages[cur_page];
699 struct rte_memseg *ms = rte_fbarray_get(arr, ms_idx);
703 fd = open(hfile->filepath, O_RDWR);
705 RTE_LOG(ERR, EAL, "Could not open '%s': %s\n",
706 hfile->filepath, strerror(errno));
709 /* set shared lock on the file. */
710 if (flock(fd, LOCK_SH) < 0) {
711 RTE_LOG(DEBUG, EAL, "Could not lock '%s': %s\n",
712 hfile->filepath, strerror(errno));
716 memseg_len = (size_t)page_sz;
717 addr = RTE_PTR_ADD(msl->base_va, ms_idx * memseg_len);
719 /* we know this address is already mmapped by memseg list, so
720 * using MAP_FIXED here is safe
722 addr = mmap(addr, page_sz, PROT_READ | PROT_WRITE,
723 MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd, 0);
724 if (addr == MAP_FAILED) {
725 RTE_LOG(ERR, EAL, "Couldn't remap '%s': %s\n",
726 hfile->filepath, strerror(errno));
731 /* we have a new address, so unmap previous one */
732 #ifndef RTE_ARCH_64
733 /* in 32-bit legacy mode, we have already unmapped the page */
734 if (!internal_config.legacy_mem)
735 munmap(hfile->orig_va, page_sz);
736 #else
737 munmap(hfile->orig_va, page_sz);
738 #endif
740 hfile->orig_va = NULL;
741 hfile->final_va = addr;
743 /* rewrite physical addresses in IOVA as VA mode */
744 if (rte_eal_iova_mode() == RTE_IOVA_VA)
745 hfile->physaddr = (uintptr_t)addr;
747 /* set up memseg data */
749 ms->hugepage_sz = page_sz;
750 ms->len = memseg_len;
751 ms->iova = hfile->physaddr;
752 ms->socket_id = hfile->socket_id;
753 ms->nchannel = rte_memory_get_nchannel();
754 ms->nrank = rte_memory_get_nrank();
756 rte_fbarray_set_used(arr, ms_idx);
758 /* store segment fd internally */
759 if (eal_memalloc_set_seg_fd(msl_idx, ms_idx, fd) < 0)
760 RTE_LOG(ERR, EAL, "Could not store segment fd: %s\n",
761 rte_strerror(rte_errno));
763 RTE_LOG(DEBUG, EAL, "Allocated %" PRIu64 "M on socket %i\n",
764 (seg_len * page_sz) >> 20, socket_id);
769 get_mem_amount(uint64_t page_sz, uint64_t max_mem)
771 uint64_t area_sz, max_pages;
773 /* limit to RTE_MAX_MEMSEG_PER_LIST pages or RTE_MAX_MEM_MB_PER_LIST */
774 max_pages = RTE_MAX_MEMSEG_PER_LIST;
775 max_mem = RTE_MIN((uint64_t)RTE_MAX_MEM_MB_PER_LIST << 20, max_mem);
777 area_sz = RTE_MIN(page_sz * max_pages, max_mem);
779 /* make sure the list isn't smaller than the page size */
780 area_sz = RTE_MAX(area_sz, page_sz);
782 return RTE_ALIGN(area_sz, page_sz);
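/*
 * A worked example, assuming max_mem is not the limiting factor and the
 * default build-time limits of 8192 segments per list
 * (RTE_MAX_MEMSEG_PER_LIST) and 32768 MB per list (RTE_MAX_MEM_MB_PER_LIST):
 * for 2 MiB pages the result is min(2 MiB * 8192, 32 GiB) = 16 GiB, while
 * for 1 GiB pages it is min(1 GiB * 8192, 32 GiB) = 32 GiB, each rounded
 * up to a multiple of the page size.
 */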
786 free_memseg_list(struct rte_memseg_list *msl)
788 if (rte_fbarray_destroy(&msl->memseg_arr)) {
789 RTE_LOG(ERR, EAL, "Cannot destroy memseg list\n");
792 memset(msl, 0, sizeof(*msl));
796 #define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"
798 alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
799 int n_segs, int socket_id, int type_msl_idx)
801 char name[RTE_FBARRAY_NAME_LEN];
803 snprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,
805 if (rte_fbarray_init(&msl->memseg_arr, name, n_segs,
806 sizeof(struct rte_memseg))) {
807 RTE_LOG(ERR, EAL, "Cannot allocate memseg list: %s\n",
808 rte_strerror(rte_errno));
812 msl->page_sz = page_sz;
813 msl->socket_id = socket_id;
816 RTE_LOG(DEBUG, EAL, "Memseg list allocated: 0x%zxkB at socket %i\n",
817 (size_t)page_sz >> 10, socket_id);
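/*
 * For example, the fbarray created above for 2 MiB pages on socket 0,
 * first list of its type, is named "memseg-2048k-0-0" as per
 * MEMSEG_LIST_FMT.
 */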
823 alloc_va_space(struct rte_memseg_list *msl)
830 page_sz = msl->page_sz;
831 mem_sz = page_sz * msl->memseg_arr.len;
833 addr = eal_get_virtual_area(msl->base_va, &mem_sz, page_sz, 0, flags);
835 if (rte_errno == EADDRNOTAVAIL)
836 RTE_LOG(ERR, EAL, "Could not mmap %llu bytes at [%p] - please use '--base-virtaddr' option\n",
837 (unsigned long long)mem_sz, msl->base_va);
839 RTE_LOG(ERR, EAL, "Cannot reserve memory\n");
849 * Our VA space is not preallocated yet, so preallocate it here. We need to know
850 * how many segments there are in order to map all pages into one address space,
851 * and leave appropriate holes between segments so that rte_malloc does not
852 * concatenate them into one big segment.
854 * we also need to unmap original pages to free up address space.
856 static int __rte_unused
857 prealloc_segments(struct hugepage_file *hugepages, int n_pages)
859 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
860 int cur_page, seg_start_page, end_seg, new_memseg;
861 unsigned int hpi_idx, socket, i;
862 int n_contig_segs, n_segs;
865 /* before we preallocate segments, we need to free up our VA space.
866 * we're not removing files, and we already have information about
867 * PA-contiguousness, so it is safe to unmap everything.
869 for (cur_page = 0; cur_page < n_pages; cur_page++) {
870 struct hugepage_file *hpi = &hugepages[cur_page];
871 munmap(hpi->orig_va, hpi->size);
875 /* we cannot know in advance which page size/socket combinations have
876 * pages, so loop over all of them
878 for (hpi_idx = 0; hpi_idx < internal_config.num_hugepage_sizes;
881 internal_config.hugepage_info[hpi_idx].hugepage_sz;
883 for (i = 0; i < rte_socket_count(); i++) {
884 struct rte_memseg_list *msl;
886 socket = rte_socket_id_by_idx(i);
891 for (cur_page = 0; cur_page < n_pages; cur_page++) {
892 struct hugepage_file *prev, *cur;
893 int prev_seg_start_page = -1;
895 cur = &hugepages[cur_page];
896 prev = cur_page == 0 ? NULL :
897 &hugepages[cur_page - 1];
904 else if (cur->socket_id != (int) socket)
906 else if (cur->size != page_sz)
908 else if (cur_page == 0)
910 #ifdef RTE_ARCH_PPC_64
911 /* On the PPC64 architecture, mmap always maps
912 * from higher addresses to lower addresses. Here,
913 * physical addresses are in descending order.
915 else if ((prev->physaddr - cur->physaddr) !=
919 else if ((cur->physaddr - prev->physaddr) !=
924 /* if we're already inside a segment,
925 * new segment means end of current one
927 if (seg_start_page != -1) {
929 prev_seg_start_page =
932 seg_start_page = cur_page;
936 if (prev_seg_start_page != -1) {
937 /* we've found a new segment */
941 } else if (seg_start_page != -1) {
942 /* we didn't find new segment,
943 * but did end current one
951 /* we're skipping this page */
955 /* segment continues */
957 /* check if we missed last segment */
958 if (seg_start_page != -1) {
960 n_segs += cur_page - seg_start_page;
963 /* if no segments were found, do not preallocate */
967 /* we now have total number of pages that we will
968 * allocate for this segment list. add separator pages
969 * to the total count, and preallocate VA space.
971 n_segs += n_contig_segs - 1;
973 /* now, preallocate VA space for these segments */
975 /* first, find suitable memseg list for this */
976 for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS;
978 msl = &mcfg->memsegs[msl_idx];
980 if (msl->base_va != NULL)
984 if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
985 RTE_LOG(ERR, EAL, "Not enough space in memseg lists, please increase %s\n",
986 RTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));
990 /* now, allocate fbarray itself */
991 if (alloc_memseg_list(msl, page_sz, n_segs, socket,
995 /* finally, allocate VA space */
996 if (alloc_va_space(msl) < 0)
1004 * We cannot reallocate memseg lists on the fly because PPC64 stores pages
1005 * backwards; therefore, we have to process an entire segment before
1006 * remapping it into memseg list VA space.
1009 remap_needed_hugepages(struct hugepage_file *hugepages, int n_pages)
1011 int cur_page, seg_start_page, new_memseg, ret;
1014 for (cur_page = 0; cur_page < n_pages; cur_page++) {
1015 struct hugepage_file *prev, *cur;
1019 cur = &hugepages[cur_page];
1020 prev = cur_page == 0 ? NULL : &hugepages[cur_page - 1];
1022 /* if size is zero, no more pages left */
1028 else if (cur->socket_id != prev->socket_id)
1030 else if (cur->size != prev->size)
1032 #ifdef RTE_ARCH_PPC_64
1033 /* On the PPC64 architecture, mmap always maps from higher
1034 * addresses to lower addresses. Here, physical addresses are in
1037 else if ((prev->physaddr - cur->physaddr) != cur->size)
1040 else if ((cur->physaddr - prev->physaddr) != cur->size)
1045 /* if this isn't the first segment, remap the one that just ended */
1046 if (cur_page != 0) {
1047 ret = remap_segment(hugepages, seg_start_page,
1052 /* remember where we started */
1053 seg_start_page = cur_page;
1055 /* continuation of previous memseg */
1057 /* the loop has ended, but the last segment has not been remapped yet - do it now */
1058 if (cur_page != 0) {
1059 ret = remap_segment(hugepages, seg_start_page,
1067 __rte_unused /* function is unused on 32-bit builds */
1068 static inline uint64_t
1069 get_socket_mem_size(int socket)
1074 for (i = 0; i < internal_config.num_hugepage_sizes; i++){
1075 struct hugepage_info *hpi = &internal_config.hugepage_info[i];
1076 size += hpi->hugepage_sz * hpi->num_pages[socket];
1083 * This function is a NUMA-aware equivalent of calc_num_pages.
1084 * It takes in the list of hugepage sizes and the
1085 * number of pages thereof, and calculates the best number of
1086 * pages of each size to fulfill the request for <memory> RAM.
1089 calc_num_pages_per_socket(uint64_t * memory,
1090 struct hugepage_info *hp_info,
1091 struct hugepage_info *hp_used,
1092 unsigned num_hp_info)
1094 unsigned socket, j, i = 0;
1095 unsigned requested, available;
1096 int total_num_pages = 0;
1097 uint64_t remaining_mem, cur_mem;
1098 uint64_t total_mem = internal_config.memory;
1100 if (num_hp_info == 0)
1103 /* if specific memory amounts per socket weren't requested */
1104 if (internal_config.force_sockets == 0) {
1107 int cpu_per_socket[RTE_MAX_NUMA_NODES];
1108 size_t default_size;
1111 /* Compute number of cores per socket */
1112 memset(cpu_per_socket, 0, sizeof(cpu_per_socket));
1113 RTE_LCORE_FOREACH(lcore_id) {
1114 cpu_per_socket[rte_lcore_to_socket_id(lcore_id)]++;
1118 * Automatically spread the requested memory amongst detected sockets
1119 * according to the number of cores from the cpu mask present on each socket.
1121 total_size = internal_config.memory;
1122 for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {
1124 /* Set memory amount per socket */
1125 default_size = (internal_config.memory * cpu_per_socket[socket])
1126 / rte_lcore_count();
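/*
 * For example (illustrative numbers): with -m 1024 and 4 lcores of which
 * 3 are on socket 0 and 1 is on socket 1, this assigns 768 MB to socket 0
 * and 256 MB to socket 1, before being capped by what each socket has.
 */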
1128 /* Limit to maximum available memory on socket */
1129 default_size = RTE_MIN(default_size, get_socket_mem_size(socket));
1132 memory[socket] = default_size;
1133 total_size -= default_size;
1137 * If some memory is remaining, try to allocate it by getting all
1138 * available memory from sockets, one after the other
1140 for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {
1141 /* take whatever is available */
1142 default_size = RTE_MIN(get_socket_mem_size(socket) - memory[socket],
1146 memory[socket] += default_size;
1147 total_size -= default_size;
1150 /* in 32-bit mode, allocate all of the memory only on the master lcore socket */
1153 total_size = internal_config.memory;
1154 for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0;
1156 struct rte_config *cfg = rte_eal_get_configuration();
1157 unsigned int master_lcore_socket;
1159 master_lcore_socket =
1160 rte_lcore_to_socket_id(cfg->master_lcore);
1162 if (master_lcore_socket != socket)
1166 memory[socket] = total_size;
1172 for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_mem != 0; socket++) {
1173 /* skip if memory on this specific socket wasn't requested */
1174 for (i = 0; i < num_hp_info && memory[socket] != 0; i++){
1175 strlcpy(hp_used[i].hugedir, hp_info[i].hugedir,
1176 sizeof(hp_used[i].hugedir));
1177 hp_used[i].num_pages[socket] = RTE_MIN(
1178 memory[socket] / hp_info[i].hugepage_sz,
1179 hp_info[i].num_pages[socket]);
1181 cur_mem = hp_used[i].num_pages[socket] *
1182 hp_used[i].hugepage_sz;
1184 memory[socket] -= cur_mem;
1185 total_mem -= cur_mem;
1187 total_num_pages += hp_used[i].num_pages[socket];
1189 /* check if we have met all memory requests */
1190 if (memory[socket] == 0)
1193 /* if we have used up all pages of this size on this socket,
1194 * move on to the next size */
1195 if (hp_used[i].num_pages[socket] == hp_info[i].num_pages[socket])
1197 /* At this point we know that the pages still available at this size are
1198 * bigger than the memory we have left to allocate, so let's see if we can
1199 * get enough from other page sizes.
1202 for (j = i+1; j < num_hp_info; j++)
1203 remaining_mem += hp_info[j].hugepage_sz *
1204 hp_info[j].num_pages[socket];
1206 /* is there enough other memory? if not, allocate another page and quit */
1207 if (remaining_mem < memory[socket]){
1208 cur_mem = RTE_MIN(memory[socket],
1209 hp_info[i].hugepage_sz);
1210 memory[socket] -= cur_mem;
1211 total_mem -= cur_mem;
1212 hp_used[i].num_pages[socket]++;
1214 break; /* we are done with this socket */
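/*
 * For example: if 10 MB is still wanted on this socket and only 1 GiB
 * pages remain, the smaller sizes cannot cover it, so one extra 1 GiB
 * page is taken above and the socket is considered satisfied.
 */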
1217 /* if we didn't satisfy all memory requirements per socket */
1218 if (memory[socket] > 0 &&
1219 internal_config.socket_mem[socket] != 0) {
1220 /* to prevent icc errors */
1221 requested = (unsigned) (internal_config.socket_mem[socket] /
1223 available = requested -
1224 ((unsigned) (memory[socket] / 0x100000));
1225 RTE_LOG(ERR, EAL, "Not enough memory available on socket %u! "
1226 "Requested: %uMB, available: %uMB\n", socket,
1227 requested, available);
1232 /* if we didn't satisfy total memory requirements */
1233 if (total_mem > 0) {
1234 requested = (unsigned) (internal_config.memory / 0x100000);
1235 available = requested - (unsigned) (total_mem / 0x100000);
1236 RTE_LOG(ERR, EAL, "Not enough memory available! Requested: %uMB,"
1237 " available: %uMB\n", requested, available);
1240 return total_num_pages;
1243 static inline size_t
1244 eal_get_hugepage_mem_size(void)
1249 for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
1250 struct hugepage_info *hpi = &internal_config.hugepage_info[i];
1251 if (strnlen(hpi->hugedir, sizeof(hpi->hugedir)) != 0) {
1252 for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
1253 size += hpi->hugepage_sz * hpi->num_pages[j];
1258 return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
1261 static struct sigaction huge_action_old;
1262 static int huge_need_recover;
1265 huge_register_sigbus(void)
1268 struct sigaction action;
1271 sigaddset(&mask, SIGBUS);
1272 action.sa_flags = 0;
1273 action.sa_mask = mask;
1274 action.sa_handler = huge_sigbus_handler;
1276 huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);
1280 huge_recover_sigbus(void)
1282 if (huge_need_recover) {
1283 sigaction(SIGBUS, &huge_action_old, NULL);
1284 huge_need_recover = 0;
1289 * Prepare physical memory mapping: fill the configuration structure with
1290 * this information and return 0 on success (see the helper notes below).
1291 * 1. map N huge pages in separate files in hugetlbfs
1292 * 2. find associated physical addr
1293 * 3. find associated NUMA socket ID
1294 * 4. sort all huge pages by physical address
1295 * 5. remap these N huge pages in the correct order
1296 * 6. unmap the first mapping
1297 * 7. fill memsegs in configuration with contiguous zones
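/*
 * In this file the steps above are handled roughly as follows:
 * map_all_hugepages() does step 1, find_physaddrs()/set_physaddrs() step 2,
 * find_numasocket() step 3, qsort() with cmp_physaddr() step 4, and
 * remap_needed_hugepages()/remap_segment() steps 5-7, with
 * unmap_unneeded_hugepages() releasing the pages we do not keep.
 */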
1300 eal_legacy_hugepage_init(void)
1302 struct rte_mem_config *mcfg;
1303 struct hugepage_file *hugepage = NULL, *tmp_hp = NULL;
1304 struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
1305 struct rte_fbarray *arr;
1306 struct rte_memseg *ms;
1308 uint64_t memory[RTE_MAX_NUMA_NODES];
1312 int nr_hugefiles, nr_hugepages = 0;
1315 memset(used_hp, 0, sizeof(used_hp));
1317 /* get pointer to global configuration */
1318 mcfg = rte_eal_get_configuration()->mem_config;
1320 /* hugetlbfs can be disabled */
1321 if (internal_config.no_hugetlbfs) {
1322 struct rte_memseg_list *msl;
1323 int n_segs, cur_seg, fd, flags;
1324 #ifdef MEMFD_SUPPORTED
1329 /* nohuge mode is legacy mode */
1330 internal_config.legacy_mem = 1;
1332 /* nohuge mode is single-file segments mode */
1333 internal_config.single_file_segments = 1;
1335 /* create a memseg list */
1336 msl = &mcfg->memsegs[0];
1338 page_sz = RTE_PGSIZE_4K;
1339 n_segs = internal_config.memory / page_sz;
1341 if (rte_fbarray_init(&msl->memseg_arr, "nohugemem", n_segs,
1342 sizeof(struct rte_memseg))) {
1343 RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
1347 /* set up parameters for anonymous mmap */
1349 flags = MAP_PRIVATE | MAP_ANONYMOUS;
1351 #ifdef MEMFD_SUPPORTED
1352 /* create a memfd and store it in the segment fd table */
1353 memfd = memfd_create("nohuge", 0);
1355 RTE_LOG(DEBUG, EAL, "Cannot create memfd: %s\n",
1357 RTE_LOG(DEBUG, EAL, "Falling back to anonymous map\n");
1359 /* we got an fd - now resize it */
1360 if (ftruncate(memfd, internal_config.memory) < 0) {
1361 RTE_LOG(ERR, EAL, "Cannot resize memfd: %s\n",
1363 RTE_LOG(ERR, EAL, "Falling back to anonymous map\n");
1366 /* creating memfd-backed file was successful.
1367 * we want changes to memfd to be visible to
1368 * other processes (such as vhost backend), so
1369 * map it as shared memory.
1371 RTE_LOG(DEBUG, EAL, "Using memfd for anonymous memory\n");
1377 addr = mmap(NULL, internal_config.memory, PROT_READ | PROT_WRITE,
1379 if (addr == MAP_FAILED) {
1380 RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
1384 msl->base_va = addr;
1385 msl->page_sz = page_sz;
1387 msl->len = internal_config.memory;
1389 /* we're in single-file segments mode, so only the segment list
1390 * fd needs to be set up.
1393 if (eal_memalloc_set_seg_list_fd(0, fd) < 0) {
1394 RTE_LOG(ERR, EAL, "Cannot set up segment list fd\n");
1395 /* not a serious error, proceed */
1399 /* populate memsegs. each memseg is one page long */
1400 for (cur_seg = 0; cur_seg < n_segs; cur_seg++) {
1401 arr = &msl->memseg_arr;
1403 ms = rte_fbarray_get(arr, cur_seg);
1404 if (rte_eal_iova_mode() == RTE_IOVA_VA)
1405 ms->iova = (uintptr_t)addr;
1407 ms->iova = RTE_BAD_IOVA;
1409 ms->hugepage_sz = page_sz;
1413 rte_fbarray_set_used(arr, cur_seg);
1415 addr = RTE_PTR_ADD(addr, (size_t)page_sz);
1417 if (mcfg->dma_maskbits &&
1418 rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
1420 "%s(): couldn't allocate memory due to IOVA exceeding limits of current DMA mask.\n",
1422 if (rte_eal_iova_mode() == RTE_IOVA_VA &&
1423 rte_eal_using_phys_addrs())
1425 "%s(): Please try initializing EAL with --iova-mode=pa parameter.\n",
1432 /* calculate total number of hugepages available. at this point we haven't
1433 * yet started sorting them so they all are on socket 0 */
1434 for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
1435 /* meanwhile, also initialize used_hp hugepage sizes in used_hp */
1436 used_hp[i].hugepage_sz = internal_config.hugepage_info[i].hugepage_sz;
1438 nr_hugepages += internal_config.hugepage_info[i].num_pages[0];
1442 * allocate a memory area for the hugepage table.
1443 * this isn't shared memory yet. because we need some
1444 * processing done on these pages, shared memory will be created
1447 tmp_hp = malloc(nr_hugepages * sizeof(struct hugepage_file));
1451 memset(tmp_hp, 0, nr_hugepages * sizeof(struct hugepage_file));
1453 hp_offset = 0; /* where we start the current page size entries */
1455 huge_register_sigbus();
1457 /* make a copy of socket_mem, needed for balanced allocation. */
1458 for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
1459 memory[i] = internal_config.socket_mem[i];
1461 /* map all hugepages and sort them */
1462 for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
1463 unsigned pages_old, pages_new;
1464 struct hugepage_info *hpi;
1467 * we don't yet mark hugepages as used at this stage, so
1468 * we just map all hugepages available to the system;
1469 * all hugepages are still located on socket 0
1471 hpi = &internal_config.hugepage_info[i];
1473 if (hpi->num_pages[0] == 0)
1476 /* map all hugepages available */
1477 pages_old = hpi->num_pages[0];
1478 pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, memory);
1479 if (pages_new < pages_old) {
1481 "%d not %d hugepages of size %u MB allocated\n",
1482 pages_new, pages_old,
1483 (unsigned)(hpi->hugepage_sz / 0x100000));
1485 int pages = pages_old - pages_new;
1487 nr_hugepages -= pages;
1488 hpi->num_pages[0] = pages_new;
1493 if (rte_eal_using_phys_addrs() &&
1494 rte_eal_iova_mode() != RTE_IOVA_VA) {
1495 /* find physical addresses for each hugepage */
1496 if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
1497 RTE_LOG(DEBUG, EAL, "Failed to find phys addr "
1498 "for %u MB pages\n",
1499 (unsigned int)(hpi->hugepage_sz / 0x100000));
1503 /* set physical addresses for each hugepage */
1504 if (set_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
1505 RTE_LOG(DEBUG, EAL, "Failed to set phys addr "
1506 "for %u MB pages\n",
1507 (unsigned int)(hpi->hugepage_sz / 0x100000));
1512 if (find_numasocket(&tmp_hp[hp_offset], hpi) < 0){
1513 RTE_LOG(DEBUG, EAL, "Failed to find NUMA socket for %u MB pages\n",
1514 (unsigned)(hpi->hugepage_sz / 0x100000));
1518 qsort(&tmp_hp[hp_offset], hpi->num_pages[0],
1519 sizeof(struct hugepage_file), cmp_physaddr);
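/* after sorting, physically contiguous pages sit next to each other in the
 * table, which is what remap_needed_hugepages() later relies on to build
 * VA-contiguous segments out of PA-contiguous runs.
 */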
1521 /* we have processed a number of hugepages of this size, so increment the offset */
1522 hp_offset += hpi->num_pages[0];
1525 huge_recover_sigbus();
1527 if (internal_config.memory == 0 && internal_config.force_sockets == 0)
1528 internal_config.memory = eal_get_hugepage_mem_size();
1530 nr_hugefiles = nr_hugepages;
1533 /* clean out the numbers of pages */
1534 for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++)
1535 for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
1536 internal_config.hugepage_info[i].num_pages[j] = 0;
1538 /* get hugepages for each socket */
1539 for (i = 0; i < nr_hugefiles; i++) {
1540 int socket = tmp_hp[i].socket_id;
1542 /* find a hugepage info with right size and increment num_pages */
1543 const int nb_hpsizes = RTE_MIN(MAX_HUGEPAGE_SIZES,
1544 (int)internal_config.num_hugepage_sizes);
1545 for (j = 0; j < nb_hpsizes; j++) {
1546 if (tmp_hp[i].size ==
1547 internal_config.hugepage_info[j].hugepage_sz) {
1548 internal_config.hugepage_info[j].num_pages[socket]++;
1553 /* make a copy of socket_mem, needed for number of pages calculation */
1554 for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
1555 memory[i] = internal_config.socket_mem[i];
1557 /* calculate final number of pages */
1558 nr_hugepages = calc_num_pages_per_socket(memory,
1559 internal_config.hugepage_info, used_hp,
1560 internal_config.num_hugepage_sizes);
1562 /* error if not enough memory available */
1563 if (nr_hugepages < 0)
1567 for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
1568 for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
1569 if (used_hp[i].num_pages[j] > 0) {
1571 "Requesting %u pages of size %uMB"
1572 " from socket %i\n",
1573 used_hp[i].num_pages[j],
1575 (used_hp[i].hugepage_sz / 0x100000),
1581 /* create shared memory */
1582 hugepage = create_shared_memory(eal_hugepage_data_path(),
1583 nr_hugefiles * sizeof(struct hugepage_file));
1585 if (hugepage == NULL) {
1586 RTE_LOG(ERR, EAL, "Failed to create shared memory!\n");
1589 memset(hugepage, 0, nr_hugefiles * sizeof(struct hugepage_file));
1592 * unmap pages that we won't need (looks at used_hp).
1593 * also, sets orig_va to NULL on pages that were unmapped.
1595 if (unmap_unneeded_hugepages(tmp_hp, used_hp,
1596 internal_config.num_hugepage_sizes) < 0) {
1597 RTE_LOG(ERR, EAL, "Unmapping and locking hugepages failed!\n");
1602 * copy relevant data from the malloc'd hugepage table to the actual
1603 * shared memory. this procedure only copies those hugepages that have
1604 * a non-NULL orig_va. it has overflow protection.
1606 if (copy_hugepages_to_shared_mem(hugepage, nr_hugefiles,
1607 tmp_hp, nr_hugefiles) < 0) {
1608 RTE_LOG(ERR, EAL, "Copying tables to shared memory failed!\n");
1613 /* for legacy 32-bit mode, we did not preallocate VA space, so do it */
1614 if (internal_config.legacy_mem &&
1615 prealloc_segments(hugepage, nr_hugefiles)) {
1616 RTE_LOG(ERR, EAL, "Could not preallocate VA space for hugepages\n");
1621 /* remap all pages we do need into memseg list VA space, so that those
1622 * pages become first-class citizens in DPDK memory subsystem
1624 if (remap_needed_hugepages(hugepage, nr_hugefiles)) {
1625 RTE_LOG(ERR, EAL, "Couldn't remap hugepage files into memseg lists\n");
1629 /* free the hugepage backing files */
1630 if (internal_config.hugepage_unlink &&
1631 unlink_hugepage_files(tmp_hp, internal_config.num_hugepage_sizes) < 0) {
1632 RTE_LOG(ERR, EAL, "Unlinking hugepage files failed!\n");
1636 /* free the temporary hugepage table */
1640 munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));
1643 /* we're not going to allocate more pages, so release VA space for
1644 * unused memseg lists
1646 for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
1647 struct rte_memseg_list *msl = &mcfg->memsegs[i];
1650 /* skip inactive lists */
1651 if (msl->base_va == NULL)
1653 /* skip lists where there is at least one page allocated */
1654 if (msl->memseg_arr.count > 0)
1656 /* this is an unused list, deallocate it */
1658 munmap(msl->base_va, mem_sz);
1659 msl->base_va = NULL;
1661 /* destroy backing fbarray */
1662 rte_fbarray_destroy(&msl->memseg_arr);
1665 if (mcfg->dma_maskbits &&
1666 rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
1668 "%s(): couldn't allocate memory due to IOVA exceeding limits of current DMA mask.\n",
1676 huge_recover_sigbus();
1678 if (hugepage != NULL)
1679 munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));
1684 static int __rte_unused
1685 hugepage_count_walk(const struct rte_memseg_list *msl, void *arg)
1687 struct hugepage_info *hpi = arg;
1689 if (msl->page_sz != hpi->hugepage_sz)
1692 hpi->num_pages[msl->socket_id] += msl->memseg_arr.len;
1697 limits_callback(int socket_id, size_t cur_limit, size_t new_len)
1699 RTE_SET_USED(socket_id);
1700 RTE_SET_USED(cur_limit);
1701 RTE_SET_USED(new_len);
1706 eal_hugepage_init(void)
1708 struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
1709 uint64_t memory[RTE_MAX_NUMA_NODES];
1710 int hp_sz_idx, socket_id;
1712 memset(used_hp, 0, sizeof(used_hp));
1715 hp_sz_idx < (int) internal_config.num_hugepage_sizes;
1718 struct hugepage_info dummy;
1721 /* also initialize used_hp hugepage sizes in used_hp */
1722 struct hugepage_info *hpi;
1723 hpi = &internal_config.hugepage_info[hp_sz_idx];
1724 used_hp[hp_sz_idx].hugepage_sz = hpi->hugepage_sz;
1727 /* for 32-bit, limit number of pages on socket to whatever we've
1728 * preallocated, as we cannot allocate more.
1730 memset(&dummy, 0, sizeof(dummy));
1731 dummy.hugepage_sz = hpi->hugepage_sz;
1732 if (rte_memseg_list_walk(hugepage_count_walk, &dummy) < 0)
1735 for (i = 0; i < RTE_DIM(dummy.num_pages); i++) {
1736 hpi->num_pages[i] = RTE_MIN(hpi->num_pages[i],
1737 dummy.num_pages[i]);
1742 /* make a copy of socket_mem, needed for balanced allocation. */
1743 for (hp_sz_idx = 0; hp_sz_idx < RTE_MAX_NUMA_NODES; hp_sz_idx++)
1744 memory[hp_sz_idx] = internal_config.socket_mem[hp_sz_idx];
1746 /* calculate final number of pages */
1747 if (calc_num_pages_per_socket(memory,
1748 internal_config.hugepage_info, used_hp,
1749 internal_config.num_hugepage_sizes) < 0)
1753 hp_sz_idx < (int)internal_config.num_hugepage_sizes;
1755 for (socket_id = 0; socket_id < RTE_MAX_NUMA_NODES;
1757 struct rte_memseg **pages;
1758 struct hugepage_info *hpi = &used_hp[hp_sz_idx];
1759 unsigned int num_pages = hpi->num_pages[socket_id];
1760 unsigned int num_pages_alloc;
1765 RTE_LOG(DEBUG, EAL, "Allocating %u pages of size %" PRIu64 "M on socket %i\n",
1766 num_pages, hpi->hugepage_sz >> 20, socket_id);
1768 /* we may not be able to allocate all pages in one go,
1769 * because we break up our memory map into multiple
1770 * memseg lists. therefore, try allocating multiple
1771 * times and see if we can get the desired number of
1772 * pages from multiple allocations.
1775 num_pages_alloc = 0;
1777 int i, cur_pages, needed;
1779 needed = num_pages - num_pages_alloc;
1781 pages = malloc(sizeof(*pages) * needed);
1783 /* do not request exact number of pages */
1784 cur_pages = eal_memalloc_alloc_seg_bulk(pages,
1785 needed, hpi->hugepage_sz,
1787 if (cur_pages <= 0) {
1792 /* mark preallocated pages as unfreeable */
1793 for (i = 0; i < cur_pages; i++) {
1794 struct rte_memseg *ms = pages[i];
1795 ms->flags |= RTE_MEMSEG_FLAG_DO_NOT_FREE;
1799 num_pages_alloc += cur_pages;
1800 } while (num_pages_alloc != num_pages);
1803 /* if socket limits were specified, set them */
1804 if (internal_config.force_socket_limits) {
1806 for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
1807 uint64_t limit = internal_config.socket_limit[i];
1810 if (rte_mem_alloc_validator_register("socket-limit",
1811 limits_callback, i, limit))
1812 RTE_LOG(ERR, EAL, "Failed to register socket limits validator callback\n");
1819 * uses fstat to report the size of a file on disk
1825 if (fstat(fd, &st) < 0)
1831 * This creates the memory mappings in the secondary process to match those of
1832 * the primary process. It goes through each memory segment in the DPDK runtime
1833 * configuration and finds the hugepages which form that segment, mapping them
1834 * in order to form a contiguous block in the virtual memory space.
1837 eal_legacy_hugepage_attach(void)
1839 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1840 struct hugepage_file *hp = NULL;
1841 unsigned int num_hp = 0;
1843 unsigned int cur_seg;
1845 int fd, fd_hugepage = -1;
1847 if (aslr_enabled() > 0) {
1848 RTE_LOG(WARNING, EAL, "WARNING: Address Space Layout Randomization "
1849 "(ASLR) is enabled in the kernel.\n");
1850 RTE_LOG(WARNING, EAL, " This may cause issues with mapping memory "
1851 "into secondary processes\n");
1854 fd_hugepage = open(eal_hugepage_data_path(), O_RDONLY);
1855 if (fd_hugepage < 0) {
1856 RTE_LOG(ERR, EAL, "Could not open %s\n",
1857 eal_hugepage_data_path());
1861 size = getFileSize(fd_hugepage);
1862 hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0);
1863 if (hp == MAP_FAILED) {
1864 RTE_LOG(ERR, EAL, "Could not mmap %s\n",
1865 eal_hugepage_data_path());
1869 num_hp = size / sizeof(struct hugepage_file);
1870 RTE_LOG(DEBUG, EAL, "Analysing %u files\n", num_hp);
1872 /* map all segments into memory to make sure we get the addrs. the
1873 * segments themselves are already in memseg list (which is shared and
1874 * has its VA space already preallocated), so we just need to map
1875 * everything into correct addresses.
1877 for (i = 0; i < num_hp; i++) {
1878 struct hugepage_file *hf = &hp[i];
1879 size_t map_sz = hf->size;
1880 void *map_addr = hf->final_va;
1881 int msl_idx, ms_idx;
1882 struct rte_memseg_list *msl;
1883 struct rte_memseg *ms;
1885 /* if size is zero, no more pages left */
1889 fd = open(hf->filepath, O_RDWR);
1891 RTE_LOG(ERR, EAL, "Could not open %s: %s\n",
1892 hf->filepath, strerror(errno));
1896 map_addr = mmap(map_addr, map_sz, PROT_READ | PROT_WRITE,
1897 MAP_SHARED | MAP_FIXED, fd, 0);
1898 if (map_addr == MAP_FAILED) {
1899 RTE_LOG(ERR, EAL, "Could not map %s: %s\n",
1900 hf->filepath, strerror(errno));
1904 /* set shared lock on the file. */
1905 if (flock(fd, LOCK_SH) < 0) {
1906 RTE_LOG(DEBUG, EAL, "%s(): Locking file failed: %s\n",
1907 __func__, strerror(errno));
1911 /* find segment data */
1912 msl = rte_mem_virt2memseg_list(map_addr);
1914 RTE_LOG(DEBUG, EAL, "%s(): Cannot find memseg list\n",
1918 ms = rte_mem_virt2memseg(map_addr, msl);
1920 RTE_LOG(DEBUG, EAL, "%s(): Cannot find memseg\n",
1925 msl_idx = msl - mcfg->memsegs;
1926 ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
1928 RTE_LOG(DEBUG, EAL, "%s(): Cannot find memseg idx\n",
1933 /* store segment fd internally */
1934 if (eal_memalloc_set_seg_fd(msl_idx, ms_idx, fd) < 0)
1935 RTE_LOG(ERR, EAL, "Could not store segment fd: %s\n",
1936 rte_strerror(rte_errno));
1938 /* unmap the hugepage config file, since we are done using it */
1946 /* on error, unmap the segments we have already mapped */
1948 for (cur_seg = 0; cur_seg < i; cur_seg++) {
1949 struct hugepage_file *hf = &hp[cur_seg];
1950 size_t map_sz = hf->size;
1951 void *map_addr = hf->final_va;
1953 munmap(map_addr, map_sz);
1955 if (hp != NULL && hp != MAP_FAILED)
1957 if (fd_hugepage >= 0)
1963 eal_hugepage_attach(void)
1965 if (eal_memalloc_sync_with_primary()) {
1966 RTE_LOG(ERR, EAL, "Could not map memory from primary process\n");
1967 if (aslr_enabled() > 0)
1968 RTE_LOG(ERR, EAL, "It is recommended to disable ASLR in the kernel and retry running both primary and secondary processes\n");
1975 rte_eal_hugepage_init(void)
1977 return internal_config.legacy_mem ?
1978 eal_legacy_hugepage_init() :
1979 eal_hugepage_init();
1983 rte_eal_hugepage_attach(void)
1985 return internal_config.legacy_mem ?
1986 eal_legacy_hugepage_attach() :
1987 eal_hugepage_attach();
1991 rte_eal_using_phys_addrs(void)
1993 if (phys_addrs_available == -1) {
1996 if (rte_eal_has_hugepages() != 0 &&
1997 rte_mem_virt2phy(&tmp) != RTE_BAD_PHYS_ADDR)
1998 phys_addrs_available = 1;
1999 else
2000 phys_addrs_available = 0;
2002 return phys_addrs_available;
2005 static int __rte_unused
2006 memseg_primary_init_32(void)
2008 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
2009 int active_sockets, hpi_idx, msl_idx = 0;
2010 unsigned int socket_id, i;
2011 struct rte_memseg_list *msl;
2012 uint64_t extra_mem_per_socket, total_extra_mem, total_requested_mem;
2015 /* no-huge does not need this at all */
2016 if (internal_config.no_hugetlbfs)
2019 /* this is a giant hack, but desperate times call for desperate
2020 * measures. in legacy 32-bit mode, we cannot preallocate VA space,
2021 * because having upwards of 2 gigabytes of VA space already mapped will
2022 * interfere with our ability to map and sort hugepages.
2024 * therefore, in legacy 32-bit mode, we will be initializing memseg
2025 * lists much later - in eal_memory.c, right after we unmap all the
2026 * unneeded pages. this will not affect secondary processes, as those
2027 * should be able to mmap the space without (too many) problems.
2029 if (internal_config.legacy_mem)
2032 /* 32-bit mode is a very special case. we cannot know in advance where
2033 * the user will want to allocate their memory, so we have to do some
2037 total_requested_mem = 0;
2038 if (internal_config.force_sockets)
2039 for (i = 0; i < rte_socket_count(); i++) {
2042 socket_id = rte_socket_id_by_idx(i);
2043 mem = internal_config.socket_mem[socket_id];
2049 total_requested_mem += mem;
2052 total_requested_mem = internal_config.memory;
2054 max_mem = (uint64_t)RTE_MAX_MEM_MB << 20;
2055 if (total_requested_mem > max_mem) {
2056 RTE_LOG(ERR, EAL, "Invalid parameters: 32-bit process can use at most %uM of memory\n",
2057 (unsigned int)(max_mem >> 20));
2060 total_extra_mem = max_mem - total_requested_mem;
2061 extra_mem_per_socket = active_sockets == 0 ? total_extra_mem :
2062 total_extra_mem / active_sockets;
2064 /* the allocation logic is a little bit convoluted, but here's how it
2065 * works, in a nutshell:
2066 * - if user hasn't specified on which sockets to allocate memory via
2067 * --socket-mem, we allocate all of our memory on master core socket.
2068 * - if user has specified sockets to allocate memory on, there may be
2069 * some "unused" memory left (e.g. if user has specified --socket-mem
2070 * such that not all memory adds up to 2 gigabytes), so add it to all
2071 * sockets that are in use equally.
2073 * page sizes are sorted by size in descending order, so we can safely
2074 * assume that we dispense with bigger page sizes first.
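/*
 * A sketch with illustrative numbers: if the 32-bit limit is 2 GB and the
 * user asked for --socket-mem=512,256 (two active sockets), then
 * total_extra_mem = 2048 - 768 = 1280 MB and each active socket is allowed
 * an extra 640 MB of preallocated VA space on top of what was requested.
 */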
2077 /* create memseg lists */
2078 for (i = 0; i < rte_socket_count(); i++) {
2079 int hp_sizes = (int) internal_config.num_hugepage_sizes;
2080 uint64_t max_socket_mem, cur_socket_mem;
2081 unsigned int master_lcore_socket;
2082 struct rte_config *cfg = rte_eal_get_configuration();
2085 socket_id = rte_socket_id_by_idx(i);
2087 #ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
2088 /* we can still sort pages by socket in legacy mode */
2089 if (!internal_config.legacy_mem && socket_id > 0)
2093 /* if we didn't specifically request memory on this socket */
2094 skip = active_sockets != 0 &&
2095 internal_config.socket_mem[socket_id] == 0;
2096 /* ...or if we didn't specifically request memory on *any*
2097 * socket, and this is not master lcore
2099 master_lcore_socket = rte_lcore_to_socket_id(cfg->master_lcore);
2100 skip |= active_sockets == 0 && socket_id != master_lcore_socket;
2103 RTE_LOG(DEBUG, EAL, "Will not preallocate memory on socket %u\n",
2108 /* max amount of memory on this socket */
2109 max_socket_mem = (active_sockets != 0 ?
2110 internal_config.socket_mem[socket_id] :
2111 internal_config.memory) +
2112 extra_mem_per_socket;
2115 for (hpi_idx = 0; hpi_idx < hp_sizes; hpi_idx++) {
2116 uint64_t max_pagesz_mem, cur_pagesz_mem = 0;
2117 uint64_t hugepage_sz;
2118 struct hugepage_info *hpi;
2119 int type_msl_idx, max_segs, total_segs = 0;
2121 hpi = &internal_config.hugepage_info[hpi_idx];
2122 hugepage_sz = hpi->hugepage_sz;
2124 /* check if pages are actually available */
2125 if (hpi->num_pages[socket_id] == 0)
2128 max_segs = RTE_MAX_MEMSEG_PER_TYPE;
2129 max_pagesz_mem = max_socket_mem - cur_socket_mem;
2131 /* make it a multiple of page size */
2132 max_pagesz_mem = RTE_ALIGN_FLOOR(max_pagesz_mem,
2135 RTE_LOG(DEBUG, EAL, "Attempting to preallocate "
2136 "%" PRIu64 "M on socket %i\n",
2137 max_pagesz_mem >> 20, socket_id);
2140 while (cur_pagesz_mem < max_pagesz_mem &&
2141 total_segs < max_segs) {
2143 unsigned int n_segs;
2145 if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
2147 "No more space in memseg lists, please increase %s\n",
2148 RTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));
2152 msl = &mcfg->memsegs[msl_idx];
2154 cur_mem = get_mem_amount(hugepage_sz,
2156 n_segs = cur_mem / hugepage_sz;
2158 if (alloc_memseg_list(msl, hugepage_sz, n_segs,
2159 socket_id, type_msl_idx)) {
2160 /* failing to allocate a memseg list is
2163 RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
2167 if (alloc_va_space(msl)) {
2168 /* if we couldn't allocate VA space, we
2169 * can try with smaller page sizes.
2171 RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list, retrying with different page size\n");
2172 /* deallocate memseg list */
2173 if (free_memseg_list(msl))
2178 total_segs += msl->memseg_arr.len;
2179 cur_pagesz_mem = total_segs * hugepage_sz;
2183 cur_socket_mem += cur_pagesz_mem;
2185 if (cur_socket_mem == 0) {
2186 RTE_LOG(ERR, EAL, "Cannot allocate VA space on socket %u\n",
2195 static int __rte_unused
2196 memseg_primary_init(void)
2198 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
2203 int i, hpi_idx, msl_idx, ret = -1; /* fail unless told to succeed */
2204 struct rte_memseg_list *msl;
2205 uint64_t max_mem, max_mem_per_type;
2206 unsigned int max_seglists_per_type;
2207 unsigned int n_memtypes, cur_type;
2209 /* no-huge does not need this at all */
2210 if (internal_config.no_hugetlbfs)
2214 * figuring out amount of memory we're going to have is a long and very
2215 * involved process. the basic element we're operating with is a memory
2216 * type, defined as a combination of NUMA node ID and page size (so that
2217 * e.g. 2 sockets with 2 page sizes yield 4 memory types in total).
2219 * deciding amount of memory going towards each memory type is a
2220 * balancing act between maximum segments per type, maximum memory per
2221 * type, and number of detected NUMA nodes. the goal is to make sure
2222 * each memory type gets at least one memseg list.
2224 * the total amount of memory is limited by RTE_MAX_MEM_MB value.
2226 * the total amount of memory per type is limited by either
2227 * RTE_MAX_MEM_MB_PER_TYPE, or by RTE_MAX_MEM_MB divided by the number
2228 * of detected NUMA nodes. additionally, maximum number of segments per
2229 * type is also limited by RTE_MAX_MEMSEG_PER_TYPE. this is because for
2230 * smaller page sizes, it can take hundreds of thousands of segments to
2231 * reach the above specified per-type memory limits.
2233 * additionally, each type may have multiple memseg lists associated
2234 * with it, each limited by either RTE_MAX_MEM_MB_PER_LIST for bigger
2235 * page sizes, or RTE_MAX_MEMSEG_PER_LIST segments for smaller ones.
2237 * the number of memseg lists per type is decided based on the above
2238 * limits, and also taking number of detected NUMA nodes, to make sure
2239 * that we don't run out of memseg lists before we populate all NUMA
2240 * nodes with memory.
2242 * we do this in three stages. first, we collect the number of types.
2243 * then, we figure out memory constraints and populate the list of
2244 * would-be memseg lists. then, we go ahead and allocate the memseg
2248 /* create space for mem types */
2249 n_memtypes = internal_config.num_hugepage_sizes * rte_socket_count();
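/*
 * For example, a system with two hugepage sizes (2 MiB and 1 GiB) and two
 * NUMA nodes yields n_memtypes = 2 * 2 = 4; later, max_seglists_per_type
 * becomes RTE_MAX_MEMSEG_LISTS / 4 so that every memory type can get at
 * least one memseg list.
 */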
2250 memtypes = calloc(n_memtypes, sizeof(*memtypes));
2251 if (memtypes == NULL) {
2252 RTE_LOG(ERR, EAL, "Cannot allocate space for memory types\n");
2256 /* populate mem types */
2258 for (hpi_idx = 0; hpi_idx < (int) internal_config.num_hugepage_sizes;
2260 struct hugepage_info *hpi;
2261 uint64_t hugepage_sz;
2263 hpi = &internal_config.hugepage_info[hpi_idx];
2264 hugepage_sz = hpi->hugepage_sz;
2266 for (i = 0; i < (int) rte_socket_count(); i++, cur_type++) {
2267 int socket_id = rte_socket_id_by_idx(i);
2269 #ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
2270 /* we can still sort pages by socket in legacy mode */
2271 if (!internal_config.legacy_mem && socket_id > 0)
2274 memtypes[cur_type].page_sz = hugepage_sz;
2275 memtypes[cur_type].socket_id = socket_id;
2277 RTE_LOG(DEBUG, EAL, "Detected memory type: "
2278 "socket_id:%u hugepage_sz:%" PRIu64 "\n",
2279 socket_id, hugepage_sz);
2282 /* number of memtypes could have been lower due to no NUMA support */
2283 n_memtypes = cur_type;
2285 /* set up limits for types */
2286 max_mem = (uint64_t)RTE_MAX_MEM_MB << 20;
2287 max_mem_per_type = RTE_MIN((uint64_t)RTE_MAX_MEM_MB_PER_TYPE << 20,
2288 max_mem / n_memtypes);
2290 * limit maximum number of segment lists per type to ensure there's
2291 * space for memseg lists for all NUMA nodes with all page sizes
2293 max_seglists_per_type = RTE_MAX_MEMSEG_LISTS / n_memtypes;
2295 if (max_seglists_per_type == 0) {
2296 RTE_LOG(ERR, EAL, "Cannot accommodate all memory types, please increase %s\n",
2297 RTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));
2301 /* go through all mem types and create segment lists */
2303 for (cur_type = 0; cur_type < n_memtypes; cur_type++) {
2304 unsigned int cur_seglist, n_seglists, n_segs;
2305 unsigned int max_segs_per_type, max_segs_per_list;
2306 struct memtype *type = &memtypes[cur_type];
2307 uint64_t max_mem_per_list, pagesz;
2310 pagesz = type->page_sz;
2311 socket_id = type->socket_id;
2314 * we need to create segment lists for this type. we must take
2315 * into account the following things:
2317 * 1. total amount of memory we can use for this memory type
2318 * 2. total amount of memory per memseg list allowed
2319 * 3. number of segments needed to fit the amount of memory
2320 * 4. number of segments allowed per type
2321 * 5. number of segments allowed per memseg list
2322 * 6. number of memseg lists we are allowed to take up
2325 /* calculate how much segments we will need in total */
2326 max_segs_per_type = max_mem_per_type / pagesz;
2327 /* limit number of segments to maximum allowed per type */
2328 max_segs_per_type = RTE_MIN(max_segs_per_type,
2329 (unsigned int)RTE_MAX_MEMSEG_PER_TYPE);
2330 /* limit number of segments to maximum allowed per list */
2331 max_segs_per_list = RTE_MIN(max_segs_per_type,
2332 (unsigned int)RTE_MAX_MEMSEG_PER_LIST);
2334 /* calculate how much memory we can have per segment list */
2335 max_mem_per_list = RTE_MIN(max_segs_per_list * pagesz,
2336 (uint64_t)RTE_MAX_MEM_MB_PER_LIST << 20);
2338 /* calculate how many segments each segment list will have */
2339 n_segs = RTE_MIN(max_segs_per_list, max_mem_per_list / pagesz);
2341 /* calculate how many segment lists we can have */
2342 n_seglists = RTE_MIN(max_segs_per_type / n_segs,
2343 max_mem_per_type / max_mem_per_list);
2345 /* limit number of segment lists according to our maximum */
2346 n_seglists = RTE_MIN(n_seglists, max_seglists_per_type);
2348 RTE_LOG(DEBUG, EAL, "Creating %i segment lists: "
2349 "n_segs:%i socket_id:%i hugepage_sz:%" PRIu64 "\n",
2350 n_seglists, n_segs, socket_id, pagesz);
2352 /* create all segment lists */
2353 for (cur_seglist = 0; cur_seglist < n_seglists; cur_seglist++) {
2354 if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
2356 "No more space in memseg lists, please increase %s\n",
2357 RTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));
2360 msl = &mcfg->memsegs[msl_idx++];
2362 if (alloc_memseg_list(msl, pagesz, n_segs,
2363 socket_id, cur_seglist))
2366 if (alloc_va_space(msl)) {
2367 RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list\n");
2372 /* we're successful */
2380 memseg_secondary_init(void)
2382 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
2384 struct rte_memseg_list *msl;
2386 for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
2388 msl = &mcfg->memsegs[msl_idx];
2390 /* skip empty memseg lists */
2391 if (msl->memseg_arr.len == 0)
2394 if (rte_fbarray_attach(&msl->memseg_arr)) {
2395 RTE_LOG(ERR, EAL, "Cannot attach to primary process memseg lists\n");
2399 /* preallocate VA space */
2400 if (alloc_va_space(msl)) {
2401 RTE_LOG(ERR, EAL, "Cannot preallocate VA space for hugepage memory\n");
2410 rte_eal_memseg_init(void)
2412 /* increase rlimit to maximum */
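/* each hugepage-backed segment can keep an open file descriptor
 * (eal_memalloc_set_seg_fd() is used above to store them), so a large
 * number of segments can easily exhaust the default open-file limit.
 */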
2415 if (getrlimit(RLIMIT_NOFILE, &lim) == 0) {
2416 /* set limit to maximum */
2417 lim.rlim_cur = lim.rlim_max;
2419 if (setrlimit(RLIMIT_NOFILE, &lim) < 0) {
2420 RTE_LOG(DEBUG, EAL, "Setting maximum number of open files failed: %s\n",
2423 RTE_LOG(DEBUG, EAL, "Setting maximum number of open files to %"
2425 (uint64_t)lim.rlim_cur);
2428 RTE_LOG(ERR, EAL, "Cannot get current resource limits\n");
2430 #ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
2431 if (!internal_config.legacy_mem && rte_socket_count() > 1) {
2432 RTE_LOG(WARNING, EAL, "DPDK is running on a NUMA system, but is compiled without NUMA support.\n");
2433 RTE_LOG(WARNING, EAL, "This will have adverse consequences for performance and usability.\n");
2434 RTE_LOG(WARNING, EAL, "Please use --"OPT_LEGACY_MEM" option, or recompile with NUMA support.\n");
2438 return rte_eal_process_type() == RTE_PROC_PRIMARY ?
2439 #ifndef RTE_ARCH_64
2440 memseg_primary_init_32() :
2441 #else
2442 memseg_primary_init() :
2443 #endif
2444 memseg_secondary_init();