1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation.
3 * Copyright(c) 2013 6WIND S.A.
6 #define _FILE_OFFSET_BITS 64
16 #include <sys/types.h>
18 #include <sys/queue.h>
22 #include <sys/ioctl.h>
26 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
31 #include <rte_errno.h>
33 #include <rte_memory.h>
34 #include <rte_launch.h>
36 #include <rte_eal_memconfig.h>
37 #include <rte_per_lcore.h>
38 #include <rte_lcore.h>
39 #include <rte_common.h>
40 #include <rte_string_fns.h>
42 #include "eal_private.h"
43 #include "eal_internal_cfg.h"
44 #include "eal_filesystem.h"
45 #include "eal_hugepages.h"
47 #define PFN_MASK_SIZE 8
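/* one /proc/self/pagemap entry is a 64-bit word, hence 8 bytes */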
51 * Huge page mapping under Linux
53 * To reserve a large contiguous amount of memory, we use the hugepage
54 * feature of Linux. For that, we need to have hugetlbfs mounted. This
55 * code will create many files in that mount point (one per page) and
56 * map them into virtual memory. For each page, we will retrieve its
57 * physical address and remap it in order to obtain a contiguous virtual
58 * zone as well as a contiguous physical zone.
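*
* Illustrative sketch (not part of the original code) of the per-page
* sequence used below, assuming hugetlbfs is mounted at the hypothetical
* path "/mnt/huge" and a 2 MB page size:
*
*   fd = open("/mnt/huge/rtemap_0", O_CREAT | O_RDWR, 0600);
*   va = mmap(NULL, RTE_PGSIZE_2M, PROT_READ | PROT_WRITE,
*           MAP_SHARED | MAP_POPULATE, fd, 0);
*   *(int *)va = 0;     (the first write faults the hugepage in)
*   close(fd);          (the mapping, and the file, keep the page reserved)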
61 static bool phys_addrs_available = true;
63 #define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"
66 test_phys_addrs_available(void)
71 if (!rte_eal_has_hugepages()) {
73 "Started without hugepages support, physical addresses not available\n");
74 phys_addrs_available = false;
78 physaddr = rte_mem_virt2phy(&tmp);
79 if (physaddr == RTE_BAD_PHYS_ADDR) {
80 if (rte_eal_iova_mode() == RTE_IOVA_PA)
82 "Cannot obtain physical addresses: %s. "
83 "Only vfio will function.\n",
85 phys_addrs_available = false;
90 * Get physical address of any mapped virtual address in the current process.
93 rte_mem_virt2phy(const void *virtaddr)
96 uint64_t page, physaddr;
97 unsigned long virt_pfn;
101 /* Cannot parse /proc/self/pagemap, no need to log errors everywhere */
102 if (!phys_addrs_available)
105 /* standard page size */
106 page_size = getpagesize();
108 fd = open("/proc/self/pagemap", O_RDONLY);
110 RTE_LOG(ERR, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
111 __func__, strerror(errno));
115 virt_pfn = (unsigned long)virtaddr / page_size;
116 offset = sizeof(uint64_t) * virt_pfn;
117 if (lseek(fd, offset, SEEK_SET) == (off_t) -1) {
118 RTE_LOG(ERR, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
119 __func__, strerror(errno));
124 retval = read(fd, &page, PFN_MASK_SIZE);
127 RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
128 __func__, strerror(errno));
130 } else if (retval != PFN_MASK_SIZE) {
131 RTE_LOG(ERR, EAL, "%s(): read %d bytes from /proc/self/pagemap "
132 "but expected %d\n",
133 __func__, retval, PFN_MASK_SIZE);
138 * the pfn (page frame number) is in bits 0-54 (see
139 * pagemap.txt in the Linux Documentation)
141 if ((page & 0x7fffffffffffffULL) == 0)
144 physaddr = ((page & 0x7fffffffffffffULL) * page_size)
145 + ((unsigned long)virtaddr % page_size);
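/*
 * Equivalent sketch (an aside, not part of the original code): the same
 * lookup can be done with a single pread() instead of lseek() + read():
 *
 *   uint64_t entry;
 *   if (pread(fd, &entry, sizeof(entry),
 *           (off_t)virt_pfn * sizeof(uint64_t)) == sizeof(entry))
 *       physaddr = (entry & 0x7fffffffffffffULL) * page_size
 *           + ((unsigned long)virtaddr % page_size);
 */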
151 rte_mem_virt2iova(const void *virtaddr)
153 if (rte_eal_iova_mode() == RTE_IOVA_VA)
154 return (uintptr_t)virtaddr;
155 return rte_mem_virt2phy(virtaddr);
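/*
 * Hypothetical usage sketch (not from the original file): translating a
 * buffer address before handing it to a device:
 *
 *   rte_iova_t iova = rte_mem_virt2iova(buf);
 *   if (iova == RTE_BAD_IOVA)
 *       return -1;   (address cannot be used for DMA)
 */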
159 * For each hugepage in hugepg_tbl, fill the physaddr value. We find
160 * it by browsing the /proc/self/pagemap special file.
163 find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
168 for (i = 0; i < hpi->num_pages[0]; i++) {
169 addr = rte_mem_virt2phy(hugepg_tbl[i].orig_va);
170 if (addr == RTE_BAD_PHYS_ADDR)
172 hugepg_tbl[i].physaddr = addr;
178 * For each hugepage in hugepg_tbl, fill the physaddr value sequentially.
181 set_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
184 static phys_addr_t addr;
186 for (i = 0; i < hpi->num_pages[0]; i++) {
187 hugepg_tbl[i].physaddr = addr;
188 addr += hugepg_tbl[i].size;
194 * Check whether address-space layout randomization is enabled in
195 * the kernel. This is important for multi-process, as it can prevent
196 * two processes from mapping data to the same virtual address.
198 * 0 - address space randomization disabled
199 * 1/2 - address space randomization enabled
200 * negative error code on error
206 int retval, fd = open(RANDOMIZE_VA_SPACE_FILE, O_RDONLY);
209 retval = read(fd, &c, 1);
219 default: return -EINVAL;
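/*
 * Note (an assumption about the cases elided above): the file holds a single
 * ASCII digit, so the expected mapping is
 *   '0' -> 0   ASLR disabled
 *   '1' -> 1   conservative randomization
 *   '2' -> 2   full randomization (the common default)
 */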
223 static sigjmp_buf huge_jmpenv;
225 static void huge_sigbus_handler(int signo __rte_unused)
227 siglongjmp(huge_jmpenv, 1);
230 /* Put sigsetjmp into a wrapper function to avoid a compiler error. Any
231 * non-volatile, non-static local variable in the stack frame calling
232 * sigsetjmp might be clobbered by a call to longjmp.
234 static int huge_wrap_sigsetjmp(void)
236 return sigsetjmp(huge_jmpenv, 1);
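/*
 * Usage sketch (restating the pattern used by map_all_hugepages() below,
 * together with huge_register_sigbus()/huge_recover_sigbus() defined later):
 *
 *   huge_register_sigbus();
 *   if (huge_wrap_sigsetjmp()) {
 *       (the first write below raised SIGBUS - back out)
 *   } else {
 *       *(int *)virtaddr = 0;   (first touch may fault)
 *   }
 *   huge_recover_sigbus();
 */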
239 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
240 /* Callback for numa library. */
241 void numa_error(char *where)
243 RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
248 * Mmap all hugepages of the hugepage table: it first opens a file in
249 * hugetlbfs, then mmap()s hugepage_sz bytes of it. If orig is set, the
250 * virtual address is stored in hugepg_tbl[i].orig_va, otherwise it is
251 * stored in hugepg_tbl[i].final_va. The second mapping (when orig is 0)
252 * tries to map physically contiguous blocks into contiguous virtual blocks.
255 map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
256 uint64_t *essential_memory __rte_unused)
261 struct flock lck = {0};
262 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
264 int essential_prev = 0;
266 struct bitmask *oldmask = numa_allocate_nodemask();
267 bool have_numa = true;
268 unsigned long maxnode = 0;
270 /* Check if kernel supports NUMA. */
271 if (numa_available() != 0) {
272 RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
277 RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
278 if (get_mempolicy(&oldpolicy, oldmask->maskp,
279 oldmask->size + 1, 0, 0) < 0) {
281 "Failed to get current mempolicy: %s. "
282 "Assuming MPOL_DEFAULT.\n", strerror(errno));
283 oldpolicy = MPOL_DEFAULT;
285 for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
286 if (internal_config.socket_mem[i])
291 for (i = 0; i < hpi->num_pages[0]; i++) {
292 struct hugepage_file *hf = &hugepg_tbl[i];
293 uint64_t hugepage_sz = hpi->hugepage_sz;
295 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
299 for (j = 0; j < maxnode; j++)
300 if (essential_memory[j])
304 node_id = (node_id + 1) % maxnode;
305 while (!internal_config.socket_mem[node_id]) {
312 essential_prev = essential_memory[j];
314 if (essential_memory[j] < hugepage_sz)
315 essential_memory[j] = 0;
317 essential_memory[j] -= hugepage_sz;
321 "Setting policy MPOL_PREFERRED for socket %d\n",
323 numa_set_preferred(node_id);
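/*
 * Note (an assumption about libnuma semantics): numa_set_preferred() only
 * biases allocation towards node_id; the actual placement is verified later
 * by find_numasocket() parsing /proc/self/numa_maps.
 */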
328 hf->size = hugepage_sz;
329 eal_get_hugefile_path(hf->filepath, sizeof(hf->filepath),
330 hpi->hugedir, hf->file_id);
331 hf->filepath[sizeof(hf->filepath) - 1] = '\0';
333 /* try to create hugepage file */
334 fd = open(hf->filepath, O_CREAT | O_RDWR, 0600);
336 RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
341 /* map the segment and populate the page tables;
342 * the kernel fills this segment with zeros. we don't care where
343 * this gets mapped - we already have contiguous memory areas
344 * ready for us to map into.
346 virtaddr = mmap(NULL, hugepage_sz, PROT_READ | PROT_WRITE,
347 MAP_SHARED | MAP_POPULATE, fd, 0);
348 if (virtaddr == MAP_FAILED) {
349 RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
355 hf->orig_va = virtaddr;
357 /* In Linux, hugetlb limitations, like cgroups, are
358 * enforced at fault time instead of at mmap() time, even
359 * with the option of MAP_POPULATE. The kernel will send
360 * a SIGBUS signal. To avoid being killed, save the stack
361 * environment here; if SIGBUS happens, we can jump
364 if (huge_wrap_sigsetjmp()) {
365 RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more "
366 "hugepages of size %u MB\n",
367 (unsigned int)(hugepage_sz / 0x100000));
368 munmap(virtaddr, hugepage_sz);
370 unlink(hugepg_tbl[i].filepath);
371 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
373 essential_memory[node_id] =
378 *(int *)virtaddr = 0;
381 /* set shared lock on the file. */
382 lck.l_type = F_RDLCK;
383 lck.l_whence = SEEK_SET;
385 lck.l_len = hugepage_sz;
386 if (fcntl(fd, F_SETLK, &lck) == -1) {
387 RTE_LOG(DEBUG, EAL, "%s(): Locking file failed: %s\n",
388 __func__, strerror(errno));
397 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
400 "Restoring previous memory policy: %d\n", oldpolicy);
401 if (oldpolicy == MPOL_DEFAULT) {
402 numa_set_localalloc();
403 } else if (set_mempolicy(oldpolicy, oldmask->maskp,
404 oldmask->size + 1) < 0) {
405 RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
407 numa_set_localalloc();
410 numa_free_cpumask(oldmask);
416 * Parse /proc/self/numa_maps to get the NUMA socket ID for each huge
420 find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
424 unsigned i, hp_count = 0;
427 char hugedir_str[PATH_MAX];
430 f = fopen("/proc/self/numa_maps", "r");
432 RTE_LOG(NOTICE, EAL, "NUMA support not available,"
433 " assuming all memory is in socket_id 0\n");
437 snprintf(hugedir_str, sizeof(hugedir_str),
438 "%s/%s", hpi->hugedir, internal_config.hugefile_prefix);
441 while (fgets(buf, sizeof(buf), f) != NULL) {
443 /* ignore non-hugepage mappings */
444 if (strstr(buf, " huge ") == NULL &&
445 strstr(buf, hugedir_str) == NULL)
449 virt_addr = strtoull(buf, &end, 16);
450 if (virt_addr == 0 || end == buf) {
451 RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
455 /* get node id (socket id) */
456 nodestr = strstr(buf, " N");
457 if (nodestr == NULL) {
458 RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
462 end = strstr(nodestr, "=");
464 RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
470 socket_id = strtoul(nodestr, &end, 0);
471 if ((nodestr[0] == '\0') || (end == NULL) || (*end != '\0')) {
472 RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
476 /* if we find this page in our mappings, set socket_id */
477 for (i = 0; i < hpi->num_pages[0]; i++) {
478 void *va = (void *)(unsigned long)virt_addr;
479 if (hugepg_tbl[i].orig_va == va) {
480 hugepg_tbl[i].socket_id = socket_id;
482 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
484 "Hugepage %s is on socket %d\n",
485 hugepg_tbl[i].filepath, socket_id);
491 if (hp_count < hpi->num_pages[0])
503 cmp_physaddr(const void *a, const void *b)
505 #ifndef RTE_ARCH_PPC_64
506 const struct hugepage_file *p1 = a;
507 const struct hugepage_file *p2 = b;
509 /* PowerPC needs memory sorted in reverse order from x86 */
510 const struct hugepage_file *p1 = b;
511 const struct hugepage_file *p2 = a;
513 if (p1->physaddr < p2->physaddr)
515 else if (p1->physaddr > p2->physaddr)
522 * Uses mmap to create a shared memory area for storage of data.
523 * Used in this file to store the hugepage file map on disk.
526 create_shared_memory(const char *filename, const size_t mem_size)
529 int fd = open(filename, O_CREAT | O_RDWR, 0666);
532 if (ftruncate(fd, mem_size) < 0) {
536 retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
538 if (retval == MAP_FAILED)
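/*
 * Usage sketch (mirroring the call in eal_legacy_hugepage_init() below):
 *
 *   hugepage = create_shared_memory(eal_hugepage_info_path(),
 *           nr_hugefiles * sizeof(struct hugepage_file));
 */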
544 * this copies *active* hugepages from one hugepage table to another.
545 * destination is typically the shared memory.
548 copy_hugepages_to_shared_mem(struct hugepage_file * dst, int dest_size,
549 const struct hugepage_file * src, int src_size)
551 int src_pos, dst_pos = 0;
553 for (src_pos = 0; src_pos < src_size; src_pos++) {
554 if (src[src_pos].orig_va != NULL) {
555 /* error on overflow attempt */
556 if (dst_pos == dest_size)
558 memcpy(&dst[dst_pos], &src[src_pos], sizeof(struct hugepage_file));
566 unlink_hugepage_files(struct hugepage_file *hugepg_tbl,
567 unsigned num_hp_info)
569 unsigned socket, size;
570 int page, nrpages = 0;
572 /* get total number of hugepages */
573 for (size = 0; size < num_hp_info; size++)
574 for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
576 internal_config.hugepage_info[size].num_pages[socket];
578 for (page = 0; page < nrpages; page++) {
579 struct hugepage_file *hp = &hugepg_tbl[page];
581 if (hp->final_va != NULL && unlink(hp->filepath)) {
582 RTE_LOG(WARNING, EAL, "%s(): Removing %s failed: %s\n",
583 __func__, hp->filepath, strerror(errno));
590 * unmaps hugepages that are not going to be used. since we originally allocate
591 * ALL hugepages (not just those we need), additional unmapping needs to be done.
594 unmap_unneeded_hugepages(struct hugepage_file *hugepg_tbl,
595 struct hugepage_info *hpi,
596 unsigned num_hp_info)
598 unsigned socket, size;
599 int page, nrpages = 0;
601 /* get total number of hugepages */
602 for (size = 0; size < num_hp_info; size++)
603 for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
604 nrpages += internal_config.hugepage_info[size].num_pages[socket];
606 for (size = 0; size < num_hp_info; size++) {
607 for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
608 unsigned pages_found = 0;
610 /* traverse until we have unmapped all the unused pages */
611 for (page = 0; page < nrpages; page++) {
612 struct hugepage_file *hp = &hugepg_tbl[page];
614 /* find a page that matches the criteria */
615 if ((hp->size == hpi[size].hugepage_sz) &&
616 (hp->socket_id == (int) socket)) {
618 /* if we skipped enough pages, unmap the rest */
619 if (pages_found == hpi[size].num_pages[socket]) {
622 unmap_len = hp->size;
624 /* get start addr and len of the remaining segment */
629 if (unlink(hp->filepath) == -1) {
630 RTE_LOG(ERR, EAL, "%s(): Removing %s failed: %s\n",
631 __func__, hp->filepath, strerror(errno));
635 /* lock the page and skip */
641 } /* foreach socket */
642 } /* foreach pagesize */
648 remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end)
650 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
651 struct rte_memseg_list *msl;
652 struct rte_fbarray *arr;
653 int cur_page, seg_len;
654 unsigned int msl_idx;
660 page_sz = hugepages[seg_start].size;
661 socket_id = hugepages[seg_start].socket_id;
662 seg_len = seg_end - seg_start;
664 RTE_LOG(DEBUG, EAL, "Attempting to map %" PRIu64 "M on socket %i\n",
665 (seg_len * page_sz) >> 20ULL, socket_id);
667 /* find free space in memseg lists */
668 for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
670 msl = &mcfg->memsegs[msl_idx];
671 arr = &msl->memseg_arr;
673 if (msl->page_sz != page_sz)
675 if (msl->socket_id != socket_id)
678 /* leave space for a hole if array is not empty */
679 empty = arr->count == 0;
680 ms_idx = rte_fbarray_find_next_n_free(arr, 0,
681 seg_len + (empty ? 0 : 1));
683 /* memseg list is full? */
687 /* leave some space between memsegs, they are not IOVA
688 * contiguous, so they shouldn't be VA contiguous either.
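/*
 * Illustration (an assumption about the elided lines): when the array is not
 * empty, one extra free slot is requested above and then skipped, so a
 * one-page VA hole separates this segment from the previous one.
 */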
694 if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
695 RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase %s and/or %s in configuration.\n",
696 RTE_STR(CONFIG_RTE_MAX_MEMSEG_PER_TYPE),
697 RTE_STR(CONFIG_RTE_MAX_MEM_PER_TYPE));
701 #ifdef RTE_ARCH_PPC_64
702 /* for PPC64 we go through the list backwards */
703 for (cur_page = seg_end - 1; cur_page >= seg_start;
704 cur_page--, ms_idx++) {
706 for (cur_page = seg_start; cur_page < seg_end; cur_page++, ms_idx++) {
708 struct hugepage_file *hfile = &hugepages[cur_page];
709 struct rte_memseg *ms = rte_fbarray_get(arr, ms_idx);
714 fd = open(hfile->filepath, O_RDWR);
716 RTE_LOG(ERR, EAL, "Could not open '%s': %s\n",
717 hfile->filepath, strerror(errno));
720 /* set shared lock on the file. */
721 lck.l_type = F_RDLCK;
722 lck.l_whence = SEEK_SET;
725 if (fcntl(fd, F_SETLK, &lck) == -1) {
726 RTE_LOG(DEBUG, EAL, "Could not lock '%s': %s\n",
727 hfile->filepath, strerror(errno));
731 memseg_len = (size_t)page_sz;
732 addr = RTE_PTR_ADD(msl->base_va, ms_idx * memseg_len);
734 /* we know this address is already mmapped by the memseg list, so
735 * using MAP_FIXED here is safe
737 addr = mmap(addr, page_sz, PROT_READ | PROT_WRITE,
738 MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd, 0);
739 if (addr == MAP_FAILED) {
740 RTE_LOG(ERR, EAL, "Couldn't remap '%s': %s\n",
741 hfile->filepath, strerror(errno));
746 /* we have a new address, so unmap previous one */
748 /* in 32-bit legacy mode, we have already unmapped the page */
749 if (!internal_config.legacy_mem)
750 munmap(hfile->orig_va, page_sz);
752 munmap(hfile->orig_va, page_sz);
755 hfile->orig_va = NULL;
756 hfile->final_va = addr;
758 /* rewrite physical addresses for IOVA-as-VA mode */
759 if (rte_eal_iova_mode() == RTE_IOVA_VA)
760 hfile->physaddr = (uintptr_t)addr;
762 /* set up memseg data */
764 ms->hugepage_sz = page_sz;
765 ms->len = memseg_len;
766 ms->iova = hfile->physaddr;
767 ms->socket_id = hfile->socket_id;
768 ms->nchannel = rte_memory_get_nchannel();
769 ms->nrank = rte_memory_get_nrank();
771 rte_fbarray_set_used(arr, ms_idx);
775 RTE_LOG(DEBUG, EAL, "Allocated %" PRIu64 "M on socket %i\n",
776 (seg_len * page_sz) >> 20, socket_id);
780 #define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"
782 alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
783 int n_segs, int socket_id, int type_msl_idx)
785 char name[RTE_FBARRAY_NAME_LEN];
787 snprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,
789 if (rte_fbarray_init(&msl->memseg_arr, name, n_segs,
790 sizeof(struct rte_memseg))) {
791 RTE_LOG(ERR, EAL, "Cannot allocate memseg list: %s\n",
792 rte_strerror(rte_errno));
796 msl->page_sz = page_sz;
797 msl->socket_id = socket_id;
800 RTE_LOG(DEBUG, EAL, "Memseg list allocated: 0x%zxkB at socket %i\n",
801 (size_t)page_sz >> 10, socket_id);
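/*
 * Example: with 2 MB pages on socket 0 and type_msl_idx 0, the
 * MEMSEG_LIST_FMT name above expands to "memseg-2048k-0-0".
 */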
807 alloc_va_space(struct rte_memseg_list *msl)
814 #ifdef RTE_ARCH_PPC_64
815 flags |= MAP_HUGETLB;
818 page_sz = msl->page_sz;
819 mem_sz = page_sz * msl->memseg_arr.len;
821 addr = eal_get_virtual_area(msl->base_va, &mem_sz, page_sz, 0, flags);
823 if (rte_errno == EADDRNOTAVAIL)
824 RTE_LOG(ERR, EAL, "Could not mmap %llu bytes at [%p] - please use '--base-virtaddr' option\n",
825 (unsigned long long)mem_sz, msl->base_va);
827 RTE_LOG(ERR, EAL, "Cannot reserve memory\n");
836 * Our VA space is not preallocated yet, so preallocate it here. We need to know
837 * how many segments there are in order to map all pages into one address space,
838 * and leave appropriate holes between segments so that rte_malloc does not
839 * concatenate them into one big segment.
841 * we also need to unmap original pages to free up address space.
843 static int __rte_unused
844 prealloc_segments(struct hugepage_file *hugepages, int n_pages)
846 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
847 int cur_page, seg_start_page, end_seg, new_memseg;
848 unsigned int hpi_idx, socket, i;
849 int n_contig_segs, n_segs;
852 /* before we preallocate segments, we need to free up our VA space.
853 * we're not removing files, and we already have information about
854 * PA-contiguousness, so it is safe to unmap everything.
856 for (cur_page = 0; cur_page < n_pages; cur_page++) {
857 struct hugepage_file *hpi = &hugepages[cur_page];
858 munmap(hpi->orig_va, hpi->size);
862 /* we cannot know how many page sizes and sockets we have discovered, so
863 * loop over all of them
865 for (hpi_idx = 0; hpi_idx < internal_config.num_hugepage_sizes;
868 internal_config.hugepage_info[hpi_idx].hugepage_sz;
870 for (i = 0; i < rte_socket_count(); i++) {
871 struct rte_memseg_list *msl;
873 socket = rte_socket_id_by_idx(i);
878 for (cur_page = 0; cur_page < n_pages; cur_page++) {
879 struct hugepage_file *prev, *cur;
880 int prev_seg_start_page = -1;
882 cur = &hugepages[cur_page];
883 prev = cur_page == 0 ? NULL :
884 &hugepages[cur_page - 1];
891 else if (cur->socket_id != (int) socket)
893 else if (cur->size != page_sz)
895 else if (cur_page == 0)
897 #ifdef RTE_ARCH_PPC_64
898 * On the PPC64 architecture, mmap always proceeds
899 * from higher addresses to lower addresses. Here,
900 * physical addresses are in descending order.
902 else if ((prev->physaddr - cur->physaddr) !=
906 else if ((cur->physaddr - prev->physaddr) !=
911 /* if we're already inside a segment,
912 * new segment means end of current one
914 if (seg_start_page != -1) {
916 prev_seg_start_page =
919 seg_start_page = cur_page;
923 if (prev_seg_start_page != -1) {
924 /* we've found a new segment */
928 } else if (seg_start_page != -1) {
929 /* we didn't find new segment,
930 * but did end current one
938 /* we're skipping this page */
942 /* segment continues */
944 /* check if we missed last segment */
945 if (seg_start_page != -1) {
947 n_segs += cur_page - seg_start_page;
950 /* if no segments were found, do not preallocate */
954 /* we now have total number of pages that we will
955 * allocate for this segment list. add separator pages
956 * to the total count, and preallocate VA space.
958 n_segs += n_contig_segs - 1;
960 /* now, preallocate VA space for these segments */
962 /* first, find suitable memseg list for this */
963 for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS;
965 msl = &mcfg->memsegs[msl_idx];
967 if (msl->base_va != NULL)
971 if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
972 RTE_LOG(ERR, EAL, "Not enough space in memseg lists, please increase %s\n",
973 RTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));
977 /* now, allocate fbarray itself */
978 if (alloc_memseg_list(msl, page_sz, n_segs, socket,
982 /* finally, allocate VA space */
983 if (alloc_va_space(msl) < 0)
991 * We cannot reallocate memseg lists on the fly because PPC64 stores pages
992 * backwards, therefore we have to process the entire memseg first before
993 * remapping it into memseg list VA space.
996 remap_needed_hugepages(struct hugepage_file *hugepages, int n_pages)
998 int cur_page, seg_start_page, new_memseg, ret;
1001 for (cur_page = 0; cur_page < n_pages; cur_page++) {
1002 struct hugepage_file *prev, *cur;
1006 cur = &hugepages[cur_page];
1007 prev = cur_page == 0 ? NULL : &hugepages[cur_page - 1];
1009 /* if size is zero, no more pages left */
1015 else if (cur->socket_id != prev->socket_id)
1017 else if (cur->size != prev->size)
1019 #ifdef RTE_ARCH_PPC_64
1020 /* On the PPC64 architecture, mmap always proceeds from higher
1021 * addresses to lower addresses. Here, physical addresses are in
1024 else if ((prev->physaddr - cur->physaddr) != cur->size)
1027 else if ((cur->physaddr - prev->physaddr) != cur->size)
1032 /* if this isn't the first time, remap segment */
1033 if (cur_page != 0) {
1034 ret = remap_segment(hugepages, seg_start_page,
1039 /* remember where we started */
1040 seg_start_page = cur_page;
1042 /* continuation of previous memseg */
1044 /* we were stopped, but we didn't remap the last segment, do it now */
1045 if (cur_page != 0) {
1046 ret = remap_segment(hugepages, seg_start_page,
1054 static inline uint64_t
1055 get_socket_mem_size(int socket)
1060 for (i = 0; i < internal_config.num_hugepage_sizes; i++){
1061 struct hugepage_info *hpi = &internal_config.hugepage_info[i];
1062 if (hpi->hugedir != NULL)
1063 size += hpi->hugepage_sz * hpi->num_pages[socket];
1070 * This function is a NUMA-aware equivalent of calc_num_pages.
1071 * It takes in the list of hugepage sizes and the
1072 * number of pages thereof, and calculates the best number of
1073 * pages of each size to fulfill the request for <memory> RAM.
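*
* Worked example (illustrative numbers): when 512 MB is requested from a
* socket that offers 2 MB pages, the inner loop below picks
*   num_pages = RTE_MIN(512 MB / 2 MB, pages available on that socket)
* and only moves on to other page sizes if the request is still unmet.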
1076 calc_num_pages_per_socket(uint64_t * memory,
1077 struct hugepage_info *hp_info,
1078 struct hugepage_info *hp_used,
1079 unsigned num_hp_info)
1081 unsigned socket, j, i = 0;
1082 unsigned requested, available;
1083 int total_num_pages = 0;
1084 uint64_t remaining_mem, cur_mem;
1085 uint64_t total_mem = internal_config.memory;
1087 if (num_hp_info == 0)
1090 /* if specific memory amounts per socket weren't requested */
1091 if (internal_config.force_sockets == 0) {
1094 int cpu_per_socket[RTE_MAX_NUMA_NODES];
1095 size_t default_size;
1098 /* Compute number of cores per socket */
1099 memset(cpu_per_socket, 0, sizeof(cpu_per_socket));
1100 RTE_LCORE_FOREACH(lcore_id) {
1101 cpu_per_socket[rte_lcore_to_socket_id(lcore_id)]++;
1105 * Automatically spread requested memory amongst detected sockets according
1106 * to the number of cores from the CPU mask present on each socket
1108 total_size = internal_config.memory;
1109 for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {
1111 /* Set memory amount per socket */
1112 default_size = (internal_config.memory * cpu_per_socket[socket])
1113 / rte_lcore_count();
1115 /* Limit to maximum available memory on socket */
1116 default_size = RTE_MIN(default_size, get_socket_mem_size(socket));
1119 memory[socket] = default_size;
1120 total_size -= default_size;
1124 * If some memory is remaining, try to allocate it by getting all
1125 * available memory from sockets, one after the other
1127 for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {
1128 /* take whatever is available */
1129 default_size = RTE_MIN(get_socket_mem_size(socket) - memory[socket],
1133 memory[socket] += default_size;
1134 total_size -= default_size;
1137 /* in 32-bit mode, allocate all of the memory only on master
1140 total_size = internal_config.memory;
1141 for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0;
1143 struct rte_config *cfg = rte_eal_get_configuration();
1144 unsigned int master_lcore_socket;
1146 master_lcore_socket =
1147 rte_lcore_to_socket_id(cfg->master_lcore);
1149 if (master_lcore_socket != socket)
1153 memory[socket] = total_size;
1159 for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_mem != 0; socket++) {
1160 /* skip if memory on this specific socket wasn't requested */
1161 for (i = 0; i < num_hp_info && memory[socket] != 0; i++){
1162 hp_used[i].hugedir = hp_info[i].hugedir;
1163 hp_used[i].num_pages[socket] = RTE_MIN(
1164 memory[socket] / hp_info[i].hugepage_sz,
1165 hp_info[i].num_pages[socket]);
1167 cur_mem = hp_used[i].num_pages[socket] *
1168 hp_used[i].hugepage_sz;
1170 memory[socket] -= cur_mem;
1171 total_mem -= cur_mem;
1173 total_num_pages += hp_used[i].num_pages[socket];
1175 /* check if we have met all memory requests */
1176 if (memory[socket] == 0)
1179 /* check if we have used up all pages at this size; if so,
1180 * move on to the next size */
1181 if (hp_used[i].num_pages[socket] == hp_info[i].num_pages[socket])
1183 /* At this point we know that the pages still available at this size are
1184 * bigger than the memory we still want, so let's see if we can get enough
1185 * from other page sizes.
1188 for (j = i+1; j < num_hp_info; j++)
1189 remaining_mem += hp_info[j].hugepage_sz *
1190 hp_info[j].num_pages[socket];
1192 /* is there enough other memory? if not, allocate another page and quit */
1193 if (remaining_mem < memory[socket]){
1194 cur_mem = RTE_MIN(memory[socket],
1195 hp_info[i].hugepage_sz);
1196 memory[socket] -= cur_mem;
1197 total_mem -= cur_mem;
1198 hp_used[i].num_pages[socket]++;
1200 break; /* we are done with this socket*/
1203 /* if we didn't satisfy all memory requirements per socket */
1204 if (memory[socket] > 0 &&
1205 internal_config.socket_mem[socket] != 0) {
1206 /* to prevent icc errors */
1207 requested = (unsigned) (internal_config.socket_mem[socket] /
1209 available = requested -
1210 ((unsigned) (memory[socket] / 0x100000));
1211 RTE_LOG(ERR, EAL, "Not enough memory available on socket %u! "
1212 "Requested: %uMB, available: %uMB\n", socket,
1213 requested, available);
1218 /* if we didn't satisfy total memory requirements */
1219 if (total_mem > 0) {
1220 requested = (unsigned) (internal_config.memory / 0x100000);
1221 available = requested - (unsigned) (total_mem / 0x100000);
1222 RTE_LOG(ERR, EAL, "Not enough memory available! Requested: %uMB,"
1223 " available: %uMB\n", requested, available);
1226 return total_num_pages;
1229 static inline size_t
1230 eal_get_hugepage_mem_size(void)
1235 for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
1236 struct hugepage_info *hpi = &internal_config.hugepage_info[i];
1237 if (hpi->hugedir != NULL) {
1238 for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
1239 size += hpi->hugepage_sz * hpi->num_pages[j];
1244 return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
1247 static struct sigaction huge_action_old;
1248 static int huge_need_recover;
1251 huge_register_sigbus(void)
1254 struct sigaction action;
1257 sigaddset(&mask, SIGBUS);
1258 action.sa_flags = 0;
1259 action.sa_mask = mask;
1260 action.sa_handler = huge_sigbus_handler;
1262 huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);
1266 huge_recover_sigbus(void)
1268 if (huge_need_recover) {
1269 sigaction(SIGBUS, &huge_action_old, NULL);
1270 huge_need_recover = 0;
1275 * Prepare physical memory mapping: fill the configuration structure with
1276 * this information and return 0 on success.
1277 * 1. map N huge pages in separate files in hugetlbfs
1278 * 2. find associated physical addr
1279 * 3. find associated NUMA socket ID
1280 * 4. sort all huge pages by physical address
1281 * 5. remap these N huge pages in the correct order
1282 * 6. unmap the first mapping
1283 * 7. fill memsegs in configuration with contiguous zones
1286 eal_legacy_hugepage_init(void)
1288 struct rte_mem_config *mcfg;
1289 struct hugepage_file *hugepage = NULL, *tmp_hp = NULL;
1290 struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
1291 struct rte_fbarray *arr;
1292 struct rte_memseg *ms;
1294 uint64_t memory[RTE_MAX_NUMA_NODES];
1298 int nr_hugefiles, nr_hugepages = 0;
1301 test_phys_addrs_available();
1303 memset(used_hp, 0, sizeof(used_hp));
1305 /* get pointer to global configuration */
1306 mcfg = rte_eal_get_configuration()->mem_config;
1308 /* hugetlbfs can be disabled */
1309 if (internal_config.no_hugetlbfs) {
1310 struct rte_memseg_list *msl;
1312 int n_segs, cur_seg;
1314 /* nohuge mode is legacy mode */
1315 internal_config.legacy_mem = 1;
1317 /* create a memseg list */
1318 msl = &mcfg->memsegs[0];
1320 page_sz = RTE_PGSIZE_4K;
1321 n_segs = internal_config.memory / page_sz;
1323 if (rte_fbarray_init(&msl->memseg_arr, "nohugemem", n_segs,
1324 sizeof(struct rte_memseg))) {
1325 RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
1329 addr = mmap(NULL, internal_config.memory, PROT_READ | PROT_WRITE,
1330 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1331 if (addr == MAP_FAILED) {
1332 RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
1336 msl->base_va = addr;
1337 msl->page_sz = page_sz;
1340 /* populate memsegs. each memseg is one page long */
1341 for (cur_seg = 0; cur_seg < n_segs; cur_seg++) {
1342 arr = &msl->memseg_arr;
1344 ms = rte_fbarray_get(arr, cur_seg);
1345 if (rte_eal_iova_mode() == RTE_IOVA_VA)
1346 ms->iova = (uintptr_t)addr;
1348 ms->iova = RTE_BAD_IOVA;
1350 ms->hugepage_sz = page_sz;
1354 rte_fbarray_set_used(arr, cur_seg);
1356 addr = RTE_PTR_ADD(addr, (size_t)page_sz);
1361 /* calculate total number of hugepages available. at this point we haven't
1362 * yet started sorting them so they all are on socket 0 */
1363 for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
1364 /* meanwhile, also initialize used_hp hugepage sizes in used_hp */
1365 used_hp[i].hugepage_sz = internal_config.hugepage_info[i].hugepage_sz;
1367 nr_hugepages += internal_config.hugepage_info[i].num_pages[0];
1371 * allocate a memory area for hugepage table.
1372 * this isn't shared memory yet. because we need some
1373 * processing done on these pages, shared memory will be created
1376 tmp_hp = malloc(nr_hugepages * sizeof(struct hugepage_file));
1380 memset(tmp_hp, 0, nr_hugepages * sizeof(struct hugepage_file));
1382 hp_offset = 0; /* where we start the current page size entries */
1384 huge_register_sigbus();
1386 /* make a copy of socket_mem, needed for balanced allocation. */
1387 for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
1388 memory[i] = internal_config.socket_mem[i];
1390 /* map all hugepages and sort them */
1391 for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
1392 unsigned pages_old, pages_new;
1393 struct hugepage_info *hpi;
1396 * we don't yet mark hugepages as used at this stage, so
1397 * we just map all hugepages available to the system;
1398 * all hugepages are still located on socket 0
1400 hpi = &internal_config.hugepage_info[i];
1402 if (hpi->num_pages[0] == 0)
1405 /* map all hugepages available */
1406 pages_old = hpi->num_pages[0];
1407 pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, memory);
1408 if (pages_new < pages_old) {
1410 "%d not %d hugepages of size %u MB allocated\n",
1411 pages_new, pages_old,
1412 (unsigned)(hpi->hugepage_sz / 0x100000));
1414 int pages = pages_old - pages_new;
1416 nr_hugepages -= pages;
1417 hpi->num_pages[0] = pages_new;
1422 if (phys_addrs_available &&
1423 rte_eal_iova_mode() != RTE_IOVA_VA) {
1424 /* find physical addresses for each hugepage */
1425 if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
1426 RTE_LOG(DEBUG, EAL, "Failed to find phys addr "
1427 "for %u MB pages\n",
1428 (unsigned int)(hpi->hugepage_sz / 0x100000));
1432 /* set physical addresses for each hugepage */
1433 if (set_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
1434 RTE_LOG(DEBUG, EAL, "Failed to set phys addr "
1435 "for %u MB pages\n",
1436 (unsigned int)(hpi->hugepage_sz / 0x100000));
1441 if (find_numasocket(&tmp_hp[hp_offset], hpi) < 0){
1442 RTE_LOG(DEBUG, EAL, "Failed to find NUMA socket for %u MB pages\n",
1443 (unsigned)(hpi->hugepage_sz / 0x100000));
1447 qsort(&tmp_hp[hp_offset], hpi->num_pages[0],
1448 sizeof(struct hugepage_file), cmp_physaddr);
1450 /* we have processed a number of hugepages of this size, so increment offset */
1451 hp_offset += hpi->num_pages[0];
1454 huge_recover_sigbus();
1456 if (internal_config.memory == 0 && internal_config.force_sockets == 0)
1457 internal_config.memory = eal_get_hugepage_mem_size();
1459 nr_hugefiles = nr_hugepages;
1462 /* clean out the numbers of pages */
1463 for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++)
1464 for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
1465 internal_config.hugepage_info[i].num_pages[j] = 0;
1467 /* get hugepages for each socket */
1468 for (i = 0; i < nr_hugefiles; i++) {
1469 int socket = tmp_hp[i].socket_id;
1471 /* find a hugepage info with right size and increment num_pages */
1472 const int nb_hpsizes = RTE_MIN(MAX_HUGEPAGE_SIZES,
1473 (int)internal_config.num_hugepage_sizes);
1474 for (j = 0; j < nb_hpsizes; j++) {
1475 if (tmp_hp[i].size ==
1476 internal_config.hugepage_info[j].hugepage_sz) {
1477 internal_config.hugepage_info[j].num_pages[socket]++;
1482 /* make a copy of socket_mem, needed for number of pages calculation */
1483 for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
1484 memory[i] = internal_config.socket_mem[i];
1486 /* calculate final number of pages */
1487 nr_hugepages = calc_num_pages_per_socket(memory,
1488 internal_config.hugepage_info, used_hp,
1489 internal_config.num_hugepage_sizes);
1491 /* error if not enough memory available */
1492 if (nr_hugepages < 0)
1496 for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
1497 for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
1498 if (used_hp[i].num_pages[j] > 0) {
1500 "Requesting %u pages of size %uMB"
1501 " from socket %i\n",
1502 used_hp[i].num_pages[j],
1504 (used_hp[i].hugepage_sz / 0x100000),
1510 /* create shared memory */
1511 hugepage = create_shared_memory(eal_hugepage_info_path(),
1512 nr_hugefiles * sizeof(struct hugepage_file));
1514 if (hugepage == NULL) {
1515 RTE_LOG(ERR, EAL, "Failed to create shared memory!\n");
1518 memset(hugepage, 0, nr_hugefiles * sizeof(struct hugepage_file));
1521 * unmap pages that we won't need (looks at used_hp).
1522 * also, sets final_va to NULL on pages that were unmapped.
1524 if (unmap_unneeded_hugepages(tmp_hp, used_hp,
1525 internal_config.num_hugepage_sizes) < 0) {
1526 RTE_LOG(ERR, EAL, "Unmapping and locking hugepages failed!\n");
1531 * copy stuff from malloc'd hugepage* to the actual shared memory.
1532 * this procedure only copies those hugepages that have orig_va
1533 * not NULL. has overflow protection.
1535 if (copy_hugepages_to_shared_mem(hugepage, nr_hugefiles,
1536 tmp_hp, nr_hugefiles) < 0) {
1537 RTE_LOG(ERR, EAL, "Copying tables to shared memory failed!\n");
1542 /* for legacy 32-bit mode, we did not preallocate VA space, so do it */
1543 if (internal_config.legacy_mem &&
1544 prealloc_segments(hugepage, nr_hugefiles)) {
1545 RTE_LOG(ERR, EAL, "Could not preallocate VA space for hugepages\n");
1550 /* remap all pages we do need into memseg list VA space, so that those
1551 * pages become first-class citizens in DPDK memory subsystem
1553 if (remap_needed_hugepages(hugepage, nr_hugefiles)) {
1554 RTE_LOG(ERR, EAL, "Couldn't remap hugepage files into memseg lists\n");
1558 /* free the hugepage backing files */
1559 if (internal_config.hugepage_unlink &&
1560 unlink_hugepage_files(tmp_hp, internal_config.num_hugepage_sizes) < 0) {
1561 RTE_LOG(ERR, EAL, "Unlinking hugepage files failed!\n");
1565 /* free the temporary hugepage table */
1569 munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));
1571 /* we're not going to allocate more pages, so release VA space for
1572 * unused memseg lists
1574 for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
1575 struct rte_memseg_list *msl = &mcfg->memsegs[i];
1578 /* skip inactive lists */
1579 if (msl->base_va == NULL)
1581 /* skip lists where there is at least one page allocated */
1582 if (msl->memseg_arr.count > 0)
1584 /* this is an unused list, deallocate it */
1585 mem_sz = (size_t)msl->page_sz * msl->memseg_arr.len;
1586 munmap(msl->base_va, mem_sz);
1587 msl->base_va = NULL;
1589 /* destroy backing fbarray */
1590 rte_fbarray_destroy(&msl->memseg_arr);
1596 huge_recover_sigbus();
1598 if (hugepage != NULL)
1599 munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));
1605 * uses fstat to report the size of a file on disk
1611 if (fstat(fd, &st) < 0)
1617 * This creates the memory mappings in the secondary process to match those
1618 * of the primary process. It goes through each memory segment in the DPDK
1619 * runtime configuration and finds the hugepages which form that segment,
1620 * mapping them in order to form a contiguous block in virtual memory space.
1623 eal_legacy_hugepage_attach(void)
1625 struct hugepage_file *hp = NULL;
1626 unsigned int num_hp = 0;
1628 unsigned int cur_seg;
1630 int fd, fd_hugepage = -1;
1632 if (aslr_enabled() > 0) {
1633 RTE_LOG(WARNING, EAL, "WARNING: Address Space Layout Randomization "
1634 "(ASLR) is enabled in the kernel.\n");
1635 RTE_LOG(WARNING, EAL, " This may cause issues with mapping memory "
1636 "into secondary processes\n");
1639 test_phys_addrs_available();
1641 fd_hugepage = open(eal_hugepage_info_path(), O_RDONLY);
1642 if (fd_hugepage < 0) {
1643 RTE_LOG(ERR, EAL, "Could not open %s\n", eal_hugepage_info_path());
1647 size = getFileSize(fd_hugepage);
1648 hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0);
1649 if (hp == MAP_FAILED) {
1650 RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path());
1654 num_hp = size / sizeof(struct hugepage_file);
1655 RTE_LOG(DEBUG, EAL, "Analysing %u files\n", num_hp);
1657 /* map all segments into memory to make sure we get the addrs. the
1658 * segments themselves are already in memseg list (which is shared and
1659 * has its VA space already preallocated), so we just need to map
1660 * everything into correct addresses.
1662 for (i = 0; i < num_hp; i++) {
1663 struct hugepage_file *hf = &hp[i];
1664 size_t map_sz = hf->size;
1665 void *map_addr = hf->final_va;
1668 /* if size is zero, no more pages left */
1672 fd = open(hf->filepath, O_RDWR);
1674 RTE_LOG(ERR, EAL, "Could not open %s: %s\n",
1675 hf->filepath, strerror(errno));
1679 map_addr = mmap(map_addr, map_sz, PROT_READ | PROT_WRITE,
1680 MAP_SHARED | MAP_FIXED, fd, 0);
1681 if (map_addr == MAP_FAILED) {
1682 RTE_LOG(ERR, EAL, "Could not map %s: %s\n",
1683 hf->filepath, strerror(errno));
1687 /* set shared lock on the file. */
1688 lck.l_type = F_RDLCK;
1689 lck.l_whence = SEEK_SET;
1692 if (fcntl(fd, F_SETLK, &lck) == -1) {
1693 RTE_LOG(DEBUG, EAL, "%s(): Locking file failed: %s\n",
1694 __func__, strerror(errno));
1701 /* unmap the hugepage config file, since we are done using it */
1707 /* unmap any segments we managed to map before the failure */
1709 for (cur_seg = 0; cur_seg < i; cur_seg++) {
1710 struct hugepage_file *hf = &hp[cur_seg];
1711 size_t map_sz = hf->size;
1712 void *map_addr = hf->final_va;
1714 munmap(map_addr, map_sz);
1716 if (hp != NULL && hp != MAP_FAILED)
1718 if (fd_hugepage >= 0)
1724 rte_eal_hugepage_init(void)
1726 if (internal_config.legacy_mem)
1727 return eal_legacy_hugepage_init();
1732 rte_eal_hugepage_attach(void)
1734 if (internal_config.legacy_mem)
1735 return eal_legacy_hugepage_attach();
1740 rte_eal_using_phys_addrs(void)
1742 return phys_addrs_available;