/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright(c) 2013 6WIND S.A.
 */
#define _FILE_OFFSET_BITS 64
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <limits.h>
#include <signal.h>
#include <setjmp.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/queue.h>
#include <sys/file.h>
#include <sys/ioctl.h>
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
#include <numa.h>
#include <numaif.h>
#endif

#include <rte_errno.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_string_fns.h>

#include "eal_private.h"
#include "eal_internal_cfg.h"
#include "eal_filesystem.h"
#include "eal_hugepages.h"
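/* Each entry in /proc/self/pagemap is a 64-bit word: bits 0-54 hold the
 * page frame number and bit 63 marks the page as present, so exactly
 * 8 bytes are read per virtual page when resolving physical addresses.
 */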
#define PFN_MASK_SIZE 8
/*
 * Huge page mapping under Linux
 *
 * To reserve a big contiguous amount of memory, we use the hugepage
 * feature of Linux. For that, we need to have hugetlbfs mounted. This
 * code will create many files in the hugetlbfs mount point (one per page)
 * and map them into virtual memory. For each page, we will retrieve its
 * physical address and remap it in order to have a virtual contiguous
 * zone as well as a physical contiguous zone.
 */
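/* True while physical addresses of hugepages can be obtained; cleared when
 * running without hugepage support or when /proc/self/pagemap is unusable.
 */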
static bool phys_addrs_available = true;

#define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"
static void
test_phys_addrs_available(void)
{
	uint64_t tmp;
	phys_addr_t physaddr;

	if (!rte_eal_has_hugepages()) {
		RTE_LOG(ERR, EAL,
			"Started without hugepages support, physical addresses not available\n");
		phys_addrs_available = false;
		return;
	}

	physaddr = rte_mem_virt2phy(&tmp);
	if (physaddr == RTE_BAD_PHYS_ADDR) {
		if (rte_eal_iova_mode() == RTE_IOVA_PA)
			RTE_LOG(ERR, EAL,
				"Cannot obtain physical addresses: %s. "
				"Only vfio will function.\n",
				strerror(errno));
		phys_addrs_available = false;
	}
}
/*
 * Get physical address of any mapped virtual address in the current process.
 */
phys_addr_t
rte_mem_virt2phy(const void *virtaddr)
{
	int fd, retval;
	uint64_t page, physaddr;
	unsigned long virt_pfn;
	int page_size;
	off_t offset;

	/* Cannot parse /proc/self/pagemap, no need to log errors everywhere */
	if (!phys_addrs_available)
		return RTE_BAD_IOVA;

	/* standard page size */
	page_size = getpagesize();

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
			__func__, strerror(errno));
		return RTE_BAD_IOVA;
	}

	virt_pfn = (unsigned long)virtaddr / page_size;
	offset = sizeof(uint64_t) * virt_pfn;
	if (lseek(fd, offset, SEEK_SET) == (off_t) -1) {
		RTE_LOG(ERR, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
			__func__, strerror(errno));
		close(fd);
		return RTE_BAD_IOVA;
	}

	retval = read(fd, &page, PFN_MASK_SIZE);
	close(fd);
	if (retval < 0) {
		RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
			__func__, strerror(errno));
		return RTE_BAD_IOVA;
	} else if (retval != PFN_MASK_SIZE) {
		RTE_LOG(ERR, EAL, "%s(): read %d bytes from /proc/self/pagemap "
			"but expected %d:\n",
			__func__, retval, PFN_MASK_SIZE);
		return RTE_BAD_IOVA;
	}

	/*
	 * the pfn (page frame number) is in bits 0-54 (see
	 * pagemap.txt in Linux Documentation)
	 */
	if ((page & 0x7fffffffffffffULL) == 0)
		return RTE_BAD_IOVA;

	physaddr = ((page & 0x7fffffffffffffULL) * page_size)
		+ ((unsigned long)virtaddr % page_size);

	return physaddr;
}
rte_iova_t
rte_mem_virt2iova(const void *virtaddr)
{
	if (rte_eal_iova_mode() == RTE_IOVA_VA)
		return (uintptr_t)virtaddr;
	return rte_mem_virt2phy(virtaddr);
}
/*
 * For each hugepage in hugepg_tbl, fill the physaddr value. We find
 * it by browsing the /proc/self/pagemap special file.
 */
static int
find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
	unsigned int i;
	phys_addr_t addr;

	for (i = 0; i < hpi->num_pages[0]; i++) {
		addr = rte_mem_virt2phy(hugepg_tbl[i].orig_va);
		if (addr == RTE_BAD_PHYS_ADDR)
			return -1;
		hugepg_tbl[i].physaddr = addr;
	}
	return 0;
}
/*
 * For each hugepage in hugepg_tbl, fill the physaddr value sequentially.
 */
static int
set_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
	unsigned int i;
	static phys_addr_t addr;

	for (i = 0; i < hpi->num_pages[0]; i++) {
		hugepg_tbl[i].physaddr = addr;
		addr += hugepg_tbl[i].size;
	}
	return 0;
}
/*
 * Check whether address-space layout randomization is enabled in
 * the kernel. This is important for multi-process as it can prevent
 * two processes from mapping data to the same virtual address.
 * Returns:
 *    0 - address space randomization disabled
 *    1/2 - address space randomization enabled
 *    negative error code on error
 */
static int
aslr_enabled(void)
{
	char c;
	int retval, fd = open(RANDOMIZE_VA_SPACE_FILE, O_RDONLY);

	if (fd < 0)
		return -errno;
	retval = read(fd, &c, 1);
	close(fd);
	if (retval < 0)
		return -errno;
	if (retval == 0)
		return -EIO;
	switch (c) {
	case '0': return 0;
	case '1': return 1;
	case '2': return 2;
	default: return -EINVAL;
	}
}
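/* Jump buffer and SIGBUS handler used to back out of a hugepage fault that
 * the kernel refuses at touch time (e.g. a cgroup/hugetlb limit).
 */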
static sigjmp_buf huge_jmpenv;

static void huge_sigbus_handler(int signo __rte_unused)
{
	siglongjmp(huge_jmpenv, 1);
}
/* Put setjmp into a wrap method to avoid compiling error. Any non-volatile,
 * non-static local variable in the stack frame calling sigsetjmp might be
 * clobbered by a call to longjmp.
 */
static int huge_wrap_sigsetjmp(void)
{
	return sigsetjmp(huge_jmpenv, 1);
}
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
/* Callback for numa library. */
void numa_error(char *where)
{
	RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
}
#endif
/*
 * Mmap all hugepages of hugepage table: it first opens a file in
 * hugetlbfs, then mmap()s hugepage_sz bytes of data in it. If orig is set,
 * the virtual address is stored in hugepg_tbl[i].orig_va, else it is stored
 * in hugepg_tbl[i].final_va. The second mapping (when orig is 0) tries to
 * map contiguous physical blocks in contiguous virtual blocks.
 */
static unsigned
map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
		uint64_t *essential_memory __rte_unused, int orig)
{
	int fd;
	unsigned i;
	void *virtaddr;
	void *vma_addr = NULL;
	size_t vma_len = 0;
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	int node_id = -1;
	int essential_prev = 0;
	int oldpolicy;
	struct bitmask *oldmask = numa_allocate_nodemask();
	bool have_numa = true;
	unsigned long maxnode = 0;

	/* Check if kernel supports NUMA. */
	if (numa_available() != 0) {
		RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
		have_numa = false;
	}
	if (orig && have_numa) {
		RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
		if (get_mempolicy(&oldpolicy, oldmask->maskp,
				oldmask->size + 1, 0, 0) < 0) {
			RTE_LOG(ERR, EAL,
				"Failed to get current mempolicy: %s. "
				"Assuming MPOL_DEFAULT.\n", strerror(errno));
			oldpolicy = MPOL_DEFAULT;
		}
		for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
			if (internal_config.socket_mem[i])
				maxnode = i + 1;
	}
#endif
	for (i = 0; i < hpi->num_pages[0]; i++) {
		uint64_t hugepage_sz = hpi->hugepage_sz;

#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
		if (maxnode) {
			unsigned int j;

			for (j = 0; j < maxnode; j++)
				if (essential_memory[j])
					break;

			if (j == maxnode) {
				node_id = (node_id + 1) % maxnode;
				while (!internal_config.socket_mem[node_id]) {
					node_id++;
					node_id %= maxnode;
				}
				essential_prev = 0;
			} else {
				node_id = j;
				essential_prev = essential_memory[j];

				if (essential_memory[j] < hugepage_sz)
					essential_memory[j] = 0;
				else
					essential_memory[j] -= hugepage_sz;
			}

			RTE_LOG(DEBUG, EAL,
				"Setting policy MPOL_PREFERRED for socket %d\n",
				node_id);
			numa_set_preferred(node_id);
		}
#endif

		if (orig) {
			hugepg_tbl[i].file_id = i;
			hugepg_tbl[i].size = hugepage_sz;
			eal_get_hugefile_path(hugepg_tbl[i].filepath,
					sizeof(hugepg_tbl[i].filepath), hpi->hugedir,
					hugepg_tbl[i].file_id);
			hugepg_tbl[i].filepath[sizeof(hugepg_tbl[i].filepath) - 1] = '\0';
		}
#ifndef RTE_ARCH_64
		/* for 32-bit systems, don't remap 1G and 16G pages, just reuse
		 * original map address as final map address.
		 */
		else if ((hugepage_sz == RTE_PGSIZE_1G)
				|| (hugepage_sz == RTE_PGSIZE_16G)) {
			hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
			hugepg_tbl[i].orig_va = NULL;
			continue;
		}
#endif
		else if (vma_len == 0) {
			unsigned j, num_pages;

			/* reserve a virtual area for next contiguous
			 * physical block: count the number of
			 * contiguous physical pages. */
			for (j = i+1; j < hpi->num_pages[0]; j++) {
#ifdef RTE_ARCH_PPC_64
				/* The physical addresses are sorted in
				 * descending order on PPC64 */
				if (hugepg_tbl[j].physaddr !=
				    hugepg_tbl[j-1].physaddr - hugepage_sz)
					break;
#else
				if (hugepg_tbl[j].physaddr !=
				    hugepg_tbl[j-1].physaddr + hugepage_sz)
					break;
#endif
			}
			num_pages = j - i;
			vma_len = num_pages * hugepage_sz;

			/* get the biggest virtual memory area up to
			 * vma_len. If it fails, vma_addr is NULL, so
			 * let the kernel provide the address. */
			vma_addr = eal_get_virtual_area(NULL, &vma_len,
					hugepage_sz,
					EAL_VIRTUAL_AREA_ALLOW_SHRINK |
					EAL_VIRTUAL_AREA_UNMAP,
#ifdef RTE_ARCH_PPC_64
					MAP_HUGETLB
#else
					0
#endif
					);
			if (vma_addr == NULL)
				vma_len = hugepage_sz;
		}
		/* try to create hugepage file */
		fd = open(hugepg_tbl[i].filepath, O_CREAT | O_RDWR, 0600);
		if (fd < 0) {
			RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
					strerror(errno));
			goto out;
		}

		/* map the segment, and populate page tables,
		 * the kernel fills this segment with zeros */
		virtaddr = mmap(vma_addr, hugepage_sz, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_POPULATE, fd, 0);
		if (virtaddr == MAP_FAILED) {
			RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
					strerror(errno));
			close(fd);
			goto out;
		}

		if (orig) {
			hugepg_tbl[i].orig_va = virtaddr;
		} else {
			/* rewrite physical addresses in IOVA as VA mode */
			if (rte_eal_iova_mode() == RTE_IOVA_VA)
				hugepg_tbl[i].physaddr = (uintptr_t)virtaddr;
			hugepg_tbl[i].final_va = virtaddr;
		}
		if (orig) {
			/* In Linux, hugetlb limitations, like cgroup, are
			 * enforced at fault time instead of mmap(), even
			 * with the option of MAP_POPULATE. Kernel will send
			 * a SIGBUS signal. To avoid being killed, save the
			 * stack environment here; if SIGBUS happens, we can
			 * jump back to this point.
			 */
			if (huge_wrap_sigsetjmp()) {
				RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more "
					"hugepages of size %u MB\n",
					(unsigned)(hugepage_sz / 0x100000));
				munmap(virtaddr, hugepage_sz);
				close(fd);
				unlink(hugepg_tbl[i].filepath);
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
				if (maxnode)
					essential_memory[node_id] =
						essential_prev;
#endif
				goto out;
			}
			*(int *)virtaddr = 0;
		}

		/* set shared flock on the file. */
		if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
			RTE_LOG(DEBUG, EAL, "%s(): Locking file failed: %s\n",
				__func__, strerror(errno));
			close(fd);
			goto out;
		}

		close(fd);

		vma_addr = (char *)vma_addr + hugepage_sz;
		vma_len -= hugepage_sz;
	}

out:
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
	if (maxnode) {
		RTE_LOG(DEBUG, EAL,
			"Restoring previous memory policy: %d\n", oldpolicy);
		if (oldpolicy == MPOL_DEFAULT) {
			numa_set_localalloc();
		} else if (set_mempolicy(oldpolicy, oldmask->maskp,
				oldmask->size + 1) < 0) {
			RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
				strerror(errno));
			numa_set_localalloc();
		}
	}
	numa_free_cpumask(oldmask);
#endif
	return i;
}
/* Unmap all hugepages from original mapping */
static int
unmap_all_hugepages_orig(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
	unsigned i;

	for (i = 0; i < hpi->num_pages[0]; i++) {
		if (hugepg_tbl[i].orig_va) {
			munmap(hugepg_tbl[i].orig_va, hpi->hugepage_sz);
			hugepg_tbl[i].orig_va = NULL;
		}
	}
	return 0;
}
/*
 * Parse /proc/self/numa_maps to get the NUMA socket ID for each huge
 * page.
 */
static int
find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
	int socket_id;
	char *end, *nodestr;
	unsigned i, hp_count = 0;
	uint64_t virt_addr;
	char buf[BUFSIZ];
	char hugedir_str[PATH_MAX];
	FILE *f;

	f = fopen("/proc/self/numa_maps", "r");
	if (f == NULL) {
		RTE_LOG(NOTICE, EAL, "NUMA support not available,"
			" consider that all memory is in socket_id 0\n");
		return 0;
	}

	snprintf(hugedir_str, sizeof(hugedir_str),
			"%s/%s", hpi->hugedir, internal_config.hugefile_prefix);

	/* parse numa map */
	while (fgets(buf, sizeof(buf), f) != NULL) {

		/* ignore non huge page */
		if (strstr(buf, " huge ") == NULL &&
				strstr(buf, hugedir_str) == NULL)
			continue;

		/* get zone addr */
		virt_addr = strtoull(buf, &end, 16);
		if (virt_addr == 0 || end == buf) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}

		/* get node id (socket id) */
		nodestr = strstr(buf, " N");
		if (nodestr == NULL) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}

		/* trim the node id */
		nodestr += 2;
		end = strstr(nodestr, "=");
		if (end == NULL) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}
		end[0] = '\0';
		end = NULL;

		socket_id = strtoul(nodestr, &end, 0);
		if ((nodestr[0] == '\0') || (end == NULL) || (*end != '\0')) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}

		/* if we find this page in our mappings, set socket_id */
		for (i = 0; i < hpi->num_pages[0]; i++) {
			void *va = (void *)(unsigned long)virt_addr;
			if (hugepg_tbl[i].orig_va == va) {
				hugepg_tbl[i].socket_id = socket_id;
				hp_count++;
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
				RTE_LOG(DEBUG, EAL,
					"Hugepage %s is on socket %d\n",
					hugepg_tbl[i].filepath, socket_id);
#endif
			}
		}
	}

	if (hp_count < hpi->num_pages[0])
		goto error;

	fclose(f);
	return 0;

error:
	fclose(f);
	return -1;
}
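/* Sort hugepages by physical address: ascending on most architectures,
 * descending on PPC64, which needs memory sorted in reverse order.
 */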
static int
cmp_physaddr(const void *a, const void *b)
{
#ifndef RTE_ARCH_PPC_64
	const struct hugepage_file *p1 = a;
	const struct hugepage_file *p2 = b;
#else
	/* PowerPC needs memory sorted in reverse order from x86 */
	const struct hugepage_file *p1 = b;
	const struct hugepage_file *p2 = a;
#endif
	if (p1->physaddr < p2->physaddr)
		return -1;
	else if (p1->physaddr > p2->physaddr)
		return 1;
	else
		return 0;
}
/*
 * Uses mmap to create a shared memory area for storage of data
 * Used in this file to store the hugepage file map on disk
 */
static void *
create_shared_memory(const char *filename, const size_t mem_size)
{
	void *retval;
	int fd = open(filename, O_CREAT | O_RDWR, 0666);
	if (fd < 0)
		return NULL;
	if (ftruncate(fd, mem_size) < 0) {
		close(fd);
		return NULL;
	}
	retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);
	if (retval == MAP_FAILED)
		return NULL;
	return retval;
}
/*
 * this copies *active* hugepages from one hugepage table to another.
 * destination is typically the shared memory.
 */
static int
copy_hugepages_to_shared_mem(struct hugepage_file *dst, int dest_size,
		const struct hugepage_file *src, int src_size)
{
	int src_pos, dst_pos = 0;

	for (src_pos = 0; src_pos < src_size; src_pos++) {
		if (src[src_pos].final_va != NULL) {
			/* error on overflow attempt */
			if (dst_pos == dest_size)
				return -1;
			memcpy(&dst[dst_pos], &src[src_pos], sizeof(struct hugepage_file));
			dst_pos++;
		}
	}
	return 0;
}
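/* Remove the hugepage backing files once mappings are established; used
 * when the --huge-unlink EAL option is given.
 */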
static int
unlink_hugepage_files(struct hugepage_file *hugepg_tbl,
		unsigned num_hp_info)
{
	unsigned socket, size;
	int page, nrpages = 0;

	/* get total number of hugepages */
	for (size = 0; size < num_hp_info; size++)
		for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
			nrpages +=
			internal_config.hugepage_info[size].num_pages[socket];

	for (page = 0; page < nrpages; page++) {
		struct hugepage_file *hp = &hugepg_tbl[page];

		if (hp->final_va != NULL && unlink(hp->filepath)) {
			RTE_LOG(WARNING, EAL, "%s(): Removing %s failed: %s\n",
				__func__, hp->filepath, strerror(errno));
		}
	}
	return 0;
}
/*
 * unmaps hugepages that are not going to be used. since we originally allocate
 * ALL hugepages (not just those we need), additional unmapping needs to be done.
 */
static int
unmap_unneeded_hugepages(struct hugepage_file *hugepg_tbl,
		struct hugepage_info *hpi,
		unsigned num_hp_info)
{
	unsigned socket, size;
	int page, nrpages = 0;

	/* get total number of hugepages */
	for (size = 0; size < num_hp_info; size++)
		for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
			nrpages += internal_config.hugepage_info[size].num_pages[socket];

	for (size = 0; size < num_hp_info; size++) {
		for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
			unsigned pages_found = 0;

			/* traverse until we have unmapped all the unused pages */
			for (page = 0; page < nrpages; page++) {
				struct hugepage_file *hp = &hugepg_tbl[page];

				/* find a page that matches the criteria */
				if ((hp->size == hpi[size].hugepage_sz) &&
						(hp->socket_id == (int) socket)) {

					/* if we skipped enough pages, unmap the rest */
					if (pages_found == hpi[size].num_pages[socket]) {
						uint64_t unmap_len;

						unmap_len = hp->size;

						/* get start addr and len of the remaining segment */
						munmap(hp->final_va, (size_t) unmap_len);

						hp->final_va = NULL;
						if (unlink(hp->filepath) == -1) {
							RTE_LOG(ERR, EAL, "%s(): Removing %s failed: %s\n",
									__func__, hp->filepath, strerror(errno));
							return -1;
						}
					} else {
						/* lock the page and skip */
						pages_found++;
					}

				} /* match page */
			} /* foreach page */
		} /* foreach socket */
	} /* foreach pagesize */

	return 0;
}
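/* Total amount of hugepage memory, in bytes, available on a given socket
 * across all configured hugepage sizes.
 */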
static inline uint64_t
get_socket_mem_size(int socket)
{
	uint64_t size = 0;
	unsigned i;

	for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
		struct hugepage_info *hpi = &internal_config.hugepage_info[i];
		if (hpi->hugedir != NULL)
			size += hpi->hugepage_sz * hpi->num_pages[socket];
	}

	return size;
}
/*
 * This function is a NUMA-aware equivalent of calc_num_pages.
 * It takes in the list of hugepage sizes and the
 * number of pages thereof, and calculates the best number of
 * pages of each size to fulfill the request for the given amount of RAM.
 */
static int
calc_num_pages_per_socket(uint64_t *memory,
		struct hugepage_info *hp_info,
		struct hugepage_info *hp_used,
		unsigned num_hp_info)
{
	unsigned socket, j, i = 0;
	unsigned requested, available;
	int total_num_pages = 0;
	uint64_t remaining_mem, cur_mem;
	uint64_t total_mem = internal_config.memory;

	if (num_hp_info == 0)
		return -1;

	/* if specific memory amounts per socket weren't requested */
	if (internal_config.force_sockets == 0) {
		int cpu_per_socket[RTE_MAX_NUMA_NODES];
		size_t default_size, total_size;
		unsigned lcore_id;

		/* Compute number of cores per socket */
		memset(cpu_per_socket, 0, sizeof(cpu_per_socket));
		RTE_LCORE_FOREACH(lcore_id) {
			cpu_per_socket[rte_lcore_to_socket_id(lcore_id)]++;
		}

		/*
		 * Automatically spread requested memory amongst detected sockets according
		 * to number of cores from cpu mask present on each socket
		 */
		total_size = internal_config.memory;
		for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {

			/* Set memory amount per socket */
			default_size = (internal_config.memory * cpu_per_socket[socket])
					/ rte_lcore_count();

			/* Limit to maximum available memory on socket */
			default_size = RTE_MIN(default_size, get_socket_mem_size(socket));

			/* Update sizes */
			memory[socket] = default_size;
			total_size -= default_size;
		}

		/*
		 * If some memory is remaining, try to allocate it by getting all
		 * available memory from sockets, one after the other
		 */
		for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {
			/* take whatever is available */
			default_size = RTE_MIN(get_socket_mem_size(socket) - memory[socket],
					total_size);

			/* Update sizes */
			memory[socket] += default_size;
			total_size -= default_size;
		}
	}

	for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_mem != 0; socket++) {
		/* skips if the memory on specific socket wasn't requested */
		for (i = 0; i < num_hp_info && memory[socket] != 0; i++) {
			hp_used[i].hugedir = hp_info[i].hugedir;
			hp_used[i].num_pages[socket] = RTE_MIN(
					memory[socket] / hp_info[i].hugepage_sz,
					hp_info[i].num_pages[socket]);

			cur_mem = hp_used[i].num_pages[socket] *
					hp_used[i].hugepage_sz;

			memory[socket] -= cur_mem;
			total_mem -= cur_mem;

			total_num_pages += hp_used[i].num_pages[socket];

			/* check if we have met all memory requests */
			if (memory[socket] == 0)
				break;

			/* check if we have any more pages left at this size, if so
			 * move on to next size */
			if (hp_used[i].num_pages[socket] == hp_info[i].num_pages[socket])
				continue;
			/* At this point we know that there are more pages available that are
			 * bigger than the memory we want, so let's see if we can get enough
			 * from other page sizes.
			 */
			remaining_mem = 0;
			for (j = i+1; j < num_hp_info; j++)
				remaining_mem += hp_info[j].hugepage_sz *
					hp_info[j].num_pages[socket];

			/* is there enough other memory? if not, allocate another page and quit */
			if (remaining_mem < memory[socket]) {
				cur_mem = RTE_MIN(memory[socket],
						hp_info[i].hugepage_sz);
				memory[socket] -= cur_mem;
				total_mem -= cur_mem;
				hp_used[i].num_pages[socket]++;
				total_num_pages++;
				break; /* we are done with this socket */
			}
		}
		/* if we didn't satisfy all memory requirements per socket */
		if (memory[socket] > 0) {
			/* to prevent icc errors */
			requested = (unsigned) (internal_config.socket_mem[socket] /
					0x100000);
			available = requested -
					((unsigned) (memory[socket] / 0x100000));
			RTE_LOG(ERR, EAL, "Not enough memory available on socket %u! "
					"Requested: %uMB, available: %uMB\n", socket,
					requested, available);
			return -1;
		}
	}

	/* if we didn't satisfy total memory requirements */
	if (total_mem > 0) {
		requested = (unsigned) (internal_config.memory / 0x100000);
		available = requested - (unsigned) (total_mem / 0x100000);
		RTE_LOG(ERR, EAL, "Not enough memory available! Requested: %uMB,"
				" available: %uMB\n", requested, available);
		return -1;
	}
	return total_num_pages;
}
static inline size_t
eal_get_hugepage_mem_size(void)
{
	uint64_t size = 0;
	unsigned i, j;

	for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
		struct hugepage_info *hpi = &internal_config.hugepage_info[i];
		if (hpi->hugedir != NULL) {
			for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
				size += hpi->hugepage_sz * hpi->num_pages[j];
			}
		}
	}

	return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
}
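/* Saved SIGBUS disposition, restored once hugepage mapping is finished. */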
static struct sigaction huge_action_old;
static int huge_need_recover;
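/* Install a temporary SIGBUS handler so a refused hugepage fault can be
 * unwound via siglongjmp() instead of killing the process.
 */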
static void
huge_register_sigbus(void)
{
	sigset_t mask;
	struct sigaction action;

	sigemptyset(&mask);
	sigaddset(&mask, SIGBUS);
	action.sa_flags = 0;
	action.sa_mask = mask;
	action.sa_handler = huge_sigbus_handler;

	huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);
}
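/* Restore the original SIGBUS disposition if it was replaced above. */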
static void
huge_recover_sigbus(void)
{
	if (huge_need_recover) {
		sigaction(SIGBUS, &huge_action_old, NULL);
		huge_need_recover = 0;
	}
}
/*
 * Prepare physical memory mapping: fill the configuration structure with
 * this information and return 0 on success.
 *  1. map N huge pages in separate files in hugetlbfs
 *  2. find associated physical addr
 *  3. find associated NUMA socket ID
 *  4. sort all huge pages by physical address
 *  5. remap these N huge pages in the correct order
 *  6. unmap the first mapping
 *  7. fill memsegs in configuration with contiguous zones
 */
int
rte_eal_hugepage_init(void)
{
	struct rte_mem_config *mcfg;
	struct hugepage_file *hugepage = NULL, *tmp_hp = NULL;
	struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
	uint64_t memory[RTE_MAX_NUMA_NODES];
	unsigned hp_offset;
	int i, j, new_memseg;
	int nr_hugefiles, nr_hugepages = 0;
	void *addr;

	test_phys_addrs_available();

	memset(used_hp, 0, sizeof(used_hp));

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;
	/* hugetlbfs can be disabled */
	if (internal_config.no_hugetlbfs) {
		addr = mmap(NULL, internal_config.memory, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
		if (addr == MAP_FAILED) {
			RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
					strerror(errno));
			return -1;
		}
		if (rte_eal_iova_mode() == RTE_IOVA_VA)
			mcfg->memseg[0].iova = (uintptr_t)addr;
		else
			mcfg->memseg[0].iova = RTE_BAD_IOVA;
		mcfg->memseg[0].addr = addr;
		mcfg->memseg[0].hugepage_sz = RTE_PGSIZE_4K;
		mcfg->memseg[0].len = internal_config.memory;
		mcfg->memseg[0].socket_id = 0;
		return 0;
	}
	/* calculate total number of hugepages available. at this point we haven't
	 * yet started sorting them so they all are on socket 0 */
	for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
		/* meanwhile, also initialize used_hp hugepage sizes in used_hp */
		used_hp[i].hugepage_sz = internal_config.hugepage_info[i].hugepage_sz;

		nr_hugepages += internal_config.hugepage_info[i].num_pages[0];
	}

	/*
	 * allocate a memory area for hugepage table.
	 * this isn't shared memory yet. due to the fact that we need some
	 * processing done on these pages, shared memory will be created
	 * at a later stage.
	 */
	tmp_hp = malloc(nr_hugepages * sizeof(struct hugepage_file));
	if (tmp_hp == NULL)
		goto fail;

	memset(tmp_hp, 0, nr_hugepages * sizeof(struct hugepage_file));

	hp_offset = 0; /* where we start the current page size entries */

	huge_register_sigbus();

	/* make a copy of socket_mem, needed for balanced allocation. */
	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
		memory[i] = internal_config.socket_mem[i];
	/* map all hugepages and sort them */
	for (i = 0; i < (int)internal_config.num_hugepage_sizes; i++) {
		unsigned pages_old, pages_new;
		struct hugepage_info *hpi;

		/*
		 * we don't yet mark hugepages as used at this stage, so
		 * we just map all hugepages available to the system
		 * all hugepages are still located on socket 0
		 */
		hpi = &internal_config.hugepage_info[i];

		if (hpi->num_pages[0] == 0)
			continue;

		/* map all hugepages available */
		pages_old = hpi->num_pages[0];
		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
				memory, 1);
		if (pages_new < pages_old) {
			RTE_LOG(DEBUG, EAL,
				"%d not %d hugepages of size %u MB allocated\n",
				pages_new, pages_old,
				(unsigned)(hpi->hugepage_sz / 0x100000));

			int pages = pages_old - pages_new;

			nr_hugepages -= pages;
			hpi->num_pages[0] = pages_new;
			if (pages_new == 0)
				continue;
		}
		if (phys_addrs_available &&
				rte_eal_iova_mode() != RTE_IOVA_VA) {
			/* find physical addresses for each hugepage */
			if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
				RTE_LOG(DEBUG, EAL, "Failed to find phys addr "
					"for %u MB pages\n",
					(unsigned int)(hpi->hugepage_sz / 0x100000));
				goto fail;
			}
		} else {
			/* set physical addresses for each hugepage */
			if (set_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
				RTE_LOG(DEBUG, EAL, "Failed to set phys addr "
					"for %u MB pages\n",
					(unsigned int)(hpi->hugepage_sz / 0x100000));
				goto fail;
			}
		}

		if (find_numasocket(&tmp_hp[hp_offset], hpi) < 0) {
			RTE_LOG(DEBUG, EAL, "Failed to find NUMA socket for %u MB pages\n",
					(unsigned)(hpi->hugepage_sz / 0x100000));
			goto fail;
		}

		qsort(&tmp_hp[hp_offset], hpi->num_pages[0],
		      sizeof(struct hugepage_file), cmp_physaddr);
		/* remap all hugepages */
		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
		    hpi->num_pages[0]) {
			RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
					(unsigned)(hpi->hugepage_sz / 0x100000));
			goto fail;
		}

		/* unmap original mappings */
		if (unmap_all_hugepages_orig(&tmp_hp[hp_offset], hpi) < 0)
			goto fail;

		/* we have processed a num of hugepages of this size, so inc offset */
		hp_offset += hpi->num_pages[0];
	}

	huge_recover_sigbus();

	if (internal_config.memory == 0 && internal_config.force_sockets == 0)
		internal_config.memory = eal_get_hugepage_mem_size();
	nr_hugefiles = nr_hugepages;

	/* clean out the numbers of pages */
	for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++)
		for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
			internal_config.hugepage_info[i].num_pages[j] = 0;

	/* get hugepages for each socket */
	for (i = 0; i < nr_hugefiles; i++) {
		int socket = tmp_hp[i].socket_id;

		/* find a hugepage info with right size and increment num_pages */
		const int nb_hpsizes = RTE_MIN(MAX_HUGEPAGE_SIZES,
				(int)internal_config.num_hugepage_sizes);
		for (j = 0; j < nb_hpsizes; j++) {
			if (tmp_hp[i].size ==
					internal_config.hugepage_info[j].hugepage_sz) {
				internal_config.hugepage_info[j].num_pages[socket]++;
			}
		}
	}

	/* make a copy of socket_mem, needed for number of pages calculation */
	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
		memory[i] = internal_config.socket_mem[i];
	/* calculate final number of pages */
	nr_hugepages = calc_num_pages_per_socket(memory,
			internal_config.hugepage_info, used_hp,
			internal_config.num_hugepage_sizes);

	/* error if not enough memory available */
	if (nr_hugepages < 0)
		goto fail;

	/* reporting in! */
	for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
		for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
			if (used_hp[i].num_pages[j] > 0) {
				RTE_LOG(DEBUG, EAL,
					"Requesting %u pages of size %uMB"
					" from socket %i\n",
					used_hp[i].num_pages[j],
					(unsigned)
					(used_hp[i].hugepage_sz / 0x100000),
					j);
			}
		}
	}
	/* create shared memory */
	hugepage = create_shared_memory(eal_hugepage_info_path(),
			nr_hugefiles * sizeof(struct hugepage_file));

	if (hugepage == NULL) {
		RTE_LOG(ERR, EAL, "Failed to create shared memory!\n");
		goto fail;
	}
	memset(hugepage, 0, nr_hugefiles * sizeof(struct hugepage_file));

	/*
	 * unmap pages that we won't need (looks at used_hp).
	 * also, sets final_va to NULL on pages that were unmapped.
	 */
	if (unmap_unneeded_hugepages(tmp_hp, used_hp,
			internal_config.num_hugepage_sizes) < 0) {
		RTE_LOG(ERR, EAL, "Unmapping and locking hugepages failed!\n");
		goto fail;
	}

	/*
	 * copy stuff from malloc'd hugepage* to the actual shared memory.
	 * this procedure only copies those hugepages that have final_va
	 * not NULL. has overflow protection.
	 */
	if (copy_hugepages_to_shared_mem(hugepage, nr_hugefiles,
			tmp_hp, nr_hugefiles) < 0) {
		RTE_LOG(ERR, EAL, "Copying tables to shared memory failed!\n");
		goto fail;
	}

	/* free the hugepage backing files */
	if (internal_config.hugepage_unlink &&
		unlink_hugepage_files(tmp_hp, internal_config.num_hugepage_sizes) < 0) {
		RTE_LOG(ERR, EAL, "Unlinking hugepage files failed!\n");
		goto fail;
	}

	/* free the temporary hugepage table */
	free(tmp_hp);
	tmp_hp = NULL;
	/* first memseg index shall be 0 after incrementing it below */
	j = -1;
	for (i = 0; i < nr_hugefiles; i++) {
		new_memseg = 0;

		/* if this is a new section, create a new memseg */
		if (i == 0)
			new_memseg = 1;
		else if (hugepage[i].socket_id != hugepage[i-1].socket_id)
			new_memseg = 1;
		else if (hugepage[i].size != hugepage[i-1].size)
			new_memseg = 1;
#ifdef RTE_ARCH_PPC_64
		/* On PPC64 architecture, the mmap always starts from higher
		 * virtual address to lower address. Here, both the physical
		 * address and virtual address are in descending order */
		else if ((hugepage[i-1].physaddr - hugepage[i].physaddr) !=
		    hugepage[i].size)
			new_memseg = 1;
		else if (((unsigned long)hugepage[i-1].final_va -
		    (unsigned long)hugepage[i].final_va) != hugepage[i].size)
			new_memseg = 1;
#else
		else if ((hugepage[i].physaddr - hugepage[i-1].physaddr) !=
		    hugepage[i].size)
			new_memseg = 1;
		else if (((unsigned long)hugepage[i].final_va -
		    (unsigned long)hugepage[i-1].final_va) != hugepage[i].size)
			new_memseg = 1;
#endif

		if (new_memseg) {
			j += 1;
			if (j == RTE_MAX_MEMSEG)
				break;

			mcfg->memseg[j].iova = hugepage[i].physaddr;
			mcfg->memseg[j].addr = hugepage[i].final_va;
			mcfg->memseg[j].len = hugepage[i].size;
			mcfg->memseg[j].socket_id = hugepage[i].socket_id;
			mcfg->memseg[j].hugepage_sz = hugepage[i].size;
		}
		/* continuation of previous memseg */
		else {
#ifdef RTE_ARCH_PPC_64
			/* Use the phy and virt address of the last page as segment
			 * address for IBM Power architecture */
			mcfg->memseg[j].iova = hugepage[i].physaddr;
			mcfg->memseg[j].addr = hugepage[i].final_va;
#endif
			mcfg->memseg[j].len += mcfg->memseg[j].hugepage_sz;
		}
		hugepage[i].memseg_id = j;
	}
	if (i < nr_hugefiles) {
		RTE_LOG(ERR, EAL, "Can only reserve %d pages "
			"from %d requested\n"
			"Current %s=%d is not enough\n"
			"Please either increase it or request less amount "
			"of memory.\n",
			i, nr_hugefiles, RTE_STR(CONFIG_RTE_MAX_MEMSEG),
			RTE_MAX_MEMSEG);
		goto fail;
	}

	munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));

	return 0;

fail:
	huge_recover_sigbus();
	free(tmp_hp);
	if (hugepage != NULL)
		munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));

	return -1;
}
/*
 * uses fstat to report the size of a file on disk
 */
static off_t
getFileSize(int fd)
{
	struct stat st;

	if (fstat(fd, &st) < 0)
		return 0;
	return st.st_size;
}
/*
 * This creates the memory mappings in the secondary process to match that of
 * the primary (server) process. It goes through each memory segment in the
 * DPDK runtime configuration and finds the hugepages which form that segment,
 * mapping them in order to form a contiguous block in the virtual memory space.
 */
int
rte_eal_hugepage_attach(void)
{
	const struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct hugepage_file *hp = NULL;
	unsigned num_hp = 0;
	unsigned i, s = 0; /* s used to track the segment number */
	unsigned max_seg = RTE_MAX_MEMSEG;
	off_t size = 0;
	int fd, fd_hugepage = -1;

	if (aslr_enabled() > 0) {
		RTE_LOG(WARNING, EAL, "WARNING: Address Space Layout Randomization "
				"(ASLR) is enabled in the kernel.\n");
		RTE_LOG(WARNING, EAL, "   This may cause issues with mapping memory "
				"into secondary processes\n");
	}

	test_phys_addrs_available();

	fd_hugepage = open(eal_hugepage_info_path(), O_RDONLY);
	if (fd_hugepage < 0) {
		RTE_LOG(ERR, EAL, "Could not open %s\n",
				eal_hugepage_info_path());
		goto error;
	}
	/* map all segments into memory to make sure we get the addrs */
	for (s = 0; s < RTE_MAX_MEMSEG; ++s) {
		void *base_addr;
		size_t mmap_sz;
		int mmap_flags = 0;

		/*
		 * the first memory segment with len==0 is the one that
		 * follows the last valid segment.
		 */
		if (mcfg->memseg[s].len == 0)
			break;

		/* get identical addresses as the primary process.
		 */
#ifdef RTE_ARCH_PPC_64
		mmap_flags |= MAP_HUGETLB;
#endif
		mmap_sz = mcfg->memseg[s].len;
		base_addr = eal_get_virtual_area(mcfg->memseg[s].addr,
				&mmap_sz, mcfg->memseg[s].hugepage_sz, 0,
				mmap_flags);
		if (base_addr == NULL) {
			max_seg = s;
			if (rte_errno == EADDRNOTAVAIL) {
				RTE_LOG(ERR, EAL, "Could not mmap %zu bytes at [%p] - please use '--base-virtaddr' option\n",
					mcfg->memseg[s].len,
					mcfg->memseg[s].addr);
			} else {
				RTE_LOG(ERR, EAL, "Could not mmap %zu bytes at [%p]: '%s'\n",
					mcfg->memseg[s].len,
					mcfg->memseg[s].addr,
					rte_strerror(rte_errno));
			}
			if (aslr_enabled() > 0) {
				RTE_LOG(ERR, EAL, "It is recommended to "
					"disable ASLR in the kernel "
					"and retry running both primary "
					"and secondary processes\n");
			}
			goto error;
		}
	}
	size = getFileSize(fd_hugepage);
	hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0);
	if (hp == MAP_FAILED) {
		RTE_LOG(ERR, EAL, "Could not mmap %s\n",
				eal_hugepage_info_path());
		goto error;
	}

	num_hp = size / sizeof(struct hugepage_file);
	RTE_LOG(DEBUG, EAL, "Analysing %u files\n", num_hp);
	s = 0;
	while (s < RTE_MAX_MEMSEG && mcfg->memseg[s].len > 0) {
		void *addr, *base_addr;
		uintptr_t offset = 0;
		size_t mapping_size;

		/*
		 * free previously mapped memory so we can map the
		 * hugepages into the space
		 */
		base_addr = mcfg->memseg[s].addr;
		munmap(base_addr, mcfg->memseg[s].len);

		/* find the hugepages for this segment and map them
		 * we don't need to worry about order, as the server sorted the
		 * entries before it did the second mmap of them */
		for (i = 0; i < num_hp && offset < mcfg->memseg[s].len; i++) {
			if (hp[i].memseg_id == (int)s) {
				fd = open(hp[i].filepath, O_RDWR);
				if (fd < 0) {
					RTE_LOG(ERR, EAL, "Could not open %s\n",
						hp[i].filepath);
					goto error;
				}
				mapping_size = hp[i].size;
				addr = mmap(RTE_PTR_ADD(base_addr, offset),
						mapping_size, PROT_READ | PROT_WRITE,
						MAP_SHARED, fd, 0);
				close(fd); /* close file both on success and on failure */
				if (addr == MAP_FAILED ||
						addr != RTE_PTR_ADD(base_addr, offset)) {
					RTE_LOG(ERR, EAL, "Could not mmap %s\n",
						hp[i].filepath);
					goto error;
				}
				offset += mapping_size;
			}
		}
		RTE_LOG(DEBUG, EAL, "Mapped segment %u of size 0x%llx\n", s,
				(unsigned long long)mcfg->memseg[s].len);
		s++;
	}
	/* unmap the hugepage config file, since we are done using it */
	munmap(hp, size);
	close(fd_hugepage);
	return 0;

error:
	for (i = 0; i < max_seg && mcfg->memseg[i].len > 0; i++)
		munmap(mcfg->memseg[i].addr, mcfg->memseg[i].len);
	if (hp != NULL && hp != MAP_FAILED)
		munmap(hp, size);
	if (fd_hugepage >= 0)
		close(fd_hugepage);
	return -1;
}
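/* Report whether hugepage physical addresses are available (see
 * test_phys_addrs_available()).
 */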
int
rte_eal_using_phys_addrs(void)
{
	return phys_addrs_available;
}