/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*   BSD LICENSE
 *
 *   Copyright(c) 2013 6WIND.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define _FILE_OFFSET_BITS 64
#include <errno.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/queue.h>
#include <sys/file.h>
#include <unistd.h>
#include <limits.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <signal.h>
#include <setjmp.h>

#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_string_fns.h>

#include "eal_private.h"
#include "eal_internal_cfg.h"
#include "eal_filesystem.h"
#include "eal_hugepages.h"

#define PFN_MASK_SIZE	8

#ifdef RTE_LIBRTE_XEN_DOM0
int rte_xen_dom0_supported(void)
{
	return internal_config.xen_dom0_support;
}
#endif

/*
 * Huge page mapping under linux
 *
 * To reserve a big contiguous amount of memory, we use the hugepage
 * feature of linux. For that, we need to have hugetlbfs mounted. This
 * code will create many files in this directory (one per page) and
 * map them in virtual memory. For each page, we will retrieve its
 * physical address and remap it in order to have a virtual contiguous
 * zone as well as a physical contiguous zone.
 */

static uint64_t baseaddr_offset;

static unsigned proc_pagemap_readable;

#define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"

static void
test_proc_pagemap_readable(void)
{
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0) {
		RTE_LOG(ERR, EAL,
			"Cannot open /proc/self/pagemap: %s. "
			"virt2phys address translation will not work\n",
			strerror(errno));
		return;
	}

	/* Is readable */
	close(fd);
	proc_pagemap_readable = 1;
}

/* Lock page in physical memory and prevent from swapping. */
int
rte_mem_lock_page(const void *virt)
{
	unsigned long virtual = (unsigned long)virt;
	int page_size = getpagesize();
	unsigned long aligned = (virtual & ~(page_size - 1));

	return mlock((void *)aligned, page_size);
}
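
/*
 * Usage sketch (illustrative, not an API requirement): pinning the page
 * that holds an object keeps its virt2phy translation stable, e.g.
 *
 *	if (rte_mem_lock_page(obj) == 0)
 *		pa = rte_mem_virt2phy(obj);
 *
 * mlock() operates on whole pages, hence the round-down to a page
 * boundary above.
 */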

/*
 * Get physical address of any mapped virtual address in the current process.
 */
phys_addr_t
rte_mem_virt2phy(const void *virtaddr)
{
	int fd;
	uint64_t page, physaddr;
	unsigned long virt_pfn;
	int page_size;
	off_t offset;
	int retval;

	/* when using dom0, /proc/self/pagemap always returns 0, check in
	 * dpdk memory by browsing the memsegs */
	if (rte_xen_dom0_supported()) {
		struct rte_mem_config *mcfg;
		struct rte_memseg *memseg;
		unsigned i;

		mcfg = rte_eal_get_configuration()->mem_config;
		for (i = 0; i < RTE_MAX_MEMSEG; i++) {
			memseg = &mcfg->memseg[i];
			if (memseg->addr == NULL)
				break;
			if (virtaddr >= memseg->addr &&
					virtaddr < RTE_PTR_ADD(memseg->addr,
						memseg->len))
				return memseg->phys_addr +
					RTE_PTR_DIFF(virtaddr, memseg->addr);
		}

		return RTE_BAD_PHYS_ADDR;
	}

	/* Cannot parse /proc/self/pagemap, no need to log errors everywhere */
	if (!proc_pagemap_readable)
		return RTE_BAD_PHYS_ADDR;

	/* standard page size */
	page_size = getpagesize();

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
			__func__, strerror(errno));
		return RTE_BAD_PHYS_ADDR;
	}

	virt_pfn = (unsigned long)virtaddr / page_size;
	offset = sizeof(uint64_t) * virt_pfn;
	if (lseek(fd, offset, SEEK_SET) == (off_t) -1) {
		RTE_LOG(ERR, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
				__func__, strerror(errno));
		close(fd);
		return RTE_BAD_PHYS_ADDR;
	}

	retval = read(fd, &page, PFN_MASK_SIZE);
	close(fd);
	if (retval < 0) {
		RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
				__func__, strerror(errno));
		return RTE_BAD_PHYS_ADDR;
	} else if (retval != PFN_MASK_SIZE) {
		RTE_LOG(ERR, EAL, "%s(): read %d bytes from /proc/self/pagemap "
				"but expected %d:\n",
				__func__, retval, PFN_MASK_SIZE);
		return RTE_BAD_PHYS_ADDR;
	}

	/*
	 * the pfn (page frame number) are bits 0-54 (see
	 * pagemap.txt in linux Documentation)
	 */
	physaddr = ((page & 0x7fffffffffffffULL) * page_size)
		+ ((unsigned long)virtaddr % page_size);

	return physaddr;
}
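
/*
 * Worked example (illustrative): with 4 KB pages, virtaddr 0x7f0000001234
 * has virt_pfn 0x7f0000001 and in-page offset 0x234, so its 8-byte pagemap
 * entry lives at file offset 8 * 0x7f0000001. Masking the entry with
 * 0x7fffffffffffff keeps the 55-bit PFN; multiplying by the page size and
 * adding the in-page offset yields the physical address. See
 * Documentation/vm/pagemap.txt in the kernel tree for the full bit layout.
 */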

/*
 * For each hugepage in hugepg_tbl, fill the physaddr value. We find
 * it by browsing the /proc/self/pagemap special file.
 */
static int
find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
	unsigned i;
	phys_addr_t addr;

	for (i = 0; i < hpi->num_pages[0]; i++) {
		addr = rte_mem_virt2phy(hugepg_tbl[i].orig_va);
		if (addr == RTE_BAD_PHYS_ADDR)
			return -1;
		hugepg_tbl[i].physaddr = addr;
	}
	return 0;
}

/*
 * Check whether address-space layout randomization is enabled in
 * the kernel. This is important for multi-process as it can prevent
 * two processes mapping data to the same virtual address.
 * Returns:
 *    0 - address space randomization disabled
 *    1/2 - address space randomization enabled
 *    negative error code on error
 */
static int
aslr_enabled(void)
{
	char c;
	int retval, fd = open(RANDOMIZE_VA_SPACE_FILE, O_RDONLY);

	if (fd < 0)
		return -errno;
	retval = read(fd, &c, 1);
	close(fd);
	if (retval < 0)
		return -errno;
	if (retval == 0)
		return -EIO;
	switch (c) {
	case '0': return 0;
	case '1': return 1;
	case '2': return 2;
	default: return -EINVAL;
	}
}
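
/*
 * Note: on a stock Linux kernel, randomize_va_space takes 0 (ASLR off),
 * 1 (randomize stack/mmap/VDSO) or 2 (additionally randomize the heap),
 * e.g. "echo 0 > /proc/sys/kernel/randomize_va_space" disables it.
 * Secondary processes depend on reproducing the primary's mappings, which
 * is why this value is checked before attaching.
 */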

/*
 * Try to mmap *size bytes in /dev/zero. If it is successful, return the
 * pointer to the mmap'd area and keep *size unmodified. Else, retry
 * with a smaller zone: decrease *size by hugepage_sz until it reaches
 * 0. In this case, return NULL. Note: this function returns an address
 * which is a multiple of hugepage size.
 */
static void *
get_virtual_area(size_t *size, size_t hugepage_sz)
{
	void *addr;
	int fd;
	long aligned_addr;

	if (internal_config.base_virtaddr != 0) {
		addr = (void *) (uintptr_t) (internal_config.base_virtaddr +
				baseaddr_offset);
	}
	else
		addr = NULL;

	RTE_LOG(DEBUG, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);

	fd = open("/dev/zero", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "Cannot open /dev/zero\n");
		return NULL;
	}
	do {
		addr = mmap(addr,
			(*size) + hugepage_sz, PROT_READ, MAP_PRIVATE, fd, 0);
		if (addr == MAP_FAILED)
			*size -= hugepage_sz;
	} while (addr == MAP_FAILED && *size > 0);

	if (addr == MAP_FAILED) {
		close(fd);
		RTE_LOG(ERR, EAL, "Cannot get a virtual area: %s\n",
			strerror(errno));
		return NULL;
	}

	munmap(addr, (*size) + hugepage_sz);
	close(fd);

	/* align addr to a huge page size boundary */
	aligned_addr = (long)addr;
	aligned_addr += (hugepage_sz - 1);
	aligned_addr &= (~(hugepage_sz - 1));
	addr = (void *)(aligned_addr);

	RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",
		addr, *size);

	/* increment offset */
	baseaddr_offset += *size;

	return addr;
}
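
/*
 * Alignment example (illustrative): with 2 MB hugepages, an address such
 * as 0x7f3a00001000 is rounded up to 0x7f3a00200000 by adding
 * hugepage_sz - 1 and masking off the low bits. Mapping *size +
 * hugepage_sz bytes up front guarantees the aligned window still covers
 * *size bytes.
 */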

static sigjmp_buf huge_jmpenv;

static void huge_sigbus_handler(int signo __rte_unused)
{
	siglongjmp(huge_jmpenv, 1);
}

/* Put setjmp into a wrap method to avoid compiling error. Any non-volatile,
 * non-static local variable in the stack frame calling sigsetjmp might be
 * clobbered by a call to longjmp.
 */
static int huge_wrap_sigsetjmp(void)
{
	return sigsetjmp(huge_jmpenv, 1);
}
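
/*
 * Canonical use of this pair (see map_all_hugepages below): install
 * huge_sigbus_handler, then guard each first-touch write with
 *
 *	if (huge_wrap_sigsetjmp()) {
 *		... the write faulted, clean up this page ...
 *	}
 *	*(int *)virtaddr = 0;	/+ first touch; may raise SIGBUS +/
 *
 * so hugetlb over-commit surfaces as a recoverable jump instead of a
 * fatal signal.
 */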

/*
 * Mmap all hugepages of hugepage table: it first opens a file in
 * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
 * virtual address is stored in hugepg_tbl[i].orig_va, else it is stored
 * in hugepg_tbl[i].final_va. The second mapping (when orig is 0) tries to
 * map contiguous physical blocks in contiguous virtual blocks.
 */
static unsigned
map_all_hugepages(struct hugepage_file *hugepg_tbl,
		struct hugepage_info *hpi, int orig)
{
	int fd;
	unsigned i;
	void *virtaddr;
	void *vma_addr = NULL;
	size_t vma_len = 0;

#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
	RTE_SET_USED(vma_len);
#endif

	for (i = 0; i < hpi->num_pages[0]; i++) {
		uint64_t hugepage_sz = hpi->hugepage_sz;

		if (orig) {
			hugepg_tbl[i].file_id = i;
			hugepg_tbl[i].size = hugepage_sz;
#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
			eal_get_hugefile_temp_path(hugepg_tbl[i].filepath,
					sizeof(hugepg_tbl[i].filepath), hpi->hugedir,
					hugepg_tbl[i].file_id);
#else
			eal_get_hugefile_path(hugepg_tbl[i].filepath,
					sizeof(hugepg_tbl[i].filepath), hpi->hugedir,
					hugepg_tbl[i].file_id);
#endif
			hugepg_tbl[i].filepath[sizeof(hugepg_tbl[i].filepath) - 1] = '\0';
		}
#ifndef RTE_ARCH_64
		/* for 32-bit systems, don't remap 1G and 16G pages, just reuse
		 * original map address as final map address.
		 */
		else if ((hugepage_sz == RTE_PGSIZE_1G)
			|| (hugepage_sz == RTE_PGSIZE_16G)) {
			hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
			hugepg_tbl[i].orig_va = NULL;
			continue;
		}
#endif

#ifndef RTE_EAL_SINGLE_FILE_SEGMENTS
		else if (vma_len == 0) {
			unsigned j, num_pages;

			/* reserve a virtual area for next contiguous
			 * physical block: count the number of
			 * contiguous physical pages. */
			for (j = i+1; j < hpi->num_pages[0] ; j++) {
#ifdef RTE_ARCH_PPC_64
				/* The physical addresses are sorted in
				 * descending order on PPC64 */
				if (hugepg_tbl[j].physaddr !=
				    hugepg_tbl[j-1].physaddr - hugepage_sz)
					break;
#else
				if (hugepg_tbl[j].physaddr !=
				    hugepg_tbl[j-1].physaddr + hugepage_sz)
					break;
#endif
			}
			num_pages = j - i;
			vma_len = num_pages * hugepage_sz;

			/* get the biggest virtual memory area up to
			 * vma_len. If it fails, vma_addr is NULL, so
			 * let the kernel provide the address. */
			vma_addr = get_virtual_area(&vma_len, hpi->hugepage_sz);
			if (vma_addr == NULL)
				vma_len = hugepage_sz;
		}
#endif

		/* try to create hugepage file */
		fd = open(hugepg_tbl[i].filepath, O_CREAT | O_RDWR, 0600);
		if (fd < 0) {
			RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
					strerror(errno));
			return i;
		}

		/* map the segment, and populate page tables,
		 * the kernel fills this segment with zeros */
		virtaddr = mmap(vma_addr, hugepage_sz, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_POPULATE, fd, 0);
		if (virtaddr == MAP_FAILED) {
			RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
					strerror(errno));
			close(fd);
			return i;
		}

		if (orig) {
			hugepg_tbl[i].orig_va = virtaddr;
		}
		else {
			hugepg_tbl[i].final_va = virtaddr;
		}

		if (orig) {
			/* In linux, hugetlb limitations, like cgroup, are
			 * enforced at fault time instead of mmap(), even
			 * with the option of MAP_POPULATE. Kernel will send
			 * a SIGBUS signal. To avoid to be killed, save stack
			 * environment here, if SIGBUS happens, we can jump
			 * back to here.
			 */
			if (huge_wrap_sigsetjmp()) {
				RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more "
					"hugepages of size %u MB\n",
					(unsigned)(hugepage_sz / 0x100000));
				munmap(virtaddr, hugepage_sz);
				close(fd);
				unlink(hugepg_tbl[i].filepath);
				return i;
			}
			*(int *)virtaddr = 0;
		}

		/* set shared flock on the file. */
		if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
			RTE_LOG(DEBUG, EAL, "%s(): Locking file failed: %s\n",
				__func__, strerror(errno));
			close(fd);
			return i;
		}

		close(fd);

		vma_addr = (char *)vma_addr + hugepage_sz;
		vma_len -= hugepage_sz;
	}

	return i;
}
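
/*
 * Design note: every mapped hugepage file is left holding a shared
 * flock(). A concurrently starting DPDK process that wants to clean the
 * hugetlbfs mount can attempt an exclusive lock to tell files that are
 * still in use (lock attempt fails) from stale ones that are safe to
 * delete.
 */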

#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS

/*
 * Remaps all hugepages into single file segments
 */
static int
remap_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
	int fd;
	unsigned i = 0, j, num_pages, page_idx = 0;
	void *vma_addr = NULL, *old_addr = NULL, *page_addr = NULL;
	size_t vma_len = 0;
	size_t hugepage_sz = hpi->hugepage_sz;
	size_t total_size, offset;
	char filepath[MAX_HUGEPAGE_PATH];
	phys_addr_t physaddr;
	int socket;

	while (i < hpi->num_pages[0]) {

#ifndef RTE_ARCH_64
		/* for 32-bit systems, don't remap 1G pages and 16G pages,
		 * just reuse original map address as final map address.
		 */
		if ((hugepage_sz == RTE_PGSIZE_1G)
			|| (hugepage_sz == RTE_PGSIZE_16G)) {
			hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
			hugepg_tbl[i].orig_va = NULL;
			i++;
			continue;
		}
#endif

		/* reserve a virtual area for next contiguous
		 * physical block: count the number of
		 * contiguous physical pages. */
		for (j = i+1; j < hpi->num_pages[0] ; j++) {
#ifdef RTE_ARCH_PPC_64
			/* The physical addresses are sorted in descending
			 * order on PPC64 */
			if (hugepg_tbl[j].physaddr !=
				hugepg_tbl[j-1].physaddr - hugepage_sz)
				break;
#else
			if (hugepg_tbl[j].physaddr !=
				hugepg_tbl[j-1].physaddr + hugepage_sz)
				break;
#endif
		}
		num_pages = j - i;
		vma_len = num_pages * hugepage_sz;

		socket = hugepg_tbl[i].socket_id;

		/* get the biggest virtual memory area up to
		 * vma_len. If it fails, vma_addr is NULL, so
		 * let the kernel provide the address. */
		vma_addr = get_virtual_area(&vma_len, hpi->hugepage_sz);

		/* If we can't find a big enough virtual area, work out how many pages
		 * we are going to get */
		if (vma_addr == NULL)
			j = i + 1;
		else if (vma_len != num_pages * hugepage_sz) {
			num_pages = vma_len / hugepage_sz;
			j = i + num_pages;
		}

		hugepg_tbl[page_idx].file_id = page_idx;
		eal_get_hugefile_path(filepath,
				sizeof(filepath),
				hpi->hugedir,
				hugepg_tbl[page_idx].file_id);

		/* try to create hugepage file */
		fd = open(filepath, O_CREAT | O_RDWR, 0600);
		if (fd < 0) {
			RTE_LOG(ERR, EAL, "%s(): open failed: %s\n", __func__, strerror(errno));
			return -1;
		}

		total_size = 0;
		for (; i < j; i++) {

			/* unmap current segment */
			if (total_size > 0)
				munmap(vma_addr, total_size);

			/* unmap original page */
			munmap(hugepg_tbl[i].orig_va, hugepage_sz);
			unlink(hugepg_tbl[i].filepath);

			total_size += hugepage_sz;

			old_addr = vma_addr;

			/* map new, bigger segment, and populate page tables,
			 * the kernel fills this segment with zeros */
			vma_addr = mmap(vma_addr, total_size,
					PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, 0);

			if (vma_addr == MAP_FAILED || vma_addr != old_addr) {
				RTE_LOG(ERR, EAL, "%s(): mmap failed: %s\n", __func__, strerror(errno));
				close(fd);
				return -1;
			}
		}

		/* set shared flock on the file. */
		if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
			RTE_LOG(ERR, EAL, "%s(): Locking file failed: %s\n",
				__func__, strerror(errno));
			close(fd);
			return -1;
		}

		snprintf(hugepg_tbl[page_idx].filepath, MAX_HUGEPAGE_PATH, "%s",
				filepath);

		physaddr = rte_mem_virt2phy(vma_addr);

		if (physaddr == RTE_BAD_PHYS_ADDR) {
			close(fd);
			return -1;
		}

		hugepg_tbl[page_idx].final_va = vma_addr;
		hugepg_tbl[page_idx].physaddr = physaddr;
		hugepg_tbl[page_idx].repeated = num_pages;
		hugepg_tbl[page_idx].socket_id = socket;

		close(fd);

		/* verify the memory segment - that is, check that every VA corresponds
		 * to the physical address we expect to see
		 */
		for (offset = 0; offset < vma_len; offset += hugepage_sz) {
			uint64_t expected_physaddr;

			expected_physaddr = hugepg_tbl[page_idx].physaddr + offset;
			page_addr = RTE_PTR_ADD(vma_addr, offset);
			physaddr = rte_mem_virt2phy(page_addr);

			if (physaddr != expected_physaddr) {
				RTE_LOG(ERR, EAL, "Segment sanity check failed: wrong physaddr "
						"at %p (offset 0x%" PRIx64 ": 0x%" PRIx64
						" (expected 0x%" PRIx64 ")\n",
						page_addr, (uint64_t)offset, physaddr,
						expected_physaddr);
				return -1;
			}
		}

		page_idx++;
	}

	/* zero out the rest */
	memset(&hugepg_tbl[page_idx], 0,
		(hpi->num_pages[0] - page_idx) * sizeof(struct hugepage_file));

	return page_idx;
}

#else /* RTE_EAL_SINGLE_FILE_SEGMENTS=n */

/* Unmap all hugepages from original mapping */
static int
unmap_all_hugepages_orig(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
	unsigned i;

	for (i = 0; i < hpi->num_pages[0]; i++) {
		if (hugepg_tbl[i].orig_va) {
			munmap(hugepg_tbl[i].orig_va, hpi->hugepage_sz);
			hugepg_tbl[i].orig_va = NULL;
		}
	}
	return 0;
}
#endif /* RTE_EAL_SINGLE_FILE_SEGMENTS */

/*
 * Parse /proc/self/numa_maps to get the NUMA socket ID for each huge
 * page.
 */
static int
find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
	int socket_id;
	char *end, *nodestr;
	unsigned i, hp_count = 0;
	uint64_t virt_addr;
	char buf[BUFSIZ];
	char hugedir_str[PATH_MAX];
	FILE *f;

	f = fopen("/proc/self/numa_maps", "r");
	if (f == NULL) {
		RTE_LOG(NOTICE, EAL, "cannot open /proc/self/numa_maps,"
				" consider that all memory is in socket_id 0\n");
		return 0;
	}

	snprintf(hugedir_str, sizeof(hugedir_str),
			"%s/%s", hpi->hugedir, internal_config.hugefile_prefix);

	/* parse numa map */
	while (fgets(buf, sizeof(buf), f) != NULL) {

		/* ignore non huge page */
		if (strstr(buf, " huge ") == NULL &&
				strstr(buf, hugedir_str) == NULL)
			continue;

		/* get zone addr */
		virt_addr = strtoull(buf, &end, 16);
		if (virt_addr == 0 || end == buf) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}

		/* get node id (socket id) */
		nodestr = strstr(buf, " N");
		if (nodestr == NULL) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}

		/* get node id */
		nodestr += 2;
		end = strstr(nodestr, "=");
		if (end == NULL) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}
		end[0] = '\0';
		end = NULL;

		socket_id = strtoul(nodestr, &end, 0);
		if ((nodestr[0] == '\0') || (end == NULL) || (*end != '\0')) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}

		/* if we find this page in our mappings, set socket_id */
		for (i = 0; i < hpi->num_pages[0]; i++) {
			void *va = (void *)(unsigned long)virt_addr;
			if (hugepg_tbl[i].orig_va == va) {
				hugepg_tbl[i].socket_id = socket_id;
				hp_count++;
			}
		}
	}

	if (hp_count < hpi->num_pages[0])
		goto error;

	fclose(f);
	return 0;

error:
	fclose(f);
	return -1;
}
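
/*
 * Sample /proc/self/numa_maps line this parser expects (illustrative):
 *
 *	7f2c40000000 default file=/mnt/huge/rtemap_0 huge dirty=1 N1=1
 *
 * The leading hex field is the VMA start address and the " N<node>="
 * token carries the NUMA node the pages were faulted on.
 */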

/* Compare physaddrs of two hugepage files, for sorting with qsort(). */
static int
cmp_physaddr(const void *a, const void *b)
{
#ifndef RTE_ARCH_PPC_64
	const struct hugepage_file *p1 = (const struct hugepage_file *)a;
	const struct hugepage_file *p2 = (const struct hugepage_file *)b;
#else
	/* PowerPC needs memory sorted in reverse order from x86 */
	const struct hugepage_file *p1 = (const struct hugepage_file *)b;
	const struct hugepage_file *p2 = (const struct hugepage_file *)a;
#endif
	if (p1->physaddr < p2->physaddr)
		return -1;
	else if (p1->physaddr > p2->physaddr)
		return 1;
	else
		return 0;
}
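
/*
 * Note: on PPC64 the operands are swapped so qsort() leaves the table
 * sorted by descending physical address, matching the top-down mmap
 * layout on that architecture (see the PPC64 branches in
 * map_all_hugepages and rte_eal_hugepage_init).
 */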

/*
 * Uses mmap to create a shared memory area for storage of data
 * Used in this file to store the hugepage file map on disk
 */
static void *
create_shared_memory(const char *filename, const size_t mem_size)
{
	void *retval;
	int fd = open(filename, O_CREAT | O_RDWR, 0666);

	if (fd < 0)
		return NULL;
	if (ftruncate(fd, mem_size) < 0) {
		close(fd);
		return NULL;
	}
	retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);
	return retval;
}

/*
 * this copies *active* hugepages from one hugepage table to another.
 * destination is typically the shared memory.
 */
static int
copy_hugepages_to_shared_mem(struct hugepage_file *dst, int dest_size,
		const struct hugepage_file *src, int src_size)
{
	int src_pos, dst_pos = 0;

	for (src_pos = 0; src_pos < src_size; src_pos++) {
		if (src[src_pos].final_va != NULL) {
			/* error on overflow attempt */
			if (dst_pos == dest_size)
				return -1;
			memcpy(&dst[dst_pos], &src[src_pos],
				sizeof(struct hugepage_file));
			dst_pos++;
		}
	}
	return 0;
}

/* unlink the hugetlbfs backing files of all active hugepages */
static int
unlink_hugepage_files(struct hugepage_file *hugepg_tbl,
		unsigned num_hp_info)
{
	unsigned socket, size;
	int page, nrpages = 0;

	/* get total number of hugepages */
	for (size = 0; size < num_hp_info; size++)
		for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
			nrpages +=
			internal_config.hugepage_info[size].num_pages[socket];

	for (page = 0; page < nrpages; page++) {
		struct hugepage_file *hp = &hugepg_tbl[page];

		if (hp->final_va != NULL && unlink(hp->filepath)) {
			RTE_LOG(WARNING, EAL, "%s(): Removing %s failed: %s\n",
				__func__, hp->filepath, strerror(errno));
		}
	}
	return 0;
}

/*
 * unmaps hugepages that are not going to be used. since we originally allocate
 * ALL hugepages (not just those we need), additional unmapping needs to be done.
 */
static int
unmap_unneeded_hugepages(struct hugepage_file *hugepg_tbl,
		struct hugepage_info *hpi,
		unsigned num_hp_info)
{
	unsigned socket, size;
	int page, nrpages = 0;

	/* get total number of hugepages */
	for (size = 0; size < num_hp_info; size++)
		for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
			nrpages += internal_config.hugepage_info[size].num_pages[socket];

	for (size = 0; size < num_hp_info; size++) {
		for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
			unsigned pages_found = 0;

			/* traverse until we have unmapped all the unused pages */
			for (page = 0; page < nrpages; page++) {
				struct hugepage_file *hp = &hugepg_tbl[page];

#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
				/* if this page was already cleared */
				if (hp->final_va == NULL)
					continue;
#endif

				/* find a page that matches the criteria */
				if ((hp->size == hpi[size].hugepage_sz) &&
						(hp->socket_id == (int) socket)) {

					/* if we skipped enough pages, unmap the rest */
					if (pages_found == hpi[size].num_pages[socket]) {
						uint64_t unmap_len;

#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
						unmap_len = hp->size * hp->repeated;
#else
						unmap_len = hp->size;
#endif

						/* get start addr and len of the remaining segment */
						munmap(hp->final_va, (size_t) unmap_len);

						hp->final_va = NULL;
						if (unlink(hp->filepath) == -1) {
							RTE_LOG(ERR, EAL, "%s(): Removing %s failed: %s\n",
									__func__, hp->filepath, strerror(errno));
							return -1;
						}
					}
#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
					/* else, check how much do we need to map */
					else {
						int nr_pg_left =
							hpi[size].num_pages[socket] - pages_found;

						/* if we need enough memory to fit into the segment */
						if (hp->repeated <= nr_pg_left) {
							pages_found += hp->repeated;
						}
						/* truncate the segment */
						else {
							uint64_t final_size = nr_pg_left * hp->size;
							uint64_t seg_size = hp->repeated * hp->size;

							void *unmap_va = RTE_PTR_ADD(hp->final_va,
									final_size);
							int fd;

							munmap(unmap_va, seg_size - final_size);

							fd = open(hp->filepath, O_RDWR);
							if (fd < 0) {
								RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
										hp->filepath, strerror(errno));
								return -1;
							}
							if (ftruncate(fd, final_size) < 0) {
								RTE_LOG(ERR, EAL, "Cannot truncate %s: %s\n",
										hp->filepath, strerror(errno));
								close(fd);
								return -1;
							}
							close(fd);

							pages_found += nr_pg_left;
							hp->repeated = nr_pg_left;
						}
					}
#else
					/* else, lock the page and skip */
					else
						pages_found++;
#endif

				} /* match page */
			} /* foreach page */
		} /* foreach socket */
	} /* foreach pagesize */

	return 0;
}

static inline uint64_t
get_socket_mem_size(int socket)
{
	uint64_t size = 0;
	unsigned i;

	for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
		struct hugepage_info *hpi = &internal_config.hugepage_info[i];
		if (hpi->hugedir != NULL)
			size += hpi->hugepage_sz * hpi->num_pages[socket];
	}

	return size;
}

/*
 * This function is a NUMA-aware equivalent of calc_num_pages.
 * It takes in the list of hugepage sizes and the
 * number of pages thereof, and calculates the best number of
 * pages of each size to fulfill the request for <memory> ram
 */
static int
calc_num_pages_per_socket(uint64_t *memory,
		struct hugepage_info *hp_info,
		struct hugepage_info *hp_used,
		unsigned num_hp_info)
{
	unsigned socket, j, i = 0;
	unsigned requested, available;
	int total_num_pages = 0;
	uint64_t remaining_mem, cur_mem;
	uint64_t total_mem = internal_config.memory;

	if (num_hp_info == 0)
		return -1;

	/* if specific memory amounts per socket weren't requested */
	if (internal_config.force_sockets == 0) {
		int cpu_per_socket[RTE_MAX_NUMA_NODES];
		size_t default_size, total_size;
		unsigned lcore_id;

		/* Compute number of cores per socket */
		memset(cpu_per_socket, 0, sizeof(cpu_per_socket));
		RTE_LCORE_FOREACH(lcore_id) {
			cpu_per_socket[rte_lcore_to_socket_id(lcore_id)]++;
		}

		/*
		 * Automatically spread requested memory amongst detected sockets according
		 * to number of cores from cpu mask present on each socket
		 */
		total_size = internal_config.memory;
		for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {

			/* Set memory amount per socket */
			default_size = (internal_config.memory * cpu_per_socket[socket])
					/ rte_lcore_count();

			/* Limit to maximum available memory on socket */
			default_size = RTE_MIN(default_size, get_socket_mem_size(socket));

			/* Update sizes */
			memory[socket] = default_size;
			total_size -= default_size;
		}

		/*
		 * If some memory is remaining, try to allocate it by getting all
		 * available memory from sockets, one after the other
		 */
		for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {
			/* take whatever is available */
			default_size = RTE_MIN(get_socket_mem_size(socket) - memory[socket],
					total_size);

			/* Update sizes */
			memory[socket] += default_size;
			total_size -= default_size;
		}
	}

	for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_mem != 0; socket++) {
		/* skips if the memory on specific socket wasn't requested */
		for (i = 0; i < num_hp_info && memory[socket] != 0; i++) {
			hp_used[i].hugedir = hp_info[i].hugedir;
			hp_used[i].num_pages[socket] = RTE_MIN(
					memory[socket] / hp_info[i].hugepage_sz,
					hp_info[i].num_pages[socket]);

			cur_mem = hp_used[i].num_pages[socket] *
					hp_used[i].hugepage_sz;

			memory[socket] -= cur_mem;
			total_mem -= cur_mem;

			total_num_pages += hp_used[i].num_pages[socket];

			/* check if we have met all memory requests */
			if (memory[socket] == 0)
				break;

			/* check if we have any more pages left at this size, if so
			 * move on to next size */
			if (hp_used[i].num_pages[socket] == hp_info[i].num_pages[socket])
				continue;
			/* At this point we know that there are more pages available that are
			 * bigger than the memory we want, so let's see if we can get enough
			 * from other page sizes.
			 */
			remaining_mem = 0;
			for (j = i+1; j < num_hp_info; j++)
				remaining_mem += hp_info[j].hugepage_sz *
					hp_info[j].num_pages[socket];

			/* is there enough other memory, if not allocate another page and quit */
			if (remaining_mem < memory[socket]) {
				cur_mem = RTE_MIN(memory[socket],
						hp_info[i].hugepage_sz);
				memory[socket] -= cur_mem;
				total_mem -= cur_mem;
				hp_used[i].num_pages[socket]++;
				total_num_pages++;
				break; /* we are done with this socket*/
			}
		}
		/* if we didn't satisfy all memory requirements per socket */
		if (memory[socket] > 0) {
			/* to prevent icc errors */
			requested = (unsigned) (internal_config.socket_mem[socket] /
					0x100000);
			available = requested -
					((unsigned) (memory[socket] / 0x100000));
			RTE_LOG(ERR, EAL, "Not enough memory available on socket %u! "
					"Requested: %uMB, available: %uMB\n", socket,
					requested, available);
			return -1;
		}
	}

	/* if we didn't satisfy total memory requirements */
	if (total_mem > 0) {
		requested = (unsigned) (internal_config.memory / 0x100000);
		available = requested - (unsigned) (total_mem / 0x100000);
		RTE_LOG(ERR, EAL, "Not enough memory available! Requested: %uMB,"
				" available: %uMB\n", requested, available);
		return -1;
	}
	return total_num_pages;
}
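
/*
 * Example of the spread logic above (illustrative): with -m 2048, no
 * --socket-mem, and six lcores split 4/2 between sockets 0 and 1, the
 * first pass requests 2048 * 4/6 = 1365 MB on socket 0 and
 * 2048 * 2/6 = 682 MB on socket 1; the 1 MB lost to integer division is
 * then taken from the first socket that still has spare hugepage memory.
 */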

static inline size_t
eal_get_hugepage_mem_size(void)
{
	uint64_t size = 0;
	unsigned i, j;

	for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
		struct hugepage_info *hpi = &internal_config.hugepage_info[i];
		if (hpi->hugedir != NULL) {
			for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
				size += hpi->hugepage_sz * hpi->num_pages[j];
			}
		}
	}

	return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
}

static struct sigaction huge_action_old;
static int huge_need_recover;

static void
huge_register_sigbus(void)
{
	sigset_t mask;
	struct sigaction action;

	sigemptyset(&mask);
	sigaddset(&mask, SIGBUS);
	action.sa_flags = 0;
	action.sa_mask = mask;
	action.sa_handler = huge_sigbus_handler;

	huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);
}

static void
huge_recover_sigbus(void)
{
	if (huge_need_recover) {
		sigaction(SIGBUS, &huge_action_old, NULL);
		huge_need_recover = 0;
	}
}

/*
 * Prepare physical memory mapping: fill the configuration structure with
 * this info and return 0 on success.
 *  1. map N huge pages in separate files in hugetlbfs
 *  2. find associated physical addr
 *  3. find associated NUMA socket ID
 *  4. sort all huge pages by physical address
 *  5. remap these N huge pages in the correct order
 *  6. unmap the first mapping
 *  7. fill memsegs in configuration with contiguous zones
 */
int
rte_eal_hugepage_init(void)
{
	struct rte_mem_config *mcfg;
	struct hugepage_file *hugepage = NULL, *tmp_hp = NULL;
	struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];

	uint64_t memory[RTE_MAX_NUMA_NODES];

	unsigned hp_offset;
	int i, j, new_memseg;
	int nr_hugefiles, nr_hugepages = 0;
	void *addr;
#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
	int new_pages_count[MAX_HUGEPAGE_SIZES];
#endif

	test_proc_pagemap_readable();

	memset(used_hp, 0, sizeof(used_hp));

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* hugetlbfs can be disabled */
	if (internal_config.no_hugetlbfs) {
		addr = mmap(NULL, internal_config.memory, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
		if (addr == MAP_FAILED) {
			RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
					strerror(errno));
			return -1;
		}
		mcfg->memseg[0].phys_addr = (phys_addr_t)(uintptr_t)addr;
		mcfg->memseg[0].addr = addr;
		mcfg->memseg[0].hugepage_sz = RTE_PGSIZE_4K;
		mcfg->memseg[0].len = internal_config.memory;
		mcfg->memseg[0].socket_id = 0;
		return 0;
	}
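
	/*
	 * Note: in the no-hugetlbfs path above, the "physical" address
	 * recorded for the single memseg is simply the virtual address of
	 * the anonymous mapping, so it is only meaningful for functional
	 * testing, not for devices doing real DMA.
	 */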

	/* check if app runs on Xen Dom0 */
	if (internal_config.xen_dom0_support) {
#ifdef RTE_LIBRTE_XEN_DOM0
		/* use dom0_mm kernel driver to init memory */
		if (rte_xen_dom0_memory_init() < 0)
			return -1;
		else
			return 0;
#endif
	}

	/* calculate total number of hugepages available. at this point we haven't
	 * yet started sorting them so they all are on socket 0 */
	for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
		/* meanwhile, also initialize used_hp hugepage sizes in used_hp */
		used_hp[i].hugepage_sz = internal_config.hugepage_info[i].hugepage_sz;

		nr_hugepages += internal_config.hugepage_info[i].num_pages[0];
	}

	/*
	 * allocate a memory area for hugepage table.
	 * this isn't shared memory yet. due to the fact that we need some
	 * processing done on these pages, shared memory will be created
	 * at a later stage.
	 */
	tmp_hp = malloc(nr_hugepages * sizeof(struct hugepage_file));
	if (tmp_hp == NULL)
		goto fail;

	memset(tmp_hp, 0, nr_hugepages * sizeof(struct hugepage_file));

	hp_offset = 0; /* where we start the current page size entries */

	huge_register_sigbus();

	/* map all hugepages and sort them */
	for (i = 0; i < (int)internal_config.num_hugepage_sizes; i++) {
		unsigned pages_old, pages_new;
		struct hugepage_info *hpi;

		/*
		 * we don't yet mark hugepages as used at this stage, so
		 * we just map all hugepages available to the system
		 * all hugepages are still located on socket 0
		 */
		hpi = &internal_config.hugepage_info[i];

		if (hpi->num_pages[0] == 0)
			continue;

		/* map all hugepages available */
		pages_old = hpi->num_pages[0];
		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
		if (pages_new < pages_old) {
#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
			RTE_LOG(ERR, EAL,
				"%d not %d hugepages of size %u MB allocated\n",
				pages_new, pages_old,
				(unsigned)(hpi->hugepage_sz / 0x100000));
			goto fail;
#else
			RTE_LOG(DEBUG, EAL,
				"%d not %d hugepages of size %u MB allocated\n",
				pages_new, pages_old,
				(unsigned)(hpi->hugepage_sz / 0x100000));

			int pages = pages_old - pages_new;

			nr_hugepages -= pages;
			hpi->num_pages[0] = pages_new;
			if (pages_new == 0)
				continue;
#endif
		}

		/* find physical addresses and sockets for each hugepage */
		if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
			RTE_LOG(DEBUG, EAL, "Failed to find phys addr for %u MB pages\n",
					(unsigned)(hpi->hugepage_sz / 0x100000));
			goto fail;
		}

		if (find_numasocket(&tmp_hp[hp_offset], hpi) < 0) {
			RTE_LOG(DEBUG, EAL, "Failed to find NUMA socket for %u MB pages\n",
					(unsigned)(hpi->hugepage_sz / 0x100000));
			goto fail;
		}

		qsort(&tmp_hp[hp_offset], hpi->num_pages[0],
		      sizeof(struct hugepage_file), cmp_physaddr);

#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
		/* remap all hugepages into single file segments */
		new_pages_count[i] = remap_all_hugepages(&tmp_hp[hp_offset], hpi);
		if (new_pages_count[i] < 0) {
			RTE_LOG(DEBUG, EAL, "Failed to remap %u MB pages\n",
					(unsigned)(hpi->hugepage_sz / 0x100000));
			goto fail;
		}

		/* we have processed a num of hugepages of this size, so inc offset */
		hp_offset += new_pages_count[i];
#else
		/* remap all hugepages */
		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
		    hpi->num_pages[0]) {
			RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
					(unsigned)(hpi->hugepage_sz / 0x100000));
			goto fail;
		}

		/* unmap original mappings */
		if (unmap_all_hugepages_orig(&tmp_hp[hp_offset], hpi) < 0)
			goto fail;

		/* we have processed a num of hugepages of this size, so inc offset */
		hp_offset += hpi->num_pages[0];
#endif
	}

	huge_recover_sigbus();

	if (internal_config.memory == 0 && internal_config.force_sockets == 0)
		internal_config.memory = eal_get_hugepage_mem_size();

#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
	nr_hugefiles = 0;
	for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
		nr_hugefiles += new_pages_count[i];
	}
#else
	nr_hugefiles = nr_hugepages;
#endif

	/* clean out the numbers of pages */
	for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++)
		for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
			internal_config.hugepage_info[i].num_pages[j] = 0;

	/* get hugepages for each socket */
	for (i = 0; i < nr_hugefiles; i++) {
		int socket = tmp_hp[i].socket_id;

		/* find a hugepage info with right size and increment num_pages */
		const int nb_hpsizes = RTE_MIN(MAX_HUGEPAGE_SIZES,
				(int)internal_config.num_hugepage_sizes);
		for (j = 0; j < nb_hpsizes; j++) {
			if (tmp_hp[i].size ==
					internal_config.hugepage_info[j].hugepage_sz) {
#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
				internal_config.hugepage_info[j].num_pages[socket] +=
					tmp_hp[i].repeated;
#else
				internal_config.hugepage_info[j].num_pages[socket]++;
#endif
			}
		}
	}

	/* make a copy of socket_mem, needed for number of pages calculation */
	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
		memory[i] = internal_config.socket_mem[i];

	/* calculate final number of pages */
	nr_hugepages = calc_num_pages_per_socket(memory,
			internal_config.hugepage_info, used_hp,
			internal_config.num_hugepage_sizes);

	/* error if not enough memory available */
	if (nr_hugepages < 0)
		goto fail;

	/* report the pages that will actually be used per socket and size */
	for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
		for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
			if (used_hp[i].num_pages[j] > 0) {
				RTE_LOG(DEBUG, EAL,
					"Requesting %u pages of size %uMB"
					" from socket %i\n",
					used_hp[i].num_pages[j],
					(unsigned)
					(used_hp[i].hugepage_sz / 0x100000),
					j);
			}
		}
	}

	/* create shared memory */
	hugepage = create_shared_memory(eal_hugepage_info_path(),
			nr_hugefiles * sizeof(struct hugepage_file));

	if (hugepage == NULL) {
		RTE_LOG(ERR, EAL, "Failed to create shared memory!\n");
		goto fail;
	}
	memset(hugepage, 0, nr_hugefiles * sizeof(struct hugepage_file));

	/*
	 * unmap pages that we won't need (looks at used_hp).
	 * also, sets final_va to NULL on pages that were unmapped.
	 */
	if (unmap_unneeded_hugepages(tmp_hp, used_hp,
			internal_config.num_hugepage_sizes) < 0) {
		RTE_LOG(ERR, EAL, "Unmapping and locking hugepages failed!\n");
		goto fail;
	}

	/*
	 * copy stuff from malloc'd hugepage* to the actual shared memory.
	 * this procedure only copies those hugepages that have final_va
	 * not NULL. has overflow protection.
	 */
	if (copy_hugepages_to_shared_mem(hugepage, nr_hugefiles,
			tmp_hp, nr_hugefiles) < 0) {
		RTE_LOG(ERR, EAL, "Copying tables to shared memory failed!\n");
		goto fail;
	}

	/* free the hugepage backing files */
	if (internal_config.hugepage_unlink &&
		unlink_hugepage_files(tmp_hp, internal_config.num_hugepage_sizes) < 0) {
		RTE_LOG(ERR, EAL, "Unlinking hugepage files failed!\n");
		goto fail;
	}

	/* free the temporary hugepage table */
	free(tmp_hp);
	tmp_hp = NULL;
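
	/*
	 * Example of the scan below (illustrative): four 2 MB pages on
	 * socket 0 at physaddrs 0x240000000, 0x240200000, 0x240400000 and
	 * 0x240a00000 produce two memsegs: one 6 MB segment covering the
	 * first three pages (adjacent in both PA and VA) and one 2 MB
	 * segment for the page after the hole.
	 */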
	/* first memseg index shall be 0 after incrementing it below */
	j = -1;
	for (i = 0; i < nr_hugefiles; i++) {
		new_memseg = 0;

		/* if this is a new section, create a new memseg */
		if (i == 0)
			new_memseg = 1;
		else if (hugepage[i].socket_id != hugepage[i-1].socket_id)
			new_memseg = 1;
		else if (hugepage[i].size != hugepage[i-1].size)
			new_memseg = 1;

#ifdef RTE_ARCH_PPC_64
		/* On PPC64 architecture, the mmap always start from higher
		 * virtual address to lower address. Here, both the physical
		 * address and virtual address are in descending order */
		else if ((hugepage[i-1].physaddr - hugepage[i].physaddr) !=
		    hugepage[i].size)
			new_memseg = 1;
		else if (((unsigned long)hugepage[i-1].final_va -
		    (unsigned long)hugepage[i].final_va) != hugepage[i].size)
			new_memseg = 1;
#else
		else if ((hugepage[i].physaddr - hugepage[i-1].physaddr) !=
		    hugepage[i].size)
			new_memseg = 1;
		else if (((unsigned long)hugepage[i].final_va -
		    (unsigned long)hugepage[i-1].final_va) != hugepage[i].size)
			new_memseg = 1;
#endif

		if (new_memseg) {
			j += 1;
			if (j == RTE_MAX_MEMSEG)
				break;

			mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
			mcfg->memseg[j].addr = hugepage[i].final_va;
#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
			mcfg->memseg[j].len = hugepage[i].size * hugepage[i].repeated;
#else
			mcfg->memseg[j].len = hugepage[i].size;
#endif
			mcfg->memseg[j].socket_id = hugepage[i].socket_id;
			mcfg->memseg[j].hugepage_sz = hugepage[i].size;
		}
		/* continuation of previous memseg */
		else {
#ifdef RTE_ARCH_PPC_64
			/* Use the phy and virt address of the last page as segment
			 * address for IBM Power architecture */
			mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
			mcfg->memseg[j].addr = hugepage[i].final_va;
#endif
			mcfg->memseg[j].len += mcfg->memseg[j].hugepage_sz;
		}
		hugepage[i].memseg_id = j;
	}

	if (i < nr_hugefiles) {
		RTE_LOG(ERR, EAL, "Can only reserve %d pages "
			"from %d requested\n"
			"Current %s=%d is not enough\n"
			"Please either increase it or request less amount "
			"of memory.\n",
			i, nr_hugefiles, RTE_STR(CONFIG_RTE_MAX_MEMSEG),
			RTE_MAX_MEMSEG);
		goto fail;
	}

	munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));

	return 0;

fail:
	huge_recover_sigbus();
	free(tmp_hp);
	if (hugepage != NULL)
		munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));

	return -1;
}

/*
 * uses fstat to report the size of a file on disk
 */
static off_t
getFileSize(int fd)
{
	struct stat st;

	if (fstat(fd, &st) < 0)
		return 0;
	return st.st_size;
}

/*
 * This creates the memory mappings in the secondary process to match that of
 * the primary process. It goes through each memory segment in the DPDK runtime
 * configuration and finds the hugepages which form that segment, mapping them
 * in order to form a contiguous block in the virtual memory space.
 */
int
rte_eal_hugepage_attach(void)
{
	const struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct hugepage_file *hp = NULL;
	unsigned num_hp = 0;
	unsigned i, s = 0; /* s used to track the segment number */
	off_t size = 0;
	int fd, fd_zero = -1, fd_hugepage = -1;

	if (aslr_enabled() > 0) {
		RTE_LOG(WARNING, EAL, "WARNING: Address Space Layout Randomization "
				"(ASLR) is enabled in the kernel.\n");
		RTE_LOG(WARNING, EAL, "   This may cause issues with mapping memory "
				"into secondary processes\n");
	}

	test_proc_pagemap_readable();

	if (internal_config.xen_dom0_support) {
#ifdef RTE_LIBRTE_XEN_DOM0
		if (rte_xen_dom0_memory_attach() < 0) {
			RTE_LOG(ERR, EAL, "Failed to attach memory segments of primary "
					"process\n");
			return -1;
		}
		return 0;
#endif
	}

	fd_zero = open("/dev/zero", O_RDONLY);
	if (fd_zero < 0) {
		RTE_LOG(ERR, EAL, "Could not open /dev/zero\n");
		goto error;
	}
	fd_hugepage = open(eal_hugepage_info_path(), O_RDONLY);
	if (fd_hugepage < 0) {
		RTE_LOG(ERR, EAL, "Could not open %s\n", eal_hugepage_info_path());
		goto error;
	}

	/* map all segments into memory to make sure we get the addrs */
	for (s = 0; s < RTE_MAX_MEMSEG; ++s) {
		void *base_addr;

		/*
		 * the first memory segment with len==0 is the one that
		 * follows the last valid segment.
		 */
		if (mcfg->memseg[s].len == 0)
			break;

		/*
		 * fd_zero is mmapped to get a contiguous block of virtual
		 * addresses of the appropriate memseg size.
		 * use mmap to get identical addresses as the primary process.
		 */
		base_addr = mmap(mcfg->memseg[s].addr, mcfg->memseg[s].len,
				PROT_READ, MAP_PRIVATE, fd_zero, 0);
		if (base_addr == MAP_FAILED ||
				base_addr != mcfg->memseg[s].addr) {
			RTE_LOG(ERR, EAL, "Could not mmap %llu bytes "
				"in /dev/zero to requested address [%p]: '%s'\n",
				(unsigned long long)mcfg->memseg[s].len,
				mcfg->memseg[s].addr, strerror(errno));
			if (aslr_enabled() > 0) {
				RTE_LOG(ERR, EAL, "It is recommended to "
					"disable ASLR in the kernel "
					"and retry running both primary "
					"and secondary processes\n");
			}
			goto error;
		}
	}

	size = getFileSize(fd_hugepage);
	hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0);
	if (hp == MAP_FAILED) {
		RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path());
		goto error;
	}

	num_hp = size / sizeof(struct hugepage_file);
	RTE_LOG(DEBUG, EAL, "Analysing %u files\n", num_hp);

	s = 0;
	while (s < RTE_MAX_MEMSEG && mcfg->memseg[s].len > 0) {
		void *addr, *base_addr;
		uintptr_t offset = 0;
		size_t mapping_size;

		/*
		 * free previously mapped memory so we can map the
		 * hugepages into the space
		 */
		base_addr = mcfg->memseg[s].addr;
		munmap(base_addr, mcfg->memseg[s].len);

		/* find the hugepages for this segment and map them
		 * we don't need to worry about order, as the primary process
		 * sorted the entries before it did the second mmap of them */
		for (i = 0; i < num_hp && offset < mcfg->memseg[s].len; i++) {
			if (hp[i].memseg_id == (int)s) {
				fd = open(hp[i].filepath, O_RDWR);
				if (fd < 0) {
					RTE_LOG(ERR, EAL, "Could not open %s\n",
						hp[i].filepath);
					goto error;
				}
#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
				mapping_size = hp[i].size * hp[i].repeated;
#else
				mapping_size = hp[i].size;
#endif
				addr = mmap(RTE_PTR_ADD(base_addr, offset),
						mapping_size, PROT_READ | PROT_WRITE,
						MAP_SHARED, fd, 0);
				close(fd); /* close file both on success and on failure */
				if (addr == MAP_FAILED ||
						addr != RTE_PTR_ADD(base_addr, offset)) {
					RTE_LOG(ERR, EAL, "Could not mmap %s\n",
						hp[i].filepath);
					goto error;
				}
				offset += mapping_size;
			}
		}
		RTE_LOG(DEBUG, EAL, "Mapped segment %u of size 0x%llx\n", s,
				(unsigned long long)mcfg->memseg[s].len);
		s++;
	}

	/* unmap the hugepage config file, since we are done using it */
	munmap(hp, size);
	close(fd_zero);
	close(fd_hugepage);
	return 0;

error:
	s = 0;
	while (s < RTE_MAX_MEMSEG && mcfg->memseg[s].len > 0) {
		munmap(mcfg->memseg[s].addr, mcfg->memseg[s].len);
		s++;
	}
	if (hp != NULL && hp != MAP_FAILED)
		munmap(hp, size);
	if (fd_zero >= 0)
		close(fd_zero);
	if (fd_hugepage >= 0)
		close(fd_hugepage);
	return -1;
}