/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*   BSD LICENSE
 *
 *   Copyright(c) 2013 6WIND.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#define _FILE_OFFSET_BITS 64
#include <errno.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <limits.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/queue.h>
#include <sys/file.h>
#include <sys/ioctl.h>

#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_string_fns.h>

#include "eal_private.h"
#include "eal_internal_cfg.h"
#include "eal_filesystem.h"
#include "eal_hugepages.h"
/*
 * Huge page mapping under Linux.
 *
 * To reserve a big contiguous amount of memory, we use the hugepage
 * feature of Linux. For that, we need to have hugetlbfs mounted. This
 * code will create many files in this directory (one per page) and
 * map them in virtual memory. For each page, we will retrieve its
 * physical address and remap it in order to have a virtually
 * contiguous zone as well as a physically contiguous zone.
 */
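
/*
 * Illustrative sketch (not part of the EAL): the basic primitive used
 * throughout this file is "create a file on a hugetlbfs mount and
 * mmap() it". The "/mnt/huge/example" path is an assumption for the
 * example; real paths come from eal_get_hugefile_path().
 */
#if 0	/* illustrative only, kept out of the build */
static void *
example_map_one_hugepage(size_t hugepage_sz)
{
	void *va;
	int fd = open("/mnt/huge/example", O_CREAT | O_RDWR, 0755);

	if (fd < 0)
		return NULL;
	va = mmap(NULL, hugepage_sz, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, 0);
	close(fd);
	return (va == MAP_FAILED) ? NULL : va;
}
#endif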
static uint64_t baseaddr_offset;

#define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"
/* Lock a page in physical memory and prevent it from being swapped. */
int
rte_mem_lock_page(const void *virt)
{
	unsigned long virtual = (unsigned long)virt;
	int page_size = getpagesize();
	unsigned long aligned = (virtual & ~(page_size - 1));
	return mlock((void *)aligned, page_size);
}
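
/*
 * Usage sketch (illustrative only): pin the page backing an arbitrary
 * object so it cannot be swapped out; like mlock(), it returns 0 on
 * success.
 */
#if 0	/* illustrative only, kept out of the build */
static int
example_lock_object(void)
{
	static int important_state;
	return rte_mem_lock_page(&important_state);
}
#endif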
/*
 * Get physical address of any mapped virtual address in the current process.
 */
phys_addr_t
rte_mem_virt2phy(const void *virtaddr)
{
	int fd;
	uint64_t page, physaddr;
	unsigned long virt_pfn;
	int page_size;
	off_t offset;

	/* standard page size */
	page_size = getpagesize();

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
			__func__, strerror(errno));
		return RTE_BAD_PHYS_ADDR;
	}

	virt_pfn = (unsigned long)virtaddr / page_size;
	offset = sizeof(uint64_t) * virt_pfn;
	if (lseek(fd, offset, SEEK_SET) == (off_t) -1) {
		RTE_LOG(ERR, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
				__func__, strerror(errno));
		close(fd);
		return RTE_BAD_PHYS_ADDR;
	}
	if (read(fd, &page, sizeof(uint64_t)) < 0) {
		RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
				__func__, strerror(errno));
		close(fd);
		return RTE_BAD_PHYS_ADDR;
	}

	/*
	 * the pfn (page frame number) is in bits 0-54 (see
	 * pagemap.txt in the Linux Documentation)
	 */
	physaddr = ((page & 0x7fffffffffffffULL) * page_size)
		+ ((unsigned long)virtaddr % page_size);
	close(fd);
	return physaddr;
}
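
/*
 * Note (illustrative, not in the original): bit 63 of a pagemap entry
 * is the "page present" flag. A stricter variant could verify it before
 * trusting the PFN, e.g.:
 *
 *	if ((page & (1ULL << 63)) == 0)
 *		return RTE_BAD_PHYS_ADDR;
 */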
/*
 * For each hugepage in hugepg_tbl, fill the physaddr value. We find
 * it by browsing the /proc/self/pagemap special file.
 */
static int
find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
	unsigned i;
	phys_addr_t addr;

	for (i = 0; i < hpi->num_pages[0]; i++) {
		addr = rte_mem_virt2phy(hugepg_tbl[i].orig_va);
		if (addr == RTE_BAD_PHYS_ADDR)
			return -1;
		hugepg_tbl[i].physaddr = addr;
	}
	return 0;
}
/*
 * Check whether address-space layout randomization is enabled in
 * the kernel. This is important for multi-process as it can prevent
 * two processes mapping data to the same virtual address. Returns:
 *    0 - address space randomization disabled
 *    1/2 - address space randomization enabled
 *    negative error code on error
 */
static int
aslr_enabled(void)
{
	char c;
	int retval, fd = open(RANDOMIZE_VA_SPACE_FILE, O_RDONLY);
	if (fd < 0)
		return -errno;
	retval = read(fd, &c, 1);
	close(fd);
	if (retval < 0)
		return -errno;
	if (retval == 0)
		return -EIO;
	switch (c) {
		case '0' : return 0;
		case '1' : return 1;
		case '2' : return 2;
		default: return -EINVAL;
	}
}
/*
 * Try to mmap *size bytes in /dev/zero. If it is successful, return the
 * pointer to the mmap'd area and keep *size unmodified. Else, retry
 * with a smaller zone: decrease *size by hugepage_sz until it reaches
 * 0. In this case, return NULL. Note: this function returns an address
 * which is a multiple of hugepage size.
 */
static void *
get_virtual_area(size_t *size, size_t hugepage_sz)
{
	void *addr;
	int fd;
	long aligned_addr;

	if (internal_config.base_virtaddr != 0) {
		addr = (void *)(uintptr_t)(internal_config.base_virtaddr +
				baseaddr_offset);
	}
	else
		addr = NULL;

	RTE_LOG(INFO, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);

	fd = open("/dev/zero", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "Cannot open /dev/zero\n");
		return NULL;
	}
	do {
		addr = mmap(addr,
				(*size) + hugepage_sz, PROT_READ, MAP_PRIVATE, fd, 0);
		if (addr == MAP_FAILED)
			*size -= hugepage_sz;
	} while (addr == MAP_FAILED && *size > 0);

	if (addr == MAP_FAILED) {
		close(fd);
		RTE_LOG(INFO, EAL, "Cannot get a virtual area\n");
		return NULL;
	}

	munmap(addr, (*size) + hugepage_sz);
	close(fd);

	/* align addr to a huge page size boundary */
	aligned_addr = (long)addr;
	aligned_addr += (hugepage_sz - 1);
	aligned_addr &= (~(hugepage_sz - 1));
	addr = (void *)(aligned_addr);

	RTE_LOG(INFO, EAL, "Virtual area found at %p (size = 0x%zx)\n",
		addr, *size);

	/* increment offset */
	baseaddr_offset += *size;

	return addr;
}
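
/*
 * Usage sketch (illustrative only): ask for up to 1 GB of virtual space
 * aligned to 2 MB pages; on return, len holds how much was actually
 * reserved, which may be less than requested.
 */
#if 0	/* illustrative only, kept out of the build */
static void *
example_reserve_va(void)
{
	size_t len = RTE_PGSIZE_1G;
	return get_virtual_area(&len, RTE_PGSIZE_2M);
}
#endif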
/*
 * Mmap all hugepages of hugepage table: it first opens a file in
 * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
 * virtual address is stored in hugepg_tbl[i].orig_va, else it is stored
 * in hugepg_tbl[i].final_va. The second mapping (when orig is 0) tries to
 * map contiguous physical blocks in contiguous virtual blocks.
 */
static int
map_all_hugepages(struct hugepage_file *hugepg_tbl,
		struct hugepage_info *hpi, int orig)
{
	int fd;
	unsigned i;
	void *virtaddr;
	void *vma_addr = NULL;
	size_t vma_len = 0;

#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
	RTE_SET_USED(vma_len);
#endif

	for (i = 0; i < hpi->num_pages[0]; i++) {
		size_t hugepage_sz = hpi->hugepage_sz;

		if (orig) {
			hugepg_tbl[i].file_id = i;
			hugepg_tbl[i].size = hugepage_sz;
#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
			eal_get_hugefile_temp_path(hugepg_tbl[i].filepath,
					sizeof(hugepg_tbl[i].filepath), hpi->hugedir,
					hugepg_tbl[i].file_id);
#else
			eal_get_hugefile_path(hugepg_tbl[i].filepath,
					sizeof(hugepg_tbl[i].filepath), hpi->hugedir,
					hugepg_tbl[i].file_id);
#endif
			hugepg_tbl[i].filepath[sizeof(hugepg_tbl[i].filepath) - 1] = '\0';
		}
#ifndef RTE_ARCH_X86_64
		/* for 32-bit systems, don't remap 1G pages, just reuse original
		 * map address as final map address.
		 */
		else if (hugepage_sz == RTE_PGSIZE_1G){
			hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
			hugepg_tbl[i].orig_va = NULL;
			continue;
		}
#endif
#ifndef RTE_EAL_SINGLE_FILE_SEGMENTS
		else if (vma_len == 0) {
			unsigned j, num_pages;

			/* reserve a virtual area for next contiguous
			 * physical block: count the number of
			 * contiguous physical pages. */
			for (j = i+1; j < hpi->num_pages[0] ; j++) {
				if (hugepg_tbl[j].physaddr !=
				    hugepg_tbl[j-1].physaddr + hugepage_sz)
					break;
			}
			num_pages = j - i;
			vma_len = num_pages * hugepage_sz;

			/* get the biggest virtual memory area up to
			 * vma_len. If it fails, vma_addr is NULL, so
			 * let the kernel provide the address. */
			vma_addr = get_virtual_area(&vma_len, hpi->hugepage_sz);
			if (vma_addr == NULL)
				vma_len = hugepage_sz;
		}
#endif

		/* try to create hugepage file */
		fd = open(hugepg_tbl[i].filepath, O_CREAT | O_RDWR, 0755);
		if (fd < 0) {
			RTE_LOG(ERR, EAL, "%s(): open failed: %s\n", __func__,
					strerror(errno));
			return -1;
		}

		virtaddr = mmap(vma_addr, hugepage_sz, PROT_READ | PROT_WRITE,
				MAP_SHARED, fd, 0);
		if (virtaddr == MAP_FAILED) {
			RTE_LOG(ERR, EAL, "%s(): mmap failed: %s\n", __func__,
					strerror(errno));
			close(fd);
			return -1;
		}

		if (orig) {
			hugepg_tbl[i].orig_va = virtaddr;
			memset(virtaddr, 0, hugepage_sz);
		}
		else {
			hugepg_tbl[i].final_va = virtaddr;
		}

		/* set shared flock on the file. */
		if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
			RTE_LOG(ERR, EAL, "%s(): Locking file failed: %s\n",
				__func__, strerror(errno));
			close(fd);
			return -1;
		}

		close(fd);

		vma_addr = (char *)vma_addr + hugepage_sz;
		vma_len -= hugepage_sz;
	}
	return 0;
}
#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS

/*
 * Remaps all hugepages into single file segments
 */
static int
remap_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
	int fd;
	unsigned i = 0, j, num_pages, page_idx = 0;
	void *vma_addr = NULL, *old_addr = NULL, *page_addr = NULL;
	size_t vma_len = 0;
	size_t hugepage_sz = hpi->hugepage_sz;
	size_t total_size, offset;
	char filepath[MAX_HUGEPAGE_PATH];
	phys_addr_t physaddr;
	int socket;

	while (i < hpi->num_pages[0]) {

#ifndef RTE_ARCH_X86_64
		/* for 32-bit systems, don't remap 1G pages, just reuse original
		 * map address as final map address.
		 */
		if (hugepage_sz == RTE_PGSIZE_1G){
			hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
			hugepg_tbl[i].orig_va = NULL;
			i++;
			continue;
		}
#endif

		/* reserve a virtual area for next contiguous
		 * physical block: count the number of
		 * contiguous physical pages. */
		for (j = i+1; j < hpi->num_pages[0] ; j++) {
			if (hugepg_tbl[j].physaddr != hugepg_tbl[j-1].physaddr + hugepage_sz)
				break;
		}
		num_pages = j - i;
		vma_len = num_pages * hugepage_sz;

		socket = hugepg_tbl[i].socket_id;

		/* get the biggest virtual memory area up to
		 * vma_len. If it fails, vma_addr is NULL, so
		 * let the kernel provide the address. */
		vma_addr = get_virtual_area(&vma_len, hpi->hugepage_sz);

		/* If we can't find a big enough virtual area, work out how many pages
		 * we are going to get */
		if (vma_addr == NULL)
			j = i + 1;
		else if (vma_len != num_pages * hugepage_sz) {
			num_pages = vma_len / hugepage_sz;
			j = i + num_pages;
		}

		hugepg_tbl[page_idx].file_id = page_idx;
		eal_get_hugefile_path(filepath,
				sizeof(filepath),
				hpi->hugedir,
				hugepg_tbl[page_idx].file_id);

		/* try to create hugepage file */
		fd = open(filepath, O_CREAT | O_RDWR, 0755);
		if (fd < 0) {
			RTE_LOG(ERR, EAL, "%s(): open failed: %s\n", __func__, strerror(errno));
			return -1;
		}

		total_size = 0;
		for (; i < j; i++) {

			/* unmap current segment */
			if (total_size > 0)
				munmap(vma_addr, total_size);

			/* unmap original page */
			munmap(hugepg_tbl[i].orig_va, hugepage_sz);
			unlink(hugepg_tbl[i].filepath);

			total_size += hugepage_sz;

			old_addr = vma_addr;

			/* map new, bigger segment */
			vma_addr = mmap(vma_addr, total_size,
					PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

			if (vma_addr == MAP_FAILED || vma_addr != old_addr) {
				RTE_LOG(ERR, EAL, "%s(): mmap failed: %s\n", __func__, strerror(errno));
				close(fd);
				return -1;
			}
		}

		/* touch the page. this is needed because kernel postpones mapping
		 * creation until the first page fault. with this, we pin down
		 * the page and it is marked as used and gets into process' pagemap.
		 */
		for (offset = 0; offset < total_size; offset += hugepage_sz)
			*((volatile uint8_t*) RTE_PTR_ADD(vma_addr, offset));

		/* set shared flock on the file. */
		if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
			RTE_LOG(ERR, EAL, "%s(): Locking file failed: %s\n",
					__func__, strerror(errno));
			close(fd);
			return -1;
		}

		rte_snprintf(hugepg_tbl[page_idx].filepath, MAX_HUGEPAGE_PATH, "%s",
				filepath);

		physaddr = rte_mem_virt2phy(vma_addr);

		if (physaddr == RTE_BAD_PHYS_ADDR) {
			close(fd);
			return -1;
		}

		hugepg_tbl[page_idx].final_va = vma_addr;
		hugepg_tbl[page_idx].physaddr = physaddr;
		hugepg_tbl[page_idx].repeated = num_pages;
		hugepg_tbl[page_idx].socket_id = socket;

		close(fd);

		/* verify the memory segment - that is, check that every VA corresponds
		 * to the physical address we expect to see
		 */
		for (offset = 0; offset < vma_len; offset += hugepage_sz) {
			uint64_t expected_physaddr;

			expected_physaddr = hugepg_tbl[page_idx].physaddr + offset;
			page_addr = RTE_PTR_ADD(vma_addr, offset);
			physaddr = rte_mem_virt2phy(page_addr);

			if (physaddr != expected_physaddr) {
				RTE_LOG(ERR, EAL, "Segment sanity check failed: wrong physaddr "
						"at %p (offset 0x%" PRIx64 "): 0x%" PRIx64
						" (expected 0x%" PRIx64 ")\n",
						page_addr, offset, physaddr, expected_physaddr);
				return -1;
			}
		}

		/* zero out the whole segment */
		memset(hugepg_tbl[page_idx].final_va, 0, total_size);

		page_idx++;
	}

	/* zero out the rest */
	memset(&hugepg_tbl[page_idx], 0, (hpi->num_pages[0] - page_idx) * sizeof(struct hugepage_file));

	return page_idx;
}
#else /* RTE_EAL_SINGLE_FILE_SEGMENTS=n */
/* Unmap all hugepages from original mapping */
static int
unmap_all_hugepages_orig(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
	unsigned i;
	for (i = 0; i < hpi->num_pages[0]; i++) {
		if (hugepg_tbl[i].orig_va) {
			munmap(hugepg_tbl[i].orig_va, hpi->hugepage_sz);
			hugepg_tbl[i].orig_va = NULL;
		}
	}
	return 0;
}
#endif /* RTE_EAL_SINGLE_FILE_SEGMENTS */
/*
 * Parse /proc/self/numa_maps to get the NUMA socket ID for each huge
 * page.
 */
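/*
 * For reference, a numa_maps entry for a mapped hugepage looks roughly
 * like the following (the path and counts here are made-up examples):
 *
 *   2aaaaac00000 default file=/mnt/huge/rtemap_0 huge dirty=1 N0=1
 *
 * The leading hex value is the mapping's virtual address, and the
 * "N<node>=<count>" token carries the NUMA node the pages reside on.
 */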
static int
find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
	int socket_id;
	char *end, *nodestr;
	unsigned i, hp_count = 0;
	uint64_t virt_addr;
	char buf[BUFSIZ];
	char hugedir_str[PATH_MAX];
	FILE *f;

	f = fopen("/proc/self/numa_maps", "r");
	if (f == NULL) {
		RTE_LOG(INFO, EAL, "cannot open /proc/self/numa_maps,"
				" consider that all memory is in socket_id 0\n");
		return 0;
	}

	rte_snprintf(hugedir_str, sizeof(hugedir_str),
			"%s/", hpi->hugedir);

	/* parse numa map */
	while (fgets(buf, sizeof(buf), f) != NULL) {
		/* ignore non huge page */
		if (strstr(buf, " huge ") == NULL &&
				strstr(buf, hugedir_str) == NULL)
			continue;

		/* get zone addr */
		virt_addr = strtoull(buf, &end, 16);
		if (virt_addr == 0 || end == buf) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}

		/* get node id (socket id) */
		nodestr = strstr(buf, " N");
		if (nodestr == NULL) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}
		/* skip the " N" and isolate the node number */
		nodestr += 2;
		end = strstr(nodestr, "=");
		if (end == NULL) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}
		end[0] = '\0';
		end = NULL;

		socket_id = strtoul(nodestr, &end, 0);
		if ((nodestr[0] == '\0') || (end == NULL) || (*end != '\0')) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}

		/* if we find this page in our mappings, set socket_id */
		for (i = 0; i < hpi->num_pages[0]; i++) {
			void *va = (void *)(unsigned long)virt_addr;
			if (hugepg_tbl[i].orig_va == va) {
				hugepg_tbl[i].socket_id = socket_id;
				hp_count++;
			}
		}
	}

	if (hp_count < hpi->num_pages[0])
		goto error;

	fclose(f);
	return 0;

error:
	fclose(f);
	return -1;
}
/*
 * Sort the hugepg_tbl by physical address (lower addresses first). We
 * use a slow algorithm, but we won't have millions of pages, and this
 * is only done at init time.
 */
static int
sort_by_physaddr(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
	unsigned i, j;
	int smallest_idx;
	uint64_t smallest_addr;
	struct hugepage_file tmp;

	for (i = 0; i < hpi->num_pages[0]; i++) {
		smallest_addr = 0;
		smallest_idx = -1;

		/*
		 * browse all entries starting at 'i', and find the
		 * entry with the smallest addr
		 */
		for (j=i; j< hpi->num_pages[0]; j++) {
			if (smallest_addr == 0 ||
			    hugepg_tbl[j].physaddr < smallest_addr) {
				smallest_addr = hugepg_tbl[j].physaddr;
				smallest_idx = j;
			}
		}

		/* should not happen */
		if (smallest_idx == -1) {
			RTE_LOG(ERR, EAL, "%s(): error in physaddr sorting\n", __func__);
			return -1;
		}

		/* swap the 2 entries in the table */
		memcpy(&tmp, &hugepg_tbl[smallest_idx], sizeof(struct hugepage_file));
		memcpy(&hugepg_tbl[smallest_idx], &hugepg_tbl[i],
				sizeof(struct hugepage_file));
		memcpy(&hugepg_tbl[i], &tmp, sizeof(struct hugepage_file));
	}
	return 0;
}
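
/*
 * Alternative sketch (not the implementation above): the same ordering
 * could be produced with qsort() and a physaddr comparator, e.g.:
 */
#if 0	/* illustrative only, kept out of the build */
static int
cmp_physaddr(const void *a, const void *b)
{
	const struct hugepage_file *p1 = a;
	const struct hugepage_file *p2 = b;

	if (p1->physaddr < p2->physaddr)
		return -1;
	return (p1->physaddr > p2->physaddr) ? 1 : 0;
}
	/* qsort(hugepg_tbl, hpi->num_pages[0],
	 *       sizeof(struct hugepage_file), cmp_physaddr); */
#endif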
/*
 * Uses mmap to create a shared memory area for storage of data.
 * Used in this file to store the hugepage file map on disk.
 */
static void *
create_shared_memory(const char *filename, const size_t mem_size)
{
	void *retval;
	int fd = open(filename, O_CREAT | O_RDWR, 0666);
	if (fd < 0)
		return NULL;
	if (ftruncate(fd, mem_size) < 0) {
		close(fd);
		return NULL;
	}
	retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);
	return retval;
}
/*
 * This copies *active* hugepages from one hugepage table to another.
 * destination is typically the shared memory.
 */
static int
copy_hugepages_to_shared_mem(struct hugepage_file * dst, int dest_size,
		const struct hugepage_file * src, int src_size)
{
	int src_pos, dst_pos = 0;

	for (src_pos = 0; src_pos < src_size; src_pos++) {
		if (src[src_pos].final_va != NULL) {
			/* error on overflow attempt */
			if (dst_pos == dest_size)
				return -1;
			memcpy(&dst[dst_pos], &src[src_pos], sizeof(struct hugepage_file));
			dst_pos++;
		}
	}
	return 0;
}
/*
 * Unmaps hugepages that are not going to be used. Since we originally allocate
 * ALL hugepages (not just those we need), additional unmapping needs to be done.
 */
static int
unmap_unneeded_hugepages(struct hugepage_file *hugepg_tbl,
		struct hugepage_info *hpi,
		unsigned num_hp_info)
{
	unsigned socket, size;
	int page, nrpages = 0;

	/* get total number of hugepages */
	for (size = 0; size < num_hp_info; size++)
		for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
			nrpages += internal_config.hugepage_info[size].num_pages[socket];

	for (size = 0; size < num_hp_info; size++) {
		for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
			unsigned pages_found = 0;

			/* traverse until we have unmapped all the unused pages */
			for (page = 0; page < nrpages; page++) {
				struct hugepage_file *hp = &hugepg_tbl[page];

#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
				/* if this page was already cleared */
				if (hp->final_va == NULL)
					continue;
#endif

				/* find a page that matches the criteria */
				if ((hp->size == hpi[size].hugepage_sz) &&
						(hp->socket_id == (int) socket)) {

					/* if we skipped enough pages, unmap the rest */
					if (pages_found == hpi[size].num_pages[socket]) {
						uint64_t unmap_len;

#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
						unmap_len = hp->size * hp->repeated;
#else
						unmap_len = hp->size;
#endif

						/* get start addr and len of the remaining segment */
						munmap(hp->final_va, (size_t) unmap_len);

						hp->final_va = NULL;
						if (unlink(hp->filepath) == -1) {
							RTE_LOG(ERR, EAL, "%s(): Removing %s failed: %s\n",
									__func__, hp->filepath, strerror(errno));
							return -1;
						}
					}
#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
					/* else, check how much we still need to map */
					else {
						int nr_pg_left =
								hpi[size].num_pages[socket] - pages_found;

						/* if we need enough memory to fit into the segment */
						if (hp->repeated <= nr_pg_left) {
							pages_found += hp->repeated;
						}
						/* truncate the segment */
						else {
							uint64_t final_size = nr_pg_left * hp->size;
							uint64_t seg_size = hp->repeated * hp->size;

							void *unmap_va = RTE_PTR_ADD(hp->final_va,
									final_size);
							int fd;

							munmap(unmap_va, seg_size - final_size);

							fd = open(hp->filepath, O_RDWR);
							if (fd < 0) {
								RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
										hp->filepath, strerror(errno));
								return -1;
							}
							if (ftruncate(fd, final_size) < 0) {
								RTE_LOG(ERR, EAL, "Cannot truncate %s: %s\n",
										hp->filepath, strerror(errno));
								close(fd);
								return -1;
							}
							close(fd);

							pages_found += nr_pg_left;
							hp->repeated = nr_pg_left;
						}
					}
#else
					/* else, lock the page and skip */
					else
						pages_found++;
#endif
				} /* match page */
			} /* foreach page */
		} /* foreach socket */
	} /* foreach pagesize */

	return 0;
}
static inline uint64_t
get_socket_mem_size(int socket)
{
	uint64_t size = 0;
	unsigned i;

	for (i = 0; i < internal_config.num_hugepage_sizes; i++){
		struct hugepage_info *hpi = &internal_config.hugepage_info[i];
		if (hpi->hugedir != NULL)
			size += hpi->hugepage_sz * hpi->num_pages[socket];
	}

	return size;
}
/*
 * This function is a NUMA-aware equivalent of calc_num_pages.
 * It takes in the list of hugepage sizes and the
 * number of pages thereof, and calculates the best number of
 * pages of each size to fulfill the request for <memory> RAM.
 */
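/*
 * Worked example (the numbers are hypothetical): with --socket-mem=1024,0
 * and only 2 MB pages available, memory[0] = 1024 MB is satisfied by
 * num_pages[0] = 512 pages of 2 MB on socket 0, and socket 1 gets none.
 */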
static int
calc_num_pages_per_socket(uint64_t * memory,
		struct hugepage_info *hp_info,
		struct hugepage_info *hp_used,
		unsigned num_hp_info)
{
	unsigned socket, j, i = 0;
	unsigned requested, available;
	int total_num_pages = 0;
	uint64_t remaining_mem, cur_mem;
	uint64_t total_mem = internal_config.memory;

	if (num_hp_info == 0)
		return -1;

	/* if specific memory amounts per socket weren't requested */
	if (internal_config.force_sockets == 0) {
		int cpu_per_socket[RTE_MAX_NUMA_NODES];
		size_t default_size, total_size;
		unsigned lcore_id;

		/* Compute number of cores per socket */
		memset(cpu_per_socket, 0, sizeof(cpu_per_socket));
		RTE_LCORE_FOREACH(lcore_id) {
			cpu_per_socket[rte_lcore_to_socket_id(lcore_id)]++;
		}

		/*
		 * Automatically spread requested memory amongst detected sockets according
		 * to number of cores from cpu mask present on each socket
		 */
		total_size = internal_config.memory;
		for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {

			/* Set memory amount per socket */
			default_size = (internal_config.memory * cpu_per_socket[socket])
					/ rte_lcore_count();

			/* Limit to maximum available memory on socket */
			default_size = RTE_MIN(default_size, get_socket_mem_size(socket));

			/* Update sizes */
			memory[socket] = default_size;
			total_size -= default_size;
		}

		/*
		 * If some memory is remaining, try to allocate it by getting all
		 * available memory from sockets, one after the other
		 */
		for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {
			/* take whatever is available */
			default_size = RTE_MIN(get_socket_mem_size(socket) - memory[socket],
					total_size);

			/* Update sizes */
			memory[socket] += default_size;
			total_size -= default_size;
		}
	}

	for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_mem != 0; socket++) {
		/* skips if the memory on specific socket wasn't requested */
		for (i = 0; i < num_hp_info && memory[socket] != 0; i++){
			hp_used[i].hugedir = hp_info[i].hugedir;
			hp_used[i].num_pages[socket] = RTE_MIN(
					memory[socket] / hp_info[i].hugepage_sz,
					hp_info[i].num_pages[socket]);

			cur_mem = hp_used[i].num_pages[socket] *
					hp_used[i].hugepage_sz;

			memory[socket] -= cur_mem;
			total_mem -= cur_mem;

			total_num_pages += hp_used[i].num_pages[socket];

			/* check if we have met all memory requests */
			if (memory[socket] == 0)
				break;

			/* check if we have any more pages left at this size, if so
			 * move on to next size */
			if (hp_used[i].num_pages[socket] == hp_info[i].num_pages[socket])
				continue;
			/* At this point we know that there are more pages available that are
			 * bigger than the memory we want, so lets see if we can get enough
			 * from other page sizes.
			 */
			remaining_mem = 0;
			for (j = i+1; j < num_hp_info; j++)
				remaining_mem += hp_info[j].hugepage_sz *
						hp_info[j].num_pages[socket];

			/* is there enough other memory, if not allocate another page and quit */
			if (remaining_mem < memory[socket]){
				cur_mem = RTE_MIN(memory[socket],
						hp_info[i].hugepage_sz);
				memory[socket] -= cur_mem;
				total_mem -= cur_mem;
				hp_used[i].num_pages[socket]++;
				total_num_pages++;
				break; /* we are done with this socket*/
			}
		}
		/* if we didn't satisfy all memory requirements per socket */
		if (memory[socket] > 0) {
			/* to prevent icc errors */
			requested = (unsigned) (internal_config.socket_mem[socket] /
					0x100000);
			available = requested -
					((unsigned) (memory[socket] / 0x100000));
			RTE_LOG(INFO, EAL, "Not enough memory available on socket %u! "
					"Requested: %uMB, available: %uMB\n", socket,
					requested, available);
			return -1;
		}
	}

	/* if we didn't satisfy total memory requirements */
	if (total_mem > 0) {
		requested = (unsigned) (internal_config.memory / 0x100000);
		available = requested - (unsigned) (total_mem / 0x100000);
		RTE_LOG(INFO, EAL, "Not enough memory available! Requested: %uMB,"
				" available: %uMB\n", requested, available);
		return -1;
	}
	return total_num_pages;
}
/*
 * Prepare physical memory mapping: fill the configuration structure with
 * this information, and return 0 on success.
 *  1. map N huge pages in separate files in hugetlbfs
 *  2. find associated physical addr
 *  3. find associated NUMA socket ID
 *  4. sort all huge pages by physical address
 *  5. remap these N huge pages in the correct order
 *  6. unmap the first mapping
 *  7. fill memsegs in configuration with contiguous zones
 */
static int
rte_eal_hugepage_init(void)
{
	struct rte_mem_config *mcfg;
	struct hugepage_file *hugepage, *tmp_hp = NULL;
	struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];

	uint64_t memory[RTE_MAX_NUMA_NODES];

	unsigned hp_offset;
	int i, j, new_memseg;
	int nr_hugefiles, nr_hugepages = 0;
	void *addr;
#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
	int new_pages_count[MAX_HUGEPAGE_SIZES];
#endif

	memset(used_hp, 0, sizeof(used_hp));

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* hugetlbfs can be disabled */
	if (internal_config.no_hugetlbfs) {
		addr = mmap(NULL, internal_config.memory, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
		if (addr == MAP_FAILED) {
			RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
					strerror(errno));
			return -1;
		}
		mcfg->memseg[0].phys_addr = (phys_addr_t)(uintptr_t)addr;
		mcfg->memseg[0].addr = addr;
		mcfg->memseg[0].len = internal_config.memory;
		mcfg->memseg[0].socket_id = SOCKET_ID_ANY;
		return 0;
	}

	/* check if app runs on Xen Dom0 */
	if (internal_config.xen_dom0_support) {
#ifdef RTE_LIBRTE_XEN_DOM0
		/* use dom0_mm kernel driver to init memory */
		if (rte_xen_dom0_memory_init() < 0)
			return -1;
		else
			return 0;
#endif
	}

	/* calculate total number of hugepages available. at this point we haven't
	 * yet started sorting them so they all are on socket 0 */
	for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
		/* meanwhile, also initialize used_hp hugepage sizes in used_hp */
		used_hp[i].hugepage_sz = internal_config.hugepage_info[i].hugepage_sz;

		nr_hugepages += internal_config.hugepage_info[i].num_pages[0];
	}

	/*
	 * allocate a memory area for hugepage table.
	 * this isn't shared memory yet. due to the fact that we need some
	 * processing done on these pages, shared memory will be created
	 * at a later stage.
	 */
	tmp_hp = malloc(nr_hugepages * sizeof(struct hugepage_file));
	if (tmp_hp == NULL)
		goto fail;

	memset(tmp_hp, 0, nr_hugepages * sizeof(struct hugepage_file));

	hp_offset = 0; /* where we start the current page size entries */

	/* map all hugepages and sort them */
	for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
		struct hugepage_info *hpi;

		/*
		 * we don't yet mark hugepages as used at this stage, so
		 * we just map all hugepages available to the system
		 * all hugepages are still located on socket 0
		 */
		hpi = &internal_config.hugepage_info[i];

		if (hpi->num_pages[0] == 0)
			continue;

		/* map all hugepages available */
		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 1) < 0){
			RTE_LOG(DEBUG, EAL, "Failed to mmap %u MB hugepages\n",
					(unsigned)(hpi->hugepage_sz / 0x100000));
			goto fail;
		}

		/* find physical addresses and sockets for each hugepage */
		if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0){
			RTE_LOG(DEBUG, EAL, "Failed to find phys addr for %u MB pages\n",
					(unsigned)(hpi->hugepage_sz / 0x100000));
			goto fail;
		}

		if (find_numasocket(&tmp_hp[hp_offset], hpi) < 0){
			RTE_LOG(DEBUG, EAL, "Failed to find NUMA socket for %u MB pages\n",
					(unsigned)(hpi->hugepage_sz / 0x100000));
			goto fail;
		}

		if (sort_by_physaddr(&tmp_hp[hp_offset], hpi) < 0)
			goto fail;

#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
		/* remap all hugepages into single file segments */
		new_pages_count[i] = remap_all_hugepages(&tmp_hp[hp_offset], hpi);
		if (new_pages_count[i] < 0){
			RTE_LOG(DEBUG, EAL, "Failed to remap %u MB pages\n",
					(unsigned)(hpi->hugepage_sz / 0x100000));
			goto fail;
		}

		/* we have processed a num of hugepages of this size, so inc offset */
		hp_offset += new_pages_count[i];
#else
		/* remap all hugepages */
		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) < 0){
			RTE_LOG(DEBUG, EAL, "Failed to remap %u MB pages\n",
					(unsigned)(hpi->hugepage_sz / 0x100000));
			goto fail;
		}

		/* unmap original mappings */
		if (unmap_all_hugepages_orig(&tmp_hp[hp_offset], hpi) < 0)
			goto fail;

		/* we have processed a num of hugepages of this size, so inc offset */
		hp_offset += hpi->num_pages[0];
#endif
	}

#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
	nr_hugefiles = 0;
	for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
		nr_hugefiles += new_pages_count[i];
	}
#else
	nr_hugefiles = nr_hugepages;
#endif

	/* clean out the numbers of pages */
	for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++)
		for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
			internal_config.hugepage_info[i].num_pages[j] = 0;

	/* get hugepages for each socket */
	for (i = 0; i < nr_hugefiles; i++) {
		int socket = tmp_hp[i].socket_id;

		/* find a hugepage info with right size and increment num_pages */
		for (j = 0; j < (int) internal_config.num_hugepage_sizes; j++) {
			if (tmp_hp[i].size ==
					internal_config.hugepage_info[j].hugepage_sz) {
#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
				internal_config.hugepage_info[j].num_pages[socket] +=
					tmp_hp[i].repeated;
#else
				internal_config.hugepage_info[j].num_pages[socket]++;
#endif
			}
		}
	}

	/* make a copy of socket_mem, needed for number of pages calculation */
	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
		memory[i] = internal_config.socket_mem[i];

	/* calculate final number of pages */
	nr_hugepages = calc_num_pages_per_socket(memory,
			internal_config.hugepage_info, used_hp,
			internal_config.num_hugepage_sizes);

	/* error if not enough memory available */
	if (nr_hugepages < 0)
		goto fail;

	/* report how many pages of each size will be requested */
	for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
		for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
			if (used_hp[i].num_pages[j] > 0) {
				RTE_LOG(INFO, EAL,
					"Requesting %u pages of size %uMB"
					" from socket %i\n",
					used_hp[i].num_pages[j],
					(unsigned)
					(used_hp[i].hugepage_sz / 0x100000),
					j);
			}
		}
	}

	/* create shared memory */
	hugepage = create_shared_memory(eal_hugepage_info_path(),
			nr_hugefiles * sizeof(struct hugepage_file));

	if (hugepage == NULL) {
		RTE_LOG(ERR, EAL, "Failed to create shared memory!\n");
		goto fail;
	}
	memset(hugepage, 0, nr_hugefiles * sizeof(struct hugepage_file));

	/*
	 * unmap pages that we won't need (looks at used_hp).
	 * also, sets final_va to NULL on pages that were unmapped.
	 */
	if (unmap_unneeded_hugepages(tmp_hp, used_hp,
			internal_config.num_hugepage_sizes) < 0) {
		RTE_LOG(ERR, EAL, "Unmapping and locking hugepages failed!\n");
		goto fail;
	}

	/*
	 * copy stuff from malloc'd hugepage* to the actual shared memory.
	 * this procedure only copies those hugepages that have final_va
	 * not NULL. has overflow protection.
	 */
	if (copy_hugepages_to_shared_mem(hugepage, nr_hugefiles,
			tmp_hp, nr_hugefiles) < 0) {
		RTE_LOG(ERR, EAL, "Copying tables to shared memory failed!\n");
		goto fail;
	}

	/* free the temporary hugepage table */
	free(tmp_hp);
	tmp_hp = NULL;

	/* find earliest free memseg - this is needed because in case of IVSHMEM,
	 * segments might have already been initialized */
	for (j = 0; j < RTE_MAX_MEMSEG; j++)
		if (mcfg->memseg[j].addr == NULL) {
			/* move to previous segment and exit loop */
			j--;
			break;
		}

	for (i = 0; i < nr_hugefiles; i++) {
		new_memseg = 0;

		/* if this is a new section, create a new memseg */
		if (i == 0)
			new_memseg = 1;
		else if (hugepage[i].socket_id != hugepage[i-1].socket_id)
			new_memseg = 1;
		else if (hugepage[i].size != hugepage[i-1].size)
			new_memseg = 1;
		else if ((hugepage[i].physaddr - hugepage[i-1].physaddr) !=
		    hugepage[i].size)
			new_memseg = 1;
		else if (((unsigned long)hugepage[i].final_va -
		    (unsigned long)hugepage[i-1].final_va) != hugepage[i].size)
			new_memseg = 1;

		if (new_memseg) {
			j += 1;
			if (j == RTE_MAX_MEMSEG)
				break;

			mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
			mcfg->memseg[j].addr = hugepage[i].final_va;
#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
			mcfg->memseg[j].len = hugepage[i].size * hugepage[i].repeated;
#else
			mcfg->memseg[j].len = hugepage[i].size;
#endif
			mcfg->memseg[j].socket_id = hugepage[i].socket_id;
			mcfg->memseg[j].hugepage_sz = hugepage[i].size;
		}
		/* continuation of previous memseg */
		else {
			mcfg->memseg[j].len += mcfg->memseg[j].hugepage_sz;
		}
		hugepage[i].memseg_id = j;
	}

	if (i < nr_hugefiles) {
		RTE_LOG(ERR, EAL, "Can only reserve %d pages "
			"from %d requested\n"
			"Current %s=%d is not enough\n"
			"Please either increase it or request less amount "
			"of memory.\n",
			i, nr_hugefiles, RTE_STR(CONFIG_RTE_MAX_MEMSEG),
			RTE_MAX_MEMSEG);
		return -ENOMEM;
	}

	return 0;

fail:
	if (tmp_hp)
		free(tmp_hp);
	return -1;
}
/*
 * Uses fstat to report the size of a file on disk.
 */
static size_t
getFileSize(int fd)
{
	struct stat st;
	if (fstat(fd, &st) < 0)
		return 0;
	return st.st_size;
}
/*
 * This creates the memory mappings in the secondary process to match that of
 * the server process. It goes through each memory segment in the DPDK runtime
 * configuration and finds the hugepages which form that segment, mapping them
 * in order to form a contiguous block in the virtual memory space.
 */
static int
rte_eal_hugepage_attach(void)
{
	const struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	const struct hugepage_file *hp = NULL;
	unsigned num_hp = 0;
	unsigned i, s = 0; /* s used to track the segment number */
	off_t size;
	int fd, fd_zero = -1, fd_hugepage = -1;

	if (aslr_enabled() > 0) {
		RTE_LOG(WARNING, EAL, "WARNING: Address Space Layout Randomization "
				"(ASLR) is enabled in the kernel.\n");
		RTE_LOG(WARNING, EAL, "   This may cause issues with mapping memory "
				"into secondary processes\n");
	}

	if (internal_config.xen_dom0_support) {
#ifdef RTE_LIBRTE_XEN_DOM0
		if (rte_xen_dom0_memory_attach() < 0) {
			RTE_LOG(ERR, EAL, "Failed to attach memory segments of primary "
					"process\n");
			return -1;
		}
		return 0;
#endif
	}

	fd_zero = open("/dev/zero", O_RDONLY);
	if (fd_zero < 0) {
		RTE_LOG(ERR, EAL, "Could not open /dev/zero\n");
		goto error;
	}
	fd_hugepage = open(eal_hugepage_info_path(), O_RDONLY);
	if (fd_hugepage < 0) {
		RTE_LOG(ERR, EAL, "Could not open %s\n", eal_hugepage_info_path());
		goto error;
	}

	/* map all segments into memory to make sure we get the addrs */
	for (s = 0; s < RTE_MAX_MEMSEG; ++s) {
		void *base_addr;

		/*
		 * the first memory segment with len==0 is the one that
		 * follows the last valid segment.
		 */
		if (mcfg->memseg[s].len == 0)
			break;

#ifdef RTE_LIBRTE_IVSHMEM
		/*
		 * if segment has ioremap address set, it's an IVSHMEM segment and
		 * doesn't need mapping as it was already mapped earlier
		 */
		if (mcfg->memseg[s].ioremap_addr != 0)
			continue;
#endif

		/*
		 * fd_zero is mmapped to get a contiguous block of virtual
		 * addresses of the appropriate memseg size.
		 * use mmap to get identical addresses as the primary process.
		 */
		base_addr = mmap(mcfg->memseg[s].addr, mcfg->memseg[s].len,
				PROT_READ, MAP_PRIVATE, fd_zero, 0);
		if (base_addr == MAP_FAILED ||
				base_addr != mcfg->memseg[s].addr) {
			RTE_LOG(ERR, EAL, "Could not mmap %llu bytes "
				"in /dev/zero to requested address [%p]: '%s'\n",
				(unsigned long long)mcfg->memseg[s].len,
				mcfg->memseg[s].addr, strerror(errno));
			if (aslr_enabled() > 0) {
				RTE_LOG(ERR, EAL, "It is recommended to "
					"disable ASLR in the kernel "
					"and retry running both primary "
					"and secondary processes\n");
			}
			goto error;
		}
	}

	size = getFileSize(fd_hugepage);
	hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0);
	if (hp == MAP_FAILED) {
		RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path());
		goto error;
	}

	num_hp = size / sizeof(struct hugepage_file);
	RTE_LOG(DEBUG, EAL, "Analysing %u files\n", num_hp);

	s = 0;
	while (s < RTE_MAX_MEMSEG && mcfg->memseg[s].len > 0){
		void *addr, *base_addr;
		uintptr_t offset = 0;
		size_t mapping_size;
#ifdef RTE_LIBRTE_IVSHMEM
		/*
		 * if segment has ioremap address set, it's an IVSHMEM segment and
		 * doesn't need mapping as it was already mapped earlier
		 */
		if (mcfg->memseg[s].ioremap_addr != 0) {
			s++;
			continue;
		}
#endif
		/*
		 * free previously mapped memory so we can map the
		 * hugepages into the space
		 */
		base_addr = mcfg->memseg[s].addr;
		munmap(base_addr, mcfg->memseg[s].len);

		/* find the hugepages for this segment and map them
		 * we don't need to worry about order, as the server sorted the
		 * entries before it did the second mmap of them */
		for (i = 0; i < num_hp && offset < mcfg->memseg[s].len; i++){
			if (hp[i].memseg_id == (int)s){
				fd = open(hp[i].filepath, O_RDWR);
				if (fd < 0) {
					RTE_LOG(ERR, EAL, "Could not open %s\n",
						hp[i].filepath);
					goto error;
				}
#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
				mapping_size = hp[i].size * hp[i].repeated;
#else
				mapping_size = hp[i].size;
#endif
				addr = mmap(RTE_PTR_ADD(base_addr, offset),
						mapping_size, PROT_READ | PROT_WRITE,
						MAP_SHARED, fd, 0);
				close(fd); /* close file both on success and on failure */
				if (addr == MAP_FAILED ||
						addr != RTE_PTR_ADD(base_addr, offset)) {
					RTE_LOG(ERR, EAL, "Could not mmap %s\n",
						hp[i].filepath);
					goto error;
				}
				offset += mapping_size;
			}
		}
		RTE_LOG(DEBUG, EAL, "Mapped segment %u of size 0x%llx\n", s,
				(unsigned long long)mcfg->memseg[s].len);
		s++;
	}
	/* unmap the hugepage config file, since we are done using it */
	munmap((void *)(uintptr_t)hp, size);
	close(fd_zero);
	close(fd_hugepage);
	return 0;

error:
	if (fd_zero >= 0)
		close(fd_zero);
	if (fd_hugepage >= 0)
		close(fd_hugepage);
	return -1;
}
static int
rte_eal_memdevice_init(void)
{
	struct rte_config *config;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	config = rte_eal_get_configuration();
	config->mem_config->nchannel = internal_config.force_nchannel;
	config->mem_config->nrank = internal_config.force_nrank;

	return 0;
}
/* init memory subsystem */
int
rte_eal_memory_init(void)
{
	RTE_LOG(INFO, EAL, "Setting up memory...\n");
	const int retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
			rte_eal_hugepage_init() :
			rte_eal_hugepage_attach();
	if (retval < 0)
		return -1;

	if (internal_config.no_shconf == 0 && rte_eal_memdevice_init() < 0)
		return -1;

	return 0;
}