/*-
 *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/ioctl.h>

#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_string_fns.h>
64 #include "eal_private.h"
65 #include "eal_internal_cfg.h"
66 #include "eal_filesystem.h"
67 #include "eal_hugepages.h"
/*
 * Huge page mapping under Linux
 *
 * To reserve a big contiguous amount of memory, we use the hugepage
 * feature of Linux. For that, we need to have hugetlbfs mounted. This
 * code will create many files in this directory (one per page) and
 * map them in virtual memory. For each page, we will retrieve its
 * physical address and remap it in order to have a virtual contiguous
 * zone as well as a physical contiguous zone.
 */
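/*
 * Illustrative setup, not part of this file: hugetlbfs is typically
 * mounted and pages reserved before the application starts, e.g.:
 *
 *   echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *   mount -t hugetlbfs nodev /mnt/huge
 *
 * The mount point ("/mnt/huge" here is just an example) is what this
 * code refers to as hpi->hugedir.
 */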
#define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"
/*
 * Check whether address-space layout randomization is enabled in
 * the kernel. This is important for multi-process as it can prevent
 * two processes mapping data to the same virtual address.
 * Returns:
 *    0 - address space randomization disabled
 *    1/2 - address space randomization enabled
 *    negative error code on error
 */
static int
aslr_enabled(void)
{
	char c;
	int retval, fd = open(RANDOMIZE_VA_SPACE_FILE, O_RDONLY);
	if (fd < 0)
		return -errno;
	retval = read(fd, &c, 1);
	close(fd);
	if (retval < 0)
		return -errno;
	switch (c) {
	case '0': return 0;
	case '1': return 1;
	case '2': return 2;
	default: return -EINVAL;
	}
}
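/*
 * Operational note (an aside, not from the original code): ASLR can
 * usually be disabled at run time with
 *   sysctl -w kernel.randomize_va_space=0
 * which writes '0' to the file read above.
 */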
/*
 * Try to mmap *size bytes in /dev/zero. If it is successful, return the
 * pointer to the mmap'd area and keep *size unmodified. Else, retry
 * with a smaller zone: decrease *size by hugepage_sz until it reaches
 * 0. In this case, return NULL. Note: this function returns an address
 * which is a multiple of hugepage size.
 */
static void *
get_virtual_area(uint64_t *size, uint64_t hugepage_sz)
{
	void *addr;
	int fd;
	long aligned_addr;

	RTE_LOG(INFO, EAL, "Ask a virtual area of 0x%"PRIx64" bytes\n", *size);

	fd = open("/dev/zero", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "Cannot open /dev/zero\n");
		return NULL;
	}
	do {
		addr = mmap(NULL, (*size) + hugepage_sz, PROT_READ, MAP_PRIVATE, fd, 0);
		if (addr == MAP_FAILED)
			*size -= hugepage_sz;
	} while (addr == MAP_FAILED && *size > 0);

	if (addr == MAP_FAILED) {
		close(fd);
		RTE_LOG(INFO, EAL, "Cannot get a virtual area\n");
		return NULL;
	}

	munmap(addr, (*size) + hugepage_sz);
	close(fd);
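	/*
	 * Worked example of the round-up below (illustrative values): with
	 * hugepage_sz = 2 MB (0x200000) and addr = 0x7f3200001000,
	 *   0x7f3200001000 + 0x1fffff = 0x7f3200200fff
	 *   0x7f3200200fff & ~0x1fffff = 0x7f3200200000
	 * i.e. the next 2 MB boundary. Mapping *size + hugepage_sz bytes
	 * above guarantees the aligned area still fits.
	 */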
	/* align addr to a huge page size boundary */
	aligned_addr = (long)addr;
	aligned_addr += (hugepage_sz - 1);
	aligned_addr &= (~(hugepage_sz - 1));
	addr = (void *)(aligned_addr);

	RTE_LOG(INFO, EAL, "Virtual area found at %p (size = 0x%"PRIx64")\n",
		addr, *size);
	return addr;
}
/*
 * Mmap all hugepages of hugepage table: it first opens a file in
 * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
 * virtual address is stored in hugepg_tbl[i].orig_va, else it is stored
 * in hugepg_tbl[i].final_va. The second mapping (when orig is 0) tries to
 * map contiguous physical blocks in contiguous virtual blocks.
 */
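/*
 * Note on file naming (an assumption based on eal_get_hugefile_path();
 * the exact pattern lives in eal_filesystem.h): each page i gets its own
 * file under hpi->hugedir, e.g. /mnt/huge/rtemap_0, /mnt/huge/rtemap_1,
 * so that a secondary process can later re-open and re-map the same pages.
 */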
static int
map_all_hugepages(struct hugepage *hugepg_tbl,
		struct hugepage_info *hpi, int orig)
{
	int fd;
	unsigned i;
	void *virtaddr;
	void *vma_addr = NULL;
	uint64_t vma_len = 0;

	for (i = 0; i < hpi->num_pages; i++) {
		uint64_t hugepage_sz = hpi->hugepage_sz;

		if (orig) {
			hugepg_tbl[i].file_id = i;
			hugepg_tbl[i].size = hugepage_sz;
			eal_get_hugefile_path(hugepg_tbl[i].filepath,
					sizeof(hugepg_tbl[i].filepath), hpi->hugedir,
					hugepg_tbl[i].file_id);
			hugepg_tbl[i].filepath[sizeof(hugepg_tbl[i].filepath) - 1] = '\0';
		}
#ifndef RTE_ARCH_X86_64
		/* for 32-bit systems, don't remap 1G pages, just reuse original
		 * map address as final map address. */
		else if (hugepage_sz == RTE_PGSIZE_1G){
			hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
			hugepg_tbl[i].orig_va = NULL;
			continue;
		}
#endif
		else if (vma_len == 0) {
			unsigned j, num_pages;

			/* reserve a virtual area for next contiguous
			 * physical block: count the number of
			 * contiguous physical pages. */
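			/*
			 * Illustrative example: with 2 MB pages whose physical
			 * addresses are 0x1e00000, 0x2000000, 0x2200000 and
			 * 0x2800000, the first three are contiguous (each exactly
			 * hugepage_sz after its predecessor), so num_pages = 3
			 * and vma_len = 6 MB; the fourth page starts a new block.
			 */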
			for (j = i+1; j < hpi->num_pages; j++) {
				if (hugepg_tbl[j].physaddr !=
				    hugepg_tbl[j-1].physaddr + hugepage_sz)
					break;
			}
			num_pages = j - i;
			vma_len = num_pages * hugepage_sz;

			/* get the biggest virtual memory area up to
			 * vma_len. If it fails, vma_addr is NULL, so
			 * let the kernel provide the address. */
			vma_addr = get_virtual_area(&vma_len, hpi->hugepage_sz);
			if (vma_addr == NULL)
				vma_len = hugepage_sz;
		}

		fd = open(hugepg_tbl[i].filepath, O_CREAT | O_RDWR, 0755);
		if (fd < 0) {
			RTE_LOG(ERR, EAL, "%s(): open failed: %s\n", __func__,
					strerror(errno));
			return -1;
		}

		virtaddr = mmap(vma_addr, hugepage_sz, PROT_READ | PROT_WRITE,
				MAP_SHARED, fd, 0);
		if (virtaddr == MAP_FAILED) {
			RTE_LOG(ERR, EAL, "%s(): mmap failed: %s\n", __func__,
					strerror(errno));
			close(fd);
			return -1;
		}

		if (orig) {
			hugepg_tbl[i].orig_va = virtaddr;
			memset(virtaddr, 0, hugepage_sz);
		}
		else {
			hugepg_tbl[i].final_va = virtaddr;
		}
		close(fd);

		vma_addr = (char *)vma_addr + hugepage_sz;
		vma_len -= hugepage_sz;
	}
	return 0;
}
/* Unmap all hugepages from original mapping. */
static int
unmap_all_hugepages_orig(struct hugepage *hugepg_tbl, struct hugepage_info *hpi)
{
	unsigned i;
	for (i = 0; i < hpi->num_pages; i++) {
		if (hugepg_tbl[i].orig_va) {
			munmap(hugepg_tbl[i].orig_va, hpi->hugepage_sz);
			hugepg_tbl[i].orig_va = NULL;
		}
	}
	return 0;
}
/*
 * For each hugepage in hugepg_tbl, fill the physaddr value. We find
 * it by browsing the /proc/self/pagemap special file.
 */
static int
find_physaddr(struct hugepage *hugepg_tbl, struct hugepage_info *hpi)
{
	int fd;
	unsigned i;
	uint64_t page;
	unsigned long virt_pfn;
	int page_size;
	off_t offset;

	/* standard page size */
	page_size = getpagesize();

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
				__func__, strerror(errno));
		return -1;
	}

	for (i = 0; i < hpi->num_pages; i++) {

		virt_pfn = (unsigned long)hugepg_tbl[i].orig_va /
			page_size;
		offset = sizeof(uint64_t) * virt_pfn;
		if (lseek(fd, offset, SEEK_SET) != offset){
			RTE_LOG(ERR, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
					__func__, strerror(errno));
			close(fd);
			return -1;
		}
		if (read(fd, &page, sizeof(uint64_t)) < 0) {
			RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
					__func__, strerror(errno));
			close(fd);
			return -1;
		}
		/*
		 * the pfn (page frame number) is in bits 0-54 (see
		 * pagemap.txt in the Linux Documentation)
		 */
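		/*
		 * Illustrative example: with page_size = 4096 and a pagemap
		 * entry whose low 55 bits are 0x12345, the physical address
		 * is 0x12345 * 4096 = 0x12345000. 0x7fffffffffffffULL is the
		 * 55-bit mask that strips the flag bits (55-63) of the entry.
		 */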
		hugepg_tbl[i].physaddr = ((page & 0x7fffffffffffffULL) * page_size);
	}
	close(fd);
	return 0;
}
/*
 * Parse /proc/self/numa_maps to get the NUMA socket ID for each huge
 * page.
 */
static int
find_numasocket(struct hugepage *hugepg_tbl, struct hugepage_info *hpi)
{
	int socket_id;
	char *end, *nodestr;
	unsigned i, hp_count = 0;
	uint64_t virt_addr;
	char buf[BUFSIZ];
	char hugedir_str[PATH_MAX];
	FILE *f;

	f = fopen("/proc/self/numa_maps", "r");
	if (f == NULL) {
		RTE_LOG(INFO, EAL, "cannot open /proc/self/numa_maps,"
				" consider that all memory is in socket_id 0\n");
		return 0;
	}

	rte_snprintf(hugedir_str, sizeof(hugedir_str),
			"%s/", hpi->hugedir);
	/* parse numa map */
	while (fgets(buf, sizeof(buf), f) != NULL) {

		/* ignore non huge page */
		if (strstr(buf, " huge ") == NULL &&
				strstr(buf, hugedir_str) == NULL)
			continue;

		/* get zone addr */
		virt_addr = strtoull(buf, &end, 16);
		if (virt_addr == 0 || end == buf) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}

		/* get node id (socket id) */
		nodestr = strstr(buf, " N");
		if (nodestr == NULL) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}
		nodestr += 2;
		end = strstr(nodestr, "=");
		if (end == NULL) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}
		end[0] = '\0';
		end = NULL;

		socket_id = strtoul(nodestr, &end, 0);
		if ((nodestr[0] == '\0') || (end == NULL) || (*end != '\0')) {
			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
			goto error;
		}

		/* if we find this page in our mappings, set socket_id */
		for (i = 0; i < hpi->num_pages; i++) {
			void *va = (void *)(unsigned long)virt_addr;
			if (hugepg_tbl[i].orig_va == va) {
				hugepg_tbl[i].socket_id = socket_id;
				hp_count++;
			}
		}
	}
	if (hp_count < hpi->num_pages)
		goto error;
	fclose(f);
	return 0;

error:
	fclose(f);
	return -1;
}
/*
 * Sort the hugepg_tbl by physical address (lower addresses first). We
 * use a slow algorithm, but we won't have millions of pages, and this
 * is only done at init time.
 */
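/*
 * What follows is a straightforward in-place selection sort: for each
 * position i, scan entries i..n-1 for the smallest physical address and
 * swap it into place. O(n^2), which is fine for the page counts seen at
 * init time.
 */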
static int
sort_by_physaddr(struct hugepage *hugepg_tbl, struct hugepage_info *hpi)
{
	unsigned i, j;
	int smallest_idx;
	uint64_t smallest_addr;
	struct hugepage tmp;

	for (i = 0; i < hpi->num_pages; i++) {
		smallest_addr = 0;
		smallest_idx = -1;

		/*
		 * browse all entries starting at 'i', and find the
		 * entry with the smallest addr
		 */
		for (j = i; j < hpi->num_pages; j++) {
			if (smallest_addr == 0 ||
			    hugepg_tbl[j].physaddr < smallest_addr) {
				smallest_addr = hugepg_tbl[j].physaddr;
				smallest_idx = j;
			}
		}

		/* should not happen */
		if (smallest_idx == -1) {
			RTE_LOG(ERR, EAL, "%s(): error in physaddr sorting\n", __func__);
			return -1;
		}

		/* swap the 2 entries in the table */
		memcpy(&tmp, &hugepg_tbl[smallest_idx], sizeof(struct hugepage));
		memcpy(&hugepg_tbl[smallest_idx], &hugepg_tbl[i],
				sizeof(struct hugepage));
		memcpy(&hugepg_tbl[i], &tmp, sizeof(struct hugepage));
	}
	return 0;
}
/*
 * Uses mmap to create a shared memory area for storage of data.
 * Used in this file to store the hugepage file map on disk.
 */
static void *
create_shared_memory(const char *filename, const size_t mem_size)
{
	void *retval;
	int fd = open(filename, O_CREAT | O_RDWR, 0666);
	if (fd < 0)
		return NULL;
	if (ftruncate(fd, mem_size) < 0) {
		close(fd);
		return NULL;
	}
	retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);
	return retval;
}
/*
 * This function takes in the list of hugepage sizes and the
 * number of pages thereof, and calculates the best number of
 * pages of each size to fulfill the request for <memory> ram.
 */
static int
calc_num_pages(uint64_t memory,
		struct hugepage_info *hp_info,
		struct hugepage_info *hp_used,
		unsigned num_hp_info)
{
	unsigned i, j;
	int total_num_pages = 0;
	if (num_hp_info == 0)
		return -1;

	for (i = 0; i < num_hp_info; i++){
		hp_used[i].hugepage_sz = hp_info[i].hugepage_sz;
		hp_used[i].hugedir = hp_info[i].hugedir;
		hp_used[i].num_pages = RTE_MIN(memory / hp_info[i].hugepage_sz,
				hp_info[i].num_pages);

		memory -= hp_used[i].num_pages * hp_used[i].hugepage_sz;
		total_num_pages += hp_used[i].num_pages;

		/* check if we have met all memory requests */
		if (memory == 0)
			break;

		/* check if we have any more pages left at this size, if so
		 * move on to next size */
		if (hp_used[i].num_pages == hp_info[i].num_pages)
			continue;
		/* At this point we know that there are more pages available that are
		 * bigger than the memory we want, so let's see if we can get enough
		 * from other page sizes.
		 */
		uint64_t remaining_mem = 0;
		for (j = i+1; j < num_hp_info; j++)
			remaining_mem += hp_info[j].hugepage_sz * hp_info[j].num_pages;

		/* is there enough other memory? if not, allocate another page and quit */
		if (remaining_mem < memory){
			memory -= hp_info[i].hugepage_sz;
			hp_used[i].num_pages++;
			total_num_pages++;
			break; /* we are done */
		}
	}
	return total_num_pages;
}
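/*
 * Worked example (illustrative numbers): requesting memory = 1.5 GB with
 * hp_info = { 1 GB pages: 4 available, 2 MB pages: 1024 available }.
 * Pass 1 takes RTE_MIN(1.5 GB / 1 GB, 4) = one 1 GB page, leaving 512 MB
 * outstanding; since 1024 * 2 MB = 2 GB >= 512 MB, the loop moves on and
 * takes 256 of the 2 MB pages, returning total_num_pages = 257.
 */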
/*
 * Prepare physical memory mapping: fill the configuration structure with
 * this info, return 0 on success.
 *  1. map N huge pages in separate files in hugetlbfs
 *  2. find associated physical addr
 *  3. find associated NUMA socket ID
 *  4. sort all huge pages by physical address
 *  5. remap these N huge pages in the correct order
 *  6. unmap the first mapping
 *  7. fill memsegs in configuration with contiguous zones
 */
int
rte_eal_hugepage_init(void)
{
	struct rte_mem_config *mcfg;
	struct hugepage *hugepage;
	struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
	int i, j, new_memseg;
	int nrpages;
	void *addr;

	memset(used_hp, 0, sizeof(used_hp));

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	/* for debug purposes, hugetlbfs can be disabled */
	if (internal_config.no_hugetlbfs) {
		addr = malloc(internal_config.memory);
		mcfg->memseg[0].phys_addr = (unsigned long)addr;
		mcfg->memseg[0].addr = addr;
		mcfg->memseg[0].len = internal_config.memory;
		mcfg->memseg[0].socket_id = 0;
		return 0;
	}

	nrpages = calc_num_pages(internal_config.memory,
			&internal_config.hugepage_info[0], &used_hp[0],
			internal_config.num_hugepage_sizes);
	for (i = 0; i < (int)internal_config.num_hugepage_sizes; i++)
		RTE_LOG(INFO, EAL, "Requesting %u pages of size %"PRIu64"\n",
				used_hp[i].num_pages, used_hp[i].hugepage_sz);

	hugepage = create_shared_memory(eal_hugepage_info_path(),
			nrpages * sizeof(struct hugepage));
	if (hugepage == NULL)
		return -1;
	memset(hugepage, 0, nrpages * sizeof(struct hugepage));

	unsigned hp_offset = 0; /* where we start the current page size entries */
	for (i = 0; i < (int)internal_config.num_hugepage_sizes; i++){
		struct hugepage_info *hpi = &used_hp[i];
		if (hpi->num_pages == 0)
			continue;

		if (map_all_hugepages(&hugepage[hp_offset], hpi, 1) < 0){
			RTE_LOG(DEBUG, EAL, "Failed to mmap %u MB hugepages\n",
					(unsigned)(hpi->hugepage_sz / 0x100000));
			return -1;
		}

		if (find_physaddr(&hugepage[hp_offset], hpi) < 0){
			RTE_LOG(DEBUG, EAL, "Failed to find phys addr for %u MB pages\n",
					(unsigned)(hpi->hugepage_sz / 0x100000));
			return -1;
		}

		if (find_numasocket(&hugepage[hp_offset], hpi) < 0){
			RTE_LOG(DEBUG, EAL, "Failed to find NUMA socket for %u MB pages\n",
					(unsigned)(hpi->hugepage_sz / 0x100000));
			return -1;
		}

		if (sort_by_physaddr(&hugepage[hp_offset], hpi) < 0)
			return -1;

		if (map_all_hugepages(&hugepage[hp_offset], hpi, 0) < 0){
			RTE_LOG(DEBUG, EAL, "Failed to remap %u MB pages\n",
					(unsigned)(hpi->hugepage_sz / 0x100000));
			return -1;
		}

		if (unmap_all_hugepages_orig(&hugepage[hp_offset], hpi) < 0)
			return -1;

		/* we have processed a num of hugepages of this size, so inc offset */
		hp_offset += hpi->num_pages;
	}
	memset(mcfg->memseg, 0, sizeof(mcfg->memseg));
	j = -1;
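	/*
	 * Coalescing example (illustrative): two 2 MB pages at physical
	 * 0x1e00000 and 0x2000000 with adjacent final_va values merge into
	 * one 4 MB memseg; a hole in either the physical or the virtual
	 * address space, a different page size, or a different socket_id
	 * starts a new memseg.
	 */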
	for (i = 0; i < nrpages; i++) {
		new_memseg = 0;

		/* if this is a new section, create a new memseg */
		if (i == 0)
			new_memseg = 1;
		else if (hugepage[i].socket_id != hugepage[i-1].socket_id)
			new_memseg = 1;
		else if (hugepage[i].size != hugepage[i-1].size)
			new_memseg = 1;
		else if ((hugepage[i].physaddr - hugepage[i-1].physaddr) !=
		    hugepage[i].size)
			new_memseg = 1;
		else if (((unsigned long)hugepage[i].final_va -
		    (unsigned long)hugepage[i-1].final_va) != hugepage[i].size)
			new_memseg = 1;

		if (new_memseg) {
			j += 1;
			if (j == RTE_MAX_MEMSEG)
				break;

			mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
			mcfg->memseg[j].addr = hugepage[i].final_va;
			mcfg->memseg[j].len = hugepage[i].size;
			mcfg->memseg[j].socket_id = hugepage[i].socket_id;
			mcfg->memseg[j].hugepage_sz = hugepage[i].size;
		}
		/* continuation of previous memseg */
		else {
			mcfg->memseg[j].len += mcfg->memseg[j].hugepage_sz;
		}
		hugepage[i].memseg_id = j;
	}

	return 0;
}
/*
 * uses fstat to report the size of a file on disk
 */
static off_t
getFileSize(int fd)
{
	struct stat st;
	if (fstat(fd, &st) < 0)
		return 0;
	return st.st_size;
}
/*
 * This creates the memory mappings in the secondary process to match that of
 * the primary (server) process. It goes through each memory segment in the
 * DPDK runtime configuration and finds the hugepages which form that segment,
 * mapping them in order to form a contiguous block in the virtual memory
 * space.
 */
int
rte_eal_hugepage_attach(void)
{
	const struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	const struct hugepage *hp = NULL;
	unsigned num_hp = 0;
	unsigned i, s = 0; /* s used to track the segment number */
	off_t size;
	int fd, fd_zero = -1, fd_hugepage = -1;

	if (aslr_enabled() > 0) {
		RTE_LOG(WARNING, EAL, "WARNING: Address Space Layout Randomization "
				"(ASLR) is enabled in the kernel.\n");
		RTE_LOG(WARNING, EAL, "   This may cause issues with mapping memory "
				"into secondary processes\n");
	}

	fd_zero = open("/dev/zero", O_RDONLY);
	if (fd_zero < 0) {
		RTE_LOG(ERR, EAL, "Could not open /dev/zero\n");
		goto error;
	}
	fd_hugepage = open(eal_hugepage_info_path(), O_RDONLY);
	if (fd_hugepage < 0) {
		RTE_LOG(ERR, EAL, "Could not open %s\n", eal_hugepage_info_path());
		goto error;
	}

	size = getFileSize(fd_hugepage);
	hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0);
	if (hp == MAP_FAILED) {
		RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path());
		goto error;
	}

	num_hp = size / sizeof(struct hugepage);
	RTE_LOG(DEBUG, EAL, "Analysing %u hugepages\n", num_hp);

	while (s < RTE_MAX_MEMSEG && mcfg->memseg[s].len > 0){
		void *addr, *base_addr;
		uintptr_t offset = 0;
		/* fd_zero is mmapped to get a contiguous block of virtual
		 * addresses of the appropriate memseg size -
		 * use mmap to attempt to get an identical address to the primary. */
		base_addr = mmap(mcfg->memseg[s].addr, mcfg->memseg[s].len,
				PROT_READ, MAP_PRIVATE, fd_zero, 0);
		if (base_addr == MAP_FAILED || base_addr != mcfg->memseg[s].addr) {
			RTE_LOG(ERR, EAL, "Could not mmap %llu bytes "
					"in /dev/zero to requested address [%p]\n",
					(unsigned long long)mcfg->memseg[s].len,
					mcfg->memseg[s].addr);
			if (aslr_enabled() > 0)
				RTE_LOG(ERR, EAL, "It is recommended to disable ASLR in the kernel "
						"and retry running both primary and secondary processes\n");
			goto error;
		}
		/* free memory so we can map the hugepages into the space */
		munmap(base_addr, mcfg->memseg[s].len);

		/* find the hugepages for this segment and map them
		 * we don't need to worry about order, as the server sorted the
		 * entries before it did the second mmap of them */
		for (i = 0; i < num_hp && offset < mcfg->memseg[s].len; i++){
			if (hp[i].memseg_id == (int)s){
				fd = open(hp[i].filepath, O_RDWR);
				if (fd < 0) {
					RTE_LOG(ERR, EAL, "Could not open %s\n",
							hp[i].filepath);
					goto error;
				}
				addr = mmap(RTE_PTR_ADD(base_addr, offset),
						hp[i].size, PROT_READ | PROT_WRITE,
						MAP_SHARED | MAP_FIXED, fd, 0);
				close(fd); /* close file both on success and on failure */
				if (addr == MAP_FAILED) {
					RTE_LOG(ERR, EAL, "Could not mmap %s\n",
							hp[i].filepath);
					goto error;
				}
				offset += hp[i].size;
			}
		}
		RTE_LOG(DEBUG, EAL, "Mapped segment %u of size 0x%llx\n", s,
				(unsigned long long)mcfg->memseg[s].len);
		s++;
	}
	close(fd_zero);
	close(fd_hugepage);
	return 0;
error:
	if (fd_zero >= 0)
		close(fd_zero);
	if (fd_hugepage >= 0)
		close(fd_hugepage);
	return -1;
}
static int
rte_eal_memdevice_init(void)
{
	struct rte_config *config;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	config = rte_eal_get_configuration();
	config->mem_config->nchannel = internal_config.force_nchannel;
	config->mem_config->nrank = internal_config.force_nrank;

	return 0;
}
/* init memory subsystem */
int
rte_eal_memory_init(void)
{
	const int retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
			rte_eal_hugepage_init() :
			rte_eal_hugepage_attach();
	if (retval < 0)
		return -1;

	if (internal_config.no_shconf == 0 && rte_eal_memdevice_init() < 0)
		return -1;

	return 0;
}