X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_eal%2Flinuxapp%2Feal%2Feal_memory.c;h=e6cb919bcd71b2069bbdea19f7c07a7bccdf003b;hb=8436acd6ba8c37c560171923dca2407c56438c7a;hp=4bd8987cab580ecd7b0f2989b94bb50da65f82bd;hpb=e9d48c0072d36eb6423b45fba4ec49d0def6c36f;p=dpdk.git

diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 4bd8987cab..e6cb919bcd 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -1,13 +1,13 @@
 /*-
  * BSD LICENSE
- * 
+ *
  * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  * All rights reserved.
- * 
+ *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
- * 
+ *
  * * Redistributions of source code must retain the above copyright
  * notice, this list of conditions and the following disclaimer.
  * * Redistributions in binary form must reproduce the above copyright
@@ -17,7 +17,7 @@
  * * Neither the name of Intel Corporation nor the names of its
  * contributors may be used to endorse or promote products derived
  * from this software without specific prior written permission.
- * 
+ *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -61,6 +61,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#define _FILE_OFFSET_BITS 64
 #include
 #include
 #include
@@ -109,9 +110,86 @@
  * zone as well as a physical contiguous zone.
  */
 
+static uint64_t baseaddr_offset;
+
 #define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"
 
+/* Lock page in physical memory and prevent from swapping. */
+int
+rte_mem_lock_page(const void *virt)
+{
+	unsigned long virtual = (unsigned long)virt;
+	int page_size = getpagesize();
+	unsigned long aligned = (virtual & ~(page_size - 1));
+	return mlock((void *)aligned, page_size);
+}
+
+/*
+ * Get physical address of any mapped virtual address in the current process.
+ */
+phys_addr_t
+rte_mem_virt2phy(const void *virtaddr)
+{
+	int fd;
+	uint64_t page, physaddr;
+	unsigned long virt_pfn;
+	int page_size;
+	off_t offset;
+
+	/* standard page size */
+	page_size = getpagesize();
+
+	fd = open("/proc/self/pagemap", O_RDONLY);
+	if (fd < 0) {
+		RTE_LOG(ERR, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
+			__func__, strerror(errno));
+		return RTE_BAD_PHYS_ADDR;
+	}
+
+	virt_pfn = (unsigned long)virtaddr / page_size;
+	offset = sizeof(uint64_t) * virt_pfn;
+	if (lseek(fd, offset, SEEK_SET) == (off_t) -1) {
+		RTE_LOG(ERR, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
+			__func__, strerror(errno));
+		close(fd);
+		return RTE_BAD_PHYS_ADDR;
+	}
+	if (read(fd, &page, sizeof(uint64_t)) < 0) {
+		RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
+			__func__, strerror(errno));
+		close(fd);
+		return RTE_BAD_PHYS_ADDR;
+	}
+
+	/*
+	 * the pfn (page frame number) is bits 0-54 (see
+	 * pagemap.txt in linux Documentation)
+	 */
+	physaddr = ((page & 0x7fffffffffffffULL) * page_size)
+		+ ((unsigned long)virtaddr % page_size);
+	close(fd);
+	return physaddr;
+}
+
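The two helpers above work as a pair in practice: rte_mem_lock_page() faults the page in and pins it, which guarantees a populated pagemap entry, and rte_mem_virt2phy() then resolves it. A minimal usage sketch (illustrative only, not part of the patch; assumes an initialized EAL plus <stdio.h> and <inttypes.h>):

static void
virt2phy_example(void)
{
	static char buf[4096];		/* arbitrary buffer to resolve */
	phys_addr_t pa;

	/* lock first: a never-touched page may have no pagemap entry yet */
	if (rte_mem_lock_page(buf) < 0)
		return;

	pa = rte_mem_virt2phy(buf);
	if (pa == RTE_BAD_PHYS_ADDR)
		return;

	printf("virt %p -> phys 0x%" PRIx64 "\n", (void *)buf, (uint64_t)pa);
}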
+/*
+ * For each hugepage in hugepg_tbl, fill the physaddr value. We find
+ * it by browsing the /proc/self/pagemap special file.
+ */
+static int
+find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
+{
+	unsigned i;
+	phys_addr_t addr;
+
+	for (i = 0; i < hpi->num_pages[0]; i++) {
+		addr = rte_mem_virt2phy(hugepg_tbl[i].orig_va);
+		if (addr == RTE_BAD_PHYS_ADDR)
+			return -1;
+		hugepg_tbl[i].physaddr = addr;
+	}
+	return 0;
+}
+
 /*
  * Check whether address-space layout randomization is enabled in
  * the kernel. This is important for multi-process as it can prevent
@@ -143,7 +221,7 @@ aslr_enabled(void)
 }
 
 /*
- * Try to mmap *size bytes in /dev/zero. If it is succesful, return the
+ * Try to mmap *size bytes in /dev/zero. If it is successful, return the
  * pointer to the mmap'd area and keep *size unmodified. Else, retry
  * with a smaller zone: decrease *size by hugepage_sz until it reaches
  * 0. In this case, return NULL. Note: this function returns an address
@@ -156,7 +234,13 @@ get_virtual_area(size_t *size, size_t hugepage_sz)
 	int fd;
 	long aligned_addr;
 
-	RTE_LOG(INFO, EAL, "Ask a virtual area of 0x%zu bytes\n", *size);
+	if (internal_config.base_virtaddr != 0) {
+		addr = (void *)(uintptr_t)(internal_config.base_virtaddr +
+				baseaddr_offset);
+	}
+	else addr = NULL;
+
+	RTE_LOG(INFO, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);
 
 	fd = open("/dev/zero", O_RDONLY);
 	if (fd < 0){
@@ -164,7 +248,8 @@ get_virtual_area(size_t *size, size_t hugepage_sz)
 		return NULL;
 	}
 	do {
-		addr = mmap(NULL, (*size) + hugepage_sz, PROT_READ, MAP_PRIVATE, fd, 0);
+		addr = mmap(addr,
+				(*size) + hugepage_sz, PROT_READ, MAP_PRIVATE, fd, 0);
 		if (addr == MAP_FAILED)
 			*size -= hugepage_sz;
 	} while (addr == MAP_FAILED && *size > 0);
@@ -187,6 +272,9 @@ get_virtual_area(size_t *size, size_t hugepage_sz)
 	RTE_LOG(INFO, EAL, "Virtual area found at %p (size = 0x%zx)\n",
 		addr, *size);
 
+	/* increment offset */
+	baseaddr_offset += *size;
+
 	return addr;
 }
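The base_virtaddr change relies on standard mmap() hint semantics: a non-NULL first argument without MAP_FIXED is only a preference, so the caller must check what actually came back. The running baseaddr_offset is what makes successive reservations land back to back behind --base-virtaddr. A self-contained sketch of the same pattern (hypothetical hint address, anonymous mapping instead of /dev/zero):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *hint = (void *)0x100000000ULL;	/* example hint only */
	size_t sz = 2 * 1024 * 1024;
	void *a, *b;

	/* without MAP_FIXED the kernel may place us elsewhere */
	a = mmap(hint, sz, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	/* next request starts where the previous one ended */
	b = mmap((char *)hint + sz, sz, PROT_READ,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	printf("asked %p and %p, got %p and %p\n",
			hint, (void *)((char *)hint + sz), a, b);
	return 0;
}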
@@ -198,7 +286,7 @@ get_virtual_area(size_t *size, size_t hugepage_sz)
  * map contiguous physical blocks in contiguous virtual blocks.
  */
 static int
-map_all_hugepages(struct hugepage *hugepg_tbl,
+map_all_hugepages(struct hugepage_file *hugepg_tbl,
 		struct hugepage_info *hpi, int orig)
 {
 	int fd;
@@ -207,27 +295,40 @@ map_all_hugepages(struct hugepage *hugepg_tbl,
 	void *vma_addr = NULL;
 	size_t vma_len = 0;
 
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+	RTE_SET_USED(vma_len);
+#endif
+
 	for (i = 0; i < hpi->num_pages[0]; i++) {
 		size_t hugepage_sz = hpi->hugepage_sz;
 
 		if (orig) {
 			hugepg_tbl[i].file_id = i;
 			hugepg_tbl[i].size = hugepage_sz;
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+			eal_get_hugefile_temp_path(hugepg_tbl[i].filepath,
+					sizeof(hugepg_tbl[i].filepath), hpi->hugedir,
+					hugepg_tbl[i].file_id);
+#else
 			eal_get_hugefile_path(hugepg_tbl[i].filepath,
 					sizeof(hugepg_tbl[i].filepath), hpi->hugedir,
 					hugepg_tbl[i].file_id);
+#endif
 			hugepg_tbl[i].filepath[sizeof(hugepg_tbl[i].filepath) - 1] = '\0';
 		}
-#ifndef RTE_ARCH_X86_64
-		/* for 32-bit systems, don't remap 1G pages, just reuse original
-		 * map address as final map address.
+#ifndef RTE_ARCH_64
+		/* for 32-bit systems, don't remap 1G and 16G pages, just reuse
+		 * original map address as final map address.
 		 */
-		else if (hugepage_sz == RTE_PGSIZE_1G){
+		else if ((hugepage_sz == RTE_PGSIZE_1G)
+			|| (hugepage_sz == RTE_PGSIZE_16G)) {
 			hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
 			hugepg_tbl[i].orig_va = NULL;
 			continue;
 		}
 #endif
+
+#ifndef RTE_EAL_SINGLE_FILE_SEGMENTS
 		else if (vma_len == 0) {
 			unsigned j, num_pages;
 
@@ -235,9 +336,17 @@ map_all_hugepages(struct hugepage *hugepg_tbl,
 			 * physical block: count the number of
 			 * contiguous physical pages. */
 			for (j = i+1; j < hpi->num_pages[0] ; j++) {
+#ifdef RTE_ARCH_PPC_64
+				/* The physical addresses are sorted in
+				 * descending order on PPC64 */
+				if (hugepg_tbl[j].physaddr !=
+				    hugepg_tbl[j-1].physaddr - hugepage_sz)
+					break;
+#else
 				if (hugepg_tbl[j].physaddr !=
 				    hugepg_tbl[j-1].physaddr + hugepage_sz)
 					break;
+#endif
 			}
 			num_pages = j - i;
 			vma_len = num_pages * hugepage_sz;
@@ -249,6 +358,7 @@ map_all_hugepages(struct hugepage *hugepg_tbl,
 			if (vma_addr == NULL)
 				vma_len = hugepage_sz;
 		}
+#endif
 
 		/* try to create hugepage file */
 		fd = open(hugepg_tbl[i].filepath, O_CREAT | O_RDWR, 0755);
@@ -291,77 +401,199 @@ map_all_hugepages(struct hugepage *hugepg_tbl,
 	return 0;
 }
 
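The contiguity scan inside map_all_hugepages() reduces to a run-length count over the physaddr-sorted table; the same loop reappears in remap_all_hugepages() below. Factored out as a sketch (ascending variant; the PPC64 build checks descending adjacency instead):

static unsigned
contig_run_len(const struct hugepage_file *tbl, unsigned start,
		unsigned nb_pages, size_t page_sz)
{
	unsigned j;

	/* extend the run while each page sits page_sz above its predecessor */
	for (j = start + 1; j < nb_pages; j++)
		if (tbl[j].physaddr != tbl[j - 1].physaddr + page_sz)
			break;
	return j - start;	/* number of physically contiguous pages */
}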
-/* Unmap all hugepages from original mapping. */
-static int
-unmap_all_hugepages_orig(struct hugepage *hugepg_tbl, struct hugepage_info *hpi)
-{
-	unsigned i;
-	for (i = 0; i < hpi->num_pages[0]; i++) {
-		if (hugepg_tbl[i].orig_va) {
-			munmap(hugepg_tbl[i].orig_va, hpi->hugepage_sz);
-			hugepg_tbl[i].orig_va = NULL;
-		}
-	}
-	return 0;
-}
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
 
 /*
- * For each hugepage in hugepg_tbl, fill the physaddr value. We find
- * it by browsing the /proc/self/pagemap special file.
+ * Remaps all hugepages into single file segments
  */
 static int
-find_physaddr(struct hugepage *hugepg_tbl, struct hugepage_info *hpi)
+remap_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 {
 	int fd;
-	unsigned i;
-	uint64_t page;
-	unsigned long virt_pfn;
-	int page_size;
+	unsigned i = 0, j, num_pages, page_idx = 0;
+	void *vma_addr = NULL, *old_addr = NULL, *page_addr = NULL;
+	size_t vma_len = 0;
+	size_t hugepage_sz = hpi->hugepage_sz;
+	size_t total_size, offset;
+	char filepath[MAX_HUGEPAGE_PATH];
+	phys_addr_t physaddr;
+	int socket;
 
-	/* standard page size */
-	page_size = getpagesize();
+	while (i < hpi->num_pages[0]) {
 
-	fd = open("/proc/self/pagemap", O_RDONLY);
-	if (fd < 0) {
-		RTE_LOG(ERR, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
-			__func__, strerror(errno));
-		return -1;
-	}
+#ifndef RTE_ARCH_64
+		/* for 32-bit systems, don't remap 1G pages and 16G pages,
+		 * just reuse original map address as final map address.
+		 */
+		if ((hugepage_sz == RTE_PGSIZE_1G)
+			|| (hugepage_sz == RTE_PGSIZE_16G)) {
+			hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
+			hugepg_tbl[i].orig_va = NULL;
+			i++;
+			continue;
+		}
+#endif
 
-	for (i = 0; i < hpi->num_pages[0]; i++) {
-		off_t offset;
-		virt_pfn = (unsigned long)hugepg_tbl[i].orig_va /
-			page_size;
-		offset = sizeof(uint64_t) * virt_pfn;
-		if (lseek(fd, offset, SEEK_SET) == (off_t) -1) {
-			RTE_LOG(ERR, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
-				__func__, strerror(errno));
-			close(fd);
+		/* reserve a virtual area for next contiguous
+		 * physical block: count the number of
+		 * contiguous physical pages. */
+		for (j = i+1; j < hpi->num_pages[0] ; j++) {
+#ifdef RTE_ARCH_PPC_64
+			/* The physical addresses are sorted in descending
+			 * order on PPC64 */
+			if (hugepg_tbl[j].physaddr !=
+			    hugepg_tbl[j-1].physaddr - hugepage_sz)
+				break;
+#else
+			if (hugepg_tbl[j].physaddr !=
+			    hugepg_tbl[j-1].physaddr + hugepage_sz)
+				break;
+#endif
+		}
+		num_pages = j - i;
+		vma_len = num_pages * hugepage_sz;
+
+		socket = hugepg_tbl[i].socket_id;
+
+		/* get the biggest virtual memory area up to
+		 * vma_len. If it fails, vma_addr is NULL, so
+		 * let the kernel provide the address. */
+		vma_addr = get_virtual_area(&vma_len, hpi->hugepage_sz);
+
+		/* If we can't find a big enough virtual area, work out how many pages
+		 * we are going to get */
+		if (vma_addr == NULL)
+			j = i + 1;
+		else if (vma_len != num_pages * hugepage_sz) {
+			num_pages = vma_len / hugepage_sz;
+			j = i + num_pages;
+
+		}
+
+		hugepg_tbl[page_idx].file_id = page_idx;
+		eal_get_hugefile_path(filepath,
+				sizeof(filepath),
+				hpi->hugedir,
+				hugepg_tbl[page_idx].file_id);
+
+		/* try to create hugepage file */
+		fd = open(filepath, O_CREAT | O_RDWR, 0755);
+		if (fd < 0) {
+			RTE_LOG(ERR, EAL, "%s(): open failed: %s\n", __func__, strerror(errno));
 			return -1;
 		}
-		if (read(fd, &page, sizeof(uint64_t)) < 0) {
-			RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
-				__func__, strerror(errno));
+
+		total_size = 0;
+		for (;i < j; i++) {
+
+			/* unmap current segment */
+			if (total_size > 0)
+				munmap(vma_addr, total_size);
+
+			/* unmap original page */
+			munmap(hugepg_tbl[i].orig_va, hugepage_sz);
+			unlink(hugepg_tbl[i].filepath);
+
+			total_size += hugepage_sz;
+
+			old_addr = vma_addr;
+
+			/* map new, bigger segment */
+			vma_addr = mmap(vma_addr, total_size,
+					PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+
+			if (vma_addr == MAP_FAILED || vma_addr != old_addr) {
+				RTE_LOG(ERR, EAL, "%s(): mmap failed: %s\n", __func__, strerror(errno));
+				close(fd);
+				return -1;
+			}
+
+			/* touch the page. this is needed because kernel postpones mapping
+			 * creation until the first page fault. with this, we pin down
+			 * the page and it is marked as used and gets into process' pagemap.
+			 */
+			for (offset = 0; offset < total_size; offset += hugepage_sz)
+				*((volatile uint8_t *)RTE_PTR_ADD(vma_addr, offset));
+		}
+
+		/* set shared flock on the file. */
+		if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
+			RTE_LOG(ERR, EAL, "%s(): Locking file failed: %s\n",
+				__func__, strerror(errno));
 			close(fd);
 			return -1;
 		}
 
-		/*
-		 * the pfn (page frame number) are bits 0-54 (see
-		 * pagemap.txt in linux Documentation)
+		snprintf(hugepg_tbl[page_idx].filepath, MAX_HUGEPAGE_PATH, "%s",
+				filepath);
+
+		physaddr = rte_mem_virt2phy(vma_addr);
+
+		if (physaddr == RTE_BAD_PHYS_ADDR)
+			return -1;
+
+		hugepg_tbl[page_idx].final_va = vma_addr;
+
+		hugepg_tbl[page_idx].physaddr = physaddr;
+
+		hugepg_tbl[page_idx].repeated = num_pages;
+
+		hugepg_tbl[page_idx].socket_id = socket;
+
+		close(fd);
+
+		/* verify the memory segment - that is, check that every VA corresponds
+		 * to the physical address we expect to see
 		 */
-		hugepg_tbl[i].physaddr = ((page & 0x7fffffffffffffULL) * page_size);
+		for (offset = 0; offset < vma_len; offset += hugepage_sz) {
+			uint64_t expected_physaddr;
+
+			expected_physaddr = hugepg_tbl[page_idx].physaddr + offset;
+			page_addr = RTE_PTR_ADD(vma_addr, offset);
+			physaddr = rte_mem_virt2phy(page_addr);
+
+			if (physaddr != expected_physaddr) {
+				RTE_LOG(ERR, EAL, "Segment sanity check failed: wrong physaddr "
+						"at %p (offset 0x%" PRIx64 ": 0x%" PRIx64
+						" (expected 0x%" PRIx64 ")\n",
+						page_addr, offset, physaddr, expected_physaddr);
+				return -1;
+			}
+		}
+
+		/* zero out the whole segment */
+		memset(hugepg_tbl[page_idx].final_va, 0, total_size);
+
+		page_idx++;
 	}
-	close(fd);
-	return 0;
+
+	/* zero out the rest */
+	memset(&hugepg_tbl[page_idx], 0, (hpi->num_pages[0] - page_idx) * sizeof(struct hugepage_file));
+	return page_idx;
 }
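The LOCK_SH taken on each segment file above is advisory and shared: every process that maps the segment can hold one at the same time, and it vanishes automatically when the last descriptor goes away. One plausible way such a mark can be consumed (a sketch, not code from this patch): probe with an exclusive non-blocking lock, whose failure means some process still holds the file.

/* returns 1 if some process still holds a lock on path, 0 if not,
 * -1 on error */
static int
hugefile_in_use(const char *path)
{
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	if (flock(fd, LOCK_EX | LOCK_NB) == -1) {
		close(fd);
		return 1;	/* EWOULDBLOCK: shared holders exist */
	}
	flock(fd, LOCK_UN);
	close(fd);
	return 0;
}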
+#else /* RTE_EAL_SINGLE_FILE_SEGMENTS=n */
+
+/* Unmap all hugepages from original mapping */
+static int
+unmap_all_hugepages_orig(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
+{
+	unsigned i;
+	for (i = 0; i < hpi->num_pages[0]; i++) {
+		if (hugepg_tbl[i].orig_va) {
+			munmap(hugepg_tbl[i].orig_va, hpi->hugepage_sz);
+			hugepg_tbl[i].orig_va = NULL;
+		}
+	}
+	return 0;
+}
+#endif /* RTE_EAL_SINGLE_FILE_SEGMENTS */
 
 /*
  * Parse /proc/self/numa_maps to get the NUMA socket ID for each huge
  * page.
  */
 static int
-find_numasocket(struct hugepage *hugepg_tbl, struct hugepage_info *hpi)
+find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 {
 	int socket_id;
 	char *end, *nodestr;
@@ -378,7 +610,7 @@ find_numasocket(struct hugepage *hugepg_tbl, struct hugepage_info *hpi)
 		return 0;
 	}
 
-	rte_snprintf(hugedir_str, sizeof(hugedir_str),
+	snprintf(hugedir_str, sizeof(hugedir_str),
 			"%s/", hpi->hugedir);
 
 	/* parse numa map */
@@ -439,21 +671,21 @@ error:
 }
 
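find_numasocket() works by string-matching each hugepage path against /proc/self/numa_maps and pulling the node out of the N<node>=<pages> token. For reference, an entry for a mapped hugepage typically looks like the line in the comment below (address and node are illustrative), and the node id can be extracted with a sketch like:

#include <ctype.h>
#include <stdlib.h>
#include <string.h>

/* e.g. "7f61c8a00000 default file=/mnt/huge/rtemap_0 huge dirty=1 N1=1" */
static int
numa_maps_node(const char *line)
{
	const char *p = strstr(line, " N");

	if (p == NULL || !isdigit((unsigned char)p[2]))
		return -1;
	return (int)strtol(p + 2, NULL, 10);	/* node number after " N" */
}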
 /*
- * Sort the hugepg_tbl by physical address (lower addresses first). We
- * use a slow algorithm, but we won't have millions of pages, and this
- * is only done at init time.
+ * Sort the hugepg_tbl by physical address (lower addresses first on x86,
+ * higher addresses first on powerpc). We use a slow algorithm, but we won't
+ * have millions of pages, and this is only done at init time.
  */
 static int
-sort_by_physaddr(struct hugepage *hugepg_tbl, struct hugepage_info *hpi)
+sort_by_physaddr(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 {
 	unsigned i, j;
-	int smallest_idx;
-	uint64_t smallest_addr;
-	struct hugepage tmp;
+	int compare_idx;
+	uint64_t compare_addr;
+	struct hugepage_file tmp;
 
 	for (i = 0; i < hpi->num_pages[0]; i++) {
-		smallest_addr = 0;
-		smallest_idx = -1;
+		compare_addr = 0;
+		compare_idx = -1;
 
 		/*
 		 * browse all entries starting at 'i', and find the
@@ -461,24 +693,29 @@ sort_by_physaddr(struct hugepage *hugepg_tbl, struct hugepage_info *hpi)
 		 */
 		for (j=i; j< hpi->num_pages[0]; j++) {
-			if (smallest_addr == 0 ||
-			    hugepg_tbl[j].physaddr < smallest_addr) {
-				smallest_addr = hugepg_tbl[j].physaddr;
-				smallest_idx = j;
+			if (compare_addr == 0 ||
+#ifdef RTE_ARCH_PPC_64
+				hugepg_tbl[j].physaddr > compare_addr) {
+#else
+				hugepg_tbl[j].physaddr < compare_addr) {
+#endif
+				compare_addr = hugepg_tbl[j].physaddr;
+				compare_idx = j;
 			}
 		}
 
 		/* should not happen */
-		if (smallest_idx == -1) {
+		if (compare_idx == -1) {
 			RTE_LOG(ERR, EAL, "%s(): error in physaddr sorting\n", __func__);
 			return -1;
 		}
 
 		/* swap the 2 entries in the table */
-		memcpy(&tmp, &hugepg_tbl[smallest_idx], sizeof(struct hugepage));
-		memcpy(&hugepg_tbl[smallest_idx], &hugepg_tbl[i],
-				sizeof(struct hugepage));
-		memcpy(&hugepg_tbl[i], &tmp, sizeof(struct hugepage));
+		memcpy(&tmp, &hugepg_tbl[compare_idx],
+				sizeof(struct hugepage_file));
+		memcpy(&hugepg_tbl[compare_idx], &hugepg_tbl[i],
+				sizeof(struct hugepage_file));
+		memcpy(&hugepg_tbl[i], &tmp, sizeof(struct hugepage_file));
 	}
 	return 0;
 }
@@ -508,8 +745,8 @@ create_shared_memory(const char *filename, const size_t mem_size)
  * destination is typically the shared memory.
  */
 static int
-copy_hugepages_to_shared_mem(struct hugepage * dst, int dest_size,
-		const struct hugepage * src, int src_size)
+copy_hugepages_to_shared_mem(struct hugepage_file * dst, int dest_size,
+		const struct hugepage_file * src, int src_size)
 {
 	int src_pos, dst_pos = 0;
 
@@ -518,7 +755,7 @@ copy_hugepages_to_shared_mem(struct hugepage * dst, int dest_size,
 			/* error on overflow attempt */
 			if (dst_pos == dest_size)
 				return -1;
-			memcpy(&dst[dst_pos], &src[src_pos], sizeof(struct hugepage));
+			memcpy(&dst[dst_pos], &src[src_pos], sizeof(struct hugepage_file));
 			dst_pos++;
 		}
 	}
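create_shared_memory() itself is untouched by this patch (only its hunk context appears above); the copy step writes the hugepage_file table into it so secondary processes can find every mapping. For orientation, a helper like that presumably follows the usual file-backed MAP_SHARED idiom — a sketch under that assumption:

static void *
shared_mem_sketch(const char *filename, size_t mem_size)
{
	void *addr;
	int fd = open(filename, O_CREAT | O_RDWR, 0666);

	if (fd < 0)
		return NULL;
	if (ftruncate(fd, mem_size) < 0) {
		close(fd);
		return NULL;
	}
	addr = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);	/* the mapping stays valid after close */
	return addr == MAP_FAILED ? NULL : addr;
}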
@@ -530,7 +767,7 @@ copy_hugepages_to_shared_mem(struct hugepage * dst, int dest_size,
  * ALL hugepages (not just those we need), additional unmapping needs to be done.
  */
 static int
-unmap_unneeded_hugepages(struct hugepage *hugepg_tbl,
+unmap_unneeded_hugepages(struct hugepage_file *hugepg_tbl,
 		struct hugepage_info *hpi,
 		unsigned num_hp_info)
 {
@@ -545,9 +782,16 @@ unmap_unneeded_hugepages(struct hugepage *hugepg_tbl,
 	for (size = 0; size < num_hp_info; size++) {
 		for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
 			unsigned pages_found = 0;
+
 			/* traverse until we have unmapped all the unused pages */
 			for (page = 0; page < nrpages; page++) {
-				struct hugepage *hp = &hugepg_tbl[page];
+				struct hugepage_file *hp = &hugepg_tbl[page];
+
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+				/* if this page was already cleared */
+				if (hp->final_va == NULL)
+					continue;
+#endif
 
 				/* find a page that matches the criteria */
 				if ((hp->size == hpi[size].hugepage_sz) &&
@@ -555,17 +799,67 @@ unmap_unneeded_hugepages(struct hugepage *hugepg_tbl,
 
 					/* if we skipped enough pages, unmap the rest */
 					if (pages_found == hpi[size].num_pages[socket]) {
-						munmap(hp->final_va, hp->size);
+						uint64_t unmap_len;
+
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+						unmap_len = hp->size * hp->repeated;
+#else
+						unmap_len = hp->size;
+#endif
+
+						/* get start addr and len of the remaining segment */
+						munmap(hp->final_va, (size_t) unmap_len);
+
 						hp->final_va = NULL;
-						if (remove(hp->filepath) == -1) {
+						if (unlink(hp->filepath) == -1) {
 							RTE_LOG(ERR, EAL, "%s(): Removing %s failed: %s\n", __func__,
 									hp->filepath, strerror(errno));
 							return -1;
 						}
 					}
-					/* lock the page and skip */
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+					/* else, check how much we need to map */
+					else {
+						int nr_pg_left =
+								hpi[size].num_pages[socket] - pages_found;
+
+						/* if we need enough memory to fit into the segment */
+						if (hp->repeated <= nr_pg_left) {
+							pages_found += hp->repeated;
+						}
+						/* truncate the segment */
+						else {
+							uint64_t final_size = nr_pg_left * hp->size;
+							uint64_t seg_size = hp->repeated * hp->size;
+
+							void *unmap_va = RTE_PTR_ADD(hp->final_va,
+									final_size);
+							int fd;
+
+							munmap(unmap_va, seg_size - final_size);
+
+							fd = open(hp->filepath, O_RDWR);
+							if (fd < 0) {
+								RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+										hp->filepath, strerror(errno));
+								return -1;
+							}
+							if (ftruncate(fd, final_size) < 0) {
+								RTE_LOG(ERR, EAL, "Cannot truncate %s: %s\n",
+										hp->filepath, strerror(errno));
+								return -1;
+							}
+							close(fd);
+
+							pages_found += nr_pg_left;
+							hp->repeated = nr_pg_left;
+						}
+					}
+#else
+					/* else, lock the page and skip */
 					else
 						pages_found++;
+#endif
 
 				} /* match page */
 			} /* foreach page */
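unmap_unneeded_hugepages() leans on two POSIX properties: unlink() only removes the name (the hugepage memory survives until the last munmap()), and ftruncate() gives back the tail of a partially needed segment. A compact sketch of the first property, independent of DPDK:

static void *
map_then_unlink(const char *path, size_t len)
{
	void *va = NULL;
	int fd = open(path, O_CREAT | O_RDWR, 0600);

	if (fd < 0)
		return NULL;
	if (ftruncate(fd, len) == 0)
		va = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);
	unlink(path);	/* name gone; pages remain until munmap(va, len) */
	return va == MAP_FAILED ? NULL : va;
}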
@@ -611,13 +905,53 @@ calc_num_pages_per_socket(uint64_t * memory,
 	if (num_hp_info == 0)
 		return -1;
 
-	for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_mem != 0; socket++) {
-		/* if specific memory amounts per socket weren't requested */
-		if (internal_config.force_sockets == 0) {
+	/* if specific memory amounts per socket weren't requested */
+	if (internal_config.force_sockets == 0) {
+		int cpu_per_socket[RTE_MAX_NUMA_NODES];
+		size_t default_size, total_size;
+		unsigned lcore_id;
+
+		/* Compute number of cores per socket */
+		memset(cpu_per_socket, 0, sizeof(cpu_per_socket));
+		RTE_LCORE_FOREACH(lcore_id) {
+			cpu_per_socket[rte_lcore_to_socket_id(lcore_id)]++;
+		}
+
+		/*
+		 * Automatically spread requested memory amongst detected sockets according
+		 * to number of cores from cpu mask present on each socket
+		 */
+		total_size = internal_config.memory;
+		for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {
+
+			/* Set memory amount per socket */
+			default_size = (internal_config.memory * cpu_per_socket[socket])
+					/ rte_lcore_count();
+
+			/* Limit to maximum available memory on socket */
+			default_size = RTE_MIN(default_size, get_socket_mem_size(socket));
+
+			/* Update sizes */
+			memory[socket] = default_size;
+			total_size -= default_size;
+		}
+
+		/*
+		 * If some memory is remaining, try to allocate it by getting all
+		 * available memory from sockets, one after the other
+		 */
+		for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {
 			/* take whatever is available */
-			memory[socket] = RTE_MIN(get_socket_mem_size(socket),
-					total_mem);
+			default_size = RTE_MIN(get_socket_mem_size(socket) - memory[socket],
+					total_size);
+
+			/* Update sizes */
+			memory[socket] += default_size;
+			total_size -= default_size;
 		}
+	}
+
+	for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_mem != 0; socket++) {
 		/* skips if the memory on specific socket wasn't requested */
 		for (i = 0; i < num_hp_info && memory[socket] != 0; i++){
 			hp_used[i].hugedir = hp_info[i].hugedir;
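A worked example of the spread (numbers are hypothetical): with -m 1024 and eight lcores, six of them on socket 0, the first pass offers socket 0 1024*6/8 = 768 MB and socket 1 1024*2/8 = 256 MB, each clamped by get_socket_mem_size(); whatever could not be placed is then taken greedily from the first socket that still has free hugepage memory. The first-pass share, in isolation:

/* first-pass share for one socket, before clamping to availability */
static uint64_t
socket_share(uint64_t total_mem, unsigned socket_cores, unsigned all_cores)
{
	return total_mem * socket_cores / all_cores;	/* 1024*6/8 = 768 */
}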
@@ -701,31 +1035,51 @@ static int
 rte_eal_hugepage_init(void)
 {
 	struct rte_mem_config *mcfg;
-	struct hugepage *hugepage, *tmp_hp = NULL;
+	struct hugepage_file *hugepage, *tmp_hp = NULL;
 	struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
 	uint64_t memory[RTE_MAX_NUMA_NODES];
 	unsigned hp_offset;
 	int i, j, new_memseg;
-	int nrpages, total_pages = 0;
+	int nr_hugefiles, nr_hugepages = 0;
 	void *addr;
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+	int new_pages_count[MAX_HUGEPAGE_SIZES];
+#endif
 
 	memset(used_hp, 0, sizeof(used_hp));
 
 	/* get pointer to global configuration */
 	mcfg = rte_eal_get_configuration()->mem_config;
 
-	/* for debug purposes, hugetlbfs can be disabled */
+	/* hugetlbfs can be disabled */
 	if (internal_config.no_hugetlbfs) {
-		addr = malloc(internal_config.memory);
+		addr = mmap(NULL, internal_config.memory, PROT_READ | PROT_WRITE,
+				MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+		if (addr == MAP_FAILED) {
+			RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
+					strerror(errno));
+			return -1;
+		}
 		mcfg->memseg[0].phys_addr = (phys_addr_t)(uintptr_t)addr;
 		mcfg->memseg[0].addr = addr;
 		mcfg->memseg[0].len = internal_config.memory;
-		mcfg->memseg[0].socket_id = 0;
+		mcfg->memseg[0].socket_id = SOCKET_ID_ANY;
 		return 0;
 	}
 
+	/* check if app runs on Xen Dom0 */
+	if (internal_config.xen_dom0_support) {
+#ifdef RTE_LIBRTE_XEN_DOM0
+		/* use dom0_mm kernel driver to init memory */
+		if (rte_xen_dom0_memory_init() < 0)
+			return -1;
+		else
+			return 0;
+#endif
+	}
+
 	/* calculate total number of hugepages available. at this point we haven't
 	 * yet started sorting them so they all are on socket 0 */
 	for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
@@ -733,7 +1087,7 @@ rte_eal_hugepage_init(void)
 		/* meanwhile, also initialize used_hp hugepage sizes in used_hp */
 		used_hp[i].hugepage_sz = internal_config.hugepage_info[i].hugepage_sz;
 
-		total_pages += internal_config.hugepage_info[i].num_pages[0];
+		nr_hugepages += internal_config.hugepage_info[i].num_pages[0];
 	}
 
 	/*
@@ -742,11 +1096,11 @@ rte_eal_hugepage_init(void)
 	 * processing done on these pages, shared memory will be created
 	 * at a later stage.
 	 */
-	tmp_hp = malloc(total_pages * sizeof(struct hugepage));
+	tmp_hp = malloc(nr_hugepages * sizeof(struct hugepage_file));
 	if (tmp_hp == NULL)
 		goto fail;
 
-	memset(tmp_hp, 0, total_pages * sizeof(struct hugepage));
+	memset(tmp_hp, 0, nr_hugepages * sizeof(struct hugepage_file));
 
 	hp_offset = 0; /* where we start the current page size entries */
 
@@ -761,7 +1115,7 @@ rte_eal_hugepage_init(void)
 	 */
 		hpi = &internal_config.hugepage_info[i];
 
-		if (hpi->num_pages == 0)
+		if (hpi->num_pages[0] == 0)
 			continue;
 
 		/* map all hugepages available */
@@ -772,7 +1126,7 @@ rte_eal_hugepage_init(void)
 		}
 
 		/* find physical addresses and sockets for each hugepage */
-		if (find_physaddr(&tmp_hp[hp_offset], hpi) < 0){
+		if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0){
 			RTE_LOG(DEBUG, EAL, "Failed to find phys addr for %u MB pages\n",
 					(unsigned)(hpi->hugepage_sz / 0x100000));
 			goto fail;
@@ -787,6 +1141,18 @@ rte_eal_hugepage_init(void)
 		if (sort_by_physaddr(&tmp_hp[hp_offset], hpi) < 0)
 			goto fail;
 
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+		/* remap all hugepages into single file segments */
+		new_pages_count[i] = remap_all_hugepages(&tmp_hp[hp_offset], hpi);
+		if (new_pages_count[i] < 0){
+			RTE_LOG(DEBUG, EAL, "Failed to remap %u MB pages\n",
+					(unsigned)(hpi->hugepage_sz / 0x100000));
+			goto fail;
+		}
+
+		/* we have processed a num of hugepages of this size, so inc offset */
+		hp_offset += new_pages_count[i];
+#else
 		/* remap all hugepages */
 		if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) < 0){
 			RTE_LOG(DEBUG, EAL, "Failed to remap %u MB pages\n",
@@ -800,7 +1166,18 @@ rte_eal_hugepage_init(void)
 
 		/* we have processed a num of hugepages of this size, so inc offset */
 		hp_offset += hpi->num_pages[0];
+#endif
+	}
+
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+	nr_hugefiles = 0;
+	for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
+		nr_hugefiles += new_pages_count[i];
 	}
+#else
+	nr_hugefiles = nr_hugepages;
+#endif
+
 
 	/* clean out the numbers of pages */
 	for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++)
@@ -808,14 +1185,19 @@ rte_eal_hugepage_init(void)
 			internal_config.hugepage_info[i].num_pages[j] = 0;
 
 	/* get hugepages for each socket */
-	for (i = 0; i < total_pages; i++) {
+	for (i = 0; i < nr_hugefiles; i++) {
 		int socket = tmp_hp[i].socket_id;
 
 		/* find a hugepage info with right size and increment num_pages */
 		for (j = 0; j < (int) internal_config.num_hugepage_sizes; j++) {
 			if (tmp_hp[i].size == internal_config.hugepage_info[j].hugepage_sz) {
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+				internal_config.hugepage_info[j].num_pages[socket] +=
+						tmp_hp[i].repeated;
+#else
 				internal_config.hugepage_info[j].num_pages[socket]++;
+#endif
 			}
 		}
 	}
@@ -825,12 +1207,12 @@ rte_eal_hugepage_init(void)
 		memory[i] = internal_config.socket_mem[i];
 
 	/* calculate final number of pages */
-	nrpages = calc_num_pages_per_socket(memory,
+	nr_hugepages = calc_num_pages_per_socket(memory,
 			internal_config.hugepage_info, used_hp,
 			internal_config.num_hugepage_sizes);
 
 	/* error if not enough memory available */
-	if (nrpages < 0)
+	if (nr_hugepages < 0)
 		goto fail;
 
 	/* reporting in! */
@@ -850,12 +1232,13 @@ rte_eal_hugepage_init(void)
 
 	/* create shared memory */
 	hugepage = create_shared_memory(eal_hugepage_info_path(),
-			nrpages * sizeof(struct hugepage));
+			nr_hugefiles * sizeof(struct hugepage_file));
 
 	if (hugepage == NULL) {
 		RTE_LOG(ERR, EAL, "Failed to create shared memory!\n");
 		goto fail;
 	}
+	memset(hugepage, 0, nr_hugefiles * sizeof(struct hugepage_file));
 
 	/*
 	 * unmap pages that we won't need (looks at used_hp).
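The hunk that follows rebuilds mcfg->memseg[] from the sorted table. A page only continues the current memseg when socket, page size, physical adjacency and virtual adjacency all hold, which can be read as a single predicate (sketch of the ascending variant; the PPC64 branch below tests the descending direction instead):

static int
extends_current_memseg(const struct hugepage_file *prev,
		const struct hugepage_file *cur)
{
	return cur->socket_id == prev->socket_id &&
		cur->size == prev->size &&
		cur->physaddr - prev->physaddr == cur->size &&
		(uintptr_t)cur->final_va - (uintptr_t)prev->final_va
			== cur->size;
}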
@@ -872,8 +1255,8 @@ rte_eal_hugepage_init(void)
 	 * this procedure only copies those hugepages that have final_va
 	 * not NULL. has overflow protection.
 	 */
-	if (copy_hugepages_to_shared_mem(hugepage, nrpages,
-			tmp_hp, total_pages) < 0) {
+	if (copy_hugepages_to_shared_mem(hugepage, nr_hugefiles,
+			tmp_hp, nr_hugefiles) < 0) {
 		RTE_LOG(ERR, EAL, "Copying tables to shared memory failed!\n");
 		goto fail;
 	}
@@ -882,9 +1265,16 @@ rte_eal_hugepage_init(void)
 	free(tmp_hp);
 	tmp_hp = NULL;
 
-	memset(mcfg->memseg, 0, sizeof(mcfg->memseg));
-	j = -1;
-	for (i = 0; i < nrpages; i++) {
+	/* find earliest free memseg - this is needed because in case of IVSHMEM,
+	 * segments might have already been initialized */
+	for (j = 0; j < RTE_MAX_MEMSEG; j++)
+		if (mcfg->memseg[j].addr == NULL) {
+			/* move to previous segment and exit loop */
+			j--;
+			break;
+		}
+
+	for (i = 0; i < nr_hugefiles; i++) {
 		new_memseg = 0;
 
 		/* if this is a new section, create a new memseg */
@@ -894,12 +1284,25 @@ rte_eal_hugepage_init(void)
 			new_memseg = 1;
 		else if (hugepage[i].size != hugepage[i-1].size)
 			new_memseg = 1;
+
+#ifdef RTE_ARCH_PPC_64
+		/* On PPC64 architecture, the mmap always starts from higher
+		 * virtual address to lower address. Here, both the physical
+		 * address and virtual address are in descending order */
+		else if ((hugepage[i-1].physaddr - hugepage[i].physaddr) !=
+		    hugepage[i].size)
+			new_memseg = 1;
+		else if (((unsigned long)hugepage[i-1].final_va -
+		    (unsigned long)hugepage[i].final_va) != hugepage[i].size)
+			new_memseg = 1;
+#else
 		else if ((hugepage[i].physaddr - hugepage[i-1].physaddr) !=
 		    hugepage[i].size)
 			new_memseg = 1;
 		else if (((unsigned long)hugepage[i].final_va -
 		    (unsigned long)hugepage[i-1].final_va) != hugepage[i].size)
 			new_memseg = 1;
+#endif
 
 		if (new_memseg) {
 			j += 1;
@@ -908,32 +1311,40 @@ rte_eal_hugepage_init(void)
 
 			mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
 			mcfg->memseg[j].addr = hugepage[i].final_va;
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+			mcfg->memseg[j].len = hugepage[i].size * hugepage[i].repeated;
+#else
 			mcfg->memseg[j].len = hugepage[i].size;
+#endif
 			mcfg->memseg[j].socket_id = hugepage[i].socket_id;
 			mcfg->memseg[j].hugepage_sz = hugepage[i].size;
 		}
 		/* continuation of previous memseg */
 		else {
+#ifdef RTE_ARCH_PPC_64
+			/* Use the phy and virt address of the last page as segment
+			 * address for IBM Power architecture */
+			mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
+			mcfg->memseg[j].addr = hugepage[i].final_va;
+#endif
 			mcfg->memseg[j].len += mcfg->memseg[j].hugepage_sz;
 		}
 		hugepage[i].memseg_id = j;
 	}
 
-	if (i < nrpages) {
+	if (i < nr_hugefiles) {
 		RTE_LOG(ERR, EAL, "Can only reserve %d pages "
 			"from %d requested\n"
 			"Current %s=%d is not enough\n"
 			"Please either increase it or request less amount "
 			"of memory.\n",
-			i, nrpages, RTE_STR(CONFIG_RTE_MAX_MEMSEG),
+			i, nr_hugefiles, RTE_STR(CONFIG_RTE_MAX_MEMSEG),
 			RTE_MAX_MEMSEG);
 		return (-ENOMEM);
 	}
 
-	return 0;
-
 fail:
 	if (tmp_hp)
 		free(tmp_hp);
@@ -962,7 +1373,7 @@ static int
 rte_eal_hugepage_attach(void)
 {
 	const struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
-	const struct hugepage *hp = NULL;
+	const struct hugepage_file *hp = NULL;
 	unsigned num_hp = 0;
 	unsigned i, s = 0; /* s used to track the segment number */
 	off_t size;
@@ -975,6 +1386,17 @@ rte_eal_hugepage_attach(void)
 				"into secondary processes\n");
 	}
 
+	if (internal_config.xen_dom0_support) {
+#ifdef RTE_LIBRTE_XEN_DOM0
+		if (rte_xen_dom0_memory_attach() < 0) {
+			RTE_LOG(ERR, EAL, "Failed to attach memory segments of primary "
+					"process\n");
+			return -1;
+		}
+		return 0;
+#endif
+	}
+
 	fd_zero = open("/dev/zero", O_RDONLY);
 	if (fd_zero < 0) {
 		RTE_LOG(ERR, EAL, "Could not open /dev/zero\n");
 		return -1;
 	}
@@ -997,6 +1419,15 @@ rte_eal_hugepage_attach(void)
 		if (mcfg->memseg[s].len == 0)
 			break;
 
+#ifdef RTE_LIBRTE_IVSHMEM
+		/*
+		 * if segment has ioremap address set, it's an IVSHMEM segment and
+		 * doesn't need mapping as it was already mapped earlier
+		 */
+		if (mcfg->memseg[s].ioremap_addr != 0)
+			continue;
+#endif
+
 		/*
 		 * fd_zero is mmapped to get a contiguous block of virtual
 		 * addresses of the appropriate memseg size.
@@ -1007,9 +1438,9 @@ rte_eal_hugepage_attach(void)
 
 		if (base_addr == MAP_FAILED ||
 				base_addr != mcfg->memseg[s].addr) {
-			RTE_LOG(ERR, EAL, "Could not mmap %llu bytes "
-				"in /dev/zero to requested address [%p]\n",
+			RTE_LOG(ERR, EAL, "Could not mmap %llu bytes "
+				"in /dev/zero to requested address [%p]: '%s'\n",
 				(unsigned long long)mcfg->memseg[s].len,
-				mcfg->memseg[s].addr);
+				mcfg->memseg[s].addr, strerror(errno));
 			if (aslr_enabled() > 0) {
 				RTE_LOG(ERR, EAL, "It is recommended to "
 					"disable ASLR in the kernel "
@@ -1027,14 +1458,24 @@ rte_eal_hugepage_attach(void)
 		goto error;
 	}
 
-	num_hp = size / sizeof(struct hugepage);
-	RTE_LOG(DEBUG, EAL, "Analysing %u hugepages\n", num_hp);
+	num_hp = size / sizeof(struct hugepage_file);
+	RTE_LOG(DEBUG, EAL, "Analysing %u files\n", num_hp);
 
 	s = 0;
 	while (s < RTE_MAX_MEMSEG && mcfg->memseg[s].len > 0){
 		void *addr, *base_addr;
 		uintptr_t offset = 0;
-
+		size_t mapping_size;
+#ifdef RTE_LIBRTE_IVSHMEM
+		/*
+		 * if segment has ioremap address set, it's an IVSHMEM segment and
+		 * doesn't need mapping as it was already mapped earlier
+		 */
+		if (mcfg->memseg[s].ioremap_addr != 0) {
+			s++;
+			continue;
+		}
+#endif
 		/*
 		 * free previously mapped memory so we can map the
 		 * hugepages into the space
@@ -1053,16 +1494,22 @@ rte_eal_hugepage_attach(void)
 						hp[i].filepath);
 				goto error;
 			}
+#ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
+			mapping_size = hp[i].size * hp[i].repeated;
+#else
+			mapping_size = hp[i].size;
+#endif
+
 			addr = mmap(RTE_PTR_ADD(base_addr, offset),
-					hp[i].size, PROT_READ | PROT_WRITE,
-					MAP_SHARED | MAP_FIXED, fd, 0);
+					mapping_size, PROT_READ | PROT_WRITE,
+					MAP_SHARED, fd, 0);
 			close(fd); /* close file both on success and on failure */
-			if (addr == MAP_FAILED) {
+			if (addr == MAP_FAILED ||
+					addr != RTE_PTR_ADD(base_addr, offset)) {
 				RTE_LOG(ERR, EAL, "Could not mmap %s\n",
 						hp[i].filepath);
 				goto error;
 			}
-			offset+=hp[i].size;
+			offset += mapping_size;
 		}
 	}
 	RTE_LOG(DEBUG, EAL, "Mapped segment %u of size 0x%llx\n", s,
@@ -1103,7 +1550,7 @@ rte_eal_memdevice_init(void)
 int
 rte_eal_memory_init(void)
 {
-	RTE_LOG(INFO, EAL, "Setting up hugepage memory...\n");
+	RTE_LOG(INFO, EAL, "Setting up memory...\n");
 	const int retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
 			rte_eal_hugepage_init() :
 			rte_eal_hugepage_attach();