X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_eal%2Flinuxapp%2Feal%2Feal_memory.c;h=79d1d2dd8033b0892f7415736ec4cfa1f6d193c2;hb=bd6aa172cf35046e197b3a42a79069d4da15813a;hp=846fd31f5a02bc6602baecf8e3a3ad4101cd3fe5;hpb=5142945b3a72b523bb1ab664c4049b5168c0f7e3;p=dpdk.git

diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 846fd31f5a..79d1d2dd80 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -399,8 +399,10 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 			return -1;
 		}
 
+		/* map the segment, and populate page tables,
+		 * the kernel fills this segment with zeros */
 		virtaddr = mmap(vma_addr, hugepage_sz, PROT_READ | PROT_WRITE,
-				MAP_SHARED, fd, 0);
+				MAP_SHARED | MAP_POPULATE, fd, 0);
 		if (virtaddr == MAP_FAILED) {
 			RTE_LOG(ERR, EAL, "%s(): mmap failed: %s\n", __func__,
 					strerror(errno));
@@ -410,7 +412,6 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 
 		if (orig) {
 			hugepg_tbl[i].orig_va = virtaddr;
-			memset(virtaddr, 0, hugepage_sz);
 		}
 		else {
 			hugepg_tbl[i].final_va = virtaddr;
@@ -529,22 +530,16 @@ remap_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 
 			old_addr = vma_addr;
 
-			/* map new, bigger segment */
+			/* map new, bigger segment, and populate page tables,
+			 * the kernel fills this segment with zeros */
 			vma_addr = mmap(vma_addr, total_size,
-					PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+					PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd, 0);
 			if (vma_addr == MAP_FAILED || vma_addr != old_addr) {
 				RTE_LOG(ERR, EAL, "%s(): mmap failed: %s\n", __func__,
 						strerror(errno));
 				close(fd);
 				return -1;
 			}
-
-			/* touch the page. this is needed because kernel postpones mapping
-			 * creation until the first page fault. with this, we pin down
-			 * the page and it is marked as used and gets into process' pagemap.
-			 */
-			for (offset = 0; offset < total_size; offset += hugepage_sz)
-				*((volatile uint8_t*) RTE_PTR_ADD(vma_addr, offset));
 		}
 
 		/* set shared flock on the file. */
@@ -592,9 +587,6 @@ remap_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 			}
 		}
 
-		/* zero out the whole segment */
-		memset(hugepg_tbl[page_idx].final_va, 0, total_size);
-
 		page_idx++;
 	}
 
@@ -701,54 +693,23 @@ error:
 	return -1;
 }
 
-/*
- * Sort the hugepg_tbl by physical address (lower addresses first on x86,
- * higher address first on powerpc). We use a slow algorithm, but we won't
- * have millions of pages, and this is only done at init time.
- */
 static int
-sort_by_physaddr(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
+cmp_physaddr(const void *a, const void *b)
 {
-	unsigned i, j;
-	int compare_idx;
-	uint64_t compare_addr;
-	struct hugepage_file tmp;
-
-	for (i = 0; i < hpi->num_pages[0]; i++) {
-		compare_addr = 0;
-		compare_idx = -1;
-
-		/*
-		 * browse all entries starting at 'i', and find the
-		 * entry with the smallest addr
-		 */
-		for (j=i; j< hpi->num_pages[0]; j++) {
-
-			if (compare_addr == 0 ||
-#ifdef RTE_ARCH_PPC_64
-				hugepg_tbl[j].physaddr > compare_addr) {
+#ifndef RTE_ARCH_PPC_64
+	const struct hugepage_file *p1 = (const struct hugepage_file *)a;
+	const struct hugepage_file *p2 = (const struct hugepage_file *)b;
 #else
-				hugepg_tbl[j].physaddr < compare_addr) {
+	/* PowerPC needs memory sorted in reverse order from x86 */
+	const struct hugepage_file *p1 = (const struct hugepage_file *)b;
+	const struct hugepage_file *p2 = (const struct hugepage_file *)a;
 #endif
-				compare_addr = hugepg_tbl[j].physaddr;
-				compare_idx = j;
-			}
-		}
-
-		/* should not happen */
-		if (compare_idx == -1) {
-			RTE_LOG(ERR, EAL, "%s(): error in physaddr sorting\n", __func__);
-			return -1;
-		}
-
-		/* swap the 2 entries in the table */
-		memcpy(&tmp, &hugepg_tbl[compare_idx],
-			sizeof(struct hugepage_file));
-		memcpy(&hugepg_tbl[compare_idx], &hugepg_tbl[i],
-			sizeof(struct hugepage_file));
-		memcpy(&hugepg_tbl[i], &tmp, sizeof(struct hugepage_file));
-	}
-	return 0;
+	if (p1->physaddr < p2->physaddr)
+		return -1;
+	else if (p1->physaddr > p2->physaddr)
+		return 1;
+	else
+		return 0;
 }
 
 /*
@@ -1113,7 +1074,7 @@ rte_eal_hugepage_init(void)
 	/* hugetlbfs can be disabled */
 	if (internal_config.no_hugetlbfs) {
 		addr = mmap(NULL, internal_config.memory, PROT_READ | PROT_WRITE,
-				MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+				MAP_LOCKED | MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
 		if (addr == MAP_FAILED) {
 			RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
 					strerror(errno));
@@ -1195,8 +1156,8 @@ rte_eal_hugepage_init(void)
 			goto fail;
 		}
 
-		if (sort_by_physaddr(&tmp_hp[hp_offset], hpi) < 0)
-			goto fail;
+		qsort(&tmp_hp[hp_offset], hpi->num_pages[0],
+		      sizeof(struct hugepage_file), cmp_physaddr);
 
 #ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
 		/* remap all hugepages into single file segments */
@@ -1412,8 +1373,7 @@ rte_eal_hugepage_init(void)
 	return 0;
 
 fail:
-	if (tmp_hp)
-		free(tmp_hp);
+	free(tmp_hp);
 	return -1;
 }
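
Two kernel behaviours make the deletions in the mapping hunks safe, and both are worth spelling out. MAP_POPULATE tells the kernel to fault the whole mapping in at mmap() time, which replaces the removed "touch one byte per page" loop, and a freshly created mapping is zero-filled by the kernel, which is why the memset() calls can go as well. A minimal standalone sketch of both points (not part of the patch; it uses small anonymous pages purely for illustration, but hugetlbfs-backed mappings behave the same way):

#define _GNU_SOURCE		/* MAP_ANONYMOUS / MAP_POPULATE on glibc */
#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16 * 4096;

	/* MAP_POPULATE pre-faults every page, so the mapping lands in
	 * the process page tables without an explicit touch loop. */
	uint8_t *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* A fresh mapping is already zero-filled by the kernel; no
	 * memset() is needed before first use. */
	printf("first byte: %u, last byte: %u\n",
			(unsigned)p[0], (unsigned)p[len - 1]);

	munmap(p, len);
	return 0;
}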
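
The qsort() comparator in the patch returns -1/0/1 through explicit comparisons rather than the tempting "return p1->physaddr - p2->physaddr": physaddr is 64 bits wide, so the difference would be truncated to the int that qsort() expects and could report the wrong sign. A standalone sketch of the same pattern, with a hypothetical struct page standing in for struct hugepage_file (x86 ordering only, for brevity):

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct hugepage_file; only the sort key matters here. */
struct page {
	uint64_t physaddr;
};

/* Explicit three-way comparison: safe for the full 64-bit range. */
static int
cmp_physaddr(const void *a, const void *b)
{
	const struct page *p1 = a;
	const struct page *p2 = b;

	if (p1->physaddr < p2->physaddr)
		return -1;
	else if (p1->physaddr > p2->physaddr)
		return 1;
	return 0;
}

int main(void)
{
	struct page tbl[] = { { 0x40000000 }, { 0x10000000 }, { 0x20000000 } };
	size_t i, n = sizeof(tbl) / sizeof(tbl[0]);

	qsort(tbl, n, sizeof(tbl[0]), cmp_physaddr);

	for (i = 0; i < n; i++)
		printf("0x%" PRIx64 "\n", tbl[i].physaddr);
	return 0;
}

qsort() gives no stability guarantee, but distinct hugepages never share a physical address, so the comparator only returns 0 for an element compared against itself and stability does not matter here.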