X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=inline;f=lib%2Flibrte_eal%2Flinuxapp%2Feal%2Feal_memory.c;h=341fefb17ad64be5977a819333c31b9658386796;hb=6307b909b8e0;hp=ae43f9eadbf1f9e5361fa805d6ed1159251c89b8;hpb=f15addb79ebc4f192c4025aab1729bbd8fd7bc7b;p=dpdk.git

diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index ae43f9eadb..341fefb17a 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -85,7 +85,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -300,7 +299,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 #endif
 
 	for (i = 0; i < hpi->num_pages[0]; i++) {
-		size_t hugepage_sz = hpi->hugepage_sz;
+		uint64_t hugepage_sz = hpi->hugepage_sz;
 
 		if (orig) {
 			hugepg_tbl[i].file_id = i;
@@ -316,11 +315,12 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 #endif
 			hugepg_tbl[i].filepath[sizeof(hugepg_tbl[i].filepath) - 1] = '\0';
 		}
-#ifndef RTE_ARCH_X86_64
-		/* for 32-bit systems, don't remap 1G pages, just reuse original
-		 * map address as final map address.
+#ifndef RTE_ARCH_64
+		/* for 32-bit systems, don't remap 1G and 16G pages, just reuse
+		 * original map address as final map address.
 		 */
-		else if (hugepage_sz == RTE_PGSIZE_1G){
+		else if ((hugepage_sz == RTE_PGSIZE_1G)
+			|| (hugepage_sz == RTE_PGSIZE_16G)) {
 			hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
 			hugepg_tbl[i].orig_va = NULL;
 			continue;
@@ -335,9 +335,17 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
 			 * physical block: count the number of
 			 * contiguous physical pages. */
 			for (j = i+1; j < hpi->num_pages[0] ; j++) {
+#ifdef RTE_ARCH_PPC_64
+				/* The physical addresses are sorted in
+				 * descending order on PPC64 */
+				if (hugepg_tbl[j].physaddr !=
+				    hugepg_tbl[j-1].physaddr - hugepage_sz)
+					break;
+#else
 				if (hugepg_tbl[j].physaddr !=
 				    hugepg_tbl[j-1].physaddr + hugepage_sz)
 					break;
+#endif
 			}
 			num_pages = j - i;
 			vma_len = num_pages * hugepage_sz;
@@ -412,11 +420,12 @@ remap_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 
 	while (i < hpi->num_pages[0]) {
 
-#ifndef RTE_ARCH_X86_64
-		/* for 32-bit systems, don't remap 1G pages, just reuse original
-		 * map address as final map address.
+#ifndef RTE_ARCH_64
+		/* for 32-bit systems, don't remap 1G pages and 16G pages,
+		 * just reuse original map address as final map address.
 		 */
-		if (hugepage_sz == RTE_PGSIZE_1G){
+		if ((hugepage_sz == RTE_PGSIZE_1G)
+			|| (hugepage_sz == RTE_PGSIZE_16G)) {
 			hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
 			hugepg_tbl[i].orig_va = NULL;
 			i++;
@@ -428,8 +437,17 @@ remap_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 		 * physical block: count the number of
 		 * contiguous physical pages. */
 		for (j = i+1; j < hpi->num_pages[0] ; j++) {
-			if (hugepg_tbl[j].physaddr != hugepg_tbl[j-1].physaddr + hugepage_sz)
+#ifdef RTE_ARCH_PPC_64
+			/* The physical addresses are sorted in descending
+			 * order on PPC64 */
+			if (hugepg_tbl[j].physaddr !=
+			    hugepg_tbl[j-1].physaddr - hugepage_sz)
 				break;
+#else
+			if (hugepg_tbl[j].physaddr !=
+			    hugepg_tbl[j-1].physaddr + hugepage_sz)
+				break;
+#endif
 		}
 		num_pages = j - i;
 		vma_len = num_pages * hugepage_sz;
@@ -505,7 +523,7 @@ remap_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 			return -1;
 		}
 
-		rte_snprintf(hugepg_tbl[page_idx].filepath, MAX_HUGEPAGE_PATH, "%s",
+		snprintf(hugepg_tbl[page_idx].filepath, MAX_HUGEPAGE_PATH, "%s",
 				filepath);
 
 		physaddr = rte_mem_virt2phy(vma_addr);
@@ -591,8 +609,8 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 		return 0;
 	}
 
-	rte_snprintf(hugedir_str, sizeof(hugedir_str),
-			"%s/", hpi->hugedir);
+	snprintf(hugedir_str, sizeof(hugedir_str),
+			"%s/%s", hpi->hugedir, internal_config.hugefile_prefix);
 
 	/* parse numa map */
 	while (fgets(buf, sizeof(buf), f) != NULL) {
@@ -652,21 +670,21 @@ error:
 }
 
 /*
- * Sort the hugepg_tbl by physical address (lower addresses first). We
- * use a slow algorithm, but we won't have millions of pages, and this
- * is only done at init time.
+ * Sort the hugepg_tbl by physical address (lower addresses first on x86,
+ * higher address first on powerpc). We use a slow algorithm, but we won't
+ * have millions of pages, and this is only done at init time.
  */
 static int
 sort_by_physaddr(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 {
 	unsigned i, j;
-	int smallest_idx;
-	uint64_t smallest_addr;
+	int compare_idx;
+	uint64_t compare_addr;
 	struct hugepage_file tmp;
 
 	for (i = 0; i < hpi->num_pages[0]; i++) {
-		smallest_addr = 0;
-		smallest_idx = -1;
+		compare_addr = 0;
+		compare_idx = -1;
 
 		/*
 		 * browse all entries starting at 'i', and find the
@@ -674,23 +692,28 @@ sort_by_physaddr(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 		 */
 		for (j=i; j< hpi->num_pages[0]; j++) {
 
-			if (smallest_addr == 0 ||
-			    hugepg_tbl[j].physaddr < smallest_addr) {
-				smallest_addr = hugepg_tbl[j].physaddr;
-				smallest_idx = j;
+			if (compare_addr == 0 ||
+#ifdef RTE_ARCH_PPC_64
+				hugepg_tbl[j].physaddr > compare_addr) {
+#else
+				hugepg_tbl[j].physaddr < compare_addr) {
+#endif
+				compare_addr = hugepg_tbl[j].physaddr;
+				compare_idx = j;
 			}
 		}
 
 		/* should not happen */
-		if (smallest_idx == -1) {
+		if (compare_idx == -1) {
 			RTE_LOG(ERR, EAL, "%s(): error in physaddr sorting\n", __func__);
 			return -1;
 		}
 
 		/* swap the 2 entries in the table */
-		memcpy(&tmp, &hugepg_tbl[smallest_idx], sizeof(struct hugepage_file));
-		memcpy(&hugepg_tbl[smallest_idx], &hugepg_tbl[i],
-				sizeof(struct hugepage_file));
+		memcpy(&tmp, &hugepg_tbl[compare_idx],
+			sizeof(struct hugepage_file));
+		memcpy(&hugepg_tbl[compare_idx], &hugepg_tbl[i],
+			sizeof(struct hugepage_file));
 		memcpy(&hugepg_tbl[i], &tmp, sizeof(struct hugepage_file));
 	}
 	return 0;
@@ -857,7 +880,7 @@ get_socket_mem_size(int socket)
 			size += hpi->hugepage_sz * hpi->num_pages[socket];
 	}
 
-	return (size);
+	return size;
 }
 
 /*
@@ -1165,7 +1188,9 @@ rte_eal_hugepage_init(void)
 			int socket = tmp_hp[i].socket_id;
 
 			/* find a hugepage info with right size and increment num_pages */
-			for (j = 0; j < (int) internal_config.num_hugepage_sizes; j++) {
+			const int nb_hpsizes = RTE_MIN(MAX_HUGEPAGE_SIZES,
+				(int)internal_config.num_hugepage_sizes);
+			for (j = 0; j < nb_hpsizes; j++) {
 				if (tmp_hp[i].size ==
 					internal_config.hugepage_info[j].hugepage_sz) {
 #ifdef RTE_EAL_SINGLE_FILE_SEGMENTS
@@ -1260,12 +1285,25 @@ rte_eal_hugepage_init(void)
 			new_memseg = 1;
 		else if (hugepage[i].size != hugepage[i-1].size)
 			new_memseg = 1;
+
+#ifdef RTE_ARCH_PPC_64
+		/* On PPC64 architecture, the mmap always start from higher
+		 * virtual address to lower address. Here, both the physical
+		 * address and virtual address are in descending order */
+		else if ((hugepage[i-1].physaddr - hugepage[i].physaddr) !=
+		    hugepage[i].size)
+			new_memseg = 1;
+		else if (((unsigned long)hugepage[i-1].final_va -
+		    (unsigned long)hugepage[i].final_va) != hugepage[i].size)
+			new_memseg = 1;
+#else
 		else if ((hugepage[i].physaddr - hugepage[i-1].physaddr) !=
 		    hugepage[i].size)
 			new_memseg = 1;
 		else if (((unsigned long)hugepage[i].final_va -
 		    (unsigned long)hugepage[i-1].final_va) != hugepage[i].size)
 			new_memseg = 1;
+#endif
 
 		if (new_memseg) {
 			j += 1;
@@ -1284,6 +1322,12 @@ rte_eal_hugepage_init(void)
 		}
 		/* continuation of previous memseg */
 		else {
+#ifdef RTE_ARCH_PPC_64
+		/* Use the phy and virt address of the last page as segment
+		 * address for IBM Power architecture */
+			mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
+			mcfg->memseg[j].addr = hugepage[i].final_va;
+#endif
 			mcfg->memseg[j].len += mcfg->memseg[j].hugepage_sz;
 		}
 		hugepage[i].memseg_id = j;
@@ -1297,7 +1341,7 @@ rte_eal_hugepage_init(void)
 			"of memory.\n",
 			i, nr_hugefiles, RTE_STR(CONFIG_RTE_MAX_MEMSEG),
 			RTE_MAX_MEMSEG);
-		return (-ENOMEM);
+		return -ENOMEM;
 	}
 
 	return 0;
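
The recurring idea behind the RTE_ARCH_PPC_64 hunks above is that on IBM Power the hugepage mappings are handed out from higher virtual addresses downwards, so once the table is sorted both the physical and virtual addresses run in descending order, and "contiguous" means the next page sits exactly one page size below the previous one. The standalone sketch below illustrates only that contiguity walk; struct page, count_contig_pages() and the sample page size are hypothetical stand-ins for the DPDK structures, not code from this patch.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for struct hugepage_file: only the field the
 * check needs. */
struct page {
	uint64_t physaddr;
};

/* Count how many pages starting at index 'i' form one physically
 * contiguous block of 'page_sz'-byte pages.  On PPC64 the table is
 * sorted by descending physical address, so the next page must sit
 * exactly one page size below the previous one; on other architectures
 * the table is ascending and the next page must sit one page size
 * above. */
static size_t
count_contig_pages(const struct page *tbl, size_t n, size_t i, uint64_t page_sz)
{
	size_t j;

	for (j = i + 1; j < n; j++) {
#ifdef RTE_ARCH_PPC_64
		if (tbl[j].physaddr != tbl[j - 1].physaddr - page_sz)
			break;
#else
		if (tbl[j].physaddr != tbl[j - 1].physaddr + page_sz)
			break;
#endif
	}
	return j - i;
}

int main(void)
{
	/* hypothetical 2 MB pages: three contiguous, then a gap */
	const uint64_t sz = 2 * 1024 * 1024;
	struct page tbl[] = { { 10 * sz }, { 11 * sz }, { 12 * sz }, { 20 * sz } };

	printf("%zu contiguous pages from index 0\n",
	       count_contig_pages(tbl, 4, 0, sz));
	return 0;
}

Built without RTE_ARCH_PPC_64 this prints "3 contiguous pages from index 0"; with it defined, a table sorted in descending order would give the same result, which is exactly why map_all_hugepages() and remap_all_hugepages() only need the sign of the step flipped.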
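
The sort_by_physaddr() change follows the same convention: the existing selection sort is kept, but the comparison is flipped so the table ends up descending on Power and ascending everywhere else. The sketch below is a simplified illustration of that direction-aware selection sort; it redeclares the same hypothetical struct page and omits the compare_addr == 0 sentinel used by the real function, so the names and structure are illustrative only.

#include <inttypes.h>
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for struct hugepage_file. */
struct page {
	uint64_t physaddr;
};

/* Selection sort by physical address: descending on PPC64 so that the
 * contiguity walk can step downwards through memory, ascending on other
 * architectures.  O(n^2) is acceptable because the table is small and
 * only sorted once at init time. */
static void
sort_pages_by_physaddr(struct page *tbl, size_t n)
{
	size_t i, j, best;
	struct page tmp;

	for (i = 0; i + 1 < n; i++) {
		best = i;
		for (j = i + 1; j < n; j++) {
#ifdef RTE_ARCH_PPC_64
			if (tbl[j].physaddr > tbl[best].physaddr)
#else
			if (tbl[j].physaddr < tbl[best].physaddr)
#endif
				best = j;
		}
		if (best != i) {
			tmp = tbl[i];
			tbl[i] = tbl[best];
			tbl[best] = tmp;
		}
	}
}

int main(void)
{
	struct page tbl[] = { { 0x4000 }, { 0x1000 }, { 0x3000 }, { 0x2000 } };
	size_t i;

	sort_pages_by_physaddr(tbl, sizeof(tbl) / sizeof(tbl[0]));
	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		printf("0x%" PRIx64 "\n", tbl[i].physaddr);
	return 0;
}

Sorting in the same direction that the addresses will later be walked is what lets the memseg-building loop in rte_eal_hugepage_init() treat "previous page" uniformly: on Power it subtracts, elsewhere it adds, but the merge logic is otherwise unchanged.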