mem: move virtual area function in common directory
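
The Linux EAL's private get_virtual_area() helper (an mmap of /dev/zero plus a
static baseaddr_offset) is removed; both the hugepage mapping path and the
secondary-process attach path now call the common eal_get_virtual_area()
instead, and the attach path no longer needs /dev/zero at all. A minimal
sketch of the call as this patch uses it when reserving space for hugepages
(size_wanted is a hypothetical placeholder for the requested length, and
hugepage_sz stands in for hpi->hugepage_sz):

    /* Reserve a hugepage-aligned virtual area; allow the helper to shrink
     * the request on failure and leave the range unmapped so hugepages can
     * be mapped into it afterwards. */
    size_t vma_len = size_wanted;               /* may be reduced on return */
    void *vma_addr = eal_get_virtual_area(NULL, /* let EAL pick the address */
                    &vma_len, hugepage_sz,
                    EAL_VIRTUAL_AREA_ALLOW_SHRINK | EAL_VIRTUAL_AREA_UNMAP,
                    0 /* extra mmap flags; MAP_HUGETLB on ppc64 */);
    if (vma_addr == NULL)
            vma_len = hugepage_sz;              /* fall back to one page at a time */
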
[dpdk.git] / lib / librte_eal / linuxapp / eal / eal_memory.c
index ebe0683..24e6b50 100644
@@ -1,64 +1,6 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-/*   BSD LICENSE
- *
- *   Copyright(c) 2013 6WIND.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of 6WIND S.A. nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ * Copyright(c) 2013 6WIND S.A.
  */
 
 #define _FILE_OFFSET_BITS 64
@@ -70,7 +12,6 @@
 #include <stdint.h>
 #include <inttypes.h>
 #include <string.h>
-#include <stdarg.h>
 #include <sys/mman.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <sys/file.h>
 #include <unistd.h>
 #include <limits.h>
-#include <errno.h>
 #include <sys/ioctl.h>
 #include <sys/time.h>
 #include <signal.h>
 #include <setjmp.h>
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numa.h>
+#include <numaif.h>
+#endif
 
+#include <rte_errno.h>
 #include <rte_log.h>
 #include <rte_memory.h>
-#include <rte_memzone.h>
 #include <rte_launch.h>
 #include <rte_eal.h>
 #include <rte_eal_memconfig.h>
 
 #define PFN_MASK_SIZE  8
 
-#ifdef RTE_LIBRTE_XEN_DOM0
-int rte_xen_dom0_supported(void)
-{
-       return internal_config.xen_dom0_support;
-}
-#endif
-
 /**
  * @file
  * Huge page mapping under linux
@@ -121,8 +58,6 @@ int rte_xen_dom0_supported(void)
  * zone as well as a physical contiguous zone.
  */
 
-static uint64_t baseaddr_offset;
-
 static bool phys_addrs_available = true;
 
 #define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"
@@ -133,30 +68,24 @@ test_phys_addrs_available(void)
        uint64_t tmp;
        phys_addr_t physaddr;
 
-       /* For dom0, phys addresses can always be available */
-       if (rte_xen_dom0_supported())
+       if (!rte_eal_has_hugepages()) {
+               RTE_LOG(ERR, EAL,
+                       "Started without hugepages support, physical addresses not available\n");
+               phys_addrs_available = false;
                return;
+       }
 
        physaddr = rte_mem_virt2phy(&tmp);
        if (physaddr == RTE_BAD_PHYS_ADDR) {
-               RTE_LOG(ERR, EAL,
-                       "Cannot obtain physical addresses: %s. "
-                       "Only vfio will function.\n",
-                       strerror(errno));
+               if (rte_eal_iova_mode() == RTE_IOVA_PA)
+                       RTE_LOG(ERR, EAL,
+                               "Cannot obtain physical addresses: %s. "
+                               "Only vfio will function.\n",
+                               strerror(errno));
                phys_addrs_available = false;
        }
 }
 
-/* Lock page in physical memory and prevent from swapping. */
-int
-rte_mem_lock_page(const void *virt)
-{
-       unsigned long virtual = (unsigned long)virt;
-       int page_size = getpagesize();
-       unsigned long aligned = (virtual & ~ (page_size - 1));
-       return mlock((void*)aligned, page_size);
-}
-
 /*
  * Get physical address of any mapped virtual address in the current process.
  */
@@ -169,32 +98,9 @@ rte_mem_virt2phy(const void *virtaddr)
        int page_size;
        off_t offset;
 
-       /* when using dom0, /proc/self/pagemap always returns 0, check in
-        * dpdk memory by browsing the memsegs */
-       if (rte_xen_dom0_supported()) {
-               struct rte_mem_config *mcfg;
-               struct rte_memseg *memseg;
-               unsigned i;
-
-               mcfg = rte_eal_get_configuration()->mem_config;
-               for (i = 0; i < RTE_MAX_MEMSEG; i++) {
-                       memseg = &mcfg->memseg[i];
-                       if (memseg->addr == NULL)
-                               break;
-                       if (virtaddr > memseg->addr &&
-                                       virtaddr < RTE_PTR_ADD(memseg->addr,
-                                               memseg->len)) {
-                               return memseg->phys_addr +
-                                       RTE_PTR_DIFF(virtaddr, memseg->addr);
-                       }
-               }
-
-               return RTE_BAD_PHYS_ADDR;
-       }
-
        /* Cannot parse /proc/self/pagemap, no need to log errors everywhere */
        if (!phys_addrs_available)
-               return RTE_BAD_PHYS_ADDR;
+               return RTE_BAD_IOVA;
 
        /* standard page size */
        page_size = getpagesize();
@@ -203,7 +109,7 @@ rte_mem_virt2phy(const void *virtaddr)
        if (fd < 0) {
                RTE_LOG(ERR, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
                        __func__, strerror(errno));
-               return RTE_BAD_PHYS_ADDR;
+               return RTE_BAD_IOVA;
        }
 
        virt_pfn = (unsigned long)virtaddr / page_size;
@@ -212,7 +118,7 @@ rte_mem_virt2phy(const void *virtaddr)
                RTE_LOG(ERR, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
                                __func__, strerror(errno));
                close(fd);
-               return RTE_BAD_PHYS_ADDR;
+               return RTE_BAD_IOVA;
        }
 
        retval = read(fd, &page, PFN_MASK_SIZE);
@@ -220,12 +126,12 @@ rte_mem_virt2phy(const void *virtaddr)
        if (retval < 0) {
                RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
                                __func__, strerror(errno));
-               return RTE_BAD_PHYS_ADDR;
+               return RTE_BAD_IOVA;
        } else if (retval != PFN_MASK_SIZE) {
                RTE_LOG(ERR, EAL, "%s(): read %d bytes from /proc/self/pagemap "
                                "but expected %d:\n",
                                __func__, retval, PFN_MASK_SIZE);
-               return RTE_BAD_PHYS_ADDR;
+               return RTE_BAD_IOVA;
        }
 
        /*
@@ -233,7 +139,7 @@ rte_mem_virt2phy(const void *virtaddr)
         * pagemap.txt in linux Documentation)
         */
        if ((page & 0x7fffffffffffffULL) == 0)
-               return RTE_BAD_PHYS_ADDR;
+               return RTE_BAD_IOVA;
 
        physaddr = ((page & 0x7fffffffffffffULL) * page_size)
                + ((unsigned long)virtaddr % page_size);
@@ -241,6 +147,14 @@ rte_mem_virt2phy(const void *virtaddr)
        return physaddr;
 }
 
+rte_iova_t
+rte_mem_virt2iova(const void *virtaddr)
+{
+       if (rte_eal_iova_mode() == RTE_IOVA_VA)
+               return (uintptr_t)virtaddr;
+       return rte_mem_virt2phy(virtaddr);
+}
+
 /*
  * For each hugepage in hugepg_tbl, fill the physaddr value. We find
  * it by browsing the /proc/self/pagemap special file.
@@ -306,71 +220,6 @@ aslr_enabled(void)
        }
 }
 
-/*
- * Try to mmap *size bytes in /dev/zero. If it is successful, return the
- * pointer to the mmap'd area and keep *size unmodified. Else, retry
- * with a smaller zone: decrease *size by hugepage_sz until it reaches
- * 0. In this case, return NULL. Note: this function returns an address
- * which is a multiple of hugepage size.
- */
-static void *
-get_virtual_area(size_t *size, size_t hugepage_sz)
-{
-       void *addr;
-       int fd;
-       long aligned_addr;
-
-       if (internal_config.base_virtaddr != 0) {
-               addr = (void*) (uintptr_t) (internal_config.base_virtaddr +
-                               baseaddr_offset);
-       }
-       else addr = NULL;
-
-       RTE_LOG(DEBUG, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);
-
-       fd = open("/dev/zero", O_RDONLY);
-       if (fd < 0){
-               RTE_LOG(ERR, EAL, "Cannot open /dev/zero\n");
-               return NULL;
-       }
-       do {
-               addr = mmap(addr,
-                               (*size) + hugepage_sz, PROT_READ,
-#ifdef RTE_ARCH_PPC_64
-                               MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
-#else
-                               MAP_PRIVATE,
-#endif
-                               fd, 0);
-               if (addr == MAP_FAILED)
-                       *size -= hugepage_sz;
-       } while (addr == MAP_FAILED && *size > 0);
-
-       if (addr == MAP_FAILED) {
-               close(fd);
-               RTE_LOG(ERR, EAL, "Cannot get a virtual area: %s\n",
-                       strerror(errno));
-               return NULL;
-       }
-
-       munmap(addr, (*size) + hugepage_sz);
-       close(fd);
-
-       /* align addr to a huge page size boundary */
-       aligned_addr = (long)addr;
-       aligned_addr += (hugepage_sz - 1);
-       aligned_addr &= (~(hugepage_sz - 1));
-       addr = (void *)(aligned_addr);
-
-       RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",
-               addr, *size);
-
-       /* increment offset */
-       baseaddr_offset += *size;
-
-       return addr;
-}
-
 static sigjmp_buf huge_jmpenv;
 
 static void huge_sigbus_handler(int signo __rte_unused)
@@ -387,26 +236,94 @@ static int huge_wrap_sigsetjmp(void)
        return sigsetjmp(huge_jmpenv, 1);
 }
 
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+/* Callback for numa library. */
+void numa_error(char *where)
+{
+       RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
+}
+#endif
+
 /*
 * Mmap all hugepages of hugepage table: it first opens a file in
  * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
  * virtual address is stored in hugepg_tbl[i].orig_va, else it is stored
  * in hugepg_tbl[i].final_va. The second mapping (when orig is 0) tries to
- * map continguous physical blocks in contiguous virtual blocks.
+ * map contiguous physical blocks in contiguous virtual blocks.
  */
 static unsigned
-map_all_hugepages(struct hugepage_file *hugepg_tbl,
-               struct hugepage_info *hpi, int orig)
+map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
+                 uint64_t *essential_memory __rte_unused, int orig)
 {
        int fd;
        unsigned i;
        void *virtaddr;
        void *vma_addr = NULL;
        size_t vma_len = 0;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+       int node_id = -1;
+       int essential_prev = 0;
+       int oldpolicy;
+       struct bitmask *oldmask = numa_allocate_nodemask();
+       bool have_numa = true;
+       unsigned long maxnode = 0;
+
+       /* Check if kernel supports NUMA. */
+       if (numa_available() != 0) {
+               RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+               have_numa = false;
+       }
+
+       if (orig && have_numa) {
+               RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
+               if (get_mempolicy(&oldpolicy, oldmask->maskp,
+                                 oldmask->size + 1, 0, 0) < 0) {
+                       RTE_LOG(ERR, EAL,
+                               "Failed to get current mempolicy: %s. "
+                               "Assuming MPOL_DEFAULT.\n", strerror(errno));
+                       oldpolicy = MPOL_DEFAULT;
+               }
+               for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+                       if (internal_config.socket_mem[i])
+                               maxnode = i + 1;
+       }
+#endif
 
        for (i = 0; i < hpi->num_pages[0]; i++) {
                uint64_t hugepage_sz = hpi->hugepage_sz;
 
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+               if (maxnode) {
+                       unsigned int j;
+
+                       for (j = 0; j < maxnode; j++)
+                               if (essential_memory[j])
+                                       break;
+
+                       if (j == maxnode) {
+                               node_id = (node_id + 1) % maxnode;
+                               while (!internal_config.socket_mem[node_id]) {
+                                       node_id++;
+                                       node_id %= maxnode;
+                               }
+                               essential_prev = 0;
+                       } else {
+                               node_id = j;
+                               essential_prev = essential_memory[j];
+
+                               if (essential_memory[j] < hugepage_sz)
+                                       essential_memory[j] = 0;
+                               else
+                                       essential_memory[j] -= hugepage_sz;
+                       }
+
+                       RTE_LOG(DEBUG, EAL,
+                               "Setting policy MPOL_PREFERRED for socket %d\n",
+                               node_id);
+                       numa_set_preferred(node_id);
+               }
+#endif
+
                if (orig) {
                        hugepg_tbl[i].file_id = i;
                        hugepg_tbl[i].size = hugepage_sz;
@@ -451,7 +368,16 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
                        /* get the biggest virtual memory area up to
                         * vma_len. If it fails, vma_addr is NULL, so
                         * let the kernel provide the address. */
-                       vma_addr = get_virtual_area(&vma_len, hpi->hugepage_sz);
+                       vma_addr = eal_get_virtual_area(NULL, &vma_len,
+                                       hpi->hugepage_sz,
+                                       EAL_VIRTUAL_AREA_ALLOW_SHRINK |
+                                       EAL_VIRTUAL_AREA_UNMAP,
+#ifdef RTE_ARCH_PPC_64
+                                       MAP_HUGETLB
+#else
+                                       0
+#endif
+                                       );
                        if (vma_addr == NULL)
                                vma_len = hugepage_sz;
                }
@@ -461,7 +387,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
                if (fd < 0) {
                        RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
                                        strerror(errno));
-                       return i;
+                       goto out;
                }
 
                /* map the segment, and populate page tables,
@@ -472,13 +398,16 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
                        RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
                                        strerror(errno));
                        close(fd);
-                       return i;
+                       goto out;
                }
 
                if (orig) {
                        hugepg_tbl[i].orig_va = virtaddr;
                }
                else {
+                       /* rewrite physical addresses in IOVA as VA mode */
+                       if (rte_eal_iova_mode() == RTE_IOVA_VA)
+                               hugepg_tbl[i].physaddr = (uintptr_t)virtaddr;
                        hugepg_tbl[i].final_va = virtaddr;
                }
 
@@ -497,7 +426,12 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
                                munmap(virtaddr, hugepage_sz);
                                close(fd);
                                unlink(hugepg_tbl[i].filepath);
-                               return i;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+                               if (maxnode)
+                                       essential_memory[node_id] =
+                                               essential_prev;
+#endif
+                               goto out;
                        }
                        *(int *)virtaddr = 0;
                }
@@ -508,7 +442,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
                        RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
                                __func__, strerror(errno));
                        close(fd);
-                       return i;
+                       goto out;
                }
 
                close(fd);
@@ -517,6 +451,22 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl,
                vma_len -= hugepage_sz;
        }
 
+out:
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+       if (maxnode) {
+               RTE_LOG(DEBUG, EAL,
+                       "Restoring previous memory policy: %d\n", oldpolicy);
+               if (oldpolicy == MPOL_DEFAULT) {
+                       numa_set_localalloc();
+               } else if (set_mempolicy(oldpolicy, oldmask->maskp,
+                                        oldmask->size + 1) < 0) {
+                       RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
+                               strerror(errno));
+                       numa_set_localalloc();
+               }
+       }
+       numa_free_cpumask(oldmask);
+#endif
        return i;
 }
 
@@ -551,8 +501,8 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
 
        f = fopen("/proc/self/numa_maps", "r");
        if (f == NULL) {
-               RTE_LOG(NOTICE, EAL, "cannot open /proc/self/numa_maps,"
-                               " consider that all memory is in socket_id 0\n");
+               RTE_LOG(NOTICE, EAL, "NUMA support not available"
+                       " consider that all memory is in socket_id 0\n");
                return 0;
        }
 
@@ -601,6 +551,11 @@ find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
                        if (hugepg_tbl[i].orig_va == va) {
                                hugepg_tbl[i].socket_id = socket_id;
                                hp_count++;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+                               RTE_LOG(DEBUG, EAL,
+                                       "Hugepage %s is on socket %d\n",
+                                       hugepg_tbl[i].filepath, socket_id);
+#endif
                        }
                }
        }
@@ -652,6 +607,8 @@ create_shared_memory(const char *filename, const size_t mem_size)
        }
        retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        close(fd);
+       if (retval == MAP_FAILED)
+               return NULL;
        return retval;
 }
 
@@ -995,7 +952,10 @@ rte_eal_hugepage_init(void)
                                        strerror(errno));
                        return -1;
                }
-               mcfg->memseg[0].phys_addr = (phys_addr_t)(uintptr_t)addr;
+               if (rte_eal_iova_mode() == RTE_IOVA_VA)
+                       mcfg->memseg[0].iova = (uintptr_t)addr;
+               else
+                       mcfg->memseg[0].iova = RTE_BAD_IOVA;
                mcfg->memseg[0].addr = addr;
                mcfg->memseg[0].hugepage_sz = RTE_PGSIZE_4K;
                mcfg->memseg[0].len = internal_config.memory;
@@ -1003,17 +963,6 @@ rte_eal_hugepage_init(void)
                return 0;
        }
 
-/* check if app runs on Xen Dom0 */
-       if (internal_config.xen_dom0_support) {
-#ifdef RTE_LIBRTE_XEN_DOM0
-               /* use dom0_mm kernel driver to init memory */
-               if (rte_xen_dom0_memory_init() < 0)
-                       return -1;
-               else
-                       return 0;
-#endif
-       }
-
        /* calculate total number of hugepages available. at this point we haven't
         * yet started sorting them so they all are on socket 0 */
        for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
@@ -1039,6 +988,11 @@ rte_eal_hugepage_init(void)
 
        huge_register_sigbus();
 
+       /* make a copy of socket_mem, needed for balanced allocation. */
+       for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+               memory[i] = internal_config.socket_mem[i];
+
+
        /* map all hugepages and sort them */
        for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
                unsigned pages_old, pages_new;
@@ -1056,7 +1010,8 @@ rte_eal_hugepage_init(void)
 
                /* map all hugepages available */
                pages_old = hpi->num_pages[0];
-               pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, 1);
+               pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
+                                             memory, 1);
                if (pages_new < pages_old) {
                        RTE_LOG(DEBUG, EAL,
                                "%d not %d hugepages of size %u MB allocated\n",
@@ -1071,7 +1026,8 @@ rte_eal_hugepage_init(void)
                                continue;
                }
 
-               if (phys_addrs_available) {
+               if (phys_addrs_available &&
+                               rte_eal_iova_mode() != RTE_IOVA_VA) {
                        /* find physical addresses for each hugepage */
                        if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
                                RTE_LOG(DEBUG, EAL, "Failed to find phys addr "
@@ -1099,7 +1055,7 @@ rte_eal_hugepage_init(void)
                      sizeof(struct hugepage_file), cmp_physaddr);
 
                /* remap all hugepages */
-               if (map_all_hugepages(&tmp_hp[hp_offset], hpi, 0) !=
+               if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
                    hpi->num_pages[0]) {
                        RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
                                        (unsigned)(hpi->hugepage_sz / 0x100000));
@@ -1249,7 +1205,7 @@ rte_eal_hugepage_init(void)
                        if (j == RTE_MAX_MEMSEG)
                                break;
 
-                       mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
+                       mcfg->memseg[j].iova = hugepage[i].physaddr;
                        mcfg->memseg[j].addr = hugepage[i].final_va;
                        mcfg->memseg[j].len = hugepage[i].size;
                        mcfg->memseg[j].socket_id = hugepage[i].socket_id;
@@ -1260,7 +1216,7 @@ rte_eal_hugepage_init(void)
 #ifdef RTE_ARCH_PPC_64
                /* Use the phy and virt address of the last page as segment
                 * address for IBM Power architecture */
-                       mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
+                       mcfg->memseg[j].iova = hugepage[i].physaddr;
                        mcfg->memseg[j].addr = hugepage[i].final_va;
 #endif
                        mcfg->memseg[j].len += mcfg->memseg[j].hugepage_sz;
@@ -1319,7 +1275,7 @@ rte_eal_hugepage_attach(void)
        unsigned i, s = 0; /* s used to track the segment number */
        unsigned max_seg = RTE_MAX_MEMSEG;
        off_t size = 0;
-       int fd, fd_zero = -1, fd_hugepage = -1;
+       int fd, fd_hugepage = -1;
 
        if (aslr_enabled() > 0) {
                RTE_LOG(WARNING, EAL, "WARNING: Address Space Layout Randomization "
@@ -1330,22 +1286,6 @@ rte_eal_hugepage_attach(void)
 
        test_phys_addrs_available();
 
-       if (internal_config.xen_dom0_support) {
-#ifdef RTE_LIBRTE_XEN_DOM0
-               if (rte_xen_dom0_memory_attach() < 0) {
-                       RTE_LOG(ERR, EAL, "Failed to attach memory segments of primary "
-                                       "process\n");
-                       return -1;
-               }
-               return 0;
-#endif
-       }
-
-       fd_zero = open("/dev/zero", O_RDONLY);
-       if (fd_zero < 0) {
-               RTE_LOG(ERR, EAL, "Could not open /dev/zero\n");
-               goto error;
-       }
        fd_hugepage = open(eal_hugepage_info_path(), O_RDONLY);
        if (fd_hugepage < 0) {
                RTE_LOG(ERR, EAL, "Could not open %s\n", eal_hugepage_info_path());
@@ -1355,6 +1295,8 @@ rte_eal_hugepage_attach(void)
        /* map all segments into memory to make sure we get the addrs */
        for (s = 0; s < RTE_MAX_MEMSEG; ++s) {
                void *base_addr;
+               size_t mmap_sz;
+               int mmap_flags = 0;
 
                /*
                 * the first memory segment with len==0 is the one that
@@ -1363,35 +1305,26 @@ rte_eal_hugepage_attach(void)
                if (mcfg->memseg[s].len == 0)
                        break;
 
-               /*
-                * fdzero is mmapped to get a contiguous block of virtual
-                * addresses of the appropriate memseg size.
-                * use mmap to get identical addresses as the primary process.
+               /* get identical addresses as the primary process.
                 */
-               base_addr = mmap(mcfg->memseg[s].addr, mcfg->memseg[s].len,
-                                PROT_READ,
 #ifdef RTE_ARCH_PPC_64
-                                MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
-#else
-                                MAP_PRIVATE,
+               mmap_flags |= MAP_HUGETLB;
 #endif
-                                fd_zero, 0);
-               if (base_addr == MAP_FAILED ||
-                   base_addr != mcfg->memseg[s].addr) {
+               mmap_sz = mcfg->memseg[s].len;
+               base_addr = eal_get_virtual_area(mcfg->memseg[s].addr,
+                               &mmap_sz, mcfg->memseg[s].hugepage_sz, 0,
+                               mmap_flags);
+               if (base_addr == NULL) {
                        max_seg = s;
-                       if (base_addr != MAP_FAILED) {
-                               /* errno is stale, don't use */
-                               RTE_LOG(ERR, EAL, "Could not mmap %llu bytes "
-                                       "in /dev/zero at [%p], got [%p] - "
-                                       "please use '--base-virtaddr' option\n",
-                                       (unsigned long long)mcfg->memseg[s].len,
-                                       mcfg->memseg[s].addr, base_addr);
-                               munmap(base_addr, mcfg->memseg[s].len);
+                       if (rte_errno == EADDRNOTAVAIL) {
+                               RTE_LOG(ERR, EAL, "Could not mmap %zu bytes at [%p] - please use '--base-virtaddr' option\n",
+                                       mcfg->memseg[s].len,
+                                       mcfg->memseg[s].addr);
                        } else {
-                               RTE_LOG(ERR, EAL, "Could not mmap %llu bytes "
-                                       "in /dev/zero at [%p]: '%s'\n",
-                                       (unsigned long long)mcfg->memseg[s].len,
-                                       mcfg->memseg[s].addr, strerror(errno));
+                               RTE_LOG(ERR, EAL, "Could not mmap %zu bytes at [%p]: '%s'\n",
+                                       mcfg->memseg[s].len,
+                                       mcfg->memseg[s].addr,
+                                       rte_strerror(rte_errno));
                        }
                        if (aslr_enabled() > 0) {
                                RTE_LOG(ERR, EAL, "It is recommended to "
@@ -1456,7 +1389,6 @@ rte_eal_hugepage_attach(void)
        }
        /* unmap the hugepage config file, since we are done using it */
        munmap(hp, size);
-       close(fd_zero);
        close(fd_hugepage);
        return 0;
 
@@ -1465,14 +1397,12 @@ error:
                munmap(mcfg->memseg[i].addr, mcfg->memseg[i].len);
        if (hp != NULL && hp != MAP_FAILED)
                munmap(hp, size);
-       if (fd_zero >= 0)
-               close(fd_zero);
        if (fd_hugepage >= 0)
                close(fd_hugepage);
        return -1;
 }
 
-bool
+int
 rte_eal_using_phys_addrs(void)
 {
        return phys_addrs_available;