ivshmem: remove library and its EAL integration
[dpdk.git] / lib / librte_eal / linuxapp / eal / eal_memory.c
index 1220677..992a1b1 100644 (file)
@@ -99,6 +99,8 @@
 #include "eal_filesystem.h"
 #include "eal_hugepages.h"
 
+#define PFN_MASK_SIZE  8
+
 #ifdef RTE_LIBRTE_XEN_DOM0
 int rte_xen_dom0_supported(void)
 {
@@ -158,12 +160,35 @@ rte_mem_lock_page(const void *virt)
 phys_addr_t
 rte_mem_virt2phy(const void *virtaddr)
 {
-       int fd;
+       int fd, retval;
        uint64_t page, physaddr;
        unsigned long virt_pfn;
        int page_size;
        off_t offset;
 
+       /* under Xen dom0, /proc/self/pagemap always returns 0, so look up
+        * the address in DPDK's own memory map by browsing the memsegs */
+       if (rte_xen_dom0_supported()) {
+               struct rte_mem_config *mcfg;
+               struct rte_memseg *memseg;
+               unsigned i;
+
+               mcfg = rte_eal_get_configuration()->mem_config;
+               for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+                       memseg = &mcfg->memseg[i];
+                       if (memseg->addr == NULL)
+                               break;
+                       if (virtaddr > memseg->addr &&
+                                       virtaddr < RTE_PTR_ADD(memseg->addr,
+                                               memseg->len)) {
+                               return memseg->phys_addr +
+                                       RTE_PTR_DIFF(virtaddr, memseg->addr);
+                       }
+               }
+
+               return RTE_BAD_PHYS_ADDR;
+       }
+
        /* Cannot parse /proc/self/pagemap, no need to log errors everywhere */
        if (!proc_pagemap_readable)
                return RTE_BAD_PHYS_ADDR;
@@ -186,10 +211,17 @@ rte_mem_virt2phy(const void *virtaddr)
                close(fd);
                return RTE_BAD_PHYS_ADDR;
        }
-       if (read(fd, &page, sizeof(uint64_t)) < 0) {
+
+       retval = read(fd, &page, PFN_MASK_SIZE);
+       close(fd);
+       if (retval < 0) {
                RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
                                __func__, strerror(errno));
-               close(fd);
+               return RTE_BAD_PHYS_ADDR;
+       } else if (retval != PFN_MASK_SIZE) {
+               RTE_LOG(ERR, EAL, "%s(): read %d bytes from /proc/self/pagemap "
+                               "but expected %d\n",
+                               __func__, retval, PFN_MASK_SIZE);
                return RTE_BAD_PHYS_ADDR;
        }
 
@@ -199,7 +231,7 @@ rte_mem_virt2phy(const void *virtaddr)
         */
        physaddr = ((page & 0x7fffffffffffffULL) * page_size)
                + ((unsigned long)virtaddr % page_size);
-       close(fd);
+
        return physaddr;
 }
 
@@ -1136,7 +1168,7 @@ int
 rte_eal_hugepage_init(void)
 {
        struct rte_mem_config *mcfg;
-       struct hugepage_file *hugepage, *tmp_hp = NULL;
+       struct hugepage_file *hugepage = NULL, *tmp_hp = NULL;
        struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
 
        uint64_t memory[RTE_MAX_NUMA_NODES];
@@ -1159,7 +1191,7 @@ rte_eal_hugepage_init(void)
        /* hugetlbfs can be disabled */
        if (internal_config.no_hugetlbfs) {
                addr = mmap(NULL, internal_config.memory, PROT_READ | PROT_WRITE,
-                       MAP_LOCKED | MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+                               MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
                if (addr == MAP_FAILED) {
                        RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
                                        strerror(errno));
@@ -1404,15 +1436,8 @@ rte_eal_hugepage_init(void)
        free(tmp_hp);
        tmp_hp = NULL;
 
-       /* find earliest free memseg - this is needed because in case of IVSHMEM,
-        * segments might have already been initialized */
-       for (j = 0; j < RTE_MAX_MEMSEG; j++)
-               if (mcfg->memseg[j].addr == NULL) {
-                       /* move to previous segment and exit loop */
-                       j--;
-                       break;
-               }
-
+       /* first memseg index shall be 0 after incrementing it below */
+       j = -1;
        for (i = 0; i < nr_hugefiles; i++) {
                new_memseg = 0;
 
@@ -1479,14 +1504,19 @@ rte_eal_hugepage_init(void)
                        "of memory.\n",
                        i, nr_hugefiles, RTE_STR(CONFIG_RTE_MAX_MEMSEG),
                        RTE_MAX_MEMSEG);
-               return -ENOMEM;
+               goto fail;
        }
 
+       munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));
+
        return 0;
 
 fail:
        huge_recover_sigbus();
        free(tmp_hp);
+       if (hugepage != NULL)
+               munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));
+
        return -1;
 }
 
@@ -1560,15 +1590,6 @@ rte_eal_hugepage_attach(void)
                if (mcfg->memseg[s].len == 0)
                        break;
 
-#ifdef RTE_LIBRTE_IVSHMEM
-               /*
-                * if segment has ioremap address set, it's an IVSHMEM segment and
-                * doesn't need mapping as it was already mapped earlier
-                */
-               if (mcfg->memseg[s].ioremap_addr != 0)
-                       continue;
-#endif
-
                /*
                 * fdzero is mmapped to get a contiguous block of virtual
                 * addresses of the appropriate memseg size.
@@ -1607,16 +1628,6 @@ rte_eal_hugepage_attach(void)
                void *addr, *base_addr;
                uintptr_t offset = 0;
                size_t mapping_size;
-#ifdef RTE_LIBRTE_IVSHMEM
-               /*
-                * if segment has ioremap address set, it's an IVSHMEM segment and
-                * doesn't need mapping as it was already mapped earlier
-                */
-               if (mcfg->memseg[s].ioremap_addr != 0) {
-                       s++;
-                       continue;
-               }
-#endif
                /*
                 * free previously mapped memory so we can map the
                 * hugepages into the space