ivshmem: remove library and its EAL integration
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 42a29fa..992a1b1 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -99,6 +99,8 @@
 #include "eal_filesystem.h"
 #include "eal_hugepages.h"
 
+#define PFN_MASK_SIZE  8
+
 #ifdef RTE_LIBRTE_XEN_DOM0
 int rte_xen_dom0_supported(void)
 {
@@ -158,7 +160,7 @@ rte_mem_lock_page(const void *virt)
 phys_addr_t
 rte_mem_virt2phy(const void *virtaddr)
 {
-       int fd;
+       int fd, retval;
        uint64_t page, physaddr;
        unsigned long virt_pfn;
        int page_size;
@@ -209,10 +211,17 @@ rte_mem_virt2phy(const void *virtaddr)
                close(fd);
                return RTE_BAD_PHYS_ADDR;
        }
-       if (read(fd, &page, sizeof(uint64_t)) < 0) {
+
+       retval = read(fd, &page, PFN_MASK_SIZE);
+       close(fd);
+       if (retval < 0) {
                RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
                                __func__, strerror(errno));
-               close(fd);
+               return RTE_BAD_PHYS_ADDR;
+       } else if (retval != PFN_MASK_SIZE) {
+               RTE_LOG(ERR, EAL, "%s(): read %d bytes from /proc/self/pagemap "
+                               "but expected %d\n",
+                               __func__, retval, PFN_MASK_SIZE);
                return RTE_BAD_PHYS_ADDR;
        }
 
@@ -222,7 +231,7 @@ rte_mem_virt2phy(const void *virtaddr)
         */
        physaddr = ((page & 0x7fffffffffffffULL) * page_size)
                + ((unsigned long)virtaddr % page_size);
-       close(fd);
+
        return physaddr;
 }
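
Taken together, the fixed lookup path amounts to the following
self-contained sketch: open the pagemap, seek to the record for the
page, read exactly one 8-byte record, and treat a read error and a
short read as equally fatal. This only illustrates the technique;
rte_mem_virt2phy() above is the authoritative version, and the
virt2phy()/BAD_ADDR names are made up for the example.

    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    #define BAD_ADDR ((uint64_t)-1)

    static uint64_t
    virt2phy(const void *virt)
    {
            long page_size = sysconf(_SC_PAGESIZE);
            unsigned long virt_pfn;
            uint64_t entry;
            ssize_t retval;
            int fd;

            if (page_size <= 0)
                    return BAD_ADDR;
            virt_pfn = (unsigned long)virt / page_size;

            fd = open("/proc/self/pagemap", O_RDONLY);
            if (fd < 0)
                    return BAD_ADDR;
            if (lseek(fd, virt_pfn * sizeof(uint64_t), SEEK_SET) == (off_t)-1) {
                    close(fd);
                    return BAD_ADDR;
            }
            retval = read(fd, &entry, sizeof(uint64_t));
            close(fd);                      /* close before the checks, as in the patch */
            if (retval != (ssize_t)sizeof(uint64_t))
                    return BAD_ADDR;        /* covers both error and short read */
            return (entry & 0x7fffffffffffffULL) * page_size
                    + (unsigned long)virt % page_size;
    }
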
 
@@ -1427,15 +1436,8 @@ rte_eal_hugepage_init(void)
        free(tmp_hp);
        tmp_hp = NULL;
 
-       /* find earliest free memseg - this is needed because in case of IVSHMEM,
-        * segments might have already been initialized */
-       for (j = 0; j < RTE_MAX_MEMSEG; j++)
-               if (mcfg->memseg[j].addr == NULL) {
-                       /* move to previous segment and exit loop */
-                       j--;
-                       break;
-               }
-
+       /* start j at -1 so the increment below puts the first memseg at index 0 */
+       j = -1;
        for (i = 0; i < nr_hugefiles; i++) {
                new_memseg = 0;
 
@@ -1588,15 +1590,6 @@ rte_eal_hugepage_attach(void)
                if (mcfg->memseg[s].len == 0)
                        break;
 
-#ifdef RTE_LIBRTE_IVSHMEM
-               /*
-                * if segment has ioremap address set, it's an IVSHMEM segment and
-                * doesn't need mapping as it was already mapped earlier
-                */
-               if (mcfg->memseg[s].ioremap_addr != 0)
-                       continue;
-#endif
-
                /*
                 * fdzero is mmapped to get a contiguous block of virtual
                 * addresses of the appropriate memseg size.
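
The fdzero mapping the comment describes is plain address-space
reservation: mapping /dev/zero read-only and private commits no
hugepages, it merely pins down a contiguous virtual range, at the
address the primary process recorded, that the real hugepage mappings
can later replace. A sketch under that assumption, reusing the
surrounding loop's mcfg and s (fd_zero stands in for the real
descriptor variable):

    #include <fcntl.h>
    #include <sys/mman.h>

    int fd_zero = open("/dev/zero", O_RDONLY);
    /* Ask for the same virtual range the primary used, so pointers into
     * shared data structures stay valid across processes. */
    void *base_addr = mmap(mcfg->memseg[s].addr, mcfg->memseg[s].len,
                           PROT_READ, MAP_PRIVATE, fd_zero, 0);
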
@@ -1635,16 +1628,6 @@ rte_eal_hugepage_attach(void)
                void *addr, *base_addr;
                uintptr_t offset = 0;
                size_t mapping_size;
-#ifdef RTE_LIBRTE_IVSHMEM
-               /*
-                * if segment has ioremap address set, it's an IVSHMEM segment and
-                * doesn't need mapping as it was already mapped earlier
-                */
-               if (mcfg->memseg[s].ioremap_addr != 0) {
-                       s++;
-                       continue;
-               }
-#endif
                /*
                 * free previously mapped memory so we can map the
                 * hugepages into the space
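
The step this comment opens, reduced to its core: punch a hole in the
/dev/zero reservation, map the hugepage file into it, and verify the
kernel honoured the requested address. A condensed sketch of that idea
using the variables declared above (the real loop walks the hugepage
files and handles errors more carefully):

    void *want = RTE_PTR_ADD(base_addr, offset);

    munmap(want, mapping_size);
    addr = mmap(want, mapping_size, PROT_READ | PROT_WRITE,
                MAP_SHARED, fd, 0);
    if (addr == MAP_FAILED || addr != want)
            goto error;     /* kernel placed the mapping elsewhere */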