mem: fix anonymous mapping on Power9
[dpdk.git] / lib / librte_eal / linuxapp / eal / eal_memalloc.c
index 301c34e..7849395 100644
 #include <numaif.h>
 #endif
 #include <linux/falloc.h>
+#include <linux/mman.h> /* for hugetlb-related mmap flags */
 
 #include <rte_common.h>
 #include <rte_log.h>
 #include <rte_eal_memconfig.h>
 #include <rte_eal.h>
+#include <rte_errno.h>
 #include <rte_memory.h>
 #include <rte_spinlock.h>
 
 #include "eal_filesystem.h"
 #include "eal_internal_cfg.h"
 #include "eal_memalloc.h"
+#include "eal_private.h"
+
+const int anonymous_hugepages_supported =
+#ifdef MAP_HUGE_SHIFT
+               1;
+#define RTE_MAP_HUGE_SHIFT MAP_HUGE_SHIFT
+#else
+               0;
+#define RTE_MAP_HUGE_SHIFT 26
+#endif
+
+/*
+ * we don't actually care if memfd itself is supported - we only need to check
+ * if memfd supports hugetlbfs, as that already implies memfd support.
+ *
+ * also, this is not a constant, because while we may be *compiled* with memfd
+ * hugetlbfs support, we might not be *running* on a system that supports memfd
+ * and/or memfd with hugetlbfs, so we need to be able to adjust this flag at
+ * runtime, and fall back to anonymous memory.
+ */
+static int memfd_create_supported =
+#ifdef MFD_HUGETLB
+#define MEMFD_SUPPORTED
+               1;
+#else
+               0;
+#endif
 
 /*
 * not all kernel versions support fallocate on hugetlbfs, so fall back to
 static int fallocate_supported = -1; /* unknown */
 
 /*
- * If each page is in a separate file, we can close fd's since we need each fd
- * only once. However, in single file segments mode, we can get away with using
- * a single fd for entire segments, but we need to store them somewhere. Each
- * fd is different within each process, so we'll store them in a local tailq.
+ * we have two modes - single file segments, and file-per-page mode.
+ *
+ * for single-file segments, we need some kind of mechanism to keep track of
+ * which hugepages can be freed back to the system, and which cannot. we cannot
+ * use flock() because it doesn't allow locking parts of a file, and we cannot
+ * use fcntl() due to issues with its semantics, so we will have to rely on
+ * one lockfile per page. thus, we will use the 'fds' array to keep track
+ * of per-page lockfiles. we will store the actual segment list fd in the
+ * 'memseg_list_fd' field.
+ *
+ * for file-per-page mode, each page will have its own fd, so 'memseg_list_fd'
+ * will be invalid (set to -1), and we'll use 'fds' to keep track of page fd's.
+ *
+ * we cannot know how many pages a system will have in advance, but we do know
+ * that they come in lists, and we know lengths of these lists. so, simply store
+ * a malloc'd array of fd's indexed by list and segment index.
+ *
+ * they will be initialized at startup, and filled as we allocate/deallocate
+ * segments.
  */
-struct msl_entry {
-       TAILQ_ENTRY(msl_entry) next;
-       unsigned int msl_idx;
-       int fd;
-};
-
-/** Double linked list of memseg list fd's. */
-TAILQ_HEAD(msl_entry_list, msl_entry);
-
-static struct msl_entry_list msl_entry_list =
-               TAILQ_HEAD_INITIALIZER(msl_entry_list);
-static rte_spinlock_t tailq_lock = RTE_SPINLOCK_INITIALIZER;
+static struct {
+       int *fds; /**< dynamically allocated array of segment lock fd's */
+       int memseg_list_fd; /**< memseg list fd */
+       int len; /**< total length of the array */
       int count; /**< entries used in the array */
+} fd_list[RTE_MAX_MEMSEG_LISTS];
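
For orientation, here is a minimal sketch of how the two modes consult this structure (a hypothetical helper written against the statics in this file, not part of the patch):

static int
example_lookup_fd(int list_idx, int seg_idx)
{
        /* single-file segments: one fd covers the whole list */
        if (internal_config.single_file_segments)
                return fd_list[list_idx].memseg_list_fd; /* -1 if unused */
        /* file-per-page: one fd per segment */
        return fd_list[list_idx].fds[seg_idx]; /* -1 if unused */
}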
 
 /** local copy of a memory map, used to synchronize memory hotplug in MP */
 static struct rte_memseg_list local_memsegs[RTE_MAX_MEMSEG_LISTS];
@@ -142,11 +180,11 @@ prepare_numa(int *oldpolicy, struct bitmask *oldmask, int socket_id)
 }
 
 static void
-resotre_numa(int *oldpolicy, struct bitmask *oldmask)
+restore_numa(int *oldpolicy, struct bitmask *oldmask)
 {
        RTE_LOG(DEBUG, EAL,
                "Restoring previous memory policy: %d\n", *oldpolicy);
-       if (oldpolicy == MPOL_DEFAULT) {
+       if (*oldpolicy == MPOL_DEFAULT) {
                numa_set_localalloc();
        } else if (set_mempolicy(*oldpolicy, oldmask->maskp,
                                 oldmask->size + 1) < 0) {
@@ -158,35 +196,6 @@ resotre_numa(int *oldpolicy, struct bitmask *oldmask)
 }
 #endif
 
-static struct msl_entry *
-get_msl_entry_by_idx(unsigned int list_idx)
-{
-       struct msl_entry *te;
-
-       rte_spinlock_lock(&tailq_lock);
-
-       TAILQ_FOREACH(te, &msl_entry_list, next) {
-               if (te->msl_idx == list_idx)
-                       break;
-       }
-       if (te == NULL) {
-               /* doesn't exist, so create it and set fd to -1 */
-
-               te = malloc(sizeof(*te));
-               if (te == NULL) {
-                       RTE_LOG(ERR, EAL, "%s(): cannot allocate tailq entry for memseg list\n",
-                               __func__);
-                       goto unlock;
-               }
-               te->msl_idx = list_idx;
-               te->fd = -1;
-               TAILQ_INSERT_TAIL(&msl_entry_list, te, next);
-       }
-unlock:
-       rte_spinlock_unlock(&tailq_lock);
-       return te;
-}
-
 /*
  * uses fstat to report the size of a file on disk
  */
@@ -199,127 +208,271 @@ get_file_size(int fd)
        return st.st_size;
 }
 
-/*
- * uses fstat to check if file size on disk is zero (regular fstat won't show
- * true file size due to how fallocate works)
- */
-static bool
-is_zero_length(int fd)
+static inline uint32_t
+bsf64(uint64_t v)
 {
-       struct stat st;
-       if (fstat(fd, &st) < 0)
-               return false;
-       return st.st_blocks == 0;
+       return (uint32_t)__builtin_ctzll(v);
+}
+
+static inline uint32_t
+log2_u64(uint64_t v)
+{
+       if (v == 0)
+               return 0;
+       v = rte_align64pow2(v);
+       return bsf64(v);
 }
 
-/* we cannot use rte_memseg_list_walk() here because we will be holding a
- * write lock whenever we enter every function in this file, however copying
- * the same iteration code everywhere is not ideal as well. so, use a lockless
- * copy of memseg list walk here.
- */
 static int
-memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg)
+pagesz_flags(uint64_t page_sz)
 {
-       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
-       int i, ret = 0;
+       /* as per mmap() manpage, all page sizes are log2 of page size
+        * shifted by MAP_HUGE_SHIFT
+        */
+       int log2 = log2_u64(page_sz);
+       return log2 << RTE_MAP_HUGE_SHIFT;
+}
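
As a standalone sanity check of this encoding (an illustrative program, not part of the patch; it assumes <linux/mman.h> provides MAP_HUGE_SHIFT, and guards MAP_HUGE_2MB since older headers may lack it):

#include <stdio.h>
#include <linux/mman.h>

int main(void)
{
        int flag = 21 << MAP_HUGE_SHIFT; /* log2(2 MiB) == 21 */

#ifdef MAP_HUGE_2MB
        printf("2 MiB flag matches MAP_HUGE_2MB: %s\n",
                        flag == MAP_HUGE_2MB ? "yes" : "no");
#else
        printf("2 MiB flag value: %#x\n", flag);
#endif
        return 0;
}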
 
-       for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
-               struct rte_memseg_list *msl = &mcfg->memsegs[i];
+/* returns 1 on successful lock, 0 on unsuccessful lock, -1 on error */
+static int lock(int fd, int type)
+{
+       int ret;
 
-               if (msl->base_va == NULL)
-                       continue;
+       /* flock may be interrupted */
+       do {
+               ret = flock(fd, type | LOCK_NB);
+       } while (ret && errno == EINTR);
 
-               ret = func(msl, arg);
-               if (ret < 0)
-                       return -1;
-               if (ret > 0)
-                       return 1;
+       if (ret && errno == EWOULDBLOCK) {
+               /* couldn't lock */
+               return 0;
+       } else if (ret) {
+               RTE_LOG(ERR, EAL, "%s(): error calling flock(): %s\n",
+                       __func__, strerror(errno));
+               return -1;
        }
+       /* lock was successful */
+       return 1;
+}
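
A sketch of how callers in this file consume the tri-state result (hypothetical caller, for illustration only):

static int
example_try_exclusive(int fd)
{
        int ret = lock(fd, LOCK_EX);

        if (ret < 0)
                return -1; /* flock() itself failed */
        if (ret == 0)
                return 0;  /* another process holds the lock */
        return 1;          /* ret == 1: we hold the exclusive lock */
}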
+
+static int get_segment_lock_fd(int list_idx, int seg_idx)
+{
+       char path[PATH_MAX] = {0};
+       int fd;
+
+       if (list_idx < 0 || list_idx >= (int)RTE_DIM(fd_list))
+               return -1;
+       if (seg_idx < 0 || seg_idx >= fd_list[list_idx].len)
+               return -1;
+
+       fd = fd_list[list_idx].fds[seg_idx];
+       /* does this lock already exist? */
+       if (fd >= 0)
+               return fd;
+
+       eal_get_hugefile_lock_path(path, sizeof(path),
+                       list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);
+
+       fd = open(path, O_CREAT | O_RDWR, 0660);
+       if (fd < 0) {
+               RTE_LOG(ERR, EAL, "%s(): error creating lockfile '%s': %s\n",
+                       __func__, path, strerror(errno));
+               return -1;
+       }
+       /* take out a read lock */
+       if (lock(fd, LOCK_SH) != 1) {
+               RTE_LOG(ERR, EAL, "%s(): failed to take out a readlock on '%s': %s\n",
+                       __func__, path, strerror(errno));
+               close(fd);
+               return -1;
+       }
+       /* store it for future reference */
+       fd_list[list_idx].fds[seg_idx] = fd;
+       fd_list[list_idx].count++;
+       return fd;
+}
+
+static int unlock_segment(int list_idx, int seg_idx)
+{
+       int fd, ret;
+
+       if (list_idx < 0 || list_idx >= (int)RTE_DIM(fd_list))
+               return -1;
+       if (seg_idx < 0 || seg_idx >= fd_list[list_idx].len)
+               return -1;
+
+       fd = fd_list[list_idx].fds[seg_idx];
+
+       /* upgrade lock to exclusive to see if we can remove the lockfile */
+       ret = lock(fd, LOCK_EX);
+       if (ret == 1) {
+               /* we've succeeded in taking exclusive lock, this lockfile may
+                * be removed.
+                */
+               char path[PATH_MAX] = {0};
+               eal_get_hugefile_lock_path(path, sizeof(path),
+                               list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);
+               if (unlink(path)) {
+                       RTE_LOG(ERR, EAL, "%s(): error removing lockfile '%s': %s\n",
+                                       __func__, path, strerror(errno));
+               }
+       }
+       /* we don't want to leak the fd, so even if we fail to lock, close fd
+        * and remove it from list anyway.
+        */
+       close(fd);
+       fd_list[list_idx].fds[seg_idx] = -1;
+       fd_list[list_idx].count--;
+
+       if (ret < 0)
+               return -1;
        return 0;
 }
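
The lock-upgrade trick above relies on standard flock() semantics: re-locking an fd with LOCK_EX converts an existing shared lock, and with LOCK_NB the call fails with EWOULDBLOCK while any other shared holder remains. A self-contained sketch of that "last user" test:

#include <errno.h>
#include <sys/file.h>

static int
example_is_last_user(int lock_fd)
{
        if (flock(lock_fd, LOCK_EX | LOCK_NB) == 0)
                return 1;  /* sole remaining holder */
        if (errno == EWOULDBLOCK)
                return 0;  /* other processes still hold shared locks */
        return -1;         /* real error */
}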
 
+static int
+get_seg_memfd(struct hugepage_info *hi __rte_unused,
+               unsigned int list_idx __rte_unused,
+               unsigned int seg_idx __rte_unused)
+{
+#ifdef MEMFD_SUPPORTED
+       int fd;
+       char segname[250]; /* as per manpage, limit is 249 bytes plus null */
+
+       if (internal_config.single_file_segments) {
+               fd = fd_list[list_idx].memseg_list_fd;
+
+               if (fd < 0) {
+                       int flags = MFD_HUGETLB | pagesz_flags(hi->hugepage_sz);
+
+                       snprintf(segname, sizeof(segname), "seg_%i", list_idx);
+                       fd = memfd_create(segname, flags);
+                       if (fd < 0) {
+                               RTE_LOG(DEBUG, EAL, "%s(): memfd create failed: %s\n",
+                                       __func__, strerror(errno));
+                               return -1;
+                       }
+                       fd_list[list_idx].memseg_list_fd = fd;
+               }
+       } else {
+               fd = fd_list[list_idx].fds[seg_idx];
+
+               if (fd < 0) {
+                       int flags = MFD_HUGETLB | pagesz_flags(hi->hugepage_sz);
+
+                       snprintf(segname, sizeof(segname), "seg_%i-%i",
+                                       list_idx, seg_idx);
+                       fd = memfd_create(segname, flags);
+                       if (fd < 0) {
+                               RTE_LOG(DEBUG, EAL, "%s(): memfd create failed: %s\n",
+                                       __func__, strerror(errno));
+                               return -1;
+                       }
+                       fd_list[list_idx].fds[seg_idx] = fd;
+               }
+       }
+       return fd;
+#endif
+       return -1;
+}
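
A self-contained sketch of the memfd_create() call used above (assumes glibc >= 2.27 for the wrapper; MFD_HUGETLB is defined as a fallback in case the headers predate it):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

#ifndef MFD_HUGETLB
#define MFD_HUGETLB 0x0004U /* from <linux/memfd.h> */
#endif

int main(void)
{
        /* default hugepage size; OR in a size flag built with
         * pagesz_flags() to request a specific hugepage size
         */
        int fd = memfd_create("example_seg", MFD_HUGETLB);

        if (fd < 0) {
                perror("memfd_create"); /* EINVAL: no hugetlb memfd support */
                return 1;
        }
        printf("anonymous hugepage-backed fd: %d\n", fd);
        return 0;
}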
+
 static int
 get_seg_fd(char *path, int buflen, struct hugepage_info *hi,
                unsigned int list_idx, unsigned int seg_idx)
 {
        int fd;
 
+       /* for in-memory mode, we only make it here when we're sure we support
+        * memfd, and this is a special case.
+        */
+       if (internal_config.in_memory)
+               return get_seg_memfd(hi, list_idx, seg_idx);
+
        if (internal_config.single_file_segments) {
-               /*
-                * try to find a tailq entry, for this memseg list, or create
-                * one if it doesn't exist.
-                */
-               struct msl_entry *te = get_msl_entry_by_idx(list_idx);
-               if (te == NULL) {
-                       RTE_LOG(ERR, EAL, "%s(): cannot allocate tailq entry for memseg list\n",
-                               __func__);
-                       return -1;
-               } else if (te->fd < 0) {
-                       /* create a hugepage file */
-                       eal_get_hugefile_path(path, buflen, hi->hugedir,
-                                       list_idx);
+               /* create a hugepage file path */
+               eal_get_hugefile_path(path, buflen, hi->hugedir, list_idx);
+
+               fd = fd_list[list_idx].memseg_list_fd;
+
+               if (fd < 0) {
                        fd = open(path, O_CREAT | O_RDWR, 0600);
                        if (fd < 0) {
-                               RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n",
+                               RTE_LOG(ERR, EAL, "%s(): open failed: %s\n",
                                        __func__, strerror(errno));
                                return -1;
                        }
-                       te->fd = fd;
-               } else {
-                       fd = te->fd;
+                       /* take out a read lock and keep it indefinitely */
+                       if (lock(fd, LOCK_SH) < 0) {
+                               RTE_LOG(ERR, EAL, "%s(): lock failed: %s\n",
+                                       __func__, strerror(errno));
+                               close(fd);
+                               return -1;
+                       }
+                       fd_list[list_idx].memseg_list_fd = fd;
                }
        } else {
-               /* one file per page, just create it */
+               /* create a hugepage file path */
                eal_get_hugefile_path(path, buflen, hi->hugedir,
                                list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);
-               fd = open(path, O_CREAT | O_RDWR, 0600);
+
+               fd = fd_list[list_idx].fds[seg_idx];
+
                if (fd < 0) {
-                       RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
-                                       strerror(errno));
-                       return -1;
+                       fd = open(path, O_CREAT | O_RDWR, 0600);
+                       if (fd < 0) {
+                               RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n",
+                                       __func__, strerror(errno));
+                               return -1;
+                       }
+                       /* take out a read lock */
+                       if (lock(fd, LOCK_SH) < 0) {
+                               RTE_LOG(ERR, EAL, "%s(): lock failed: %s\n",
+                                       __func__, strerror(errno));
+                               close(fd);
+                               return -1;
+                       }
+                       fd_list[list_idx].fds[seg_idx] = fd;
                }
        }
        return fd;
 }
 
-/* returns 1 on successful lock, 0 on unsuccessful lock, -1 on error */
-static int lock(int fd, uint64_t offset, uint64_t len, int type)
+static int
+resize_hugefile(int fd, char *path, int list_idx, int seg_idx,
+               uint64_t fa_offset, uint64_t page_sz, bool grow)
 {
-       struct flock lck;
-       int ret;
-
-       memset(&lck, 0, sizeof(lck));
+       bool again = false;
 
-       lck.l_type = type;
-       lck.l_whence = SEEK_SET;
-       lck.l_start = offset;
-       lck.l_len = len;
+       /* in-memory mode is a special case, because we don't need to perform
+        * any locking, and we can be sure that fallocate() is supported.
+        */
+       if (internal_config.in_memory) {
+               int flags = grow ? 0 : FALLOC_FL_PUNCH_HOLE |
+                               FALLOC_FL_KEEP_SIZE;
+               int ret;
 
-       ret = fcntl(fd, F_SETLK, &lck);
+               /* grow or shrink the file */
+               ret = fallocate(fd, flags, fa_offset, page_sz);
 
-       if (ret && (errno == EAGAIN || errno == EACCES)) {
-               /* locked by another process, not an error */
+               if (ret < 0) {
+                       RTE_LOG(DEBUG, EAL, "%s(): fallocate() failed: %s\n",
+                                       __func__,
+                                       strerror(errno));
+                       return -1;
+               }
+               /* increase/decrease total segment count */
+               fd_list[list_idx].count += (grow ? 1 : -1);
+               if (!grow && fd_list[list_idx].count == 0) {
+                       close(fd_list[list_idx].memseg_list_fd);
+                       fd_list[list_idx].memseg_list_fd = -1;
+               }
                return 0;
-       } else if (ret) {
-               RTE_LOG(ERR, EAL, "%s(): error calling fcntl(): %s\n",
-                       __func__, strerror(errno));
-               /* we've encountered an unexpected error */
-               return -1;
        }
-       return 1;
-}
 
-static int
-resize_hugefile(int fd, uint64_t fa_offset, uint64_t page_sz,
-               bool grow)
-{
-       bool again = false;
        do {
                if (fallocate_supported == 0) {
                        /* we cannot deallocate memory if fallocate() is not
-                        * supported, but locks are still needed to prevent
-                        * primary process' initialization from clearing out
-                        * huge pages used by this process.
+                        * supported, and hugepage file is already locked at
+                        * creation, so no further synchronization needed.
                         */
 
                        if (!grow) {
@@ -337,13 +490,10 @@ resize_hugefile(int fd, uint64_t fa_offset, uint64_t page_sz,
                                        __func__, strerror(errno));
                                return -1;
                        }
-                       /* not being able to take out a read lock is an error */
-                       if (lock(fd, fa_offset, page_sz, F_RDLCK) != 1)
-                               return -1;
                } else {
                        int flags = grow ? 0 : FALLOC_FL_PUNCH_HOLE |
                                        FALLOC_FL_KEEP_SIZE;
-                       int ret;
+                       int ret, lock_fd;
 
                        /* if fallocate() is supported, we need to take out a
                         * read lock on allocate (to prevent other processes
@@ -351,20 +501,69 @@ resize_hugefile(int fd, uint64_t fa_offset, uint64_t page_sz,
                         * lock on deallocate (to ensure nobody else is using
                         * this page).
                         *
-                        * we can't use flock() for this, as we actually need to
-                        * lock part of the file, not the entire file.
+                        * read locks on page itself are already taken out at
+                        * file creation, in get_seg_fd().
+                        *
+                        * we cannot rely on simple use of flock() call, because
+                        * we need to be able to lock a section of the file,
+                        * and we cannot use fcntl() locks, because of numerous
+                        * problems with their semantics, so we will use
+                        * deterministically named lock files for each section
+                        * of the file.
+                        *
+                        * if we're shrinking the file, we want to upgrade our
+                        * lock from shared to exclusive.
+                        *
+                        * lock_fd is an fd for a lockfile, not for the segment
+                        * list.
                         */
+                       lock_fd = get_segment_lock_fd(list_idx, seg_idx);
 
                        if (!grow) {
-                               ret = lock(fd, fa_offset, page_sz, F_WRLCK);
+                               /* we are using this lockfile to determine
+                                * whether this particular page is locked, as we
+                                * are in single file segments mode and thus
+                                * cannot use regular flock() to get this info.
+                                *
+                                * we want to try and take out an exclusive lock
+                                * on the lock file to determine if we're the
+                                * last ones using this page, and if not, we
+                                * won't be shrinking it, and will instead exit
+                                * prematurely.
+                                */
+                               ret = lock(lock_fd, LOCK_EX);
+
+                               /* drop the lock on the lockfile, so that even
+                                * if we couldn't shrink the file ourselves, we
+                                * are signalling to other processes that we're
+                                * no longer using this page.
+                                */
+                               if (unlock_segment(list_idx, seg_idx))
+                                       RTE_LOG(ERR, EAL, "Could not unlock segment\n");
+
+                               /* additionally, if this was the last lock on
+                                * this segment list, we can safely close the
+                                * page file fd, so that one of the processes
+                                * could then delete the file after shrinking.
+                                */
+                               if (ret < 1 && fd_list[list_idx].count == 0) {
+                                       close(fd);
+                                       fd_list[list_idx].memseg_list_fd = -1;
+                               }
 
-                               if (ret < 0)
+                               if (ret < 0) {
+                                       RTE_LOG(ERR, EAL, "Could not lock segment\n");
                                        return -1;
-                               else if (ret == 0)
-                                       /* failed to lock, not an error */
+                               }
+                               if (ret == 0)
+                                       /* failed to lock, not an error. */
                                        return 0;
                        }
-                       if (fallocate(fd, flags, fa_offset, page_sz) < 0) {
+
+                       /* grow or shrink the file */
+                       ret = fallocate(fd, flags, fa_offset, page_sz);
+
+                       if (ret < 0) {
                                if (fallocate_supported == -1 &&
                                                errno == ENOTSUP) {
                                        RTE_LOG(ERR, EAL, "%s(): fallocate() not supported, hugepage deallocation will be disabled\n",
@@ -380,16 +579,18 @@ resize_hugefile(int fd, uint64_t fa_offset, uint64_t page_sz,
                        } else {
                                fallocate_supported = 1;
 
-                               if (grow) {
-                                       /* if can't read lock, it's an error */
-                                       if (lock(fd, fa_offset, page_sz,
-                                                       F_RDLCK) != 1)
-                                               return -1;
-                               } else {
-                                       /* if can't unlock, it's an error */
-                                       if (lock(fd, fa_offset, page_sz,
-                                                       F_UNLCK) != 1)
-                                               return -1;
+                               /* we've grown/shrunk the file, and we hold an
+                                * exclusive lock now. check if there are no
+                                * more segments active in this segment list,
+                                * and remove the file if there aren't.
+                                */
+                               if (fd_list[list_idx].count == 0) {
+                                       if (unlink(path))
+                                               RTE_LOG(ERR, EAL, "%s(): unlinking '%s' failed: %s\n",
+                                                       __func__, path,
+                                                       strerror(errno));
+                                       close(fd);
+                                       fd_list[list_idx].memseg_list_fd = -1;
                                }
                        }
                }
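
The grow/shrink primitive reduces to two fallocate() modes, sketched standalone below: no flags extends the file, while PUNCH_HOLE together with KEEP_SIZE returns the backing pages to the kernel without changing the file's logical size.

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

/* grow or shrink one page-sized region of a hugetlbfs file */
static int
example_resize(int fd, off_t offset, off_t page_sz, int grow)
{
        int flags = grow ? 0 : FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;

        return fallocate(fd, flags, offset, page_sz);
}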
@@ -406,73 +607,121 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
        int cur_socket_id = 0;
 #endif
        uint64_t map_offset;
+       rte_iova_t iova;
+       void *va;
        char path[PATH_MAX];
        int ret = 0;
        int fd;
        size_t alloc_sz;
+       int flags;
+       void *new_addr;
 
-       fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
-       if (fd < 0)
+       alloc_sz = hi->hugepage_sz;
+
+       /* these are checked at init, but code analyzers don't know that */
+       if (internal_config.in_memory && !anonymous_hugepages_supported) {
+               RTE_LOG(ERR, EAL, "Anonymous hugepages not supported, in-memory mode cannot allocate memory\n");
+               return -1;
+       }
+       if (internal_config.in_memory && !memfd_create_supported &&
+                       internal_config.single_file_segments) {
+               RTE_LOG(ERR, EAL, "Single-file segments are not supported without memfd support\n");
                return -1;
+       }
 
-       alloc_sz = hi->hugepage_sz;
-       if (internal_config.single_file_segments) {
-               map_offset = seg_idx * alloc_sz;
-               ret = resize_hugefile(fd, map_offset, alloc_sz, true);
-               if (ret < 0)
-                       goto resized;
-       } else {
+       /* in-memory without memfd is a special case */
+       int mmap_flags;
+
+       if (internal_config.in_memory && !memfd_create_supported) {
+               int pagesz_flag, flags;
+
+               pagesz_flag = pagesz_flags(alloc_sz);
+               flags = pagesz_flag | MAP_HUGETLB | MAP_FIXED |
+                               MAP_PRIVATE | MAP_ANONYMOUS;
+               fd = -1;
+               mmap_flags = flags;
+
+               /* single-file segments codepath will never be active
+                * here because in-memory mode is incompatible with the
+                * fallback path, and it's stopped at EAL initialization
+                * stage.
+                */
                map_offset = 0;
-               if (ftruncate(fd, alloc_sz) < 0) {
-                       RTE_LOG(DEBUG, EAL, "%s(): ftruncate() failed: %s\n",
-                               __func__, strerror(errno));
-                       goto resized;
+       } else {
+               /* takes out a read lock on segment or segment list */
+               fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
+               if (fd < 0) {
+                       RTE_LOG(ERR, EAL, "Couldn't get fd on hugepage file\n");
+                       return -1;
                }
-               /* we've allocated a page - take out a read lock. we're using
-                * fcntl() locks rather than flock() here because doing that
-                * gives us one huge advantage - fcntl() locks are per-process,
-                * not per-file descriptor, which means that we don't have to
-                * keep the original fd's around to keep a lock on the file.
-                *
-                * this is useful, because when it comes to unmapping pages, we
-                * will have to take out a write lock (to figure out if another
-                * process still has this page mapped), and to do itwith flock()
-                * we'll have to use original fd, as lock is associated with
-                * that particular fd. with fcntl(), this is not necessary - we
-                * can open a new fd and use fcntl() on that.
-                */
-               ret = lock(fd, map_offset, alloc_sz, F_RDLCK);
-
-               /* this should not fail */
-               if (ret != 1) {
-                       RTE_LOG(ERR, EAL, "%s(): error locking file: %s\n",
-                               __func__,
-                               strerror(errno));
-                       goto resized;
+
+               if (internal_config.single_file_segments) {
+                       map_offset = seg_idx * alloc_sz;
+                       ret = resize_hugefile(fd, path, list_idx, seg_idx,
+                                       map_offset, alloc_sz, true);
+                       if (ret < 0)
+                               goto resized;
+               } else {
+                       map_offset = 0;
+                       if (ftruncate(fd, alloc_sz) < 0) {
+                               RTE_LOG(DEBUG, EAL, "%s(): ftruncate() failed: %s\n",
+                                       __func__, strerror(errno));
+                               goto resized;
+                       }
+                       if (internal_config.hugepage_unlink &&
+                                       !internal_config.in_memory) {
+                               if (unlink(path)) {
+                                       RTE_LOG(DEBUG, EAL, "%s(): unlink() failed: %s\n",
+                                               __func__, strerror(errno));
+                                       goto resized;
+                               }
+                       }
                }
+               mmap_flags = MAP_SHARED | MAP_POPULATE | MAP_FIXED;
        }
 
        /*
-        * map the segment, and populate page tables, the kernel fills this
-        * segment with zeros if it's a new page.
+        * map the segment, and populate page tables, the kernel fills
+        * this segment with zeros if it's a new page.
         */
-       void *va = mmap(addr, alloc_sz, PROT_READ | PROT_WRITE,
-                       MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd, map_offset);
-       /* for non-single file segments, we can close fd here */
-       if (!internal_config.single_file_segments)
-               close(fd);
+       va = mmap(addr, alloc_sz, PROT_READ | PROT_WRITE, mmap_flags, fd,
+                       map_offset);
 
        if (va == MAP_FAILED) {
                RTE_LOG(DEBUG, EAL, "%s(): mmap() failed: %s\n", __func__,
                        strerror(errno));
-               goto resized;
+               /* mmap failed, but the previous region might have been
+                * unmapped anyway. try to remap it
+                */
+               goto unmapped;
        }
        if (va != addr) {
                RTE_LOG(DEBUG, EAL, "%s(): wrong mmap() address\n", __func__);
+               munmap(va, alloc_sz);
+               goto resized;
+       }
+
+       /* in Linux, hugetlb limitations such as cgroups are
+        * enforced at fault time instead of at mmap(), even
+        * with MAP_POPULATE; the kernel sends a SIGBUS signal
+        * on such a fault. to avoid being killed, save the
+        * stack environment here, so that if SIGBUS happens,
+        * we can jump back to it.
+        */
+       if (huge_wrap_sigsetjmp()) {
+               RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more hugepages of size %uMB\n",
+                       (unsigned int)(alloc_sz >> 20));
                goto mapped;
        }
 
-       rte_iova_t iova = rte_mem_virt2iova(addr);
+       /* we need to trigger a write to the page to enforce a page fault,
+        * ensuring the page is accessible to us, but we can't overwrite the
+        * value that is already there, so read the old value and write it back.
+        * kernel populates the page with zeroes initially.
+        */
+       *(volatile int *)addr = *(volatile int *)addr;
+
+       iova = rte_mem_virt2iova(addr);
        if (iova == RTE_BAD_PHYS_ADDR) {
                RTE_LOG(DEBUG, EAL, "%s(): can't get IOVA addr\n",
                        __func__);
@@ -490,20 +739,6 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
        }
 #endif
 
-       /* In linux, hugetlb limitations, like cgroup, are
-        * enforced at fault time instead of mmap(), even
-        * with the option of MAP_POPULATE. Kernel will send
-        * a SIGBUS signal. To avoid to be killed, save stack
-        * environment here, if SIGBUS happens, we can jump
-        * back here.
-        */
-       if (huge_wrap_sigsetjmp()) {
-               RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more hugepages of size %uMB\n",
-                       (unsigned int)(alloc_sz >> 20));
-               goto mapped;
-       }
-       *(int *)addr = *(int *)addr;
-
        ms->addr = addr;
        ms->hugepage_sz = alloc_sz;
        ms->len = alloc_sz;
@@ -516,21 +751,35 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
 
 mapped:
        munmap(addr, alloc_sz);
+unmapped:
+       flags = MAP_FIXED;
+       new_addr = eal_get_virtual_area(addr, &alloc_sz, alloc_sz, 0, flags);
+       if (new_addr != addr) {
+               if (new_addr != NULL)
+                       munmap(new_addr, alloc_sz);
+               /* we're leaving a hole in our virtual address space. if
+                * somebody else maps this hole now, we could accidentally
+                * map over it in the future.
+                */
+               RTE_LOG(CRIT, EAL, "Can't mmap holes in our virtual address space\n");
+       }
 resized:
+       /* some codepaths will return negative fd, so exit early */
+       if (fd < 0)
+               return -1;
+
        if (internal_config.single_file_segments) {
-               resize_hugefile(fd, map_offset, alloc_sz, false);
-               if (is_zero_length(fd)) {
-                       struct msl_entry *te = get_msl_entry_by_idx(list_idx);
-                       if (te != NULL && te->fd >= 0) {
-                               close(te->fd);
-                               te->fd = -1;
-                       }
-                       /* ignore errors, can't make it any worse */
-                       unlink(path);
-               }
+               resize_hugefile(fd, path, list_idx, seg_idx, map_offset,
+                               alloc_sz, false);
+               /* ignore failure, can't make it any worse */
        } else {
+               /* only remove file if we can take out a write lock */
+               if (internal_config.hugepage_unlink == 0 &&
+                               internal_config.in_memory == 0 &&
+                               lock(fd, LOCK_EX) == 1)
+                       unlink(path);
                close(fd);
-               unlink(path);
+               fd_list[list_idx].fds[seg_idx] = -1;
        }
        return -1;
 }
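
The SIGBUS handling in alloc_seg() deserves a standalone illustration: hugetlb accounting (cgroup limits, overcommit) is enforced at fault time rather than at mmap(), so the first write can raise SIGBUS even after a successful map. A minimal sketch of the sigsetjmp pattern (names are hypothetical, not the EAL's huge_wrap_sigsetjmp()):

#include <setjmp.h>
#include <signal.h>

static sigjmp_buf huge_jmpenv;

static void
huge_sigbus_handler(int signo)
{
        (void)signo;
        siglongjmp(huge_jmpenv, 1);
}

/* returns 0 if the page is really there, -1 if touching it faulted */
static int
example_touch_page(volatile int *addr)
{
        signal(SIGBUS, huge_sigbus_handler);
        if (sigsetjmp(huge_jmpenv, 1) != 0)
                return -1;
        /* read-modify-write preserves whatever is already in the page */
        *addr = *addr;
        return 0;
}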
@@ -541,7 +790,8 @@ free_seg(struct rte_memseg *ms, struct hugepage_info *hi,
 {
        uint64_t map_offset;
        char path[PATH_MAX];
-       int fd, ret;
+       int fd, ret = 0;
+       bool exit_early;
 
        /* erase page data */
        memset(ms->addr, 0, ms->len);
@@ -553,47 +803,55 @@ free_seg(struct rte_memseg *ms, struct hugepage_info *hi,
                return -1;
        }
 
+       exit_early = false;
+
+       /* if we're using anonymous hugepages, nothing to be done */
+       if (internal_config.in_memory && !memfd_create_supported)
+               exit_early = true;
+
+       /* if we've already unlinked the page, nothing needs to be done */
+       if (!internal_config.in_memory && internal_config.hugepage_unlink)
+               exit_early = true;
+
+       if (exit_early) {
+               memset(ms, 0, sizeof(*ms));
+               return 0;
+       }
+
+       /* if we are not in single file segments mode, we're going to unmap the
+        * segment and thus drop the lock on the original fd, but the hugepage
+        * dir is now locked, so we can take out another lock without races.
+        */
        fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
        if (fd < 0)
                return -1;
 
        if (internal_config.single_file_segments) {
                map_offset = seg_idx * ms->len;
-               if (resize_hugefile(fd, map_offset, ms->len, false))
+               if (resize_hugefile(fd, path, list_idx, seg_idx, map_offset,
+                               ms->len, false))
                        return -1;
-               /* if file is zero-length, we've already shrunk it, so it's
-                * safe to remove.
-                */
-               if (is_zero_length(fd)) {
-                       struct msl_entry *te = get_msl_entry_by_idx(list_idx);
-                       if (te != NULL && te->fd >= 0) {
-                               close(te->fd);
-                               te->fd = -1;
-                       }
-                       unlink(path);
-               }
                ret = 0;
        } else {
                /* if we're able to take out a write lock, we're the last one
                 * holding onto this page.
                 */
-
-               ret = lock(fd, 0, ms->len, F_WRLCK);
-               if (ret >= 0) {
-                       /* no one else is using this page */
-                       if (ret == 1)
-                               unlink(path);
-                       ret = lock(fd, 0, ms->len, F_UNLCK);
-                       if (ret != 1)
-                               RTE_LOG(ERR, EAL, "%s(): unable to unlock file %s\n",
-                                       __func__, path);
+               if (!internal_config.in_memory) {
+                       ret = lock(fd, LOCK_EX);
+                       if (ret >= 0) {
+                               /* no one else is using this page */
+                               if (ret == 1)
+                                       unlink(path);
+                       }
                }
+               /* closing fd will drop the lock */
                close(fd);
+               fd_list[list_idx].fds[seg_idx] = -1;
        }
 
        memset(ms, 0, sizeof(*ms));
 
-       return ret;
+       return ret < 0 ? -1 : 0;
 }
 
 struct alloc_walk_param {
@@ -612,7 +870,7 @@ alloc_seg_walk(const struct rte_memseg_list *msl, void *arg)
        struct alloc_walk_param *wa = arg;
        struct rte_memseg_list *cur_msl;
        size_t page_sz;
-       int cur_idx, start_idx, j;
+       int cur_idx, start_idx, j, dir_fd = -1;
        unsigned int msl_idx, need, i;
 
        if (msl->page_sz != wa->page_sz)
@@ -633,6 +891,30 @@ alloc_seg_walk(const struct rte_memseg_list *msl, void *arg)
                return 0;
        start_idx = cur_idx;
 
+       /* do not allow any page allocations during the time we're allocating,
+        * because file creation and locking operations are not atomic,
+        * and we might be the first or the last ones to use a particular page,
+        * so we need to ensure atomicity of every operation.
+        *
+        * during init, we already hold a write lock, so don't try to take out
+        * another one.
+        */
+       if (wa->hi->lock_descriptor == -1 && !internal_config.in_memory) {
+               dir_fd = open(wa->hi->hugedir, O_RDONLY);
+               if (dir_fd < 0) {
+                       RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n",
+                               __func__, wa->hi->hugedir, strerror(errno));
+                       return -1;
+               }
+               /* blocking writelock */
+               if (flock(dir_fd, LOCK_EX)) {
+                       RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n",
+                               __func__, wa->hi->hugedir, strerror(errno));
+                       close(dir_fd);
+                       return -1;
+               }
+       }
+
        for (i = 0; i < need; i++, cur_idx++) {
                struct rte_memseg *cur;
                void *map_addr;
@@ -657,17 +939,20 @@ alloc_seg_walk(const struct rte_memseg_list *msl, void *arg)
                                                &cur_msl->memseg_arr;
 
                                tmp = rte_fbarray_get(arr, j);
-                               if (free_seg(tmp, wa->hi, msl_idx,
-                                               start_idx + j)) {
-                                       RTE_LOG(ERR, EAL, "Cannot free page\n");
-                                       continue;
-                               }
-
                                rte_fbarray_set_free(arr, j);
+
+                               /* free_seg may attempt to create a file, which
+                                * may fail.
+                                */
+                               if (free_seg(tmp, wa->hi, msl_idx, j))
+                                       RTE_LOG(DEBUG, EAL, "Cannot free page\n");
                        }
                        /* clear the list */
                        if (wa->ms)
                                memset(wa->ms, 0, sizeof(*wa->ms) * wa->n_segs);
+
+                       if (dir_fd >= 0)
+                               close(dir_fd);
                        return -1;
                }
                if (wa->ms)
@@ -679,6 +964,8 @@ out:
        wa->segs_allocated = i;
        if (i > 0)
                cur_msl->version++;
+       if (dir_fd >= 0)
+               close(dir_fd);
        return 1;
 }
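
The directory-level lock taken above is worth a standalone sketch: since creating, locking and resizing page files is not atomic, an exclusive flock() on the hugepage directory serializes whole allocate/free operations across processes (hypothetical helper, error messages elided):

#include <fcntl.h>
#include <sys/file.h>
#include <unistd.h>

static int
example_lock_hugedir(const char *hugedir)
{
        int dir_fd = open(hugedir, O_RDONLY);

        if (dir_fd < 0)
                return -1;
        /* blocking writelock; released when dir_fd is closed */
        if (flock(dir_fd, LOCK_EX) < 0) {
                close(dir_fd);
                return -1;
        }
        return dir_fd;
}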
 
@@ -693,10 +980,10 @@ free_seg_walk(const struct rte_memseg_list *msl, void *arg)
        struct rte_memseg_list *found_msl;
        struct free_walk_param *wa = arg;
        uintptr_t start_addr, end_addr;
-       int msl_idx, seg_idx;
+       int msl_idx, seg_idx, ret, dir_fd = -1;
 
        start_addr = (uintptr_t) msl->base_va;
-       end_addr = start_addr + msl->memseg_arr.len * (size_t)msl->page_sz;
+       end_addr = start_addr + msl->len;
 
        if ((uintptr_t)wa->ms->addr < start_addr ||
                        (uintptr_t)wa->ms->addr >= end_addr)
@@ -708,11 +995,40 @@ free_seg_walk(const struct rte_memseg_list *msl, void *arg)
        /* msl is const */
        found_msl = &mcfg->memsegs[msl_idx];
 
+       /* do not allow any page allocations during the time we're freeing,
+        * because file creation and locking operations are not atomic,
+        * and we might be the first or the last ones to use a particular page,
+        * so we need to ensure atomicity of every operation.
+        *
+        * during init, we already hold a write lock, so don't try to take out
+        * another one.
+        */
+       if (wa->hi->lock_descriptor == -1 && !internal_config.in_memory) {
+               dir_fd = open(wa->hi->hugedir, O_RDONLY);
+               if (dir_fd < 0) {
+                       RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n",
+                               __func__, wa->hi->hugedir, strerror(errno));
+                       return -1;
+               }
+               /* blocking writelock */
+               if (flock(dir_fd, LOCK_EX)) {
+                       RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n",
+                               __func__, wa->hi->hugedir, strerror(errno));
+                       close(dir_fd);
+                       return -1;
+               }
+       }
+
        found_msl->version++;
 
        rte_fbarray_set_free(&found_msl->memseg_arr, seg_idx);
 
-       if (free_seg(wa->ms, wa->hi, msl_idx, seg_idx))
+       ret = free_seg(wa->ms, wa->hi, msl_idx, seg_idx);
+
+       if (dir_fd >= 0)
+               close(dir_fd);
+
+       if (ret < 0)
                return -1;
 
        return 1;
@@ -766,7 +1082,8 @@ eal_memalloc_alloc_seg_bulk(struct rte_memseg **ms, int n_segs, size_t page_sz,
        wa.socket = socket;
        wa.segs_allocated = 0;
 
-       ret = memseg_list_walk_thread_unsafe(alloc_seg_walk, &wa);
+       /* memalloc is locked, so it's safe to use thread-unsafe version */
+       ret = rte_memseg_list_walk_thread_unsafe(alloc_seg_walk, &wa);
        if (ret == 0) {
                RTE_LOG(ERR, EAL, "%s(): couldn't find suitable memseg_list\n",
                        __func__);
@@ -777,7 +1094,7 @@ eal_memalloc_alloc_seg_bulk(struct rte_memseg **ms, int n_segs, size_t page_sz,
 
 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
        if (have_numa)
-               resotre_numa(&oldpolicy, oldmask);
+               restore_numa(&oldpolicy, oldmask);
 #endif
        return ret;
 }
@@ -831,7 +1148,10 @@ eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs)
                wa.ms = cur;
                wa.hi = hi;
 
-               walk_res = memseg_list_walk_thread_unsafe(free_seg_walk, &wa);
+               /* memalloc is locked, so it's safe to use thread-unsafe version
+                */
+               walk_res = rte_memseg_list_walk_thread_unsafe(free_seg_walk,
+                               &wa);
                if (walk_res == 1)
                        continue;
                if (walk_res == 0)
@@ -1034,22 +1354,46 @@ sync_existing(struct rte_memseg_list *primary_msl,
                struct rte_memseg_list *local_msl, struct hugepage_info *hi,
                unsigned int msl_idx)
 {
-       int ret;
+       int ret, dir_fd;
+
+       /* do not allow any page allocations during the time we're allocating,
+        * because file creation and locking operations are not atomic,
+        * and we might be the first or the last ones to use a particular page,
+        * so we need to ensure atomicity of every operation.
+        */
+       dir_fd = open(hi->hugedir, O_RDONLY);
+       if (dir_fd < 0) {
+               RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n", __func__,
+                       hi->hugedir, strerror(errno));
+               return -1;
+       }
+       /* blocking writelock */
+       if (flock(dir_fd, LOCK_EX)) {
+               RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n", __func__,
+                       hi->hugedir, strerror(errno));
+               close(dir_fd);
+               return -1;
+       }
 
        /* ensure all allocated space is the same in both lists */
        ret = sync_status(primary_msl, local_msl, hi, msl_idx, true);
        if (ret < 0)
-               return -1;
+               goto fail;
 
        /* ensure all unallocated space is the same in both lists */
        ret = sync_status(primary_msl, local_msl, hi, msl_idx, false);
        if (ret < 0)
-               return -1;
+               goto fail;
 
        /* update version number */
        local_msl->version = primary_msl->version;
 
+       close(dir_fd);
+
        return 0;
+fail:
+       close(dir_fd);
+       return -1;
 }
 
 static int
@@ -1060,33 +1404,14 @@ sync_walk(const struct rte_memseg_list *msl, void *arg __rte_unused)
        struct hugepage_info *hi = NULL;
        unsigned int i;
        int msl_idx;
-       bool new_msl = false;
+
+       if (msl->external)
+               return 0;
 
        msl_idx = msl - mcfg->memsegs;
        primary_msl = &mcfg->memsegs[msl_idx];
        local_msl = &local_memsegs[msl_idx];
 
-       /* check if secondary has this memseg list set up */
-       if (local_msl->base_va == NULL) {
-               char name[PATH_MAX];
-               int ret;
-               new_msl = true;
-
-               /* create distinct fbarrays for each secondary */
-               snprintf(name, RTE_FBARRAY_NAME_LEN, "%s_%i",
-                       primary_msl->memseg_arr.name, getpid());
-
-               ret = rte_fbarray_init(&local_msl->memseg_arr, name,
-                       primary_msl->memseg_arr.len,
-                       primary_msl->memseg_arr.elt_sz);
-               if (ret < 0) {
-                       RTE_LOG(ERR, EAL, "Cannot initialize local memory map\n");
-                       return -1;
-               }
-
-               local_msl->base_va = primary_msl->base_va;
-       }
-
        for (i = 0; i < RTE_DIM(internal_config.hugepage_info); i++) {
                uint64_t cur_sz =
                        internal_config.hugepage_info[i].hugepage_sz;
@@ -1101,10 +1426,8 @@ sync_walk(const struct rte_memseg_list *msl, void *arg __rte_unused)
                return -1;
        }
 
-       /* if versions don't match or if we have just allocated a new
-        * memseg list, synchronize everything
-        */
-       if ((new_msl || local_msl->version != primary_msl->version) &&
+       /* if versions don't match, synchronize everything */
+       if (local_msl->version != primary_msl->version &&
                        sync_existing(primary_msl, local_msl, hi, msl_idx))
                return -1;
        return 0;
@@ -1118,7 +1441,210 @@ eal_memalloc_sync_with_primary(void)
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                return 0;
 
-       if (memseg_list_walk_thread_unsafe(sync_walk, NULL))
+       /* memalloc is locked, so it's safe to call thread-unsafe version */
+       if (rte_memseg_list_walk_thread_unsafe(sync_walk, NULL))
+               return -1;
+       return 0;
+}
+
+static int
+secondary_msl_create_walk(const struct rte_memseg_list *msl,
+               void *arg __rte_unused)
+{
+       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+       struct rte_memseg_list *primary_msl, *local_msl;
+       char name[PATH_MAX];
+       int msl_idx, ret;
+
+       if (msl->external)
+               return 0;
+
+       msl_idx = msl - mcfg->memsegs;
+       primary_msl = &mcfg->memsegs[msl_idx];
+       local_msl = &local_memsegs[msl_idx];
+
+       /* create distinct fbarrays for each secondary */
+       snprintf(name, RTE_FBARRAY_NAME_LEN, "%s_%i",
+               primary_msl->memseg_arr.name, getpid());
+
+       ret = rte_fbarray_init(&local_msl->memseg_arr, name,
+               primary_msl->memseg_arr.len,
+               primary_msl->memseg_arr.elt_sz);
+       if (ret < 0) {
+               RTE_LOG(ERR, EAL, "Cannot initialize local memory map\n");
+               return -1;
+       }
+       local_msl->base_va = primary_msl->base_va;
+       local_msl->len = primary_msl->len;
+
+       return 0;
+}
+
+static int
+alloc_list(int list_idx, int len)
+{
+       int *data;
+       int i;
+
+       /* ensure we have space to store fd per each possible segment */
+       data = malloc(sizeof(int) * len);
+       if (data == NULL) {
+               RTE_LOG(ERR, EAL, "Unable to allocate space for file descriptors\n");
+               return -1;
+       }
+       /* set all fd's as invalid */
+       for (i = 0; i < len; i++)
+               data[i] = -1;
+
+       fd_list[list_idx].fds = data;
+       fd_list[list_idx].len = len;
+       fd_list[list_idx].count = 0;
+       fd_list[list_idx].memseg_list_fd = -1;
+
+       return 0;
+}
+
+static int
+fd_list_create_walk(const struct rte_memseg_list *msl,
+               void *arg __rte_unused)
+{
+       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+       unsigned int len;
+       int msl_idx;
+
+       if (msl->external)
+               return 0;
+
+       msl_idx = msl - mcfg->memsegs;
+       len = msl->memseg_arr.len;
+
+       return alloc_list(msl_idx, len);
+}
+
+int
+eal_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd)
+{
+       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+
+       /* if list is not allocated, allocate it */
+       if (fd_list[list_idx].len == 0) {
+               int len = mcfg->memsegs[list_idx].memseg_arr.len;
+
+               if (alloc_list(list_idx, len) < 0)
+                       return -ENOMEM;
+       }
+       fd_list[list_idx].fds[seg_idx] = fd;
+
+       return 0;
+}
+
+int
+eal_memalloc_get_seg_fd(int list_idx, int seg_idx)
+{
+       int fd;
+       if (internal_config.single_file_segments) {
+               fd = fd_list[list_idx].memseg_list_fd;
+       } else if (fd_list[list_idx].len == 0) {
+               /* list not initialized */
+               fd = -1;
+       } else {
+               fd = fd_list[list_idx].fds[seg_idx];
+       }
+       if (fd < 0)
+               return -ENODEV;
+       return fd;
+}
+
+static int
+test_memfd_create(void)
+{
+#ifdef MEMFD_SUPPORTED
+       unsigned int i;
+       for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
+               uint64_t pagesz = internal_config.hugepage_info[i].hugepage_sz;
+               int pagesz_flag = pagesz_flags(pagesz);
+               int flags;
+
+               flags = pagesz_flag | MFD_HUGETLB;
+               int fd = memfd_create("test", flags);
+               if (fd < 0) {
+                       /* we failed - let memalloc know this isn't working */
+                       if (errno == EINVAL) {
+                               memfd_create_supported = 0;
+                               return 0; /* not supported */
+                       }
+
+                       /* we got other error - something's wrong */
+                       return -1; /* error */
+               }
+               close(fd);
+               return 1; /* supported */
+       }
+#endif
+       return 0; /* not supported */
+}
+
+int
+eal_memalloc_get_seg_fd_offset(int list_idx, int seg_idx, size_t *offset)
+{
+       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+
+       /* fd_list not initialized? */
+       if (fd_list[list_idx].len == 0)
+               return -ENODEV;
+       if (internal_config.single_file_segments) {
+               size_t pgsz = mcfg->memsegs[list_idx].page_sz;
+
+               /* segment not active? */
+               if (fd_list[list_idx].memseg_list_fd < 0)
+                       return -ENOENT;
+               *offset = pgsz * seg_idx;
+       } else {
+               /* segment not active? */
+               if (fd_list[list_idx].fds[seg_idx] < 0)
+                       return -ENOENT;
+               *offset = 0;
+       }
+       return 0;
+}
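
Together, the two lookups above let a caller recover a segment's backing file and map it again; a sketch of such a caller (hypothetical, error handling abbreviated):

#include <sys/mman.h>

static void *
example_remap_seg(int list_idx, int seg_idx, size_t len)
{
        size_t offset;
        int fd = eal_memalloc_get_seg_fd(list_idx, seg_idx);

        if (fd < 0) /* -ENODEV or -ENOENT */
                return NULL;
        if (eal_memalloc_get_seg_fd_offset(list_idx, seg_idx, &offset) < 0)
                return NULL;
        return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                        fd, (off_t)offset);
}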
+
+int
+eal_memalloc_init(void)
+{
+       if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+               if (rte_memseg_list_walk(secondary_msl_create_walk, NULL) < 0)
+                       return -1;
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
+                       internal_config.in_memory) {
+               int mfd_res = test_memfd_create();
+
+               if (mfd_res < 0) {
+                       RTE_LOG(ERR, EAL, "Unable to check if memfd is supported\n");
+                       return -1;
+               }
+               if (mfd_res == 1)
+                       RTE_LOG(DEBUG, EAL, "Using memfd for anonymous memory\n");
+               else
+                       RTE_LOG(INFO, EAL, "Using memfd is not supported, falling back to anonymous hugepages\n");
+
+               /* we only support single-file segments mode with in-memory mode
+                * if we support hugetlbfs with memfd_create; the probe above
+                * has already determined whether we do.
+                */
+               if (internal_config.single_file_segments &&
+                               mfd_res != 1) {
+                       RTE_LOG(ERR, EAL, "Single-file segments mode cannot be used without memfd support\n");
+                       return -1;
+               }
+               /* this cannot ever happen but better safe than sorry */
+               if (!anonymous_hugepages_supported) {
+                       RTE_LOG(ERR, EAL, "Using anonymous memory is not supported\n");
+                       return -1;
+               }
+       }
+
+       /* initialize all of the fd lists */
+       if (rte_memseg_list_walk(fd_list_create_walk, NULL))
                return -1;
        return 0;
 }