*/
static int fallocate_supported = -1; /* unknown */
-/*
- * If each page is in a separate file, we can close fd's since we need each fd
- * only once. However, in single file segments mode, we can get away with using
- * a single fd for entire segments, but we need to store them somewhere. Each
- * fd is different within each process, so we'll store them in a local tailq.
+/* for single-file segments, we need some kind of mechanism to keep track of
+ * which hugepages can be freed back to the system, and which cannot. we cannot
+ * use flock() because it doesn't allow locking parts of a file, and we cannot
+ * use fcntl() record locks because of problems with their semantics (they are
+ * dropped when any fd referring to the file is closed), so we will have to
+ * rely on a separate lockfile for each page.
+ *
+ * we cannot know how many pages a system will have in advance, but we do know
+ * that they come in lists, and we know the lengths of these lists. so, simply
+ * store a malloc'd array of fd's indexed by list and segment index.
+ *
+ * they will be initialized at startup, and filled as we allocate/deallocate
+ * segments. also, use this to track the memseg list's own fd.
*/
-struct msl_entry {
- TAILQ_ENTRY(msl_entry) next;
- unsigned int msl_idx;
- int fd;
-};
-
-/** Double linked list of memseg list fd's. */
-TAILQ_HEAD(msl_entry_list, msl_entry);
-
-static struct msl_entry_list msl_entry_list =
- TAILQ_HEAD_INITIALIZER(msl_entry_list);
-static rte_spinlock_t tailq_lock = RTE_SPINLOCK_INITIALIZER;
+static struct {
+ int *fds; /**< dynamically allocated array of segment lock fd's */
+ int memseg_list_fd; /**< memseg list fd */
+ int len; /**< total length of the array */
+ int count; /**< number of entries used in the array */
+} lock_fds[RTE_MAX_MEMSEG_LISTS];
/** local copy of a memory map, used to synchronize memory hotplug in MP */
static struct rte_memseg_list local_memsegs[RTE_MAX_MEMSEG_LISTS];
}
#endif
-static struct msl_entry *
-get_msl_entry_by_idx(unsigned int list_idx)
-{
- struct msl_entry *te;
-
- rte_spinlock_lock(&tailq_lock);
-
- TAILQ_FOREACH(te, &msl_entry_list, next) {
- if (te->msl_idx == list_idx)
- break;
- }
- if (te == NULL) {
- /* doesn't exist, so create it and set fd to -1 */
-
- te = malloc(sizeof(*te));
- if (te == NULL) {
- RTE_LOG(ERR, EAL, "%s(): cannot allocate tailq entry for memseg list\n",
- __func__);
- goto unlock;
- }
- te->msl_idx = list_idx;
- te->fd = -1;
- TAILQ_INSERT_TAIL(&msl_entry_list, te, next);
- }
-unlock:
- rte_spinlock_unlock(&tailq_lock);
- return te;
-}
-
/*
* uses fstat to report the size of a file on disk
*/
return st.st_size;
}
-/*
- * uses fstat to check if file size on disk is zero (regular fstat won't show
- * true file size due to how fallocate works)
- */
-static bool
-is_zero_length(int fd)
-{
- struct stat st;
- if (fstat(fd, &st) < 0)
- return false;
- return st.st_blocks == 0;
-}
-
/* we cannot use rte_memseg_list_walk() here because we will be holding a
* write lock whenever we enter every function in this file, however copying
* the same iteration code everywhere is not ideal as well. so, use a lockless
return 0;
}
+/* returns 1 on successful lock, 0 on unsuccessful lock, -1 on error */
+static int lock(int fd, int type)
+{
+ int ret;
+
+ /* flock may be interrupted */
+ do {
+ ret = flock(fd, type | LOCK_NB);
+ } while (ret && errno == EINTR);
+
+ if (ret && errno == EWOULDBLOCK) {
+ /* couldn't lock */
+ return 0;
+ } else if (ret) {
+ RTE_LOG(ERR, EAL, "%s(): error calling flock(): %s\n",
+ __func__, strerror(errno));
+ return -1;
+ }
+ /* lock was successful */
+ return 1;
+}
+
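+/* return an fd for the given segment's lockfile, creating the file and taking
+ * out a shared lock on it if one doesn't exist yet. returns -1 on failure.
+ */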
+static int get_segment_lock_fd(int list_idx, int seg_idx)
+{
+ char path[PATH_MAX] = {0};
+ int fd;
+
+ if (list_idx < 0 || list_idx >= (int)RTE_DIM(lock_fds))
+ return -1;
+ if (seg_idx < 0 || seg_idx >= lock_fds[list_idx].len)
+ return -1;
+
+ fd = lock_fds[list_idx].fds[seg_idx];
+ /* does this lock already exist? */
+ if (fd >= 0)
+ return fd;
+
+ eal_get_hugefile_lock_path(path, sizeof(path),
+ list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);
+
+ fd = open(path, O_CREAT | O_RDWR, 0660);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "%s(): error creating lockfile '%s': %s\n",
+ __func__, path, strerror(errno));
+ return -1;
+ }
+ /* take out a read lock */
+ if (lock(fd, LOCK_SH) != 1) {
+ RTE_LOG(ERR, EAL, "%s(): failed to take out a readlock on '%s': %s\n",
+ __func__, path, strerror(errno));
+ close(fd);
+ return -1;
+ }
+ /* store it for future reference */
+ lock_fds[list_idx].fds[seg_idx] = fd;
+ lock_fds[list_idx].count++;
+ return fd;
+}
+
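+/* drop our reference to a segment's lockfile. if the lock can be upgraded to
+ * exclusive, no one else is using the segment and the lockfile is unlinked.
+ * the fd is closed and forgotten in either case. returns 0 on success, -1 on
+ * error.
+ */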
+static int unlock_segment(int list_idx, int seg_idx)
+{
+ int fd, ret;
+
+ if (list_idx < 0 || list_idx >= (int)RTE_DIM(lock_fds))
+ return -1;
+ if (seg_idx < 0 || seg_idx >= lock_fds[list_idx].len)
+ return -1;
+
+ fd = lock_fds[list_idx].fds[seg_idx];
+
+ /* upgrade lock to exclusive to see if we can remove the lockfile */
+ ret = lock(fd, LOCK_EX);
+ if (ret == 1) {
+ /* we've succeeded in taking exclusive lock, this lockfile may
+ * be removed.
+ */
+ char path[PATH_MAX] = {0};
+ eal_get_hugefile_lock_path(path, sizeof(path),
+ list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);
+ if (unlink(path)) {
+ RTE_LOG(ERR, EAL, "%s(): error removing lockfile '%s': %s\n",
+ __func__, path, strerror(errno));
+ }
+ }
+ /* we don't want to leak the fd, so even if we fail to lock, close fd
+ * and remove it from list anyway.
+ */
+ close(fd);
+ lock_fds[list_idx].fds[seg_idx] = -1;
+ lock_fds[list_idx].count--;
+
+ if (ret < 0)
+ return -1;
+ return 0;
+}
+
static int
get_seg_fd(char *path, int buflen, struct hugepage_info *hi,
unsigned int list_idx, unsigned int seg_idx)
int fd;
if (internal_config.single_file_segments) {
- /*
- * try to find a tailq entry, for this memseg list, or create
- * one if it doesn't exist.
- */
- struct msl_entry *te = get_msl_entry_by_idx(list_idx);
- if (te == NULL) {
- RTE_LOG(ERR, EAL, "%s(): cannot allocate tailq entry for memseg list\n",
- __func__);
- return -1;
- } else if (te->fd < 0) {
- /* create a hugepage file */
- eal_get_hugefile_path(path, buflen, hi->hugedir,
- list_idx);
+ /* create a hugepage file path */
+ eal_get_hugefile_path(path, buflen, hi->hugedir, list_idx);
+
+ fd = lock_fds[list_idx].memseg_list_fd;
+
+ if (fd < 0) {
fd = open(path, O_CREAT | O_RDWR, 0600);
if (fd < 0) {
- RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n",
+ RTE_LOG(ERR, EAL, "%s(): open failed: %s\n",
__func__, strerror(errno));
return -1;
}
- te->fd = fd;
- } else {
- fd = te->fd;
+ /* take out a read lock and keep it indefinitely */
+ if (lock(fd, LOCK_SH) < 0) {
+ RTE_LOG(ERR, EAL, "%s(): lock failed: %s\n",
+ __func__, strerror(errno));
+ close(fd);
+ return -1;
+ }
+ lock_fds[list_idx].memseg_list_fd = fd;
}
} else {
- /* one file per page, just create it */
+ /* create a hugepage file path */
eal_get_hugefile_path(path, buflen, hi->hugedir,
list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);
fd = open(path, O_CREAT | O_RDWR, 0600);
strerror(errno));
return -1;
}
+ /* take out a read lock */
+ if (lock(fd, LOCK_SH) < 0) {
+ RTE_LOG(ERR, EAL, "%s(): lock failed: %s\n",
+ __func__, strerror(errno));
+ close(fd);
+ return -1;
+ }
}
return fd;
}
-/* returns 1 on successful lock, 0 on unsuccessful lock, -1 on error */
-static int lock(int fd, uint64_t offset, uint64_t len, int type)
-{
- struct flock lck;
- int ret;
-
- memset(&lck, 0, sizeof(lck));
-
- lck.l_type = type;
- lck.l_whence = SEEK_SET;
- lck.l_start = offset;
- lck.l_len = len;
-
- ret = fcntl(fd, F_SETLK, &lck);
-
- if (ret && (errno == EAGAIN || errno == EACCES)) {
- /* locked by another process, not an error */
- return 0;
- } else if (ret) {
- RTE_LOG(ERR, EAL, "%s(): error calling fcntl(): %s\n",
- __func__, strerror(errno));
- /* we've encountered an unexpected error */
- return -1;
- }
- return 1;
-}
-
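+/* grow the single file segments hugepage file by one page at the given offset,
+ * or punch a hole in it to return a page to the system. per-segment lockfiles
+ * are used to check whether a page being freed is still used by other
+ * processes.
+ */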
static int
-resize_hugefile(int fd, uint64_t fa_offset, uint64_t page_sz,
- bool grow)
+resize_hugefile(int fd, char *path, int list_idx, int seg_idx,
+ uint64_t fa_offset, uint64_t page_sz, bool grow)
{
bool again = false;
do {
if (fallocate_supported == 0) {
/* we cannot deallocate memory if fallocate() is not
- * supported, but locks are still needed to prevent
- * primary process' initialization from clearing out
- * huge pages used by this process.
+ * supported, and hugepage file is already locked at
+ * creation, so no further synchronization needed.
*/
if (!grow) {
__func__, strerror(errno));
return -1;
}
- /* not being able to take out a read lock is an error */
- if (lock(fd, fa_offset, page_sz, F_RDLCK) != 1)
- return -1;
} else {
int flags = grow ? 0 : FALLOC_FL_PUNCH_HOLE |
FALLOC_FL_KEEP_SIZE;
- int ret;
+ int ret, lock_fd;
/* if fallocate() is supported, we need to take out a
* read lock on allocate (to prevent other processes
* lock on deallocate (to ensure nobody else is using
* this page).
*
- * we can't use flock() for this, as we actually need to
- * lock part of the file, not the entire file.
+ * read locks on page itself are already taken out at
+ * file creation, in get_seg_fd().
+ *
+ * we cannot rely on a simple flock() call, because we
+ * need to be able to lock a section of the file, and we
+ * cannot use fcntl() locks because of problems with
+ * their semantics, so we will use deterministically
+ * named lock files for each section of the file.
+ *
+ * if we're shrinking the file, we want to upgrade our
+ * lock from shared to exclusive.
+ *
+ * lock_fd is an fd for a lockfile, not for the segment
+ * list.
*/
+ lock_fd = get_segment_lock_fd(list_idx, seg_idx);
if (!grow) {
- ret = lock(fd, fa_offset, page_sz, F_WRLCK);
+ /* we are using this lockfile to determine
+ * whether this particular page is locked, as we
+ * are in single file segments mode and thus
+ * cannot use regular flock() to get this info.
+ *
+ * we want to try and take out an exclusive lock
+ * on the lock file to determine if we're the
+ * last ones using this page, and if not, we
+ * won't be shrinking it, and will instead exit
+ * prematurely.
+ */
+ ret = lock(lock_fd, LOCK_EX);
+
+ /* drop the lock on the lockfile, so that even
+ * if we couldn't shrink the file ourselves, we
+ * are signalling to other processes that we're
+ * no longer using this page.
+ */
+ if (unlock_segment(list_idx, seg_idx))
+ RTE_LOG(ERR, EAL, "Could not unlock segment\n");
+
+ /* additionally, if this was the last lock on
+ * this segment list, we can safely close the
+ * page file fd, so that one of the processes
+ * could then delete the file after shrinking.
+ */
+ if (ret < 1 && lock_fds[list_idx].count == 0) {
+ close(fd);
+ lock_fds[list_idx].memseg_list_fd = -1;
+ }
- if (ret < 0)
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Could not lock segment\n");
return -1;
- else if (ret == 0)
- /* failed to lock, not an error */
+ }
+ if (ret == 0)
+ /* failed to lock, not an error. */
return 0;
}
- if (fallocate(fd, flags, fa_offset, page_sz) < 0) {
+
+ /* grow or shrink the file */
+ ret = fallocate(fd, flags, fa_offset, page_sz);
+
+ if (ret < 0) {
if (fallocate_supported == -1 &&
errno == ENOTSUP) {
RTE_LOG(ERR, EAL, "%s(): fallocate() not supported, hugepage deallocation will be disabled\n",
} else {
fallocate_supported = 1;
- if (grow) {
- /* if can't read lock, it's an error */
- if (lock(fd, fa_offset, page_sz,
- F_RDLCK) != 1)
- return -1;
- } else {
- /* if can't unlock, it's an error */
- if (lock(fd, fa_offset, page_sz,
- F_UNLCK) != 1)
- return -1;
+ /* we've grown/shrunk the file, and we hold an
+ * exclusive lock now. check if there are no
+ * more segments active in this segment list,
+ * and remove the file if there aren't.
+ */
+ if (lock_fds[list_idx].count == 0) {
+ if (unlink(path))
+ RTE_LOG(ERR, EAL, "%s(): unlinking '%s' failed: %s\n",
+ __func__, path,
+ strerror(errno));
+ close(fd);
+ lock_fds[list_idx].memseg_list_fd = -1;
}
}
}
int fd;
size_t alloc_sz;
+ /* takes out a read lock on segment or segment list */
fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
- if (fd < 0)
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Couldn't get fd on hugepage file\n");
return -1;
+ }
alloc_sz = hi->hugepage_sz;
if (internal_config.single_file_segments) {
map_offset = seg_idx * alloc_sz;
- ret = resize_hugefile(fd, map_offset, alloc_sz, true);
+ ret = resize_hugefile(fd, path, list_idx, seg_idx, map_offset,
+ alloc_sz, true);
if (ret < 0)
goto resized;
} else {
__func__, strerror(errno));
goto resized;
}
- /* we've allocated a page - take out a read lock. we're using
- * fcntl() locks rather than flock() here because doing that
- * gives us one huge advantage - fcntl() locks are per-process,
- * not per-file descriptor, which means that we don't have to
- * keep the original fd's around to keep a lock on the file.
- *
- * this is useful, because when it comes to unmapping pages, we
- * will have to take out a write lock (to figure out if another
- * process still has this page mapped), and to do itwith flock()
- * we'll have to use original fd, as lock is associated with
- * that particular fd. with fcntl(), this is not necessary - we
- * can open a new fd and use fcntl() on that.
- */
- ret = lock(fd, map_offset, alloc_sz, F_RDLCK);
-
- /* this should not fail */
- if (ret != 1) {
- RTE_LOG(ERR, EAL, "%s(): error locking file: %s\n",
- __func__,
- strerror(errno));
- goto resized;
- }
}
/*
munmap(addr, alloc_sz);
resized:
if (internal_config.single_file_segments) {
- resize_hugefile(fd, map_offset, alloc_sz, false);
- if (is_zero_length(fd)) {
- struct msl_entry *te = get_msl_entry_by_idx(list_idx);
- /* te->fd is equivalent to fd */
- if (te != NULL && te->fd >= 0)
- te->fd = -1;
- /* ignore errors, can't make it any worse */
- unlink(path);
- close(fd);
- }
- /* if we're not removing the file, fd stays in the tailq */
+ resize_hugefile(fd, path, list_idx, seg_idx, map_offset,
+ alloc_sz, false);
+ /* ignore failure, can't make it any worse */
} else {
+ /* only remove file if we can take out a write lock */
+ if (lock(fd, LOCK_EX) == 1)
+ unlink(path);
close(fd);
- unlink(path);
}
return -1;
}
/* erase page data */
memset(ms->addr, 0, ms->len);
+ /* if we are not in single file segments mode, we're going to unmap the
+ * segment and thus drop the lock on the original fd, so take out another
+ * shared lock before we do that.
+ */
+ fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
+ if (fd < 0)
+ return -1;
+
if (mmap(ms->addr, ms->len, PROT_READ,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) ==
MAP_FAILED) {
return -1;
}
- fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
- if (fd < 0)
- return -1;
-
if (internal_config.single_file_segments) {
map_offset = seg_idx * ms->len;
- if (resize_hugefile(fd, map_offset, ms->len, false))
+ if (resize_hugefile(fd, path, list_idx, seg_idx, map_offset,
+ ms->len, false))
return -1;
- /* if file is zero-length, we've already shrunk it, so it's
- * safe to remove.
- */
- if (is_zero_length(fd)) {
- struct msl_entry *te = get_msl_entry_by_idx(list_idx);
- /* te->fd is equivalent to fd */
- if (te != NULL && te->fd >= 0)
- te->fd = -1;
- unlink(path);
- close(fd);
- }
- /* if we're not removing the file, fd stays in the tailq */
ret = 0;
} else {
/* if we're able to take out a write lock, we're the last one
* holding onto this page.
*/
-
- ret = lock(fd, 0, ms->len, F_WRLCK);
+ ret = lock(fd, LOCK_EX);
if (ret >= 0) {
/* no one else is using this page */
if (ret == 1)
unlink(path);
- ret = lock(fd, 0, ms->len, F_UNLCK);
- if (ret != 1)
- RTE_LOG(ERR, EAL, "%s(): unable to unlock file %s\n",
- __func__, path);
}
+ /* closing fd will drop the lock */
close(fd);
}
struct alloc_walk_param *wa = arg;
struct rte_memseg_list *cur_msl;
size_t page_sz;
- int cur_idx, start_idx, j;
+ int cur_idx, start_idx, j, dir_fd;
unsigned int msl_idx, need, i;
if (msl->page_sz != wa->page_sz)
return 0;
start_idx = cur_idx;
+ /* do not allow any page allocations during the time we're allocating,
+ * because file creation and locking operations are not atomic,
+ * and we might be the first or the last ones to use a particular page,
+ * so we need to ensure atomicity of every operation.
+ */
+ dir_fd = open(wa->hi->hugedir, O_RDONLY);
+ if (dir_fd < 0) {
+ RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n", __func__,
+ wa->hi->hugedir, strerror(errno));
+ return -1;
+ }
+ /* blocking writelock */
+ if (flock(dir_fd, LOCK_EX)) {
+ RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n", __func__,
+ wa->hi->hugedir, strerror(errno));
+ close(dir_fd);
+ return -1;
+ }
+
for (i = 0; i < need; i++, cur_idx++) {
struct rte_memseg *cur;
void *map_addr;
/* clear the list */
if (wa->ms)
memset(wa->ms, 0, sizeof(*wa->ms) * wa->n_segs);
+
+ close(dir_fd);
return -1;
}
if (wa->ms)
wa->segs_allocated = i;
if (i > 0)
cur_msl->version++;
+ close(dir_fd);
return 1;
}
struct rte_memseg_list *found_msl;
struct free_walk_param *wa = arg;
uintptr_t start_addr, end_addr;
- int msl_idx, seg_idx;
+ int msl_idx, seg_idx, ret, dir_fd;
start_addr = (uintptr_t) msl->base_va;
end_addr = start_addr + msl->memseg_arr.len * (size_t)msl->page_sz;
/* msl is const */
found_msl = &mcfg->memsegs[msl_idx];
+ /* do not allow any page allocations during the time we're freeing,
+ * because file creation and locking operations are not atomic,
+ * and we might be the first or the last ones to use a particular page,
+ * so we need to ensure atomicity of every operation.
+ */
+ dir_fd = open(wa->hi->hugedir, O_RDONLY);
+ if (dir_fd < 0) {
+ RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n", __func__,
+ wa->hi->hugedir, strerror(errno));
+ return -1;
+ }
+ /* blocking writelock */
+ if (flock(dir_fd, LOCK_EX)) {
+ RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n", __func__,
+ wa->hi->hugedir, strerror(errno));
+ close(dir_fd);
+ return -1;
+ }
+
found_msl->version++;
rte_fbarray_set_free(&found_msl->memseg_arr, seg_idx);
- if (free_seg(wa->ms, wa->hi, msl_idx, seg_idx))
+ ret = free_seg(wa->ms, wa->hi, msl_idx, seg_idx);
+
+ close(dir_fd);
+
+ if (ret < 0)
return -1;
return 1;
struct rte_memseg_list *local_msl, struct hugepage_info *hi,
unsigned int msl_idx)
{
- int ret;
+ int ret, dir_fd;
+
+ /* do not allow any page allocations during the time we're allocating,
+ * because file creation and locking operations are not atomic,
+ * and we might be the first or the last ones to use a particular page,
+ * so we need to ensure atomicity of every operation.
+ */
+ dir_fd = open(hi->hugedir, O_RDONLY);
+ if (dir_fd < 0) {
+ RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n", __func__,
+ hi->hugedir, strerror(errno));
+ return -1;
+ }
+ /* blocking writelock */
+ if (flock(dir_fd, LOCK_EX)) {
+ RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n", __func__,
+ hi->hugedir, strerror(errno));
+ close(dir_fd);
+ return -1;
+ }
/* ensure all allocated space is the same in both lists */
ret = sync_status(primary_msl, local_msl, hi, msl_idx, true);
if (ret < 0)
- return -1;
+ goto fail;
/* ensure all unallocated space is the same in both lists */
ret = sync_status(primary_msl, local_msl, hi, msl_idx, false);
if (ret < 0)
- return -1;
+ goto fail;
/* update version number */
local_msl->version = primary_msl->version;
+ close(dir_fd);
+
return 0;
+fail:
+ close(dir_fd);
+ return -1;
}
static int
return 0;
}
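+/* allocate and initialize the per-segment lock fd array (and the memseg list
+ * fd) for one memseg list, marking all fd's as invalid.
+ */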
+static int
+secondary_lock_list_create_walk(const struct rte_memseg_list *msl,
+ void *arg __rte_unused)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ unsigned int i, len;
+ int msl_idx;
+ int *data;
+
+ msl_idx = msl - mcfg->memsegs;
+ len = msl->memseg_arr.len;
+
+ /* ensure we have space to store a lock fd for each possible segment */
+ data = malloc(sizeof(int) * len);
+ if (data == NULL) {
+ RTE_LOG(ERR, EAL, "Unable to allocate space for lock descriptors\n");
+ return -1;
+ }
+ /* set all fd's as invalid */
+ for (i = 0; i < len; i++)
+ data[i] = -1;
+
+ lock_fds[msl_idx].fds = data;
+ lock_fds[msl_idx].len = len;
+ lock_fds[msl_idx].count = 0;
+ lock_fds[msl_idx].memseg_list_fd = -1;
+
+ return 0;
+}
+
int
eal_memalloc_init(void)
{
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
if (rte_memseg_list_walk(secondary_msl_create_walk, NULL) < 0)
return -1;
+
+ /* initialize all of the lock fd lists */
+ if (internal_config.single_file_segments)
+ if (rte_memseg_list_walk(secondary_lock_list_create_walk, NULL))
+ return -1;
return 0;
}