#include "eal_filesystem.h"
#include "eal_internal_cfg.h"
#include "eal_memalloc.h"
+#include "eal_private.h"
/*
* not all kernel version support fallocate on hugetlbfs, so fall back to
return st.st_size;
}
-/* we cannot use rte_memseg_list_walk() here because we will be holding a
- * write lock whenever we enter every function in this file, however copying
- * the same iteration code everywhere is not ideal as well. so, use a lockless
- * copy of memseg list walk here.
- */
-static int
-memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg)
-{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- int i, ret = 0;
-
- for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
- struct rte_memseg_list *msl = &mcfg->memsegs[i];
-
- if (msl->base_va == NULL)
- continue;
-
- ret = func(msl, arg);
- if (ret < 0)
- return -1;
- if (ret > 0)
- return 1;
- }
- return 0;
-}
-
/* returns 1 on successful lock, 0 on unsuccessful lock, -1 on error */
static int lock(int fd, int type)
{
int ret = 0;
int fd;
size_t alloc_sz;
+ int flags;
+ void *new_addr;
/* takes out a read lock on segment or segment list */
fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
if (va == MAP_FAILED) {
RTE_LOG(DEBUG, EAL, "%s(): mmap() failed: %s\n", __func__,
strerror(errno));
- goto resized;
+ /* mmap() failed, but the previous region might have been
+ * unmapped anyway, so try to remap it
+ */
+ goto unmapped;
}
if (va != addr) {
RTE_LOG(DEBUG, EAL, "%s(): wrong mmap() address\n", __func__);
mapped:
munmap(addr, alloc_sz);
+unmapped:
+ flags = MAP_FIXED;
+#ifdef RTE_ARCH_PPC_64
+ flags |= MAP_HUGETLB;
+#endif
+ new_addr = eal_get_virtual_area(addr, &alloc_sz, alloc_sz, 0, flags);
+ if (new_addr != addr) {
+ if (new_addr != NULL)
+ munmap(new_addr, alloc_sz);
+ /* we're leaving a hole in our virtual address space. if
+ * somebody else maps this hole now, we could accidentally
+ * overwrite it in the future.
+ */
+ RTE_LOG(CRIT, EAL, "Can't mmap holes in our virtual address space\n");
+ }
resized:
if (internal_config.single_file_segments) {
resize_hugefile(fd, path, list_idx, seg_idx, map_offset,
need = wa->n_segs;
/* try finding space in memseg list */
- cur_idx = rte_fbarray_find_next_n_free(&cur_msl->memseg_arr, 0, need);
+ cur_idx = rte_fbarray_find_prev_n_free(&cur_msl->memseg_arr,
+ cur_msl->memseg_arr.len - 1, need);
if (cur_idx < 0)
return 0;
start_idx = cur_idx;
wa.socket = socket;
wa.segs_allocated = 0;
- ret = memseg_list_walk_thread_unsafe(alloc_seg_walk, &wa);
+ /* memalloc is locked, so it's safe to use thread-unsafe version */
+ ret = rte_memseg_list_walk_thread_unsafe(alloc_seg_walk, &wa);
if (ret == 0) {
RTE_LOG(ERR, EAL, "%s(): couldn't find suitable memseg_list\n",
__func__);
wa.ms = cur;
wa.hi = hi;
- walk_res = memseg_list_walk_thread_unsafe(free_seg_walk, &wa);
+ /* memalloc is locked, so it's safe to use thread-unsafe version
+ */
+ walk_res = rte_memseg_list_walk_thread_unsafe(free_seg_walk,
+ &wa);
if (walk_res == 1)
continue;
if (walk_res == 0)
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
return 0;
- if (memseg_list_walk_thread_unsafe(sync_walk, NULL))
+ /* memalloc is locked, so it's safe to call thread-unsafe version */
+ if (rte_memseg_list_walk_thread_unsafe(sync_walk, NULL))
return -1;
return 0;
}