#include "eal_private.h"
#include "eal_internal_cfg.h"
#include "eal_memcfg.h"
+#include "eal_options.h"
#include "malloc_heap.h"
/*
return aligned_addr;
}
+int
+eal_memseg_list_init_named(struct rte_memseg_list *msl, const char *name,
+ uint64_t page_sz, int n_segs, int socket_id, bool heap)
+{
+ if (rte_fbarray_init(&msl->memseg_arr, name, n_segs,
+ sizeof(struct rte_memseg))) {
+ RTE_LOG(ERR, EAL, "Cannot allocate memseg list: %s\n",
+ rte_strerror(rte_errno));
+ return -1;
+ }
+
+ msl->page_sz = page_sz;
+ msl->socket_id = socket_id;
+ msl->base_va = NULL;
+ msl->heap = heap;
+
+ RTE_LOG(DEBUG, EAL,
+ "Memseg list allocated at socket %i, page size 0x%"PRIx64"kB\n",
+ socket_id, page_sz >> 10);
+
+ return 0;
+}
+
+int
+eal_memseg_list_init(struct rte_memseg_list *msl, uint64_t page_sz,
+ int n_segs, int socket_id, int type_msl_idx, bool heap)
+{
+ char name[RTE_FBARRAY_NAME_LEN];
+
+ snprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,
+ type_msl_idx);
+
+ return eal_memseg_list_init_named(
+ msl, name, page_sz, n_segs, socket_id, heap);
+}
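
For reference, MEMSEG_LIST_FMT (its expansion "memseg-%" PRIu64 "k-%i-%i" is visible
in the deletions from the OS-specific files below) encodes the page size in kB, the
socket ID, and the list index. A quick illustration with hypothetical values, not
part of the patch:

    char name[RTE_FBARRAY_NAME_LEN];

    /* 2 MB pages on socket 0, first list of this type */
    snprintf(name, sizeof(name), MEMSEG_LIST_FMT,
            (uint64_t)RTE_PGSIZE_2M >> 10, 0, 0);
    /* name now holds "memseg-2048k-0-0" */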
+
+int
+eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags)
+{
+ size_t page_sz, mem_sz;
+ void *addr;
+
+ page_sz = msl->page_sz;
+ mem_sz = page_sz * msl->memseg_arr.len;
+
+ addr = eal_get_virtual_area(
+ msl->base_va, &mem_sz, page_sz, 0, reserve_flags);
+ if (addr == NULL) {
+#ifndef RTE_EXEC_ENV_WINDOWS
+ /* The hint would be misleading on Windows, because the
+ * address is system-selected by default (base VA = 0).
+ * However, this function is called from many places,
+ * including common code, so suppress the hint here
+ * instead of duplicating the check at every call site.
+ */
+ if (rte_errno == EADDRNOTAVAIL)
+ RTE_LOG(ERR, EAL, "Cannot reserve %llu bytes at [%p] - "
+ "please use '--" OPT_BASE_VIRTADDR "' option\n",
+ (unsigned long long)mem_sz, msl->base_va);
+#endif
+ return -1;
+ }
+ msl->base_va = addr;
+ msl->len = mem_sz;
+
+ RTE_LOG(DEBUG, EAL, "VA reserved for memseg list at %p, size %zx\n",
+ addr, mem_sz);
+
+ return 0;
+}
+
+void
+eal_memseg_list_populate(struct rte_memseg_list *msl, void *addr, int n_segs)
+{
+ size_t page_sz = msl->page_sz;
+ int i;
+
+ for (i = 0; i < n_segs; i++) {
+ struct rte_fbarray *arr = &msl->memseg_arr;
+ struct rte_memseg *ms = rte_fbarray_get(arr, i);
+
+ if (rte_eal_iova_mode() == RTE_IOVA_VA)
+ ms->iova = (uintptr_t)addr;
+ else
+ ms->iova = RTE_BAD_IOVA;
+ ms->addr = addr;
+ ms->hugepage_sz = page_sz;
+ ms->socket_id = 0;
+ ms->len = page_sz;
+
+ rte_fbarray_set_used(arr, i);
+
+ addr = RTE_PTR_ADD(addr, page_sz);
+ }
+}
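
Taken together, the three helpers replace the per-OS boilerplate: init (or
init_named) creates the fbarray, alloc reserves VA space, populate marks the pages
as used. A condensed sketch of the sequence, assuming an anonymous mapping as in
the no-hugetlbfs call sites below (illustrative size, error logging elided):

    struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
    struct rte_memseg_list *msl = &mcfg->memsegs[0];
    uint64_t page_sz = RTE_PGSIZE_4K;
    size_t mem_sz = 64 << 20;	/* hypothetical 64 MB budget */
    int n_segs = mem_sz / page_sz;
    void *addr;

    if (eal_memseg_list_init_named(msl, "nohugemem", page_sz,
            n_segs, 0, true))
        return -1;
    if (eal_memseg_list_alloc(msl, 0))	/* sets msl->base_va and msl->len */
        return -1;
    addr = mmap(msl->base_va, msl->len, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    if (addr == MAP_FAILED)
        return -1;
    eal_memseg_list_populate(msl, addr, n_segs);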
+
static struct rte_memseg *
virt2memseg(const void *addr, const struct rte_memseg_list *msl)
{
eal_get_virtual_area(void *requested_addr, size_t *size,
size_t page_sz, int flags, int reserve_flags);
+/**
+ * Initialize a memory segment list and create its backing storage.
+ *
+ * @param msl
+ * Memory segment list to be filled.
+ * @param name
+ * Name for the backing storage.
+ * @param page_sz
+ * Size of segment pages in the MSL.
+ * @param n_segs
+ * Number of segments.
+ * @param socket_id
+ * Socket ID. Must not be SOCKET_ID_ANY.
+ * @param heap
+ * Mark MSL as pointing to a heap.
+ * @return
+ * 0 on success; (-1) on failure, with rte_errno set.
+ */
+int
+eal_memseg_list_init_named(struct rte_memseg_list *msl, const char *name,
+ uint64_t page_sz, int n_segs, int socket_id, bool heap);
+
+/**
+ * Initialize a memory segment list and create its backing storage,
+ * with a name derived from the MSL parameters.
+ *
+ * @param type_msl_idx
+ * Index of the MSL among other MSLs of the same socket and page size.
+ *
+ * @see eal_memseg_list_init_named for a description of the remaining parameters.
+ */
+int
+eal_memseg_list_init(struct rte_memseg_list *msl, uint64_t page_sz,
+ int n_segs, int socket_id, int type_msl_idx, bool heap);
+
+/**
+ * Reserve VA space for a memory segment list
+ * previously initialized with eal_memseg_list_init().
+ *
+ * @param msl
+ * Initialized memory segment list with page size defined.
+ * @param reserve_flags
+ * Extra memory reservation flags. Can be 0 if unnecessary.
+ * @return
+ * 0 on success; (-1) on failure, with rte_errno set.
+ */
+int
+eal_memseg_list_alloc(struct rte_memseg_list *msl, int reserve_flags);
+
+/**
+ * Populate MSL; each segment is one page long.
+ *
+ * @param msl
+ * Initialized memory segment list with page size defined.
+ * @param addr
+ * Starting address of list segments.
+ * @param n_segs
+ * Number of segments to populate.
+ */
+void
+eal_memseg_list_populate(struct rte_memseg_list *msl, void *addr, int n_segs);
+
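
A usage sketch for the hugepage path, following the memseg_primary_init() call
sites further below (hypothetical index and size variables; error paths
shortened). Note that the heap flag differs per platform in this patch: the
FreeBSD wrapper passes false, the Linux one passes true.

    struct rte_memseg_list *msl = &mcfg->memsegs[msl_idx];

    if (eal_memseg_list_init(msl, RTE_PGSIZE_2M, n_segs,
            socket_id, type_msl_idx, false))
        return -1;

    /* VA reservation only; actual segments are mapped in later */
    if (eal_memseg_list_alloc(msl, 0)) {
        RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list\n");
        return -1;
    }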
/**
* Get cpu core_id.
*
/* for debug purposes, hugetlbfs can be disabled */
if (internal_config.no_hugetlbfs) {
struct rte_memseg_list *msl;
- struct rte_fbarray *arr;
- struct rte_memseg *ms;
- uint64_t page_sz;
- int n_segs, cur_seg;
+ uint64_t mem_sz, page_sz;
+ int n_segs;
/* create a memseg list */
msl = &mcfg->memsegs[0];
+ mem_sz = internal_config.memory;
page_sz = RTE_PGSIZE_4K;
- n_segs = internal_config.memory / page_sz;
+ n_segs = mem_sz / page_sz;
- if (rte_fbarray_init(&msl->memseg_arr, "nohugemem", n_segs,
- sizeof(struct rte_memseg))) {
- RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
+ if (eal_memseg_list_init_named(
+ msl, "nohugemem", page_sz, n_segs, 0, true)) {
return -1;
}
- addr = mmap(NULL, internal_config.memory,
- PROT_READ | PROT_WRITE,
+ addr = mmap(NULL, mem_sz, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) {
RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
strerror(errno));
return -1;
}
- msl->base_va = addr;
- msl->page_sz = page_sz;
- msl->len = internal_config.memory;
- msl->socket_id = 0;
- msl->heap = 1;
-
- /* populate memsegs. each memseg is 1 page long */
- for (cur_seg = 0; cur_seg < n_segs; cur_seg++) {
- arr = &msl->memseg_arr;
- ms = rte_fbarray_get(arr, cur_seg);
- if (rte_eal_iova_mode() == RTE_IOVA_VA)
- ms->iova = (uintptr_t)addr;
- else
- ms->iova = RTE_BAD_IOVA;
- ms->addr = addr;
- ms->hugepage_sz = page_sz;
- ms->len = page_sz;
- ms->socket_id = 0;
- rte_fbarray_set_used(arr, cur_seg);
- addr = RTE_PTR_ADD(addr, page_sz);
- }
+ msl->base_va = addr;
+ msl->len = mem_sz;
+
+ eal_memseg_list_populate(msl, addr, n_segs);
return 0;
}
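
Side by side, the two no-huge paths now differ mainly in how the VA range is
obtained. A condensed comparison (excerpted from the hunks above and below, not
new code; flags and fd come from the Linux call site):

    /* FreeBSD: map anywhere, then record the result */
    addr = mmap(NULL, mem_sz, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    msl->base_va = addr;
    msl->len = mem_sz;

    /* Linux: reserve VA first, then map over the reservation */
    if (eal_memseg_list_alloc(msl, 0))
        return -1;
    addr = mmap(msl->base_va, mem_sz, PROT_READ | PROT_WRITE,
            flags | MAP_FIXED, fd, 0);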
return RTE_ALIGN(area_sz, page_sz);
}
-#define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"
static int
-alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
+memseg_list_init(struct rte_memseg_list *msl, uint64_t page_sz,
int n_segs, int socket_id, int type_msl_idx)
{
- char name[RTE_FBARRAY_NAME_LEN];
-
- snprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,
- type_msl_idx);
- if (rte_fbarray_init(&msl->memseg_arr, name, n_segs,
- sizeof(struct rte_memseg))) {
- RTE_LOG(ERR, EAL, "Cannot allocate memseg list: %s\n",
- rte_strerror(rte_errno));
- return -1;
- }
-
- msl->page_sz = page_sz;
- msl->socket_id = socket_id;
- msl->base_va = NULL;
-
- RTE_LOG(DEBUG, EAL, "Memseg list allocated: 0x%zxkB at socket %i\n",
- (size_t)page_sz >> 10, socket_id);
-
- return 0;
+ return eal_memseg_list_init(
+ msl, page_sz, n_segs, socket_id, type_msl_idx, false);
}
static int
-alloc_va_space(struct rte_memseg_list *msl)
+memseg_list_alloc(struct rte_memseg_list *msl)
{
- uint64_t page_sz;
- size_t mem_sz;
- void *addr;
int flags = 0;
#ifdef RTE_ARCH_PPC_64
- flags |= MAP_HUGETLB;
+ flags |= EAL_RESERVE_HUGEPAGES;
#endif
-
- page_sz = msl->page_sz;
- mem_sz = page_sz * msl->memseg_arr.len;
-
- addr = eal_get_virtual_area(msl->base_va, &mem_sz, page_sz, 0, flags);
- if (addr == NULL) {
- if (rte_errno == EADDRNOTAVAIL)
- RTE_LOG(ERR, EAL, "Could not mmap %llu bytes at [%p] - "
- "please use '--" OPT_BASE_VIRTADDR "' option\n",
- (unsigned long long)mem_sz, msl->base_va);
- else
- RTE_LOG(ERR, EAL, "Cannot reserve memory\n");
- return -1;
- }
- msl->base_va = addr;
- msl->len = mem_sz;
-
- return 0;
+ return eal_memseg_list_alloc(msl, flags);
}
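
Note that the PPC64 special case now requests a hugepage-backed reservation
through the OS-independent EAL_RESERVE_HUGEPAGES flag instead of the
Linux-specific MAP_HUGETLB; eal_get_virtual_area() receives it through the
reserve_flags argument of eal_memseg_list_alloc(). Any other caller needing the
same behavior would follow the same pattern (sketch, mirroring the wrapper
above):

    int flags = 0;

#ifdef RTE_ARCH_PPC_64
    /* PPC64 historically reserves with hugepages (cf. MAP_HUGETLB above) */
    flags |= EAL_RESERVE_HUGEPAGES;
#endif
    if (eal_memseg_list_alloc(msl, flags) < 0)
        return -1;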
-
static int
memseg_primary_init(void)
{
cur_max_mem);
n_segs = cur_mem / hugepage_sz;
- if (alloc_memseg_list(msl, hugepage_sz, n_segs,
+ if (memseg_list_init(msl, hugepage_sz, n_segs,
0, type_msl_idx))
return -1;
total_type_mem = total_segs * hugepage_sz;
type_msl_idx++;
- if (alloc_va_space(msl)) {
+ if (memseg_list_alloc(msl)) {
RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list\n");
return -1;
}
}
/* preallocate VA space */
- if (alloc_va_space(msl)) {
+ if (memseg_list_alloc(msl)) {
RTE_LOG(ERR, EAL, "Cannot preallocate VA space for hugepage memory\n");
return -1;
}
}
static int
-free_memseg_list(struct rte_memseg_list *msl)
+memseg_list_free(struct rte_memseg_list *msl)
{
if (rte_fbarray_destroy(&msl->memseg_arr)) {
RTE_LOG(ERR, EAL, "Cannot destroy memseg list\n");
return 0;
}
-#define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"
static int
-alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
+memseg_list_init(struct rte_memseg_list *msl, uint64_t page_sz,
int n_segs, int socket_id, int type_msl_idx)
{
- char name[RTE_FBARRAY_NAME_LEN];
-
- snprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,
- type_msl_idx);
- if (rte_fbarray_init(&msl->memseg_arr, name, n_segs,
- sizeof(struct rte_memseg))) {
- RTE_LOG(ERR, EAL, "Cannot allocate memseg list: %s\n",
- rte_strerror(rte_errno));
- return -1;
- }
-
- msl->page_sz = page_sz;
- msl->socket_id = socket_id;
- msl->base_va = NULL;
- msl->heap = 1; /* mark it as a heap segment */
-
- RTE_LOG(DEBUG, EAL, "Memseg list allocated: 0x%zxkB at socket %i\n",
- (size_t)page_sz >> 10, socket_id);
-
- return 0;
+ return eal_memseg_list_init(
+ msl, page_sz, n_segs, socket_id, type_msl_idx, true);
}
static int
-alloc_va_space(struct rte_memseg_list *msl)
+memseg_list_alloc(struct rte_memseg_list *msl)
{
- uint64_t page_sz;
- size_t mem_sz;
- void *addr;
- int flags = 0;
-
- page_sz = msl->page_sz;
- mem_sz = page_sz * msl->memseg_arr.len;
-
- addr = eal_get_virtual_area(msl->base_va, &mem_sz, page_sz, 0, flags);
- if (addr == NULL) {
- if (rte_errno == EADDRNOTAVAIL)
- RTE_LOG(ERR, EAL, "Could not mmap %llu bytes at [%p] - "
- "please use '--" OPT_BASE_VIRTADDR "' option\n",
- (unsigned long long)mem_sz, msl->base_va);
- else
- RTE_LOG(ERR, EAL, "Cannot reserve memory\n");
- return -1;
- }
- msl->base_va = addr;
- msl->len = mem_sz;
-
- return 0;
+ return eal_memseg_list_alloc(msl, 0);
}
/*
}
/* now, allocate fbarray itself */
- if (alloc_memseg_list(msl, page_sz, n_segs, socket,
+ if (memseg_list_init(msl, page_sz, n_segs, socket,
msl_idx) < 0)
return -1;
/* finally, allocate VA space */
- if (alloc_va_space(msl) < 0)
+ if (memseg_list_alloc(msl) < 0) {
+ RTE_LOG(ERR, EAL, "Cannot preallocate 0x%"PRIx64"kB hugepages\n",
+ page_sz >> 10);
return -1;
+ }
}
}
return 0;
struct rte_mem_config *mcfg;
struct hugepage_file *hugepage = NULL, *tmp_hp = NULL;
struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
- struct rte_fbarray *arr;
- struct rte_memseg *ms;
uint64_t memory[RTE_MAX_NUMA_NODES];
void *prealloc_addr;
size_t mem_sz;
struct rte_memseg_list *msl;
- int n_segs, cur_seg, fd, flags;
+ int n_segs, fd, flags;
#ifdef MEMFD_SUPPORTED
int memfd;
#endif
/* create a memseg list */
msl = &mcfg->memsegs[0];
+ mem_sz = internal_config.memory;
page_sz = RTE_PGSIZE_4K;
- n_segs = internal_config.memory / page_sz;
+ n_segs = mem_sz / page_sz;
- if (rte_fbarray_init(&msl->memseg_arr, "nohugemem", n_segs,
- sizeof(struct rte_memseg))) {
- RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
+ if (eal_memseg_list_init_named(
+ msl, "nohugemem", page_sz, n_segs, 0, true)) {
return -1;
}
/* preallocate address space for the memory, so that it can be
* fit into the DMA mask.
*/
- mem_sz = internal_config.memory;
- prealloc_addr = eal_get_virtual_area(
- NULL, &mem_sz, page_sz, 0, 0);
- if (prealloc_addr == NULL) {
- RTE_LOG(ERR, EAL,
- "%s: reserving memory area failed: "
- "%s\n",
- __func__, strerror(errno));
+ if (eal_memseg_list_alloc(msl, 0)) {
+ RTE_LOG(ERR, EAL, "Cannot preallocate VA space for hugepage memory\n");
return -1;
}
+
+ prealloc_addr = msl->base_va;
addr = mmap(prealloc_addr, mem_sz, PROT_READ | PROT_WRITE,
flags | MAP_FIXED, fd, 0);
if (addr == MAP_FAILED || addr != prealloc_addr) {
munmap(prealloc_addr, mem_sz);
return -1;
}
- msl->base_va = addr;
- msl->page_sz = page_sz;
- msl->socket_id = 0;
- msl->len = mem_sz;
- msl->heap = 1;
/* we're in single-file segments mode, so only the segment list
* fd needs to be set up.
}
}
- /* populate memsegs. each memseg is one page long */
- for (cur_seg = 0; cur_seg < n_segs; cur_seg++) {
- arr = &msl->memseg_arr;
- ms = rte_fbarray_get(arr, cur_seg);
- if (rte_eal_iova_mode() == RTE_IOVA_VA)
- ms->iova = (uintptr_t)addr;
- else
- ms->iova = RTE_BAD_IOVA;
- ms->addr = addr;
- ms->hugepage_sz = page_sz;
- ms->socket_id = 0;
- ms->len = page_sz;
-
- rte_fbarray_set_used(arr, cur_seg);
-
- addr = RTE_PTR_ADD(addr, (size_t)page_sz);
- }
+ eal_memseg_list_populate(msl, addr, n_segs);
if (mcfg->dma_maskbits &&
rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
RTE_LOG(ERR, EAL,
max_pagesz_mem);
n_segs = cur_mem / hugepage_sz;
- if (alloc_memseg_list(msl, hugepage_sz, n_segs,
+ if (memseg_list_init(msl, hugepage_sz, n_segs,
socket_id, type_msl_idx)) {
/* failing to allocate a memseg list is
* a serious error.
return -1;
}
- if (alloc_va_space(msl)) {
+ if (memseg_list_alloc(msl)) {
/* if we couldn't allocate VA space, we
* can try with smaller page sizes.
*/
RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list, retrying with different page size\n");
/* deallocate memseg list */
- if (free_memseg_list(msl))
+ if (memseg_list_free(msl))
return -1;
break;
}
}
msl = &mcfg->memsegs[msl_idx++];
- if (alloc_memseg_list(msl, pagesz, n_segs,
+ if (memseg_list_init(msl, pagesz, n_segs,
socket_id, cur_seglist))
goto out;
- if (alloc_va_space(msl)) {
+ if (memseg_list_alloc(msl)) {
RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list\n");
goto out;
}
}
/* preallocate VA space */
- if (alloc_va_space(msl)) {
+ if (memseg_list_alloc(msl)) {
RTE_LOG(ERR, EAL, "Cannot preallocate VA space for hugepage memory\n");
return -1;
}