Move functions common to Unix and Windows into eal_common_config.c.
Those functions are getters for the IOVA mode, the EAL configuration
structures and the multi-process state.
Move rte_config, internal_config, early_mem_config and runtime_dir
to be defined in the common file with getter functions.
Refactor the users of the config variables above to use
the getter functions.
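
For illustration only (a sketch, not part of this patch), callers move
from touching the globals directly to going through the new getters:

    /* before: direct access to the global variable */
    if (internal_config.no_hugetlbfs)
            return 0;

    /* after: access through the getter from eal_common_config.c */
    struct internal_config *internal_conf =
            eal_get_internal_configuration();
    if (internal_conf->no_hugetlbfs)
            return 0;
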
Signed-off-by: Tal Shnaiderman <talshn@mellanox.com>
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Mellanox Technologies, Ltd
+ */
+#include <string.h>
+
+#include <rte_os.h>
+#include <rte_string_fns.h>
+
+#include "eal_private.h"
+#include "eal_memcfg.h"
+
+/* early configuration structure, when memory config is not mmapped */
+static struct rte_mem_config early_mem_config;
+
+/* Address of global and public configuration */
+static struct rte_config rte_config = {
+ .mem_config = &early_mem_config,
+};
+
+/* platform-specific runtime dir */
+static char runtime_dir[PATH_MAX];
+
+/* internal configuration */
+static struct internal_config internal_config;
+
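+/* Return the runtime directory path of DPDK */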
+const char *
+rte_eal_get_runtime_dir(void)
+{
+ return runtime_dir;
+}
+
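+/* Set the runtime directory of DPDK; fails if the path does not fit */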
+int
+eal_set_runtime_dir(char *run_dir, size_t size)
+{
+ size_t str_size;
+
+ str_size = strlcpy(runtime_dir, run_dir, size);
+ if (str_size >= size) {
+ RTE_LOG(ERR, EAL, "Runtime directory string too long\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Return a pointer to the configuration structure */
+struct rte_config *
+rte_eal_get_configuration(void)
+{
+ return &rte_config;
+}
+
+/* Return a pointer to the internal configuration structure */
+struct internal_config *
+eal_get_internal_configuration(void)
+{
+ return &internal_config;
+}
+
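+/* Return the IOVA mode in use */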
+enum rte_iova_mode
+rte_eal_iova_mode(void)
+{
+ return rte_eal_get_configuration()->iova_mode;
+}
+
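+/* Return the DPDK process type */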
+enum rte_proc_type_t
+rte_eal_process_type(void)
+{
+ return rte_config.process_type;
+}
+
+/* Return user provided mbuf pool ops name */
+const char *
+rte_eal_mbuf_user_pool_ops(void)
+{
+ return internal_config.user_mbuf_pool_ops_name;
+}
+
+/* return non-zero if hugepages are enabled. */
+int
+rte_eal_has_hugepages(void)
+{
+ return !internal_config.no_hugetlbfs;
+}
+
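+/* return non-zero if PCI is enabled. */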
+int
+rte_eal_has_pci(void)
+{
+ return !internal_config.no_pci;
+}
uint64_t max_mem, max_mem_per_type;
unsigned int max_seglists_per_type;
unsigned int n_memtypes, cur_type;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* no-huge does not need this at all */
- if (internal_config.no_hugetlbfs)
+ if (internal_conf->no_hugetlbfs)
return 0;
/*
*/
/* create space for mem types */
- n_memtypes = internal_config.num_hugepage_sizes * rte_socket_count();
+ n_memtypes = internal_conf->num_hugepage_sizes * rte_socket_count();
memtypes = calloc(n_memtypes, sizeof(*memtypes));
if (memtypes == NULL) {
RTE_LOG(ERR, EAL, "Cannot allocate space for memory types\n");
/* populate mem types */
cur_type = 0;
- for (hpi_idx = 0; hpi_idx < (int) internal_config.num_hugepage_sizes;
+ for (hpi_idx = 0; hpi_idx < (int) internal_conf->num_hugepage_sizes;
hpi_idx++) {
struct hugepage_info *hpi;
uint64_t hugepage_sz;
- hpi = &internal_config.hugepage_info[hpi_idx];
+ hpi = &internal_conf->hugepage_info[hpi_idx];
hugepage_sz = hpi->hugepage_sz;
for (i = 0; i < (int) rte_socket_count(); i++, cur_type++) {
#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
/* we can still sort pages by socket in legacy mode */
- if (!internal_config.legacy_mem && socket_id > 0)
+ if (!internal_conf->legacy_mem && socket_id > 0)
break;
#endif
memtypes[cur_type].page_sz = hugepage_sz;
struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
uint64_t memory[RTE_MAX_NUMA_NODES];
int hp_sz_idx, socket_id;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
memset(used_hp, 0, sizeof(used_hp));
for (hp_sz_idx = 0;
- hp_sz_idx < (int) internal_config.num_hugepage_sizes;
+ hp_sz_idx < (int) internal_conf->num_hugepage_sizes;
hp_sz_idx++) {
#ifndef RTE_ARCH_64
struct hugepage_info dummy;
#endif
/* also initialize used_hp hugepage sizes in used_hp */
struct hugepage_info *hpi;
- hpi = &internal_config.hugepage_info[hp_sz_idx];
+ hpi = &internal_conf->hugepage_info[hp_sz_idx];
used_hp[hp_sz_idx].hugepage_sz = hpi->hugepage_sz;
#ifndef RTE_ARCH_64
/* make a copy of socket_mem, needed for balanced allocation. */
for (hp_sz_idx = 0; hp_sz_idx < RTE_MAX_NUMA_NODES; hp_sz_idx++)
- memory[hp_sz_idx] = internal_config.socket_mem[hp_sz_idx];
+ memory[hp_sz_idx] = internal_conf->socket_mem[hp_sz_idx];
/* calculate final number of pages */
if (eal_dynmem_calc_num_pages_per_socket(memory,
- internal_config.hugepage_info, used_hp,
- internal_config.num_hugepage_sizes) < 0)
+ internal_conf->hugepage_info, used_hp,
+ internal_conf->num_hugepage_sizes) < 0)
return -1;
for (hp_sz_idx = 0;
- hp_sz_idx < (int)internal_config.num_hugepage_sizes;
+ hp_sz_idx < (int)internal_conf->num_hugepage_sizes;
hp_sz_idx++) {
for (socket_id = 0; socket_id < RTE_MAX_NUMA_NODES;
socket_id++) {
}
/* if socket limits were specified, set them */
- if (internal_config.force_socket_limits) {
+ if (internal_conf->force_socket_limits) {
unsigned int i;
for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
- uint64_t limit = internal_config.socket_limit[i];
+ uint64_t limit = internal_conf->socket_limit[i];
if (limit == 0)
continue;
if (rte_mem_alloc_validator_register("socket-limit",
{
uint64_t size = 0;
unsigned int i;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
- for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
- struct hugepage_info *hpi = &internal_config.hugepage_info[i];
+ for (i = 0; i < internal_conf->num_hugepage_sizes; i++) {
+ struct hugepage_info *hpi = &internal_conf->hugepage_info[i];
size += hpi->hugepage_sz * hpi->num_pages[socket];
}
unsigned int requested, available;
int total_num_pages = 0;
uint64_t remaining_mem, cur_mem;
- uint64_t total_mem = internal_config.memory;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+ uint64_t total_mem = internal_conf->memory;
if (num_hp_info == 0)
return -1;
/* if specific memory amounts per socket weren't requested */
- if (internal_config.force_sockets == 0) {
+ if (internal_conf->force_sockets == 0) {
size_t total_size;
#ifdef RTE_ARCH_64
int cpu_per_socket[RTE_MAX_NUMA_NODES];
* sockets according to number of cores from CPU mask present
* on each socket.
*/
- total_size = internal_config.memory;
+ total_size = internal_conf->memory;
for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0;
socket++) {
/* Set memory amount per socket */
- default_size = internal_config.memory *
+ default_size = internal_conf->memory *
cpu_per_socket[socket] / rte_lcore_count();
/* Limit to maximum available memory on socket */
/* in 32-bit mode, allocate all of the memory only on master
* lcore socket
*/
- total_size = internal_config.memory;
+ total_size = internal_conf->memory;
for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0;
socket++) {
struct rte_config *cfg = rte_eal_get_configuration();
/* if we didn't satisfy all memory requirements per socket */
if (memory[socket] > 0 &&
- internal_config.socket_mem[socket] != 0) {
+ internal_conf->socket_mem[socket] != 0) {
/* to prevent icc errors */
requested = (unsigned int)(
- internal_config.socket_mem[socket] / 0x100000);
+ internal_conf->socket_mem[socket] / 0x100000);
available = requested -
((unsigned int)(memory[socket] / 0x100000));
RTE_LOG(ERR, EAL, "Not enough memory available on "
/* if we didn't satisfy total memory requirements */
if (total_mem > 0) {
- requested = (unsigned int)(internal_config.memory / 0x100000);
+ requested = (unsigned int)(internal_conf->memory / 0x100000);
available = requested - (unsigned int)(total_mem / 0x100000);
RTE_LOG(ERR, EAL, "Not enough memory available! "
"Requested: %uMB, available: %uMB\n",
struct mem_area *ma = NULL;
void *data = NULL;
int fd = -1;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
if (arr == NULL) {
rte_errno = EINVAL;
fd = -1;
- if (internal_config.no_shconf) {
+ if (internal_conf->no_shconf) {
/* remap virtual area as writable */
static const int flags = RTE_MAP_FORCE_ADDRESS |
RTE_MAP_PRIVATE | RTE_MAP_ANONYMOUS;
size_t mmap_len;
int fd, ret;
char path[PATH_MAX];
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
if (arr == NULL) {
rte_errno = EINVAL;
goto out;
}
/* with no shconf, there were never any files to begin with */
- if (!internal_config.no_shconf) {
+ if (!internal_conf->no_shconf) {
/*
* attempt to get an exclusive lock on the file, to ensure it
* has been detached by all other processes
{
struct rte_config *cfg = rte_eal_get_configuration();
struct rte_mem_config *mcfg = cfg->mem_config;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* ALL shared mem_config related INIT DONE */
if (cfg->process_type == RTE_PROC_PRIMARY)
mcfg->magic = RTE_MAGIC;
- internal_config.init_complete = 1;
+ internal_conf->init_complete = 1;
}
void
eal_mcfg_update_internal(void)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
- internal_config.legacy_mem = mcfg->legacy_mem;
- internal_config.single_file_segments = mcfg->single_file_segments;
+ internal_conf->legacy_mem = mcfg->legacy_mem;
+ internal_conf->single_file_segments = mcfg->single_file_segments;
}
void
eal_mcfg_update_from_internal(void)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
- mcfg->legacy_mem = internal_config.legacy_mem;
- mcfg->single_file_segments = internal_config.single_file_segments;
+ mcfg->legacy_mem = internal_conf->legacy_mem;
+ mcfg->single_file_segments = internal_conf->single_file_segments;
/* record current DPDK version */
mcfg->version = RTE_VERSION;
}
void *end, *aligned_start, *aligned_end;
size_t pgsz = (size_t)msl->page_sz;
const struct rte_memseg *ms;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* for IOVA_VA, it's always contiguous */
if (rte_eal_iova_mode() == RTE_IOVA_VA && !msl->external)
return true;
/* for legacy memory, it's always contiguous */
- if (internal_config.legacy_mem)
+ if (internal_conf->legacy_mem)
return true;
end = RTE_PTR_ADD(start, len);
uint64_t map_sz;
void *mapped_addr, *aligned_addr;
uint8_t try = 0;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
if (system_page_sz == 0)
system_page_sz = rte_mem_page_size();
allow_shrink = (flags & EAL_VIRTUAL_AREA_ALLOW_SHRINK) > 0;
unmap = (flags & EAL_VIRTUAL_AREA_UNMAP) > 0;
- if (next_baseaddr == NULL && internal_config.base_virtaddr != 0 &&
+ if (next_baseaddr == NULL && internal_conf->base_virtaddr != 0 &&
rte_eal_process_type() == RTE_PROC_PRIMARY)
- next_baseaddr = (void *) internal_config.base_virtaddr;
+ next_baseaddr = (void *) internal_conf->base_virtaddr;
#ifdef RTE_ARCH_64
- if (next_baseaddr == NULL && internal_config.base_virtaddr == 0 &&
+ if (next_baseaddr == NULL && internal_conf->base_virtaddr == 0 &&
rte_eal_process_type() == RTE_PROC_PRIMARY)
next_baseaddr = (void *) eal_get_baseaddr();
#endif
rte_mem_iova2virt(rte_iova_t iova)
{
struct virtiova vi;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
memset(&vi, 0, sizeof(vi));
/* for legacy mem, we can get away with scanning VA-contiguous segments,
* as we know they are PA-contiguous as well
*/
- if (internal_config.legacy_mem)
+ if (internal_conf->legacy_mem)
rte_memseg_contig_walk(find_virt_legacy, &vi);
else
rte_memseg_walk(find_virt, &vi);
rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb,
void *arg)
{
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
/* FreeBSD boots with legacy mem enabled by default */
- if (internal_config.legacy_mem) {
+ if (internal_conf->legacy_mem) {
RTE_LOG(DEBUG, EAL, "Registering mem event callbacks not supported\n");
rte_errno = ENOTSUP;
return -1;
int
rte_mem_event_callback_unregister(const char *name, void *arg)
{
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
/* FreeBSD boots with legacy mem enabled by default */
- if (internal_config.legacy_mem) {
+ if (internal_conf->legacy_mem) {
RTE_LOG(DEBUG, EAL, "Registering mem event callbacks not supported\n");
rte_errno = ENOTSUP;
return -1;
rte_mem_alloc_validator_register(const char *name,
rte_mem_alloc_validator_t clb, int socket_id, size_t limit)
{
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
/* FreeBSD boots with legacy mem enabled by default */
- if (internal_config.legacy_mem) {
+ if (internal_conf->legacy_mem) {
RTE_LOG(DEBUG, EAL, "Registering mem alloc validators not supported\n");
rte_errno = ENOTSUP;
return -1;
int
rte_mem_alloc_validator_unregister(const char *name, int socket_id)
{
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
/* FreeBSD boots with legacy mem enabled by default */
- if (internal_config.legacy_mem) {
+ if (internal_conf->legacy_mem) {
RTE_LOG(DEBUG, EAL, "Registering mem alloc validators not supported\n");
rte_errno = ENOTSUP;
return -1;
rte_eal_memdevice_init(void)
{
struct rte_config *config;
+ const struct internal_config *internal_conf;
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
return 0;
+ internal_conf = eal_get_internal_configuration();
config = rte_eal_get_configuration();
- config->mem_config->nchannel = internal_config.force_nchannel;
- config->mem_config->nrank = internal_config.force_nrank;
+ config->mem_config->nchannel = internal_conf->force_nchannel;
+ config->mem_config->nrank = internal_conf->force_nrank;
return 0;
}
rte_eal_memory_init(void)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
int retval;
RTE_LOG(DEBUG, EAL, "Setting up physically contiguous memory...\n");
if (retval < 0)
goto fail;
- if (internal_config.no_shconf == 0 && rte_eal_memdevice_init() < 0)
+ if (internal_conf->no_shconf == 0 && rte_eal_memdevice_init() < 0)
goto fail;
return 0;
const char *
eal_get_hugefile_prefix(void)
{
- if (internal_config.hugefile_prefix != NULL)
- return internal_config.hugefile_prefix;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
+ if (internal_conf->hugefile_prefix != NULL)
+ return internal_conf->hugefile_prefix;
return HUGEFILE_PREFIX_DEFAULT;
}
eal_parse_iova_mode(const char *name)
{
int mode;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
if (name == NULL)
return -1;
else
return -1;
- internal_config.iova_mode = mode;
+ internal_conf->iova_mode = mode;
return 0;
}
{
char *end;
uint64_t addr;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
errno = 0;
addr = strtoull(arg, &end, 16);
* it can align to 2MB for x86. So this alignment can also be used
* on x86 and other architectures.
*/
- internal_config.base_virtaddr =
+ internal_conf->base_virtaddr =
RTE_PTR_ALIGN_CEIL((uintptr_t)addr, (size_t)RTE_PGSIZE_16M);
return 0;
{
int i;
struct rte_config *cfg = rte_eal_get_configuration();
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
if (!core_parsed)
eal_auto_detect_cores(cfg);
- if (internal_config.process_type == RTE_PROC_AUTO)
- internal_config.process_type = eal_proc_type_detect();
+ if (internal_conf->process_type == RTE_PROC_AUTO)
+ internal_conf->process_type = eal_proc_type_detect();
/* default master lcore is the first one */
if (!master_lcore_parsed) {
eal_check_common_options(struct internal_config *internal_cfg)
{
struct rte_config *cfg = rte_eal_get_configuration();
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
if (cfg->lcore_role[cfg->master_lcore] != ROLE_RTE) {
RTE_LOG(ERR, EAL, "Master lcore is not enabled for DPDK\n");
"be specified together with --"OPT_NO_HUGE"\n");
return -1;
}
- if (internal_config.force_socket_limits && internal_config.legacy_mem) {
+ if (internal_conf->force_socket_limits && internal_conf->legacy_mem) {
RTE_LOG(ERR, EAL, "Option --"OPT_SOCKET_LIMIT
" is only supported in non-legacy memory mode\n");
}
rte_mp_action_register(const char *name, rte_mp_t action)
{
struct action_entry *entry;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
if (validate_action_name(name) != 0)
return -1;
- if (internal_config.no_shconf) {
+ if (internal_conf->no_shconf) {
RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
rte_errno = ENOTSUP;
return -1;
rte_mp_action_unregister(const char *name)
{
struct action_entry *entry;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
if (validate_action_name(name) != 0)
return;
- if (internal_config.no_shconf) {
+ if (internal_conf->no_shconf) {
RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
return;
}
struct action_entry *entry;
struct rte_mp_msg *msg = &m->msg;
rte_mp_t action = NULL;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
RTE_LOG(DEBUG, EAL, "msg: %s\n", msg->name);
pthread_mutex_unlock(&mp_mutex_action);
if (!action) {
- if (m->type == MP_REQ && !internal_config.init_complete) {
+ if (m->type == MP_REQ && !internal_conf->init_complete) {
/* if this is a request, and init is not yet complete,
* and callback wasn't registered, we should tell the
* requester to ignore our existence because we're not
char path[PATH_MAX];
int dir_fd;
pthread_t mp_handle_tid;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* in no shared files mode, we do not have secondary processes support,
* so no need to initialize IPC.
*/
- if (internal_config.no_shconf) {
+ if (internal_conf->no_shconf) {
RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC will be disabled\n");
rte_errno = ENOTSUP;
return -1;
int
rte_mp_sendmsg(struct rte_mp_msg *msg)
{
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
if (check_input(msg) != 0)
return -1;
- if (internal_config.no_shconf) {
+ if (internal_conf->no_shconf) {
RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
rte_errno = ENOTSUP;
return -1;
struct dirent *ent;
struct timeval now;
struct timespec end;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
RTE_LOG(DEBUG, EAL, "request: %s\n", req->name);
if (check_input(req) != 0)
goto end;
- if (internal_config.no_shconf) {
+ if (internal_conf->no_shconf) {
RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
rte_errno = ENOTSUP;
return -1;
struct timeval now;
struct timespec *end;
bool dummy_used = false;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
RTE_LOG(DEBUG, EAL, "request: %s\n", req->name);
if (check_input(req) != 0)
return -1;
- if (internal_config.no_shconf) {
+ if (internal_conf->no_shconf) {
RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
rte_errno = ENOTSUP;
return -1;
rte_mp_reply(struct rte_mp_msg *msg, const char *peer)
{
RTE_LOG(DEBUG, EAL, "reply: %s\n", msg->name);
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
if (check_input(msg) != 0)
return -1;
return -1;
}
- if (internal_config.no_shconf) {
+ if (internal_conf->no_shconf) {
RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
return 0;
}
static void *rte_thread_init(void *arg)
{
int ret;
- rte_cpuset_t *cpuset = &internal_config.ctrl_cpuset;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+ rte_cpuset_t *cpuset = &internal_conf->ctrl_cpuset;
struct rte_thread_ctrl_params *params = arg;
void *(*start_routine)(void *) = params->start_routine;
void *routine_arg = params->arg;
const pthread_attr_t *attr,
void *(*start_routine)(void *), void *arg)
{
- rte_cpuset_t *cpuset = &internal_config.ctrl_cpuset;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+ rte_cpuset_t *cpuset = &internal_conf->ctrl_cpuset;
struct rte_thread_ctrl_params *params;
int ret;
/**< indicates whether EAL has completed initialization */
unsigned int no_telemetry; /**< true to disable Telemetry */
};
-extern struct internal_config internal_config; /**< Global EAL configuration. */
void eal_reset_internal_config(struct internal_config *internal_cfg);
int
eal_mem_set_dump(void *virt, size_t size, bool dump);
+/**
+ * Set the runtime directory of DPDK.
+ *
+ * @param run_dir
+ *   The new runtime directory path of DPDK.
+ * @param size
+ *   The size of the run_dir buffer, in bytes.
+ * @return
+ *   0 on success, (-1) on failure.
+ */
+int
+eal_set_runtime_dir(char *run_dir, size_t size);
+
+/**
+ * Get the internal configuration structure.
+ *
+ * @return
+ * A pointer to the internal configuration structure.
+ */
+struct internal_config *
+eal_get_internal_configuration(void);
+
#endif /* _EAL_PRIVATE_H_ */
#include <rte_common.h>
#include <rte_spinlock.h>
+#include "eal_private.h"
#include "eal_internal_cfg.h"
#include "eal_memalloc.h"
#include "malloc_elem.h"
rte_iova_t expected_iova;
struct rte_memseg *ms;
size_t page_sz, cur, max;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
page_sz = (size_t)elem->msl->page_sz;
data_start = RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN);
*/
if (!elem->msl->external &&
(rte_eal_iova_mode() == RTE_IOVA_VA ||
- (internal_config.legacy_mem &&
+ (internal_conf->legacy_mem &&
rte_eal_has_hugepages())))
return RTE_PTR_DIFF(data_end, contig_seg_start);
static int
next_elem_is_adjacent(struct malloc_elem *elem)
{
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
return elem->next == RTE_PTR_ADD(elem, elem->size) &&
elem->next->msl == elem->msl &&
- (!internal_config.match_allocations ||
+ (!internal_conf->match_allocations ||
elem->orig_elem == elem->next->orig_elem);
}
static int
prev_elem_is_adjacent(struct malloc_elem *elem)
{
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
return elem == RTE_PTR_ADD(elem->prev, elem->prev->size) &&
elem->prev->msl == elem->msl &&
- (!internal_config.match_allocations ||
+ (!internal_conf->match_allocations ||
elem->orig_elem == elem->prev->orig_elem);
}
unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
int socket_id;
void *ret;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
rte_spinlock_lock(&(heap->lock));
align = align == 0 ? 1 : align;
/* for legacy mode, try once and with all flags */
- if (internal_config.legacy_mem) {
+ if (internal_conf->legacy_mem) {
ret = heap_alloc(heap, type, size, flags, align, bound, contig);
goto alloc_unlock;
}
struct rte_memseg_list *msl;
unsigned int i, n_segs, before_space, after_space;
int ret;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
return -1;
/* ...of which we can't avail if we are in legacy mode, or if this is an
* externally allocated segment.
*/
- if (internal_config.legacy_mem || (msl->external > 0))
+ if (internal_conf->legacy_mem || (msl->external > 0))
goto free_unlock;
/* check if we can free any memory back to the system */
* we will defer freeing these hugepages until the entire original allocation
* can be freed
*/
- if (internal_config.match_allocations && elem->size != elem->orig_size)
+ if (internal_conf->match_allocations && elem->size != elem->orig_size)
goto free_unlock;
/* probably, but let's make sure, as we may not be using up full page */
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
unsigned int i;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
- if (internal_config.match_allocations) {
+ if (internal_conf->match_allocations)
RTE_LOG(DEBUG, EAL, "Hugepages will be freed exactly as allocated.\n");
- }
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
/* assign min socket ID to external heaps */
sources += files(
'eal_common_bus.c',
'eal_common_class.c',
+ 'eal_common_config.c',
'eal_common_debug.c',
'eal_common_devargs.c',
'eal_common_dynmem.c',
'eal_common_bus.c',
'eal_common_cpuflags.c',
'eal_common_class.c',
+ 'eal_common_config.c',
'eal_common_debug.c',
'eal_common_devargs.c',
'eal_common_dev.c',
SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_dev.c
# from common dir
+SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_common_config.c
SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_common_lcore.c
SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_common_timer.c
SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_common_memzone.c
/* Allow the application to print its usage message too if set */
static rte_usage_hook_t rte_application_usage_hook = NULL;
-/* early configuration structure, when memory config is not mmapped */
-static struct rte_mem_config early_mem_config;
-
/* define fd variable here, because file needs to be kept open for the
* duration of the program, as we hold a write lock on it in the primary proc */
static int mem_cfg_fd = -1;
.l_type = F_WRLCK,
.l_whence = SEEK_SET,
.l_start = offsetof(struct rte_mem_config, memsegs),
- .l_len = sizeof(early_mem_config.memsegs),
-};
-
-/* Address of global and public configuration */
-static struct rte_config rte_config = {
- .mem_config = &early_mem_config,
+ .l_len = RTE_SIZEOF_FIELD(struct rte_mem_config, memsegs),
};
/* internal configuration (per-core) */
struct lcore_config lcore_config[RTE_MAX_LCORE];
-/* internal configuration */
-struct internal_config internal_config;
-
/* used by rte_rdtsc() */
int rte_cycles_vmware_tsc_map;
-/* platform-specific runtime dir */
-static char runtime_dir[PATH_MAX];
-
static const char *default_runtime_dir = "/var/run";
int
const char *directory = default_runtime_dir;
const char *xdg_runtime_dir = getenv("XDG_RUNTIME_DIR");
const char *fallback = "/tmp";
+ char run_dir[PATH_MAX];
char tmp[PATH_MAX];
int ret;
}
/* create prefix-specific subdirectory under DPDK runtime dir */
- ret = snprintf(runtime_dir, sizeof(runtime_dir), "%s/%s",
+ ret = snprintf(run_dir, sizeof(run_dir), "%s/%s",
tmp, eal_get_hugefile_prefix());
- if (ret < 0 || ret == sizeof(runtime_dir)) {
+ if (ret < 0 || ret == sizeof(run_dir)) {
RTE_LOG(ERR, EAL, "Error creating prefix-specific runtime path name\n");
return -1;
}
return -1;
}
- ret = mkdir(runtime_dir, 0700);
+ ret = mkdir(run_dir, 0700);
if (ret < 0 && errno != EEXIST) {
RTE_LOG(ERR, EAL, "Error creating '%s': %s\n",
- runtime_dir, strerror(errno));
+ run_dir, strerror(errno));
return -1;
}
+ if (eal_set_runtime_dir(run_dir, sizeof(run_dir)))
+ return -1;
+
return 0;
}
return 0;
}
-
-const char *
-rte_eal_get_runtime_dir(void)
-{
- return runtime_dir;
-}
-
-/* Return user provided mbuf pool ops name */
-const char *
-rte_eal_mbuf_user_pool_ops(void)
-{
- return internal_config.user_mbuf_pool_ops_name;
-}
-
-/* Return a pointer to the configuration structure */
-struct rte_config *
-rte_eal_get_configuration(void)
-{
- return &rte_config;
-}
-
-enum rte_iova_mode
-rte_eal_iova_mode(void)
-{
- return rte_eal_get_configuration()->iova_mode;
-}
-
/* parse a sysfs (or other) file containing one integer value */
int
eal_parse_sysfs_value(const char *filename, unsigned long *val)
static int
rte_eal_config_create(void)
{
+ struct rte_config *config = rte_eal_get_configuration();
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
size_t page_sz = sysconf(_SC_PAGE_SIZE);
- size_t cfg_len = sizeof(*rte_config.mem_config);
+ size_t cfg_len = sizeof(struct rte_mem_config);
size_t cfg_len_aligned = RTE_ALIGN(cfg_len, page_sz);
void *rte_mem_cfg_addr, *mapped_mem_cfg_addr;
int retval;
const char *pathname = eal_runtime_config_path();
- if (internal_config.no_shconf)
+ if (internal_conf->no_shconf)
return 0;
/* map the config before base address so that we don't waste a page */
- if (internal_config.base_virtaddr != 0)
+ if (internal_conf->base_virtaddr != 0)
rte_mem_cfg_addr = (void *)
- RTE_ALIGN_FLOOR(internal_config.base_virtaddr -
+ RTE_ALIGN_FLOOR(internal_conf->base_virtaddr -
sizeof(struct rte_mem_config), page_sz);
else
rte_mem_cfg_addr = NULL;
return -1;
}
- memcpy(rte_mem_cfg_addr, &early_mem_config, sizeof(early_mem_config));
- rte_config.mem_config = rte_mem_cfg_addr;
+ memcpy(rte_mem_cfg_addr, config->mem_config, sizeof(struct rte_mem_config));
+ config->mem_config = rte_mem_cfg_addr;
/* store address of the config in the config itself so that secondary
* processes could later map the config into this exact location
*/
- rte_config.mem_config->mem_cfg_addr = (uintptr_t) rte_mem_cfg_addr;
-
+ config->mem_config->mem_cfg_addr = (uintptr_t) rte_mem_cfg_addr;
return 0;
}
{
void *rte_mem_cfg_addr;
const char *pathname = eal_runtime_config_path();
+ struct rte_config *config = rte_eal_get_configuration();
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
- if (internal_config.no_shconf)
+ if (internal_conf->no_shconf)
return 0;
if (mem_cfg_fd < 0){
}
}
- rte_mem_cfg_addr = mmap(NULL, sizeof(*rte_config.mem_config),
+ rte_mem_cfg_addr = mmap(NULL, sizeof(*config->mem_config),
PROT_READ, MAP_SHARED, mem_cfg_fd, 0);
/* don't close the fd here, it will be closed on reattach */
if (rte_mem_cfg_addr == MAP_FAILED) {
return -1;
}
- rte_config.mem_config = rte_mem_cfg_addr;
+ config->mem_config = rte_mem_cfg_addr;
return 0;
}
{
struct rte_mem_config *mem_config;
void *rte_mem_cfg_addr;
+ struct rte_config *config = rte_eal_get_configuration();
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
- if (internal_config.no_shconf)
+ if (internal_conf->no_shconf)
return 0;
/* save the address primary process has mapped shared config to */
rte_mem_cfg_addr =
- (void *)(uintptr_t)rte_config.mem_config->mem_cfg_addr;
+ (void *)(uintptr_t)config->mem_config->mem_cfg_addr;
/* unmap original config */
- munmap(rte_config.mem_config, sizeof(struct rte_mem_config));
+ munmap(config->mem_config, sizeof(struct rte_mem_config));
/* remap the config at proper address */
mem_config = (struct rte_mem_config *) mmap(rte_mem_cfg_addr,
return -1;
}
- rte_config.mem_config = mem_config;
+ config->mem_config = mem_config;
return 0;
}
{
enum rte_proc_type_t ptype = RTE_PROC_PRIMARY;
const char *pathname = eal_runtime_config_path();
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* if there no shared config, there can be no secondary processes */
- if (!internal_config.no_shconf) {
+ if (!internal_conf->no_shconf) {
/* if we can open the file but not get a write-lock we are a
* secondary process. NOTE: if we get a file handle back, we
* keep that open and don't close it to prevent a race condition
static int
rte_config_init(void)
{
- rte_config.process_type = internal_config.process_type;
+ struct rte_config *config = rte_eal_get_configuration();
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
- switch (rte_config.process_type){
+ config->process_type = internal_conf->process_type;
+
+ switch (config->process_type) {
case RTE_PROC_PRIMARY:
if (rte_eal_config_create() < 0)
return -1;
case RTE_PROC_AUTO:
case RTE_PROC_INVALID:
RTE_LOG(ERR, EAL, "Invalid process type %d\n",
- rte_config.process_type);
+ config->process_type);
return -1;
}
{
uint64_t size = 0;
unsigned i, j;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
- for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
- struct hugepage_info *hpi = &internal_config.hugepage_info[i];
+ for (i = 0; i < internal_conf->num_hugepage_sizes; i++) {
+ struct hugepage_info *hpi = &internal_conf->hugepage_info[i];
if (strnlen(hpi->hugedir, sizeof(hpi->hugedir)) != 0) {
for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
size += hpi->hugepage_sz * hpi->num_pages[j];
const int old_optopt = optopt;
const int old_optreset = optreset;
char * const old_optarg = optarg;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
argvopt = argv;
optind = 1;
break;
ret = (opt == OPT_LOG_LEVEL_NUM) ?
- eal_parse_common_option(opt, optarg, &internal_config) : 0;
+ eal_parse_common_option(opt, optarg, internal_conf) : 0;
/* common parser is not happy */
if (ret < 0)
const int old_optopt = optopt;
const int old_optreset = optreset;
char * const old_optarg = optarg;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
argvopt = argv;
optind = 1;
goto out;
}
- ret = eal_parse_common_option(opt, optarg, &internal_config);
+ ret = eal_parse_common_option(opt, optarg, internal_conf);
/* common parser is not happy */
if (ret < 0) {
eal_usage(prgname);
RTE_LOG(ERR, EAL, "Could not store mbuf pool ops name\n");
else {
/* free old ops name */
- if (internal_config.user_mbuf_pool_ops_name !=
+ if (internal_conf->user_mbuf_pool_ops_name !=
NULL)
- free(internal_config.user_mbuf_pool_ops_name);
+ free(internal_conf->user_mbuf_pool_ops_name);
- internal_config.user_mbuf_pool_ops_name =
+ internal_conf->user_mbuf_pool_ops_name =
ops_name;
}
break;
}
/* create runtime data directory */
- if (internal_config.no_shconf == 0 &&
+ if (internal_conf->no_shconf == 0 &&
eal_create_runtime_dir() < 0) {
RTE_LOG(ERR, EAL, "Cannot create runtime directory\n");
ret = -1;
goto out;
}
- if (eal_adjust_config(&internal_config) != 0) {
+ if (eal_adjust_config(internal_conf) != 0) {
ret = -1;
goto out;
}
/* sanity checks */
- if (eal_check_common_options(&internal_config) != 0) {
+ if (eal_check_common_options(internal_conf) != 0) {
eal_usage(prgname);
ret = -1;
goto out;
eal_check_mem_on_local_socket(void)
{
int socket_id;
+ const struct rte_config *config = rte_eal_get_configuration();
- socket_id = rte_lcore_to_socket_id(rte_config.master_lcore);
+ socket_id = rte_lcore_to_socket_id(config->master_lcore);
if (rte_memseg_list_walk(check_socket, &socket_id) == 0)
RTE_LOG(WARNING, EAL, "WARNING: Master core has no memory on local socket!\n");
{
return 0;
}
-
-/* return non-zero if hugepages are enabled. */
-int rte_eal_has_hugepages(void)
-{
- return !internal_config.no_hugetlbfs;
-}
-
/* Abstraction for port I/0 privilege */
int
rte_eal_iopl_init(void)
static rte_atomic32_t run_once = RTE_ATOMIC32_INIT(0);
char cpuset[RTE_CPU_AFFINITY_STR_LEN];
char thread_name[RTE_MAX_THREAD_NAME_LEN];
+ const struct rte_config *config = rte_eal_get_configuration();
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* checks if the machine is adequate */
if (!rte_cpu_is_supported()) {
thread_id = pthread_self();
- eal_reset_internal_config(&internal_config);
+ eal_reset_internal_config(internal_conf);
/* clone argv to report out later in telemetry */
eal_save_args(argc, argv);
}
/* FreeBSD always uses legacy memory model */
- internal_config.legacy_mem = true;
+ internal_conf->legacy_mem = true;
if (eal_plugins_init() < 0) {
rte_eal_init_alert("Cannot init plugins");
}
/* if no EAL option "--iova-mode=<pa|va>", use bus IOVA scheme */
- if (internal_config.iova_mode == RTE_IOVA_DC) {
+ if (internal_conf->iova_mode == RTE_IOVA_DC) {
/* autodetect the IOVA mapping mode (default is RTE_IOVA_PA) */
enum rte_iova_mode iova_mode = rte_bus_get_iommu_class();
rte_eal_get_configuration()->iova_mode = iova_mode;
} else {
rte_eal_get_configuration()->iova_mode =
- internal_config.iova_mode;
+ internal_conf->iova_mode;
}
RTE_LOG(INFO, EAL, "Selected IOVA mode '%s'\n",
rte_eal_iova_mode() == RTE_IOVA_PA ? "PA" : "VA");
- if (internal_config.no_hugetlbfs == 0) {
+ if (internal_conf->no_hugetlbfs == 0) {
/* rte_config isn't initialized yet */
- ret = internal_config.process_type == RTE_PROC_PRIMARY ?
+ ret = internal_conf->process_type == RTE_PROC_PRIMARY ?
eal_hugepage_info_init() :
eal_hugepage_info_read();
if (ret < 0) {
}
}
- if (internal_config.memory == 0 && internal_config.force_sockets == 0) {
- if (internal_config.no_hugetlbfs)
- internal_config.memory = MEMSIZE_IF_NO_HUGE_PAGE;
+ if (internal_conf->memory == 0 && internal_conf->force_sockets == 0) {
+ if (internal_conf->no_hugetlbfs)
+ internal_conf->memory = MEMSIZE_IF_NO_HUGE_PAGE;
else
- internal_config.memory = eal_get_hugepage_mem_size();
+ internal_conf->memory = eal_get_hugepage_mem_size();
}
- if (internal_config.vmware_tsc_map == 1) {
+ if (internal_conf->vmware_tsc_map == 1) {
#ifdef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
rte_cycles_vmware_tsc_map = 1;
RTE_LOG (DEBUG, EAL, "Using VMWARE TSC MAP, "
eal_check_mem_on_local_socket();
- eal_thread_init_master(rte_config.master_lcore);
+ eal_thread_init_master(config->master_lcore);
ret = eal_thread_dump_affinity(cpuset, sizeof(cpuset));
RTE_LOG(DEBUG, EAL, "Master lcore %u is ready (tid=%p;cpuset=[%s%s])\n",
- rte_config.master_lcore, thread_id, cpuset,
+ config->master_lcore, thread_id, cpuset,
ret == 0 ? "" : "...");
RTE_LCORE_FOREACH_SLAVE(i) {
* In no_shconf mode, no runtime directory is created in the first
* place, so no cleanup needed.
*/
- if (!internal_config.no_shconf && eal_clean_runtime_dir() < 0) {
+ if (!internal_conf->no_shconf && eal_clean_runtime_dir() < 0) {
rte_eal_init_alert("Cannot clear runtime directory");
return -1;
}
- if (!internal_config.no_telemetry) {
+ if (!internal_conf->no_telemetry) {
const char *error_str = NULL;
if (rte_telemetry_init(rte_eal_get_runtime_dir(),
- &internal_config.ctrl_cpuset, &error_str)
+ &internal_conf->ctrl_cpuset, &error_str)
!= 0) {
rte_eal_init_alert(error_str);
return -1;
int
rte_eal_cleanup(void)
{
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
rte_service_finalize();
rte_mp_channel_cleanup();
rte_trace_save();
eal_trace_fini();
- eal_cleanup_config(&internal_config);
+ eal_cleanup_config(internal_conf);
return 0;
}
-enum rte_proc_type_t
-rte_eal_process_type(void)
-{
- return rte_config.process_type;
-}
-
-int rte_eal_has_pci(void)
-{
- return !internal_config.no_pci;
-}
-
int rte_eal_create_uio_dev(void)
{
- return internal_config.create_uio_dev;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+ return internal_conf->create_uio_dev;
}
enum rte_intr_mode
#include <rte_log.h>
#include <fcntl.h>
+
+#include "eal_private.h"
#include "eal_hugepages.h"
#include "eal_internal_cfg.h"
#include "eal_filesystem.h"
size_t sysctl_size;
int num_buffers, fd, error;
int64_t buffer_size;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
/* re-use the linux "internal config" structure for our memory data */
- struct hugepage_info *hpi = &internal_config.hugepage_info[0];
+ struct hugepage_info *hpi = &internal_conf->hugepage_info[0];
struct hugepage_info *tmp_hpi;
unsigned int i;
- internal_config.num_hugepage_sizes = 1;
+ internal_conf->num_hugepage_sizes = 1;
sysctl_size = sizeof(num_buffers);
error = sysctlbyname("hw.contigmem.num_buffers", &num_buffers,
hpi->lock_descriptor = fd;
/* for no shared files mode, do not create shared memory config */
- if (internal_config.no_shconf)
+ if (internal_conf->no_shconf)
return 0;
tmp_hpi = create_shared_memory(eal_hugepage_info_path(),
- sizeof(internal_config.hugepage_info));
+ sizeof(internal_conf->hugepage_info));
if (tmp_hpi == NULL ) {
RTE_LOG(ERR, EAL, "Failed to create shared memory!\n");
return -1;
}
- memcpy(tmp_hpi, hpi, sizeof(internal_config.hugepage_info));
+ memcpy(tmp_hpi, hpi, sizeof(internal_conf->hugepage_info));
/* we've copied file descriptors along with everything else, but they
* will be invalid in secondary process, so overwrite them
*/
- for (i = 0; i < RTE_DIM(internal_config.hugepage_info); i++) {
+ for (i = 0; i < RTE_DIM(internal_conf->hugepage_info); i++) {
struct hugepage_info *tmp = &tmp_hpi[i];
tmp->lock_descriptor = -1;
}
- if (munmap(tmp_hpi, sizeof(internal_config.hugepage_info)) < 0) {
+ if (munmap(tmp_hpi, sizeof(internal_conf->hugepage_info)) < 0) {
RTE_LOG(ERR, EAL, "Failed to unmap shared memory!\n");
return -1;
}
int
eal_hugepage_info_read(void)
{
- struct hugepage_info *hpi = &internal_config.hugepage_info[0];
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
+ struct hugepage_info *hpi = &internal_conf->hugepage_info[0];
struct hugepage_info *tmp_hpi;
- internal_config.num_hugepage_sizes = 1;
+ internal_conf->num_hugepage_sizes = 1;
tmp_hpi = open_shared_memory(eal_hugepage_info_path(),
- sizeof(internal_config.hugepage_info));
+ sizeof(internal_conf->hugepage_info));
if (tmp_hpi == NULL) {
RTE_LOG(ERR, EAL, "Failed to open shared memory!\n");
return -1;
}
- memcpy(hpi, tmp_hpi, sizeof(internal_config.hugepage_info));
+ memcpy(hpi, tmp_hpi, sizeof(internal_conf->hugepage_info));
- if (munmap(tmp_hpi, sizeof(internal_config.hugepage_info)) < 0) {
+ if (munmap(tmp_hpi, sizeof(internal_conf->hugepage_info)) < 0) {
RTE_LOG(ERR, EAL, "Failed to unmap shared memory!\n");
return -1;
}
uint64_t total_mem = 0;
void *addr;
unsigned int i, j, seg_idx = 0;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* get pointer to global configuration */
mcfg = rte_eal_get_configuration()->mem_config;
/* for debug purposes, hugetlbfs can be disabled */
- if (internal_config.no_hugetlbfs) {
+ if (internal_conf->no_hugetlbfs) {
struct rte_memseg_list *msl;
uint64_t mem_sz, page_sz;
int n_segs;
/* create a memseg list */
msl = &mcfg->memsegs[0];
- mem_sz = internal_config.memory;
+ mem_sz = internal_conf->memory;
page_sz = RTE_PGSIZE_4K;
n_segs = mem_sz / page_sz;
}
/* map all hugepages and sort them */
- for (i = 0; i < internal_config.num_hugepage_sizes; i ++){
+ for (i = 0; i < internal_conf->num_hugepage_sizes; i++) {
struct hugepage_info *hpi;
rte_iova_t prev_end = 0;
int prev_ms_idx = -1;
uint64_t page_sz, mem_needed;
unsigned int n_pages, max_pages;
- hpi = &internal_config.hugepage_info[i];
+ hpi = &internal_conf->hugepage_info[i];
page_sz = hpi->hugepage_sz;
max_pages = hpi->num_pages[0];
- mem_needed = RTE_ALIGN_CEIL(internal_config.memory - total_mem,
+ mem_needed = RTE_ALIGN_CEIL(internal_conf->memory - total_mem,
page_sz);
n_pages = RTE_MIN(mem_needed / page_sz, max_pages);
total_mem += seg->len;
}
- if (total_mem >= internal_config.memory)
+ if (total_mem >= internal_conf->memory)
break;
}
- if (total_mem < internal_config.memory) {
+ if (total_mem < internal_conf->memory) {
RTE_LOG(ERR, EAL, "Couldn't reserve requested memory, "
"requested: %" PRIu64 "M "
"available: %" PRIu64 "M\n",
- internal_config.memory >> 20, total_mem >> 20);
+ internal_conf->memory >> 20, total_mem >> 20);
return -1;
}
return 0;
int
rte_eal_hugepage_attach(void)
{
- const struct hugepage_info *hpi;
+ struct hugepage_info *hpi;
int fd_hugepage = -1;
unsigned int i;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
- hpi = &internal_config.hugepage_info[0];
+ hpi = &internal_conf->hugepage_info[0];
- for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
+ for (i = 0; i < internal_conf->num_hugepage_sizes; i++) {
const struct hugepage_info *cur_hpi = &hpi[i];
struct attach_walk_args wa;
int hpi_idx, msl_idx = 0;
struct rte_memseg_list *msl;
uint64_t max_mem, total_mem;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* no-huge does not need this at all */
- if (internal_config.no_hugetlbfs)
+ if (internal_conf->no_hugetlbfs)
return 0;
/* FreeBSD has an issue where core dump will dump the entire memory
total_mem = 0;
/* create memseg lists */
- for (hpi_idx = 0; hpi_idx < (int) internal_config.num_hugepage_sizes;
+ for (hpi_idx = 0; hpi_idx < (int) internal_conf->num_hugepage_sizes;
hpi_idx++) {
uint64_t max_type_mem, total_type_mem = 0;
uint64_t avail_mem;
struct hugepage_info *hpi;
uint64_t hugepage_sz;
- hpi = &internal_config.hugepage_info[hpi_idx];
+ hpi = &internal_conf->hugepage_info[hpi_idx];
hugepage_sz = hpi->hugepage_sz;
/* no NUMA support on FreeBSD */
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_dev.c
# from common dir
+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_config.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_lcore.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_timer.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_memzone.c
/* Allow the application to print its usage message too if set */
static rte_usage_hook_t rte_application_usage_hook = NULL;
-/* early configuration structure, when memory config is not mmapped */
-static struct rte_mem_config early_mem_config;
-
/* define fd variable here, because file needs to be kept open for the
* duration of the program, as we hold a write lock on it in the primary proc */
static int mem_cfg_fd = -1;
.l_type = F_WRLCK,
.l_whence = SEEK_SET,
.l_start = offsetof(struct rte_mem_config, memsegs),
- .l_len = sizeof(early_mem_config.memsegs),
-};
-
-/* Address of global and public configuration */
-static struct rte_config rte_config = {
- .mem_config = &early_mem_config,
+ .l_len = RTE_SIZEOF_FIELD(struct rte_mem_config, memsegs),
};
/* internal configuration (per-core) */
struct lcore_config lcore_config[RTE_MAX_LCORE];
-/* internal configuration */
-struct internal_config internal_config;
-
/* used by rte_rdtsc() */
int rte_cycles_vmware_tsc_map;
-/* platform-specific runtime dir */
-static char runtime_dir[PATH_MAX];
-
static const char *default_runtime_dir = "/var/run";
int
const char *directory = default_runtime_dir;
const char *xdg_runtime_dir = getenv("XDG_RUNTIME_DIR");
const char *fallback = "/tmp";
+ char run_dir[PATH_MAX];
char tmp[PATH_MAX];
int ret;
}
/* create prefix-specific subdirectory under DPDK runtime dir */
- ret = snprintf(runtime_dir, sizeof(runtime_dir), "%s/%s",
+ ret = snprintf(run_dir, sizeof(run_dir), "%s/%s",
tmp, eal_get_hugefile_prefix());
- if (ret < 0 || ret == sizeof(runtime_dir)) {
+ if (ret < 0 || ret == sizeof(run_dir)) {
RTE_LOG(ERR, EAL, "Error creating prefix-specific runtime path name\n");
return -1;
}
return -1;
}
- ret = mkdir(runtime_dir, 0700);
+ ret = mkdir(run_dir, 0700);
if (ret < 0 && errno != EEXIST) {
RTE_LOG(ERR, EAL, "Error creating '%s': %s\n",
- runtime_dir, strerror(errno));
+ run_dir, strerror(errno));
return -1;
}
+ if (eal_set_runtime_dir(run_dir, sizeof(run_dir)))
+ return -1;
+
return 0;
}
int
eal_clean_runtime_dir(void)
{
+ const char *runtime_dir = rte_eal_get_runtime_dir();
DIR *dir;
struct dirent *dirent;
int dir_fd, fd, lck_result;
return -1;
}
-const char *
-rte_eal_get_runtime_dir(void)
-{
- return runtime_dir;
-}
-
-/* Return user provided mbuf pool ops name */
-const char *
-rte_eal_mbuf_user_pool_ops(void)
-{
- return internal_config.user_mbuf_pool_ops_name;
-}
-
-/* Return a pointer to the configuration structure */
-struct rte_config *
-rte_eal_get_configuration(void)
-{
- return &rte_config;
-}
-
-enum rte_iova_mode
-rte_eal_iova_mode(void)
-{
- return rte_eal_get_configuration()->iova_mode;
-}
-
/* parse a sysfs (or other) file containing one integer value */
int
eal_parse_sysfs_value(const char *filename, unsigned long *val)
static int
rte_eal_config_create(void)
{
+ struct rte_config *config = rte_eal_get_configuration();
size_t page_sz = sysconf(_SC_PAGE_SIZE);
- size_t cfg_len = sizeof(*rte_config.mem_config);
+ size_t cfg_len = sizeof(*config->mem_config);
size_t cfg_len_aligned = RTE_ALIGN(cfg_len, page_sz);
void *rte_mem_cfg_addr, *mapped_mem_cfg_addr;
int retval;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
const char *pathname = eal_runtime_config_path();
- if (internal_config.no_shconf)
+ if (internal_conf->no_shconf)
return 0;
/* map the config before hugepage address so that we don't waste a page */
- if (internal_config.base_virtaddr != 0)
+ if (internal_conf->base_virtaddr != 0)
rte_mem_cfg_addr = (void *)
- RTE_ALIGN_FLOOR(internal_config.base_virtaddr -
+ RTE_ALIGN_FLOOR(internal_conf->base_virtaddr -
sizeof(struct rte_mem_config), page_sz);
else
rte_mem_cfg_addr = NULL;
return -1;
}
- memcpy(rte_mem_cfg_addr, &early_mem_config, sizeof(early_mem_config));
- rte_config.mem_config = rte_mem_cfg_addr;
+ memcpy(rte_mem_cfg_addr, config->mem_config, sizeof(struct rte_mem_config));
+ config->mem_config = rte_mem_cfg_addr;
/* store address of the config in the config itself so that secondary
- * processes could later map the config into this exact location */
- rte_config.mem_config->mem_cfg_addr = (uintptr_t) rte_mem_cfg_addr;
-
- rte_config.mem_config->dma_maskbits = 0;
+ * processes could later map the config into this exact location
+ */
+ config->mem_config->mem_cfg_addr = (uintptr_t) rte_mem_cfg_addr;
+ config->mem_config->dma_maskbits = 0;
return 0;
}
static int
rte_eal_config_attach(void)
{
+ struct rte_config *config = rte_eal_get_configuration();
struct rte_mem_config *mem_config;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
const char *pathname = eal_runtime_config_path();
- if (internal_config.no_shconf)
+ if (internal_conf->no_shconf)
return 0;
if (mem_cfg_fd < 0){
return -1;
}
- rte_config.mem_config = mem_config;
+ config->mem_config = mem_config;
return 0;
}
static int
rte_eal_config_reattach(void)
{
+ struct rte_config *config = rte_eal_get_configuration();
struct rte_mem_config *mem_config;
void *rte_mem_cfg_addr;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
- if (internal_config.no_shconf)
+ if (internal_conf->no_shconf)
return 0;
/* save the address primary process has mapped shared config to */
- rte_mem_cfg_addr = (void *) (uintptr_t) rte_config.mem_config->mem_cfg_addr;
+ rte_mem_cfg_addr =
+ (void *) (uintptr_t) config->mem_config->mem_cfg_addr;
/* unmap original config */
- munmap(rte_config.mem_config, sizeof(struct rte_mem_config));
+ munmap(config->mem_config, sizeof(struct rte_mem_config));
/* remap the config at proper address */
mem_config = (struct rte_mem_config *) mmap(rte_mem_cfg_addr,
return -1;
}
- rte_config.mem_config = mem_config;
+ config->mem_config = mem_config;
return 0;
}
{
enum rte_proc_type_t ptype = RTE_PROC_PRIMARY;
const char *pathname = eal_runtime_config_path();
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* if there no shared config, there can be no secondary processes */
- if (!internal_config.no_shconf) {
+ if (!internal_conf->no_shconf) {
/* if we can open the file but not get a write-lock we are a
* secondary process. NOTE: if we get a file handle back, we
* keep that open and don't close it to prevent a race condition
static int
rte_config_init(void)
{
- rte_config.process_type = internal_config.process_type;
+ struct rte_config *config = rte_eal_get_configuration();
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
- switch (rte_config.process_type){
+ config->process_type = internal_conf->process_type;
+
+ switch (config->process_type) {
case RTE_PROC_PRIMARY:
if (rte_eal_config_create() < 0)
return -1;
case RTE_PROC_AUTO:
case RTE_PROC_INVALID:
RTE_LOG(ERR, EAL, "Invalid process type %d\n",
- rte_config.process_type);
+ config->process_type);
return -1;
}
eal_hugedirs_unlock(void)
{
int i;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
for (i = 0; i < MAX_HUGEPAGE_SIZES; i++)
{
/* skip uninitialized */
- if (internal_config.hugepage_info[i].lock_descriptor < 0)
+ if (internal_conf->hugepage_info[i].lock_descriptor < 0)
continue;
/* unlock hugepage file */
- flock(internal_config.hugepage_info[i].lock_descriptor, LOCK_UN);
- close(internal_config.hugepage_info[i].lock_descriptor);
+ flock(internal_conf->hugepage_info[i].lock_descriptor, LOCK_UN);
+ close(internal_conf->hugepage_info[i].lock_descriptor);
/* reset the field */
- internal_config.hugepage_info[i].lock_descriptor = -1;
+ internal_conf->hugepage_info[i].lock_descriptor = -1;
}
}
static int
eal_parse_vfio_intr(const char *mode)
{
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
unsigned i;
static struct {
const char *name;
for (i = 0; i < RTE_DIM(map); i++) {
if (!strcmp(mode, map[i].name)) {
- internal_config.vfio_intr_mode = map[i].value;
+ internal_conf->vfio_intr_mode = map[i].value;
return 0;
}
}
const int old_optind = optind;
const int old_optopt = optopt;
char * const old_optarg = optarg;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
argvopt = argv;
optind = 1;
break;
ret = (opt == OPT_LOG_LEVEL_NUM) ?
- eal_parse_common_option(opt, optarg, &internal_config) : 0;
+ eal_parse_common_option(opt, optarg, internal_conf) : 0;
/* common parser is not happy */
if (ret < 0)
const int old_optind = optind;
const int old_optopt = optopt;
char * const old_optarg = optarg;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
argvopt = argv;
optind = 1;
goto out;
}
- ret = eal_parse_common_option(opt, optarg, &internal_config);
+ ret = eal_parse_common_option(opt, optarg, internal_conf);
/* common parser is not happy */
if (ret < 0) {
eal_usage(prgname);
RTE_LOG(ERR, EAL, "Could not store hugepage directory\n");
else {
/* free old hugepage dir */
- if (internal_config.hugepage_dir != NULL)
- free(internal_config.hugepage_dir);
- internal_config.hugepage_dir = hdir;
+ if (internal_conf->hugepage_dir != NULL)
+ free(internal_conf->hugepage_dir);
+ internal_conf->hugepage_dir = hdir;
}
break;
}
RTE_LOG(ERR, EAL, "Could not store file prefix\n");
else {
/* free old prefix */
- if (internal_config.hugefile_prefix != NULL)
- free(internal_config.hugefile_prefix);
- internal_config.hugefile_prefix = prefix;
+ if (internal_conf->hugefile_prefix != NULL)
+ free(internal_conf->hugefile_prefix);
+ internal_conf->hugefile_prefix = prefix;
}
break;
}
case OPT_SOCKET_MEM_NUM:
if (eal_parse_socket_arg(optarg,
- internal_config.socket_mem) < 0) {
+ internal_conf->socket_mem) < 0) {
RTE_LOG(ERR, EAL, "invalid parameters for --"
OPT_SOCKET_MEM "\n");
eal_usage(prgname);
ret = -1;
goto out;
}
- internal_config.force_sockets = 1;
+ internal_conf->force_sockets = 1;
break;
case OPT_SOCKET_LIMIT_NUM:
if (eal_parse_socket_arg(optarg,
- internal_config.socket_limit) < 0) {
+ internal_conf->socket_limit) < 0) {
RTE_LOG(ERR, EAL, "invalid parameters for --"
OPT_SOCKET_LIMIT "\n");
eal_usage(prgname);
ret = -1;
goto out;
}
- internal_config.force_socket_limits = 1;
+ internal_conf->force_socket_limits = 1;
break;
case OPT_VFIO_INTR_NUM:
break;
case OPT_CREATE_UIO_DEV_NUM:
- internal_config.create_uio_dev = 1;
+ internal_conf->create_uio_dev = 1;
break;
case OPT_MBUF_POOL_OPS_NAME_NUM:
RTE_LOG(ERR, EAL, "Could not store mbuf pool ops name\n");
else {
/* free old ops name */
- if (internal_config.user_mbuf_pool_ops_name !=
+ if (internal_conf->user_mbuf_pool_ops_name !=
NULL)
- free(internal_config.user_mbuf_pool_ops_name);
+ free(internal_conf->user_mbuf_pool_ops_name);
- internal_config.user_mbuf_pool_ops_name =
+ internal_conf->user_mbuf_pool_ops_name =
ops_name;
}
break;
}
case OPT_MATCH_ALLOCATIONS_NUM:
- internal_config.match_allocations = 1;
+ internal_conf->match_allocations = 1;
break;
default:
}
/* create runtime data directory */
- if (internal_config.no_shconf == 0 &&
+ if (internal_conf->no_shconf == 0 &&
eal_create_runtime_dir() < 0) {
RTE_LOG(ERR, EAL, "Cannot create runtime directory\n");
ret = -1;
goto out;
}
- if (eal_adjust_config(&internal_config) != 0) {
+ if (eal_adjust_config(internal_conf) != 0) {
ret = -1;
goto out;
}
/* sanity checks */
- if (eal_check_common_options(&internal_config) != 0) {
+ if (eal_check_common_options(internal_conf) != 0) {
eal_usage(prgname);
ret = -1;
goto out;
eal_check_mem_on_local_socket(void)
{
int socket_id;
+ const struct rte_config *config = rte_eal_get_configuration();
- socket_id = rte_lcore_to_socket_id(rte_config.master_lcore);
+ socket_id = rte_lcore_to_socket_id(config->master_lcore);
if (rte_memseg_list_walk(check_socket, &socket_id) == 0)
RTE_LOG(WARNING, EAL, "WARNING: Master core has no memory on local socket!\n");
char cpuset[RTE_CPU_AFFINITY_STR_LEN];
char thread_name[RTE_MAX_THREAD_NAME_LEN];
bool phys_addrs;
+ const struct rte_config *config = rte_eal_get_configuration();
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* checks if the machine is adequate */
if (!rte_cpu_is_supported()) {
strlcpy(logid, p ? p + 1 : argv[0], sizeof(logid));
thread_id = pthread_self();
- eal_reset_internal_config(&internal_config);
+ eal_reset_internal_config(internal_conf);
/* set log level as early as possible */
eal_log_level_parse(argc, argv);
phys_addrs = rte_eal_using_phys_addrs() != 0;
/* if no EAL option "--iova-mode=<pa|va>", use bus IOVA scheme */
- if (internal_config.iova_mode == RTE_IOVA_DC) {
+ if (internal_conf->iova_mode == RTE_IOVA_DC) {
/* autodetect the IOVA mapping mode */
enum rte_iova_mode iova_mode = rte_bus_get_iommu_class();
rte_eal_get_configuration()->iova_mode = iova_mode;
} else {
rte_eal_get_configuration()->iova_mode =
- internal_config.iova_mode;
+ internal_conf->iova_mode;
}
if (rte_eal_iova_mode() == RTE_IOVA_PA && !phys_addrs) {
RTE_LOG(INFO, EAL, "Selected IOVA mode '%s'\n",
rte_eal_iova_mode() == RTE_IOVA_PA ? "PA" : "VA");
- if (internal_config.no_hugetlbfs == 0) {
+ if (internal_conf->no_hugetlbfs == 0) {
/* rte_config isn't initialized yet */
- ret = internal_config.process_type == RTE_PROC_PRIMARY ?
+ ret = internal_conf->process_type == RTE_PROC_PRIMARY ?
eal_hugepage_info_init() :
eal_hugepage_info_read();
if (ret < 0) {
}
}
- if (internal_config.memory == 0 && internal_config.force_sockets == 0) {
- if (internal_config.no_hugetlbfs)
- internal_config.memory = MEMSIZE_IF_NO_HUGE_PAGE;
+ if (internal_conf->memory == 0 && internal_conf->force_sockets == 0) {
+ if (internal_conf->no_hugetlbfs)
+ internal_conf->memory = MEMSIZE_IF_NO_HUGE_PAGE;
}
- if (internal_config.vmware_tsc_map == 1) {
+ if (internal_conf->vmware_tsc_map == 1) {
#ifdef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
rte_cycles_vmware_tsc_map = 1;
RTE_LOG (DEBUG, EAL, "Using VMWARE TSC MAP, "
#endif
}
- if (rte_eal_log_init(logid, internal_config.syslog_facility) < 0) {
+ if (rte_eal_log_init(logid, internal_conf->syslog_facility) < 0) {
rte_eal_init_alert("Cannot init logging.");
rte_errno = ENOMEM;
rte_atomic32_clear(&run_once);
eal_check_mem_on_local_socket();
- eal_thread_init_master(rte_config.master_lcore);
+ eal_thread_init_master(config->master_lcore);
ret = eal_thread_dump_affinity(cpuset, sizeof(cpuset));
RTE_LOG(DEBUG, EAL, "Master lcore %u is ready (tid=%zx;cpuset=[%s%s])\n",
- rte_config.master_lcore, (uintptr_t)thread_id, cpuset,
+ config->master_lcore, (uintptr_t)thread_id, cpuset,
ret == 0 ? "" : "...");
RTE_LCORE_FOREACH_SLAVE(i) {
* In no_shconf mode, no runtime directory is created in the first
* place, so no cleanup needed.
*/
- if (!internal_config.no_shconf && eal_clean_runtime_dir() < 0) {
+ if (!internal_conf->no_shconf && eal_clean_runtime_dir() < 0) {
rte_eal_init_alert("Cannot clear runtime directory");
return -1;
}
- if (!internal_config.no_telemetry) {
+ if (!internal_conf->no_telemetry) {
const char *error_str = NULL;
if (rte_telemetry_init(rte_eal_get_runtime_dir(),
- &internal_config.ctrl_cpuset, &error_str)
+ &internal_conf->ctrl_cpuset, &error_str)
!= 0) {
rte_eal_init_alert(error_str);
return -1;
/* if we're in a primary process, we need to mark hugepages as freeable
* so that finalization can release them back to the system.
*/
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
rte_memseg_walk(mark_freeable, NULL);
rte_service_finalize();
rte_mp_channel_cleanup();
rte_trace_save();
eal_trace_fini();
- eal_cleanup_config(&internal_config);
+ eal_cleanup_config(internal_conf);
return 0;
}
-enum rte_proc_type_t
-rte_eal_process_type(void)
-{
- return rte_config.process_type;
-}
-
-int rte_eal_has_hugepages(void)
-{
- return ! internal_config.no_hugetlbfs;
-}
-
-int rte_eal_has_pci(void)
-{
- return !internal_config.no_pci;
-}
-
int rte_eal_create_uio_dev(void)
{
- return internal_config.create_uio_dev;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
+ return internal_conf->create_uio_dev;
}
enum rte_intr_mode
rte_eal_vfio_intr_mode(void)
{
- return internal_config.vfio_intr_mode;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
+ return internal_conf->vfio_intr_mode;
}
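With the configuration moved behind getters, remaining direct users follow the same pattern as the two small wrappers above. A minimal sketch, assuming nothing beyond the getters this patch introduces (the function name is illustrative only):

/* Sketch: read-only access goes through the common getters instead of
 * per-OS file-scope globals, so the same code compiles on Unix and Windows.
 */
static int
uses_hugepages_in_primary(void)
{
        const struct internal_config *internal_conf =
                eal_get_internal_configuration();
        const struct rte_config *config = rte_eal_get_configuration();

        if (config->process_type != RTE_PROC_PRIMARY)
                return 0;

        return !internal_conf->no_hugetlbfs;
}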
int
#include <rte_log.h>
#include <rte_common.h>
#include "rte_string_fns.h"
+
+#include "eal_private.h"
#include "eal_internal_cfg.h"
#include "eal_hugepages.h"
#include "eal_filesystem.h"
char *splitstr[_FIELDNAME_MAX];
char buf[BUFSIZ];
int retval = -1;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
FILE *fd = fopen(proc_mounts, "r");
if (fd == NULL)
}
/* we have a specified --huge-dir option, only examine that dir */
- if (internal_config.hugepage_dir != NULL &&
- strcmp(splitstr[MOUNTPT], internal_config.hugepage_dir) != 0)
+ if (internal_conf->hugepage_dir != NULL &&
+ strcmp(splitstr[MOUNTPT], internal_conf->hugepage_dir) != 0)
continue;
if (strncmp(splitstr[FSTYPE], hugetlbfs_str, htlbfs_str_len) == 0){
{
uint64_t total_pages = 0;
unsigned int i;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/*
* first, try to put all hugepages into relevant sockets, but
*/
total_pages = 0;
/* we also don't want to do this for legacy init */
- if (!internal_config.legacy_mem)
+ if (!internal_conf->legacy_mem)
for (i = 0; i < rte_socket_count(); i++) {
int socket = rte_socket_id_by_idx(i);
unsigned int num_pages =
unsigned int i, num_sizes = 0;
DIR *dir;
struct dirent *dirent;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
dir = opendir(sys_dir_path);
if (dir == NULL) {
if (num_sizes >= MAX_HUGEPAGE_SIZES)
break;
- hpi = &internal_config.hugepage_info[num_sizes];
+ hpi = &internal_conf->hugepage_info[num_sizes];
hpi->hugepage_sz =
rte_str_to_size(&dirent->d_name[dirent_start_len]);
* init process.
*/
#ifdef MAP_HUGE_SHIFT
- if (internal_config.in_memory) {
+ if (internal_conf->in_memory) {
RTE_LOG(DEBUG, EAL, "In-memory mode enabled, "
"hugepages of size %" PRIu64 " bytes "
"will be allocated anonymously\n",
if (dirent != NULL)
return -1;
- internal_config.num_hugepage_sizes = num_sizes;
+ internal_conf->num_hugepage_sizes = num_sizes;
/* sort the page directory entries by size, largest to smallest */
- qsort(&internal_config.hugepage_info[0], num_sizes,
- sizeof(internal_config.hugepage_info[0]), compare_hpi);
+ qsort(&internal_conf->hugepage_info[0], num_sizes,
+ sizeof(internal_conf->hugepage_info[0]), compare_hpi);
/* now we have all info, check we have at least one valid size */
for (i = 0; i < num_sizes; i++) {
/* pages may no longer all be on socket 0, so check all */
unsigned int j, num_pages = 0;
- struct hugepage_info *hpi = &internal_config.hugepage_info[i];
+ struct hugepage_info *hpi = &internal_conf->hugepage_info[i];
for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
num_pages += hpi->num_pages[j];
{
struct hugepage_info *hpi, *tmp_hpi;
unsigned int i;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
if (hugepage_info_init() < 0)
return -1;
/* for no shared files mode, we're done */
- if (internal_config.no_shconf)
+ if (internal_conf->no_shconf)
return 0;
- hpi = &internal_config.hugepage_info[0];
+ hpi = &internal_conf->hugepage_info[0];
tmp_hpi = create_shared_memory(eal_hugepage_info_path(),
- sizeof(internal_config.hugepage_info));
+ sizeof(internal_conf->hugepage_info));
if (tmp_hpi == NULL) {
RTE_LOG(ERR, EAL, "Failed to create shared memory!\n");
return -1;
}
- memcpy(tmp_hpi, hpi, sizeof(internal_config.hugepage_info));
+ memcpy(tmp_hpi, hpi, sizeof(internal_conf->hugepage_info));
/* we've copied file descriptors along with everything else, but they
* will be invalid in secondary process, so overwrite them
*/
- for (i = 0; i < RTE_DIM(internal_config.hugepage_info); i++) {
+ for (i = 0; i < RTE_DIM(internal_conf->hugepage_info); i++) {
struct hugepage_info *tmp = &tmp_hpi[i];
tmp->lock_descriptor = -1;
}
- if (munmap(tmp_hpi, sizeof(internal_config.hugepage_info)) < 0) {
+ if (munmap(tmp_hpi, sizeof(internal_conf->hugepage_info)) < 0) {
RTE_LOG(ERR, EAL, "Failed to unmap shared memory!\n");
return -1;
}
int eal_hugepage_info_read(void)
{
- struct hugepage_info *hpi = &internal_config.hugepage_info[0];
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+ struct hugepage_info *hpi = &internal_conf->hugepage_info[0];
struct hugepage_info *tmp_hpi;
tmp_hpi = open_shared_memory(eal_hugepage_info_path(),
- sizeof(internal_config.hugepage_info));
+ sizeof(internal_conf->hugepage_info));
if (tmp_hpi == NULL) {
RTE_LOG(ERR, EAL, "Failed to open shared memory!\n");
return -1;
}
- memcpy(hpi, tmp_hpi, sizeof(internal_config.hugepage_info));
+ memcpy(hpi, tmp_hpi, sizeof(internal_conf->hugepage_info));
- if (munmap(tmp_hpi, sizeof(internal_config.hugepage_info)) < 0) {
+ if (munmap(tmp_hpi, sizeof(internal_conf->hugepage_info)) < 0) {
RTE_LOG(ERR, EAL, "Failed to unmap shared memory!\n");
return -1;
}
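The two hunks above keep the existing primary/secondary flow for hugepage information: the primary copies internal_conf->hugepage_info into a shared file (and invalidates the copied lock descriptors), and the secondary maps the same file and copies it back. A compressed sketch of that flow, reusing only helpers already visible in the hunks; error handling and the lock-descriptor reset are omitted:

/* Sketch: primary publishes hugepage_info, secondary consumes it. */
struct internal_config *cfg = eal_get_internal_configuration();
size_t sz = sizeof(cfg->hugepage_info);
void *shm;

if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
        shm = create_shared_memory(eal_hugepage_info_path(), sz);
        if (shm != NULL)
                memcpy(shm, cfg->hugepage_info, sz);
} else {
        shm = open_shared_memory(eal_hugepage_info_path(), sz);
        if (shm != NULL)
                memcpy(cfg->hugepage_info, shm, sz);
}
if (shm != NULL)
        munmap(shm, sz);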
char segname[250]; /* as per manpage, limit is 249 bytes plus null */
int flags = RTE_MFD_HUGETLB | pagesz_flags(hi->hugepage_sz);
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
- if (internal_config.single_file_segments) {
+ if (internal_conf->single_file_segments) {
fd = fd_list[list_idx].memseg_list_fd;
if (fd < 0) {
unsigned int list_idx, unsigned int seg_idx)
{
int fd;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* for in-memory mode, we only make it here when we're sure we support
* memfd, and this is a special case.
*/
- if (internal_config.in_memory)
+ if (internal_conf->in_memory)
return get_seg_memfd(hi, list_idx, seg_idx);
- if (internal_config.single_file_segments) {
+ if (internal_conf->single_file_segments) {
/* create a hugepage file path */
eal_get_hugefile_path(path, buflen, hi->hugedir, list_idx);
static void
close_hugefile(int fd, char *path, int list_idx)
{
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/*
* primary process must unlink the file, but only when not in in-memory
* mode (as in that case there is no file to unlink).
*/
- if (!internal_config.in_memory &&
+ if (!internal_conf->in_memory &&
rte_eal_process_type() == RTE_PROC_PRIMARY &&
unlink(path))
RTE_LOG(ERR, EAL, "%s(): unlinking '%s' failed: %s\n",
/* in-memory mode is a special case, because we can be sure that
* fallocate() is supported.
*/
- if (internal_config.in_memory)
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
+ if (internal_conf->in_memory)
return resize_hugefile_in_memory(fd, fa_offset,
page_sz, grow);
size_t alloc_sz;
int flags;
void *new_addr;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
alloc_sz = hi->hugepage_sz;
/* these are checked at init, but code analyzers don't know that */
- if (internal_config.in_memory && !anonymous_hugepages_supported) {
+ if (internal_conf->in_memory && !anonymous_hugepages_supported) {
RTE_LOG(ERR, EAL, "Anonymous hugepages not supported, in-memory mode cannot allocate memory\n");
return -1;
}
- if (internal_config.in_memory && !memfd_create_supported &&
- internal_config.single_file_segments) {
+ if (internal_conf->in_memory && !memfd_create_supported &&
+ internal_conf->single_file_segments) {
RTE_LOG(ERR, EAL, "Single-file segments are not supported without memfd support\n");
return -1;
}
/* in-memory without memfd is a special case */
int mmap_flags;
- if (internal_config.in_memory && !memfd_create_supported) {
+ if (internal_conf->in_memory && !memfd_create_supported) {
const int in_memory_flags = MAP_HUGETLB | MAP_FIXED |
MAP_PRIVATE | MAP_ANONYMOUS;
int pagesz_flag;
return -1;
}
- if (internal_config.single_file_segments) {
+ if (internal_conf->single_file_segments) {
map_offset = seg_idx * alloc_sz;
ret = resize_hugefile(fd, map_offset, alloc_sz, true);
if (ret < 0)
__func__, strerror(errno));
goto resized;
}
- if (internal_config.hugepage_unlink &&
- !internal_config.in_memory) {
+ if (internal_conf->hugepage_unlink &&
+ !internal_conf->in_memory) {
if (unlink(path)) {
RTE_LOG(DEBUG, EAL, "%s(): unlink() failed: %s\n",
__func__, strerror(errno));
RTE_LOG(CRIT, EAL, "Can't mmap holes in our virtual address space\n");
}
/* roll back the ref count */
- if (internal_config.single_file_segments)
+ if (internal_conf->single_file_segments)
fd_list[list_idx].count--;
resized:
/* some codepaths will return negative fd, so exit early */
if (fd < 0)
return -1;
- if (internal_config.single_file_segments) {
+ if (internal_conf->single_file_segments) {
resize_hugefile(fd, map_offset, alloc_sz, false);
/* ignore failure, can't make it any worse */
close_hugefile(fd, path, list_idx);
} else {
/* only remove file if we can take out a write lock */
- if (internal_config.hugepage_unlink == 0 &&
- internal_config.in_memory == 0 &&
+ if (internal_conf->hugepage_unlink == 0 &&
+ internal_conf->in_memory == 0 &&
lock(fd, LOCK_EX) == 1)
unlink(path);
close(fd);
char path[PATH_MAX];
int fd, ret = 0;
bool exit_early;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* erase page data */
memset(ms->addr, 0, ms->len);
exit_early = false;
/* if we're using anonymous hugepages, nothing to be done */
- if (internal_config.in_memory && !memfd_create_supported)
+ if (internal_conf->in_memory && !memfd_create_supported)
exit_early = true;
/* if we've already unlinked the page, nothing needs to be done */
- if (!internal_config.in_memory && internal_config.hugepage_unlink)
+ if (!internal_conf->in_memory && internal_conf->hugepage_unlink)
exit_early = true;
if (exit_early) {
if (fd < 0)
return -1;
- if (internal_config.single_file_segments) {
+ if (internal_conf->single_file_segments) {
map_offset = seg_idx * ms->len;
if (resize_hugefile(fd, map_offset, ms->len, false))
return -1;
/* if we're able to take out a write lock, we're the last one
* holding onto this page.
*/
- if (!internal_config.in_memory) {
+ if (!internal_conf->in_memory) {
ret = lock(fd, LOCK_EX);
if (ret >= 0) {
/* no one else is using this page */
size_t page_sz;
int cur_idx, start_idx, j, dir_fd = -1;
unsigned int msl_idx, need, i;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
if (msl->page_sz != wa->page_sz)
return 0;
* during init, we already hold a write lock, so don't try to take out
* another one.
*/
- if (wa->hi->lock_descriptor == -1 && !internal_config.in_memory) {
+ if (wa->hi->lock_descriptor == -1 && !internal_conf->in_memory) {
dir_fd = open(wa->hi->hugedir, O_RDONLY);
if (dir_fd < 0) {
RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n",
struct free_walk_param *wa = arg;
uintptr_t start_addr, end_addr;
int msl_idx, seg_idx, ret, dir_fd = -1;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
start_addr = (uintptr_t) msl->base_va;
end_addr = start_addr + msl->len;
* during init, we already hold a write lock, so don't try to take out
* another one.
*/
- if (wa->hi->lock_descriptor == -1 && !internal_config.in_memory) {
+ if (wa->hi->lock_descriptor == -1 && !internal_conf->in_memory) {
dir_fd = open(wa->hi->hugedir, O_RDONLY);
if (dir_fd < 0) {
RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n",
#endif
struct alloc_walk_param wa;
struct hugepage_info *hi = NULL;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
memset(&wa, 0, sizeof(wa));
/* dynamic allocation not supported in legacy mode */
- if (internal_config.legacy_mem)
+ if (internal_conf->legacy_mem)
return -1;
- for (i = 0; i < (int) RTE_DIM(internal_config.hugepage_info); i++) {
+ for (i = 0; i < (int) RTE_DIM(internal_conf->hugepage_info); i++) {
if (page_sz ==
- internal_config.hugepage_info[i].hugepage_sz) {
- hi = &internal_config.hugepage_info[i];
+ internal_conf->hugepage_info[i].hugepage_sz) {
+ hi = &internal_conf->hugepage_info[i];
break;
}
}
eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs)
{
int seg, ret = 0;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* dynamic free not supported in legacy mode */
- if (internal_config.legacy_mem)
+ if (internal_conf->legacy_mem)
return -1;
for (seg = 0; seg < n_segs; seg++) {
memset(&wa, 0, sizeof(wa));
- for (i = 0; i < (int)RTE_DIM(internal_config.hugepage_info);
+ for (i = 0; i < (int)RTE_DIM(internal_conf->hugepage_info);
i++) {
- hi = &internal_config.hugepage_info[i];
+ hi = &internal_conf->hugepage_info[i];
if (cur->hugepage_sz == hi->hugepage_sz)
break;
}
- if (i == (int)RTE_DIM(internal_config.hugepage_info)) {
+ if (i == (int)RTE_DIM(internal_conf->hugepage_info)) {
RTE_LOG(ERR, EAL, "Can't find relevant hugepage_info entry\n");
ret = -1;
continue;
int
eal_memalloc_free_seg(struct rte_memseg *ms)
{
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
/* dynamic free not supported in legacy mode */
- if (internal_config.legacy_mem)
+ if (internal_conf->legacy_mem)
return -1;
return eal_memalloc_free_seg_bulk(&ms, 1);
struct hugepage_info *hi = NULL;
unsigned int i;
int msl_idx;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
if (msl->external)
return 0;
primary_msl = &mcfg->memsegs[msl_idx];
local_msl = &local_memsegs[msl_idx];
- for (i = 0; i < RTE_DIM(internal_config.hugepage_info); i++) {
+ for (i = 0; i < RTE_DIM(internal_conf->hugepage_info); i++) {
uint64_t cur_sz =
- internal_config.hugepage_info[i].hugepage_sz;
+ internal_conf->hugepage_info[i].hugepage_sz;
uint64_t msl_sz = primary_msl->page_sz;
if (msl_sz == cur_sz) {
- hi = &internal_config.hugepage_info[i];
+ hi = &internal_conf->hugepage_info[i];
break;
}
}
{
int *data;
int i;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* single-file segments mode does not need fd list */
- if (!internal_config.single_file_segments) {
+ if (!internal_conf->single_file_segments) {
/* ensure we have space to store fd per each possible segment */
data = malloc(sizeof(int) * len);
if (data == NULL) {
eal_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* single file segments mode doesn't support individual segment fd's */
- if (internal_config.single_file_segments)
+ if (internal_conf->single_file_segments)
return -ENOTSUP;
/* if list is not allocated, allocate it */
int
eal_memalloc_set_seg_list_fd(int list_idx, int fd)
{
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
/* non-single file segment mode doesn't support segment list fd's */
- if (!internal_config.single_file_segments)
+ if (!internal_conf->single_file_segments)
return -ENOTSUP;
fd_list[list_idx].memseg_list_fd = fd;
eal_memalloc_get_seg_fd(int list_idx, int seg_idx)
{
int fd;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
- if (internal_config.in_memory || internal_config.no_hugetlbfs) {
+ if (internal_conf->in_memory || internal_conf->no_hugetlbfs) {
#ifndef MEMFD_SUPPORTED
/* in in-memory or no-huge mode, we rely on memfd support */
return -ENOTSUP;
#endif
/* memfd supported, but hugetlbfs memfd may not be */
- if (!internal_config.no_hugetlbfs && !memfd_create_supported)
+ if (!internal_conf->no_hugetlbfs && !memfd_create_supported)
return -ENOTSUP;
}
- if (internal_config.single_file_segments) {
+ if (internal_conf->single_file_segments) {
fd = fd_list[list_idx].memseg_list_fd;
} else if (fd_list[list_idx].len == 0) {
/* list not initialized */
test_memfd_create(void)
{
#ifdef MEMFD_SUPPORTED
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
unsigned int i;
- for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
- uint64_t pagesz = internal_config.hugepage_info[i].hugepage_sz;
+ for (i = 0; i < internal_conf->num_hugepage_sizes; i++) {
+ uint64_t pagesz = internal_conf->hugepage_info[i].hugepage_sz;
int pagesz_flag = pagesz_flags(pagesz);
int flags;
eal_memalloc_get_seg_fd_offset(int list_idx, int seg_idx, size_t *offset)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
- if (internal_config.in_memory || internal_config.no_hugetlbfs) {
+ if (internal_conf->in_memory || internal_conf->no_hugetlbfs) {
#ifndef MEMFD_SUPPORTED
/* in in-memory or no-huge mode, we rely on memfd support */
return -ENOTSUP;
#endif
/* memfd supported, but hugetlbfs memfd may not be */
- if (!internal_config.no_hugetlbfs && !memfd_create_supported)
+ if (!internal_conf->no_hugetlbfs && !memfd_create_supported)
return -ENOTSUP;
}
- if (internal_config.single_file_segments) {
+ if (internal_conf->single_file_segments) {
size_t pgsz = mcfg->memsegs[list_idx].page_sz;
/* segment not active? */
int
eal_memalloc_init(void)
{
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
if (rte_memseg_list_walk(secondary_msl_create_walk, NULL) < 0)
return -1;
if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
- internal_config.in_memory) {
+ internal_conf->in_memory) {
int mfd_res = test_memfd_create();
if (mfd_res < 0) {
* if we support hugetlbfs with memfd_create. this code will
* test if we do.
*/
- if (internal_config.single_file_segments &&
+ if (internal_conf->single_file_segments &&
mfd_res != 1) {
RTE_LOG(ERR, EAL, "Single-file segments mode cannot be used without memfd support\n");
return -1;
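test_memfd_create() above probes, per configured hugepage size, whether memfd_create() accepts the hugetlbfs flags, and single-file segments mode is rejected if it does not. A reduced sketch of such a probe follows; the actual errno handling in EAL is more involved, so treat this as an approximation with an illustrative function name:

/* Sketch: probe hugetlbfs-backed memfd support for one page size.
 * pagesz_flag stands for the MFD_HUGE_* encoding of the page size.
 */
#include <unistd.h>
#include <sys/mman.h>   /* memfd_create(), MFD_* flags (glibc >= 2.27) */

static int
probe_hugetlb_memfd(unsigned int pagesz_flag)
{
        int fd = memfd_create("probe", MFD_HUGETLB | pagesz_flag);

        if (fd < 0)
                return 0;       /* not supported for this page size */
        close(fd);
        return 1;               /* supported */
}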
struct bitmask *oldmask = NULL;
bool have_numa = true;
unsigned long maxnode = 0;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* Check if kernel supports NUMA. */
if (numa_available() != 0) {
oldpolicy = MPOL_DEFAULT;
}
for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
- if (internal_config.socket_mem[i])
+ if (internal_conf->socket_mem[i])
maxnode = i + 1;
}
#endif
if (j == maxnode) {
node_id = (node_id + 1) % maxnode;
- while (!internal_config.socket_mem[node_id]) {
+ while (!internal_conf->socket_mem[node_id]) {
node_id++;
node_id %= maxnode;
}
{
void *retval;
int fd;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* if no shared files mode is used, create anonymous memory instead */
- if (internal_config.no_shconf) {
+ if (internal_conf->no_shconf) {
retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (retval == MAP_FAILED)
{
unsigned socket, size;
int page, nrpages = 0;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* get total number of hugepages */
for (size = 0; size < num_hp_info; size++)
for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
nrpages +=
- internal_config.hugepage_info[size].num_pages[socket];
+ internal_conf->hugepage_info[size].num_pages[socket];
for (page = 0; page < nrpages; page++) {
struct hugepage_file *hp = &hugepg_tbl[page];
{
unsigned socket, size;
int page, nrpages = 0;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* get total number of hugepages */
for (size = 0; size < num_hp_info; size++)
for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
- nrpages += internal_config.hugepage_info[size].num_pages[socket];
+ nrpages += internal_conf->hugepage_info[size].num_pages[socket];
for (size = 0; size < num_hp_info; size++) {
for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
uint64_t page_sz;
size_t memseg_len;
int socket_id;
-
+#ifndef RTE_ARCH_64
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+#endif
page_sz = hugepages[seg_start].size;
socket_id = hugepages[seg_start].socket_id;
seg_len = seg_end - seg_start;
/* we have a new address, so unmap previous one */
#ifndef RTE_ARCH_64
/* in 32-bit legacy mode, we have already unmapped the page */
- if (!internal_config.legacy_mem)
+ if (!internal_conf->legacy_mem)
munmap(hfile->orig_va, page_sz);
#else
munmap(hfile->orig_va, page_sz);
unsigned int hpi_idx, socket, i;
int n_contig_segs, n_segs;
int msl_idx;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* before we preallocate segments, we need to free up our VA space.
* we're not removing files, and we already have information about
/* we cannot know how many page sizes and sockets we have discovered, so
* loop over all of them
*/
- for (hpi_idx = 0; hpi_idx < internal_config.num_hugepage_sizes;
+ for (hpi_idx = 0; hpi_idx < internal_conf->num_hugepage_sizes;
hpi_idx++) {
uint64_t page_sz =
- internal_config.hugepage_info[hpi_idx].hugepage_sz;
+ internal_conf->hugepage_info[hpi_idx].hugepage_sz;
for (i = 0; i < rte_socket_count(); i++) {
struct rte_memseg_list *msl;
{
uint64_t size = 0;
unsigned i, j;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
- for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
- struct hugepage_info *hpi = &internal_config.hugepage_info[i];
+ for (i = 0; i < internal_conf->num_hugepage_sizes; i++) {
+ struct hugepage_info *hpi = &internal_conf->hugepage_info[i];
if (strnlen(hpi->hugedir, sizeof(hpi->hugedir)) != 0) {
for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
size += hpi->hugepage_sz * hpi->num_pages[j];
struct rte_mem_config *mcfg;
struct hugepage_file *hugepage = NULL, *tmp_hp = NULL;
struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
uint64_t memory[RTE_MAX_NUMA_NODES];
mcfg = rte_eal_get_configuration()->mem_config;
/* hugetlbfs can be disabled */
- if (internal_config.no_hugetlbfs) {
+ if (internal_conf->no_hugetlbfs) {
void *prealloc_addr;
size_t mem_sz;
struct rte_memseg_list *msl;
uint64_t page_sz;
/* nohuge mode is legacy mode */
- internal_config.legacy_mem = 1;
+ internal_conf->legacy_mem = 1;
/* nohuge mode is single-file segments mode */
- internal_config.single_file_segments = 1;
+ internal_conf->single_file_segments = 1;
/* create a memseg list */
msl = &mcfg->memsegs[0];
- mem_sz = internal_config.memory;
+ mem_sz = internal_conf->memory;
page_sz = RTE_PGSIZE_4K;
n_segs = mem_sz / page_sz;
RTE_LOG(DEBUG, EAL, "Falling back to anonymous map\n");
} else {
/* we got an fd - now resize it */
- if (ftruncate(memfd, internal_config.memory) < 0) {
+ if (ftruncate(memfd, internal_conf->memory) < 0) {
RTE_LOG(ERR, EAL, "Cannot resize memfd: %s\n",
strerror(errno));
RTE_LOG(ERR, EAL, "Falling back to anonymous map\n");
/* calculate total number of hugepages available. at this point we haven't
* yet started sorting them so they all are on socket 0 */
- for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
+ for (i = 0; i < (int) internal_conf->num_hugepage_sizes; i++) {
/* meanwhile, also initialize used_hp hugepage sizes in used_hp */
- used_hp[i].hugepage_sz = internal_config.hugepage_info[i].hugepage_sz;
+ used_hp[i].hugepage_sz = internal_conf->hugepage_info[i].hugepage_sz;
- nr_hugepages += internal_config.hugepage_info[i].num_pages[0];
+ nr_hugepages += internal_conf->hugepage_info[i].num_pages[0];
}
/*
/* make a copy of socket_mem, needed for balanced allocation. */
for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
- memory[i] = internal_config.socket_mem[i];
+ memory[i] = internal_conf->socket_mem[i];
/* map all hugepages and sort them */
- for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
+ for (i = 0; i < (int)internal_conf->num_hugepage_sizes; i++) {
unsigned pages_old, pages_new;
struct hugepage_info *hpi;
* we just map all hugepages available to the system
* all hugepages are still located on socket 0
*/
- hpi = &internal_config.hugepage_info[i];
+ hpi = &internal_conf->hugepage_info[i];
if (hpi->num_pages[0] == 0)
continue;
huge_recover_sigbus();
- if (internal_config.memory == 0 && internal_config.force_sockets == 0)
- internal_config.memory = eal_get_hugepage_mem_size();
+ if (internal_conf->memory == 0 && internal_conf->force_sockets == 0)
+ internal_conf->memory = eal_get_hugepage_mem_size();
nr_hugefiles = nr_hugepages;
/* clean out the numbers of pages */
- for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++)
+ for (i = 0; i < (int) internal_conf->num_hugepage_sizes; i++)
for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
- internal_config.hugepage_info[i].num_pages[j] = 0;
+ internal_conf->hugepage_info[i].num_pages[j] = 0;
/* get hugepages for each socket */
for (i = 0; i < nr_hugefiles; i++) {
/* find a hugepage info with right size and increment num_pages */
const int nb_hpsizes = RTE_MIN(MAX_HUGEPAGE_SIZES,
- (int)internal_config.num_hugepage_sizes);
+ (int)internal_conf->num_hugepage_sizes);
for (j = 0; j < nb_hpsizes; j++) {
if (tmp_hp[i].size ==
- internal_config.hugepage_info[j].hugepage_sz) {
- internal_config.hugepage_info[j].num_pages[socket]++;
+ internal_conf->hugepage_info[j].hugepage_sz) {
+ internal_conf->hugepage_info[j].num_pages[socket]++;
}
}
}
/* make a copy of socket_mem, needed for number of pages calculation */
for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
- memory[i] = internal_config.socket_mem[i];
+ memory[i] = internal_conf->socket_mem[i];
/* calculate final number of pages */
nr_hugepages = eal_dynmem_calc_num_pages_per_socket(memory,
- internal_config.hugepage_info, used_hp,
- internal_config.num_hugepage_sizes);
+ internal_conf->hugepage_info, used_hp,
+ internal_conf->num_hugepage_sizes);
/* error if not enough memory available */
if (nr_hugepages < 0)
goto fail;
/* reporting in! */
- for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
+ for (i = 0; i < (int) internal_conf->num_hugepage_sizes; i++) {
for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
if (used_hp[i].num_pages[j] > 0) {
RTE_LOG(DEBUG, EAL,
* also, sets final_va to NULL on pages that were unmapped.
*/
if (unmap_unneeded_hugepages(tmp_hp, used_hp,
- internal_config.num_hugepage_sizes) < 0) {
+ internal_conf->num_hugepage_sizes) < 0) {
RTE_LOG(ERR, EAL, "Unmapping and locking hugepages failed!\n");
goto fail;
}
#ifndef RTE_ARCH_64
/* for legacy 32-bit mode, we did not preallocate VA space, so do it */
- if (internal_config.legacy_mem &&
+ if (internal_conf->legacy_mem &&
prealloc_segments(hugepage, nr_hugefiles)) {
RTE_LOG(ERR, EAL, "Could not preallocate VA space for hugepages\n");
goto fail;
}
/* free the hugepage backing files */
- if (internal_config.hugepage_unlink &&
- unlink_hugepage_files(tmp_hp, internal_config.num_hugepage_sizes) < 0) {
+ if (internal_conf->hugepage_unlink &&
+ unlink_hugepage_files(tmp_hp, internal_conf->num_hugepage_sizes) < 0) {
RTE_LOG(ERR, EAL, "Unlinking hugepage files failed!\n");
goto fail;
}
int
rte_eal_hugepage_init(void)
{
- return internal_config.legacy_mem ?
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
+ return internal_conf->legacy_mem ?
eal_legacy_hugepage_init() :
eal_dynmem_hugepage_init();
}
int
rte_eal_hugepage_attach(void)
{
- return internal_config.legacy_mem ?
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
+ return internal_conf->legacy_mem ?
eal_legacy_hugepage_attach() :
eal_hugepage_attach();
}
struct rte_memseg_list *msl;
uint64_t extra_mem_per_socket, total_extra_mem, total_requested_mem;
uint64_t max_mem;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* no-huge does not need this at all */
- if (internal_config.no_hugetlbfs)
+ if (internal_conf->no_hugetlbfs)
return 0;
/* this is a giant hack, but desperate times call for desperate
* unneeded pages. this will not affect secondary processes, as those
* should be able to mmap the space without (too many) problems.
*/
- if (internal_config.legacy_mem)
+ if (internal_conf->legacy_mem)
return 0;
/* 32-bit mode is a very special case. we cannot know in advance where
*/
active_sockets = 0;
total_requested_mem = 0;
- if (internal_config.force_sockets)
+ if (internal_conf->force_sockets)
for (i = 0; i < rte_socket_count(); i++) {
uint64_t mem;
socket_id = rte_socket_id_by_idx(i);
- mem = internal_config.socket_mem[socket_id];
+ mem = internal_conf->socket_mem[socket_id];
if (mem == 0)
continue;
total_requested_mem += mem;
}
else
- total_requested_mem = internal_config.memory;
+ total_requested_mem = internal_conf->memory;
max_mem = (uint64_t)RTE_MAX_MEM_MB << 20;
if (total_requested_mem > max_mem) {
/* create memseg lists */
for (i = 0; i < rte_socket_count(); i++) {
- int hp_sizes = (int) internal_config.num_hugepage_sizes;
+ int hp_sizes = (int) internal_conf->num_hugepage_sizes;
uint64_t max_socket_mem, cur_socket_mem;
unsigned int master_lcore_socket;
struct rte_config *cfg = rte_eal_get_configuration();
#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
/* we can still sort pages by socket in legacy mode */
- if (!internal_config.legacy_mem && socket_id > 0)
+ if (!internal_conf->legacy_mem && socket_id > 0)
break;
#endif
/* if we didn't specifically request memory on this socket */
skip = active_sockets != 0 &&
- internal_config.socket_mem[socket_id] == 0;
+ internal_conf->socket_mem[socket_id] == 0;
/* ...or if we didn't specifically request memory on *any*
* socket, and this is not master lcore
*/
/* max amount of memory on this socket */
max_socket_mem = (active_sockets != 0 ?
- internal_config.socket_mem[socket_id] :
- internal_config.memory) +
+ internal_conf->socket_mem[socket_id] :
+ internal_conf->memory) +
extra_mem_per_socket;
cur_socket_mem = 0;
struct hugepage_info *hpi;
int type_msl_idx, max_segs, total_segs = 0;
- hpi = &internal_config.hugepage_info[hpi_idx];
+ hpi = &internal_conf->hugepage_info[hpi_idx];
hugepage_sz = hpi->hugepage_sz;
/* check if pages are actually available */
/* increase rlimit to maximum */
struct rlimit lim;
+#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+#endif
if (getrlimit(RLIMIT_NOFILE, &lim) == 0) {
/* set limit to maximum */
lim.rlim_cur = lim.rlim_max;
RTE_LOG(ERR, EAL, "Cannot get current resource limits\n");
}
#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
- if (!internal_config.legacy_mem && rte_socket_count() > 1) {
+ if (!internal_conf->legacy_mem && rte_socket_count() > 1) {
RTE_LOG(WARNING, EAL, "DPDK is running on a NUMA system, but is compiled without NUMA support.\n");
RTE_LOG(WARNING, EAL, "This will have adverse consequences for performance and usability.\n");
RTE_LOG(WARNING, EAL, "Please use --"OPT_LEGACY_MEM" option, or recompile with NUMA support.\n");
uint64_t
rte_get_hpet_hz(void)
{
- if(internal_config.no_hpet)
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
+ if (internal_conf->no_hpet)
rte_panic("Error, HPET called, but no HPET present\n");
return eal_hpet_resolution_hz;
{
uint32_t t, msb;
uint64_t ret;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
- if(internal_config.no_hpet)
+ if (internal_conf->no_hpet)
rte_panic("Error, HPET called, but no HPET present\n");
t = eal_hpet->counter_l;
rte_eal_hpet_init(int make_default)
{
int fd, ret;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
- if (internal_config.no_hpet) {
+ if (internal_conf->no_hpet) {
RTE_LOG(NOTICE, EAL, "HPET is disabled\n");
return -1;
}
if (fd < 0) {
RTE_LOG(ERR, EAL, "ERROR: Cannot open "DEV_HPET": %s!\n",
strerror(errno));
- internal_config.no_hpet = 1;
+ internal_conf->no_hpet = 1;
return -1;
}
eal_hpet = mmap(NULL, 1024, PROT_READ, MAP_SHARED, fd, 0);
"To run without using HPET, set CONFIG_RTE_LIBEAL_USE_HPET=n "
"in your build configuration or use '--no-hpet' EAL flag.\n");
close(fd);
- internal_config.no_hpet = 1;
+ internal_conf->no_hpet = 1;
return -1;
}
close(fd);
hpet_msb_inc, NULL);
if (ret != 0) {
RTE_LOG(ERR, EAL, "ERROR: Cannot create HPET timer thread!\n");
- internal_config.no_hpet = 1;
+ internal_conf->no_hpet = 1;
return -1;
}
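Every failure path above degrades gracefully by setting no_hpet, so the TSC stays the default timer. When the build has HPET support and initialization succeeds, callers use the public HPET API; a short usage sketch (helper name is illustrative only):

/* Sketch: enable HPET (passing 1 also makes it the default timer
 * source) and read its frequency and current counter value.
 */
#include <inttypes.h>
#include <rte_cycles.h>
#include <rte_log.h>

static void
hpet_demo(void)
{
        if (rte_eal_hpet_init(1) != 0)
                return;         /* no HPET available, TSC remains the timer */

        RTE_LOG(DEBUG, EAL, "HPET: %" PRIu64 " Hz, counter %" PRIu64 "\n",
                rte_get_hpet_hz(), rte_get_hpet_cycles());
}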
struct rte_mp_reply mp_reply = {0};
struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* if primary, try to open the group */
- if (internal_config.process_type == RTE_PROC_PRIMARY) {
+ if (internal_conf->process_type == RTE_PROC_PRIMARY) {
/* try regular group format */
snprintf(filename, sizeof(filename),
VFIO_GROUP_FMT, iommu_group_num);
int vfio_group_fd;
int iommu_group_num;
int i, ret;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* get group number */
ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_num);
* Note this can happen several times with the hotplug
* functionality.
*/
- if (internal_config.process_type == RTE_PROC_PRIMARY &&
+ if (internal_conf->process_type == RTE_PROC_PRIMARY &&
vfio_cfg->vfio_active_groups == 1 &&
vfio_group_device_count(vfio_group_fd) == 0) {
const struct vfio_iommu_type *t;
/* initialize group list */
int i, j;
int vfio_available;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
rte_spinlock_recursive_t lock = RTE_SPINLOCK_RECURSIVE_INITIALIZER;
return 0;
}
- if (internal_config.process_type == RTE_PROC_PRIMARY) {
+ if (internal_conf->process_type == RTE_PROC_PRIMARY) {
/* open a new container */
default_vfio_cfg->vfio_container_fd =
rte_vfio_get_container_fd();
struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
int container_fd;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
if (default_vfio_cfg->vfio_enabled)
return default_vfio_cfg->vfio_container_fd;
- if (internal_config.process_type == RTE_PROC_PRIMARY) {
+ if (internal_conf->process_type == RTE_PROC_PRIMARY) {
/* if we were secondary process we would try requesting
* container fd from the primary, but we're the primary
* process so just exit here
struct rte_mp_reply mp_reply = {0};
struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* if we're in a primary process, try to open the container */
- if (internal_config.process_type == RTE_PROC_PRIMARY) {
+ if (internal_conf->process_type == RTE_PROC_PRIMARY) {
vfio_container_fd = open(VFIO_CONTAINER_PATH, O_RDWR);
if (vfio_container_fd < 0) {
RTE_LOG(ERR, EAL, " cannot open VFIO container, "
*/
static int mem_cfg_fd = -1;
-/* early configuration structure, when memory config is not mmapped */
-static struct rte_mem_config early_mem_config;
-
-/* Address of global and public configuration */
-static struct rte_config rte_config = {
- .mem_config = &early_mem_config,
-};
-
/* internal configuration (per-core) */
struct lcore_config lcore_config[RTE_MAX_LCORE];
-/* internal configuration */
-struct internal_config internal_config;
-
-/* platform-specific runtime dir */
-static char runtime_dir[PATH_MAX];
-
-const char *
-rte_eal_get_runtime_dir(void)
-{
- return runtime_dir;
-}
-
-/* Return a pointer to the configuration structure */
-struct rte_config *
-rte_eal_get_configuration(void)
-{
- return &rte_config;
-}
-
/* Detect if we are a primary or a secondary process */
enum rte_proc_type_t
eal_proc_type_detect(void)
{
enum rte_proc_type_t ptype = RTE_PROC_PRIMARY;
const char *pathname = eal_runtime_config_path();
+ const struct rte_config *config = rte_eal_get_configuration();
/* if we can open the file but not get a write-lock we are a secondary
* process. NOTE: if we get a file handle back, we keep that open
_O_RDWR, _SH_DENYNO, _S_IREAD | _S_IWRITE);
if (err == 0) {
OVERLAPPED soverlapped = { 0 };
- soverlapped.Offset = sizeof(*rte_config.mem_config);
+ soverlapped.Offset = sizeof(*config->mem_config);
soverlapped.OffsetHigh = 0;
HANDLE hwinfilehandle = (HANDLE)_get_osfhandle(mem_cfg_fd);
if (!LockFileEx(hwinfilehandle,
LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0,
- sizeof(*rte_config.mem_config), 0, &soverlapped))
+ sizeof(*config->mem_config), 0, &soverlapped))
ptype = RTE_PROC_SECONDARY;
}
return ptype;
}
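eal_proc_type_detect() above mirrors the Unix variant: a process that can open the shared config file but cannot take the exclusive lock on it is treated as a secondary, otherwise it is the primary. The rest of the EAL only consults the stored result through the common getter, e.g.:

/* Sketch: branch on the detected process type via the getter rather
 * than a per-OS global.
 */
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
        /* create and initialize shared resources */
} else {
        /* attach to resources the primary already created */
}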
-enum rte_proc_type_t
-rte_eal_process_type(void)
-{
- return rte_config.process_type;
-}
-
-int
-rte_eal_has_hugepages(void)
-{
- return !internal_config.no_hugetlbfs;
-}
-
-enum rte_iova_mode
-rte_eal_iova_mode(void)
-{
- return rte_config.iova_mode;
-}
-
/* display usage */
static void
eal_usage(const char *prgname)
int opt;
char **argvopt;
int option_index;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
argvopt = argv;
- eal_reset_internal_config(&internal_config);
+ eal_reset_internal_config(internal_conf);
while ((opt = getopt_long(argc, argvopt, eal_short_options,
eal_long_options, &option_index)) != EOF) {
ret = (opt == OPT_LOG_LEVEL_NUM) ?
eal_parse_common_option(opt, optarg,
- &internal_config) : 0;
+ internal_conf) : 0;
/* common parser is not happy */
if (ret < 0)
char **argvopt;
int option_index;
char *prgname = argv[0];
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
argvopt = argv;
return -1;
}
- ret = eal_parse_common_option(opt, optarg, &internal_config);
+ ret = eal_parse_common_option(opt, optarg, internal_conf);
/* common parser is not happy */
if (ret < 0) {
eal_usage(prgname);
}
}
- if (eal_adjust_config(&internal_config) != 0)
+ if (eal_adjust_config(internal_conf) != 0)
return -1;
/* sanity checks */
- if (eal_check_common_options(&internal_config) != 0) {
+ if (eal_check_common_options(internal_conf) != 0) {
eal_usage(prgname);
return -1;
}
int
rte_eal_cleanup(void)
{
- eal_cleanup_config(&internal_config);
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
+ eal_cleanup_config(internal_conf);
return 0;
}
rte_eal_init(int argc, char **argv)
{
int i, fctret;
+ const struct rte_config *config = rte_eal_get_configuration();
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
rte_eal_log_init(NULL, 0);
exit(1);
/* Prevent creation of shared memory files. */
- if (internal_config.in_memory == 0) {
+ if (internal_conf->in_memory == 0) {
RTE_LOG(WARNING, EAL, "Multi-process support is requested, "
"but not available.\n");
- internal_config.in_memory = 1;
+ internal_conf->in_memory = 1;
}
- if (!internal_config.no_hugetlbfs && (eal_hugepage_info_init() < 0)) {
+ if (!internal_conf->no_hugetlbfs && (eal_hugepage_info_init() < 0)) {
rte_eal_init_alert("Cannot get hugepage information");
rte_errno = EACCES;
return -1;
}
- if (internal_config.memory == 0 && !internal_config.force_sockets) {
- if (internal_config.no_hugetlbfs)
- internal_config.memory = MEMSIZE_IF_NO_HUGE_PAGE;
+ if (internal_conf->memory == 0 && !internal_conf->force_sockets) {
+ if (internal_conf->no_hugetlbfs)
+ internal_conf->memory = MEMSIZE_IF_NO_HUGE_PAGE;
}
if (eal_mem_win32api_init() < 0) {
return -1;
}
- eal_thread_init_master(rte_config.master_lcore);
+ eal_thread_init_master(config->master_lcore);
RTE_LCORE_FOREACH_SLAVE(i) {
#include <rte_memzone.h>
#include <rte_os.h>
+#include "eal_private.h"
#include "eal_filesystem.h"
#include "eal_hugepages.h"
#include "eal_internal_cfg.h"
struct hugepage_info *hpi;
unsigned int socket_id;
int ret = 0;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* Only one hugepage size available on Windows. */
- internal_config.num_hugepage_sizes = 1;
- hpi = &internal_config.hugepage_info[0];
+ internal_conf->num_hugepage_sizes = 1;
+ hpi = &internal_conf->hugepage_info[0];
hpi->hugepage_sz = GetLargePageMinimum();
if (hpi->hugepage_sz == 0)
int ret = -1;
struct alloc_walk_param wa;
struct hugepage_info *hi = NULL;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
- if (internal_config.legacy_mem) {
+ if (internal_conf->legacy_mem) {
RTE_LOG(ERR, EAL, "dynamic allocation not supported in legacy mode\n");
return -ENOTSUP;
}
- for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
- struct hugepage_info *hpi = &internal_config.hugepage_info[i];
+ for (i = 0; i < internal_conf->num_hugepage_sizes; i++) {
+ struct hugepage_info *hpi = &internal_conf->hugepage_info[i];
if (page_sz == hpi->hugepage_sz) {
hi = hpi;
break;
eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs)
{
int seg, ret = 0;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* dynamic free not supported in legacy mode */
- if (internal_config.legacy_mem)
+ if (internal_conf->legacy_mem)
return -1;
for (seg = 0; seg < n_segs; seg++) {
memset(&wa, 0, sizeof(wa));
- for (i = 0; i < RTE_DIM(internal_config.hugepage_info); i++) {
- hi = &internal_config.hugepage_info[i];
+ for (i = 0; i < RTE_DIM(internal_conf->hugepage_info); i++) {
+ hi = &internal_conf->hugepage_info[i];
if (cur->hugepage_sz == hi->hugepage_sz)
break;
}
- if (i == RTE_DIM(internal_config.hugepage_info)) {
+ if (i == RTE_DIM(internal_conf->hugepage_info)) {
RTE_LOG(ERR, EAL, "Can't find relevant hugepage_info entry\n");
ret = -1;
continue;
void *addr;
mcfg = rte_eal_get_configuration()->mem_config;
+ struct internal_config *internal_conf =
+ eal_get_internal_configuration();
/* nohuge mode is legacy mode */
- internal_config.legacy_mem = 1;
+ internal_conf->legacy_mem = 1;
msl = &mcfg->memsegs[0];
- mem_sz = internal_config.memory;
+ mem_sz = internal_conf->memory;
page_sz = RTE_PGSIZE_4K;
n_segs = mem_sz / page_sz;
int
rte_eal_hugepage_init(void)
{
- return internal_config.no_hugetlbfs ?
+ const struct internal_config *internal_conf =
+ eal_get_internal_configuration();
+
+ return internal_conf->no_hugetlbfs ?
eal_nohuge_init() : eal_dynmem_hugepage_init();
}
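With the Windows paths also dispatching through the common internal configuration, applications see the same public surface on both platforms. A minimal, illustrative usage sketch relying only on the public getters consolidated by this patch:

/* Sketch: application-level view once rte_eal_init() has run. */
#include <stdio.h>
#include <stdlib.h>
#include <rte_eal.h>
#include <rte_debug.h>

int
main(int argc, char **argv)
{
        if (rte_eal_init(argc, argv) < 0)
                rte_exit(EXIT_FAILURE, "EAL init failed\n");

        printf("IOVA mode: %s, hugepages: %s\n",
               rte_eal_iova_mode() == RTE_IOVA_PA ? "PA" : "VA",
               rte_eal_has_hugepages() ? "yes" : "no");

        return 0;
}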