eal: move OS common config objects
authorTal Shnaiderman <talshn@mellanox.com>
Mon, 29 Jun 2020 12:37:32 +0000 (15:37 +0300)
committerThomas Monjalon <thomas@monjalon.net>
Mon, 29 Jun 2020 22:02:53 +0000 (00:02 +0200)
Move common functions between Unix and Windows to eal_common_config.c.

These functions are getters for the IOVA mode, the runtime
configuration, and multi-process state.

Move rte_config, internal_config, early_mem_config and runtime_dir
to be defined in the common file with getter functions.

Refactor the users of the config variables above to use
the getter functions.

Signed-off-by: Tal Shnaiderman <talshn@mellanox.com>
29 files changed:
lib/librte_eal/common/eal_common_config.c [new file with mode: 0644]
lib/librte_eal/common/eal_common_dynmem.c
lib/librte_eal/common/eal_common_fbarray.c
lib/librte_eal/common/eal_common_mcfg.c
lib/librte_eal/common/eal_common_memalloc.c
lib/librte_eal/common/eal_common_memory.c
lib/librte_eal/common/eal_common_options.c
lib/librte_eal/common/eal_common_proc.c
lib/librte_eal/common/eal_common_thread.c
lib/librte_eal/common/eal_internal_cfg.h
lib/librte_eal/common/eal_private.h
lib/librte_eal/common/malloc_elem.c
lib/librte_eal/common/malloc_heap.c
lib/librte_eal/common/meson.build
lib/librte_eal/freebsd/Makefile
lib/librte_eal/freebsd/eal.c
lib/librte_eal/freebsd/eal_hugepage_info.c
lib/librte_eal/freebsd/eal_memory.c
lib/librte_eal/linux/Makefile
lib/librte_eal/linux/eal.c
lib/librte_eal/linux/eal_hugepage_info.c
lib/librte_eal/linux/eal_memalloc.c
lib/librte_eal/linux/eal_memory.c
lib/librte_eal/linux/eal_timer.c
lib/librte_eal/linux/eal_vfio.c
lib/librte_eal/windows/eal.c
lib/librte_eal/windows/eal_hugepages.c
lib/librte_eal/windows/eal_memalloc.c
lib/librte_eal/windows/eal_memory.c

diff --git a/lib/librte_eal/common/eal_common_config.c b/lib/librte_eal/common/eal_common_config.c
new file mode 100644 (file)
index 0000000..56d09dd
--- /dev/null
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Mellanox Technologies, Ltd
+ */
+#include <string.h>
+
+#include <rte_os.h>
+#include <rte_string_fns.h>
+
+#include "eal_private.h"
+#include "eal_memcfg.h"
+
+/* early configuration structure, when memory config is not mmapped */
+static struct rte_mem_config early_mem_config;
+
+/* Address of global and public configuration */
+static struct rte_config rte_config = {
+       .mem_config = &early_mem_config,
+};
+
+/* platform-specific runtime dir */
+static char runtime_dir[PATH_MAX];
+
+/* internal configuration */
+static struct internal_config internal_config;
+
+const char *
+rte_eal_get_runtime_dir(void)
+{
+       return runtime_dir;
+}
+
+int
+eal_set_runtime_dir(char *run_dir, size_t size)
+{
+       size_t str_size;
+
+       str_size = strlcpy(runtime_dir, run_dir, sizeof(runtime_dir));
+       if (str_size >= size || str_size >= sizeof(runtime_dir)) {
+               RTE_LOG(ERR, EAL, "Runtime directory string too long\n");
+               return -1;
+       }
+
+       return 0;
+}
+
+/* Return a pointer to the configuration structure */
+struct rte_config *
+rte_eal_get_configuration(void)
+{
+       return &rte_config;
+}
+
+/* Return a pointer to the internal configuration structure */
+struct internal_config *
+eal_get_internal_configuration(void)
+{
+       return &internal_config;
+}
+
+enum rte_iova_mode
+rte_eal_iova_mode(void)
+{
+       return rte_eal_get_configuration()->iova_mode;
+}
+
+enum rte_proc_type_t
+rte_eal_process_type(void)
+{
+       return rte_config.process_type;
+}
+
+/* Return user provided mbuf pool ops name */
+const char *
+rte_eal_mbuf_user_pool_ops(void)
+{
+       return internal_config.user_mbuf_pool_ops_name;
+}
+
+/* return non-zero if hugepages are enabled. */
+int
+rte_eal_has_hugepages(void)
+{
+       return !internal_config.no_hugetlbfs;
+}
+
+int
+rte_eal_has_pci(void)
+{
+       return !internal_config.no_pci;
+}
index 6b07672..614648d 100644 (file)
@@ -29,9 +29,11 @@ eal_dynmem_memseg_lists_init(void)
        uint64_t max_mem, max_mem_per_type;
        unsigned int max_seglists_per_type;
        unsigned int n_memtypes, cur_type;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* no-huge does not need this at all */
-       if (internal_config.no_hugetlbfs)
+       if (internal_conf->no_hugetlbfs)
                return 0;
 
        /*
@@ -70,7 +72,7 @@ eal_dynmem_memseg_lists_init(void)
         */
 
        /* create space for mem types */
-       n_memtypes = internal_config.num_hugepage_sizes * rte_socket_count();
+       n_memtypes = internal_conf->num_hugepage_sizes * rte_socket_count();
        memtypes = calloc(n_memtypes, sizeof(*memtypes));
        if (memtypes == NULL) {
                RTE_LOG(ERR, EAL, "Cannot allocate space for memory types\n");
@@ -79,12 +81,12 @@ eal_dynmem_memseg_lists_init(void)
 
        /* populate mem types */
        cur_type = 0;
-       for (hpi_idx = 0; hpi_idx < (int) internal_config.num_hugepage_sizes;
+       for (hpi_idx = 0; hpi_idx < (int) internal_conf->num_hugepage_sizes;
                        hpi_idx++) {
                struct hugepage_info *hpi;
                uint64_t hugepage_sz;
 
-               hpi = &internal_config.hugepage_info[hpi_idx];
+               hpi = &internal_conf->hugepage_info[hpi_idx];
                hugepage_sz = hpi->hugepage_sz;
 
                for (i = 0; i < (int) rte_socket_count(); i++, cur_type++) {
@@ -92,7 +94,7 @@ eal_dynmem_memseg_lists_init(void)
 
 #ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
                        /* we can still sort pages by socket in legacy mode */
-                       if (!internal_config.legacy_mem && socket_id > 0)
+                       if (!internal_conf->legacy_mem && socket_id > 0)
                                break;
 #endif
                        memtypes[cur_type].page_sz = hugepage_sz;
@@ -227,11 +229,13 @@ eal_dynmem_hugepage_init(void)
        struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
        uint64_t memory[RTE_MAX_NUMA_NODES];
        int hp_sz_idx, socket_id;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        memset(used_hp, 0, sizeof(used_hp));
 
        for (hp_sz_idx = 0;
-                       hp_sz_idx < (int) internal_config.num_hugepage_sizes;
+                       hp_sz_idx < (int) internal_conf->num_hugepage_sizes;
                        hp_sz_idx++) {
 #ifndef RTE_ARCH_64
                struct hugepage_info dummy;
@@ -239,7 +243,7 @@ eal_dynmem_hugepage_init(void)
 #endif
                /* also initialize used_hp hugepage sizes in used_hp */
                struct hugepage_info *hpi;
-               hpi = &internal_config.hugepage_info[hp_sz_idx];
+               hpi = &internal_conf->hugepage_info[hp_sz_idx];
                used_hp[hp_sz_idx].hugepage_sz = hpi->hugepage_sz;
 
 #ifndef RTE_ARCH_64
@@ -260,16 +264,16 @@ eal_dynmem_hugepage_init(void)
 
        /* make a copy of socket_mem, needed for balanced allocation. */
        for (hp_sz_idx = 0; hp_sz_idx < RTE_MAX_NUMA_NODES; hp_sz_idx++)
-               memory[hp_sz_idx] = internal_config.socket_mem[hp_sz_idx];
+               memory[hp_sz_idx] = internal_conf->socket_mem[hp_sz_idx];
 
        /* calculate final number of pages */
        if (eal_dynmem_calc_num_pages_per_socket(memory,
-                       internal_config.hugepage_info, used_hp,
-                       internal_config.num_hugepage_sizes) < 0)
+                       internal_conf->hugepage_info, used_hp,
+                       internal_conf->num_hugepage_sizes) < 0)
                return -1;
 
        for (hp_sz_idx = 0;
-                       hp_sz_idx < (int)internal_config.num_hugepage_sizes;
+                       hp_sz_idx < (int)internal_conf->num_hugepage_sizes;
                        hp_sz_idx++) {
                for (socket_id = 0; socket_id < RTE_MAX_NUMA_NODES;
                                socket_id++) {
@@ -324,10 +328,10 @@ eal_dynmem_hugepage_init(void)
        }
 
        /* if socket limits were specified, set them */
-       if (internal_config.force_socket_limits) {
+       if (internal_conf->force_socket_limits) {
                unsigned int i;
                for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
-                       uint64_t limit = internal_config.socket_limit[i];
+                       uint64_t limit = internal_conf->socket_limit[i];
                        if (limit == 0)
                                continue;
                        if (rte_mem_alloc_validator_register("socket-limit",
@@ -344,9 +348,11 @@ get_socket_mem_size(int socket)
 {
        uint64_t size = 0;
        unsigned int i;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
-       for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
-               struct hugepage_info *hpi = &internal_config.hugepage_info[i];
+       for (i = 0; i < internal_conf->num_hugepage_sizes; i++) {
+               struct hugepage_info *hpi = &internal_conf->hugepage_info[i];
                size += hpi->hugepage_sz * hpi->num_pages[socket];
        }
 
@@ -362,13 +368,15 @@ eal_dynmem_calc_num_pages_per_socket(
        unsigned int requested, available;
        int total_num_pages = 0;
        uint64_t remaining_mem, cur_mem;
-       uint64_t total_mem = internal_config.memory;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+       uint64_t total_mem = internal_conf->memory;
 
        if (num_hp_info == 0)
                return -1;
 
        /* if specific memory amounts per socket weren't requested */
-       if (internal_config.force_sockets == 0) {
+       if (internal_conf->force_sockets == 0) {
                size_t total_size;
 #ifdef RTE_ARCH_64
                int cpu_per_socket[RTE_MAX_NUMA_NODES];
@@ -386,12 +394,12 @@ eal_dynmem_calc_num_pages_per_socket(
                 * sockets according to number of cores from CPU mask present
                 * on each socket.
                 */
-               total_size = internal_config.memory;
+               total_size = internal_conf->memory;
                for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0;
                                socket++) {
 
                        /* Set memory amount per socket */
-                       default_size = internal_config.memory *
+                       default_size = internal_conf->memory *
                                cpu_per_socket[socket] / rte_lcore_count();
 
                        /* Limit to maximum available memory on socket */
@@ -422,7 +430,7 @@ eal_dynmem_calc_num_pages_per_socket(
                /* in 32-bit mode, allocate all of the memory only on master
                 * lcore socket
                 */
-               total_size = internal_config.memory;
+               total_size = internal_conf->memory;
                for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0;
                                socket++) {
                        struct rte_config *cfg = rte_eal_get_configuration();
@@ -495,10 +503,10 @@ eal_dynmem_calc_num_pages_per_socket(
 
                /* if we didn't satisfy all memory requirements per socket */
                if (memory[socket] > 0 &&
-                               internal_config.socket_mem[socket] != 0) {
+                               internal_conf->socket_mem[socket] != 0) {
                        /* to prevent icc errors */
                        requested = (unsigned int)(
-                               internal_config.socket_mem[socket] / 0x100000);
+                               internal_conf->socket_mem[socket] / 0x100000);
                        available = requested -
                                ((unsigned int)(memory[socket] / 0x100000));
                        RTE_LOG(ERR, EAL, "Not enough memory available on "
@@ -510,7 +518,7 @@ eal_dynmem_calc_num_pages_per_socket(
 
        /* if we didn't satisfy total memory requirements */
        if (total_mem > 0) {
-               requested = (unsigned int)(internal_config.memory / 0x100000);
+               requested = (unsigned int)(internal_conf->memory / 0x100000);
                available = requested - (unsigned int)(total_mem / 0x100000);
                RTE_LOG(ERR, EAL, "Not enough memory available! "
                        "Requested: %uMB, available: %uMB\n",
index fd0292a..1220e2b 100644 (file)
@@ -715,6 +715,8 @@ rte_fbarray_init(struct rte_fbarray *arr, const char *name, unsigned int len,
        struct mem_area *ma = NULL;
        void *data = NULL;
        int fd = -1;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        if (arr == NULL) {
                rte_errno = EINVAL;
@@ -750,7 +752,7 @@ rte_fbarray_init(struct rte_fbarray *arr, const char *name, unsigned int len,
 
        fd = -1;
 
-       if (internal_config.no_shconf) {
+       if (internal_conf->no_shconf) {
                /* remap virtual area as writable */
                static const int flags = RTE_MAP_FORCE_ADDRESS |
                        RTE_MAP_PRIVATE | RTE_MAP_ANONYMOUS;
@@ -977,6 +979,8 @@ rte_fbarray_destroy(struct rte_fbarray *arr)
        size_t mmap_len;
        int fd, ret;
        char path[PATH_MAX];
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        if (arr == NULL) {
                rte_errno = EINVAL;
@@ -1010,7 +1014,7 @@ rte_fbarray_destroy(struct rte_fbarray *arr)
                goto out;
        }
        /* with no shconf, there were never any files to begin with */
-       if (!internal_config.no_shconf) {
+       if (!internal_conf->no_shconf) {
                /*
                 * attempt to get an exclusive lock on the file, to ensure it
                 * has been detached by all other processes
index 49d3ed0..c77ba97 100644 (file)
@@ -14,12 +14,14 @@ eal_mcfg_complete(void)
 {
        struct rte_config *cfg = rte_eal_get_configuration();
        struct rte_mem_config *mcfg = cfg->mem_config;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* ALL shared mem_config related INIT DONE */
        if (cfg->process_type == RTE_PROC_PRIMARY)
                mcfg->magic = RTE_MAGIC;
 
-       internal_config.init_complete = 1;
+       internal_conf->init_complete = 1;
 }
 
 void
@@ -48,18 +50,22 @@ void
 eal_mcfg_update_internal(void)
 {
        struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
-       internal_config.legacy_mem = mcfg->legacy_mem;
-       internal_config.single_file_segments = mcfg->single_file_segments;
+       internal_conf->legacy_mem = mcfg->legacy_mem;
+       internal_conf->single_file_segments = mcfg->single_file_segments;
 }
 
 void
 eal_mcfg_update_from_internal(void)
 {
        struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
-       mcfg->legacy_mem = internal_config.legacy_mem;
-       mcfg->single_file_segments = internal_config.single_file_segments;
+       mcfg->legacy_mem = internal_conf->legacy_mem;
+       mcfg->single_file_segments = internal_conf->single_file_segments;
        /* record current DPDK version */
        mcfg->version = RTE_VERSION;
 }
index 55189d0..e872c65 100644 (file)
@@ -74,13 +74,15 @@ eal_memalloc_is_contig(const struct rte_memseg_list *msl, void *start,
        void *end, *aligned_start, *aligned_end;
        size_t pgsz = (size_t)msl->page_sz;
        const struct rte_memseg *ms;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* for IOVA_VA, it's always contiguous */
        if (rte_eal_iova_mode() == RTE_IOVA_VA && !msl->external)
                return true;
 
        /* for legacy memory, it's always contiguous */
-       if (internal_config.legacy_mem)
+       if (internal_conf->legacy_mem)
                return true;
 
        end = RTE_PTR_ADD(start, len);
index 194904b..33917fa 100644 (file)
@@ -50,6 +50,8 @@ eal_get_virtual_area(void *requested_addr, size_t *size,
        uint64_t map_sz;
        void *mapped_addr, *aligned_addr;
        uint8_t try = 0;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        if (system_page_sz == 0)
                system_page_sz = rte_mem_page_size();
@@ -60,12 +62,12 @@ eal_get_virtual_area(void *requested_addr, size_t *size,
        allow_shrink = (flags & EAL_VIRTUAL_AREA_ALLOW_SHRINK) > 0;
        unmap = (flags & EAL_VIRTUAL_AREA_UNMAP) > 0;
 
-       if (next_baseaddr == NULL && internal_config.base_virtaddr != 0 &&
+       if (next_baseaddr == NULL && internal_conf->base_virtaddr != 0 &&
                        rte_eal_process_type() == RTE_PROC_PRIMARY)
-               next_baseaddr = (void *) internal_config.base_virtaddr;
+               next_baseaddr = (void *) internal_conf->base_virtaddr;
 
 #ifdef RTE_ARCH_64
-       if (next_baseaddr == NULL && internal_config.base_virtaddr == 0 &&
+       if (next_baseaddr == NULL && internal_conf->base_virtaddr == 0 &&
                        rte_eal_process_type() == RTE_PROC_PRIMARY)
                next_baseaddr = (void *) eal_get_baseaddr();
 #endif
@@ -364,6 +366,8 @@ void *
 rte_mem_iova2virt(rte_iova_t iova)
 {
        struct virtiova vi;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        memset(&vi, 0, sizeof(vi));
 
@@ -371,7 +375,7 @@ rte_mem_iova2virt(rte_iova_t iova)
        /* for legacy mem, we can get away with scanning VA-contiguous segments,
         * as we know they are PA-contiguous as well
         */
-       if (internal_config.legacy_mem)
+       if (internal_conf->legacy_mem)
                rte_memseg_contig_walk(find_virt_legacy, &vi);
        else
                rte_memseg_walk(find_virt, &vi);
@@ -452,8 +456,11 @@ int
 rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb,
                void *arg)
 {
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
        /* FreeBSD boots with legacy mem enabled by default */
-       if (internal_config.legacy_mem) {
+       if (internal_conf->legacy_mem) {
                RTE_LOG(DEBUG, EAL, "Registering mem event callbacks not supported\n");
                rte_errno = ENOTSUP;
                return -1;
@@ -464,8 +471,11 @@ rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb,
 int
 rte_mem_event_callback_unregister(const char *name, void *arg)
 {
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
        /* FreeBSD boots with legacy mem enabled by default */
-       if (internal_config.legacy_mem) {
+       if (internal_conf->legacy_mem) {
                RTE_LOG(DEBUG, EAL, "Registering mem event callbacks not supported\n");
                rte_errno = ENOTSUP;
                return -1;
@@ -477,8 +487,11 @@ int
 rte_mem_alloc_validator_register(const char *name,
                rte_mem_alloc_validator_t clb, int socket_id, size_t limit)
 {
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
        /* FreeBSD boots with legacy mem enabled by default */
-       if (internal_config.legacy_mem) {
+       if (internal_conf->legacy_mem) {
                RTE_LOG(DEBUG, EAL, "Registering mem alloc validators not supported\n");
                rte_errno = ENOTSUP;
                return -1;
@@ -490,8 +503,11 @@ rte_mem_alloc_validator_register(const char *name,
 int
 rte_mem_alloc_validator_unregister(const char *name, int socket_id)
 {
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
        /* FreeBSD boots with legacy mem enabled by default */
-       if (internal_config.legacy_mem) {
+       if (internal_conf->legacy_mem) {
                RTE_LOG(DEBUG, EAL, "Registering mem alloc validators not supported\n");
                rte_errno = ENOTSUP;
                return -1;
@@ -613,13 +629,15 @@ static int
 rte_eal_memdevice_init(void)
 {
        struct rte_config *config;
+       const struct internal_config *internal_conf;
 
        if (rte_eal_process_type() == RTE_PROC_SECONDARY)
                return 0;
 
+       internal_conf = eal_get_internal_configuration();
        config = rte_eal_get_configuration();
-       config->mem_config->nchannel = internal_config.force_nchannel;
-       config->mem_config->nrank = internal_config.force_nrank;
+       config->mem_config->nchannel = internal_conf->force_nchannel;
+       config->mem_config->nrank = internal_conf->force_nrank;
 
        return 0;
 }
@@ -989,6 +1007,9 @@ int
 rte_eal_memory_init(void)
 {
        struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
        int retval;
        RTE_LOG(DEBUG, EAL, "Setting up physically contiguous memory...\n");
 
@@ -1010,7 +1031,7 @@ rte_eal_memory_init(void)
        if (retval < 0)
                goto fail;
 
-       if (internal_config.no_shconf == 0 && rte_eal_memdevice_init() < 0)
+       if (internal_conf->no_shconf == 0 && rte_eal_memdevice_init() < 0)
                goto fail;
 
        return 0;
index 0546beb..6b909ce 100644 (file)
@@ -264,8 +264,11 @@ eal_option_device_parse(void)
 const char *
 eal_get_hugefile_prefix(void)
 {
-       if (internal_config.hugefile_prefix != NULL)
-               return internal_config.hugefile_prefix;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
+       if (internal_conf->hugefile_prefix != NULL)
+               return internal_conf->hugefile_prefix;
        return HUGEFILE_PREFIX_DEFAULT;
 }
 
@@ -1176,6 +1179,8 @@ static int
 eal_parse_iova_mode(const char *name)
 {
        int mode;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        if (name == NULL)
                return -1;
@@ -1187,7 +1192,7 @@ eal_parse_iova_mode(const char *name)
        else
                return -1;
 
-       internal_config.iova_mode = mode;
+       internal_conf->iova_mode = mode;
        return 0;
 }
 
@@ -1196,6 +1201,8 @@ eal_parse_base_virtaddr(const char *arg)
 {
        char *end;
        uint64_t addr;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        errno = 0;
        addr = strtoull(arg, &end, 16);
@@ -1215,7 +1222,7 @@ eal_parse_base_virtaddr(const char *arg)
         * it can align to 2MB for x86. So this alignment can also be used
         * on x86 and other architectures.
         */
-       internal_config.base_virtaddr =
+       internal_conf->base_virtaddr =
                RTE_PTR_ALIGN_CEIL((uintptr_t)addr, (size_t)RTE_PGSIZE_16M);
 
        return 0;
@@ -1668,12 +1675,14 @@ eal_adjust_config(struct internal_config *internal_cfg)
 {
        int i;
        struct rte_config *cfg = rte_eal_get_configuration();
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        if (!core_parsed)
                eal_auto_detect_cores(cfg);
 
-       if (internal_config.process_type == RTE_PROC_AUTO)
-               internal_config.process_type = eal_proc_type_detect();
+       if (internal_conf->process_type == RTE_PROC_AUTO)
+               internal_conf->process_type = eal_proc_type_detect();
 
        /* default master lcore is the first one */
        if (!master_lcore_parsed) {
@@ -1697,6 +1706,8 @@ int
 eal_check_common_options(struct internal_config *internal_cfg)
 {
        struct rte_config *cfg = rte_eal_get_configuration();
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        if (cfg->lcore_role[cfg->master_lcore] != ROLE_RTE) {
                RTE_LOG(ERR, EAL, "Master lcore is not enabled for DPDK\n");
@@ -1743,7 +1754,7 @@ eal_check_common_options(struct internal_config *internal_cfg)
                        "be specified together with --"OPT_NO_HUGE"\n");
                return -1;
        }
-       if (internal_config.force_socket_limits && internal_config.legacy_mem) {
+       if (internal_conf->force_socket_limits && internal_conf->legacy_mem) {
                RTE_LOG(ERR, EAL, "Option --"OPT_SOCKET_LIMIT
                        " is only supported in non-legacy memory mode\n");
        }
index 935e8fe..c649789 100644 (file)
@@ -201,11 +201,13 @@ int
 rte_mp_action_register(const char *name, rte_mp_t action)
 {
        struct action_entry *entry;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        if (validate_action_name(name) != 0)
                return -1;
 
-       if (internal_config.no_shconf) {
+       if (internal_conf->no_shconf) {
                RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
                rte_errno = ENOTSUP;
                return -1;
@@ -235,11 +237,13 @@ void
 rte_mp_action_unregister(const char *name)
 {
        struct action_entry *entry;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        if (validate_action_name(name) != 0)
                return;
 
-       if (internal_config.no_shconf) {
+       if (internal_conf->no_shconf) {
                RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
                return;
        }
@@ -315,6 +319,8 @@ process_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
        struct action_entry *entry;
        struct rte_mp_msg *msg = &m->msg;
        rte_mp_t action = NULL;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        RTE_LOG(DEBUG, EAL, "msg: %s\n", msg->name);
 
@@ -350,7 +356,7 @@ process_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
        pthread_mutex_unlock(&mp_mutex_action);
 
        if (!action) {
-               if (m->type == MP_REQ && !internal_config.init_complete) {
+               if (m->type == MP_REQ && !internal_conf->init_complete) {
                        /* if this is a request, and init is not yet complete,
                         * and callback wasn't registered, we should tell the
                         * requester to ignore our existence because we're not
@@ -581,11 +587,13 @@ rte_mp_channel_init(void)
        char path[PATH_MAX];
        int dir_fd;
        pthread_t mp_handle_tid;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* in no shared files mode, we do not have secondary processes support,
         * so no need to initialize IPC.
         */
-       if (internal_config.no_shconf) {
+       if (internal_conf->no_shconf) {
                RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC will be disabled\n");
                rte_errno = ENOTSUP;
                return -1;
@@ -804,10 +812,13 @@ check_input(const struct rte_mp_msg *msg)
 int
 rte_mp_sendmsg(struct rte_mp_msg *msg)
 {
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
        if (check_input(msg) != 0)
                return -1;
 
-       if (internal_config.no_shconf) {
+       if (internal_conf->no_shconf) {
                RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
                rte_errno = ENOTSUP;
                return -1;
@@ -957,6 +968,8 @@ rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
        struct dirent *ent;
        struct timeval now;
        struct timespec end;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        RTE_LOG(DEBUG, EAL, "request: %s\n", req->name);
 
@@ -967,7 +980,7 @@ rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
        if (check_input(req) != 0)
                goto end;
 
-       if (internal_config.no_shconf) {
+       if (internal_conf->no_shconf) {
                RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
                rte_errno = ENOTSUP;
                return -1;
@@ -1058,13 +1071,15 @@ rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts,
        struct timeval now;
        struct timespec *end;
        bool dummy_used = false;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        RTE_LOG(DEBUG, EAL, "request: %s\n", req->name);
 
        if (check_input(req) != 0)
                return -1;
 
-       if (internal_config.no_shconf) {
+       if (internal_conf->no_shconf) {
                RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
                rte_errno = ENOTSUP;
                return -1;
@@ -1198,6 +1213,8 @@ int
 rte_mp_reply(struct rte_mp_msg *msg, const char *peer)
 {
        RTE_LOG(DEBUG, EAL, "reply: %s\n", msg->name);
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        if (check_input(msg) != 0)
                return -1;
@@ -1208,7 +1225,7 @@ rte_mp_reply(struct rte_mp_msg *msg, const char *peer)
                return -1;
        }
 
-       if (internal_config.no_shconf) {
+       if (internal_conf->no_shconf) {
                RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
                return 0;
        }
index 370bb1b..a341070 100644 (file)
@@ -153,7 +153,9 @@ struct rte_thread_ctrl_params {
 static void *rte_thread_init(void *arg)
 {
        int ret;
-       rte_cpuset_t *cpuset = &internal_config.ctrl_cpuset;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+       rte_cpuset_t *cpuset = &internal_conf->ctrl_cpuset;
        struct rte_thread_ctrl_params *params = arg;
        void *(*start_routine)(void *) = params->start_routine;
        void *routine_arg = params->arg;
@@ -177,7 +179,9 @@ rte_ctrl_thread_create(pthread_t *thread, const char *name,
                const pthread_attr_t *attr,
                void *(*start_routine)(void *), void *arg)
 {
-       rte_cpuset_t *cpuset = &internal_config.ctrl_cpuset;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+       rte_cpuset_t *cpuset = &internal_conf->ctrl_cpuset;
        struct rte_thread_ctrl_params *params;
        int ret;
 
index c650bc0..2aaa167 100644 (file)
@@ -84,7 +84,6 @@ struct internal_config {
        /**< indicates whether EAL has completed initialization */
        unsigned int no_telemetry; /**< true to disable Telemetry */
 };
-extern struct internal_config internal_config; /**< Global EAL configuration. */
 
 void eal_reset_internal_config(struct internal_config *internal_cfg);
 
index 0592fcd..be4d0a8 100644 (file)
@@ -668,4 +668,26 @@ eal_mem_free(void *virt, size_t size);
 int
 eal_mem_set_dump(void *virt, size_t size, bool dump);
 
+/**
+ * Sets the runtime directory of DPDK
+ *
+ * @param run_dir
+ *   The new runtime directory path of DPDK
+ * @param size
+ *   The size of the new runtime directory path in bytes.
+ * @return
+ *   0 on success, (-1) on failure.
+ */
+int
+eal_set_runtime_dir(char *run_dir, size_t size);
+
+/**
+ * Get the internal configuration structure.
+ *
+ * @return
+ *   A pointer to the internal configuration structure.
+ */
+struct internal_config *
+eal_get_internal_configuration(void);
+
 #endif /* _EAL_PRIVATE_H_ */
index 51cdfc5..c70112f 100644 (file)
@@ -18,6 +18,7 @@
 #include <rte_common.h>
 #include <rte_spinlock.h>
 
+#include "eal_private.h"
 #include "eal_internal_cfg.h"
 #include "eal_memalloc.h"
 #include "malloc_elem.h"
@@ -42,6 +43,8 @@ malloc_elem_find_max_iova_contig(struct malloc_elem *elem, size_t align)
        rte_iova_t expected_iova;
        struct rte_memseg *ms;
        size_t page_sz, cur, max;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        page_sz = (size_t)elem->msl->page_sz;
        data_start = RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN);
@@ -60,7 +63,7 @@ malloc_elem_find_max_iova_contig(struct malloc_elem *elem, size_t align)
         */
        if (!elem->msl->external &&
                        (rte_eal_iova_mode() == RTE_IOVA_VA ||
-                               (internal_config.legacy_mem &&
+                               (internal_conf->legacy_mem &&
                                        rte_eal_has_hugepages())))
                return RTE_PTR_DIFF(data_end, contig_seg_start);
 
@@ -340,18 +343,24 @@ remove_elem(struct malloc_elem *elem)
 static int
 next_elem_is_adjacent(struct malloc_elem *elem)
 {
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
        return elem->next == RTE_PTR_ADD(elem, elem->size) &&
                        elem->next->msl == elem->msl &&
-                       (!internal_config.match_allocations ||
+                       (!internal_conf->match_allocations ||
                         elem->orig_elem == elem->next->orig_elem);
 }
 
 static int
 prev_elem_is_adjacent(struct malloc_elem *elem)
 {
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
        return elem == RTE_PTR_ADD(elem->prev, elem->prev->size) &&
                        elem->prev->msl == elem->msl &&
-                       (!internal_config.match_allocations ||
+                       (!internal_conf->match_allocations ||
                         elem->orig_elem == elem->prev->orig_elem);
 }
 
index bd50656..5a09247 100644 (file)
@@ -642,13 +642,15 @@ malloc_heap_alloc_on_heap_id(const char *type, size_t size,
        unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
        int socket_id;
        void *ret;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        rte_spinlock_lock(&(heap->lock));
 
        align = align == 0 ? 1 : align;
 
        /* for legacy mode, try once and with all flags */
-       if (internal_config.legacy_mem) {
+       if (internal_conf->legacy_mem) {
                ret = heap_alloc(heap, type, size, flags, align, bound, contig);
                goto alloc_unlock;
        }
@@ -832,6 +834,8 @@ malloc_heap_free(struct malloc_elem *elem)
        struct rte_memseg_list *msl;
        unsigned int i, n_segs, before_space, after_space;
        int ret;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
                return -1;
@@ -854,7 +858,7 @@ malloc_heap_free(struct malloc_elem *elem)
        /* ...of which we can't avail if we are in legacy mode, or if this is an
         * externally allocated segment.
         */
-       if (internal_config.legacy_mem || (msl->external > 0))
+       if (internal_conf->legacy_mem || (msl->external > 0))
                goto free_unlock;
 
        /* check if we can free any memory back to the system */
@@ -865,7 +869,7 @@ malloc_heap_free(struct malloc_elem *elem)
         * we will defer freeing these hugepages until the entire original allocation
         * can be freed
         */
-       if (internal_config.match_allocations && elem->size != elem->orig_size)
+       if (internal_conf->match_allocations && elem->size != elem->orig_size)
                goto free_unlock;
 
        /* probably, but let's make sure, as we may not be using up full page */
@@ -1323,10 +1327,11 @@ rte_eal_malloc_heap_init(void)
 {
        struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
        unsigned int i;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
-       if (internal_config.match_allocations) {
+       if (internal_conf->match_allocations)
                RTE_LOG(DEBUG, EAL, "Hugepages will be freed exactly as allocated.\n");
-       }
 
        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                /* assign min socket ID to external heaps */
index 036ff62..5f9032c 100644 (file)
@@ -7,6 +7,7 @@ if is_windows
        sources += files(
                'eal_common_bus.c',
                'eal_common_class.c',
+               'eal_common_config.c',
                'eal_common_debug.c',
                'eal_common_devargs.c',
                'eal_common_dynmem.c',
@@ -36,6 +37,7 @@ sources += files(
        'eal_common_bus.c',
        'eal_common_cpuflags.c',
        'eal_common_class.c',
+       'eal_common_config.c',
        'eal_common_debug.c',
        'eal_common_devargs.c',
        'eal_common_dev.c',
index 9988ea5..9a89556 100644 (file)
@@ -38,6 +38,7 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_alarm.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_dev.c
 
 # from common dir
+SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_common_config.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_common_lcore.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_common_timer.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_FREEBSD) += eal_common_memzone.c
index 00fc66b..021e36f 100644 (file)
@@ -58,9 +58,6 @@
 
 /* Allow the application to print its usage message too if set */
 static rte_usage_hook_t        rte_application_usage_hook = NULL;
-/* early configuration structure, when memory config is not mmapped */
-static struct rte_mem_config early_mem_config;
-
 /* define fd variable here, because file needs to be kept open for the
  * duration of the program, as we hold a write lock on it in the primary proc */
 static int mem_cfg_fd = -1;
@@ -69,26 +66,15 @@ static struct flock wr_lock = {
                .l_type = F_WRLCK,
                .l_whence = SEEK_SET,
                .l_start = offsetof(struct rte_mem_config, memsegs),
-               .l_len = sizeof(early_mem_config.memsegs),
-};
-
-/* Address of global and public configuration */
-static struct rte_config rte_config = {
-               .mem_config = &early_mem_config,
+               .l_len = RTE_SIZEOF_FIELD(struct rte_mem_config, memsegs),
 };
 
 /* internal configuration (per-core) */
 struct lcore_config lcore_config[RTE_MAX_LCORE];
 
-/* internal configuration */
-struct internal_config internal_config;
-
 /* used by rte_rdtsc() */
 int rte_cycles_vmware_tsc_map;
 
-/* platform-specific runtime dir */
-static char runtime_dir[PATH_MAX];
-
 static const char *default_runtime_dir = "/var/run";
 
 int
@@ -97,6 +83,7 @@ eal_create_runtime_dir(void)
        const char *directory = default_runtime_dir;
        const char *xdg_runtime_dir = getenv("XDG_RUNTIME_DIR");
        const char *fallback = "/tmp";
+       char run_dir[PATH_MAX];
        char tmp[PATH_MAX];
        int ret;
 
@@ -115,9 +102,9 @@ eal_create_runtime_dir(void)
        }
 
        /* create prefix-specific subdirectory under DPDK runtime dir */
-       ret = snprintf(runtime_dir, sizeof(runtime_dir), "%s/%s",
+       ret = snprintf(run_dir, sizeof(run_dir), "%s/%s",
                        tmp, eal_get_hugefile_prefix());
-       if (ret < 0 || ret == sizeof(runtime_dir)) {
+       if (ret < 0 || ret == sizeof(run_dir)) {
                RTE_LOG(ERR, EAL, "Error creating prefix-specific runtime path name\n");
                return -1;
        }
@@ -132,13 +119,16 @@ eal_create_runtime_dir(void)
                return -1;
        }
 
-       ret = mkdir(runtime_dir, 0700);
+       ret = mkdir(run_dir, 0700);
        if (ret < 0 && errno != EEXIST) {
                RTE_LOG(ERR, EAL, "Error creating '%s': %s\n",
-                       runtime_dir, strerror(errno));
+                       run_dir, strerror(errno));
                return -1;
        }
 
+       if (eal_set_runtime_dir(run_dir, sizeof(run_dir)))
+               return -1;
+
        return 0;
 }
 
@@ -151,33 +141,6 @@ eal_clean_runtime_dir(void)
        return 0;
 }
 
-
-const char *
-rte_eal_get_runtime_dir(void)
-{
-       return runtime_dir;
-}
-
-/* Return user provided mbuf pool ops name */
-const char *
-rte_eal_mbuf_user_pool_ops(void)
-{
-       return internal_config.user_mbuf_pool_ops_name;
-}
-
-/* Return a pointer to the configuration structure */
-struct rte_config *
-rte_eal_get_configuration(void)
-{
-       return &rte_config;
-}
-
-enum rte_iova_mode
-rte_eal_iova_mode(void)
-{
-       return rte_eal_get_configuration()->iova_mode;
-}
-
 /* parse a sysfs (or other) file containing one integer value */
 int
 eal_parse_sysfs_value(const char *filename, unsigned long *val)
@@ -219,21 +182,24 @@ eal_parse_sysfs_value(const char *filename, unsigned long *val)
 static int
 rte_eal_config_create(void)
 {
+       struct rte_config *config = rte_eal_get_configuration();
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
        size_t page_sz = sysconf(_SC_PAGE_SIZE);
-       size_t cfg_len = sizeof(*rte_config.mem_config);
+       size_t cfg_len = sizeof(struct rte_mem_config);
        size_t cfg_len_aligned = RTE_ALIGN(cfg_len, page_sz);
        void *rte_mem_cfg_addr, *mapped_mem_cfg_addr;
        int retval;
 
        const char *pathname = eal_runtime_config_path();
 
-       if (internal_config.no_shconf)
+       if (internal_conf->no_shconf)
                return 0;
 
        /* map the config before base address so that we don't waste a page */
-       if (internal_config.base_virtaddr != 0)
+       if (internal_conf->base_virtaddr != 0)
                rte_mem_cfg_addr = (void *)
-                       RTE_ALIGN_FLOOR(internal_config.base_virtaddr -
+                       RTE_ALIGN_FLOOR(internal_conf->base_virtaddr -
                        sizeof(struct rte_mem_config), page_sz);
        else
                rte_mem_cfg_addr = NULL;
@@ -287,14 +253,13 @@ rte_eal_config_create(void)
                return -1;
        }
 
-       memcpy(rte_mem_cfg_addr, &early_mem_config, sizeof(early_mem_config));
-       rte_config.mem_config = rte_mem_cfg_addr;
+       memcpy(rte_mem_cfg_addr, config->mem_config, sizeof(struct rte_mem_config));
+       config->mem_config = rte_mem_cfg_addr;
 
        /* store address of the config in the config itself so that secondary
         * processes could later map the config into this exact location
         */
-       rte_config.mem_config->mem_cfg_addr = (uintptr_t) rte_mem_cfg_addr;
-
+       config->mem_config->mem_cfg_addr = (uintptr_t) rte_mem_cfg_addr;
        return 0;
 }
 
@@ -304,8 +269,12 @@ rte_eal_config_attach(void)
 {
        void *rte_mem_cfg_addr;
        const char *pathname = eal_runtime_config_path();
+       struct rte_config *config = rte_eal_get_configuration();
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
 
-       if (internal_config.no_shconf)
+       if (internal_conf->no_shconf)
                return 0;
 
        if (mem_cfg_fd < 0){
@@ -317,7 +286,7 @@ rte_eal_config_attach(void)
                }
        }
 
-       rte_mem_cfg_addr = mmap(NULL, sizeof(*rte_config.mem_config),
+       rte_mem_cfg_addr = mmap(NULL, sizeof(*config->mem_config),
                                PROT_READ, MAP_SHARED, mem_cfg_fd, 0);
        /* don't close the fd here, it will be closed on reattach */
        if (rte_mem_cfg_addr == MAP_FAILED) {
@@ -328,7 +297,7 @@ rte_eal_config_attach(void)
                return -1;
        }
 
-       rte_config.mem_config = rte_mem_cfg_addr;
+       config->mem_config = rte_mem_cfg_addr;
 
        return 0;
 }
@@ -339,16 +308,19 @@ rte_eal_config_reattach(void)
 {
        struct rte_mem_config *mem_config;
        void *rte_mem_cfg_addr;
+       struct rte_config *config = rte_eal_get_configuration();
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
-       if (internal_config.no_shconf)
+       if (internal_conf->no_shconf)
                return 0;
 
        /* save the address primary process has mapped shared config to */
        rte_mem_cfg_addr =
-                       (void *)(uintptr_t)rte_config.mem_config->mem_cfg_addr;
+                       (void *)(uintptr_t)config->mem_config->mem_cfg_addr;
 
        /* unmap original config */
-       munmap(rte_config.mem_config, sizeof(struct rte_mem_config));
+       munmap(config->mem_config, sizeof(struct rte_mem_config));
 
        /* remap the config at proper address */
        mem_config = (struct rte_mem_config *) mmap(rte_mem_cfg_addr,
@@ -372,7 +344,7 @@ rte_eal_config_reattach(void)
                return -1;
        }
 
-       rte_config.mem_config = mem_config;
+       config->mem_config = mem_config;
 
        return 0;
 }
@@ -383,9 +355,11 @@ eal_proc_type_detect(void)
 {
        enum rte_proc_type_t ptype = RTE_PROC_PRIMARY;
        const char *pathname = eal_runtime_config_path();
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* if there no shared config, there can be no secondary processes */
-       if (!internal_config.no_shconf) {
+       if (!internal_conf->no_shconf) {
                /* if we can open the file but not get a write-lock we are a
                 * secondary process. NOTE: if we get a file handle back, we
                 * keep that open and don't close it to prevent a race condition
@@ -406,9 +380,13 @@ eal_proc_type_detect(void)
 static int
 rte_config_init(void)
 {
-       rte_config.process_type = internal_config.process_type;
+       struct rte_config *config = rte_eal_get_configuration();
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
-       switch (rte_config.process_type){
+       config->process_type = internal_conf->process_type;
+
+       switch (config->process_type) {
        case RTE_PROC_PRIMARY:
                if (rte_eal_config_create() < 0)
                        return -1;
@@ -429,7 +407,7 @@ rte_config_init(void)
        case RTE_PROC_AUTO:
        case RTE_PROC_INVALID:
                RTE_LOG(ERR, EAL, "Invalid process type %d\n",
-                       rte_config.process_type);
+                       config->process_type);
                return -1;
        }
 
@@ -467,9 +445,11 @@ eal_get_hugepage_mem_size(void)
 {
        uint64_t size = 0;
        unsigned i, j;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
-       for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
-               struct hugepage_info *hpi = &internal_config.hugepage_info[i];
+       for (i = 0; i < internal_conf->num_hugepage_sizes; i++) {
+               struct hugepage_info *hpi = &internal_conf->hugepage_info[i];
                if (strnlen(hpi->hugedir, sizeof(hpi->hugedir)) != 0) {
                        for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
                                size += hpi->hugepage_sz * hpi->num_pages[j];
@@ -491,6 +471,8 @@ eal_log_level_parse(int argc, char **argv)
        const int old_optopt = optopt;
        const int old_optreset = optreset;
        char * const old_optarg = optarg;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        argvopt = argv;
        optind = 1;
@@ -506,7 +488,7 @@ eal_log_level_parse(int argc, char **argv)
                        break;
 
                ret = (opt == OPT_LOG_LEVEL_NUM) ?
-                       eal_parse_common_option(opt, optarg, &internal_config) : 0;
+                   eal_parse_common_option(opt, optarg, internal_conf) : 0;
 
                /* common parser is not happy */
                if (ret < 0)
@@ -532,6 +514,8 @@ eal_parse_args(int argc, char **argv)
        const int old_optopt = optopt;
        const int old_optreset = optreset;
        char * const old_optarg = optarg;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        argvopt = argv;
        optind = 1;
@@ -547,7 +531,7 @@ eal_parse_args(int argc, char **argv)
                        goto out;
                }
 
-               ret = eal_parse_common_option(opt, optarg, &internal_config);
+               ret = eal_parse_common_option(opt, optarg, internal_conf);
                /* common parser is not happy */
                if (ret < 0) {
                        eal_usage(prgname);
@@ -566,11 +550,11 @@ eal_parse_args(int argc, char **argv)
                                RTE_LOG(ERR, EAL, "Could not store mbuf pool ops name\n");
                        else {
                                /* free old ops name */
-                               if (internal_config.user_mbuf_pool_ops_name !=
+                               if (internal_conf->user_mbuf_pool_ops_name !=
                                                NULL)
-                                       free(internal_config.user_mbuf_pool_ops_name);
+                                       free(internal_conf->user_mbuf_pool_ops_name);
 
-                               internal_config.user_mbuf_pool_ops_name =
+                               internal_conf->user_mbuf_pool_ops_name =
                                                ops_name;
                        }
                        break;
@@ -598,20 +582,20 @@ eal_parse_args(int argc, char **argv)
        }
 
        /* create runtime data directory */
-       if (internal_config.no_shconf == 0 &&
+       if (internal_conf->no_shconf == 0 &&
                        eal_create_runtime_dir() < 0) {
                RTE_LOG(ERR, EAL, "Cannot create runtime directory\n");
                ret = -1;
                goto out;
        }
 
-       if (eal_adjust_config(&internal_config) != 0) {
+       if (eal_adjust_config(internal_conf) != 0) {
                ret = -1;
                goto out;
        }
 
        /* sanity checks */
-       if (eal_check_common_options(&internal_config) != 0) {
+       if (eal_check_common_options(internal_conf) != 0) {
                eal_usage(prgname);
                ret = -1;
                goto out;
@@ -649,8 +633,9 @@ static void
 eal_check_mem_on_local_socket(void)
 {
        int socket_id;
+       const struct rte_config *config = rte_eal_get_configuration();
 
-       socket_id = rte_lcore_to_socket_id(rte_config.master_lcore);
+       socket_id = rte_lcore_to_socket_id(config->master_lcore);
 
        if (rte_memseg_list_walk(check_socket, &socket_id) == 0)
                RTE_LOG(WARNING, EAL, "WARNING: Master core has no memory on local socket!\n");
@@ -662,13 +647,6 @@ sync_func(__rte_unused void *arg)
 {
        return 0;
 }
-
-/* return non-zero if hugepages are enabled. */
-int rte_eal_has_hugepages(void)
-{
-       return !internal_config.no_hugetlbfs;
-}
-
 /* Abstraction for port I/0 privilege */
 int
 rte_eal_iopl_init(void)
@@ -699,6 +677,9 @@ rte_eal_init(int argc, char **argv)
        static rte_atomic32_t run_once = RTE_ATOMIC32_INIT(0);
        char cpuset[RTE_CPU_AFFINITY_STR_LEN];
        char thread_name[RTE_MAX_THREAD_NAME_LEN];
+       const struct rte_config *config = rte_eal_get_configuration();
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* checks if the machine is adequate */
        if (!rte_cpu_is_supported()) {
@@ -715,7 +696,7 @@ rte_eal_init(int argc, char **argv)
 
        thread_id = pthread_self();
 
-       eal_reset_internal_config(&internal_config);
+       eal_reset_internal_config(internal_conf);
 
        /* clone argv to report out later in telemetry */
        eal_save_args(argc, argv);
@@ -738,7 +719,7 @@ rte_eal_init(int argc, char **argv)
        }
 
        /* FreeBSD always uses legacy memory model */
-       internal_config.legacy_mem = true;
+       internal_conf->legacy_mem = true;
 
        if (eal_plugins_init() < 0) {
                rte_eal_init_alert("Cannot init plugins");
@@ -795,7 +776,7 @@ rte_eal_init(int argc, char **argv)
        }
 
        /* if no EAL option "--iova-mode=<pa|va>", use bus IOVA scheme */
-       if (internal_config.iova_mode == RTE_IOVA_DC) {
+       if (internal_conf->iova_mode == RTE_IOVA_DC) {
                /* autodetect the IOVA mapping mode (default is RTE_IOVA_PA) */
                enum rte_iova_mode iova_mode = rte_bus_get_iommu_class();
 
@@ -804,15 +785,15 @@ rte_eal_init(int argc, char **argv)
                rte_eal_get_configuration()->iova_mode = iova_mode;
        } else {
                rte_eal_get_configuration()->iova_mode =
-                       internal_config.iova_mode;
+                       internal_conf->iova_mode;
        }
 
        RTE_LOG(INFO, EAL, "Selected IOVA mode '%s'\n",
                rte_eal_iova_mode() == RTE_IOVA_PA ? "PA" : "VA");
 
-       if (internal_config.no_hugetlbfs == 0) {
+       if (internal_conf->no_hugetlbfs == 0) {
                /* rte_config isn't initialized yet */
-               ret = internal_config.process_type == RTE_PROC_PRIMARY ?
+               ret = internal_conf->process_type == RTE_PROC_PRIMARY ?
                        eal_hugepage_info_init() :
                        eal_hugepage_info_read();
                if (ret < 0) {
@@ -823,14 +804,14 @@ rte_eal_init(int argc, char **argv)
                }
        }
 
-       if (internal_config.memory == 0 && internal_config.force_sockets == 0) {
-               if (internal_config.no_hugetlbfs)
-                       internal_config.memory = MEMSIZE_IF_NO_HUGE_PAGE;
+       if (internal_conf->memory == 0 && internal_conf->force_sockets == 0) {
+               if (internal_conf->no_hugetlbfs)
+                       internal_conf->memory = MEMSIZE_IF_NO_HUGE_PAGE;
                else
-                       internal_config.memory = eal_get_hugepage_mem_size();
+                       internal_conf->memory = eal_get_hugepage_mem_size();
        }
 
-       if (internal_config.vmware_tsc_map == 1) {
+       if (internal_conf->vmware_tsc_map == 1) {
 #ifdef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
                rte_cycles_vmware_tsc_map = 1;
                RTE_LOG (DEBUG, EAL, "Using VMWARE TSC MAP, "
@@ -877,12 +858,12 @@ rte_eal_init(int argc, char **argv)
 
        eal_check_mem_on_local_socket();
 
-       eal_thread_init_master(rte_config.master_lcore);
+       eal_thread_init_master(config->master_lcore);
 
        ret = eal_thread_dump_affinity(cpuset, sizeof(cpuset));
 
        RTE_LOG(DEBUG, EAL, "Master lcore %u is ready (tid=%p;cpuset=[%s%s])\n",
-               rte_config.master_lcore, thread_id, cpuset,
+               config->master_lcore, thread_id, cpuset,
                ret == 0 ? "" : "...");
 
        RTE_LCORE_FOREACH_SLAVE(i) {
@@ -951,14 +932,14 @@ rte_eal_init(int argc, char **argv)
         * In no_shconf mode, no runtime directory is created in the first
         * place, so no cleanup needed.
         */
-       if (!internal_config.no_shconf && eal_clean_runtime_dir() < 0) {
+       if (!internal_conf->no_shconf && eal_clean_runtime_dir() < 0) {
                rte_eal_init_alert("Cannot clear runtime directory");
                return -1;
        }
-       if (!internal_config.no_telemetry) {
+       if (!internal_conf->no_telemetry) {
                const char *error_str = NULL;
                if (rte_telemetry_init(rte_eal_get_runtime_dir(),
-                               &internal_config.ctrl_cpuset, &error_str)
+                               &internal_conf->ctrl_cpuset, &error_str)
                                != 0) {
                        rte_eal_init_alert(error_str);
                        return -1;
@@ -975,28 +956,21 @@ rte_eal_init(int argc, char **argv)
 int
 rte_eal_cleanup(void)
 {
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
        rte_service_finalize();
        rte_mp_channel_cleanup();
        rte_trace_save();
        eal_trace_fini();
-       eal_cleanup_config(&internal_config);
+       eal_cleanup_config(internal_conf);
        return 0;
 }
 
-enum rte_proc_type_t
-rte_eal_process_type(void)
-{
-       return rte_config.process_type;
-}
-
-int rte_eal_has_pci(void)
-{
-       return !internal_config.no_pci;
-}
-
 int rte_eal_create_uio_dev(void)
 {
-       return internal_config.create_uio_dev;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+       return internal_conf->create_uio_dev;
 }
 
 enum rte_intr_mode
index 32012e1..408f054 100644 (file)
@@ -8,6 +8,8 @@
 
 #include <rte_log.h>
 #include <fcntl.h>
+
+#include "eal_private.h"
 #include "eal_hugepages.h"
 #include "eal_internal_cfg.h"
 #include "eal_filesystem.h"
@@ -55,12 +57,15 @@ eal_hugepage_info_init(void)
        size_t sysctl_size;
        int num_buffers, fd, error;
        int64_t buffer_size;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
        /* re-use the linux "internal config" structure for our memory data */
-       struct hugepage_info *hpi = &internal_config.hugepage_info[0];
+       struct hugepage_info *hpi = &internal_conf->hugepage_info[0];
        struct hugepage_info *tmp_hpi;
        unsigned int i;
 
-       internal_config.num_hugepage_sizes = 1;
+       internal_conf->num_hugepage_sizes = 1;
 
        sysctl_size = sizeof(num_buffers);
        error = sysctlbyname("hw.contigmem.num_buffers", &num_buffers,
@@ -102,27 +107,27 @@ eal_hugepage_info_init(void)
        hpi->lock_descriptor = fd;
 
        /* for no shared files mode, do not create shared memory config */
-       if (internal_config.no_shconf)
+       if (internal_conf->no_shconf)
                return 0;
 
        tmp_hpi = create_shared_memory(eal_hugepage_info_path(),
-                       sizeof(internal_config.hugepage_info));
+                       sizeof(internal_conf->hugepage_info));
        if (tmp_hpi == NULL ) {
                RTE_LOG(ERR, EAL, "Failed to create shared memory!\n");
                return -1;
        }
 
-       memcpy(tmp_hpi, hpi, sizeof(internal_config.hugepage_info));
+       memcpy(tmp_hpi, hpi, sizeof(internal_conf->hugepage_info));
 
        /* we've copied file descriptors along with everything else, but they
         * will be invalid in secondary process, so overwrite them
         */
-       for (i = 0; i < RTE_DIM(internal_config.hugepage_info); i++) {
+       for (i = 0; i < RTE_DIM(internal_conf->hugepage_info); i++) {
                struct hugepage_info *tmp = &tmp_hpi[i];
                tmp->lock_descriptor = -1;
        }
 
-       if (munmap(tmp_hpi, sizeof(internal_config.hugepage_info)) < 0) {
+       if (munmap(tmp_hpi, sizeof(internal_conf->hugepage_info)) < 0) {
                RTE_LOG(ERR, EAL, "Failed to unmap shared memory!\n");
                return -1;
        }
@@ -134,21 +139,24 @@ eal_hugepage_info_init(void)
 int
 eal_hugepage_info_read(void)
 {
-       struct hugepage_info *hpi = &internal_config.hugepage_info[0];
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
+       struct hugepage_info *hpi = &internal_conf->hugepage_info[0];
        struct hugepage_info *tmp_hpi;
 
-       internal_config.num_hugepage_sizes = 1;
+       internal_conf->num_hugepage_sizes = 1;
 
        tmp_hpi = open_shared_memory(eal_hugepage_info_path(),
-                                 sizeof(internal_config.hugepage_info));
+                                 sizeof(internal_conf->hugepage_info));
        if (tmp_hpi == NULL) {
                RTE_LOG(ERR, EAL, "Failed to open shared memory!\n");
                return -1;
        }
 
-       memcpy(hpi, tmp_hpi, sizeof(internal_config.hugepage_info));
+       memcpy(hpi, tmp_hpi, sizeof(internal_conf->hugepage_info));
 
-       if (munmap(tmp_hpi, sizeof(internal_config.hugepage_info)) < 0) {
+       if (munmap(tmp_hpi, sizeof(internal_conf->hugepage_info)) < 0) {
                RTE_LOG(ERR, EAL, "Failed to unmap shared memory!\n");
                return -1;
        }
index 72a30f2..b8b337a 100644 (file)
@@ -57,12 +57,14 @@ rte_eal_hugepage_init(void)
        uint64_t total_mem = 0;
        void *addr;
        unsigned int i, j, seg_idx = 0;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* get pointer to global configuration */
        mcfg = rte_eal_get_configuration()->mem_config;
 
        /* for debug purposes, hugetlbfs can be disabled */
-       if (internal_config.no_hugetlbfs) {
+       if (internal_conf->no_hugetlbfs) {
                struct rte_memseg_list *msl;
                uint64_t mem_sz, page_sz;
                int n_segs;
@@ -70,7 +72,7 @@ rte_eal_hugepage_init(void)
                /* create a memseg list */
                msl = &mcfg->memsegs[0];
 
-               mem_sz = internal_config.memory;
+               mem_sz = internal_conf->memory;
                page_sz = RTE_PGSIZE_4K;
                n_segs = mem_sz / page_sz;
 
@@ -96,17 +98,17 @@ rte_eal_hugepage_init(void)
        }
 
        /* map all hugepages and sort them */
-       for (i = 0; i < internal_config.num_hugepage_sizes; i ++){
+       for (i = 0; i < internal_conf->num_hugepage_sizes; i++) {
                struct hugepage_info *hpi;
                rte_iova_t prev_end = 0;
                int prev_ms_idx = -1;
                uint64_t page_sz, mem_needed;
                unsigned int n_pages, max_pages;
 
-               hpi = &internal_config.hugepage_info[i];
+               hpi = &internal_conf->hugepage_info[i];
                page_sz = hpi->hugepage_sz;
                max_pages = hpi->num_pages[0];
-               mem_needed = RTE_ALIGN_CEIL(internal_config.memory - total_mem,
+               mem_needed = RTE_ALIGN_CEIL(internal_conf->memory - total_mem,
                                page_sz);
 
                n_pages = RTE_MIN(mem_needed / page_sz, max_pages);
@@ -210,14 +212,14 @@ rte_eal_hugepage_init(void)
 
                        total_mem += seg->len;
                }
-               if (total_mem >= internal_config.memory)
+               if (total_mem >= internal_conf->memory)
                        break;
        }
-       if (total_mem < internal_config.memory) {
+       if (total_mem < internal_conf->memory) {
                RTE_LOG(ERR, EAL, "Couldn't reserve requested memory, "
                                "requested: %" PRIu64 "M "
                                "available: %" PRIu64 "M\n",
-                               internal_config.memory >> 20, total_mem >> 20);
+                               internal_conf->memory >> 20, total_mem >> 20);
                return -1;
        }
        return 0;
@@ -250,13 +252,15 @@ attach_segment(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
 int
 rte_eal_hugepage_attach(void)
 {
-       const struct hugepage_info *hpi;
+       struct hugepage_info *hpi;
        int fd_hugepage = -1;
        unsigned int i;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
-       hpi = &internal_config.hugepage_info[0];
+       hpi = &internal_conf->hugepage_info[0];
 
-       for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
+       for (i = 0; i < internal_conf->num_hugepage_sizes; i++) {
                const struct hugepage_info *cur_hpi = &hpi[i];
                struct attach_walk_args wa;
 
@@ -333,9 +337,11 @@ memseg_primary_init(void)
        int hpi_idx, msl_idx = 0;
        struct rte_memseg_list *msl;
        uint64_t max_mem, total_mem;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* no-huge does not need this at all */
-       if (internal_config.no_hugetlbfs)
+       if (internal_conf->no_hugetlbfs)
                return 0;
 
        /* FreeBSD has an issue where core dump will dump the entire memory
@@ -352,7 +358,7 @@ memseg_primary_init(void)
        total_mem = 0;
 
        /* create memseg lists */
-       for (hpi_idx = 0; hpi_idx < (int) internal_config.num_hugepage_sizes;
+       for (hpi_idx = 0; hpi_idx < (int) internal_conf->num_hugepage_sizes;
                        hpi_idx++) {
                uint64_t max_type_mem, total_type_mem = 0;
                uint64_t avail_mem;
@@ -360,7 +366,7 @@ memseg_primary_init(void)
                struct hugepage_info *hpi;
                uint64_t hugepage_sz;
 
-               hpi = &internal_config.hugepage_info[hpi_idx];
+               hpi = &internal_conf->hugepage_info[hpi_idx];
                hugepage_sz = hpi->hugepage_sz;
 
                /* no NUMA support on FreeBSD */
index 180fc51..9939b3d 100644 (file)
@@ -45,6 +45,7 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_alarm.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_dev.c
 
 # from common dir
+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_config.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_lcore.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_timer.c
 SRCS-$(CONFIG_RTE_EXEC_ENV_LINUX) += eal_common_memzone.c
index 28a8b78..e7068f1 100644 (file)
@@ -72,9 +72,6 @@
 /* Allow the application to print its usage message too if set */
 static rte_usage_hook_t        rte_application_usage_hook = NULL;
 
-/* early configuration structure, when memory config is not mmapped */
-static struct rte_mem_config early_mem_config;
-
 /* define fd variable here, because file needs to be kept open for the
  * duration of the program, as we hold a write lock on it in the primary proc */
 static int mem_cfg_fd = -1;
@@ -83,26 +80,15 @@ static struct flock wr_lock = {
                .l_type = F_WRLCK,
                .l_whence = SEEK_SET,
                .l_start = offsetof(struct rte_mem_config, memsegs),
-               .l_len = sizeof(early_mem_config.memsegs),
-};
-
-/* Address of global and public configuration */
-static struct rte_config rte_config = {
-               .mem_config = &early_mem_config,
+               .l_len = RTE_SIZEOF_FIELD(struct rte_mem_config, memsegs),
 };
 
 /* internal configuration (per-core) */
 struct lcore_config lcore_config[RTE_MAX_LCORE];
 
-/* internal configuration */
-struct internal_config internal_config;
-
 /* used by rte_rdtsc() */
 int rte_cycles_vmware_tsc_map;
 
-/* platform-specific runtime dir */
-static char runtime_dir[PATH_MAX];
-
 static const char *default_runtime_dir = "/var/run";
 
 int
@@ -111,6 +97,7 @@ eal_create_runtime_dir(void)
        const char *directory = default_runtime_dir;
        const char *xdg_runtime_dir = getenv("XDG_RUNTIME_DIR");
        const char *fallback = "/tmp";
+       char run_dir[PATH_MAX];
        char tmp[PATH_MAX];
        int ret;
 
@@ -129,9 +116,9 @@ eal_create_runtime_dir(void)
        }
 
        /* create prefix-specific subdirectory under DPDK runtime dir */
-       ret = snprintf(runtime_dir, sizeof(runtime_dir), "%s/%s",
+       ret = snprintf(run_dir, sizeof(run_dir), "%s/%s",
                        tmp, eal_get_hugefile_prefix());
-       if (ret < 0 || ret == sizeof(runtime_dir)) {
+       if (ret < 0 || ret == sizeof(run_dir)) {
                RTE_LOG(ERR, EAL, "Error creating prefix-specific runtime path name\n");
                return -1;
        }
@@ -146,19 +133,23 @@ eal_create_runtime_dir(void)
                return -1;
        }
 
-       ret = mkdir(runtime_dir, 0700);
+       ret = mkdir(run_dir, 0700);
        if (ret < 0 && errno != EEXIST) {
                RTE_LOG(ERR, EAL, "Error creating '%s': %s\n",
-                       runtime_dir, strerror(errno));
+                       run_dir, strerror(errno));
                return -1;
        }
 
+       if (eal_set_runtime_dir(run_dir, sizeof(run_dir)))
+               return -1;
+
        return 0;
 }
 
 int
 eal_clean_runtime_dir(void)
 {
+       const char *runtime_dir = rte_eal_get_runtime_dir();
        DIR *dir;
        struct dirent *dirent;
        int dir_fd, fd, lck_result;
@@ -241,32 +232,6 @@ error:
        return -1;
 }
 
-const char *
-rte_eal_get_runtime_dir(void)
-{
-       return runtime_dir;
-}
-
-/* Return user provided mbuf pool ops name */
-const char *
-rte_eal_mbuf_user_pool_ops(void)
-{
-       return internal_config.user_mbuf_pool_ops_name;
-}
-
-/* Return a pointer to the configuration structure */
-struct rte_config *
-rte_eal_get_configuration(void)
-{
-       return &rte_config;
-}
-
-enum rte_iova_mode
-rte_eal_iova_mode(void)
-{
-       return rte_eal_get_configuration()->iova_mode;
-}
-
 /* parse a sysfs (or other) file containing one integer value */
 int
 eal_parse_sysfs_value(const char *filename, unsigned long *val)
@@ -308,21 +273,24 @@ eal_parse_sysfs_value(const char *filename, unsigned long *val)
 static int
 rte_eal_config_create(void)
 {
+       struct rte_config *config = rte_eal_get_configuration();
        size_t page_sz = sysconf(_SC_PAGE_SIZE);
-       size_t cfg_len = sizeof(*rte_config.mem_config);
+       size_t cfg_len = sizeof(*config->mem_config);
        size_t cfg_len_aligned = RTE_ALIGN(cfg_len, page_sz);
        void *rte_mem_cfg_addr, *mapped_mem_cfg_addr;
        int retval;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        const char *pathname = eal_runtime_config_path();
 
-       if (internal_config.no_shconf)
+       if (internal_conf->no_shconf)
                return 0;
 
        /* map the config before hugepage address so that we don't waste a page */
-       if (internal_config.base_virtaddr != 0)
+       if (internal_conf->base_virtaddr != 0)
                rte_mem_cfg_addr = (void *)
-                       RTE_ALIGN_FLOOR(internal_config.base_virtaddr -
+                       RTE_ALIGN_FLOOR(internal_conf->base_virtaddr -
                        sizeof(struct rte_mem_config), page_sz);
        else
                rte_mem_cfg_addr = NULL;
@@ -376,14 +344,14 @@ rte_eal_config_create(void)
                return -1;
        }
 
-       memcpy(rte_mem_cfg_addr, &early_mem_config, sizeof(early_mem_config));
-       rte_config.mem_config = rte_mem_cfg_addr;
+       memcpy(rte_mem_cfg_addr, config->mem_config, sizeof(struct rte_mem_config));
+       config->mem_config = rte_mem_cfg_addr;
 
        /* store address of the config in the config itself so that secondary
-        * processes could later map the config into this exact location */
-       rte_config.mem_config->mem_cfg_addr = (uintptr_t) rte_mem_cfg_addr;
-
-       rte_config.mem_config->dma_maskbits = 0;
+        * processes could later map the config into this exact location
+        */
+       config->mem_config->mem_cfg_addr = (uintptr_t) rte_mem_cfg_addr;
+       config->mem_config->dma_maskbits = 0;
 
        return 0;
 }
@@ -392,11 +360,14 @@ rte_eal_config_create(void)
 static int
 rte_eal_config_attach(void)
 {
+       struct rte_config *config = rte_eal_get_configuration();
        struct rte_mem_config *mem_config;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        const char *pathname = eal_runtime_config_path();
 
-       if (internal_config.no_shconf)
+       if (internal_conf->no_shconf)
                return 0;
 
        if (mem_cfg_fd < 0){
@@ -419,7 +390,7 @@ rte_eal_config_attach(void)
                return -1;
        }
 
-       rte_config.mem_config = mem_config;
+       config->mem_config = mem_config;
 
        return 0;
 }
@@ -428,17 +399,21 @@ rte_eal_config_attach(void)
 static int
 rte_eal_config_reattach(void)
 {
+       struct rte_config *config = rte_eal_get_configuration();
        struct rte_mem_config *mem_config;
        void *rte_mem_cfg_addr;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
-       if (internal_config.no_shconf)
+       if (internal_conf->no_shconf)
                return 0;
 
        /* save the address primary process has mapped shared config to */
-       rte_mem_cfg_addr = (void *) (uintptr_t) rte_config.mem_config->mem_cfg_addr;
+       rte_mem_cfg_addr =
+               (void *) (uintptr_t) config->mem_config->mem_cfg_addr;
 
        /* unmap original config */
-       munmap(rte_config.mem_config, sizeof(struct rte_mem_config));
+       munmap(config->mem_config, sizeof(struct rte_mem_config));
 
        /* remap the config at proper address */
        mem_config = (struct rte_mem_config *) mmap(rte_mem_cfg_addr,
@@ -462,7 +437,7 @@ rte_eal_config_reattach(void)
                return -1;
        }
 
-       rte_config.mem_config = mem_config;
+       config->mem_config = mem_config;
 
        return 0;
 }
@@ -473,9 +448,11 @@ eal_proc_type_detect(void)
 {
        enum rte_proc_type_t ptype = RTE_PROC_PRIMARY;
        const char *pathname = eal_runtime_config_path();
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* if there no shared config, there can be no secondary processes */
-       if (!internal_config.no_shconf) {
+       if (!internal_conf->no_shconf) {
                /* if we can open the file but not get a write-lock we are a
                 * secondary process. NOTE: if we get a file handle back, we
                 * keep that open and don't close it to prevent a race condition
@@ -496,9 +473,13 @@ eal_proc_type_detect(void)
 static int
 rte_config_init(void)
 {
-       rte_config.process_type = internal_config.process_type;
+       struct rte_config *config = rte_eal_get_configuration();
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
-       switch (rte_config.process_type){
+       config->process_type = internal_conf->process_type;
+
+       switch (config->process_type) {
        case RTE_PROC_PRIMARY:
                if (rte_eal_config_create() < 0)
                        return -1;
@@ -519,7 +500,7 @@ rte_config_init(void)
        case RTE_PROC_AUTO:
        case RTE_PROC_INVALID:
                RTE_LOG(ERR, EAL, "Invalid process type %d\n",
-                       rte_config.process_type);
+                       config->process_type);
                return -1;
        }
 
@@ -531,17 +512,19 @@ static void
 eal_hugedirs_unlock(void)
 {
        int i;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        for (i = 0; i < MAX_HUGEPAGE_SIZES; i++)
        {
                /* skip uninitialized */
-               if (internal_config.hugepage_info[i].lock_descriptor < 0)
+               if (internal_conf->hugepage_info[i].lock_descriptor < 0)
                        continue;
                /* unlock hugepage file */
-               flock(internal_config.hugepage_info[i].lock_descriptor, LOCK_UN);
-               close(internal_config.hugepage_info[i].lock_descriptor);
+               flock(internal_conf->hugepage_info[i].lock_descriptor, LOCK_UN);
+               close(internal_conf->hugepage_info[i].lock_descriptor);
                /* reset the field */
-               internal_config.hugepage_info[i].lock_descriptor = -1;
+               internal_conf->hugepage_info[i].lock_descriptor = -1;
        }
 }
 
@@ -630,6 +613,8 @@ eal_parse_socket_arg(char *strval, volatile uint64_t *socket_arg)
 static int
 eal_parse_vfio_intr(const char *mode)
 {
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
        unsigned i;
        static struct {
                const char *name;
@@ -642,7 +627,7 @@ eal_parse_vfio_intr(const char *mode)
 
        for (i = 0; i < RTE_DIM(map); i++) {
                if (!strcmp(mode, map[i].name)) {
-                       internal_config.vfio_intr_mode = map[i].value;
+                       internal_conf->vfio_intr_mode = map[i].value;
                        return 0;
                }
        }
@@ -659,6 +644,8 @@ eal_log_level_parse(int argc, char **argv)
        const int old_optind = optind;
        const int old_optopt = optopt;
        char * const old_optarg = optarg;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        argvopt = argv;
        optind = 1;
@@ -673,7 +660,7 @@ eal_log_level_parse(int argc, char **argv)
                        break;
 
                ret = (opt == OPT_LOG_LEVEL_NUM) ?
-                       eal_parse_common_option(opt, optarg, &internal_config) : 0;
+                       eal_parse_common_option(opt, optarg, internal_conf) : 0;
 
                /* common parser is not happy */
                if (ret < 0)
@@ -697,6 +684,8 @@ eal_parse_args(int argc, char **argv)
        const int old_optind = optind;
        const int old_optopt = optopt;
        char * const old_optarg = optarg;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        argvopt = argv;
        optind = 1;
@@ -711,7 +700,7 @@ eal_parse_args(int argc, char **argv)
                        goto out;
                }
 
-               ret = eal_parse_common_option(opt, optarg, &internal_config);
+               ret = eal_parse_common_option(opt, optarg, internal_conf);
                /* common parser is not happy */
                if (ret < 0) {
                        eal_usage(prgname);
@@ -734,9 +723,9 @@ eal_parse_args(int argc, char **argv)
                                RTE_LOG(ERR, EAL, "Could not store hugepage directory\n");
                        else {
                                /* free old hugepage dir */
-                               if (internal_config.hugepage_dir != NULL)
-                                       free(internal_config.hugepage_dir);
-                               internal_config.hugepage_dir = hdir;
+                               if (internal_conf->hugepage_dir != NULL)
+                                       free(internal_conf->hugepage_dir);
+                               internal_conf->hugepage_dir = hdir;
                        }
                        break;
                }
@@ -747,34 +736,34 @@ eal_parse_args(int argc, char **argv)
                                RTE_LOG(ERR, EAL, "Could not store file prefix\n");
                        else {
                                /* free old prefix */
-                               if (internal_config.hugefile_prefix != NULL)
-                                       free(internal_config.hugefile_prefix);
-                               internal_config.hugefile_prefix = prefix;
+                               if (internal_conf->hugefile_prefix != NULL)
+                                       free(internal_conf->hugefile_prefix);
+                               internal_conf->hugefile_prefix = prefix;
                        }
                        break;
                }
                case OPT_SOCKET_MEM_NUM:
                        if (eal_parse_socket_arg(optarg,
-                                       internal_config.socket_mem) < 0) {
+                                       internal_conf->socket_mem) < 0) {
                                RTE_LOG(ERR, EAL, "invalid parameters for --"
                                                OPT_SOCKET_MEM "\n");
                                eal_usage(prgname);
                                ret = -1;
                                goto out;
                        }
-                       internal_config.force_sockets = 1;
+                       internal_conf->force_sockets = 1;
                        break;
 
                case OPT_SOCKET_LIMIT_NUM:
                        if (eal_parse_socket_arg(optarg,
-                                       internal_config.socket_limit) < 0) {
+                                       internal_conf->socket_limit) < 0) {
                                RTE_LOG(ERR, EAL, "invalid parameters for --"
                                                OPT_SOCKET_LIMIT "\n");
                                eal_usage(prgname);
                                ret = -1;
                                goto out;
                        }
-                       internal_config.force_socket_limits = 1;
+                       internal_conf->force_socket_limits = 1;
                        break;
 
                case OPT_VFIO_INTR_NUM:
@@ -788,7 +777,7 @@ eal_parse_args(int argc, char **argv)
                        break;
 
                case OPT_CREATE_UIO_DEV_NUM:
-                       internal_config.create_uio_dev = 1;
+                       internal_conf->create_uio_dev = 1;
                        break;
 
                case OPT_MBUF_POOL_OPS_NAME_NUM:
@@ -798,17 +787,17 @@ eal_parse_args(int argc, char **argv)
                                RTE_LOG(ERR, EAL, "Could not store mbuf pool ops name\n");
                        else {
                                /* free old ops name */
-                               if (internal_config.user_mbuf_pool_ops_name !=
+                               if (internal_conf->user_mbuf_pool_ops_name !=
                                                NULL)
-                                       free(internal_config.user_mbuf_pool_ops_name);
+                                       free(internal_conf->user_mbuf_pool_ops_name);
 
-                               internal_config.user_mbuf_pool_ops_name =
+                               internal_conf->user_mbuf_pool_ops_name =
                                                ops_name;
                        }
                        break;
                }
                case OPT_MATCH_ALLOCATIONS_NUM:
-                       internal_config.match_allocations = 1;
+                       internal_conf->match_allocations = 1;
                        break;
 
                default:
@@ -831,20 +820,20 @@ eal_parse_args(int argc, char **argv)
        }
 
        /* create runtime data directory */
-       if (internal_config.no_shconf == 0 &&
+       if (internal_conf->no_shconf == 0 &&
                        eal_create_runtime_dir() < 0) {
                RTE_LOG(ERR, EAL, "Cannot create runtime directory\n");
                ret = -1;
                goto out;
        }
 
-       if (eal_adjust_config(&internal_config) != 0) {
+       if (eal_adjust_config(internal_conf) != 0) {
                ret = -1;
                goto out;
        }
 
        /* sanity checks */
-       if (eal_check_common_options(&internal_config) != 0) {
+       if (eal_check_common_options(internal_conf) != 0) {
                eal_usage(prgname);
                ret = -1;
                goto out;
@@ -878,8 +867,9 @@ static void
 eal_check_mem_on_local_socket(void)
 {
        int socket_id;
+       const struct rte_config *config = rte_eal_get_configuration();
 
-       socket_id = rte_lcore_to_socket_id(rte_config.master_lcore);
+       socket_id = rte_lcore_to_socket_id(config->master_lcore);
 
        if (rte_memseg_list_walk(check_socket, &socket_id) == 0)
                RTE_LOG(WARNING, EAL, "WARNING: Master core has no memory on local socket!\n");
@@ -961,6 +951,9 @@ rte_eal_init(int argc, char **argv)
        char cpuset[RTE_CPU_AFFINITY_STR_LEN];
        char thread_name[RTE_MAX_THREAD_NAME_LEN];
        bool phys_addrs;
+       const struct rte_config *config = rte_eal_get_configuration();
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* checks if the machine is adequate */
        if (!rte_cpu_is_supported()) {
@@ -979,7 +972,7 @@ rte_eal_init(int argc, char **argv)
        strlcpy(logid, p ? p + 1 : argv[0], sizeof(logid));
        thread_id = pthread_self();
 
-       eal_reset_internal_config(&internal_config);
+       eal_reset_internal_config(internal_conf);
 
        /* set log level as early as possible */
        eal_log_level_parse(argc, argv);
@@ -1063,7 +1056,7 @@ rte_eal_init(int argc, char **argv)
        phys_addrs = rte_eal_using_phys_addrs() != 0;
 
        /* if no EAL option "--iova-mode=<pa|va>", use bus IOVA scheme */
-       if (internal_config.iova_mode == RTE_IOVA_DC) {
+       if (internal_conf->iova_mode == RTE_IOVA_DC) {
                /* autodetect the IOVA mapping mode */
                enum rte_iova_mode iova_mode = rte_bus_get_iommu_class();
 
@@ -1110,7 +1103,7 @@ rte_eal_init(int argc, char **argv)
                rte_eal_get_configuration()->iova_mode = iova_mode;
        } else {
                rte_eal_get_configuration()->iova_mode =
-                       internal_config.iova_mode;
+                       internal_conf->iova_mode;
        }
 
        if (rte_eal_iova_mode() == RTE_IOVA_PA && !phys_addrs) {
@@ -1122,9 +1115,9 @@ rte_eal_init(int argc, char **argv)
        RTE_LOG(INFO, EAL, "Selected IOVA mode '%s'\n",
                rte_eal_iova_mode() == RTE_IOVA_PA ? "PA" : "VA");
 
-       if (internal_config.no_hugetlbfs == 0) {
+       if (internal_conf->no_hugetlbfs == 0) {
                /* rte_config isn't initialized yet */
-               ret = internal_config.process_type == RTE_PROC_PRIMARY ?
+               ret = internal_conf->process_type == RTE_PROC_PRIMARY ?
                                eal_hugepage_info_init() :
                                eal_hugepage_info_read();
                if (ret < 0) {
@@ -1135,12 +1128,12 @@ rte_eal_init(int argc, char **argv)
                }
        }
 
-       if (internal_config.memory == 0 && internal_config.force_sockets == 0) {
-               if (internal_config.no_hugetlbfs)
-                       internal_config.memory = MEMSIZE_IF_NO_HUGE_PAGE;
+       if (internal_conf->memory == 0 && internal_conf->force_sockets == 0) {
+               if (internal_conf->no_hugetlbfs)
+                       internal_conf->memory = MEMSIZE_IF_NO_HUGE_PAGE;
        }
 
-       if (internal_config.vmware_tsc_map == 1) {
+       if (internal_conf->vmware_tsc_map == 1) {
 #ifdef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
                rte_cycles_vmware_tsc_map = 1;
                RTE_LOG (DEBUG, EAL, "Using VMWARE TSC MAP, "
@@ -1151,7 +1144,7 @@ rte_eal_init(int argc, char **argv)
 #endif
        }
 
-       if (rte_eal_log_init(logid, internal_config.syslog_facility) < 0) {
+       if (rte_eal_log_init(logid, internal_conf->syslog_facility) < 0) {
                rte_eal_init_alert("Cannot init logging.");
                rte_errno = ENOMEM;
                rte_atomic32_clear(&run_once);
@@ -1205,12 +1198,12 @@ rte_eal_init(int argc, char **argv)
 
        eal_check_mem_on_local_socket();
 
-       eal_thread_init_master(rte_config.master_lcore);
+       eal_thread_init_master(config->master_lcore);
 
        ret = eal_thread_dump_affinity(cpuset, sizeof(cpuset));
 
        RTE_LOG(DEBUG, EAL, "Master lcore %u is ready (tid=%zx;cpuset=[%s%s])\n",
-               rte_config.master_lcore, (uintptr_t)thread_id, cpuset,
+               config->master_lcore, (uintptr_t)thread_id, cpuset,
                ret == 0 ? "" : "...");
 
        RTE_LCORE_FOREACH_SLAVE(i) {
@@ -1289,14 +1282,14 @@ rte_eal_init(int argc, char **argv)
         * In no_shconf mode, no runtime directory is created in the first
         * place, so no cleanup needed.
         */
-       if (!internal_config.no_shconf && eal_clean_runtime_dir() < 0) {
+       if (!internal_conf->no_shconf && eal_clean_runtime_dir() < 0) {
                rte_eal_init_alert("Cannot clear runtime directory");
                return -1;
        }
-       if (!internal_config.no_telemetry) {
+       if (!internal_conf->no_telemetry) {
                const char *error_str = NULL;
                if (rte_telemetry_init(rte_eal_get_runtime_dir(),
-                               &internal_config.ctrl_cpuset, &error_str)
+                               &internal_conf->ctrl_cpuset, &error_str)
                                != 0) {
                        rte_eal_init_alert(error_str);
                        return -1;
@@ -1333,41 +1326,34 @@ rte_eal_cleanup(void)
        /* if we're in a primary process, we need to mark hugepages as freeable
         * so that finalization can release them back to the system.
         */
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_memseg_walk(mark_freeable, NULL);
        rte_service_finalize();
        rte_mp_channel_cleanup();
        rte_trace_save();
        eal_trace_fini();
-       eal_cleanup_config(&internal_config);
+       eal_cleanup_config(internal_conf);
        return 0;
 }
 
-enum rte_proc_type_t
-rte_eal_process_type(void)
-{
-       return rte_config.process_type;
-}
-
-int rte_eal_has_hugepages(void)
-{
-       return ! internal_config.no_hugetlbfs;
-}
-
-int rte_eal_has_pci(void)
-{
-       return !internal_config.no_pci;
-}
-
 int rte_eal_create_uio_dev(void)
 {
-       return internal_config.create_uio_dev;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
+       return internal_conf->create_uio_dev;
 }
 
 enum rte_intr_mode
 rte_eal_vfio_intr_mode(void)
 {
-       return internal_config.vfio_intr_mode;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
+       return internal_conf->vfio_intr_mode;
 }
 
 int
index 91a4fed..4191af6 100644 (file)
@@ -30,6 +30,8 @@
 #include <rte_log.h>
 #include <rte_common.h>
 #include "rte_string_fns.h"
+
+#include "eal_private.h"
 #include "eal_internal_cfg.h"
 #include "eal_hugepages.h"
 #include "eal_filesystem.h"
@@ -213,6 +215,8 @@ get_hugepage_dir(uint64_t hugepage_sz, char *hugedir, int len)
        char *splitstr[_FIELDNAME_MAX];
        char buf[BUFSIZ];
        int retval = -1;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        FILE *fd = fopen(proc_mounts, "r");
        if (fd == NULL)
@@ -229,8 +233,8 @@ get_hugepage_dir(uint64_t hugepage_sz, char *hugedir, int len)
                }
 
                /* we have a specified --huge-dir option, only examine that dir */
-               if (internal_config.hugepage_dir != NULL &&
-                               strcmp(splitstr[MOUNTPT], internal_config.hugepage_dir) != 0)
+               if (internal_conf->hugepage_dir != NULL &&
+                               strcmp(splitstr[MOUNTPT], internal_conf->hugepage_dir) != 0)
                        continue;
 
                if (strncmp(splitstr[FSTYPE], hugetlbfs_str, htlbfs_str_len) == 0){
@@ -342,6 +346,8 @@ calc_num_pages(struct hugepage_info *hpi, struct dirent *dirent)
 {
        uint64_t total_pages = 0;
        unsigned int i;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /*
         * first, try to put all hugepages into relevant sockets, but
@@ -350,7 +356,7 @@ calc_num_pages(struct hugepage_info *hpi, struct dirent *dirent)
         */
        total_pages = 0;
        /* we also don't want to do this for legacy init */
-       if (!internal_config.legacy_mem)
+       if (!internal_conf->legacy_mem)
                for (i = 0; i < rte_socket_count(); i++) {
                        int socket = rte_socket_id_by_idx(i);
                        unsigned int num_pages =
@@ -382,6 +388,8 @@ hugepage_info_init(void)
        unsigned int i, num_sizes = 0;
        DIR *dir;
        struct dirent *dirent;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        dir = opendir(sys_dir_path);
        if (dir == NULL) {
@@ -401,7 +409,7 @@ hugepage_info_init(void)
                if (num_sizes >= MAX_HUGEPAGE_SIZES)
                        break;
 
-               hpi = &internal_config.hugepage_info[num_sizes];
+               hpi = &internal_conf->hugepage_info[num_sizes];
                hpi->hugepage_sz =
                        rte_str_to_size(&dirent->d_name[dirent_start_len]);
 
@@ -424,7 +432,7 @@ hugepage_info_init(void)
                         * init process.
                         */
 #ifdef MAP_HUGE_SHIFT
-                       if (internal_config.in_memory) {
+                       if (internal_conf->in_memory) {
                                RTE_LOG(DEBUG, EAL, "In-memory mode enabled, "
                                        "hugepages of size %" PRIu64 " bytes "
                                        "will be allocated anonymously\n",
@@ -459,17 +467,17 @@ hugepage_info_init(void)
        if (dirent != NULL)
                return -1;
 
-       internal_config.num_hugepage_sizes = num_sizes;
+       internal_conf->num_hugepage_sizes = num_sizes;
 
        /* sort the page directory entries by size, largest to smallest */
-       qsort(&internal_config.hugepage_info[0], num_sizes,
-             sizeof(internal_config.hugepage_info[0]), compare_hpi);
+       qsort(&internal_conf->hugepage_info[0], num_sizes,
+             sizeof(internal_conf->hugepage_info[0]), compare_hpi);
 
        /* now we have all info, check we have at least one valid size */
        for (i = 0; i < num_sizes; i++) {
                /* pages may no longer all be on socket 0, so check all */
                unsigned int j, num_pages = 0;
-               struct hugepage_info *hpi = &internal_config.hugepage_info[i];
+               struct hugepage_info *hpi = &internal_conf->hugepage_info[i];
 
                for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
                        num_pages += hpi->num_pages[j];
@@ -491,34 +499,36 @@ eal_hugepage_info_init(void)
 {
        struct hugepage_info *hpi, *tmp_hpi;
        unsigned int i;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        if (hugepage_info_init() < 0)
                return -1;
 
        /* for no shared files mode, we're done */
-       if (internal_config.no_shconf)
+       if (internal_conf->no_shconf)
                return 0;
 
-       hpi = &internal_config.hugepage_info[0];
+       hpi = &internal_conf->hugepage_info[0];
 
        tmp_hpi = create_shared_memory(eal_hugepage_info_path(),
-                       sizeof(internal_config.hugepage_info));
+                       sizeof(internal_conf->hugepage_info));
        if (tmp_hpi == NULL) {
                RTE_LOG(ERR, EAL, "Failed to create shared memory!\n");
                return -1;
        }
 
-       memcpy(tmp_hpi, hpi, sizeof(internal_config.hugepage_info));
+       memcpy(tmp_hpi, hpi, sizeof(internal_conf->hugepage_info));
 
        /* we've copied file descriptors along with everything else, but they
         * will be invalid in secondary process, so overwrite them
         */
-       for (i = 0; i < RTE_DIM(internal_config.hugepage_info); i++) {
+       for (i = 0; i < RTE_DIM(internal_conf->hugepage_info); i++) {
                struct hugepage_info *tmp = &tmp_hpi[i];
                tmp->lock_descriptor = -1;
        }
 
-       if (munmap(tmp_hpi, sizeof(internal_config.hugepage_info)) < 0) {
+       if (munmap(tmp_hpi, sizeof(internal_conf->hugepage_info)) < 0) {
                RTE_LOG(ERR, EAL, "Failed to unmap shared memory!\n");
                return -1;
        }
@@ -527,19 +537,21 @@ eal_hugepage_info_init(void)
 
 int eal_hugepage_info_read(void)
 {
-       struct hugepage_info *hpi = &internal_config.hugepage_info[0];
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+       struct hugepage_info *hpi = &internal_conf->hugepage_info[0];
        struct hugepage_info *tmp_hpi;
 
        tmp_hpi = open_shared_memory(eal_hugepage_info_path(),
-                                 sizeof(internal_config.hugepage_info));
+                                 sizeof(internal_conf->hugepage_info));
        if (tmp_hpi == NULL) {
                RTE_LOG(ERR, EAL, "Failed to open shared memory!\n");
                return -1;
        }
 
-       memcpy(hpi, tmp_hpi, sizeof(internal_config.hugepage_info));
+       memcpy(hpi, tmp_hpi, sizeof(internal_conf->hugepage_info));
 
-       if (munmap(tmp_hpi, sizeof(internal_config.hugepage_info)) < 0) {
+       if (munmap(tmp_hpi, sizeof(internal_conf->hugepage_info)) < 0) {
                RTE_LOG(ERR, EAL, "Failed to unmap shared memory!\n");
                return -1;
        }
index bf29b83..db60e79 100644 (file)
@@ -249,8 +249,10 @@ get_seg_memfd(struct hugepage_info *hi __rte_unused,
        char segname[250]; /* as per manpage, limit is 249 bytes plus null */
 
        int flags = RTE_MFD_HUGETLB | pagesz_flags(hi->hugepage_sz);
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
-       if (internal_config.single_file_segments) {
+       if (internal_conf->single_file_segments) {
                fd = fd_list[list_idx].memseg_list_fd;
 
                if (fd < 0) {
@@ -288,14 +290,16 @@ get_seg_fd(char *path, int buflen, struct hugepage_info *hi,
                unsigned int list_idx, unsigned int seg_idx)
 {
        int fd;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* for in-memory mode, we only make it here when we're sure we support
         * memfd, and this is a special case.
         */
-       if (internal_config.in_memory)
+       if (internal_conf->in_memory)
                return get_seg_memfd(hi, list_idx, seg_idx);
 
-       if (internal_config.single_file_segments) {
+       if (internal_conf->single_file_segments) {
                /* create a hugepage file path */
                eal_get_hugefile_path(path, buflen, hi->hugedir, list_idx);
 
@@ -439,11 +443,13 @@ resize_hugefile_in_filesystem(int fd, uint64_t fa_offset, uint64_t page_sz,
 static void
 close_hugefile(int fd, char *path, int list_idx)
 {
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
        /*
         * primary process must unlink the file, but only when not in in-memory
         * mode (as in that case there is no file to unlink).
         */
-       if (!internal_config.in_memory &&
+       if (!internal_conf->in_memory &&
                        rte_eal_process_type() == RTE_PROC_PRIMARY &&
                        unlink(path))
                RTE_LOG(ERR, EAL, "%s(): unlinking '%s' failed: %s\n",
@@ -459,7 +465,10 @@ resize_hugefile(int fd, uint64_t fa_offset, uint64_t page_sz, bool grow)
        /* in-memory mode is a special case, because we can be sure that
         * fallocate() is supported.
         */
-       if (internal_config.in_memory)
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
+       if (internal_conf->in_memory)
                return resize_hugefile_in_memory(fd, fa_offset,
                                page_sz, grow);
 
@@ -484,16 +493,18 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
        size_t alloc_sz;
        int flags;
        void *new_addr;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        alloc_sz = hi->hugepage_sz;
 
        /* these are checked at init, but code analyzers don't know that */
-       if (internal_config.in_memory && !anonymous_hugepages_supported) {
+       if (internal_conf->in_memory && !anonymous_hugepages_supported) {
                RTE_LOG(ERR, EAL, "Anonymous hugepages not supported, in-memory mode cannot allocate memory\n");
                return -1;
        }
-       if (internal_config.in_memory && !memfd_create_supported &&
-                       internal_config.single_file_segments) {
+       if (internal_conf->in_memory && !memfd_create_supported &&
+                       internal_conf->single_file_segments) {
                RTE_LOG(ERR, EAL, "Single-file segments are not supported without memfd support\n");
                return -1;
        }
@@ -501,7 +512,7 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
        /* in-memory without memfd is a special case */
        int mmap_flags;
 
-       if (internal_config.in_memory && !memfd_create_supported) {
+       if (internal_conf->in_memory && !memfd_create_supported) {
                const int in_memory_flags = MAP_HUGETLB | MAP_FIXED |
                                MAP_PRIVATE | MAP_ANONYMOUS;
                int pagesz_flag;
@@ -524,7 +535,7 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
                        return -1;
                }
 
-               if (internal_config.single_file_segments) {
+               if (internal_conf->single_file_segments) {
                        map_offset = seg_idx * alloc_sz;
                        ret = resize_hugefile(fd, map_offset, alloc_sz, true);
                        if (ret < 0)
@@ -538,8 +549,8 @@ alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
                                        __func__, strerror(errno));
                                goto resized;
                        }
-                       if (internal_config.hugepage_unlink &&
-                                       !internal_config.in_memory) {
+                       if (internal_conf->hugepage_unlink &&
+                                       !internal_conf->in_memory) {
                                if (unlink(path)) {
                                        RTE_LOG(DEBUG, EAL, "%s(): unlink() failed: %s\n",
                                                __func__, strerror(errno));
@@ -642,14 +653,14 @@ unmapped:
                RTE_LOG(CRIT, EAL, "Can't mmap holes in our virtual address space\n");
        }
        /* roll back the ref count */
-       if (internal_config.single_file_segments)
+       if (internal_conf->single_file_segments)
                fd_list[list_idx].count--;
 resized:
        /* some codepaths will return negative fd, so exit early */
        if (fd < 0)
                return -1;
 
-       if (internal_config.single_file_segments) {
+       if (internal_conf->single_file_segments) {
                resize_hugefile(fd, map_offset, alloc_sz, false);
                /* ignore failure, can't make it any worse */
 
@@ -658,8 +669,8 @@ resized:
                        close_hugefile(fd, path, list_idx);
        } else {
                /* only remove file if we can take out a write lock */
-               if (internal_config.hugepage_unlink == 0 &&
-                               internal_config.in_memory == 0 &&
+               if (internal_conf->hugepage_unlink == 0 &&
+                               internal_conf->in_memory == 0 &&
                                lock(fd, LOCK_EX) == 1)
                        unlink(path);
                close(fd);
@@ -676,6 +687,8 @@ free_seg(struct rte_memseg *ms, struct hugepage_info *hi,
        char path[PATH_MAX];
        int fd, ret = 0;
        bool exit_early;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* erase page data */
        memset(ms->addr, 0, ms->len);
@@ -692,11 +705,11 @@ free_seg(struct rte_memseg *ms, struct hugepage_info *hi,
        exit_early = false;
 
        /* if we're using anonymous hugepages, nothing to be done */
-       if (internal_config.in_memory && !memfd_create_supported)
+       if (internal_conf->in_memory && !memfd_create_supported)
                exit_early = true;
 
        /* if we've already unlinked the page, nothing needs to be done */
-       if (!internal_config.in_memory && internal_config.hugepage_unlink)
+       if (!internal_conf->in_memory && internal_conf->hugepage_unlink)
                exit_early = true;
 
        if (exit_early) {
@@ -712,7 +725,7 @@ free_seg(struct rte_memseg *ms, struct hugepage_info *hi,
        if (fd < 0)
                return -1;
 
-       if (internal_config.single_file_segments) {
+       if (internal_conf->single_file_segments) {
                map_offset = seg_idx * ms->len;
                if (resize_hugefile(fd, map_offset, ms->len, false))
                        return -1;
@@ -725,7 +738,7 @@ free_seg(struct rte_memseg *ms, struct hugepage_info *hi,
                /* if we're able to take out a write lock, we're the last one
                 * holding onto this page.
                 */
-               if (!internal_config.in_memory) {
+               if (!internal_conf->in_memory) {
                        ret = lock(fd, LOCK_EX);
                        if (ret >= 0) {
                                /* no one else is using this page */
@@ -761,6 +774,8 @@ alloc_seg_walk(const struct rte_memseg_list *msl, void *arg)
        size_t page_sz;
        int cur_idx, start_idx, j, dir_fd = -1;
        unsigned int msl_idx, need, i;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        if (msl->page_sz != wa->page_sz)
                return 0;
@@ -810,7 +825,7 @@ alloc_seg_walk(const struct rte_memseg_list *msl, void *arg)
         * during init, we already hold a write lock, so don't try to take out
         * another one.
         */
-       if (wa->hi->lock_descriptor == -1 && !internal_config.in_memory) {
+       if (wa->hi->lock_descriptor == -1 && !internal_conf->in_memory) {
                dir_fd = open(wa->hi->hugedir, O_RDONLY);
                if (dir_fd < 0) {
                        RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n",
@@ -893,6 +908,8 @@ free_seg_walk(const struct rte_memseg_list *msl, void *arg)
        struct free_walk_param *wa = arg;
        uintptr_t start_addr, end_addr;
        int msl_idx, seg_idx, ret, dir_fd = -1;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        start_addr = (uintptr_t) msl->base_va;
        end_addr = start_addr + msl->len;
@@ -915,7 +932,7 @@ free_seg_walk(const struct rte_memseg_list *msl, void *arg)
         * during init, we already hold a write lock, so don't try to take out
         * another one.
         */
-       if (wa->hi->lock_descriptor == -1 && !internal_config.in_memory) {
+       if (wa->hi->lock_descriptor == -1 && !internal_conf->in_memory) {
                dir_fd = open(wa->hi->hugedir, O_RDONLY);
                if (dir_fd < 0) {
                        RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n",
@@ -958,17 +975,19 @@ eal_memalloc_alloc_seg_bulk(struct rte_memseg **ms, int n_segs, size_t page_sz,
 #endif
        struct alloc_walk_param wa;
        struct hugepage_info *hi = NULL;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        memset(&wa, 0, sizeof(wa));
 
        /* dynamic allocation not supported in legacy mode */
-       if (internal_config.legacy_mem)
+       if (internal_conf->legacy_mem)
                return -1;
 
-       for (i = 0; i < (int) RTE_DIM(internal_config.hugepage_info); i++) {
+       for (i = 0; i < (int) RTE_DIM(internal_conf->hugepage_info); i++) {
                if (page_sz ==
-                               internal_config.hugepage_info[i].hugepage_sz) {
-                       hi = &internal_config.hugepage_info[i];
+                               internal_conf->hugepage_info[i].hugepage_sz) {
+                       hi = &internal_conf->hugepage_info[i];
                        break;
                }
        }
@@ -1025,9 +1044,11 @@ int
 eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs)
 {
        int seg, ret = 0;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* dynamic free not supported in legacy mode */
-       if (internal_config.legacy_mem)
+       if (internal_conf->legacy_mem)
                return -1;
 
        for (seg = 0; seg < n_segs; seg++) {
@@ -1045,13 +1066,13 @@ eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs)
 
                memset(&wa, 0, sizeof(wa));
 
-               for (i = 0; i < (int)RTE_DIM(internal_config.hugepage_info);
+               for (i = 0; i < (int)RTE_DIM(internal_conf->hugepage_info);
                                i++) {
-                       hi = &internal_config.hugepage_info[i];
+                       hi = &internal_conf->hugepage_info[i];
                        if (cur->hugepage_sz == hi->hugepage_sz)
                                break;
                }
-               if (i == (int)RTE_DIM(internal_config.hugepage_info)) {
+               if (i == (int)RTE_DIM(internal_conf->hugepage_info)) {
                        RTE_LOG(ERR, EAL, "Can't find relevant hugepage_info entry\n");
                        ret = -1;
                        continue;
@@ -1076,8 +1097,11 @@ eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs)
 int
 eal_memalloc_free_seg(struct rte_memseg *ms)
 {
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
        /* dynamic free not supported in legacy mode */
-       if (internal_config.legacy_mem)
+       if (internal_conf->legacy_mem)
                return -1;
 
        return eal_memalloc_free_seg_bulk(&ms, 1);
@@ -1316,6 +1340,8 @@ sync_walk(const struct rte_memseg_list *msl, void *arg __rte_unused)
        struct hugepage_info *hi = NULL;
        unsigned int i;
        int msl_idx;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        if (msl->external)
                return 0;
@@ -1324,12 +1350,12 @@ sync_walk(const struct rte_memseg_list *msl, void *arg __rte_unused)
        primary_msl = &mcfg->memsegs[msl_idx];
        local_msl = &local_memsegs[msl_idx];
 
-       for (i = 0; i < RTE_DIM(internal_config.hugepage_info); i++) {
+       for (i = 0; i < RTE_DIM(internal_conf->hugepage_info); i++) {
                uint64_t cur_sz =
-                       internal_config.hugepage_info[i].hugepage_sz;
+                       internal_conf->hugepage_info[i].hugepage_sz;
                uint64_t msl_sz = primary_msl->page_sz;
                if (msl_sz == cur_sz) {
-                       hi = &internal_config.hugepage_info[i];
+                       hi = &internal_conf->hugepage_info[i];
                        break;
                }
        }
@@ -1397,9 +1423,11 @@ alloc_list(int list_idx, int len)
 {
        int *data;
        int i;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* single-file segments mode does not need fd list */
-       if (!internal_config.single_file_segments) {
+       if (!internal_conf->single_file_segments) {
                /* ensure we have space to store fd per each possible segment */
                data = malloc(sizeof(int) * len);
                if (data == NULL) {
@@ -1443,9 +1471,11 @@ int
 eal_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd)
 {
        struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* single file segments mode doesn't support individual segment fd's */
-       if (internal_config.single_file_segments)
+       if (internal_conf->single_file_segments)
                return -ENOTSUP;
 
        /* if list is not allocated, allocate it */
@@ -1463,8 +1493,11 @@ eal_memalloc_set_seg_fd(int list_idx, int seg_idx, int fd)
 int
 eal_memalloc_set_seg_list_fd(int list_idx, int fd)
 {
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
        /* non-single file segment mode doesn't support segment list fd's */
-       if (!internal_config.single_file_segments)
+       if (!internal_conf->single_file_segments)
                return -ENOTSUP;
 
        fd_list[list_idx].memseg_list_fd = fd;
@@ -1476,18 +1509,20 @@ int
 eal_memalloc_get_seg_fd(int list_idx, int seg_idx)
 {
        int fd;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
-       if (internal_config.in_memory || internal_config.no_hugetlbfs) {
+       if (internal_conf->in_memory || internal_conf->no_hugetlbfs) {
 #ifndef MEMFD_SUPPORTED
                /* in in-memory or no-huge mode, we rely on memfd support */
                return -ENOTSUP;
 #endif
                /* memfd supported, but hugetlbfs memfd may not be */
-               if (!internal_config.no_hugetlbfs && !memfd_create_supported)
+               if (!internal_conf->no_hugetlbfs && !memfd_create_supported)
                        return -ENOTSUP;
        }
 
-       if (internal_config.single_file_segments) {
+       if (internal_conf->single_file_segments) {
                fd = fd_list[list_idx].memseg_list_fd;
        } else if (fd_list[list_idx].len == 0) {
                /* list not initialized */
@@ -1504,9 +1539,11 @@ static int
 test_memfd_create(void)
 {
 #ifdef MEMFD_SUPPORTED
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
        unsigned int i;
-       for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
-               uint64_t pagesz = internal_config.hugepage_info[i].hugepage_sz;
+       for (i = 0; i < internal_conf->num_hugepage_sizes; i++) {
+               uint64_t pagesz = internal_conf->hugepage_info[i].hugepage_sz;
                int pagesz_flag = pagesz_flags(pagesz);
                int flags;
 
@@ -1533,18 +1570,20 @@ int
 eal_memalloc_get_seg_fd_offset(int list_idx, int seg_idx, size_t *offset)
 {
        struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
-       if (internal_config.in_memory || internal_config.no_hugetlbfs) {
+       if (internal_conf->in_memory || internal_conf->no_hugetlbfs) {
 #ifndef MEMFD_SUPPORTED
                /* in in-memory or no-huge mode, we rely on memfd support */
                return -ENOTSUP;
 #endif
                /* memfd supported, but hugetlbfs memfd may not be */
-               if (!internal_config.no_hugetlbfs && !memfd_create_supported)
+               if (!internal_conf->no_hugetlbfs && !memfd_create_supported)
                        return -ENOTSUP;
        }
 
-       if (internal_config.single_file_segments) {
+       if (internal_conf->single_file_segments) {
                size_t pgsz = mcfg->memsegs[list_idx].page_sz;
 
                /* segment not active? */
@@ -1567,11 +1606,14 @@ eal_memalloc_get_seg_fd_offset(int list_idx, int seg_idx, size_t *offset)
 int
 eal_memalloc_init(void)
 {
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
        if (rte_eal_process_type() == RTE_PROC_SECONDARY)
                if (rte_memseg_list_walk(secondary_msl_create_walk, NULL) < 0)
                        return -1;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
-                       internal_config.in_memory) {
+                       internal_conf->in_memory) {
                int mfd_res = test_memfd_create();
 
                if (mfd_res < 0) {
@@ -1587,7 +1629,7 @@ eal_memalloc_init(void)
                 * if we support hugetlbfs with memfd_create. this code will
                 * test if we do.
                 */
-               if (internal_config.single_file_segments &&
+               if (internal_conf->single_file_segments &&
                                mfd_res != 1) {
                        RTE_LOG(ERR, EAL, "Single-file segments mode cannot be used without memfd support\n");
                        return -1;
index 5986dab..8972529 100644 (file)
@@ -267,6 +267,8 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
        struct bitmask *oldmask = NULL;
        bool have_numa = true;
        unsigned long maxnode = 0;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* Check if kernel supports NUMA. */
        if (numa_available() != 0) {
@@ -285,7 +287,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
                        oldpolicy = MPOL_DEFAULT;
                }
                for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
-                       if (internal_config.socket_mem[i])
+                       if (internal_conf->socket_mem[i])
                                maxnode = i + 1;
        }
 #endif
@@ -304,7 +306,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
 
                        if (j == maxnode) {
                                node_id = (node_id + 1) % maxnode;
-                               while (!internal_config.socket_mem[node_id]) {
+                               while (!internal_conf->socket_mem[node_id]) {
                                        node_id++;
                                        node_id %= maxnode;
                                }
@@ -525,9 +527,11 @@ create_shared_memory(const char *filename, const size_t mem_size)
 {
        void *retval;
        int fd;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* if no shared files mode is used, create anonymous memory instead */
-       if (internal_config.no_shconf) {
+       if (internal_conf->no_shconf) {
                retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                if (retval == MAP_FAILED)
@@ -577,12 +581,14 @@ unlink_hugepage_files(struct hugepage_file *hugepg_tbl,
 {
        unsigned socket, size;
        int page, nrpages = 0;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* get total number of hugepages */
        for (size = 0; size < num_hp_info; size++)
                for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
                        nrpages +=
-                       internal_config.hugepage_info[size].num_pages[socket];
+                       internal_conf->hugepage_info[size].num_pages[socket];
 
        for (page = 0; page < nrpages; page++) {
                struct hugepage_file *hp = &hugepg_tbl[page];
@@ -606,11 +612,13 @@ unmap_unneeded_hugepages(struct hugepage_file *hugepg_tbl,
 {
        unsigned socket, size;
        int page, nrpages = 0;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* get total number of hugepages */
        for (size = 0; size < num_hp_info; size++)
                for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
-                       nrpages += internal_config.hugepage_info[size].num_pages[socket];
+                       nrpages += internal_conf->hugepage_info[size].num_pages[socket];
 
        for (size = 0; size < num_hp_info; size++) {
                for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
@@ -665,7 +673,10 @@ remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end)
        uint64_t page_sz;
        size_t memseg_len;
        int socket_id;
-
+#ifndef RTE_ARCH_64
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+#endif
        page_sz = hugepages[seg_start].size;
        socket_id = hugepages[seg_start].socket_id;
        seg_len = seg_end - seg_start;
@@ -750,7 +761,7 @@ remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end)
                /* we have a new address, so unmap previous one */
 #ifndef RTE_ARCH_64
                /* in 32-bit legacy mode, we have already unmapped the page */
-               if (!internal_config.legacy_mem)
+               if (!internal_conf->legacy_mem)
                        munmap(hfile->orig_va, page_sz);
 #else
                munmap(hfile->orig_va, page_sz);
@@ -828,6 +839,8 @@ prealloc_segments(struct hugepage_file *hugepages, int n_pages)
        unsigned int hpi_idx, socket, i;
        int n_contig_segs, n_segs;
        int msl_idx;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* before we preallocate segments, we need to free up our VA space.
         * we're not removing files, and we already have information about
@@ -842,10 +855,10 @@ prealloc_segments(struct hugepage_file *hugepages, int n_pages)
        /* we cannot know how many page sizes and sockets we have discovered, so
         * loop over all of them
         */
-       for (hpi_idx = 0; hpi_idx < internal_config.num_hugepage_sizes;
+       for (hpi_idx = 0; hpi_idx < internal_conf->num_hugepage_sizes;
                        hpi_idx++) {
                uint64_t page_sz =
-                       internal_config.hugepage_info[hpi_idx].hugepage_sz;
+                       internal_conf->hugepage_info[hpi_idx].hugepage_sz;
 
                for (i = 0; i < rte_socket_count(); i++) {
                        struct rte_memseg_list *msl;
@@ -1039,9 +1052,11 @@ eal_get_hugepage_mem_size(void)
 {
        uint64_t size = 0;
        unsigned i, j;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
-       for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
-               struct hugepage_info *hpi = &internal_config.hugepage_info[i];
+       for (i = 0; i < internal_conf->num_hugepage_sizes; i++) {
+               struct hugepage_info *hpi = &internal_conf->hugepage_info[i];
                if (strnlen(hpi->hugedir, sizeof(hpi->hugedir)) != 0) {
                        for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
                                size += hpi->hugepage_sz * hpi->num_pages[j];
@@ -1096,6 +1111,8 @@ eal_legacy_hugepage_init(void)
        struct rte_mem_config *mcfg;
        struct hugepage_file *hugepage = NULL, *tmp_hp = NULL;
        struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        uint64_t memory[RTE_MAX_NUMA_NODES];
 
@@ -1110,7 +1127,7 @@ eal_legacy_hugepage_init(void)
        mcfg = rte_eal_get_configuration()->mem_config;
 
        /* hugetlbfs can be disabled */
-       if (internal_config.no_hugetlbfs) {
+       if (internal_conf->no_hugetlbfs) {
                void *prealloc_addr;
                size_t mem_sz;
                struct rte_memseg_list *msl;
@@ -1121,15 +1138,15 @@ eal_legacy_hugepage_init(void)
                uint64_t page_sz;
 
                /* nohuge mode is legacy mode */
-               internal_config.legacy_mem = 1;
+               internal_conf->legacy_mem = 1;
 
                /* nohuge mode is single-file segments mode */
-               internal_config.single_file_segments = 1;
+               internal_conf->single_file_segments = 1;
 
                /* create a memseg list */
                msl = &mcfg->memsegs[0];
 
-               mem_sz = internal_config.memory;
+               mem_sz = internal_conf->memory;
                page_sz = RTE_PGSIZE_4K;
                n_segs = mem_sz / page_sz;
 
@@ -1151,7 +1168,7 @@ eal_legacy_hugepage_init(void)
                        RTE_LOG(DEBUG, EAL, "Falling back to anonymous map\n");
                } else {
                        /* we got an fd - now resize it */
-                       if (ftruncate(memfd, internal_config.memory) < 0) {
+                       if (ftruncate(memfd, internal_conf->memory) < 0) {
                                RTE_LOG(ERR, EAL, "Cannot resize memfd: %s\n",
                                                strerror(errno));
                                RTE_LOG(ERR, EAL, "Falling back to anonymous map\n");
@@ -1215,11 +1232,11 @@ eal_legacy_hugepage_init(void)
 
        /* calculate total number of hugepages available. at this point we haven't
         * yet started sorting them so they all are on socket 0 */
-       for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
+       for (i = 0; i < (int) internal_conf->num_hugepage_sizes; i++) {
                /* meanwhile, also initialize used_hp hugepage sizes in used_hp */
-               used_hp[i].hugepage_sz = internal_config.hugepage_info[i].hugepage_sz;
+               used_hp[i].hugepage_sz = internal_conf->hugepage_info[i].hugepage_sz;
 
-               nr_hugepages += internal_config.hugepage_info[i].num_pages[0];
+               nr_hugepages += internal_conf->hugepage_info[i].num_pages[0];
        }
 
        /*
@@ -1240,10 +1257,10 @@ eal_legacy_hugepage_init(void)
 
        /* make a copy of socket_mem, needed for balanced allocation. */
        for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
-               memory[i] = internal_config.socket_mem[i];
+               memory[i] = internal_conf->socket_mem[i];
 
        /* map all hugepages and sort them */
-       for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
+       for (i = 0; i < (int)internal_conf->num_hugepage_sizes; i++) {
                unsigned pages_old, pages_new;
                struct hugepage_info *hpi;
 
@@ -1252,7 +1269,7 @@ eal_legacy_hugepage_init(void)
                 * we just map all hugepages available to the system
                 * all hugepages are still located on socket 0
                 */
-               hpi = &internal_config.hugepage_info[i];
+               hpi = &internal_conf->hugepage_info[i];
 
                if (hpi->num_pages[0] == 0)
                        continue;
@@ -1308,16 +1325,16 @@ eal_legacy_hugepage_init(void)
 
        huge_recover_sigbus();
 
-       if (internal_config.memory == 0 && internal_config.force_sockets == 0)
-               internal_config.memory = eal_get_hugepage_mem_size();
+       if (internal_conf->memory == 0 && internal_conf->force_sockets == 0)
+               internal_conf->memory = eal_get_hugepage_mem_size();
 
        nr_hugefiles = nr_hugepages;
 
 
        /* clean out the numbers of pages */
-       for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++)
+       for (i = 0; i < (int) internal_conf->num_hugepage_sizes; i++)
                for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
-                       internal_config.hugepage_info[i].num_pages[j] = 0;
+                       internal_conf->hugepage_info[i].num_pages[j] = 0;
 
        /* get hugepages for each socket */
        for (i = 0; i < nr_hugefiles; i++) {
@@ -1325,30 +1342,30 @@ eal_legacy_hugepage_init(void)
 
                /* find a hugepage info with right size and increment num_pages */
                const int nb_hpsizes = RTE_MIN(MAX_HUGEPAGE_SIZES,
-                               (int)internal_config.num_hugepage_sizes);
+                               (int)internal_conf->num_hugepage_sizes);
                for (j = 0; j < nb_hpsizes; j++) {
                        if (tmp_hp[i].size ==
-                                       internal_config.hugepage_info[j].hugepage_sz) {
-                               internal_config.hugepage_info[j].num_pages[socket]++;
+                                       internal_conf->hugepage_info[j].hugepage_sz) {
+                               internal_conf->hugepage_info[j].num_pages[socket]++;
                        }
                }
        }
 
        /* make a copy of socket_mem, needed for number of pages calculation */
        for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
-               memory[i] = internal_config.socket_mem[i];
+               memory[i] = internal_conf->socket_mem[i];
 
        /* calculate final number of pages */
        nr_hugepages = eal_dynmem_calc_num_pages_per_socket(memory,
-                       internal_config.hugepage_info, used_hp,
-                       internal_config.num_hugepage_sizes);
+                       internal_conf->hugepage_info, used_hp,
+                       internal_conf->num_hugepage_sizes);
 
        /* error if not enough memory available */
        if (nr_hugepages < 0)
                goto fail;
 
        /* reporting in! */
-       for (i = 0; i < (int) internal_config.num_hugepage_sizes; i++) {
+       for (i = 0; i < (int) internal_conf->num_hugepage_sizes; i++) {
                for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
                        if (used_hp[i].num_pages[j] > 0) {
                                RTE_LOG(DEBUG, EAL,
@@ -1377,7 +1394,7 @@ eal_legacy_hugepage_init(void)
         * also, sets final_va to NULL on pages that were unmapped.
         */
        if (unmap_unneeded_hugepages(tmp_hp, used_hp,
-                       internal_config.num_hugepage_sizes) < 0) {
+                       internal_conf->num_hugepage_sizes) < 0) {
                RTE_LOG(ERR, EAL, "Unmapping and locking hugepages failed!\n");
                goto fail;
        }
@@ -1395,7 +1412,7 @@ eal_legacy_hugepage_init(void)
 
 #ifndef RTE_ARCH_64
        /* for legacy 32-bit mode, we did not preallocate VA space, so do it */
-       if (internal_config.legacy_mem &&
+       if (internal_conf->legacy_mem &&
                        prealloc_segments(hugepage, nr_hugefiles)) {
                RTE_LOG(ERR, EAL, "Could not preallocate VA space for hugepages\n");
                goto fail;
@@ -1411,8 +1428,8 @@ eal_legacy_hugepage_init(void)
        }
 
        /* free the hugepage backing files */
-       if (internal_config.hugepage_unlink &&
-               unlink_hugepage_files(tmp_hp, internal_config.num_hugepage_sizes) < 0) {
+       if (internal_conf->hugepage_unlink &&
+               unlink_hugepage_files(tmp_hp, internal_conf->num_hugepage_sizes) < 0) {
                RTE_LOG(ERR, EAL, "Unlinking hugepage files failed!\n");
                goto fail;
        }
@@ -1622,7 +1639,10 @@ eal_hugepage_attach(void)
 int
 rte_eal_hugepage_init(void)
 {
-       return internal_config.legacy_mem ?
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
+       return internal_conf->legacy_mem ?
                        eal_legacy_hugepage_init() :
                        eal_dynmem_hugepage_init();
 }
@@ -1630,7 +1650,10 @@ rte_eal_hugepage_init(void)
 int
 rte_eal_hugepage_attach(void)
 {
-       return internal_config.legacy_mem ?
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
+       return internal_conf->legacy_mem ?
                        eal_legacy_hugepage_attach() :
                        eal_hugepage_attach();
 }
@@ -1659,9 +1682,11 @@ memseg_primary_init_32(void)
        struct rte_memseg_list *msl;
        uint64_t extra_mem_per_socket, total_extra_mem, total_requested_mem;
        uint64_t max_mem;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* no-huge does not need this at all */
-       if (internal_config.no_hugetlbfs)
+       if (internal_conf->no_hugetlbfs)
                return 0;
 
        /* this is a giant hack, but desperate times call for desperate
@@ -1674,7 +1699,7 @@ memseg_primary_init_32(void)
         * unneeded pages. this will not affect secondary processes, as those
         * should be able to mmap the space without (too many) problems.
         */
-       if (internal_config.legacy_mem)
+       if (internal_conf->legacy_mem)
                return 0;
 
        /* 32-bit mode is a very special case. we cannot know in advance where
@@ -1683,12 +1708,12 @@ memseg_primary_init_32(void)
         */
        active_sockets = 0;
        total_requested_mem = 0;
-       if (internal_config.force_sockets)
+       if (internal_conf->force_sockets)
                for (i = 0; i < rte_socket_count(); i++) {
                        uint64_t mem;
 
                        socket_id = rte_socket_id_by_idx(i);
-                       mem = internal_config.socket_mem[socket_id];
+                       mem = internal_conf->socket_mem[socket_id];
 
                        if (mem == 0)
                                continue;
@@ -1697,7 +1722,7 @@ memseg_primary_init_32(void)
                        total_requested_mem += mem;
                }
        else
-               total_requested_mem = internal_config.memory;
+               total_requested_mem = internal_conf->memory;
 
        max_mem = (uint64_t)RTE_MAX_MEM_MB << 20;
        if (total_requested_mem > max_mem) {
@@ -1724,7 +1749,7 @@ memseg_primary_init_32(void)
 
        /* create memseg lists */
        for (i = 0; i < rte_socket_count(); i++) {
-               int hp_sizes = (int) internal_config.num_hugepage_sizes;
+               int hp_sizes = (int) internal_conf->num_hugepage_sizes;
                uint64_t max_socket_mem, cur_socket_mem;
                unsigned int master_lcore_socket;
                struct rte_config *cfg = rte_eal_get_configuration();
@@ -1734,13 +1759,13 @@ memseg_primary_init_32(void)
 
 #ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
                /* we can still sort pages by socket in legacy mode */
-               if (!internal_config.legacy_mem && socket_id > 0)
+               if (!internal_conf->legacy_mem && socket_id > 0)
                        break;
 #endif
 
                /* if we didn't specifically request memory on this socket */
                skip = active_sockets != 0 &&
-                               internal_config.socket_mem[socket_id] == 0;
+                               internal_conf->socket_mem[socket_id] == 0;
                /* ...or if we didn't specifically request memory on *any*
                 * socket, and this is not master lcore
                 */
@@ -1755,8 +1780,8 @@ memseg_primary_init_32(void)
 
                /* max amount of memory on this socket */
                max_socket_mem = (active_sockets != 0 ?
-                                       internal_config.socket_mem[socket_id] :
-                                       internal_config.memory) +
+                                       internal_conf->socket_mem[socket_id] :
+                                       internal_conf->memory) +
                                        extra_mem_per_socket;
                cur_socket_mem = 0;
 
@@ -1766,7 +1791,7 @@ memseg_primary_init_32(void)
                        struct hugepage_info *hpi;
                        int type_msl_idx, max_segs, total_segs = 0;
 
-                       hpi = &internal_config.hugepage_info[hpi_idx];
+                       hpi = &internal_conf->hugepage_info[hpi_idx];
                        hugepage_sz = hpi->hugepage_sz;
 
                        /* check if pages are actually available */
@@ -1883,6 +1908,10 @@ rte_eal_memseg_init(void)
        /* increase rlimit to maximum */
        struct rlimit lim;
 
+#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+#endif
        if (getrlimit(RLIMIT_NOFILE, &lim) == 0) {
                /* set limit to maximum */
                lim.rlim_cur = lim.rlim_max;
@@ -1899,7 +1928,7 @@ rte_eal_memseg_init(void)
                RTE_LOG(ERR, EAL, "Cannot get current resource limits\n");
        }
 #ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
-       if (!internal_config.legacy_mem && rte_socket_count() > 1) {
+       if (!internal_conf->legacy_mem && rte_socket_count() > 1) {
                RTE_LOG(WARNING, EAL, "DPDK is running on a NUMA system, but is compiled without NUMA support.\n");
                RTE_LOG(WARNING, EAL, "This will have adverse consequences for performance and usability.\n");
                RTE_LOG(WARNING, EAL, "Please use --"OPT_LEGACY_MEM" option, or recompile with NUMA support.\n");
index 6dc6b56..c0a67cf 100644 (file)
@@ -104,7 +104,10 @@ hpet_msb_inc(__rte_unused void *arg)
 uint64_t
 rte_get_hpet_hz(void)
 {
-       if(internal_config.no_hpet)
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
+       if (internal_conf->no_hpet)
                rte_panic("Error, HPET called, but no HPET present\n");
 
        return eal_hpet_resolution_hz;
@@ -115,8 +118,10 @@ rte_get_hpet_cycles(void)
 {
        uint32_t t, msb;
        uint64_t ret;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
-       if(internal_config.no_hpet)
+       if (internal_conf->no_hpet)
                rte_panic("Error, HPET called, but no HPET present\n");
 
        t = eal_hpet->counter_l;
@@ -138,8 +143,10 @@ int
 rte_eal_hpet_init(int make_default)
 {
        int fd, ret;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
-       if (internal_config.no_hpet) {
+       if (internal_conf->no_hpet) {
                RTE_LOG(NOTICE, EAL, "HPET is disabled\n");
                return -1;
        }
@@ -148,7 +155,7 @@ rte_eal_hpet_init(int make_default)
        if (fd < 0) {
                RTE_LOG(ERR, EAL, "ERROR: Cannot open "DEV_HPET": %s!\n",
                        strerror(errno));
-               internal_config.no_hpet = 1;
+               internal_conf->no_hpet = 1;
                return -1;
        }
        eal_hpet = mmap(NULL, 1024, PROT_READ, MAP_SHARED, fd, 0);
@@ -159,7 +166,7 @@ rte_eal_hpet_init(int make_default)
                                "To run without using HPET, set CONFIG_RTE_LIBEAL_USE_HPET=n "
                                "in your build configuration or use '--no-hpet' EAL flag.\n");
                close(fd);
-               internal_config.no_hpet = 1;
+               internal_conf->no_hpet = 1;
                return -1;
        }
        close(fd);
@@ -182,7 +189,7 @@ rte_eal_hpet_init(int make_default)
                                     hpet_msb_inc, NULL);
        if (ret != 0) {
                RTE_LOG(ERR, EAL, "ERROR: Cannot create HPET timer thread!\n");
-               internal_config.no_hpet = 1;
+               internal_conf->no_hpet = 1;
                return -1;
        }
 
index d26e164..abb12a3 100644 (file)
@@ -267,9 +267,11 @@ vfio_open_group_fd(int iommu_group_num)
        struct rte_mp_reply mp_reply = {0};
        struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
        struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* if primary, try to open the group */
-       if (internal_config.process_type == RTE_PROC_PRIMARY) {
+       if (internal_conf->process_type == RTE_PROC_PRIMARY) {
                /* try regular group format */
                snprintf(filename, sizeof(filename),
                                 VFIO_GROUP_FMT, iommu_group_num);
@@ -713,6 +715,8 @@ rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
        int vfio_group_fd;
        int iommu_group_num;
        int i, ret;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* get group number */
        ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_num);
@@ -788,7 +792,7 @@ rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
                 * Note this can happen several times with the hotplug
                 * functionality.
                 */
-               if (internal_config.process_type == RTE_PROC_PRIMARY &&
+               if (internal_conf->process_type == RTE_PROC_PRIMARY &&
                                vfio_cfg->vfio_active_groups == 1 &&
                                vfio_group_device_count(vfio_group_fd) == 0) {
                        const struct vfio_iommu_type *t;
@@ -1022,6 +1026,8 @@ rte_vfio_enable(const char *modname)
        /* initialize group list */
        int i, j;
        int vfio_available;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        rte_spinlock_recursive_t lock = RTE_SPINLOCK_RECURSIVE_INITIALIZER;
 
@@ -1057,7 +1063,7 @@ rte_vfio_enable(const char *modname)
                return 0;
        }
 
-       if (internal_config.process_type == RTE_PROC_PRIMARY) {
+       if (internal_conf->process_type == RTE_PROC_PRIMARY) {
                /* open a new container */
                default_vfio_cfg->vfio_container_fd =
                                rte_vfio_get_container_fd();
@@ -1093,11 +1099,13 @@ vfio_get_default_container_fd(void)
        struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
        struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
        int container_fd;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        if (default_vfio_cfg->vfio_enabled)
                return default_vfio_cfg->vfio_container_fd;
 
-       if (internal_config.process_type == RTE_PROC_PRIMARY) {
+       if (internal_conf->process_type == RTE_PROC_PRIMARY) {
                /* if we were secondary process we would try requesting
                 * container fd from the primary, but we're the primary
                 * process so just exit here
@@ -1200,10 +1208,12 @@ rte_vfio_get_container_fd(void)
        struct rte_mp_reply mp_reply = {0};
        struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
        struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
 
        /* if we're in a primary process, try to open the container */
-       if (internal_config.process_type == RTE_PROC_PRIMARY) {
+       if (internal_conf->process_type == RTE_PROC_PRIMARY) {
                vfio_container_fd = open(VFIO_CONTAINER_PATH, O_RDWR);
                if (vfio_container_fd < 0) {
                        RTE_LOG(ERR, EAL, "  cannot open VFIO container, "
index 97c8427..11801e6 100644 (file)
@@ -32,42 +32,16 @@ static rte_usage_hook_t     rte_application_usage_hook;
  */
 static int mem_cfg_fd = -1;
 
-/* early configuration structure, when memory config is not mmapped */
-static struct rte_mem_config early_mem_config;
-
-/* Address of global and public configuration */
-static struct rte_config rte_config = {
-               .mem_config = &early_mem_config,
-};
-
 /* internal configuration (per-core) */
 struct lcore_config lcore_config[RTE_MAX_LCORE];
 
-/* internal configuration */
-struct internal_config internal_config;
-
-/* platform-specific runtime dir */
-static char runtime_dir[PATH_MAX];
-
-const char *
-rte_eal_get_runtime_dir(void)
-{
-       return runtime_dir;
-}
-
-/* Return a pointer to the configuration structure */
-struct rte_config *
-rte_eal_get_configuration(void)
-{
-       return &rte_config;
-}
-
 /* Detect if we are a primary or a secondary process */
 enum rte_proc_type_t
 eal_proc_type_detect(void)
 {
        enum rte_proc_type_t ptype = RTE_PROC_PRIMARY;
        const char *pathname = eal_runtime_config_path();
+       const struct rte_config *config = rte_eal_get_configuration();
 
        /* if we can open the file but not get a write-lock we are a secondary
         * process. NOTE: if we get a file handle back, we keep that open
@@ -77,14 +51,14 @@ eal_proc_type_detect(void)
                _O_RDWR, _SH_DENYNO, _S_IREAD | _S_IWRITE);
        if (err == 0) {
                OVERLAPPED soverlapped = { 0 };
-               soverlapped.Offset = sizeof(*rte_config.mem_config);
+               soverlapped.Offset = sizeof(*config->mem_config);
                soverlapped.OffsetHigh = 0;
 
                HANDLE hwinfilehandle = (HANDLE)_get_osfhandle(mem_cfg_fd);
 
                if (!LockFileEx(hwinfilehandle,
                        LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0,
-                       sizeof(*rte_config.mem_config), 0, &soverlapped))
+                       sizeof(*config->mem_config), 0, &soverlapped))
                        ptype = RTE_PROC_SECONDARY;
        }
 
@@ -94,24 +68,6 @@ eal_proc_type_detect(void)
        return ptype;
 }
 
-enum rte_proc_type_t
-rte_eal_process_type(void)
-{
-       return rte_config.process_type;
-}
-
-int
-rte_eal_has_hugepages(void)
-{
-       return !internal_config.no_hugetlbfs;
-}
-
-enum rte_iova_mode
-rte_eal_iova_mode(void)
-{
-       return rte_config.iova_mode;
-}
-
 /* display usage */
 static void
 eal_usage(const char *prgname)
@@ -134,10 +90,12 @@ eal_log_level_parse(int argc, char **argv)
        int opt;
        char **argvopt;
        int option_index;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        argvopt = argv;
 
-       eal_reset_internal_config(&internal_config);
+       eal_reset_internal_config(internal_conf);
 
        while ((opt = getopt_long(argc, argvopt, eal_short_options,
                eal_long_options, &option_index)) != EOF) {
@@ -150,7 +108,7 @@ eal_log_level_parse(int argc, char **argv)
 
                ret = (opt == OPT_LOG_LEVEL_NUM) ?
                        eal_parse_common_option(opt, optarg,
-                               &internal_config) : 0;
+                               internal_conf) : 0;
 
                /* common parser is not happy */
                if (ret < 0)
@@ -168,6 +126,8 @@ eal_parse_args(int argc, char **argv)
        char **argvopt;
        int option_index;
        char *prgname = argv[0];
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        argvopt = argv;
 
@@ -182,7 +142,7 @@ eal_parse_args(int argc, char **argv)
                        return -1;
                }
 
-               ret = eal_parse_common_option(opt, optarg, &internal_config);
+               ret = eal_parse_common_option(opt, optarg, internal_conf);
                /* common parser is not happy */
                if (ret < 0) {
                        eal_usage(prgname);
@@ -214,11 +174,11 @@ eal_parse_args(int argc, char **argv)
                }
        }
 
-       if (eal_adjust_config(&internal_config) != 0)
+       if (eal_adjust_config(internal_conf) != 0)
                return -1;
 
        /* sanity checks */
-       if (eal_check_common_options(&internal_config) != 0) {
+       if (eal_check_common_options(internal_conf) != 0) {
                eal_usage(prgname);
                return -1;
        }
@@ -277,7 +237,10 @@ __rte_trace_point_register(rte_trace_point_t *trace, const char *name,
 int
 rte_eal_cleanup(void)
 {
-       eal_cleanup_config(&internal_config);
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
+       eal_cleanup_config(internal_conf);
        return 0;
 }
 
@@ -286,6 +249,9 @@ int
 rte_eal_init(int argc, char **argv)
 {
        int i, fctret;
+       const struct rte_config *config = rte_eal_get_configuration();
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        rte_eal_log_init(NULL, 0);
 
@@ -308,21 +274,21 @@ rte_eal_init(int argc, char **argv)
                exit(1);
 
        /* Prevent creation of shared memory files. */
-       if (internal_config.in_memory == 0) {
+       if (internal_conf->in_memory == 0) {
                RTE_LOG(WARNING, EAL, "Multi-process support is requested, "
                        "but not available.\n");
-               internal_config.in_memory = 1;
+               internal_conf->in_memory = 1;
        }
 
-       if (!internal_config.no_hugetlbfs && (eal_hugepage_info_init() < 0)) {
+       if (!internal_conf->no_hugetlbfs && (eal_hugepage_info_init() < 0)) {
                rte_eal_init_alert("Cannot get hugepage information");
                rte_errno = EACCES;
                return -1;
        }
 
-       if (internal_config.memory == 0 && !internal_config.force_sockets) {
-               if (internal_config.no_hugetlbfs)
-                       internal_config.memory = MEMSIZE_IF_NO_HUGE_PAGE;
+       if (internal_conf->memory == 0 && !internal_conf->force_sockets) {
+               if (internal_conf->no_hugetlbfs)
+                       internal_conf->memory = MEMSIZE_IF_NO_HUGE_PAGE;
        }
 
        if (eal_mem_win32api_init() < 0) {
@@ -367,7 +333,7 @@ rte_eal_init(int argc, char **argv)
                return -1;
        }
 
-       eal_thread_init_master(rte_config.master_lcore);
+       eal_thread_init_master(config->master_lcore);
 
        RTE_LCORE_FOREACH_SLAVE(i) {
 
index 61d0dcd..5779cd3 100644 (file)
@@ -4,6 +4,7 @@
 #include <rte_memzone.h>
 #include <rte_os.h>
 
+#include "eal_private.h"
 #include "eal_filesystem.h"
 #include "eal_hugepages.h"
 #include "eal_internal_cfg.h"
@@ -54,10 +55,12 @@ hugepage_info_init(void)
        struct hugepage_info *hpi;
        unsigned int socket_id;
        int ret = 0;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* Only one hugepage size available on Windows. */
-       internal_config.num_hugepage_sizes = 1;
-       hpi = &internal_config.hugepage_info[0];
+       internal_conf->num_hugepage_sizes = 1;
+       hpi = &internal_conf->hugepage_info[0];
 
        hpi->hugepage_sz = GetLargePageMinimum();
        if (hpi->hugepage_sz == 0)
index a7452b6..d8cae3e 100644 (file)
@@ -320,14 +320,16 @@ eal_memalloc_alloc_seg_bulk(struct rte_memseg **ms, int n_segs,
        int ret = -1;
        struct alloc_walk_param wa;
        struct hugepage_info *hi = NULL;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
-       if (internal_config.legacy_mem) {
+       if (internal_conf->legacy_mem) {
                RTE_LOG(ERR, EAL, "dynamic allocation not supported in legacy mode\n");
                return -ENOTSUP;
        }
 
-       for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
-               struct hugepage_info *hpi = &internal_config.hugepage_info[i];
+       for (i = 0; i < internal_conf->num_hugepage_sizes; i++) {
+               struct hugepage_info *hpi = &internal_conf->hugepage_info[i];
                if (page_sz == hpi->hugepage_sz) {
                        hi = hpi;
                        break;
@@ -371,9 +373,11 @@ int
 eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs)
 {
        int seg, ret = 0;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* dynamic free not supported in legacy mode */
-       if (internal_config.legacy_mem)
+       if (internal_conf->legacy_mem)
                return -1;
 
        for (seg = 0; seg < n_segs; seg++) {
@@ -392,12 +396,12 @@ eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs)
 
                memset(&wa, 0, sizeof(wa));
 
-               for (i = 0; i < RTE_DIM(internal_config.hugepage_info); i++) {
-                       hi = &internal_config.hugepage_info[i];
+               for (i = 0; i < RTE_DIM(internal_conf->hugepage_info); i++) {
+                       hi = &internal_conf->hugepage_info[i];
                        if (cur->hugepage_sz == hi->hugepage_sz)
                                break;
                }
-               if (i == RTE_DIM(internal_config.hugepage_info)) {
+               if (i == RTE_DIM(internal_conf->hugepage_info)) {
                        RTE_LOG(ERR, EAL, "Can't find relevant hugepage_info entry\n");
                        ret = -1;
                        continue;
index 73be1cf..7f8d3c2 100644 (file)
@@ -656,13 +656,15 @@ eal_nohuge_init(void)
        void *addr;
 
        mcfg = rte_eal_get_configuration()->mem_config;
+       struct internal_config *internal_conf =
+               eal_get_internal_configuration();
 
        /* nohuge mode is legacy mode */
-       internal_config.legacy_mem = 1;
+       internal_conf->legacy_mem = 1;
 
        msl = &mcfg->memsegs[0];
 
-       mem_sz = internal_config.memory;
+       mem_sz = internal_conf->memory;
        page_sz = RTE_PGSIZE_4K;
        n_segs = mem_sz / page_sz;
 
@@ -698,7 +700,10 @@ eal_nohuge_init(void)
 int
 rte_eal_hugepage_init(void)
 {
-       return internal_config.no_hugetlbfs ?
+       const struct internal_config *internal_conf =
+               eal_get_internal_configuration();
+
+       return internal_conf->no_hugetlbfs ?
                eal_nohuge_init() : eal_dynmem_hugepage_init();
 }