X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_eal%2Fcommon%2Feal_common_options.c;h=f242d56a76310d854106020fe205e805fda6a0dc;hb=8af866df8d8c;hp=481c732b1858bc6c4a8c6564af1e916fd9983abe;hpb=a5d7a3f77ddc3c3ae18bce04d7555b458360cc65;p=dpdk.git

diff --git a/lib/librte_eal/common/eal_common_options.c b/lib/librte_eal/common/eal_common_options.c
index 481c732b18..f242d56a76 100644
--- a/lib/librte_eal/common/eal_common_options.c
+++ b/lib/librte_eal/common/eal_common_options.c
@@ -1,52 +1,31 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 6WIND S.A.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Intel Corporation nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ * Copyright(c) 2014 6WIND S.A.
 */

 #include
 #include
 #include
+#ifndef RTE_EXEC_ENV_WINDOWS
 #include
+#endif
 #include
 #include
 #include
 #include
+#ifndef RTE_EXEC_ENV_WINDOWS
 #include
+#endif
 #include
 #include
 #include
+#include
 #include
 #include
 #include
+#include
+#include

 #include
 #include
 #include
@@ -54,16 +33,23 @@
 #include "eal_internal_cfg.h"
 #include "eal_options.h"
 #include "eal_filesystem.h"
+#include "eal_private.h"
+#include "eal_trace.h"

 #define BITS_PER_HEX 4
+#define LCORE_OPT_LST 1
+#define LCORE_OPT_MSK 2
+#define LCORE_OPT_MAP 3

 const char
 eal_short_options[] =
 	"b:" /* pci-blacklist */
 	"c:" /* coremask */
+	"s:" /* service coremask */
 	"d:" /* driver */
 	"h"  /* help */
 	"l:" /* corelist */
+	"S:" /* service corelist */
 	"m:" /* memory size */
 	"n:" /* memory channels */
 	"r:" /* memory ranks */
@@ -79,22 +65,30 @@ eal_long_options[] = {
 	{OPT_HELP, 0, NULL, OPT_HELP_NUM },
 	{OPT_HUGE_DIR, 1, NULL, OPT_HUGE_DIR_NUM },
 	{OPT_HUGE_UNLINK, 0, NULL, OPT_HUGE_UNLINK_NUM },
+	{OPT_IOVA_MODE, 1, NULL, OPT_IOVA_MODE_NUM },
 	{OPT_LCORES, 1, NULL, OPT_LCORES_NUM },
 	{OPT_LOG_LEVEL, 1, NULL, OPT_LOG_LEVEL_NUM },
+	{OPT_TRACE, 1, NULL, OPT_TRACE_NUM },
+	{OPT_TRACE_DIR, 1, NULL, OPT_TRACE_DIR_NUM },
 	{OPT_MASTER_LCORE, 1, NULL, OPT_MASTER_LCORE_NUM },
+	{OPT_MBUF_POOL_OPS_NAME, 1, NULL, OPT_MBUF_POOL_OPS_NAME_NUM},
 	{OPT_NO_HPET, 0, NULL, OPT_NO_HPET_NUM },
 	{OPT_NO_HUGE, 0, NULL, OPT_NO_HUGE_NUM },
 	{OPT_NO_PCI, 0, NULL, OPT_NO_PCI_NUM },
 	{OPT_NO_SHCONF, 0, NULL, OPT_NO_SHCONF_NUM },
+	{OPT_IN_MEMORY, 0, NULL, OPT_IN_MEMORY_NUM },
 	{OPT_PCI_BLACKLIST, 1, NULL, OPT_PCI_BLACKLIST_NUM },
 	{OPT_PCI_WHITELIST, 1, NULL, OPT_PCI_WHITELIST_NUM },
 	{OPT_PROC_TYPE, 1, NULL, OPT_PROC_TYPE_NUM },
 	{OPT_SOCKET_MEM, 1, NULL, OPT_SOCKET_MEM_NUM },
+	{OPT_SOCKET_LIMIT, 1, NULL, OPT_SOCKET_LIMIT_NUM },
 	{OPT_SYSLOG, 1, NULL, OPT_SYSLOG_NUM },
 	{OPT_VDEV, 1, NULL, OPT_VDEV_NUM },
 	{OPT_VFIO_INTR, 1, NULL, OPT_VFIO_INTR_NUM },
 	{OPT_VMWARE_TSC_MAP, 0, NULL, OPT_VMWARE_TSC_MAP_NUM },
-	{OPT_XEN_DOM0, 0, NULL, OPT_XEN_DOM0_NUM },
+	{OPT_LEGACY_MEM, 0, NULL, OPT_LEGACY_MEM_NUM },
+	{OPT_SINGLE_FILE_SEGMENTS, 0, NULL, OPT_SINGLE_FILE_SEGMENTS_NUM},
+	{OPT_MATCH_ALLOCATIONS, 0, NULL, OPT_MATCH_ALLOCATIONS_NUM},
 	{0, 0, NULL, 0 }
 };

@@ -118,14 +112,79 @@ static const char *default_solib_dir = RTE_EAL_PMD_PATH;

 /*
  * Stringified version of solib path used by dpdk-pmdinfo.py
  * Note: PLEASE DO NOT ALTER THIS without making a corresponding
- * change to tools/dpdk-pmdinfo.py
+ * change to usertools/dpdk-pmdinfo.py
  */
-static const char dpdk_solib_path[] __attribute__((used)) =
+static const char dpdk_solib_path[] __rte_used =
 "DPDK_PLUGIN_PATH=" RTE_EAL_PMD_PATH;

+TAILQ_HEAD(device_option_list, device_option);
+
+struct device_option {
+	TAILQ_ENTRY(device_option) next;
+
+	enum rte_devtype type;
+	char arg[];
+};
+
+static struct device_option_list devopt_list =
+	TAILQ_HEAD_INITIALIZER(devopt_list);

 static int master_lcore_parsed;
 static int mem_parsed;
+static int core_parsed;
+
+static int
+eal_option_device_add(enum rte_devtype type, const char *optarg)
+{
+	struct device_option *devopt;
+	size_t optlen;
+	int ret;
+
+	optlen = strlen(optarg) + 1;
+	devopt = calloc(1, sizeof(*devopt) + optlen);
+	if (devopt == NULL) {
+		RTE_LOG(ERR, EAL, "Unable to allocate device option\n");
+		return -ENOMEM;
+	}
+
+	devopt->type = type;
+	ret = strlcpy(devopt->arg, optarg, optlen);
+	if (ret < 0) {
+		RTE_LOG(ERR, EAL, "Unable to copy device option\n");
+		free(devopt);
+		return -EINVAL;
+	}
+	TAILQ_INSERT_TAIL(&devopt_list, devopt, next);
+	return 0;
+}
+
+int
+eal_option_device_parse(void) +{ + struct device_option *devopt; + void *tmp; + int ret = 0; + + TAILQ_FOREACH_SAFE(devopt, &devopt_list, next, tmp) { + if (ret == 0) { + ret = rte_devargs_add(devopt->type, devopt->arg); + if (ret) + RTE_LOG(ERR, EAL, "Unable to parse device '%s'\n", + devopt->arg); + } + TAILQ_REMOVE(&devopt_list, devopt, next); + free(devopt); + } + return ret; +} + +const char * +eal_get_hugefile_prefix(void) +{ + if (internal_config.hugefile_prefix != NULL) + return internal_config.hugefile_prefix; + return HUGEFILE_PREFIX_DEFAULT; +} void eal_reset_internal_config(struct internal_config *internal_cfg) @@ -135,27 +194,28 @@ eal_reset_internal_config(struct internal_config *internal_cfg) internal_cfg->memory = 0; internal_cfg->force_nrank = 0; internal_cfg->force_nchannel = 0; - internal_cfg->hugefile_prefix = HUGEFILE_PREFIX_DEFAULT; + internal_cfg->hugefile_prefix = NULL; internal_cfg->hugepage_dir = NULL; internal_cfg->force_sockets = 0; /* zero out the NUMA config */ for (i = 0; i < RTE_MAX_NUMA_NODES; i++) internal_cfg->socket_mem[i] = 0; + internal_cfg->force_socket_limits = 0; + /* zero out the NUMA limits config */ + for (i = 0; i < RTE_MAX_NUMA_NODES; i++) + internal_cfg->socket_limit[i] = 0; /* zero out hugedir descriptors */ - for (i = 0; i < MAX_HUGEPAGE_SIZES; i++) + for (i = 0; i < MAX_HUGEPAGE_SIZES; i++) { + memset(&internal_cfg->hugepage_info[i], 0, + sizeof(internal_cfg->hugepage_info[0])); internal_cfg->hugepage_info[i].lock_descriptor = -1; + } internal_cfg->base_virtaddr = 0; +#ifdef LOG_DAEMON internal_cfg->syslog_facility = LOG_DAEMON; - /* default value from build option */ -#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG - internal_cfg->log_level = RTE_LOG_INFO; -#else - internal_cfg->log_level = RTE_LOG_LEVEL; #endif - internal_cfg->xen_dom0_support = 0; - /* if set to NONE, interrupt mode is determined automatically */ internal_cfg->vfio_intr_mode = RTE_INTR_MODE_NONE; @@ -166,6 +226,10 @@ eal_reset_internal_config(struct internal_config *internal_cfg) #endif internal_cfg->vmware_tsc_map = 0; internal_cfg->create_uio_dev = 0; + internal_cfg->iova_mode = RTE_IOVA_DC; + internal_cfg->user_mbuf_pool_ops_name = NULL; + CPU_ZERO(&internal_cfg->ctrl_cpuset); + internal_cfg->init_complete = 0; } static int @@ -179,7 +243,7 @@ eal_plugin_add(const char *path) return -1; } memset(solib, 0, sizeof(*solib)); - strncpy(solib->name, path, PATH_MAX-1); + strlcpy(solib->name, path, PATH_MAX-1); solib->name[PATH_MAX-1] = 0; TAILQ_INSERT_TAIL(&solib_list, solib, next); @@ -206,8 +270,7 @@ eal_plugindir_init(const char *path) while ((dent = readdir(d)) != NULL) { struct stat sb; - snprintf(sopath, PATH_MAX-1, "%s/%s", path, dent->d_name); - sopath[PATH_MAX-1] = 0; + snprintf(sopath, sizeof(sopath), "%s/%s", path, dent->d_name); if (!(stat(sopath, &sb) == 0 && S_ISREG(sb.st_mode))) continue; @@ -224,13 +287,15 @@ eal_plugindir_init(const char *path) int eal_plugins_init(void) { +#ifndef RTE_EXEC_ENV_WINDOWS struct shared_driver *solib = NULL; + struct stat sb; - if (*default_solib_dir != '\0') + if (*default_solib_dir != '\0' && stat(default_solib_dir, &sb) == 0 && + S_ISDIR(sb.st_mode)) eal_plugin_add(default_solib_dir); TAILQ_FOREACH(solib, &solib_list, next) { - struct stat sb; if (stat(solib->name, &sb) == 0 && S_ISDIR(sb.st_mode)) { if (eal_plugindir_init(solib->name) == -1) { @@ -251,6 +316,7 @@ eal_plugins_init(void) } return 0; +#endif } /* @@ -272,13 +338,14 @@ static int xdigit2val(unsigned char c) } static int -eal_parse_coremask(const char *coremask) 
+eal_parse_service_coremask(const char *coremask) { struct rte_config *cfg = rte_eal_get_configuration(); int i, j, idx = 0; - unsigned count = 0; + unsigned int count = 0; char c; int val; + uint32_t taken_lcore_count = 0; if (coremask == NULL) return -1; @@ -293,6 +360,7 @@ eal_parse_coremask(const char *coremask) i = strlen(coremask); while ((i > 0) && isblank(coremask[i - 1])) i--; + if (i == 0) return -1; @@ -303,45 +371,150 @@ eal_parse_coremask(const char *coremask) return -1; } val = xdigit2val(c); - for (j = 0; j < BITS_PER_HEX && idx < RTE_MAX_LCORE; j++, idx++) - { + for (j = 0; j < BITS_PER_HEX && idx < RTE_MAX_LCORE; + j++, idx++) { if ((1 << j) & val) { - if (!lcore_config[idx].detected) { - RTE_LOG(ERR, EAL, "lcore %u " - "unavailable\n", idx); + /* handle master lcore already parsed */ + uint32_t lcore = idx; + if (master_lcore_parsed && + cfg->master_lcore == lcore) { + RTE_LOG(ERR, EAL, + "lcore %u is master lcore, cannot use as service core\n", + idx); return -1; } - cfg->lcore_role[idx] = ROLE_RTE; - lcore_config[idx].core_index = count; + + if (eal_cpu_detected(idx) == 0) { + RTE_LOG(ERR, EAL, + "lcore %u unavailable\n", idx); + return -1; + } + + if (cfg->lcore_role[idx] == ROLE_RTE) + taken_lcore_count++; + + lcore_config[idx].core_role = ROLE_SERVICE; count++; - } else { - cfg->lcore_role[idx] = ROLE_OFF; - lcore_config[idx].core_index = -1; } } } + for (; i >= 0; i--) if (coremask[i] != '0') return -1; - for (; idx < RTE_MAX_LCORE; idx++) { - cfg->lcore_role[idx] = ROLE_OFF; + + for (; idx < RTE_MAX_LCORE; idx++) lcore_config[idx].core_index = -1; + + if (count == 0) + return -1; + + if (core_parsed && taken_lcore_count != count) { + RTE_LOG(WARNING, EAL, + "Not all service cores are in the coremask. " + "Please ensure -c or -l includes service cores\n"); + } + + cfg->service_lcore_count = count; + return 0; +} + +static int +eal_service_cores_parsed(void) +{ + int idx; + for (idx = 0; idx < RTE_MAX_LCORE; idx++) { + if (lcore_config[idx].core_role == ROLE_SERVICE) + return 1; } + return 0; +} + +static int +update_lcore_config(int *cores) +{ + struct rte_config *cfg = rte_eal_get_configuration(); + unsigned int count = 0; + unsigned int i; + int ret = 0; + + for (i = 0; i < RTE_MAX_LCORE; i++) { + if (cores[i] != -1) { + if (eal_cpu_detected(i) == 0) { + RTE_LOG(ERR, EAL, "lcore %u unavailable\n", i); + ret = -1; + continue; + } + cfg->lcore_role[i] = ROLE_RTE; + count++; + } else { + cfg->lcore_role[i] = ROLE_OFF; + } + lcore_config[i].core_index = cores[i]; + } + if (!ret) + cfg->lcore_count = count; + return ret; +} + +static int +eal_parse_coremask(const char *coremask, int *cores) +{ + unsigned count = 0; + int i, j, idx; + int val; + char c; + + for (idx = 0; idx < RTE_MAX_LCORE; idx++) + cores[idx] = -1; + idx = 0; + + /* Remove all blank characters ahead and after . + * Remove 0x/0X if exists. 
+ */ + while (isblank(*coremask)) + coremask++; + if (coremask[0] == '0' && ((coremask[1] == 'x') + || (coremask[1] == 'X'))) + coremask += 2; + i = strlen(coremask); + while ((i > 0) && isblank(coremask[i - 1])) + i--; + if (i == 0) + return -1; + + for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) { + c = coremask[i]; + if (isxdigit(c) == 0) { + /* invalid characters */ + return -1; + } + val = xdigit2val(c); + for (j = 0; j < BITS_PER_HEX && idx < RTE_MAX_LCORE; j++, idx++) + { + if ((1 << j) & val) { + cores[idx] = count; + count++; + } + } + } + for (; i >= 0; i--) + if (coremask[i] != '0') + return -1; if (count == 0) return -1; - /* Update the count of enabled logical cores of the EAL configuration */ - cfg->lcore_count = count; return 0; } static int -eal_parse_corelist(const char *corelist) +eal_parse_service_corelist(const char *corelist) { struct rte_config *cfg = rte_eal_get_configuration(); int i, idx = 0; unsigned count = 0; char *end = NULL; int min, max; + uint32_t taken_lcore_count = 0; if (corelist == NULL) return -1; @@ -353,12 +526,6 @@ eal_parse_corelist(const char *corelist) while ((i > 0) && isblank(corelist[i - 1])) i--; - /* Reset config */ - for (idx = 0; idx < RTE_MAX_LCORE; idx++) { - cfg->lcore_role[idx] = ROLE_OFF; - lcore_config[idx].core_index = -1; - } - /* Get list of cores */ min = RTE_MAX_LCORE; do { @@ -379,9 +546,21 @@ eal_parse_corelist(const char *corelist) if (min == RTE_MAX_LCORE) min = idx; for (idx = min; idx <= max; idx++) { - if (cfg->lcore_role[idx] != ROLE_RTE) { - cfg->lcore_role[idx] = ROLE_RTE; - lcore_config[idx].core_index = count; + if (cfg->lcore_role[idx] != ROLE_SERVICE) { + /* handle master lcore already parsed */ + uint32_t lcore = idx; + if (cfg->master_lcore == lcore && + master_lcore_parsed) { + RTE_LOG(ERR, EAL, + "Error: lcore %u is master lcore, cannot use as service core\n", + idx); + return -1; + } + if (cfg->lcore_role[idx] == ROLE_RTE) + taken_lcore_count++; + + lcore_config[idx].core_role = + ROLE_SERVICE; count++; } } @@ -394,9 +573,65 @@ eal_parse_corelist(const char *corelist) if (count == 0) return -1; - /* Update the count of enabled logical cores of the EAL configuration */ - cfg->lcore_count = count; + if (core_parsed && taken_lcore_count != count) { + RTE_LOG(WARNING, EAL, + "Not all service cores were in the coremask. 
" + "Please ensure -c or -l includes service cores\n"); + } + + return 0; +} + +static int +eal_parse_corelist(const char *corelist, int *cores) +{ + unsigned count = 0; + char *end = NULL; + int min, max; + int idx; + + for (idx = 0; idx < RTE_MAX_LCORE; idx++) + cores[idx] = -1; + + /* Remove all blank characters ahead */ + while (isblank(*corelist)) + corelist++; + + /* Get list of cores */ + min = RTE_MAX_LCORE; + do { + while (isblank(*corelist)) + corelist++; + if (*corelist == '\0') + return -1; + errno = 0; + idx = strtol(corelist, &end, 10); + if (errno || end == NULL) + return -1; + if (idx < 0 || idx >= RTE_MAX_LCORE) + return -1; + while (isblank(*end)) + end++; + if (*end == '-') { + min = idx; + } else if ((*end == ',') || (*end == '\0')) { + max = idx; + if (min == RTE_MAX_LCORE) + min = idx; + for (idx = min; idx <= max; idx++) { + if (cores[idx] == -1) { + cores[idx] = count; + count++; + } + } + min = RTE_MAX_LCORE; + } else + return -1; + corelist = end + 1; + } while (*end != '\0'); + if (count == 0) + return -1; return 0; } @@ -414,6 +649,14 @@ eal_parse_master_lcore(const char *arg) if (cfg->master_lcore >= RTE_MAX_LCORE) return -1; master_lcore_parsed = 1; + + /* ensure master core is not used as service core */ + if (lcore_config[cfg->master_lcore].core_role == ROLE_SERVICE) { + RTE_LOG(ERR, EAL, + "Error: Master lcore is used as a service core\n"); + return -1; + } + return 0; } @@ -426,14 +669,14 @@ eal_parse_master_lcore(const char *arg) * ',' used for a single number. */ static int -eal_parse_set(const char *input, uint16_t set[], unsigned num) +eal_parse_set(const char *input, rte_cpuset_t *set) { unsigned idx; const char *str = input; char *end = NULL; unsigned min, max; - memset(set, 0, num * sizeof(uint16_t)); + CPU_ZERO(set); while (isblank(*str)) str++; @@ -446,7 +689,7 @@ eal_parse_set(const char *input, uint16_t set[], unsigned num) if (*str != '(') { errno = 0; idx = strtoul(str, &end, 10); - if (errno || end == NULL || idx >= num) + if (errno || end == NULL || idx >= CPU_SETSIZE) return -1; else { while (isblank(*end)) @@ -464,7 +707,7 @@ eal_parse_set(const char *input, uint16_t set[], unsigned num) errno = 0; idx = strtoul(end, &end, 10); - if (errno || end == NULL || idx >= num) + if (errno || end == NULL || idx >= CPU_SETSIZE) return -1; max = idx; while (isblank(*end)) @@ -479,7 +722,7 @@ eal_parse_set(const char *input, uint16_t set[], unsigned num) for (idx = RTE_MIN(min, max); idx <= RTE_MAX(min, max); idx++) - set[idx] = 1; + CPU_SET(idx, set); return end - input; } @@ -504,7 +747,7 @@ eal_parse_set(const char *input, uint16_t set[], unsigned num) /* get the digit value */ errno = 0; idx = strtoul(str, &end, 10); - if (errno || end == NULL || idx >= num) + if (errno || end == NULL || idx >= CPU_SETSIZE) return -1; /* go ahead to separator '-',',' and ')' */ @@ -521,7 +764,7 @@ eal_parse_set(const char *input, uint16_t set[], unsigned num) min = idx; for (idx = RTE_MIN(min, max); idx <= RTE_MAX(min, max); idx++) - set[idx] = 1; + CPU_SET(idx, set); min = RTE_MAX_LCORE; } else @@ -530,31 +773,31 @@ eal_parse_set(const char *input, uint16_t set[], unsigned num) str = end + 1; } while (*end != '\0' && *end != ')'); + /* + * to avoid failure that tail blank makes end character check fail + * in eal_parse_lcores( ) + */ + while (isblank(*str)) + str++; + return str - input; } -/* convert from set array to cpuset bitmap */ static int -convert_to_cpuset(rte_cpuset_t *cpusetp, - uint16_t *set, unsigned num) +check_cpuset(rte_cpuset_t *set) { - unsigned 
idx; - - CPU_ZERO(cpusetp); + unsigned int idx; - for (idx = 0; idx < num; idx++) { - if (!set[idx]) + for (idx = 0; idx < CPU_SETSIZE; idx++) { + if (!CPU_ISSET(idx, set)) continue; - if (!lcore_config[idx].detected) { + if (eal_cpu_detected(idx) == 0) { RTE_LOG(ERR, EAL, "core %u " "unavailable\n", idx); return -1; } - - CPU_SET(idx, cpusetp); } - return 0; } @@ -576,15 +819,15 @@ static int eal_parse_lcores(const char *lcores) { struct rte_config *cfg = rte_eal_get_configuration(); - static uint16_t set[RTE_MAX_LCORE]; + rte_cpuset_t lcore_set; + unsigned int set_count; unsigned idx = 0; - int i; unsigned count = 0; const char *lcore_start = NULL; const char *end = NULL; int offset; rte_cpuset_t cpuset; - int lflags = 0; + int lflags; int ret = -1; if (lcores == NULL) @@ -593,9 +836,6 @@ eal_parse_lcores(const char *lcores) /* Remove all blank characters ahead and after */ while (isblank(*lcores)) lcores++; - i = strlen(lcores); - while ((i > 0) && isblank(lcores[i - 1])) - i--; CPU_ZERO(&cpuset); @@ -613,6 +853,8 @@ eal_parse_lcores(const char *lcores) if (*lcores == '\0') goto err; + lflags = 0; + /* record lcore_set start point */ lcore_start = lcores; @@ -627,18 +869,13 @@ eal_parse_lcores(const char *lcores) lcores += strcspn(lcores, "@,"); if (*lcores == '@') { - /* explicit assign cpu_set */ - offset = eal_parse_set(lcores + 1, set, RTE_DIM(set)); + /* explicit assign cpuset and update the end cursor */ + offset = eal_parse_set(lcores + 1, &cpuset); if (offset < 0) goto err; - - /* prepare cpu_set and update the end cursor */ - if (0 > convert_to_cpuset(&cpuset, - set, RTE_DIM(set))) - goto err; end = lcores + 1 + offset; } else { /* ',' or '\0' */ - /* haven't given cpu_set, current loop done */ + /* haven't given cpuset, current loop done */ end = lcores; /* go back to check - */ @@ -652,18 +889,19 @@ eal_parse_lcores(const char *lcores) goto err; /* parse lcore_set from start point */ - if (0 > eal_parse_set(lcore_start, set, RTE_DIM(set))) + if (eal_parse_set(lcore_start, &lcore_set) < 0) goto err; - /* without '@', by default using lcore_set as cpu_set */ - if (*lcores != '@' && - 0 > convert_to_cpuset(&cpuset, set, RTE_DIM(set))) - goto err; + /* without '@', by default using lcore_set as cpuset */ + if (*lcores != '@') + rte_memcpy(&cpuset, &lcore_set, sizeof(cpuset)); + set_count = CPU_COUNT(&lcore_set); /* start to update lcore_set */ for (idx = 0; idx < RTE_MAX_LCORE; idx++) { - if (!set[idx]) + if (!CPU_ISSET(idx, &lcore_set)) continue; + set_count--; if (cfg->lcore_role[idx] != ROLE_RTE) { lcore_config[idx].core_index = count; @@ -675,10 +913,17 @@ eal_parse_lcores(const char *lcores) CPU_ZERO(&cpuset); CPU_SET(idx, &cpuset); } + + if (check_cpuset(&cpuset) < 0) + goto err; rte_memcpy(&lcore_config[idx].cpuset, &cpuset, sizeof(rte_cpuset_t)); } + /* some cores from the lcore_set can't be handled by EAL */ + if (set_count != 0) + goto err; + lcores = end + 1; } while (*end != '\0'); @@ -693,11 +938,12 @@ err: return ret; } +#ifndef RTE_EXEC_ENV_WINDOWS static int eal_parse_syslog(const char *facility, struct internal_config *conf) { int i; - static struct { + static const struct { const char *name; int value; } map[] = { @@ -731,27 +977,101 @@ eal_parse_syslog(const char *facility, struct internal_config *conf) } return -1; } +#endif static int -eal_parse_log_level(const char *level, uint32_t *log_level) +eal_parse_log_priority(const char *level) { - char *end; + static const char * const levels[] = { + [RTE_LOG_EMERG] = "emergency", + [RTE_LOG_ALERT] = "alert", + 
[RTE_LOG_CRIT] = "critical", + [RTE_LOG_ERR] = "error", + [RTE_LOG_WARNING] = "warning", + [RTE_LOG_NOTICE] = "notice", + [RTE_LOG_INFO] = "info", + [RTE_LOG_DEBUG] = "debug", + }; + size_t len = strlen(level); unsigned long tmp; + char *end; + unsigned int i; + + if (len == 0) + return -1; + + /* look for named values, skip 0 which is not a valid level */ + for (i = 1; i < RTE_DIM(levels); i++) { + if (strncmp(levels[i], level, len) == 0) + return i; + } + /* not a string, maybe it is numeric */ errno = 0; tmp = strtoul(level, &end, 0); /* check for errors */ - if ((errno != 0) || (level[0] == '\0') || - end == NULL || (*end != '\0')) + if (errno != 0 || end == NULL || *end != '\0' || + tmp >= UINT32_MAX) return -1; - /* log_level is a uint32_t */ - if (tmp >= UINT32_MAX) + return tmp; +} + +static int +eal_parse_log_level(const char *arg) +{ + const char *pattern = NULL; + const char *regex = NULL; + char *str, *level; + int priority; + + str = strdup(arg); + if (str == NULL) return -1; - *log_level = tmp; + if ((level = strchr(str, ','))) { + regex = str; + *level++ = '\0'; + } else if ((level = strchr(str, ':'))) { + pattern = str; + *level++ = '\0'; + } else { + level = str; + } + + priority = eal_parse_log_priority(level); + if (priority < 0) { + fprintf(stderr, "invalid log priority: %s\n", level); + goto fail; + } + + if (regex) { + if (rte_log_set_level_regexp(regex, priority) < 0) { + fprintf(stderr, "cannot set log level %s,%d\n", + regex, priority); + goto fail; + } + if (rte_log_save_regexp(regex, priority) < 0) + goto fail; + } else if (pattern) { + if (rte_log_set_level_pattern(pattern, priority) < 0) { + fprintf(stderr, "cannot set log level %s:%d\n", + pattern, priority); + goto fail; + } + if (rte_log_save_pattern(pattern, priority) < 0) + goto fail; + } else { + rte_log_set_global_level(priority); + } + + free(str); return 0; + +fail: + free(str); + return -1; } static enum rte_proc_type_t @@ -767,36 +1087,228 @@ eal_parse_proc_type(const char *arg) return RTE_PROC_INVALID; } +static int +eal_parse_iova_mode(const char *name) +{ + int mode; + + if (name == NULL) + return -1; + + if (!strcmp("pa", name)) + mode = RTE_IOVA_PA; + else if (!strcmp("va", name)) + mode = RTE_IOVA_VA; + else + return -1; + + internal_config.iova_mode = mode; + return 0; +} + +static int +eal_parse_base_virtaddr(const char *arg) +{ + char *end; + uint64_t addr; + + errno = 0; + addr = strtoull(arg, &end, 16); + + /* check for errors */ + if ((errno != 0) || (arg[0] == '\0') || end == NULL || (*end != '\0')) + return -1; + + /* make sure we don't exceed 32-bit boundary on 32-bit target */ +#ifndef RTE_ARCH_64 + if (addr >= UINTPTR_MAX) + return -1; +#endif + + /* align the addr on 16M boundary, 16MB is the minimum huge page + * size on IBM Power architecture. If the addr is aligned to 16MB, + * it can align to 2MB for x86. So this alignment can also be used + * on x86 and other architectures. 
+ */ + internal_config.base_virtaddr = + RTE_PTR_ALIGN_CEIL((uintptr_t)addr, (size_t)RTE_PGSIZE_16M); + + return 0; +} + +/* caller is responsible for freeing the returned string */ +static char * +available_cores(void) +{ + char *str = NULL; + int previous; + int sequence; + char *tmp; + int idx; + + /* find the first available cpu */ + for (idx = 0; idx < RTE_MAX_LCORE; idx++) { + if (eal_cpu_detected(idx) == 0) + continue; + break; + } + if (idx >= RTE_MAX_LCORE) + return NULL; + + /* first sequence */ + if (asprintf(&str, "%d", idx) < 0) + return NULL; + previous = idx; + sequence = 0; + + for (idx++ ; idx < RTE_MAX_LCORE; idx++) { + if (eal_cpu_detected(idx) == 0) + continue; + + if (idx == previous + 1) { + previous = idx; + sequence = 1; + continue; + } + + /* finish current sequence */ + if (sequence) { + if (asprintf(&tmp, "%s-%d", str, previous) < 0) { + free(str); + return NULL; + } + free(str); + str = tmp; + } + + /* new sequence */ + if (asprintf(&tmp, "%s,%d", str, idx) < 0) { + free(str); + return NULL; + } + free(str); + str = tmp; + previous = idx; + sequence = 0; + } + + /* finish last sequence */ + if (sequence) { + if (asprintf(&tmp, "%s-%d", str, previous) < 0) { + free(str); + return NULL; + } + free(str); + str = tmp; + } + + return str; +} + int eal_parse_common_option(int opt, const char *optarg, struct internal_config *conf) { + static int b_used; + static int w_used; + switch (opt) { /* blacklist */ case 'b': - if (rte_eal_devargs_add(RTE_DEVTYPE_BLACKLISTED_PCI, + if (w_used) + goto bw_used; + if (eal_option_device_add(RTE_DEVTYPE_BLACKLISTED_PCI, optarg) < 0) { return -1; } + b_used = 1; break; /* whitelist */ case 'w': - if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, + if (b_used) + goto bw_used; + if (eal_option_device_add(RTE_DEVTYPE_WHITELISTED_PCI, optarg) < 0) { return -1; } + w_used = 1; break; /* coremask */ - case 'c': - if (eal_parse_coremask(optarg) < 0) { - RTE_LOG(ERR, EAL, "invalid coremask\n"); + case 'c': { + int lcore_indexes[RTE_MAX_LCORE]; + + if (eal_service_cores_parsed()) + RTE_LOG(WARNING, EAL, + "Service cores parsed before dataplane cores. Please ensure -c is before -s or -S\n"); + if (eal_parse_coremask(optarg, lcore_indexes) < 0) { + RTE_LOG(ERR, EAL, "invalid coremask syntax\n"); + return -1; + } + if (update_lcore_config(lcore_indexes) < 0) { + char *available = available_cores(); + + RTE_LOG(ERR, EAL, + "invalid coremask, please check specified cores are part of %s\n", + available); + free(available); + return -1; + } + + if (core_parsed) { + RTE_LOG(ERR, EAL, "Option -c is ignored, because (%s) is set!\n", + (core_parsed == LCORE_OPT_LST) ? "-l" : + (core_parsed == LCORE_OPT_MAP) ? "--lcore" : + "-c"); return -1; } + + core_parsed = LCORE_OPT_MSK; break; + } /* corelist */ - case 'l': - if (eal_parse_corelist(optarg) < 0) { - RTE_LOG(ERR, EAL, "invalid core list\n"); + case 'l': { + int lcore_indexes[RTE_MAX_LCORE]; + + if (eal_service_cores_parsed()) + RTE_LOG(WARNING, EAL, + "Service cores parsed before dataplane cores. 
Please ensure -l is before -s or -S\n"); + + if (eal_parse_corelist(optarg, lcore_indexes) < 0) { + RTE_LOG(ERR, EAL, "invalid core list syntax\n"); + return -1; + } + if (update_lcore_config(lcore_indexes) < 0) { + char *available = available_cores(); + + RTE_LOG(ERR, EAL, + "invalid core list, please check specified cores are part of %s\n", + available); + free(available); + return -1; + } + + if (core_parsed) { + RTE_LOG(ERR, EAL, "Option -l is ignored, because (%s) is set!\n", + (core_parsed == LCORE_OPT_MSK) ? "-c" : + (core_parsed == LCORE_OPT_MAP) ? "--lcore" : + "-l"); + return -1; + } + + core_parsed = LCORE_OPT_LST; + break; + } + /* service coremask */ + case 's': + if (eal_parse_service_coremask(optarg) < 0) { + RTE_LOG(ERR, EAL, "invalid service coremask\n"); + return -1; + } + break; + /* service corelist */ + case 'S': + if (eal_parse_service_corelist(optarg) < 0) { + RTE_LOG(ERR, EAL, "invalid service core list\n"); return -1; } break; @@ -844,6 +1356,8 @@ eal_parse_common_option(int opt, const char *optarg, case OPT_NO_HUGE_NUM: conf->no_hugetlbfs = 1; + /* no-huge is legacy mem */ + conf->legacy_mem = 1; break; case OPT_NO_PCI_NUM: @@ -862,6 +1376,13 @@ eal_parse_common_option(int opt, const char *optarg, conf->no_shconf = 1; break; + case OPT_IN_MEMORY_NUM: + conf->in_memory = 1; + /* in-memory is a superset of noshconf and huge-unlink */ + conf->no_shconf = 1; + conf->hugepage_unlink = 1; + break; + case OPT_PROC_TYPE_NUM: conf->process_type = eal_parse_proc_type(optarg); break; @@ -875,12 +1396,13 @@ eal_parse_common_option(int opt, const char *optarg, break; case OPT_VDEV_NUM: - if (rte_eal_devargs_add(RTE_DEVTYPE_VIRTUAL, + if (eal_option_device_add(RTE_DEVTYPE_VIRTUAL, optarg) < 0) { return -1; } break; +#ifndef RTE_EXEC_ENV_WINDOWS case OPT_SYSLOG_NUM: if (eal_parse_syslog(optarg, conf) < 0) { RTE_LOG(ERR, EAL, "invalid parameters for --" @@ -888,25 +1410,72 @@ eal_parse_common_option(int opt, const char *optarg, return -1; } break; +#endif case OPT_LOG_LEVEL_NUM: { - uint32_t log; - - if (eal_parse_log_level(optarg, &log) < 0) { + if (eal_parse_log_level(optarg) < 0) { RTE_LOG(ERR, EAL, "invalid parameters for --" OPT_LOG_LEVEL "\n"); return -1; } - conf->log_level = log; break; } + + case OPT_TRACE_NUM: { + if (eal_trace_args_save(optarg) < 0) { + RTE_LOG(ERR, EAL, "invalid parameters for --" + OPT_TRACE "\n"); + return -1; + } + break; + } + + case OPT_TRACE_DIR_NUM: { + if (eal_trace_dir_args_save(optarg) < 0) { + RTE_LOG(ERR, EAL, "invalid parameters for --" + OPT_TRACE_DIR "\n"); + return -1; + } + break; + } + case OPT_LCORES_NUM: if (eal_parse_lcores(optarg) < 0) { RTE_LOG(ERR, EAL, "invalid parameter for --" OPT_LCORES "\n"); return -1; } + + if (core_parsed) { + RTE_LOG(ERR, EAL, "Option --lcore is ignored, because (%s) is set!\n", + (core_parsed == LCORE_OPT_LST) ? "-l" : + (core_parsed == LCORE_OPT_MSK) ? 
"-c" : + "--lcore"); + return -1; + } + + core_parsed = LCORE_OPT_MAP; + break; + case OPT_LEGACY_MEM_NUM: + conf->legacy_mem = 1; + break; + case OPT_SINGLE_FILE_SEGMENTS_NUM: + conf->single_file_segments = 1; + break; + case OPT_IOVA_MODE_NUM: + if (eal_parse_iova_mode(optarg) < 0) { + RTE_LOG(ERR, EAL, "invalid parameters for --" + OPT_IOVA_MODE "\n"); + return -1; + } + break; + case OPT_BASE_VIRTADDR_NUM: + if (eal_parse_base_virtaddr(optarg) < 0) { + RTE_LOG(ERR, EAL, "invalid parameter for --" + OPT_BASE_VIRTADDR "\n"); + return -1; + } break; /* don't know what to do, leave this to caller */ @@ -915,6 +1484,72 @@ eal_parse_common_option(int opt, const char *optarg, } + return 0; +bw_used: + RTE_LOG(ERR, EAL, "Options blacklist (-b) and whitelist (-w) " + "cannot be used at the same time\n"); + return -1; +} + +static void +eal_auto_detect_cores(struct rte_config *cfg) +{ + unsigned int lcore_id; + unsigned int removed = 0; + rte_cpuset_t affinity_set; + + if (pthread_getaffinity_np(pthread_self(), sizeof(rte_cpuset_t), + &affinity_set)) + CPU_ZERO(&affinity_set); + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + if (cfg->lcore_role[lcore_id] == ROLE_RTE && + !CPU_ISSET(lcore_id, &affinity_set)) { + cfg->lcore_role[lcore_id] = ROLE_OFF; + removed++; + } + } + + cfg->lcore_count -= removed; +} + +static void +compute_ctrl_threads_cpuset(struct internal_config *internal_cfg) +{ + rte_cpuset_t *cpuset = &internal_cfg->ctrl_cpuset; + rte_cpuset_t default_set; + unsigned int lcore_id; + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + if (rte_lcore_has_role(lcore_id, ROLE_OFF)) + continue; + RTE_CPU_OR(cpuset, cpuset, &lcore_config[lcore_id].cpuset); + } + RTE_CPU_NOT(cpuset, cpuset); + + if (pthread_getaffinity_np(pthread_self(), sizeof(rte_cpuset_t), + &default_set)) + CPU_ZERO(&default_set); + + RTE_CPU_AND(cpuset, cpuset, &default_set); + + /* if no remaining cpu, use master lcore cpu affinity */ + if (!CPU_COUNT(cpuset)) { + memcpy(cpuset, &lcore_config[rte_get_master_lcore()].cpuset, + sizeof(*cpuset)); + } +} + +int +eal_cleanup_config(struct internal_config *internal_cfg) +{ + if (internal_cfg->hugefile_prefix != NULL) + free(internal_cfg->hugefile_prefix); + if (internal_cfg->hugepage_dir != NULL) + free(internal_cfg->hugepage_dir); + if (internal_cfg->user_mbuf_pool_ops_name != NULL) + free(internal_cfg->user_mbuf_pool_ops_name); + return 0; } @@ -924,12 +1559,21 @@ eal_adjust_config(struct internal_config *internal_cfg) int i; struct rte_config *cfg = rte_eal_get_configuration(); + if (!core_parsed) + eal_auto_detect_cores(cfg); + if (internal_config.process_type == RTE_PROC_AUTO) internal_config.process_type = eal_proc_type_detect(); /* default master lcore is the first one */ - if (!master_lcore_parsed) + if (!master_lcore_parsed) { cfg->master_lcore = rte_get_next_lcore(-1, 0, 0); + if (cfg->master_lcore >= RTE_MAX_LCORE) + return -1; + lcore_config[cfg->master_lcore].core_role = ROLE_RTE; + } + + compute_ctrl_threads_cpuset(internal_cfg); /* if no memory amounts were requested, this will result in 0 and * will be overridden later, right after eal_hugepage_info_init() */ @@ -953,7 +1597,22 @@ eal_check_common_options(struct internal_config *internal_cfg) RTE_LOG(ERR, EAL, "Invalid process type specified\n"); return -1; } - if (index(internal_cfg->hugefile_prefix, '%') != NULL) { + if (internal_cfg->hugefile_prefix != NULL && + strlen(internal_cfg->hugefile_prefix) < 1) { + RTE_LOG(ERR, EAL, "Invalid length of --" OPT_FILE_PREFIX " option\n"); + 
return -1; + } + if (internal_cfg->hugepage_dir != NULL && + strlen(internal_cfg->hugepage_dir) < 1) { + RTE_LOG(ERR, EAL, "Invalid length of --" OPT_HUGE_DIR" option\n"); + return -1; + } + if (internal_cfg->user_mbuf_pool_ops_name != NULL && + strlen(internal_cfg->user_mbuf_pool_ops_name) < 1) { + RTE_LOG(ERR, EAL, "Invalid length of --" OPT_MBUF_POOL_OPS_NAME" option\n"); + return -1; + } + if (index(eal_get_hugefile_prefix(), '%') != NULL) { RTE_LOG(ERR, EAL, "Invalid char, '%%', in --"OPT_FILE_PREFIX" " "option\n"); return -1; @@ -968,19 +1627,44 @@ eal_check_common_options(struct internal_config *internal_cfg) "be specified together with --"OPT_NO_HUGE"\n"); return -1; } - - if (internal_cfg->no_hugetlbfs && internal_cfg->hugepage_unlink) { + if (internal_cfg->no_hugetlbfs && internal_cfg->hugepage_unlink && + !internal_cfg->in_memory) { RTE_LOG(ERR, EAL, "Option --"OPT_HUGE_UNLINK" cannot " "be specified together with --"OPT_NO_HUGE"\n"); return -1; } - - if (rte_eal_devargs_type_count(RTE_DEVTYPE_WHITELISTED_PCI) != 0 && - rte_eal_devargs_type_count(RTE_DEVTYPE_BLACKLISTED_PCI) != 0) { - RTE_LOG(ERR, EAL, "Options blacklist (-b) and whitelist (-w) " - "cannot be used at the same time\n"); + if (internal_config.force_socket_limits && internal_config.legacy_mem) { + RTE_LOG(ERR, EAL, "Option --"OPT_SOCKET_LIMIT + " is only supported in non-legacy memory mode\n"); + } + if (internal_cfg->single_file_segments && + internal_cfg->hugepage_unlink && + !internal_cfg->in_memory) { + RTE_LOG(ERR, EAL, "Option --"OPT_SINGLE_FILE_SEGMENTS" is " + "not compatible with --"OPT_HUGE_UNLINK"\n"); return -1; } + if (internal_cfg->legacy_mem && + internal_cfg->in_memory) { + RTE_LOG(ERR, EAL, "Option --"OPT_LEGACY_MEM" is not compatible " + "with --"OPT_IN_MEMORY"\n"); + return -1; + } + if (internal_cfg->legacy_mem && internal_cfg->match_allocations) { + RTE_LOG(ERR, EAL, "Option --"OPT_LEGACY_MEM" is not compatible " + "with --"OPT_MATCH_ALLOCATIONS"\n"); + return -1; + } + if (internal_cfg->no_hugetlbfs && internal_cfg->match_allocations) { + RTE_LOG(ERR, EAL, "Option --"OPT_NO_HUGE" is not compatible " + "with --"OPT_MATCH_ALLOCATIONS"\n"); + return -1; + } + if (internal_cfg->legacy_mem && internal_cfg->memory == 0) { + RTE_LOG(NOTICE, EAL, "Static memory layout is selected, " + "amount of reserved memory can be adjusted with " + "-m or --"OPT_SOCKET_MEM"\n"); + } return 0; } @@ -1002,7 +1686,9 @@ eal_common_usage(void) " ',' is used for single number separator.\n" " '( )' can be omitted for single element group,\n" " '@' can be omitted if cpus and lcores have the same value\n" + " -s SERVICE COREMASK Hexadecimal bitmask of cores to be used as service cores\n" " --"OPT_MASTER_LCORE" ID Core ID that is used as master\n" + " --"OPT_MBUF_POOL_OPS_NAME" Pool ops name for mbuf to use\n" " -n CHANNELS Number of memory channels\n" " -m MB Memory to allocate (see also --"OPT_SOCKET_MEM")\n" " -r RANKS Force number of memory ranks (don't detect)\n" @@ -1016,15 +1702,33 @@ eal_common_usage(void) " [NOTE: PCI whitelist cannot be used with -b option]\n" " --"OPT_VDEV" Add a virtual device.\n" " The argument format is [,key=val,...]\n" - " (ex: --vdev=eth_pcap0,iface=eth2).\n" + " (ex: --vdev=net_pcap0,iface=eth2).\n" + " --"OPT_IOVA_MODE" Set IOVA mode. 
'pa' for IOVA_PA\n" + " 'va' for IOVA_VA\n" " -d LIB.so|DIR Add a driver or driver directory\n" " (can be used multiple times)\n" " --"OPT_VMWARE_TSC_MAP" Use VMware TSC map instead of native RDTSC\n" " --"OPT_PROC_TYPE" Type of this process (primary|secondary|auto)\n" +#ifndef RTE_EXEC_ENV_WINDOWS " --"OPT_SYSLOG" Set syslog facility\n" - " --"OPT_LOG_LEVEL" Set default log level\n" +#endif + " --"OPT_LOG_LEVEL"= Set global log level\n" + " --"OPT_LOG_LEVEL"=:\n" + " Set specific log level\n" + " --"OPT_TRACE"=\n" + " Enable trace based on regular expression trace name.\n" + " By default, the trace is disabled.\n" + " User must specify this option to enable trace.\n" + " --"OPT_TRACE_DIR"=\n" + " Specify trace directory for trace output.\n" + " By default, trace output will created at\n" + " $HOME directory and parameter must be\n" + " specified once only.\n" " -v Display version information on startup\n" " -h, --help This help\n" + " --"OPT_IN_MEMORY" Operate entirely in memory. This will\n" + " disable secondary process support\n" + " --"OPT_BASE_VIRTADDR" Base virtual address\n" "\nEAL options for DEBUG use only:\n" " --"OPT_HUGE_UNLINK" Unlink hugepage files after init\n" " --"OPT_NO_HUGE" Use malloc instead of hugetlbfs\n" @@ -1032,4 +1736,5 @@ eal_common_usage(void) " --"OPT_NO_HPET" Disable HPET\n" " --"OPT_NO_SHCONF" No shared config (mmap'd files)\n" "\n", RTE_MAX_LCORE); + rte_option_usage(); }
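
The reworked option handling above splits coremask parsing (eal_parse_coremask() filling an array of per-lcore core indexes) from applying the result (update_lcore_config()). As a rough, self-contained illustration of the parsing half only, here is a minimal sketch; the names parse_coremask and MAX_LCORE are hypothetical, not the EAL symbols, and the sketch omits the trailing-zero check the EAL code performs for masks wider than RTE_MAX_LCORE.

/*
 * Minimal standalone sketch of the coremask parsing idea: a hex mask such
 * as "0xe" enables lcores 1, 2 and 3, and each enabled lcore receives an
 * increasing core index, all others staying at -1.
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

#define MAX_LCORE 128

/* Fill cores[] with a core index for every bit set in the mask, -1
 * otherwise. Returns the number of enabled lcores, or -1 on error. */
static int
parse_coremask(const char *coremask, int cores[MAX_LCORE])
{
	int count = 0, idx = 0;

	for (int i = 0; i < MAX_LCORE; i++)
		cores[i] = -1;

	/* skip leading blanks and an optional 0x/0X prefix */
	while (isblank((unsigned char)*coremask))
		coremask++;
	if (coremask[0] == '0' && tolower((unsigned char)coremask[1]) == 'x')
		coremask += 2;

	int len = (int)strlen(coremask);
	while (len > 0 && isblank((unsigned char)coremask[len - 1]))
		len--;
	if (len == 0)
		return -1;

	/* walk the mask from the least significant (rightmost) digit */
	for (int i = len - 1; i >= 0 && idx < MAX_LCORE; i--) {
		unsigned char c = (unsigned char)coremask[i];

		if (!isxdigit(c))
			return -1;
		int val = isdigit(c) ? c - '0' : tolower(c) - 'a' + 10;

		/* each hex digit covers four lcore ids */
		for (int j = 0; j < 4 && idx < MAX_LCORE; j++, idx++) {
			if ((1 << j) & val)
				cores[idx] = count++;
		}
	}
	return count ? count : -1;
}

int
main(void)
{
	int cores[MAX_LCORE];
	int n = parse_coremask("0xe", cores);	/* lcores 1, 2, 3 */

	printf("%d lcores enabled\n", n);
	for (int i = 0; i < MAX_LCORE; i++)
		if (cores[i] != -1)
			printf("lcore %d -> core index %d\n", i, cores[i]);
	return 0;
}

For context, the help text added by this patch documents the related flags: service lcores can be supplied as a hex mask with -s or as a list with -S alongside the usual -c/-l data-plane cores (an invocation such as "-l 0-3 -s 0x8", with illustrative values, marks lcore 3 as a service core), --log-level now accepts either a level name or a pattern/regexp paired with a level, and --iova-mode forces 'pa' or 'va'.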