X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_eal%2Fcommon%2Feal_common_options.c;h=75974dd5ba03c21a8afe949fc6d19873cb3b461d;hb=5aa2f0db4acf256d1655fa12c3e868b77f6f52ee;hp=00265d6e358a01a0cf51a40ad7cb86508310ec5f;hpb=25175fb2ab27966b7774be80f0caf94a40092ad9;p=dpdk.git

diff --git a/lib/librte_eal/common/eal_common_options.c b/lib/librte_eal/common/eal_common_options.c
index 00265d6e35..75974dd5ba 100644
--- a/lib/librte_eal/common/eal_common_options.c
+++ b/lib/librte_eal/common/eal_common_options.c
@@ -1,52 +1,31 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 6WIND S.A.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Intel Corporation nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ * Copyright(c) 2014 6WIND S.A.
*/ #include #include #include +#ifndef RTE_EXEC_ENV_WINDOWS #include +#endif #include #include #include #include +#ifndef RTE_EXEC_ENV_WINDOWS #include +#endif #include #include #include +#include #include #include #include +#include +#include #include #include #include @@ -54,8 +33,12 @@ #include "eal_internal_cfg.h" #include "eal_options.h" #include "eal_filesystem.h" +#include "eal_private.h" #define BITS_PER_HEX 4 +#define LCORE_OPT_LST 1 +#define LCORE_OPT_MSK 2 +#define LCORE_OPT_MAP 3 const char eal_short_options[] = @@ -65,6 +48,7 @@ eal_short_options[] = "d:" /* driver */ "h" /* help */ "l:" /* corelist */ + "S:" /* service corelist */ "m:" /* memory size */ "n:" /* memory channels */ "r:" /* memory ranks */ @@ -80,22 +64,28 @@ eal_long_options[] = { {OPT_HELP, 0, NULL, OPT_HELP_NUM }, {OPT_HUGE_DIR, 1, NULL, OPT_HUGE_DIR_NUM }, {OPT_HUGE_UNLINK, 0, NULL, OPT_HUGE_UNLINK_NUM }, + {OPT_IOVA_MODE, 1, NULL, OPT_IOVA_MODE_NUM }, {OPT_LCORES, 1, NULL, OPT_LCORES_NUM }, {OPT_LOG_LEVEL, 1, NULL, OPT_LOG_LEVEL_NUM }, {OPT_MASTER_LCORE, 1, NULL, OPT_MASTER_LCORE_NUM }, + {OPT_MBUF_POOL_OPS_NAME, 1, NULL, OPT_MBUF_POOL_OPS_NAME_NUM}, {OPT_NO_HPET, 0, NULL, OPT_NO_HPET_NUM }, {OPT_NO_HUGE, 0, NULL, OPT_NO_HUGE_NUM }, {OPT_NO_PCI, 0, NULL, OPT_NO_PCI_NUM }, {OPT_NO_SHCONF, 0, NULL, OPT_NO_SHCONF_NUM }, + {OPT_IN_MEMORY, 0, NULL, OPT_IN_MEMORY_NUM }, {OPT_PCI_BLACKLIST, 1, NULL, OPT_PCI_BLACKLIST_NUM }, {OPT_PCI_WHITELIST, 1, NULL, OPT_PCI_WHITELIST_NUM }, {OPT_PROC_TYPE, 1, NULL, OPT_PROC_TYPE_NUM }, {OPT_SOCKET_MEM, 1, NULL, OPT_SOCKET_MEM_NUM }, + {OPT_SOCKET_LIMIT, 1, NULL, OPT_SOCKET_LIMIT_NUM }, {OPT_SYSLOG, 1, NULL, OPT_SYSLOG_NUM }, {OPT_VDEV, 1, NULL, OPT_VDEV_NUM }, {OPT_VFIO_INTR, 1, NULL, OPT_VFIO_INTR_NUM }, {OPT_VMWARE_TSC_MAP, 0, NULL, OPT_VMWARE_TSC_MAP_NUM }, - {OPT_XEN_DOM0, 0, NULL, OPT_XEN_DOM0_NUM }, + {OPT_LEGACY_MEM, 0, NULL, OPT_LEGACY_MEM_NUM }, + {OPT_SINGLE_FILE_SEGMENTS, 0, NULL, OPT_SINGLE_FILE_SEGMENTS_NUM}, + {OPT_MATCH_ALLOCATIONS, 0, NULL, OPT_MATCH_ALLOCATIONS_NUM}, {0, 0, NULL, 0 } }; @@ -124,11 +114,75 @@ static const char *default_solib_dir = RTE_EAL_PMD_PATH; static const char dpdk_solib_path[] __attribute__((used)) = "DPDK_PLUGIN_PATH=" RTE_EAL_PMD_PATH; +TAILQ_HEAD(device_option_list, device_option); + +struct device_option { + TAILQ_ENTRY(device_option) next; + + enum rte_devtype type; + char arg[]; +}; + +static struct device_option_list devopt_list = +TAILQ_HEAD_INITIALIZER(devopt_list); static int master_lcore_parsed; static int mem_parsed; static int core_parsed; +static int +eal_option_device_add(enum rte_devtype type, const char *optarg) +{ + struct device_option *devopt; + size_t optlen; + int ret; + + optlen = strlen(optarg) + 1; + devopt = calloc(1, sizeof(*devopt) + optlen); + if (devopt == NULL) { + RTE_LOG(ERR, EAL, "Unable to allocate device option\n"); + return -ENOMEM; + } + + devopt->type = type; + ret = strlcpy(devopt->arg, optarg, optlen); + if (ret < 0) { + RTE_LOG(ERR, EAL, "Unable to copy device option\n"); + free(devopt); + return -EINVAL; + } + TAILQ_INSERT_TAIL(&devopt_list, devopt, next); + return 0; +} + +int +eal_option_device_parse(void) +{ + struct device_option *devopt; + void *tmp; + int ret = 0; + + TAILQ_FOREACH_SAFE(devopt, &devopt_list, next, tmp) { + if (ret == 0) { + ret = rte_devargs_add(devopt->type, devopt->arg); + if (ret) + RTE_LOG(ERR, EAL, "Unable to parse device '%s'\n", + devopt->arg); + } + TAILQ_REMOVE(&devopt_list, devopt, next); + free(devopt); + } + return ret; +} + +const char * 
+eal_get_hugefile_prefix(void) +{ + if (internal_config.hugefile_prefix != NULL) + return internal_config.hugefile_prefix; + return HUGEFILE_PREFIX_DEFAULT; +} + void eal_reset_internal_config(struct internal_config *internal_cfg) { @@ -137,20 +191,27 @@ eal_reset_internal_config(struct internal_config *internal_cfg) internal_cfg->memory = 0; internal_cfg->force_nrank = 0; internal_cfg->force_nchannel = 0; - internal_cfg->hugefile_prefix = HUGEFILE_PREFIX_DEFAULT; + internal_cfg->hugefile_prefix = NULL; internal_cfg->hugepage_dir = NULL; internal_cfg->force_sockets = 0; /* zero out the NUMA config */ for (i = 0; i < RTE_MAX_NUMA_NODES; i++) internal_cfg->socket_mem[i] = 0; + internal_cfg->force_socket_limits = 0; + /* zero out the NUMA limits config */ + for (i = 0; i < RTE_MAX_NUMA_NODES; i++) + internal_cfg->socket_limit[i] = 0; /* zero out hugedir descriptors */ - for (i = 0; i < MAX_HUGEPAGE_SIZES; i++) + for (i = 0; i < MAX_HUGEPAGE_SIZES; i++) { + memset(&internal_cfg->hugepage_info[i], 0, + sizeof(internal_cfg->hugepage_info[0])); internal_cfg->hugepage_info[i].lock_descriptor = -1; + } internal_cfg->base_virtaddr = 0; +#ifdef LOG_DAEMON internal_cfg->syslog_facility = LOG_DAEMON; - - internal_cfg->xen_dom0_support = 0; +#endif /* if set to NONE, interrupt mode is determined automatically */ internal_cfg->vfio_intr_mode = RTE_INTR_MODE_NONE; @@ -162,6 +223,10 @@ eal_reset_internal_config(struct internal_config *internal_cfg) #endif internal_cfg->vmware_tsc_map = 0; internal_cfg->create_uio_dev = 0; + internal_cfg->iova_mode = RTE_IOVA_DC; + internal_cfg->user_mbuf_pool_ops_name = NULL; + CPU_ZERO(&internal_cfg->ctrl_cpuset); + internal_cfg->init_complete = 0; } static int @@ -175,7 +240,7 @@ eal_plugin_add(const char *path) return -1; } memset(solib, 0, sizeof(*solib)); - strncpy(solib->name, path, PATH_MAX-1); + strlcpy(solib->name, path, PATH_MAX-1); solib->name[PATH_MAX-1] = 0; TAILQ_INSERT_TAIL(&solib_list, solib, next); @@ -202,8 +267,7 @@ eal_plugindir_init(const char *path) while ((dent = readdir(d)) != NULL) { struct stat sb; - snprintf(sopath, PATH_MAX-1, "%s/%s", path, dent->d_name); - sopath[PATH_MAX-1] = 0; + snprintf(sopath, sizeof(sopath), "%s/%s", path, dent->d_name); if (!(stat(sopath, &sb) == 0 && S_ISREG(sb.st_mode))) continue; @@ -220,13 +284,15 @@ eal_plugindir_init(const char *path) int eal_plugins_init(void) { +#ifndef RTE_EXEC_ENV_WINDOWS struct shared_driver *solib = NULL; + struct stat sb; - if (*default_solib_dir != '\0') + if (*default_solib_dir != '\0' && stat(default_solib_dir, &sb) == 0 && + S_ISDIR(sb.st_mode)) eal_plugin_add(default_solib_dir); TAILQ_FOREACH(solib, &solib_list, next) { - struct stat sb; if (stat(solib->name, &sb) == 0 && S_ISDIR(sb.st_mode)) { if (eal_plugindir_init(solib->name) == -1) { @@ -247,6 +313,7 @@ eal_plugins_init(void) } return 0; +#endif } /* @@ -275,6 +342,7 @@ eal_parse_service_coremask(const char *coremask) unsigned int count = 0; char c; int val; + uint32_t taken_lcore_count = 0; if (coremask == NULL) return -1; @@ -308,16 +376,20 @@ eal_parse_service_coremask(const char *coremask) if (master_lcore_parsed && cfg->master_lcore == lcore) { RTE_LOG(ERR, EAL, - "Error: lcore %u is master lcore, cannot use as service core\n", + "lcore %u is master lcore, cannot use as service core\n", idx); return -1; } - if (!lcore_config[idx].detected) { + if (eal_cpu_detected(idx) == 0) { RTE_LOG(ERR, EAL, "lcore %u unavailable\n", idx); return -1; } + + if (cfg->lcore_role[idx] == ROLE_RTE) + taken_lcore_count++; + 
lcore_config[idx].core_role = ROLE_SERVICE; count++; } @@ -334,21 +406,66 @@ eal_parse_service_coremask(const char *coremask) if (count == 0) return -1; + if (core_parsed && taken_lcore_count != count) { + RTE_LOG(WARNING, EAL, + "Not all service cores are in the coremask. " + "Please ensure -c or -l includes service cores\n"); + } + cfg->service_lcore_count = count; return 0; } static int -eal_parse_coremask(const char *coremask) +eal_service_cores_parsed(void) +{ + int idx; + for (idx = 0; idx < RTE_MAX_LCORE; idx++) { + if (lcore_config[idx].core_role == ROLE_SERVICE) + return 1; + } + return 0; +} + +static int +update_lcore_config(int *cores) { struct rte_config *cfg = rte_eal_get_configuration(); - int i, j, idx = 0; + unsigned int count = 0; + unsigned int i; + int ret = 0; + + for (i = 0; i < RTE_MAX_LCORE; i++) { + if (cores[i] != -1) { + if (eal_cpu_detected(i) == 0) { + RTE_LOG(ERR, EAL, "lcore %u unavailable\n", i); + ret = -1; + continue; + } + cfg->lcore_role[i] = ROLE_RTE; + count++; + } else { + cfg->lcore_role[i] = ROLE_OFF; + } + lcore_config[i].core_index = cores[i]; + } + if (!ret) + cfg->lcore_count = count; + return ret; +} + +static int +eal_parse_coremask(const char *coremask, int *cores) +{ unsigned count = 0; - char c; + int i, j, idx; int val; + char c; + + for (idx = 0; idx < RTE_MAX_LCORE; idx++) + cores[idx] = -1; + idx = 0; - if (coremask == NULL) - return -1; /* Remove all blank characters ahead and after . * Remove 0x/0X if exists. */ @@ -373,42 +490,28 @@ eal_parse_coremask(const char *coremask) for (j = 0; j < BITS_PER_HEX && idx < RTE_MAX_LCORE; j++, idx++) { if ((1 << j) & val) { - if (!lcore_config[idx].detected) { - RTE_LOG(ERR, EAL, "lcore %u " - "unavailable\n", idx); - return -1; - } - cfg->lcore_role[idx] = ROLE_RTE; - lcore_config[idx].core_index = count; + cores[idx] = count; count++; - } else { - cfg->lcore_role[idx] = ROLE_OFF; - lcore_config[idx].core_index = -1; } } } for (; i >= 0; i--) if (coremask[i] != '0') return -1; - for (; idx < RTE_MAX_LCORE; idx++) { - cfg->lcore_role[idx] = ROLE_OFF; - lcore_config[idx].core_index = -1; - } if (count == 0) return -1; - /* Update the count of enabled logical cores of the EAL configuration */ - cfg->lcore_count = count; return 0; } static int -eal_parse_corelist(const char *corelist) +eal_parse_service_corelist(const char *corelist) { struct rte_config *cfg = rte_eal_get_configuration(); int i, idx = 0; unsigned count = 0; char *end = NULL; int min, max; + uint32_t taken_lcore_count = 0; if (corelist == NULL) return -1; @@ -420,12 +523,6 @@ eal_parse_corelist(const char *corelist) while ((i > 0) && isblank(corelist[i - 1])) i--; - /* Reset config */ - for (idx = 0; idx < RTE_MAX_LCORE; idx++) { - cfg->lcore_role[idx] = ROLE_OFF; - lcore_config[idx].core_index = -1; - } - /* Get list of cores */ min = RTE_MAX_LCORE; do { @@ -446,9 +543,21 @@ eal_parse_corelist(const char *corelist) if (min == RTE_MAX_LCORE) min = idx; for (idx = min; idx <= max; idx++) { - if (cfg->lcore_role[idx] != ROLE_RTE) { - cfg->lcore_role[idx] = ROLE_RTE; - lcore_config[idx].core_index = count; + if (cfg->lcore_role[idx] != ROLE_SERVICE) { + /* handle master lcore already parsed */ + uint32_t lcore = idx; + if (cfg->master_lcore == lcore && + master_lcore_parsed) { + RTE_LOG(ERR, EAL, + "Error: lcore %u is master lcore, cannot use as service core\n", + idx); + return -1; + } + if (cfg->lcore_role[idx] == ROLE_RTE) + taken_lcore_count++; + + lcore_config[idx].core_role = + ROLE_SERVICE; count++; } } @@ -461,12 +570,68 @@ 
eal_parse_corelist(const char *corelist) if (count == 0) return -1; - /* Update the count of enabled logical cores of the EAL configuration */ - cfg->lcore_count = count; + if (core_parsed && taken_lcore_count != count) { + RTE_LOG(WARNING, EAL, + "Not all service cores were in the coremask. " + "Please ensure -c or -l includes service cores\n"); + } return 0; } +static int +eal_parse_corelist(const char *corelist, int *cores) +{ + unsigned count = 0; + char *end = NULL; + int min, max; + int idx; + + for (idx = 0; idx < RTE_MAX_LCORE; idx++) + cores[idx] = -1; + + /* Remove all blank characters ahead */ + while (isblank(*corelist)) + corelist++; + + /* Get list of cores */ + min = RTE_MAX_LCORE; + do { + while (isblank(*corelist)) + corelist++; + if (*corelist == '\0') + return -1; + errno = 0; + idx = strtol(corelist, &end, 10); + if (errno || end == NULL) + return -1; + if (idx < 0 || idx >= RTE_MAX_LCORE) + return -1; + while (isblank(*end)) + end++; + if (*end == '-') { + min = idx; + } else if ((*end == ',') || (*end == '\0')) { + max = idx; + if (min == RTE_MAX_LCORE) + min = idx; + for (idx = min; idx <= max; idx++) { + if (cores[idx] == -1) { + cores[idx] = count; + count++; + } + } + min = RTE_MAX_LCORE; + } else + return -1; + corelist = end + 1; + } while (*end != '\0'); + + if (count == 0) + return -1; + return 0; +} + /* Changes the lcore id of the master thread */ static int eal_parse_master_lcore(const char *arg) @@ -484,7 +649,8 @@ eal_parse_master_lcore(const char *arg) /* ensure master core is not used as service core */ if (lcore_config[cfg->master_lcore].core_role == ROLE_SERVICE) { - RTE_LOG(ERR, EAL, "Error: Master lcore is used as a service core.\n"); + RTE_LOG(ERR, EAL, + "Error: Master lcore is used as a service core\n"); return -1; } @@ -500,14 +666,14 @@ eal_parse_master_lcore(const char *arg) * ',' used for a single number. 
*/ static int -eal_parse_set(const char *input, uint16_t set[], unsigned num) +eal_parse_set(const char *input, rte_cpuset_t *set) { unsigned idx; const char *str = input; char *end = NULL; unsigned min, max; - memset(set, 0, num * sizeof(uint16_t)); + CPU_ZERO(set); while (isblank(*str)) str++; @@ -520,7 +686,7 @@ eal_parse_set(const char *input, uint16_t set[], unsigned num) if (*str != '(') { errno = 0; idx = strtoul(str, &end, 10); - if (errno || end == NULL || idx >= num) + if (errno || end == NULL || idx >= CPU_SETSIZE) return -1; else { while (isblank(*end)) @@ -538,7 +704,7 @@ eal_parse_set(const char *input, uint16_t set[], unsigned num) errno = 0; idx = strtoul(end, &end, 10); - if (errno || end == NULL || idx >= num) + if (errno || end == NULL || idx >= CPU_SETSIZE) return -1; max = idx; while (isblank(*end)) @@ -553,7 +719,7 @@ eal_parse_set(const char *input, uint16_t set[], unsigned num) for (idx = RTE_MIN(min, max); idx <= RTE_MAX(min, max); idx++) - set[idx] = 1; + CPU_SET(idx, set); return end - input; } @@ -578,7 +744,7 @@ eal_parse_set(const char *input, uint16_t set[], unsigned num) /* get the digit value */ errno = 0; idx = strtoul(str, &end, 10); - if (errno || end == NULL || idx >= num) + if (errno || end == NULL || idx >= CPU_SETSIZE) return -1; /* go ahead to separator '-',',' and ')' */ @@ -595,7 +761,7 @@ eal_parse_set(const char *input, uint16_t set[], unsigned num) min = idx; for (idx = RTE_MIN(min, max); idx <= RTE_MAX(min, max); idx++) - set[idx] = 1; + CPU_SET(idx, set); min = RTE_MAX_LCORE; } else @@ -614,28 +780,21 @@ eal_parse_set(const char *input, uint16_t set[], unsigned num) return str - input; } -/* convert from set array to cpuset bitmap */ static int -convert_to_cpuset(rte_cpuset_t *cpusetp, - uint16_t *set, unsigned num) +check_cpuset(rte_cpuset_t *set) { - unsigned idx; + unsigned int idx; - CPU_ZERO(cpusetp); - - for (idx = 0; idx < num; idx++) { - if (!set[idx]) + for (idx = 0; idx < CPU_SETSIZE; idx++) { + if (!CPU_ISSET(idx, set)) continue; - if (!lcore_config[idx].detected) { + if (eal_cpu_detected(idx) == 0) { RTE_LOG(ERR, EAL, "core %u " "unavailable\n", idx); return -1; } - - CPU_SET(idx, cpusetp); } - return 0; } @@ -657,7 +816,8 @@ static int eal_parse_lcores(const char *lcores) { struct rte_config *cfg = rte_eal_get_configuration(); - static uint16_t set[RTE_MAX_LCORE]; + rte_cpuset_t lcore_set; + unsigned int set_count; unsigned idx = 0; unsigned count = 0; const char *lcore_start = NULL; @@ -706,18 +866,13 @@ eal_parse_lcores(const char *lcores) lcores += strcspn(lcores, "@,"); if (*lcores == '@') { - /* explicit assign cpu_set */ - offset = eal_parse_set(lcores + 1, set, RTE_DIM(set)); + /* explicit assign cpuset and update the end cursor */ + offset = eal_parse_set(lcores + 1, &cpuset); if (offset < 0) goto err; - - /* prepare cpu_set and update the end cursor */ - if (0 > convert_to_cpuset(&cpuset, - set, RTE_DIM(set))) - goto err; end = lcores + 1 + offset; } else { /* ',' or '\0' */ - /* haven't given cpu_set, current loop done */ + /* haven't given cpuset, current loop done */ end = lcores; /* go back to check - */ @@ -731,18 +886,19 @@ eal_parse_lcores(const char *lcores) goto err; /* parse lcore_set from start point */ - if (0 > eal_parse_set(lcore_start, set, RTE_DIM(set))) + if (eal_parse_set(lcore_start, &lcore_set) < 0) goto err; - /* without '@', by default using lcore_set as cpu_set */ - if (*lcores != '@' && - 0 > convert_to_cpuset(&cpuset, set, RTE_DIM(set))) - goto err; + /* without '@', by default using lcore_set as 
cpuset */ + if (*lcores != '@') + rte_memcpy(&cpuset, &lcore_set, sizeof(cpuset)); + set_count = CPU_COUNT(&lcore_set); /* start to update lcore_set */ for (idx = 0; idx < RTE_MAX_LCORE; idx++) { - if (!set[idx]) + if (!CPU_ISSET(idx, &lcore_set)) continue; + set_count--; if (cfg->lcore_role[idx] != ROLE_RTE) { lcore_config[idx].core_index = count; @@ -754,10 +910,17 @@ eal_parse_lcores(const char *lcores) CPU_ZERO(&cpuset); CPU_SET(idx, &cpuset); } + + if (check_cpuset(&cpuset) < 0) + goto err; rte_memcpy(&lcore_config[idx].cpuset, &cpuset, sizeof(rte_cpuset_t)); } + /* some cores from the lcore_set can't be handled by EAL */ + if (set_count != 0) + goto err; + lcores = end + 1; } while (*end != '\0'); @@ -772,11 +935,12 @@ err: return ret; } +#ifndef RTE_EXEC_ENV_WINDOWS static int eal_parse_syslog(const char *facility, struct internal_config *conf) { int i; - static struct { + static const struct { const char *name; int value; } map[] = { @@ -810,45 +974,95 @@ eal_parse_syslog(const char *facility, struct internal_config *conf) } return -1; } +#endif static int -eal_parse_log_level(const char *arg) +eal_parse_log_priority(const char *level) { - char *end, *str, *type, *level; + static const char * const levels[] = { + [RTE_LOG_EMERG] = "emergency", + [RTE_LOG_ALERT] = "alert", + [RTE_LOG_CRIT] = "critical", + [RTE_LOG_ERR] = "error", + [RTE_LOG_WARNING] = "warning", + [RTE_LOG_NOTICE] = "notice", + [RTE_LOG_INFO] = "info", + [RTE_LOG_DEBUG] = "debug", + }; + size_t len = strlen(level); unsigned long tmp; + char *end; + unsigned int i; - str = strdup(arg); - if (str == NULL) + if (len == 0) return -1; - if (strchr(str, ',') == NULL) { - type = NULL; - level = str; - } else { - type = strsep(&str, ","); - level = strsep(&str, ","); + /* look for named values, skip 0 which is not a valid level */ + for (i = 1; i < RTE_DIM(levels); i++) { + if (strncmp(levels[i], level, len) == 0) + return i; } + /* not a string, maybe it is numeric */ errno = 0; tmp = strtoul(level, &end, 0); /* check for errors */ - if ((errno != 0) || (level[0] == '\0') || - end == NULL || (*end != '\0')) - goto fail; + if (errno != 0 || end == NULL || *end != '\0' || + tmp >= UINT32_MAX) + return -1; - /* log_level is a uint32_t */ - if (tmp >= UINT32_MAX) - goto fail; + return tmp; +} + +static int +eal_parse_log_level(const char *arg) +{ + const char *pattern = NULL; + const char *regex = NULL; + char *str, *level; + int priority; + + str = strdup(arg); + if (str == NULL) + return -1; + + if ((level = strchr(str, ','))) { + regex = str; + *level++ = '\0'; + } else if ((level = strchr(str, ':'))) { + pattern = str; + *level++ = '\0'; + } else { + level = str; + } - if (type == NULL) { - rte_log_set_global_level(tmp); - } else if (rte_log_set_level_regexp(type, tmp) < 0) { - printf("cannot set log level %s,%lu\n", - type, tmp); + priority = eal_parse_log_priority(level); + if (priority < 0) { + fprintf(stderr, "invalid log priority: %s\n", level); goto fail; } + if (regex) { + if (rte_log_set_level_regexp(regex, priority) < 0) { + fprintf(stderr, "cannot set log level %s,%d\n", + pattern, priority); + goto fail; + } + if (rte_log_save_regexp(regex, priority) < 0) + goto fail; + } else if (pattern) { + if (rte_log_set_level_pattern(pattern, priority) < 0) { + fprintf(stderr, "cannot set log level %s:%d\n", + pattern, priority); + goto fail; + } + if (rte_log_save_pattern(pattern, priority) < 0) + goto fail; + } else { + rte_log_set_global_level(priority); + } + free(str); return 0; @@ -870,41 +1084,217 @@ 
eal_parse_proc_type(const char *arg) return RTE_PROC_INVALID; } +static int +eal_parse_iova_mode(const char *name) +{ + int mode; + + if (name == NULL) + return -1; + + if (!strcmp("pa", name)) + mode = RTE_IOVA_PA; + else if (!strcmp("va", name)) + mode = RTE_IOVA_VA; + else + return -1; + + internal_config.iova_mode = mode; + return 0; +} + +static int +eal_parse_base_virtaddr(const char *arg) +{ + char *end; + uint64_t addr; + + errno = 0; + addr = strtoull(arg, &end, 16); + + /* check for errors */ + if ((errno != 0) || (arg[0] == '\0') || end == NULL || (*end != '\0')) + return -1; + + /* make sure we don't exceed 32-bit boundary on 32-bit target */ +#ifndef RTE_ARCH_64 + if (addr >= UINTPTR_MAX) + return -1; +#endif + + /* align the addr on 16M boundary, 16MB is the minimum huge page + * size on IBM Power architecture. If the addr is aligned to 16MB, + * it can align to 2MB for x86. So this alignment can also be used + * on x86 and other architectures. + */ + internal_config.base_virtaddr = + RTE_PTR_ALIGN_CEIL((uintptr_t)addr, (size_t)RTE_PGSIZE_16M); + + return 0; +} + +/* caller is responsible for freeing the returned string */ +static char * +available_cores(void) +{ + char *str = NULL; + int previous; + int sequence; + char *tmp; + int idx; + + /* find the first available cpu */ + for (idx = 0; idx < RTE_MAX_LCORE; idx++) { + if (eal_cpu_detected(idx) == 0) + continue; + break; + } + if (idx >= RTE_MAX_LCORE) + return NULL; + + /* first sequence */ + if (asprintf(&str, "%d", idx) < 0) + return NULL; + previous = idx; + sequence = 0; + + for (idx++ ; idx < RTE_MAX_LCORE; idx++) { + if (eal_cpu_detected(idx) == 0) + continue; + + if (idx == previous + 1) { + previous = idx; + sequence = 1; + continue; + } + + /* finish current sequence */ + if (sequence) { + if (asprintf(&tmp, "%s-%d", str, previous) < 0) { + free(str); + return NULL; + } + free(str); + str = tmp; + } + + /* new sequence */ + if (asprintf(&tmp, "%s,%d", str, idx) < 0) { + free(str); + return NULL; + } + free(str); + str = tmp; + previous = idx; + sequence = 0; + } + + /* finish last sequence */ + if (sequence) { + if (asprintf(&tmp, "%s-%d", str, previous) < 0) { + free(str); + return NULL; + } + free(str); + str = tmp; + } + + return str; +} + int eal_parse_common_option(int opt, const char *optarg, struct internal_config *conf) { + static int b_used; + static int w_used; + switch (opt) { /* blacklist */ case 'b': - if (rte_eal_devargs_add(RTE_DEVTYPE_BLACKLISTED, + if (w_used) + goto bw_used; + if (eal_option_device_add(RTE_DEVTYPE_BLACKLISTED_PCI, optarg) < 0) { return -1; } + b_used = 1; break; /* whitelist */ case 'w': - if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED, + if (b_used) + goto bw_used; + if (eal_option_device_add(RTE_DEVTYPE_WHITELISTED_PCI, optarg) < 0) { return -1; } + w_used = 1; break; /* coremask */ - case 'c': - if (eal_parse_coremask(optarg) < 0) { - RTE_LOG(ERR, EAL, "invalid coremask\n"); + case 'c': { + int lcore_indexes[RTE_MAX_LCORE]; + + if (eal_service_cores_parsed()) + RTE_LOG(WARNING, EAL, + "Service cores parsed before dataplane cores. 
Please ensure -c is before -s or -S\n"); + if (eal_parse_coremask(optarg, lcore_indexes) < 0) { + RTE_LOG(ERR, EAL, "invalid coremask syntax\n"); + return -1; + } + if (update_lcore_config(lcore_indexes) < 0) { + char *available = available_cores(); + + RTE_LOG(ERR, EAL, + "invalid coremask, please check specified cores are part of %s\n", + available); + free(available); return -1; } - core_parsed = 1; + + if (core_parsed) { + RTE_LOG(ERR, EAL, "Option -c is ignored, because (%s) is set!\n", + (core_parsed == LCORE_OPT_LST) ? "-l" : + (core_parsed == LCORE_OPT_MAP) ? "--lcore" : + "-c"); + return -1; + } + + core_parsed = LCORE_OPT_MSK; break; + } /* corelist */ - case 'l': - if (eal_parse_corelist(optarg) < 0) { - RTE_LOG(ERR, EAL, "invalid core list\n"); + case 'l': { + int lcore_indexes[RTE_MAX_LCORE]; + + if (eal_service_cores_parsed()) + RTE_LOG(WARNING, EAL, + "Service cores parsed before dataplane cores. Please ensure -l is before -s or -S\n"); + + if (eal_parse_corelist(optarg, lcore_indexes) < 0) { + RTE_LOG(ERR, EAL, "invalid core list syntax\n"); + return -1; + } + if (update_lcore_config(lcore_indexes) < 0) { + char *available = available_cores(); + + RTE_LOG(ERR, EAL, + "invalid core list, please check specified cores are part of %s\n", + available); + free(available); return -1; } - core_parsed = 1; + + if (core_parsed) { + RTE_LOG(ERR, EAL, "Option -l is ignored, because (%s) is set!\n", + (core_parsed == LCORE_OPT_MSK) ? "-c" : + (core_parsed == LCORE_OPT_MAP) ? "--lcore" : + "-l"); + return -1; + } + + core_parsed = LCORE_OPT_LST; break; + } /* service coremask */ case 's': if (eal_parse_service_coremask(optarg) < 0) { @@ -912,6 +1302,13 @@ eal_parse_common_option(int opt, const char *optarg, return -1; } break; + /* service corelist */ + case 'S': + if (eal_parse_service_corelist(optarg) < 0) { + RTE_LOG(ERR, EAL, "invalid service core list\n"); + return -1; + } + break; /* size of memory */ case 'm': conf->memory = atoi(optarg); @@ -956,6 +1353,8 @@ eal_parse_common_option(int opt, const char *optarg, case OPT_NO_HUGE_NUM: conf->no_hugetlbfs = 1; + /* no-huge is legacy mem */ + conf->legacy_mem = 1; break; case OPT_NO_PCI_NUM: @@ -974,6 +1373,13 @@ eal_parse_common_option(int opt, const char *optarg, conf->no_shconf = 1; break; + case OPT_IN_MEMORY_NUM: + conf->in_memory = 1; + /* in-memory is a superset of noshconf and huge-unlink */ + conf->no_shconf = 1; + conf->hugepage_unlink = 1; + break; + case OPT_PROC_TYPE_NUM: conf->process_type = eal_parse_proc_type(optarg); break; @@ -987,12 +1393,13 @@ eal_parse_common_option(int opt, const char *optarg, break; case OPT_VDEV_NUM: - if (rte_eal_devargs_add(RTE_DEVTYPE_UNDEFINED, + if (eal_option_device_add(RTE_DEVTYPE_VIRTUAL, optarg) < 0) { return -1; } break; +#ifndef RTE_EXEC_ENV_WINDOWS case OPT_SYSLOG_NUM: if (eal_parse_syslog(optarg, conf) < 0) { RTE_LOG(ERR, EAL, "invalid parameters for --" @@ -1000,6 +1407,7 @@ eal_parse_common_option(int opt, const char *optarg, return -1; } break; +#endif case OPT_LOG_LEVEL_NUM: { if (eal_parse_log_level(optarg) < 0) { @@ -1016,7 +1424,36 @@ eal_parse_common_option(int opt, const char *optarg, OPT_LCORES "\n"); return -1; } - core_parsed = 1; + + if (core_parsed) { + RTE_LOG(ERR, EAL, "Option --lcore is ignored, because (%s) is set!\n", + (core_parsed == LCORE_OPT_LST) ? "-l" : + (core_parsed == LCORE_OPT_MSK) ? 
"-c" : + "--lcore"); + return -1; + } + + core_parsed = LCORE_OPT_MAP; + break; + case OPT_LEGACY_MEM_NUM: + conf->legacy_mem = 1; + break; + case OPT_SINGLE_FILE_SEGMENTS_NUM: + conf->single_file_segments = 1; + break; + case OPT_IOVA_MODE_NUM: + if (eal_parse_iova_mode(optarg) < 0) { + RTE_LOG(ERR, EAL, "invalid parameters for --" + OPT_IOVA_MODE "\n"); + return -1; + } + break; + case OPT_BASE_VIRTADDR_NUM: + if (eal_parse_base_virtaddr(optarg) < 0) { + RTE_LOG(ERR, EAL, "invalid parameter for --" + OPT_BASE_VIRTADDR "\n"); + return -1; + } break; /* don't know what to do, leave this to caller */ @@ -1026,6 +1463,10 @@ eal_parse_common_option(int opt, const char *optarg, } return 0; +bw_used: + RTE_LOG(ERR, EAL, "Options blacklist (-b) and whitelist (-w) " + "cannot be used at the same time\n"); + return -1; } static void @@ -1034,10 +1475,9 @@ eal_auto_detect_cores(struct rte_config *cfg) unsigned int lcore_id; unsigned int removed = 0; rte_cpuset_t affinity_set; - pthread_t tid = pthread_self(); - if (pthread_getaffinity_np(tid, sizeof(rte_cpuset_t), - &affinity_set) < 0) + if (pthread_getaffinity_np(pthread_self(), sizeof(rte_cpuset_t), + &affinity_set)) CPU_ZERO(&affinity_set); for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { @@ -1051,6 +1491,46 @@ eal_auto_detect_cores(struct rte_config *cfg) cfg->lcore_count -= removed; } +static void +compute_ctrl_threads_cpuset(struct internal_config *internal_cfg) +{ + rte_cpuset_t *cpuset = &internal_cfg->ctrl_cpuset; + rte_cpuset_t default_set; + unsigned int lcore_id; + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + if (rte_lcore_has_role(lcore_id, ROLE_OFF)) + continue; + RTE_CPU_OR(cpuset, cpuset, &lcore_config[lcore_id].cpuset); + } + RTE_CPU_NOT(cpuset, cpuset); + + if (pthread_getaffinity_np(pthread_self(), sizeof(rte_cpuset_t), + &default_set)) + CPU_ZERO(&default_set); + + RTE_CPU_AND(cpuset, cpuset, &default_set); + + /* if no remaining cpu, use master lcore cpu affinity */ + if (!CPU_COUNT(cpuset)) { + memcpy(cpuset, &lcore_config[rte_get_master_lcore()].cpuset, + sizeof(*cpuset)); + } +} + +int +eal_cleanup_config(struct internal_config *internal_cfg) +{ + if (internal_cfg->hugefile_prefix != NULL) + free(internal_cfg->hugefile_prefix); + if (internal_cfg->hugepage_dir != NULL) + free(internal_cfg->hugepage_dir); + if (internal_cfg->user_mbuf_pool_ops_name != NULL) + free(internal_cfg->user_mbuf_pool_ops_name); + + return 0; +} + int eal_adjust_config(struct internal_config *internal_cfg) { @@ -1066,9 +1546,13 @@ eal_adjust_config(struct internal_config *internal_cfg) /* default master lcore is the first one */ if (!master_lcore_parsed) { cfg->master_lcore = rte_get_next_lcore(-1, 0, 0); + if (cfg->master_lcore >= RTE_MAX_LCORE) + return -1; lcore_config[cfg->master_lcore].core_role = ROLE_RTE; } + compute_ctrl_threads_cpuset(internal_cfg); + /* if no memory amounts were requested, this will result in 0 and * will be overridden later, right after eal_hugepage_info_init() */ for (i = 0; i < RTE_MAX_NUMA_NODES; i++) @@ -1091,7 +1575,22 @@ eal_check_common_options(struct internal_config *internal_cfg) RTE_LOG(ERR, EAL, "Invalid process type specified\n"); return -1; } - if (index(internal_cfg->hugefile_prefix, '%') != NULL) { + if (internal_cfg->hugefile_prefix != NULL && + strlen(internal_cfg->hugefile_prefix) < 1) { + RTE_LOG(ERR, EAL, "Invalid length of --" OPT_FILE_PREFIX " option\n"); + return -1; + } + if (internal_cfg->hugepage_dir != NULL && + strlen(internal_cfg->hugepage_dir) < 1) { + RTE_LOG(ERR, 
EAL, "Invalid length of --" OPT_HUGE_DIR" option\n"); + return -1; + } + if (internal_cfg->user_mbuf_pool_ops_name != NULL && + strlen(internal_cfg->user_mbuf_pool_ops_name) < 1) { + RTE_LOG(ERR, EAL, "Invalid length of --" OPT_MBUF_POOL_OPS_NAME" option\n"); + return -1; + } + if (index(eal_get_hugefile_prefix(), '%') != NULL) { RTE_LOG(ERR, EAL, "Invalid char, '%%', in --"OPT_FILE_PREFIX" " "option\n"); return -1; @@ -1106,12 +1605,44 @@ eal_check_common_options(struct internal_config *internal_cfg) "be specified together with --"OPT_NO_HUGE"\n"); return -1; } - - if (internal_cfg->no_hugetlbfs && internal_cfg->hugepage_unlink) { + if (internal_cfg->no_hugetlbfs && internal_cfg->hugepage_unlink && + !internal_cfg->in_memory) { RTE_LOG(ERR, EAL, "Option --"OPT_HUGE_UNLINK" cannot " "be specified together with --"OPT_NO_HUGE"\n"); return -1; } + if (internal_config.force_socket_limits && internal_config.legacy_mem) { + RTE_LOG(ERR, EAL, "Option --"OPT_SOCKET_LIMIT + " is only supported in non-legacy memory mode\n"); + } + if (internal_cfg->single_file_segments && + internal_cfg->hugepage_unlink && + !internal_cfg->in_memory) { + RTE_LOG(ERR, EAL, "Option --"OPT_SINGLE_FILE_SEGMENTS" is " + "not compatible with --"OPT_HUGE_UNLINK"\n"); + return -1; + } + if (internal_cfg->legacy_mem && + internal_cfg->in_memory) { + RTE_LOG(ERR, EAL, "Option --"OPT_LEGACY_MEM" is not compatible " + "with --"OPT_IN_MEMORY"\n"); + return -1; + } + if (internal_cfg->legacy_mem && internal_cfg->match_allocations) { + RTE_LOG(ERR, EAL, "Option --"OPT_LEGACY_MEM" is not compatible " + "with --"OPT_MATCH_ALLOCATIONS"\n"); + return -1; + } + if (internal_cfg->no_hugetlbfs && internal_cfg->match_allocations) { + RTE_LOG(ERR, EAL, "Option --"OPT_NO_HUGE" is not compatible " + "with --"OPT_MATCH_ALLOCATIONS"\n"); + return -1; + } + if (internal_cfg->legacy_mem && internal_cfg->memory == 0) { + RTE_LOG(NOTICE, EAL, "Static memory layout is selected, " + "amount of reserved memory can be adjusted with " + "-m or --"OPT_SOCKET_MEM"\n"); + } return 0; } @@ -1135,6 +1666,7 @@ eal_common_usage(void) " '@' can be omitted if cpus and lcores have the same value\n" " -s SERVICE COREMASK Hexadecimal bitmask of cores to be used as service cores\n" " --"OPT_MASTER_LCORE" ID Core ID that is used as master\n" + " --"OPT_MBUF_POOL_OPS_NAME" Pool ops name for mbuf to use\n" " -n CHANNELS Number of memory channels\n" " -m MB Memory to allocate (see also --"OPT_SOCKET_MEM")\n" " -r RANKS Force number of memory ranks (don't detect)\n" @@ -1149,16 +1681,23 @@ eal_common_usage(void) " --"OPT_VDEV" Add a virtual device.\n" " The argument format is [,key=val,...]\n" " (ex: --vdev=net_pcap0,iface=eth2).\n" + " --"OPT_IOVA_MODE" Set IOVA mode. 'pa' for IOVA_PA\n" + " 'va' for IOVA_VA\n" " -d LIB.so|DIR Add a driver or driver directory\n" " (can be used multiple times)\n" " --"OPT_VMWARE_TSC_MAP" Use VMware TSC map instead of native RDTSC\n" " --"OPT_PROC_TYPE" Type of this process (primary|secondary|auto)\n" +#ifndef RTE_EXEC_ENV_WINDOWS " --"OPT_SYSLOG" Set syslog facility\n" +#endif " --"OPT_LOG_LEVEL"= Set global log level\n" - " --"OPT_LOG_LEVEL"=,\n" + " --"OPT_LOG_LEVEL"=:\n" " Set specific log level\n" " -v Display version information on startup\n" " -h, --help This help\n" + " --"OPT_IN_MEMORY" Operate entirely in memory. 
This will\n" + " disable secondary process support\n" + " --"OPT_BASE_VIRTADDR" Base virtual address\n" "\nEAL options for DEBUG use only:\n" " --"OPT_HUGE_UNLINK" Unlink hugepage files after init\n" " --"OPT_NO_HUGE" Use malloc instead of hugetlbfs\n" @@ -1166,4 +1705,5 @@ eal_common_usage(void) " --"OPT_NO_HPET" Disable HPET\n" " --"OPT_NO_SHCONF" No shared config (mmap'd files)\n" "\n", RTE_MAX_LCORE); + rte_option_usage(); }
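
The options this patch introduces (-S service corelist, --iova-mode, --in-memory, the named and pattern forms of --log-level, and so on) are all consumed through the normal rte_eal_init() argument vector. The sketch below is not part of the diff; it is a minimal, illustrative application showing how such flags might be passed to a DPDK build that contains this change. rte_eal_init() and rte_eal_cleanup() are the standard EAL entry points; the program name, core IDs and option values are placeholders to adapt to the target machine.

/*
 * Illustrative only -- not taken from the patch. Core IDs below are
 * placeholders; pick cores that exist on the running system.
 */
#include <stdio.h>

#include <rte_eal.h>

int
main(void)
{
	char *eal_argv[] = {
		"app",                  /* argv[0]: program name */
		"-l", "0-3",            /* dataplane corelist */
		"-S", "4",              /* service corelist (list form added by this patch) */
		"--iova-mode", "va",    /* force IOVA-as-VA instead of auto-detection */
		"--in-memory",          /* per the patch, implies --no-shconf and --huge-unlink */
		"--log-level", "debug", /* named global level accepted by the new parser */
	};
	int eal_argc = sizeof(eal_argv) / sizeof(eal_argv[0]);

	/* rte_eal_init() parses the EAL options and returns < 0 on failure */
	if (rte_eal_init(eal_argc, eal_argv) < 0) {
		fprintf(stderr, "rte_eal_init() failed\n");
		return 1;
	}

	/* ... launch work on worker and service lcores here ... */

	rte_eal_cleanup();
	return 0;
}

Note that, as the hunks above enforce, only one of -c, -l and --lcores may be given, -b and -w are mutually exclusive, and a service core passed via -s/-S should not also appear in the dataplane corelist, otherwise the EAL warns or rejects the combination.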