1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
12 #include <rte_debug.h>
14 #include <eal_memcfg.h>
15 #include <rte_errno.h>
16 #include <rte_lcore.h>
17 #include <eal_thread.h>
18 #include <eal_internal_cfg.h>
19 #include <eal_filesystem.h>
20 #include <eal_options.h>
21 #include <eal_private.h>
22 #include <rte_service_component.h>
25 #include "eal_hugepages.h"
26 #include "eal_trace.h"
28 #include "eal_windows.h"
/* NOTE(review): this chunk is an elided extract -- the leading numbers on
 * each line are original line numbers and many lines are missing. Comments
 * below describe only what is visible here.
 */
/* Default memory size (64 MiB) used when running without hugepages. */
30 #define MEMSIZE_IF_NO_HUGE_PAGE (64ULL * 1024ULL * 1024ULL)
32 /* define fd variable here, because file needs to be kept open for the
33 * duration of the program, as we hold a write lock on it in the primary proc
/* -1 means the runtime config file has not been opened yet; set by
 * eal_proc_type_detect() below and intentionally never closed.
 */
35 static int mem_cfg_fd = -1;
37 /* internal configuration (per-core) */
38 struct lcore_config lcore_config[RTE_MAX_LCORE];
40 /* Detect if we are a primary or a secondary process */
/*
 * eal_proc_type_detect() - decide PRIMARY vs SECONDARY by trying to take an
 * exclusive, non-blocking Win32 file lock on the shared runtime config file.
 * NOTE(review): extract is elided -- the return-type line, braces, the
 * _sopen_s() error-check branch and the final `return ptype;` are not
 * visible here; confirm against the full source.
 */
42 eal_proc_type_detect(void)
44 enum rte_proc_type_t ptype = RTE_PROC_PRIMARY;
45 const char *pathname = eal_runtime_config_path();
46 const struct rte_config *config = rte_eal_get_configuration();
48 /* if we can open the file but not get a write-lock we are a secondary
49 * process. NOTE: if we get a file handle back, we keep that open
50 * and don't close it to prevent a race condition between multiple opens
/* Open shared; the handle is kept in the file-scope mem_cfg_fd. */
52 errno_t err = _sopen_s(&mem_cfg_fd, pathname,
53 _O_RDWR, _SH_DENYNO, _S_IREAD | _S_IWRITE)
/* Lock the byte range located just past the mem_config region
 * (Offset = sizeof(*config->mem_config)).
 */
55 OVERLAPPED soverlapped = { 0 };
56 soverlapped.Offset = sizeof(*config->mem_config);
57 soverlapped.OffsetHigh = 0;
/* Translate the CRT descriptor to a Win32 HANDLE for LockFileEx(). */
59 HANDLE hwinfilehandle = (HANDLE)_get_osfhandle(mem_cfg_fd);
/* FAIL_IMMEDIATELY: if another process (the primary) already holds the
 * exclusive lock, LockFileEx() fails and we are a secondary process.
 */
61 if (!LockFileEx(hwinfilehandle,
62 LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0,
63 sizeof(*config->mem_config), 0, &soverlapped))
64 ptype = RTE_PROC_SECONDARY;
67 RTE_LOG(INFO, EAL, "Auto-detected process type: %s\n",
68 ptype == RTE_PROC_PRIMARY ? "PRIMARY" : "SECONDARY");
/*
 * eal_usage() - print the EAL usage banner for @prgname, then let the
 * application append its own usage text via the registered hook.
 * NOTE(review): elided extract -- the common-usage call and the actual
 * `hook(prgname)` invocation are not visible here; confirm in full source.
 */
81 eal_usage(const char *prgname)
83 rte_usage_hook_t hook = eal_get_application_usage_hook();
85 printf("\nUsage: %s ", prgname);
87 /* Allow the application to print its usage message too
91 printf("===== Application Usage =====\n\n");
96 /* Parse the arguments for --log-level only */
/*
 * eal_log_level_parse() - early, restricted pass over argv that resets the
 * internal config and honours only --log-level, so logging is configured
 * before the full option parse in eal_parse_args().
 * NOTE(review): elided extract -- local declarations (opt, ret, argvopt,
 * option_index), error branches and the function braces are not visible.
 */
98 eal_log_level_parse(int argc, char **argv)
103 struct internal_config *internal_conf =
104 eal_get_internal_configuration();
/* Start from pristine defaults before any option is applied. */
108 eal_reset_internal_config(internal_conf);
110 while ((opt = getopt_long(argc, argvopt, eal_short_options,
111 eal_long_options, &option_index)) != EOF) {
115 /* getopt is not happy, stop right now */
/* Only OPT_LOG_LEVEL_NUM is forwarded to the common parser here;
 * every other option is deliberately ignored in this pass.
 */
119 ret = (opt == OPT_LOG_LEVEL_NUM) ?
120 eal_parse_common_option(opt, optarg,
123 /* common parser is not happy */
/* Reset getopt state so eal_parse_args() can scan argv again. */
128 optind = 0; /* reset getopt lib */
131 /* Parse the argument given in the command line of the application */
/*
 * eal_parse_args() - full pass over the EAL command-line options.  Applies
 * the common EAL parser, rejects options unsupported on Windows, then
 * validates the accumulated configuration.
 * NOTE(review): elided extract -- returns, braces, `continue` statements
 * and the error/usage paths are not visible here; do not infer control
 * flow beyond the visible lines.
 */
133 eal_parse_args(int argc, char **argv)
138 char *prgname = argv[0];
139 struct internal_config *internal_conf =
140 eal_get_internal_configuration();
/* Scan all options using the shared EAL short/long option tables. */
144 while ((opt = getopt_long(argc, argvopt, eal_short_options,
145 eal_long_options, &option_index)) != EOF) {
149 /* getopt is not happy, stop right now */
155 /* eal_log_level_parse() already handled this option */
156 if (opt == OPT_LOG_LEVEL_NUM)
159 ret = eal_parse_common_option(opt, optarg, internal_conf);
160 /* common parser is not happy */
165 /* common parser handled this option */
/* Anything reaching here is unsupported on Windows: printable short
 * options are reported by character, long options by name, and the
 * remainder by numeric value.
 */
174 if (opt < OPT_LONG_MIN_NUM && isprint(opt)) {
175 RTE_LOG(ERR, EAL, "Option %c is not supported "
176 "on Windows\n", opt);
177 } else if (opt >= OPT_LONG_MIN_NUM &&
178 opt < OPT_LONG_MAX_NUM) {
179 RTE_LOG(ERR, EAL, "Option %s is not supported "
181 eal_long_options[option_index].name);
183 RTE_LOG(ERR, EAL, "Option %d is not supported "
184 "on Windows\n", opt);
/* Post-parse fixup and validation of the final configuration. */
191 if (eal_adjust_config(internal_conf) != 0)
195 if (eal_check_common_options(internal_conf) != 0) {
/* Restore program name at argv[optind-1] and reset getopt state so the
 * application can run its own getopt pass on the remaining arguments.
 */
201 argv[optind - 1] = prgname;
203 optind = 0; /* reset getopt lib */
208 sync_func(void *arg __rte_unused)
214 rte_eal_init_alert(const char *msg)
216 fprintf(stderr, "EAL: FATAL: %s\n", msg);
217 RTE_LOG(ERR, EAL, "%s\n", msg);
220 /* Stubs to enable EAL trace point compilation
221 * until eal_common_trace.c can be compiled.
/* NOTE(review): elided extract -- the stub bodies, return types and braces
 * are not visible here; only the symbols below are shown.
 */
/* Per-lcore trace state normally provided by eal_common_trace.c. */
224 RTE_DEFINE_PER_LCORE(volatile int, trace_point_sz);
225 RTE_DEFINE_PER_LCORE(void *, trace_mem);
/* Stub: per-thread trace memory allocation (no-op on Windows here). */
228 __rte_trace_mem_per_thread_alloc(void)
/* Stub: per-thread trace memory release. */
233 trace_mem_per_thread_free(void)
/* Stub: trace point field emission. */
238 __rte_trace_point_emit_field(size_t sz, const char *field,
/* Stub: trace point registration; the callback is deliberately unused. */
247 __rte_trace_point_register(rte_trace_point_t *trace, const char *name,
248 void (*register_fn)(void))
252 RTE_SET_USED(register_fn);
/*
 * rte_eal_cleanup() - tear down EAL state: stop the interrupt thread,
 * detach hugepage memory and release the internal configuration.
 * NOTE(review): elided extract -- additional cleanup calls and the return
 * statement are not visible here; confirm ordering against full source.
 */
257 rte_eal_cleanup(void)
259 struct internal_config *internal_conf =
260 eal_get_internal_configuration();
/* Stop interrupt handling before memory goes away. */
262 eal_intr_thread_cancel();
263 /* after this point, any DPDK pointers will become dangling */
264 rte_eal_memory_detach();
265 eal_cleanup_config(internal_conf);
269 /* Launch threads, called at application init(). */
/*
 * rte_eal_init() - Windows EAL entry point: parses options, initializes
 * CPU/memory/interrupt/timer subsystems, spawns one thread per worker
 * lcore, scans and probes buses, then synchronizes with all workers.
 * NOTE(review): heavily elided extract -- nearly every `if` shown here is
 * missing its error-return lines (rte_errno assignment + `return -1;`) and
 * closing braces; treat the visible lines as the success-path skeleton
 * only and consult the full source before modifying control flow.
 */
271 rte_eal_init(int argc, char **argv)
273 int i, fctret, bscan;
274 const struct rte_config *config = rte_eal_get_configuration();
275 struct internal_conf *internal_conf =
276 eal_get_internal_configuration();
/* Logging first, so every later failure can be reported. */
279 eal_log_init(NULL, 0);
281 eal_log_level_parse(argc, argv);
283 if (eal_create_cpu_map() < 0) {
284 rte_eal_init_alert("Cannot discover CPU and NUMA.");
285 /* rte_errno is set */
289 if (rte_eal_cpu_init() < 0) {
290 rte_eal_init_alert("Cannot detect lcores.");
295 fctret = eal_parse_args(argc, argv);
299 if (eal_option_device_parse()) {
304 /* Prevent creation of shared memory files. */
/* Windows has no secondary-process support: force in-memory mode. */
305 if (internal_conf->in_memory == 0) {
306 RTE_LOG(WARNING, EAL, "Multi-process support is requested, "
307 "but not available.\n");
308 internal_conf->in_memory = 1;
309 internal_conf->no_shconf = 1;
312 if (!internal_conf->no_hugetlbfs && (eal_hugepage_info_init() < 0)) {
313 rte_eal_init_alert("Cannot get hugepage information");
/* Without hugepages and explicit sizing, fall back to 64 MiB. */
318 if (internal_conf->memory == 0 && !internal_conf->force_sockets) {
319 if (internal_conf->no_hugetlbfs)
320 internal_conf->memory = MEMSIZE_IF_NO_HUGE_PAGE;
323 if (eal_mem_win32api_init() < 0) {
324 rte_eal_init_alert("Cannot access Win32 memory management");
329 if (eal_mem_virt2iova_init() < 0) {
330 /* Non-fatal error if physical addresses are not required. */
331 RTE_LOG(WARNING, EAL, "Cannot access virt2phys driver, "
332 "PA will not be available\n");
335 if (rte_eal_memzone_init() < 0) {
336 rte_eal_init_alert("Cannot init memzone");
341 if (rte_eal_memory_init() < 0) {
342 rte_eal_init_alert("Cannot init memory");
347 if (rte_eal_malloc_heap_init() < 0) {
348 rte_eal_init_alert("Cannot init malloc heap");
353 if (rte_eal_tailqs_init() < 0) {
354 rte_eal_init_alert("Cannot init tail queues for objects");
359 if (rte_eal_intr_init() < 0) {
360 rte_eal_init_alert("Cannot init interrupt-handling thread");
364 if (rte_eal_timer_init() < 0) {
365 rte_eal_init_alert("Cannot init TSC timer");
/* Bind the calling thread to the main lcore's cpuset. */
370 __rte_thread_init(config->main_lcore,
371 &lcore_config[config->main_lcore].cpuset);
373 bscan = rte_bus_scan();
375 rte_eal_init_alert("Cannot init PCI");
/* Per-worker setup: two pipes (main<->worker) plus one thread each. */
380 RTE_LCORE_FOREACH_WORKER(i) {
383 * create communication pipes between main thread
386 if (_pipe(lcore_config[i].pipe_main2worker,
387 sizeof(char), _O_BINARY) < 0)
388 rte_panic("Cannot create pipe\n");
389 if (_pipe(lcore_config[i].pipe_worker2main,
390 sizeof(char), _O_BINARY) < 0)
391 rte_panic("Cannot create pipe\n");
393 lcore_config[i].state = WAIT;
395 /* create a thread for each lcore */
396 if (eal_thread_create(&lcore_config[i].thread_id) != 0)
397 rte_panic("Cannot create thread\n");
400 /* Initialize services so drivers can register services during probe. */
401 ret = rte_service_init();
403 rte_eal_init_alert("rte_service_init() failed");
408 if (rte_bus_probe()) {
409 rte_eal_init_alert("Cannot probe devices");
415 * Launch a dummy function on all worker lcores, so that main lcore
416 * knows they are all ready when this function returns.
418 rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MAIN);
419 rte_eal_mp_wait_lcore();
/* Don't use MinGW asprintf() to have identical code with all toolchains. */
/*
 * eal_asprintf() - portable asprintf(): format into a freshly malloc'd
 * buffer stored in *buffer.
 *
 * Returns the number of characters written (excluding the NUL) on
 * success, or -1 on formatting or allocation failure (in which case
 * *buffer must not be used).  The caller owns and must free() *buffer
 * on success.
 *
 * Fixes vs. the extract: the mandatory va_end() after each va_start()
 * was missing (undefined behavior per C11 7.16.1), as were the negative
 * vsnprintf() result check, the +1 for the NUL terminator that the
 * `ret != size - 1` consistency test implies, and the malloc NULL check.
 */
int
eal_asprintf(char **buffer, const char *format, ...)
{
	int size, ret;
	va_list arg;

	/* First pass: measure the formatted length (excluding the NUL). */
	va_start(arg, format);
	size = vsnprintf(NULL, 0, format, arg);
	va_end(arg);
	if (size < 0)
		return -1;
	size++; /* room for the terminating NUL */

	*buffer = malloc(size);
	if (*buffer == NULL)
		return -1;

	/* Second pass: format into the allocated buffer. */
	va_start(arg, format);
	ret = vsnprintf(*buffer, size, format, arg);
	va_end(arg);
	/* If the two passes disagree, something raced or went wrong. */
	if (ret != size - 1) {
		free(*buffer);
		return -1;
	}
	return ret;
}
/*
 * VFIO is a Linux kernel facility and is not available on Windows; this
 * is a stub that ignores all arguments.
 * NOTE(review): elided extract -- the return-type line and the stub body
 * are not visible here; presumably it reports "not supported" to the
 * caller -- confirm against the full source.
 */
452 rte_vfio_container_dma_map(__rte_unused int container_fd,
453 __rte_unused uint64_t vaddr,
454 __rte_unused uint64_t iova,
455 __rte_unused uint64_t len)
461 rte_vfio_container_dma_unmap(__rte_unused int container_fd,
462 __rte_unused uint64_t vaddr,
463 __rte_unused uint64_t iova,
464 __rte_unused uint64_t len)