/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2018 Intel Corporation. All rights reserved.
 *   Copyright(c) 2014 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
49 #include <sys/queue.h>
51 #include <rte_compat.h>
52 #include <rte_common.h>
53 #include <rte_debug.h>
54 #include <rte_memory.h>
55 #include <rte_launch.h>
57 #include <rte_eal_memconfig.h>
58 #include <rte_errno.h>
59 #include <rte_per_lcore.h>
60 #include <rte_lcore.h>
61 #include <rte_service_component.h>
63 #include <rte_random.h>
64 #include <rte_cycles.h>
65 #include <rte_string_fns.h>
66 #include <rte_cpuflags.h>
67 #include <rte_interrupts.h>
70 #include <rte_devargs.h>
71 #include <rte_version.h>
72 #include <rte_atomic.h>
73 #include <malloc_heap.h>
75 #include "eal_private.h"
76 #include "eal_thread.h"
77 #include "eal_internal_cfg.h"
78 #include "eal_filesystem.h"
79 #include "eal_hugepages.h"
80 #include "eal_options.h"
82 #define MEMSIZE_IF_NO_HUGE_PAGE (64ULL * 1024ULL * 1024ULL)
84 /* Allow the application to print its usage message too if set */
85 static rte_usage_hook_t rte_application_usage_hook = NULL;
86 /* early configuration structure, when memory config is not mmapped */
87 static struct rte_mem_config early_mem_config;
89 /* define fd variable here, because file needs to be kept open for the
90 * duration of the program, as we hold a write lock on it in the primary proc */
91 static int mem_cfg_fd = -1;
93 static struct flock wr_lock = {
96 .l_start = offsetof(struct rte_mem_config, memseg),
97 .l_len = sizeof(early_mem_config.memseg),
100 /* Address of global and public configuration */
101 static struct rte_config rte_config = {
102 .mem_config = &early_mem_config,
105 /* internal configuration (per-core) */
106 struct lcore_config lcore_config[RTE_MAX_LCORE];
108 /* internal configuration */
109 struct internal_config internal_config;
111 /* used by rte_rdtsc() */
112 int rte_cycles_vmware_tsc_map;
114 /* Return mbuf pool ops name */
116 rte_eal_mbuf_default_mempool_ops(void)
118 if (internal_config.user_mbuf_pool_ops_name == NULL)
119 return RTE_MBUF_DEFAULT_MEMPOOL_OPS;
121 return internal_config.user_mbuf_pool_ops_name;
124 /* Return a pointer to the configuration structure */
126 rte_eal_get_configuration(void)
132 rte_eal_iova_mode(void)
134 return rte_eal_get_configuration()->iova_mode;
137 /* parse a sysfs (or other) file containing one integer value */
139 eal_parse_sysfs_value(const char *filename, unsigned long *val)
145 if ((f = fopen(filename, "r")) == NULL) {
146 RTE_LOG(ERR, EAL, "%s(): cannot open sysfs value %s\n",
151 if (fgets(buf, sizeof(buf), f) == NULL) {
152 RTE_LOG(ERR, EAL, "%s(): cannot read sysfs value %s\n",
157 *val = strtoul(buf, &end, 0);
158 if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
159 RTE_LOG(ERR, EAL, "%s(): cannot parse sysfs value %s\n",
169 /* create memory configuration in shared/mmap memory. Take out
170 * a write lock on the memsegs, so we can auto-detect primary/secondary.
171 * This means we never close the file while running (auto-close on exit).
172 * We also don't lock the whole file, so that in future we can use read-locks
173 * on other parts, e.g. memzones, to detect if there are running secondary
176 rte_eal_config_create(void)
178 void *rte_mem_cfg_addr;
181 const char *pathname = eal_runtime_config_path();
183 if (internal_config.no_shconf)
187 mem_cfg_fd = open(pathname, O_RDWR | O_CREAT, 0660);
189 rte_panic("Cannot open '%s' for rte_mem_config\n", pathname);
192 retval = ftruncate(mem_cfg_fd, sizeof(*rte_config.mem_config));
195 rte_panic("Cannot resize '%s' for rte_mem_config\n", pathname);
198 retval = fcntl(mem_cfg_fd, F_SETLK, &wr_lock);
201 rte_exit(EXIT_FAILURE, "Cannot create lock on '%s'. Is another primary "
202 "process running?\n", pathname);
205 rte_mem_cfg_addr = mmap(NULL, sizeof(*rte_config.mem_config),
206 PROT_READ | PROT_WRITE, MAP_SHARED, mem_cfg_fd, 0);
208 if (rte_mem_cfg_addr == MAP_FAILED){
209 rte_panic("Cannot mmap memory for rte_config\n");
211 memcpy(rte_mem_cfg_addr, &early_mem_config, sizeof(early_mem_config));
212 rte_config.mem_config = rte_mem_cfg_addr;
215 /* attach to an existing shared memory config */
217 rte_eal_config_attach(void)
219 void *rte_mem_cfg_addr;
220 const char *pathname = eal_runtime_config_path();
222 if (internal_config.no_shconf)
226 mem_cfg_fd = open(pathname, O_RDWR);
228 rte_panic("Cannot open '%s' for rte_mem_config\n", pathname);
231 rte_mem_cfg_addr = mmap(NULL, sizeof(*rte_config.mem_config),
232 PROT_READ | PROT_WRITE, MAP_SHARED, mem_cfg_fd, 0);
234 if (rte_mem_cfg_addr == MAP_FAILED)
235 rte_panic("Cannot mmap memory for rte_config\n");
237 rte_config.mem_config = rte_mem_cfg_addr;
240 /* Detect if we are a primary or a secondary process */
242 eal_proc_type_detect(void)
244 enum rte_proc_type_t ptype = RTE_PROC_PRIMARY;
245 const char *pathname = eal_runtime_config_path();
247 /* if we can open the file but not get a write-lock we are a secondary
248 * process. NOTE: if we get a file handle back, we keep that open
249 * and don't close it to prevent a race condition between multiple opens */
250 if (((mem_cfg_fd = open(pathname, O_RDWR)) >= 0) &&
251 (fcntl(mem_cfg_fd, F_SETLK, &wr_lock) < 0))
252 ptype = RTE_PROC_SECONDARY;
254 RTE_LOG(INFO, EAL, "Auto-detected process type: %s\n",
255 ptype == RTE_PROC_PRIMARY ? "PRIMARY" : "SECONDARY");
260 /* Sets up rte_config structure with the pointer to shared memory config.*/
262 rte_config_init(void)
264 rte_config.process_type = internal_config.process_type;
266 switch (rte_config.process_type){
267 case RTE_PROC_PRIMARY:
268 rte_eal_config_create();
270 case RTE_PROC_SECONDARY:
271 rte_eal_config_attach();
272 rte_eal_mcfg_wait_complete(rte_config.mem_config);
275 case RTE_PROC_INVALID:
276 rte_panic("Invalid process type\n");
282 eal_usage(const char *prgname)
284 printf("\nUsage: %s ", prgname);
286 /* Allow the application to print its usage message too if hook is set */
287 if ( rte_application_usage_hook ) {
288 printf("===== Application Usage =====\n\n");
289 rte_application_usage_hook(prgname);
293 /* Set a per-application usage message */
295 rte_set_application_usage_hook( rte_usage_hook_t usage_func )
297 rte_usage_hook_t old_func;
299 /* Will be NULL on the first call to denote the last usage routine. */
300 old_func = rte_application_usage_hook;
301 rte_application_usage_hook = usage_func;
307 eal_get_hugepage_mem_size(void)
312 for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
313 struct hugepage_info *hpi = &internal_config.hugepage_info[i];
314 if (hpi->hugedir != NULL) {
315 for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
316 size += hpi->hugepage_sz * hpi->num_pages[j];
321 return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
324 /* Parse the arguments for --log-level only */
326 eal_log_level_parse(int argc, char **argv)
331 const int old_optind = optind;
332 const int old_optopt = optopt;
333 const int old_optreset = optreset;
334 char * const old_optarg = optarg;
340 while ((opt = getopt_long(argc, argvopt, eal_short_options,
341 eal_long_options, &option_index)) != EOF) {
345 /* getopt is not happy, stop right now */
349 ret = (opt == OPT_LOG_LEVEL_NUM) ?
350 eal_parse_common_option(opt, optarg, &internal_config) : 0;
352 /* common parser is not happy */
357 /* restore getopt lib */
360 optreset = old_optreset;
364 /* Parse the argument given in the command line of the application */
366 eal_parse_args(int argc, char **argv)
371 char *prgname = argv[0];
372 const int old_optind = optind;
373 const int old_optopt = optopt;
374 const int old_optreset = optreset;
375 char * const old_optarg = optarg;
381 while ((opt = getopt_long(argc, argvopt, eal_short_options,
382 eal_long_options, &option_index)) != EOF) {
384 /* getopt is not happy, stop right now */
391 ret = eal_parse_common_option(opt, optarg, &internal_config);
392 /* common parser is not happy */
398 /* common parser handled this option */
403 case OPT_MBUF_POOL_OPS_NAME_NUM:
404 internal_config.user_mbuf_pool_ops_name = optarg;
410 if (opt < OPT_LONG_MIN_NUM && isprint(opt)) {
411 RTE_LOG(ERR, EAL, "Option %c is not supported "
412 "on FreeBSD\n", opt);
413 } else if (opt >= OPT_LONG_MIN_NUM &&
414 opt < OPT_LONG_MAX_NUM) {
415 RTE_LOG(ERR, EAL, "Option %s is not supported "
417 eal_long_options[option_index].name);
419 RTE_LOG(ERR, EAL, "Option %d is not supported "
420 "on FreeBSD\n", opt);
428 if (eal_adjust_config(&internal_config) != 0) {
434 if (eal_check_common_options(&internal_config) != 0) {
441 argv[optind-1] = prgname;
445 /* restore getopt lib */
448 optreset = old_optreset;
455 eal_check_mem_on_local_socket(void)
457 const struct rte_memseg *ms;
460 socket_id = rte_lcore_to_socket_id(rte_config.master_lcore);
462 ms = rte_eal_get_physmem_layout();
464 for (i = 0; i < RTE_MAX_MEMSEG; i++)
465 if (ms[i].socket_id == socket_id &&
469 RTE_LOG(WARNING, EAL, "WARNING: Master core has no "
470 "memory on local socket!\n");
/* No-op launched on every slave lcore during rte_eal_init() so the master
 * can wait until all slaves have entered their run loop.
 */
static int
sync_func(__attribute__((unused)) void *arg)
{
	return 0;
}
480 rte_eal_mcfg_complete(void)
482 /* ALL shared mem_config related INIT DONE */
483 if (rte_config.process_type == RTE_PROC_PRIMARY)
484 rte_config.mem_config->magic = RTE_MAGIC;
487 /* return non-zero if hugepages are enabled. */
488 int rte_eal_has_hugepages(void)
490 return !internal_config.no_hugetlbfs;
/* Abstraction for port I/0 privilege: on FreeBSD, opening /dev/io grants
 * the process I/O port access for as long as the fd stays open.
 * Returns 0 on success, -1 if /dev/io cannot be opened.
 */
int
rte_eal_iopl_init(void)
{
	static int fd;

	fd = open("/dev/io", O_RDWR);
	if (fd < 0)
		return -1;
	/* keep fd open for iopl */
	return 0;
}
506 static void rte_eal_init_alert(const char *msg)
508 fprintf(stderr, "EAL: FATAL: %s\n", msg);
509 RTE_LOG(ERR, EAL, "%s\n", msg);
512 /* Launch threads, called at application init(). */
/*
 * rte_eal_init - top-level EAL initialisation for FreeBSD.
 *
 * Sequence visible below: CPU support check, one-shot guard, log-level and
 * argument parsing, plugin/device/bus setup, IOVA mode selection, hugepage
 * and memory subsystem init, timers/interrupts/alarms, then slave-lcore
 * thread creation and a synchronisation launch before probing buses and
 * starting services.  On failure paths it calls rte_eal_init_alert() and
 * (on pre-memory failures) clears the run_once guard so init can be retried.
 *
 * NOTE(review): this chunk is a mangled extract -- each line still carries
 * its original file line number, and many lines (declarations, braces,
 * rte_errno assignments, return statements, and at least one call between
 * device parsing and bus scanning) were dropped.  Code is left byte-identical;
 * only comments are added.  Presumably returns the eal_parse_args() result
 * (fctret) on success and -1 with rte_errno set on error -- confirm upstream.
 */
514 rte_eal_init(int argc, char **argv)
518 static rte_atomic32_t run_once = RTE_ATOMIC32_INIT(0);
519 char cpuset[RTE_CPU_AFFINITY_STR_LEN];
520 char thread_name[RTE_MAX_THREAD_NAME_LEN];
522 /* checks if the machine is adequate */
523 if (!rte_cpu_is_supported()) {
524 rte_eal_init_alert("unsupported cpu type.");
/* one-shot guard: a second call fails with rte_errno = EALREADY */
529 if (!rte_atomic32_test_and_set(&run_once)) {
530 rte_eal_init_alert("already called initialization.");
531 rte_errno = EALREADY;
535 thread_id = pthread_self();
537 eal_reset_internal_config(&internal_config);
539 /* set log level as early as possible */
540 eal_log_level_parse(argc, argv);
542 if (rte_eal_cpu_init() < 0) {
543 rte_eal_init_alert("Cannot detect lcores.");
/* full command-line parse; the result is the index handed back to the app */
548 fctret = eal_parse_args(argc, argv);
550 rte_eal_init_alert("Invalid 'command line' arguments.");
552 rte_atomic32_clear(&run_once);
556 if (eal_plugins_init() < 0) {
557 rte_eal_init_alert("Cannot init plugins\n");
559 rte_atomic32_clear(&run_once);
563 if (eal_option_device_parse()) {
565 rte_atomic32_clear(&run_once);
569 if (rte_bus_scan()) {
570 rte_eal_init_alert("Cannot scan the buses for devices\n");
572 rte_atomic32_clear(&run_once);
576 /* autodetect the iova mapping mode (default is iova_pa) */
577 rte_eal_get_configuration()->iova_mode = rte_bus_get_iommu_class();
/* hugepage discovery is a primary-process job; skipped with --no-huge */
579 if (internal_config.no_hugetlbfs == 0 &&
580 internal_config.process_type != RTE_PROC_SECONDARY &&
581 eal_hugepage_info_init() < 0) {
582 rte_eal_init_alert("Cannot get hugepage information.");
584 rte_atomic32_clear(&run_once);
/* no explicit -m/--socket-mem: size memory from hugepages (or the 64 MB
 * fallback when hugepages are disabled) */
588 if (internal_config.memory == 0 && internal_config.force_sockets == 0) {
589 if (internal_config.no_hugetlbfs)
590 internal_config.memory = MEMSIZE_IF_NO_HUGE_PAGE;
592 internal_config.memory = eal_get_hugepage_mem_size();
595 if (internal_config.vmware_tsc_map == 1) {
596 #ifdef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
597 rte_cycles_vmware_tsc_map = 1;
598 RTE_LOG (DEBUG, EAL, "Using VMWARE TSC MAP, "
599 "you must have monitor_control.pseudo_perfctr = TRUE\n");
601 RTE_LOG (WARNING, EAL, "Ignoring --vmware-tsc-map because "
602 "RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT is not set\n");
606 rte_srand(rte_rdtsc());
/* multi-process IPC channel; per the PRIMARY check below, failure appears
 * fatal only for primary processes -- confirm against upstream */
610 if (rte_mp_channel_init() < 0) {
611 rte_eal_init_alert("failed to init mp channel\n");
612 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
618 if (rte_eal_memory_init() < 0) {
619 rte_eal_init_alert("Cannot init memory\n");
624 if (rte_eal_memzone_init() < 0) {
625 rte_eal_init_alert("Cannot init memzone\n");
630 if (rte_eal_tailqs_init() < 0) {
631 rte_eal_init_alert("Cannot init tail queues for objects\n");
636 if (rte_eal_alarm_init() < 0) {
637 rte_eal_init_alert("Cannot init interrupt-handling thread\n");
638 /* rte_eal_alarm_init sets rte_errno on failure. */
642 if (rte_eal_intr_init() < 0) {
643 rte_eal_init_alert("Cannot init interrupt-handling thread\n");
647 if (rte_eal_timer_init() < 0) {
648 rte_eal_init_alert("Cannot init HPET or TSC timers\n");
653 eal_check_mem_on_local_socket();
655 eal_thread_init_master(rte_config.master_lcore);
657 ret = eal_thread_dump_affinity(cpuset, RTE_CPU_AFFINITY_STR_LEN);
659 RTE_LOG(DEBUG, EAL, "Master lcore %u is ready (tid=%p;cpuset=[%s%s])\n",
660 rte_config.master_lcore, thread_id, cpuset,
661 ret == 0 ? "" : "...");
/* per-slave-lcore setup: two pipes for master<->slave handshaking, WAIT
 * state, one pthread running eal_thread_loop, plus a debug-friendly name */
663 RTE_LCORE_FOREACH_SLAVE(i) {
666 * create communication pipes between master thread
669 if (pipe(lcore_config[i].pipe_master2slave) < 0)
670 rte_panic("Cannot create pipe\n");
671 if (pipe(lcore_config[i].pipe_slave2master) < 0)
672 rte_panic("Cannot create pipe\n");
674 lcore_config[i].state = WAIT;
676 /* create a thread for each lcore */
677 ret = pthread_create(&lcore_config[i].thread_id, NULL,
678 eal_thread_loop, NULL);
680 rte_panic("Cannot create thread\n");
682 /* Set thread_name for aid in debugging. */
683 snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
684 "lcore-slave-%d", i);
685 rte_thread_setname(lcore_config[i].thread_id, thread_name);
689 * Launch a dummy function on all slave lcores, so that master lcore
690 * knows they are all ready when this function returns.
692 rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MASTER);
693 rte_eal_mp_wait_lcore();
695 /* initialize services so vdevs register service during bus_probe. */
696 ret = rte_service_init();
698 rte_eal_init_alert("rte_service_init() failed\n");
703 /* Probe all the buses and devices/drivers on them */
704 if (rte_bus_probe()) {
705 rte_eal_init_alert("Cannot probe devices\n");
710 /* initialize default service/lcore mappings and start running. Ignore
711 * -ENOTSUP, as it indicates no service coremask passed to EAL.
713 ret = rte_service_start_with_defaults();
714 if (ret < 0 && ret != -ENOTSUP) {
/* unblock any secondaries waiting on the shared config magic */
719 rte_eal_mcfg_complete();
724 int __rte_experimental
725 rte_eal_cleanup(void)
727 rte_service_finalize();
732 enum rte_lcore_role_t
733 rte_eal_lcore_role(unsigned lcore_id)
735 return rte_config.lcore_role[lcore_id];
739 rte_eal_process_type(void)
741 return rte_config.process_type;
744 int rte_eal_has_pci(void)
746 return !internal_config.no_pci;
749 int rte_eal_create_uio_dev(void)
751 return internal_config.create_uio_dev;
755 rte_eal_vfio_intr_mode(void)
757 return RTE_INTR_MODE_NONE;
/* dummy forward declaration. */
struct vfio_device_info;

/* dummy prototypes: VFIO is Linux-only, but these symbols are part of the
 * EAL API surface, so FreeBSD provides no-op implementations below. */
int rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
		int *vfio_dev_fd, struct vfio_device_info *device_info);
int rte_vfio_release_device(const char *sysfs_base, const char *dev_addr, int fd);
int rte_vfio_enable(const char *modname);
int rte_vfio_is_enabled(const char *modname);
int rte_vfio_noiommu_is_enabled(void);
int rte_vfio_clear_group(int vfio_group_fd);
772 int rte_vfio_setup_device(__rte_unused const char *sysfs_base,
773 __rte_unused const char *dev_addr,
774 __rte_unused int *vfio_dev_fd,
775 __rte_unused struct vfio_device_info *device_info)
780 int rte_vfio_release_device(__rte_unused const char *sysfs_base,
781 __rte_unused const char *dev_addr,
787 int rte_vfio_enable(__rte_unused const char *modname)
792 int rte_vfio_is_enabled(__rte_unused const char *modname)
797 int rte_vfio_noiommu_is_enabled(void)
802 int rte_vfio_clear_group(__rte_unused int vfio_group_fd)