1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation.
3 * Copyright(c) 2014 6WIND S.A.
20 #include <sys/queue.h>
23 #include <rte_compat.h>
24 #include <rte_common.h>
25 #include <rte_debug.h>
26 #include <rte_memory.h>
27 #include <rte_launch.h>
29 #include <rte_eal_memconfig.h>
30 #include <rte_errno.h>
31 #include <rte_per_lcore.h>
32 #include <rte_lcore.h>
33 #include <rte_service_component.h>
35 #include <rte_random.h>
36 #include <rte_cycles.h>
37 #include <rte_string_fns.h>
38 #include <rte_cpuflags.h>
39 #include <rte_interrupts.h>
42 #include <rte_devargs.h>
43 #include <rte_version.h>
45 #include <rte_atomic.h>
46 #include <malloc_heap.h>
48 #include "eal_private.h"
49 #include "eal_thread.h"
50 #include "eal_internal_cfg.h"
51 #include "eal_filesystem.h"
52 #include "eal_hugepages.h"
53 #include "eal_options.h"
/* Default total memory size (64 MB) used when running without hugetlbfs. */
55 #define MEMSIZE_IF_NO_HUGE_PAGE (64ULL * 1024ULL * 1024ULL)
57 /* Allow the application to print its usage message too if set */
58 static rte_usage_hook_t rte_application_usage_hook = NULL;
59 /* early configuration structure, when memory config is not mmapped */
60 static struct rte_mem_config early_mem_config;
62 /* define fd variable here, because file needs to be kept open for the
63 * duration of the program, as we hold a write lock on it in the primary proc */
64 static int mem_cfg_fd = -1;
/* Write lock covering only the memsegs region of the shared mem config;
 * holding it is what distinguishes the primary from secondary processes
 * (see eal_proc_type_detect() and rte_eal_config_create()).
 * NOTE(review): the .l_type/.l_whence initializers are not visible in this
 * chunk of the file. */
66 static struct flock wr_lock = {
69 .l_start = offsetof(struct rte_mem_config, memsegs),
70 .l_len = sizeof(early_mem_config.memsegs),
73 /* Address of global and public configuration */
74 static struct rte_config rte_config = {
/* points at the static early config until the shared one is mmapped */
75 .mem_config = &early_mem_config,
78 /* internal configuration (per-core) */
79 struct lcore_config lcore_config[RTE_MAX_LCORE];
81 /* internal configuration */
82 struct internal_config internal_config;
84 /* used by rte_rdtsc() */
85 int rte_cycles_vmware_tsc_map;
87 /* platform-specific runtime dir */
88 static char runtime_dir[PATH_MAX];
/* default base directory when no XDG_RUNTIME_DIR fallback applies */
90 static const char *default_runtime_dir = "/var/run";
/* Build and create the per-prefix DPDK runtime directory
 * ("<base>/dpdk/<hugefile_prefix>") into the file-scope runtime_dir buffer.
 * Base directory comes from XDG_RUNTIME_DIR when set, else /tmp (the
 * default_runtime_dir branch and return statements are not visible in this
 * chunk of the file). */
93 eal_create_runtime_dir(void)
95 const char *directory = default_runtime_dir;
96 const char *xdg_runtime_dir = getenv("XDG_RUNTIME_DIR");
97 const char *fallback = "/tmp";
102 /* try XDG path first, fall back to /tmp */
103 if (xdg_runtime_dir != NULL)
104 directory = xdg_runtime_dir;
106 directory = fallback;
108 /* create DPDK subdirectory under runtime dir */
109 ret = snprintf(tmp, sizeof(tmp), "%s/dpdk", directory);
/* NOTE(review): truncation check is off-by-one — snprintf() returns the
 * length that WOULD have been written, which can exceed sizeof(tmp);
 * the test should be `ret >= (int)sizeof(tmp)`, not `==`. Same issue on
 * the runtime_dir check below. */
110 if (ret < 0 || ret == sizeof(tmp)) {
111 RTE_LOG(ERR, EAL, "Error creating DPDK runtime path name\n");
115 /* create prefix-specific subdirectory under DPDK runtime dir */
116 ret = snprintf(runtime_dir, sizeof(runtime_dir), "%s/%s",
117 tmp, internal_config.hugefile_prefix);
118 if (ret < 0 || ret == sizeof(runtime_dir)) {
119 RTE_LOG(ERR, EAL, "Error creating prefix-specific runtime path name\n");
123 /* create the path if it doesn't exist. no "mkdir -p" here, so do it
126 ret = mkdir(tmp, 0700);
/* EEXIST is fine: the directory may already have been created by an
 * earlier run or another process */
127 if (ret < 0 && errno != EEXIST) {
128 RTE_LOG(ERR, EAL, "Error creating '%s': %s\n",
129 tmp, strerror(errno));
133 ret = mkdir(runtime_dir, 0700);
134 if (ret < 0 && errno != EEXIST) {
135 RTE_LOG(ERR, EAL, "Error creating '%s': %s\n",
136 runtime_dir, strerror(errno));
/* Accessor for the runtime directory path built by eal_create_runtime_dir();
 * presumably returns the file-scope runtime_dir buffer — body not visible in
 * this chunk. */
144 eal_get_runtime_dir(void)
149 /* Return user provided mbuf pool ops name */
150 const char * __rte_experimental
151 rte_eal_mbuf_user_pool_ops(void)
/* may be NULL if the user did not pass --mbuf-pool-ops-name */
153 return internal_config.user_mbuf_pool_ops_name;
156 /* Return mbuf pool ops name: the user-supplied name if one was given on the
 * command line, otherwise the build-time default RTE_MBUF_DEFAULT_MEMPOOL_OPS.
 * Never returns NULL. */
158 rte_eal_mbuf_default_mempool_ops(void)
160 if (internal_config.user_mbuf_pool_ops_name == NULL)
161 return RTE_MBUF_DEFAULT_MEMPOOL_OPS;
163 return internal_config.user_mbuf_pool_ops_name;
166 /* Return a pointer to the configuration structure */
/* presumably returns &rte_config — return statement not visible here */
168 rte_eal_get_configuration(void)
/* Return the IOVA mapping mode (PA/VA) selected during rte_eal_init(). */
174 rte_eal_iova_mode(void)
176 return rte_eal_get_configuration()->iova_mode;
179 /* parse a sysfs (or other) file containing one integer value */
/* On success, stores the parsed value in *val. Error paths log and
 * (presumably) return non-zero after closing the file — those lines are not
 * visible in this chunk. */
181 eal_parse_sysfs_value(const char *filename, unsigned long *val)
187 if ((f = fopen(filename, "r")) == NULL) {
188 RTE_LOG(ERR, EAL, "%s(): cannot open sysfs value %s\n",
193 if (fgets(buf, sizeof(buf), f) == NULL) {
194 RTE_LOG(ERR, EAL, "%s(): cannot read sysfs value %s\n",
/* base 0: accepts decimal, octal ("0...") and hex ("0x...") forms */
199 *val = strtoul(buf, &end, 0);
/* sysfs values are newline-terminated, so anything but '\n' after the
 * digits means a malformed file */
200 if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
201 RTE_LOG(ERR, EAL, "%s(): cannot parse sysfs value %s\n",
211 /* create memory configuration in shared/mmap memory. Take out
212 * a write lock on the memsegs, so we can auto-detect primary/secondary.
213 * This means we never close the file while running (auto-close on exit).
214 * We also don't lock the whole file, so that in future we can use read-locks
215 * on other parts, e.g. memzones, to detect if there are running secondary
218 rte_eal_config_create(void)
220 void *rte_mem_cfg_addr;
223 const char *pathname = eal_runtime_config_path();
/* --no-shconf: skip shared config entirely (early return presumed;
 * the return line is not visible in this chunk) */
225 if (internal_config.no_shconf)
229 mem_cfg_fd = open(pathname, O_RDWR | O_CREAT, 0660);
231 rte_panic("Cannot open '%s' for rte_mem_config\n", pathname);
/* size the backing file to hold the whole rte_mem_config */
234 retval = ftruncate(mem_cfg_fd, sizeof(*rte_config.mem_config));
237 rte_panic("Cannot resize '%s' for rte_mem_config\n", pathname);
/* failing to acquire the write lock means another primary holds it */
240 retval = fcntl(mem_cfg_fd, F_SETLK, &wr_lock);
243 rte_exit(EXIT_FAILURE, "Cannot create lock on '%s'. Is another primary "
244 "process running?\n", pathname);
247 rte_mem_cfg_addr = mmap(NULL, sizeof(*rte_config.mem_config),
248 PROT_READ | PROT_WRITE, MAP_SHARED, mem_cfg_fd, 0);
250 if (rte_mem_cfg_addr == MAP_FAILED){
251 rte_panic("Cannot mmap memory for rte_config\n");
/* seed the shared mapping with the early (static) config, then switch
 * rte_config over to the shared copy */
253 memcpy(rte_mem_cfg_addr, &early_mem_config, sizeof(early_mem_config));
254 rte_config.mem_config = rte_mem_cfg_addr;
257 /* attach to an existing shared memory config */
/* Secondary-process counterpart of rte_eal_config_create(): opens the
 * existing config file read-write and maps it, without taking the write
 * lock. */
259 rte_eal_config_attach(void)
261 void *rte_mem_cfg_addr;
262 const char *pathname = eal_runtime_config_path();
/* --no-shconf: nothing to attach to (early return presumed; the return
 * line is not visible in this chunk) */
264 if (internal_config.no_shconf)
268 mem_cfg_fd = open(pathname, O_RDWR);
270 rte_panic("Cannot open '%s' for rte_mem_config\n", pathname);
273 rte_mem_cfg_addr = mmap(NULL, sizeof(*rte_config.mem_config),
274 PROT_READ | PROT_WRITE, MAP_SHARED, mem_cfg_fd, 0);
276 if (rte_mem_cfg_addr == MAP_FAILED)
277 rte_panic("Cannot mmap memory for rte_config\n");
279 rte_config.mem_config = rte_mem_cfg_addr;
282 /* Detect if we are a primary or a secondary process */
/* Returns RTE_PROC_PRIMARY unless the shared config file exists and its
 * memsegs write lock is already held by another process, in which case we
 * must be a secondary. */
284 eal_proc_type_detect(void)
286 enum rte_proc_type_t ptype = RTE_PROC_PRIMARY;
287 const char *pathname = eal_runtime_config_path();
289 /* if there no shared config, there can be no secondary processes */
290 if (!internal_config.no_shconf) {
291 /* if we can open the file but not get a write-lock we are a
292 * secondary process. NOTE: if we get a file handle back, we
293 * keep that open and don't close it to prevent a race condition
294 * between multiple opens.
296 if (((mem_cfg_fd = open(pathname, O_RDWR)) >= 0) &&
297 (fcntl(mem_cfg_fd, F_SETLK, &wr_lock) < 0))
298 ptype = RTE_PROC_SECONDARY;
301 RTE_LOG(INFO, EAL, "Auto-detected process type: %s\n",
302 ptype == RTE_PROC_PRIMARY ? "PRIMARY" : "SECONDARY");
307 /* Sets up rte_config structure with the pointer to shared memory config.*/
/* Dispatches on process type: primaries create the shared config,
 * secondaries attach to it and wait for the primary to finish writing it
 * (mcfg magic set by rte_eal_mcfg_complete()). */
309 rte_config_init(void)
311 rte_config.process_type = internal_config.process_type;
313 switch (rte_config.process_type){
314 case RTE_PROC_PRIMARY:
315 rte_eal_config_create();
317 case RTE_PROC_SECONDARY:
318 rte_eal_config_attach();
/* block until the primary marks the shared config complete */
319 rte_eal_mcfg_wait_complete(rte_config.mem_config);
322 case RTE_PROC_INVALID:
323 rte_panic("Invalid process type\n");
/* Print the EAL usage banner, then the application's own usage message if a
 * hook was registered via rte_set_application_usage_hook(). */
329 eal_usage(const char *prgname)
331 printf("\nUsage: %s ", prgname);
333 /* Allow the application to print its usage message too if hook is set */
334 if ( rte_application_usage_hook ) {
335 printf("===== Application Usage =====\n\n");
336 rte_application_usage_hook(prgname);
340 /* Set a per-application usage message */
/* Installs usage_func as the application usage callback and (presumably)
 * returns the previously installed hook so callers can chain or restore it —
 * the return line is not visible in this chunk. Passing NULL clears the
 * hook. */
342 rte_set_application_usage_hook( rte_usage_hook_t usage_func )
344 rte_usage_hook_t old_func;
346 /* Will be NULL on the first call to denote the last usage routine. */
347 old_func = rte_application_usage_hook;
348 rte_application_usage_hook = usage_func;
/* Sum up the total hugepage memory available across all detected hugepage
 * sizes and NUMA nodes, clamped to SIZE_MAX on overflow of the (presumably
 * wider) accumulator. */
354 eal_get_hugepage_mem_size(void)
359 for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
360 struct hugepage_info *hpi = &internal_config.hugepage_info[i];
/* only count sizes that have a mounted hugetlbfs dir configured */
361 if (strnlen(hpi->hugedir, sizeof(hpi->hugedir)) != 0) {
362 for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
363 size += hpi->hugepage_sz * hpi->num_pages[j];
368 return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
371 /* Parse the arguments for --log-level only */
/* A pre-pass over argv that handles only OPT_LOG_LEVEL_NUM so logging is
 * configured before the full eal_parse_args() pass. Saves and restores all
 * getopt global state (optind/optopt/optreset/optarg) so the later full
 * parse starts clean. optreset is BSD-specific. */
373 eal_log_level_parse(int argc, char **argv)
378 const int old_optind = optind;
379 const int old_optopt = optopt;
380 const int old_optreset = optreset;
381 char * const old_optarg = optarg;
387 while ((opt = getopt_long(argc, argvopt, eal_short_options,
388 eal_long_options, &option_index)) != EOF) {
392 /* getopt is not happy, stop right now */
/* every option other than --log-level is deliberately ignored here */
396 ret = (opt == OPT_LOG_LEVEL_NUM) ?
397 eal_parse_common_option(opt, optarg, &internal_config) : 0;
399 /* common parser is not happy */
404 /* restore getopt lib */
407 optreset = old_optreset;
411 /* Parse the argument given in the command line of the application */
/* Full EAL option parse for FreeBSD: delegates shared options to
 * eal_parse_common_option(), handles the few FreeBSD-specific ones locally,
 * rejects Linux-only options with an error, then creates the runtime dir
 * and validates/adjusts the resulting internal_config. getopt state is
 * saved/restored around the parse, as in eal_log_level_parse(). */
413 eal_parse_args(int argc, char **argv)
418 char *prgname = argv[0];
419 const int old_optind = optind;
420 const int old_optopt = optopt;
421 const int old_optreset = optreset;
422 char * const old_optarg = optarg;
428 while ((opt = getopt_long(argc, argvopt, eal_short_options,
429 eal_long_options, &option_index)) != EOF) {
431 /* getopt is not happy, stop right now */
438 ret = eal_parse_common_option(opt, optarg, &internal_config);
439 /* common parser is not happy */
445 /* common parser handled this option */
450 case OPT_MBUF_POOL_OPS_NAME_NUM:
451 internal_config.user_mbuf_pool_ops_name =
/* any option not handled above is unsupported on FreeBSD; pick the
 * right error format for short vs long options */
458 if (opt < OPT_LONG_MIN_NUM && isprint(opt)) {
459 RTE_LOG(ERR, EAL, "Option %c is not supported "
460 "on FreeBSD\n", opt);
461 } else if (opt >= OPT_LONG_MIN_NUM &&
462 opt < OPT_LONG_MAX_NUM) {
463 RTE_LOG(ERR, EAL, "Option %s is not supported "
465 eal_long_options[option_index].name);
467 RTE_LOG(ERR, EAL, "Option %d is not supported "
468 "on FreeBSD\n", opt);
476 /* create runtime data directory */
477 if (internal_config.no_shconf == 0 &&
478 eal_create_runtime_dir() < 0) {
479 RTE_LOG(ERR, EAL, "Cannot create runtime directory\n");
484 if (eal_adjust_config(&internal_config) != 0) {
490 if (eal_check_common_options(&internal_config) != 0) {
/* put prgname back so the remaining argv can be handed to the app */
497 argv[optind-1] = prgname;
501 /* restore getopt lib */
504 optreset = old_optreset;
/* rte_memseg_list_walk() callback: arg points at the socket id we are
 * looking for; signals (via the not-visible return value) when a non-empty
 * memseg list on that socket is found. */
511 check_socket(const struct rte_memseg_list *msl, void *arg)
513 int *socket_id = arg;
515 if (msl->socket_id == *socket_id && msl->memseg_arr.count != 0)
/* Warn if no memory was reserved on the NUMA socket the master lcore runs
 * on — purely diagnostic, does not fail init. */
522 eal_check_mem_on_local_socket(void)
526 socket_id = rte_lcore_to_socket_id(rte_config.master_lcore);
/* walk returning 0 means check_socket() never found memory on this socket */
528 if (rte_memseg_list_walk(check_socket, &socket_id) == 0)
529 RTE_LOG(WARNING, EAL, "WARNING: Master core has no memory on local socket!\n");
/* No-op lcore function launched on all slave lcores by rte_eal_init() purely
 * to confirm they are up and responsive (body not visible in this chunk). */
534 sync_func(__attribute__((unused)) void *arg)
/* Mark the shared memory config as fully initialized by writing the magic
 * value; secondaries spin in rte_eal_mcfg_wait_complete() until they see it. */
540 rte_eal_mcfg_complete(void)
542 /* ALL shared mem_config related INIT DONE */
543 if (rte_config.process_type == RTE_PROC_PRIMARY)
544 rte_config.mem_config->magic = RTE_MAGIC;
547 /* return non-zero if hugepages are enabled. */
548 int rte_eal_has_hugepages(void)
550 return !internal_config.no_hugetlbfs;
553 /* Abstraction for port I/0 privilege */
/* FreeBSD grants port I/O privilege by holding /dev/io open; the fd is
 * intentionally never closed. Error/return handling is not visible in this
 * chunk. */
555 rte_eal_iopl_init(void)
559 fd = open("/dev/io", O_RDWR);
562 /* keep fd open for iopl */
/* Report a fatal init-time error both on stderr (always visible) and
 * through the EAL log stream. */
566 static void rte_eal_init_alert(const char *msg)
568 fprintf(stderr, "EAL: FATAL: %s\n", msg);
569 RTE_LOG(ERR, EAL, "%s\n", msg);
572 /* Launch threads, called at application init(). */
/* Main EAL initialization entry point for FreeBSD. Orders of operation
 * matter throughout: log level before arg parse, mp channel before bus scan,
 * memzones before memory init (see inline comments). Uses run_once to
 * reject re-initialization; error paths set rte_errno and (where visible)
 * clear run_once so init can be retried. Many error-return lines are not
 * visible in this chunk of the file. */
574 rte_eal_init(int argc, char **argv)
578 static rte_atomic32_t run_once = RTE_ATOMIC32_INIT(0);
579 char cpuset[RTE_CPU_AFFINITY_STR_LEN];
580 char thread_name[RTE_MAX_THREAD_NAME_LEN];
582 /* checks if the machine is adequate */
583 if (!rte_cpu_is_supported()) {
584 rte_eal_init_alert("unsupported cpu type.");
/* atomically claim the one-shot init flag */
589 if (!rte_atomic32_test_and_set(&run_once)) {
590 rte_eal_init_alert("already called initialization.");
591 rte_errno = EALREADY;
595 thread_id = pthread_self();
597 eal_reset_internal_config(&internal_config);
599 /* set log level as early as possible */
600 eal_log_level_parse(argc, argv);
602 if (rte_eal_cpu_init() < 0) {
603 rte_eal_init_alert("Cannot detect lcores.");
608 fctret = eal_parse_args(argc, argv);
610 rte_eal_init_alert("Invalid 'command line' arguments.");
/* allow a retry of init after a recoverable failure */
612 rte_atomic32_clear(&run_once);
616 /* FreeBSD always uses legacy memory model */
617 internal_config.legacy_mem = true;
619 if (eal_plugins_init() < 0) {
620 rte_eal_init_alert("Cannot init plugins\n");
622 rte_atomic32_clear(&run_once);
626 if (eal_option_device_parse()) {
628 rte_atomic32_clear(&run_once);
634 if (rte_eal_intr_init() < 0) {
635 rte_eal_init_alert("Cannot init interrupt-handling thread\n");
639 /* Put mp channel init before bus scan so that we can init the vdev
640 * bus through mp channel in the secondary process before the bus scan.
642 if (rte_mp_channel_init() < 0) {
643 rte_eal_init_alert("failed to init mp channel\n");
644 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
650 if (rte_bus_scan()) {
651 rte_eal_init_alert("Cannot scan the buses for devices\n");
653 rte_atomic32_clear(&run_once);
657 /* autodetect the iova mapping mode (default is iova_pa) */
658 rte_eal_get_configuration()->iova_mode = rte_bus_get_iommu_class();
660 if (internal_config.no_hugetlbfs == 0) {
661 /* rte_config isn't initialized yet */
/* primary discovers hugepage info; secondary reads what primary wrote */
662 ret = internal_config.process_type == RTE_PROC_PRIMARY ?
663 eal_hugepage_info_init() :
664 eal_hugepage_info_read();
666 rte_eal_init_alert("Cannot get hugepage information.");
668 rte_atomic32_clear(&run_once);
/* if no explicit -m/--socket-mem, size memory from what is available */
673 if (internal_config.memory == 0 && internal_config.force_sockets == 0) {
674 if (internal_config.no_hugetlbfs)
675 internal_config.memory = MEMSIZE_IF_NO_HUGE_PAGE;
677 internal_config.memory = eal_get_hugepage_mem_size();
680 if (internal_config.vmware_tsc_map == 1) {
681 #ifdef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
682 rte_cycles_vmware_tsc_map = 1;
683 RTE_LOG (DEBUG, EAL, "Using VMWARE TSC MAP, "
684 "you must have monitor_control.pseudo_perfctr = TRUE\n");
686 RTE_LOG (WARNING, EAL, "Ignoring --vmware-tsc-map because "
687 "RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT is not set\n");
/* seed the PRNG with the TSC so each run differs */
691 rte_srand(rte_rdtsc());
693 /* in secondary processes, memory init may allocate additional fbarrays
694 * not present in primary processes, so to avoid any potential issues,
695 * initialize memzones first.
697 if (rte_eal_memzone_init() < 0) {
698 rte_eal_init_alert("Cannot init memzone\n");
703 if (rte_eal_memory_init() < 0) {
704 rte_eal_init_alert("Cannot init memory\n");
709 if (rte_eal_malloc_heap_init() < 0) {
710 rte_eal_init_alert("Cannot init malloc heap\n");
715 if (rte_eal_tailqs_init() < 0) {
716 rte_eal_init_alert("Cannot init tail queues for objects\n");
721 if (rte_eal_alarm_init() < 0) {
722 rte_eal_init_alert("Cannot init interrupt-handling thread\n");
723 /* rte_eal_alarm_init sets rte_errno on failure. */
727 if (rte_eal_timer_init() < 0) {
728 rte_eal_init_alert("Cannot init HPET or TSC timers\n");
733 eal_check_mem_on_local_socket();
735 eal_thread_init_master(rte_config.master_lcore);
737 ret = eal_thread_dump_affinity(cpuset, sizeof(cpuset));
739 RTE_LOG(DEBUG, EAL, "Master lcore %u is ready (tid=%p;cpuset=[%s%s])\n",
740 rte_config.master_lcore, thread_id, cpuset,
741 ret == 0 ? "" : "...");
/* spin up one worker thread per slave lcore */
743 RTE_LCORE_FOREACH_SLAVE(i) {
746 * create communication pipes between master thread
749 if (pipe(lcore_config[i].pipe_master2slave) < 0)
750 rte_panic("Cannot create pipe\n");
751 if (pipe(lcore_config[i].pipe_slave2master) < 0)
752 rte_panic("Cannot create pipe\n");
754 lcore_config[i].state = WAIT;
756 /* create a thread for each lcore */
757 ret = pthread_create(&lcore_config[i].thread_id, NULL,
758 eal_thread_loop, NULL);
760 rte_panic("Cannot create thread\n");
762 /* Set thread_name for aid in debugging. */
763 snprintf(thread_name, sizeof(thread_name),
764 "lcore-slave-%d", i);
765 rte_thread_setname(lcore_config[i].thread_id, thread_name);
769 * Launch a dummy function on all slave lcores, so that master lcore
770 * knows they are all ready when this function returns.
772 rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MASTER);
773 rte_eal_mp_wait_lcore();
775 /* initialize services so vdevs register service during bus_probe. */
776 ret = rte_service_init();
778 rte_eal_init_alert("rte_service_init() failed\n");
783 /* Probe all the buses and devices/drivers on them */
784 if (rte_bus_probe()) {
785 rte_eal_init_alert("Cannot probe devices\n");
790 /* initialize default service/lcore mappings and start running. Ignore
791 * -ENOTSUP, as it indicates no service coremask passed to EAL.
793 ret = rte_service_start_with_defaults();
794 if (ret < 0 && ret != -ENOTSUP) {
799 rte_eal_mcfg_complete();
/* Tear down EAL state at application exit; currently only finalizes the
 * service library in this visible portion. */
804 int __rte_experimental
805 rte_eal_cleanup(void)
807 rte_service_finalize();
/* Return the role (RTE/SERVICE/OFF) recorded for the given lcore id.
 * NOTE(review): lcore_id is not bounds-checked against RTE_MAX_LCORE here. */
812 enum rte_lcore_role_t
813 rte_eal_lcore_role(unsigned lcore_id)
815 return rte_config.lcore_role[lcore_id];
/* Return whether this process is a primary or secondary DPDK process. */
819 rte_eal_process_type(void)
821 return rte_config.process_type;
/* Return non-zero unless PCI was disabled with --no-pci. */
824 int rte_eal_has_pci(void)
826 return !internal_config.no_pci;
/* Return whether EAL was asked to create /dev UIO device files. */
829 int rte_eal_create_uio_dev(void)
831 return internal_config.create_uio_dev;
/* VFIO is not supported on FreeBSD, so the interrupt mode is always NONE. */
835 rte_eal_vfio_intr_mode(void)
837 return RTE_INTR_MODE_NONE;
/* VFIO API stubs: VFIO is Linux-only, so on FreeBSD every rte_vfio_*
 * function below is a no-op that (presumably) returns an error/invalid
 * value — the return statements are not visible in this chunk. All
 * parameters are deliberately unused. */
840 int rte_vfio_setup_device(__rte_unused const char *sysfs_base,
841 __rte_unused const char *dev_addr,
842 __rte_unused int *vfio_dev_fd,
843 __rte_unused struct vfio_device_info *device_info)
848 int rte_vfio_release_device(__rte_unused const char *sysfs_base,
849 __rte_unused const char *dev_addr,
855 int rte_vfio_enable(__rte_unused const char *modname)
860 int rte_vfio_is_enabled(__rte_unused const char *modname)
865 int rte_vfio_noiommu_is_enabled(void)
870 int rte_vfio_clear_group(__rte_unused int vfio_group_fd)
875 int __rte_experimental
876 rte_vfio_dma_map(uint64_t __rte_unused vaddr, __rte_unused uint64_t iova,
877 __rte_unused uint64_t len)
882 int __rte_experimental
883 rte_vfio_dma_unmap(uint64_t __rte_unused vaddr, uint64_t __rte_unused iova,
884 __rte_unused uint64_t len)
889 int __rte_experimental
890 rte_vfio_get_group_num(__rte_unused const char *sysfs_base,
891 __rte_unused const char *dev_addr,
892 __rte_unused int *iommu_group_num)
897 int __rte_experimental
898 rte_vfio_get_container_fd(void)
903 int __rte_experimental
904 rte_vfio_get_group_fd(__rte_unused int iommu_group_num)
909 int __rte_experimental
910 rte_vfio_container_create(void)
915 int __rte_experimental
916 rte_vfio_container_destroy(__rte_unused int container_fd)
921 int __rte_experimental
922 rte_vfio_container_group_bind(__rte_unused int container_fd,
923 __rte_unused int iommu_group_num)
928 int __rte_experimental
929 rte_vfio_container_group_unbind(__rte_unused int container_fd,
930 __rte_unused int iommu_group_num)
935 int __rte_experimental
936 rte_vfio_container_dma_map(__rte_unused int container_fd,
937 __rte_unused uint64_t vaddr,
938 __rte_unused uint64_t iova,
939 __rte_unused uint64_t len)
944 int __rte_experimental
945 rte_vfio_container_dma_unmap(__rte_unused int container_fd,
946 __rte_unused uint64_t vaddr,
947 __rte_unused uint64_t iova,
948 __rte_unused uint64_t len)