4 * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdarg.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#include <errno.h>
#include <limits.h>
#include <unistd.h>
#include <fcntl.h>
#include <getopt.h>
#include <pthread.h>
#include <syslog.h>
#include <sys/queue.h>
#include <sys/file.h>
#include <sys/mman.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_random.h>
#include <rte_cycles.h>
#include <rte_string_fns.h>
#include <rte_cpuflags.h>
#include <rte_interrupts.h>
#include <rte_version.h>
#include <rte_atomic.h>
#include <malloc_heap.h>

#include "eal_private.h"
#include "eal_thread.h"
#include "eal_internal_cfg.h"
#include "eal_filesystem.h"
#include "eal_hugepages.h"
/* Long-option name strings matched by getopt_long() in eal_parse_args(). */
80 #define OPT_HUGE_DIR "huge-dir"
81 #define OPT_PROC_TYPE "proc-type"
82 #define OPT_NO_SHCONF "no-shconf"
83 #define OPT_NO_HPET "no-hpet"
84 #define OPT_NO_PCI "no-pci"
85 #define OPT_NO_HUGE "no-huge"
86 #define OPT_FILE_PREFIX "file-prefix"
87 #define OPT_SOCKET_MEM "socket-mem"
88 #define OPT_SYSLOG "syslog"
/* capacity of the eal_dev_blacklist array (max -b options accepted) */
90 #define RTE_EAL_BLACKLIST_SIZE 0x100
/* default memory size (64 MB) used when running without hugepages (--no-huge) */
92 #define MEMSIZE_IF_NO_HUGE_PAGE (64ULL * 1024ULL * 1024ULL)
/* upper bound on the --socket-mem argument length: ~10 chars per NUMA node */
94 #define SOCKET_MEM_STRLEN (RTE_MAX_NUMA_NODES * 10)
/* Parse one hex field of a PCI address string (in) into (fd), checking the
 * delimiter (dlm) and upper bound (lim); returns from the ENCLOSING function
 * on malformed input.
 * NOTE(review): several continuation lines of this macro are missing from
 * this extraction (declarations of val/end, errno reset, error return, and
 * the advance of (in) past the delimiter) — confirm against upstream source. */
96 #define GET_BLACKLIST_FIELD(in, fd, lim, dlm) \
101 val = strtoul((in), &end, 16); \
102 if (errno != 0 || end[0] != (dlm) || val > (lim)) \
104 (fd) = (typeof (fd))val; \
108 /* Allow the application to print its usage message too if set */
109 static rte_usage_hook_t rte_application_usage_hook = NULL;
110 /* early configuration structure, when memory config is not mmapped */
111 static struct rte_mem_config early_mem_config;
113 /* define fd variable here, because file needs to be kept open for the
114 * duration of the program, as we hold a write lock on it in the primary proc */
115 static int mem_cfg_fd = -1;
/* fcntl() write-lock covering ONLY the memseg region of the shared config
 * file; holding it marks this process as primary (see eal_proc_type_detect).
 * NOTE(review): the .l_type = F_WRLCK initializer and the closing brace are
 * missing from this extraction. */
117 static struct flock wr_lock = {
119 .l_whence = SEEK_SET,
120 .l_start = offsetof(struct rte_mem_config, memseg),
121 .l_len = sizeof(early_mem_config.memseg),
124 /* Address of global and public configuration */
125 static struct rte_config rte_config = {
126 .mem_config = &early_mem_config,
/* PCI addresses collected from -b options, handed to
 * rte_eal_pci_set_blacklist() at the end of argument parsing */
129 static struct rte_pci_addr eal_dev_blacklist[RTE_EAL_BLACKLIST_SIZE];
131 /* internal configuration (per-core) */
132 struct lcore_config lcore_config[RTE_MAX_LCORE];
134 /* internal configuration */
135 struct internal_config internal_config;
137 /* Return a pointer to the configuration structure */
139 rte_eal_get_configuration(void)
144 /* parse a sysfs (or other) file containing one integer value */
146 eal_parse_sysfs_value(const char *filename, unsigned long *val)
152 if ((f = fopen(filename, "r")) == NULL) {
153 RTE_LOG(ERR, EAL, "%s(): cannot open sysfs value %s\n",
158 if (fgets(buf, sizeof(buf), f) == NULL) {
159 RTE_LOG(ERR, EAL, "%s(): cannot read sysfs value %s\n",
164 *val = strtoul(buf, &end, 0);
165 if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
166 RTE_LOG(ERR, EAL, "%s(): cannot parse sysfs value %s\n",
176 /* create memory configuration in shared/mmap memory. Take out
177 * a write lock on the memsegs, so we can auto-detect primary/secondary.
178 * This means we never close the file while running (auto-close on exit).
179 * We also don't lock the whole file, so that in future we can use read-locks
180 * on other parts, e.g. memzones, to detect if there are running secondary
183 rte_eal_config_create(void)
185 void *rte_mem_cfg_addr;
188 const char *pathname = eal_runtime_config_path();
190 if (internal_config.no_shconf)
194 mem_cfg_fd = open(pathname, O_RDWR | O_CREAT, 0660);
196 rte_panic("Cannot open '%s' for rte_mem_config\n", pathname);
199 retval = ftruncate(mem_cfg_fd, sizeof(*rte_config.mem_config));
202 rte_panic("Cannot resize '%s' for rte_mem_config\n", pathname);
205 retval = fcntl(mem_cfg_fd, F_SETLK, &wr_lock);
208 rte_exit(EXIT_FAILURE, "Cannot create lock on '%s'. Is another primary "
209 "process running?\n", pathname);
212 rte_mem_cfg_addr = mmap(NULL, sizeof(*rte_config.mem_config),
213 PROT_READ | PROT_WRITE, MAP_SHARED, mem_cfg_fd, 0);
215 if (rte_mem_cfg_addr == MAP_FAILED){
216 rte_panic("Cannot mmap memory for rte_config\n");
218 memcpy(rte_mem_cfg_addr, &early_mem_config, sizeof(early_mem_config));
219 rte_config.mem_config = (struct rte_mem_config *) rte_mem_cfg_addr;
222 /* attach to an existing shared memory config */
224 rte_eal_config_attach(void)
226 void *rte_mem_cfg_addr;
227 const char *pathname = eal_runtime_config_path();
229 if (internal_config.no_shconf)
233 mem_cfg_fd = open(pathname, O_RDWR);
235 rte_panic("Cannot open '%s' for rte_mem_config\n", pathname);
238 rte_mem_cfg_addr = mmap(NULL, sizeof(*rte_config.mem_config),
239 PROT_READ | PROT_WRITE, MAP_SHARED, mem_cfg_fd, 0);
241 if (rte_mem_cfg_addr == MAP_FAILED)
242 rte_panic("Cannot mmap memory for rte_config\n");
244 rte_config.mem_config = (struct rte_mem_config *) rte_mem_cfg_addr;
247 /* Detect if we are a primary or a secondary process */
248 static enum rte_proc_type_t
249 eal_proc_type_detect(void)
251 enum rte_proc_type_t ptype = RTE_PROC_PRIMARY;
252 const char *pathname = eal_runtime_config_path();
254 /* if we can open the file but not get a write-lock we are a secondary
255 * process. NOTE: if we get a file handle back, we keep that open
256 * and don't close it to prevent a race condition between multiple opens */
257 if (((mem_cfg_fd = open(pathname, O_RDWR)) >= 0) &&
258 (fcntl(mem_cfg_fd, F_SETLK, &wr_lock) < 0))
259 ptype = RTE_PROC_SECONDARY;
261 RTE_LOG(INFO, EAL, "Auto-detected process type: %s\n",
262 ptype == RTE_PROC_PRIMARY ? "PRIMARY" : "SECONDARY");
267 /* Sets up rte_config structure with the pointer to shared memory config.*/
269 rte_config_init(void)
271 /* set the magic in configuration structure */
272 rte_config.magic = RTE_MAGIC;
273 rte_config.process_type = (internal_config.process_type == RTE_PROC_AUTO) ?
274 eal_proc_type_detect() : /* for auto, detect the type */
275 internal_config.process_type; /* otherwise use what's already set */
277 switch (rte_config.process_type){
278 case RTE_PROC_PRIMARY:
279 rte_eal_config_create();
281 case RTE_PROC_SECONDARY:
282 rte_eal_config_attach();
283 rte_eal_mcfg_wait_complete(rte_config.mem_config);
286 case RTE_PROC_INVALID:
287 rte_panic("Invalid process type\n");
291 /* Unlocks hugepage directories that were locked by eal_hugepage_info_init */
293 eal_hugedirs_unlock(void)
297 for (i = 0; i < MAX_HUGEPAGE_SIZES; i++)
299 /* skip uninitialized */
300 if (internal_config.hugepage_info[i].lock_descriptor == 0)
302 /* unlock hugepage file */
303 flock(internal_config.hugepage_info[i].lock_descriptor, LOCK_UN);
304 close(internal_config.hugepage_info[i].lock_descriptor);
305 /* reset the field */
306 internal_config.hugepage_info[i].lock_descriptor = 0;
312 eal_usage(const char *prgname)
314 printf("\nUsage: %s -c COREMASK -n NUM [-m NB] [-r NUM] [-b <domain:bus:devid.func>]"
315 "[--proc-type primary|secondary|auto] \n\n"
317 " -c COREMASK : A hexadecimal bitmask of cores to run on\n"
318 " -n NUM : Number of memory channels\n"
319 " -v : Display version information on startup\n"
320 " -b <domain:bus:devid.func>: to prevent EAL from using specified "
322 " (multiple -b options are allowed)\n"
323 " -m MB : memory to allocate (see also --"OPT_SOCKET_MEM")\n"
324 " -r NUM : force number of memory ranks (don't detect)\n"
325 " --"OPT_SYSLOG" : set syslog facility\n"
326 " --"OPT_SOCKET_MEM" : memory to allocate on specific \n"
327 " sockets (use comma separated values)\n"
328 " --"OPT_HUGE_DIR" : directory where hugetlbfs is mounted\n"
329 " --"OPT_PROC_TYPE" : type of this process\n"
330 " --"OPT_FILE_PREFIX": prefix for hugepage filenames\n"
331 "\nEAL options for DEBUG use only:\n"
332 " --"OPT_NO_HUGE" : use malloc instead of hugetlbfs\n"
333 " --"OPT_NO_PCI" : disable pci\n"
334 " --"OPT_NO_HPET" : disable hpet\n"
335 " --"OPT_NO_SHCONF": no shared config (mmap'd files)\n\n",
337 /* Allow the application to print its usage message too if hook is set */
338 if ( rte_application_usage_hook ) {
339 printf("===== Application Usage =====\n\n");
340 rte_application_usage_hook(prgname);
344 /* Set a per-application usage message */
346 rte_set_application_usage_hook( rte_usage_hook_t usage_func )
348 rte_usage_hook_t old_func;
350 /* Will be NULL on the first call to denote the last usage routine. */
351 old_func = rte_application_usage_hook;
352 rte_application_usage_hook = usage_func;
358 * Parse the coremask given as argument (hexadecimal string) and fill
359 * the global configuration (core role and core count) with the parsed
363 eal_parse_coremask(const char *coremask)
365 struct rte_config *cfg = rte_eal_get_configuration();
368 unsigned long long cm;
371 /* parse hexadecimal string */
372 cm = strtoull(coremask, &end, 16);
373 if ((coremask[0] == '\0') || (end == NULL) || (*end != '\0') || (cm == 0))
376 RTE_LOG(DEBUG, EAL, "coremask set to %llx\n", cm);
377 /* set core role and core count */
378 for (i = 0; i < RTE_MAX_LCORE; i++) {
379 if ((1ULL << i) & cm) {
381 cfg->master_lcore = i;
382 cfg->lcore_role[i] = ROLE_RTE;
386 cfg->lcore_role[i] = ROLE_OFF;
393 eal_parse_syslog(const char *facility)
400 { "auth", LOG_AUTH },
401 { "cron", LOG_CRON },
402 { "daemon", LOG_DAEMON },
404 { "kern", LOG_KERN },
406 { "mail", LOG_MAIL },
407 { "news", LOG_NEWS },
408 { "syslog", LOG_SYSLOG },
409 { "user", LOG_USER },
410 { "uucp", LOG_UUCP },
411 { "local0", LOG_LOCAL0 },
412 { "local1", LOG_LOCAL1 },
413 { "local2", LOG_LOCAL2 },
414 { "local3", LOG_LOCAL3 },
415 { "local4", LOG_LOCAL4 },
416 { "local5", LOG_LOCAL5 },
417 { "local6", LOG_LOCAL6 },
418 { "local7", LOG_LOCAL7 },
422 for (i = 0; map[i].name; i++) {
423 if (!strcmp(facility, map[i].name)) {
424 internal_config.syslog_facility = map[i].value;
432 eal_parse_socket_mem(char *socket_mem)
434 char * arg[RTE_MAX_NUMA_NODES];
437 uint64_t total_mem = 0;
439 len = strnlen(socket_mem, SOCKET_MEM_STRLEN);
440 if (len == SOCKET_MEM_STRLEN) {
441 RTE_LOG(ERR, EAL, "--socket-mem is too long\n");
445 /* all other error cases will be caught later */
446 if (!isdigit(socket_mem[len-1]))
449 /* split the optarg into separate socket values */
450 arg_num = rte_strsplit(socket_mem, len,
451 arg, RTE_MAX_NUMA_NODES, ',');
453 /* if split failed, or 0 arguments */
457 internal_config.force_sockets = 1;
459 /* parse each defined socket option */
461 for (i = 0; i < arg_num; i++) {
463 internal_config.socket_mem[i] = strtoull(arg[i], &end, 10);
465 /* check for invalid input */
467 (arg[i][0] == '\0') || (end == NULL) || (*end != '\0'))
469 internal_config.socket_mem[i] *= 1024ULL;
470 internal_config.socket_mem[i] *= 1024ULL;
471 total_mem += internal_config.socket_mem[i];
474 /* check if we have a positive amount of total memory */
482 eal_get_hugepage_mem_size(void)
487 for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
488 struct hugepage_info *hpi = &internal_config.hugepage_info[i];
489 if (hpi->hugedir != NULL) {
490 for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
491 size += hpi->hugepage_sz * hpi->num_pages[j];
496 return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
499 static enum rte_proc_type_t
500 eal_parse_proc_type(const char *arg)
502 if (strncasecmp(arg, "primary", sizeof("primary")) == 0)
503 return RTE_PROC_PRIMARY;
504 if (strncasecmp(arg, "secondary", sizeof("secondary")) == 0)
505 return RTE_PROC_SECONDARY;
506 if (strncasecmp(arg, "auto", sizeof("auto")) == 0)
507 return RTE_PROC_AUTO;
509 return RTE_PROC_INVALID;
513 eal_parse_blacklist(const char *input, struct rte_pci_addr *dev2bl)
515 GET_BLACKLIST_FIELD(input, dev2bl->domain, UINT16_MAX, ':');
516 GET_BLACKLIST_FIELD(input, dev2bl->bus, UINT8_MAX, ':');
517 GET_BLACKLIST_FIELD(input, dev2bl->devid, UINT8_MAX, '.');
518 GET_BLACKLIST_FIELD(input, dev2bl->function, UINT8_MAX, 0);
523 eal_parse_blacklist_opt(const char *optarg, size_t idx)
525 if (idx >= sizeof (eal_dev_blacklist) / sizeof (eal_dev_blacklist[0])) {
527 "%s - too many devices to blacklist...\n",
530 } else if (eal_parse_blacklist(optarg, eal_dev_blacklist + idx) != 0) {
532 "%s - invalid device to blacklist...\n",
542 /* Parse the argument given in the command line of the application */
/* Resets internal_config to defaults, then processes short options
 * (-b -c -m -n -r -v) and the OPT_* long options, validating option
 * combinations at the end (e.g. -m vs --socket-mem, --no-huge).
 * NOTE(review): many lines of this function are missing from this
 * extraction (declarations, switch cases, braces, returns) — the text
 * below is preserved byte-for-byte; confirm against upstream source. */
544 eal_parse_args(int argc, char **argv)
/* NOTE(review): stray double semicolon on the next line */
550 ssize_t blacklist_index = 0;;
551 char *prgname = argv[0];
/* long-option table for getopt_long(); second field 1 = takes an argument */
552 static struct option lgopts[] = {
553 {OPT_NO_HUGE, 0, 0, 0},
554 {OPT_NO_PCI, 0, 0, 0},
555 {OPT_NO_HPET, 0, 0, 0},
556 {OPT_HUGE_DIR, 1, 0, 0},
557 {OPT_NO_SHCONF, 0, 0, 0},
558 {OPT_PROC_TYPE, 1, 0, 0},
559 {OPT_FILE_PREFIX, 1, 0, 0},
560 {OPT_SOCKET_MEM, 1, 0, 0},
561 {OPT_SYSLOG, 1, NULL, 0},
/* reset internal_config to its defaults before parsing */
567 internal_config.memory = 0;
568 internal_config.force_nrank = 0;
569 internal_config.force_nchannel = 0;
570 internal_config.hugefile_prefix = HUGEFILE_PREFIX_DEFAULT;
571 internal_config.hugepage_dir = NULL;
572 internal_config.force_sockets = 0;
573 internal_config.syslog_facility = LOG_DAEMON;
/* HPET is only enabled by default when the library was built with it */
574 #ifdef RTE_LIBEAL_USE_HPET
575 internal_config.no_hpet = 0;
577 internal_config.no_hpet = 1;
579 /* zero out the NUMA config */
580 for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
581 internal_config.socket_mem[i] = 0;
583 /* zero out hugedir descriptors */
584 for (i = 0; i < MAX_HUGEPAGE_SIZES; i++)
585 internal_config.hugepage_info[i].lock_descriptor = 0;
/* main option loop: short options plus the lgopts table above */
587 while ((opt = getopt_long(argc, argvopt, "b:c:m:n:r:v",
588 lgopts, &option_index)) != EOF) {
/* -b: append one PCI address to the blacklist table */
593 if ((blacklist_index = eal_parse_blacklist_opt(optarg,
594 blacklist_index)) < 0) {
/* -c: hexadecimal coremask */
601 if (eal_parse_coremask(optarg) < 0) {
602 RTE_LOG(ERR, EAL, "invalid coremask\n");
/* -m: total memory in MB, converted here to bytes */
610 internal_config.memory = atoi(optarg);
611 internal_config.memory *= 1024ULL;
612 internal_config.memory *= 1024ULL;
614 /* force number of channels */
616 internal_config.force_nchannel = atoi(optarg);
617 if (internal_config.force_nchannel == 0 ||
618 internal_config.force_nchannel > 4) {
619 RTE_LOG(ERR, EAL, "invalid channel number\n");
624 /* force number of ranks */
626 internal_config.force_nrank = atoi(optarg);
627 if (internal_config.force_nrank == 0 ||
628 internal_config.force_nrank > 16) {
629 RTE_LOG(ERR, EAL, "invalid rank number\n");
635 /* since message is explicitly requested by user, we
636 * write message at highest log level so it can always be seen
637 * even if info or warning messages are disabled */
638 RTE_LOG(CRIT, EAL, "RTE Version: '%s'\n", rte_version());
/* long options: dispatched by comparing the matched option name */
643 if (!strcmp(lgopts[option_index].name, OPT_NO_HUGE)) {
644 internal_config.no_hugetlbfs = 1;
646 else if (!strcmp(lgopts[option_index].name, OPT_NO_PCI)) {
647 internal_config.no_pci = 1;
649 else if (!strcmp(lgopts[option_index].name, OPT_NO_HPET)) {
650 internal_config.no_hpet = 1;
652 else if (!strcmp(lgopts[option_index].name, OPT_NO_SHCONF)) {
653 internal_config.no_shconf = 1;
655 else if (!strcmp(lgopts[option_index].name, OPT_HUGE_DIR)) {
656 internal_config.hugepage_dir = optarg;
658 else if (!strcmp(lgopts[option_index].name, OPT_PROC_TYPE)) {
659 internal_config.process_type = eal_parse_proc_type(optarg);
661 else if (!strcmp(lgopts[option_index].name, OPT_FILE_PREFIX)) {
662 internal_config.hugefile_prefix = optarg;
664 else if (!strcmp(lgopts[option_index].name, OPT_SOCKET_MEM)) {
665 if (eal_parse_socket_mem(optarg) < 0) {
666 RTE_LOG(ERR, EAL, "invalid parameters for --"
667 OPT_SOCKET_MEM "\n");
672 else if (!strcmp(lgopts[option_index].name, OPT_SYSLOG)) {
673 if (eal_parse_syslog(optarg) < 0) {
674 RTE_LOG(ERR, EAL, "invalid parameters for --"
/* post-parse validation of required options and combinations */
690 RTE_LOG(ERR, EAL, "coremask not specified\n");
694 if (internal_config.process_type == RTE_PROC_AUTO){
695 internal_config.process_type = eal_proc_type_detect();
697 if (internal_config.process_type == RTE_PROC_INVALID){
698 RTE_LOG(ERR, EAL, "Invalid process type specified\n");
702 if (internal_config.process_type == RTE_PROC_PRIMARY &&
703 internal_config.force_nchannel == 0) {
704 RTE_LOG(ERR, EAL, "Number of memory channels (-n) not specified\n");
/* '%' is forbidden: the prefix is used inside a format string later.
 * NOTE(review): index() is a deprecated BSD function — strchr() is the
 * standard equivalent. */
708 if (index(internal_config.hugefile_prefix,'%') != NULL){
709 RTE_LOG(ERR, EAL, "Invalid char, '%%', in '"OPT_FILE_PREFIX"' option\n");
713 if (internal_config.memory > 0 && internal_config.force_sockets == 1) {
714 RTE_LOG(ERR, EAL, "Options -m and --socket-mem cannot be specified "
715 "at the same time\n");
719 /* --no-huge doesn't make sense with either -m or --socket-mem */
720 if (internal_config.no_hugetlbfs &&
721 (internal_config.memory > 0 ||
722 internal_config.force_sockets == 1)) {
723 RTE_LOG(ERR, EAL, "Options -m or --socket-mem cannot be specified "
724 "together with --no-huge!\n");
/* hand the accumulated -b addresses to the PCI layer */
729 if (blacklist_index > 0)
730 rte_eal_pci_set_blacklist(eal_dev_blacklist, blacklist_index);
733 argv[optind-1] = prgname;
735 /* if no memory amounts were requested, this will result in 0 and
736 * will be overriden later, right after eal_hugepage_info_init() */
737 for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
738 internal_config.memory += internal_config.socket_mem[i];
741 optind = 0; /* reset getopt lib */
746 eal_check_mem_on_local_socket(void)
748 const struct rte_memseg *ms;
751 socket_id = rte_lcore_to_socket_id(rte_config.master_lcore);
753 ms = rte_eal_get_physmem_layout();
755 for (i = 0; i < RTE_MAX_MEMSEG; i++)
756 if (ms[i].socket_id == socket_id &&
760 RTE_LOG(WARNING, EAL, "WARNING: Master core has no "
761 "memory on local socket!\n");
765 sync_func(__attribute__((unused)) void *arg)
771 rte_eal_mcfg_complete(void)
773 /* ALL shared mem_config related INIT DONE */
774 if (rte_config.process_type == RTE_PROC_PRIMARY)
775 rte_config.mem_config->magic = RTE_MAGIC;
778 /* Launch threads, called at application init(). */
/* Performs EAL bring-up in order: early logs, argument parsing, hugepage
 * discovery, memory/memzone/tailq/log/alarm/intr/HPET/PCI init, then spawns
 * one pthread per slave lcore and synchronizes with them before returning.
 * NOTE(review): many lines are missing from this extraction (declarations,
 * returns, braces) — text below preserved byte-for-byte. */
780 rte_eal_init(int argc, char **argv)
/* guard: rte_eal_init() may only ever run once per process */
784 static rte_atomic32_t run_once = RTE_ATOMIC32_INIT(0);
786 if (!rte_atomic32_test_and_set(&run_once))
789 thread_id = pthread_self();
791 if (rte_eal_log_early_init() < 0)
792 rte_panic("Cannot init early logs\n");
794 fctret = eal_parse_args(argc, argv);
/* hugepage info is only gathered by the primary, and only with hugepages */
798 if (internal_config.no_hugetlbfs == 0 &&
799 internal_config.process_type != RTE_PROC_SECONDARY &&
800 eal_hugepage_info_init() < 0)
801 rte_panic("Cannot get hugepage information\n");
/* no explicit memory request: default to 64MB (no-huge) or all hugepages */
803 if (internal_config.memory == 0 && internal_config.force_sockets == 0) {
804 if (internal_config.no_hugetlbfs)
805 internal_config.memory = MEMSIZE_IF_NO_HUGE_PAGE;
807 internal_config.memory = eal_get_hugepage_mem_size();
/* seed the EAL PRNG from the timestamp counter */
810 rte_srand(rte_rdtsc());
814 if (rte_eal_cpu_init() < 0)
815 rte_panic("Cannot detect lcores\n");
817 if (rte_eal_memory_init() < 0)
818 rte_panic("Cannot init memory\n");
820 /* the directories are locked during eal_hugepage_info_init */
821 eal_hugedirs_unlock();
823 if (rte_eal_memzone_init() < 0)
824 rte_panic("Cannot init memzone\n");
826 if (rte_eal_tailqs_init() < 0)
827 rte_panic("Cannot init tail queues for objects\n");
829 if (rte_eal_log_init(argv[0], internal_config.syslog_facility) < 0)
830 rte_panic("Cannot init logs\n");
/* NOTE(review): the alarm-init panic message below duplicates the
 * interrupt-thread message — it should presumably say "Cannot init alarm" */
832 if (rte_eal_alarm_init() < 0)
833 rte_panic("Cannot init interrupt-handling thread\n");
835 if (rte_eal_intr_init() < 0)
836 rte_panic("Cannot init interrupt-handling thread\n");
838 if (rte_eal_hpet_init() < 0)
839 rte_panic("Cannot init HPET\n");
841 if (rte_eal_pci_init() < 0)
842 rte_panic("Cannot init PCI\n");
844 RTE_LOG(DEBUG, EAL, "Master core %u is ready (tid=%x)\n",
845 rte_config.master_lcore, (int)thread_id);
847 eal_check_mem_on_local_socket();
849 rte_eal_mcfg_complete();
/* spawn one worker pthread per slave lcore, each with a pipe pair
 * for master<->slave command passing */
851 RTE_LCORE_FOREACH_SLAVE(i) {
854 * create communication pipes between master thread
857 if (pipe(lcore_config[i].pipe_master2slave) < 0)
858 rte_panic("Cannot create pipe\n");
859 if (pipe(lcore_config[i].pipe_slave2master) < 0)
860 rte_panic("Cannot create pipe\n");
862 lcore_config[i].state = WAIT;
864 /* create a thread for each lcore */
865 ret = pthread_create(&lcore_config[i].thread_id, NULL,
866 eal_thread_loop, NULL);
868 rte_panic("Cannot create thread\n");
871 eal_thread_init_master(rte_config.master_lcore);
874 * Launch a dummy function on all slave lcores, so that master lcore
875 * knows they are all ready when this function returns.
877 rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MASTER);
878 rte_eal_mp_wait_lcore();
884 enum rte_lcore_role_t
885 rte_eal_lcore_role(unsigned lcore_id)
887 return (rte_config.lcore_role[lcore_id]);
891 rte_eal_process_type(void)
893 return (rte_config.process_type);