1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
9 #include <rte_common.h>
10 #include <rte_debug.h>
12 #include <rte_errno.h>
13 #include <rte_lcore.h>
15 #include <rte_rwlock.h>
17 #include "eal_memcfg.h"
18 #include "eal_private.h"
19 #include "eal_thread.h"
21 unsigned int rte_get_main_lcore(void)
23 return rte_eal_get_configuration()->main_lcore;
26 unsigned int rte_lcore_count(void)
28 return rte_eal_get_configuration()->lcore_count;
31 int rte_lcore_index(int lcore_id)
33 if (unlikely(lcore_id >= RTE_MAX_LCORE))
37 if (rte_lcore_id() == LCORE_ID_ANY)
40 lcore_id = (int)rte_lcore_id();
43 return lcore_config[lcore_id].core_index;
46 int rte_lcore_to_cpu_id(int lcore_id)
48 if (unlikely(lcore_id >= RTE_MAX_LCORE))
52 if (rte_lcore_id() == LCORE_ID_ANY)
55 lcore_id = (int)rte_lcore_id();
58 return lcore_config[lcore_id].core_id;
61 rte_cpuset_t rte_lcore_cpuset(unsigned int lcore_id)
63 return lcore_config[lcore_id].cpuset;
67 rte_eal_lcore_role(unsigned int lcore_id)
69 struct rte_config *cfg = rte_eal_get_configuration();
71 if (lcore_id >= RTE_MAX_LCORE)
73 return cfg->lcore_role[lcore_id];
77 rte_lcore_has_role(unsigned int lcore_id, enum rte_lcore_role_t role)
79 struct rte_config *cfg = rte_eal_get_configuration();
81 if (lcore_id >= RTE_MAX_LCORE)
84 return cfg->lcore_role[lcore_id] == role;
87 int rte_lcore_is_enabled(unsigned int lcore_id)
89 struct rte_config *cfg = rte_eal_get_configuration();
91 if (lcore_id >= RTE_MAX_LCORE)
93 return cfg->lcore_role[lcore_id] == ROLE_RTE;
96 unsigned int rte_get_next_lcore(unsigned int i, int skip_main, int wrap)
102 while (i < RTE_MAX_LCORE) {
103 if (!rte_lcore_is_enabled(i) ||
104 (skip_main && (i == rte_get_main_lcore()))) {
116 rte_lcore_to_socket_id(unsigned int lcore_id)
118 return lcore_config[lcore_id].socket_id;
/* qsort() comparator: order socket (NUMA) ids in ascending order. */
static int
socket_id_cmp(const void *a, const void *b)
{
	const int *lcore_id_a = a;
	const int *lcore_id_b = b;

	if (*lcore_id_a < *lcore_id_b)
		return -1;
	if (*lcore_id_a > *lcore_id_b)
		return 1;
	return 0;
}
135 * Parse /sys/devices/system/cpu to get the number of physical and logical
136 * processors on the machine. The function will fill the cpu_info
140 rte_eal_cpu_init(void)
142 /* pointer to global configuration */
143 struct rte_config *config = rte_eal_get_configuration();
146 unsigned int socket_id, prev_socket_id;
147 int lcore_to_socket_id[RTE_MAX_LCORE];
150 * Parse the maximum set of logical cores, detect the subset of running
151 * ones and enable them by default.
153 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
154 lcore_config[lcore_id].core_index = count;
156 /* init cpuset for per lcore config */
157 CPU_ZERO(&lcore_config[lcore_id].cpuset);
159 /* find socket first */
160 socket_id = eal_cpu_socket_id(lcore_id);
161 lcore_to_socket_id[lcore_id] = socket_id;
163 if (eal_cpu_detected(lcore_id) == 0) {
164 config->lcore_role[lcore_id] = ROLE_OFF;
165 lcore_config[lcore_id].core_index = -1;
169 /* By default, lcore 1:1 map to cpu id */
170 CPU_SET(lcore_id, &lcore_config[lcore_id].cpuset);
172 /* By default, each detected core is enabled */
173 config->lcore_role[lcore_id] = ROLE_RTE;
174 lcore_config[lcore_id].core_role = ROLE_RTE;
175 lcore_config[lcore_id].core_id = eal_cpu_core_id(lcore_id);
176 lcore_config[lcore_id].socket_id = socket_id;
177 RTE_LOG(DEBUG, EAL, "Detected lcore %u as "
178 "core %u on socket %u\n",
179 lcore_id, lcore_config[lcore_id].core_id,
180 lcore_config[lcore_id].socket_id);
183 for (; lcore_id < CPU_SETSIZE; lcore_id++) {
184 if (eal_cpu_detected(lcore_id) == 0)
186 RTE_LOG(DEBUG, EAL, "Skipped lcore %u as core %u on socket %u\n",
187 lcore_id, eal_cpu_core_id(lcore_id),
188 eal_cpu_socket_id(lcore_id));
191 /* Set the count of enabled logical cores of the EAL configuration */
192 config->lcore_count = count;
194 "Support maximum %u logical core(s) by configuration.\n",
196 RTE_LOG(INFO, EAL, "Detected %u lcore(s)\n", config->lcore_count);
198 /* sort all socket id's in ascending order */
199 qsort(lcore_to_socket_id, RTE_DIM(lcore_to_socket_id),
200 sizeof(lcore_to_socket_id[0]), socket_id_cmp);
203 config->numa_node_count = 0;
204 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
205 socket_id = lcore_to_socket_id[lcore_id];
206 if (socket_id != prev_socket_id)
207 config->numa_nodes[config->numa_node_count++] =
209 prev_socket_id = socket_id;
211 RTE_LOG(INFO, EAL, "Detected %u NUMA nodes\n", config->numa_node_count);
217 rte_socket_count(void)
219 const struct rte_config *config = rte_eal_get_configuration();
220 return config->numa_node_count;
224 rte_socket_id_by_idx(unsigned int idx)
226 const struct rte_config *config = rte_eal_get_configuration();
227 if (idx >= config->numa_node_count) {
231 return config->numa_nodes[idx];
234 static rte_rwlock_t lcore_lock = RTE_RWLOCK_INITIALIZER;
235 struct lcore_callback {
236 TAILQ_ENTRY(lcore_callback) next;
238 rte_lcore_init_cb init;
239 rte_lcore_uninit_cb uninit;
242 static TAILQ_HEAD(lcore_callbacks_head, lcore_callback) lcore_callbacks =
243 TAILQ_HEAD_INITIALIZER(lcore_callbacks);
246 callback_init(struct lcore_callback *callback, unsigned int lcore_id)
248 if (callback->init == NULL)
250 RTE_LOG(DEBUG, EAL, "Call init for lcore callback %s, lcore_id %u\n",
251 callback->name, lcore_id);
252 return callback->init(lcore_id, callback->arg);
256 callback_uninit(struct lcore_callback *callback, unsigned int lcore_id)
258 if (callback->uninit == NULL)
260 RTE_LOG(DEBUG, EAL, "Call uninit for lcore callback %s, lcore_id %u\n",
261 callback->name, lcore_id);
262 callback->uninit(lcore_id, callback->arg);
266 free_callback(struct lcore_callback *callback)
268 free(callback->name);
273 rte_lcore_callback_register(const char *name, rte_lcore_init_cb init,
274 rte_lcore_uninit_cb uninit, void *arg)
276 struct rte_config *cfg = rte_eal_get_configuration();
277 struct lcore_callback *callback;
278 unsigned int lcore_id;
282 callback = calloc(1, sizeof(*callback));
283 if (callback == NULL)
285 if (asprintf(&callback->name, "%s-%p", name, arg) == -1) {
289 callback->init = init;
290 callback->uninit = uninit;
292 rte_rwlock_write_lock(&lcore_lock);
293 if (callback->init == NULL)
295 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
296 if (cfg->lcore_role[lcore_id] == ROLE_OFF)
298 if (callback_init(callback, lcore_id) == 0)
300 /* Callback refused init for this lcore, uninitialize all
303 while (lcore_id-- != 0) {
304 if (cfg->lcore_role[lcore_id] == ROLE_OFF)
306 callback_uninit(callback, lcore_id);
308 free_callback(callback);
313 TAILQ_INSERT_TAIL(&lcore_callbacks, callback, next);
314 RTE_LOG(DEBUG, EAL, "Registered new lcore callback %s (%sinit, %suninit).\n",
315 callback->name, callback->init == NULL ? "NO " : "",
316 callback->uninit == NULL ? "NO " : "");
318 rte_rwlock_write_unlock(&lcore_lock);
323 rte_lcore_callback_unregister(void *handle)
325 struct rte_config *cfg = rte_eal_get_configuration();
326 struct lcore_callback *callback = handle;
327 unsigned int lcore_id;
329 if (callback == NULL)
331 rte_rwlock_write_lock(&lcore_lock);
332 if (callback->uninit == NULL)
334 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
335 if (cfg->lcore_role[lcore_id] == ROLE_OFF)
337 callback_uninit(callback, lcore_id);
340 TAILQ_REMOVE(&lcore_callbacks, callback, next);
341 rte_rwlock_write_unlock(&lcore_lock);
342 RTE_LOG(DEBUG, EAL, "Unregistered lcore callback %s-%p.\n",
343 callback->name, callback->arg);
344 free_callback(callback);
348 eal_lcore_non_eal_allocate(void)
350 struct rte_config *cfg = rte_eal_get_configuration();
351 struct lcore_callback *callback;
352 struct lcore_callback *prev;
353 unsigned int lcore_id;
355 rte_rwlock_write_lock(&lcore_lock);
356 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
357 if (cfg->lcore_role[lcore_id] != ROLE_OFF)
359 cfg->lcore_role[lcore_id] = ROLE_NON_EAL;
363 if (lcore_id == RTE_MAX_LCORE) {
364 RTE_LOG(DEBUG, EAL, "No lcore available.\n");
367 TAILQ_FOREACH(callback, &lcore_callbacks, next) {
368 if (callback_init(callback, lcore_id) == 0)
370 /* Callback refused init for this lcore, call uninit for all
371 * previous callbacks.
373 prev = TAILQ_PREV(callback, lcore_callbacks_head, next);
374 while (prev != NULL) {
375 callback_uninit(prev, lcore_id);
376 prev = TAILQ_PREV(prev, lcore_callbacks_head, next);
378 RTE_LOG(DEBUG, EAL, "Initialization refused for lcore %u.\n",
380 cfg->lcore_role[lcore_id] = ROLE_OFF;
382 lcore_id = RTE_MAX_LCORE;
386 rte_rwlock_write_unlock(&lcore_lock);
391 eal_lcore_non_eal_release(unsigned int lcore_id)
393 struct rte_config *cfg = rte_eal_get_configuration();
394 struct lcore_callback *callback;
396 rte_rwlock_write_lock(&lcore_lock);
397 if (cfg->lcore_role[lcore_id] != ROLE_NON_EAL)
399 TAILQ_FOREACH(callback, &lcore_callbacks, next)
400 callback_uninit(callback, lcore_id);
401 cfg->lcore_role[lcore_id] = ROLE_OFF;
404 rte_rwlock_write_unlock(&lcore_lock);
408 rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg)
410 struct rte_config *cfg = rte_eal_get_configuration();
411 unsigned int lcore_id;
414 rte_rwlock_read_lock(&lcore_lock);
415 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
416 if (cfg->lcore_role[lcore_id] == ROLE_OFF)
418 ret = cb(lcore_id, arg);
422 rte_rwlock_read_unlock(&lcore_lock);
427 lcore_dump_cb(unsigned int lcore_id, void *arg)
429 struct rte_config *cfg = rte_eal_get_configuration();
430 char cpuset[RTE_CPU_AFFINITY_STR_LEN];
435 switch (cfg->lcore_role[lcore_id]) {
450 ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
452 fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
453 rte_lcore_to_socket_id(lcore_id), role, cpuset,
454 ret == 0 ? "" : "...");
459 rte_lcore_dump(FILE *f)
461 rte_lcore_iterate(lcore_dump_cb, f);