/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_branch_prediction.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_rwlock.h>

#include "eal_private.h"
#include "eal_thread.h"
unsigned int rte_get_main_lcore(void)
{
	return rte_eal_get_configuration()->main_lcore;
}

unsigned int rte_lcore_count(void)
{
	return rte_eal_get_configuration()->lcore_count;
}
int rte_lcore_index(int lcore_id)
{
	if (unlikely(lcore_id >= RTE_MAX_LCORE))
		return -1;

	if (lcore_id < 0) {
		if (rte_lcore_id() == LCORE_ID_ANY)
			return -1;

		lcore_id = (int)rte_lcore_id();
	}

	return lcore_config[lcore_id].core_index;
}
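/*
 * Note (illustrative): the core index is the contiguous rank of an lcore
 * among the enabled ones, while lcore ids can be sparse. For example, with
 * lcores 0, 8 and 9 enabled, rte_lcore_index(8) returns 1. A negative
 * lcore_id resolves to the calling lcore, or -1 from an unregistered
 * non-EAL thread.
 */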
int rte_lcore_to_cpu_id(int lcore_id)
{
	if (unlikely(lcore_id >= RTE_MAX_LCORE))
		return -1;

	if (lcore_id < 0) {
		if (rte_lcore_id() == LCORE_ID_ANY)
			return -1;

		lcore_id = (int)rte_lcore_id();
	}

	return lcore_config[lcore_id].core_id;
}
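/*
 * Note: an lcore id is EAL's own numbering, while the returned cpu id is
 * the OS logical CPU the lcore is pinned to; the two only coincide under
 * the default 1:1 mapping set up in rte_eal_cpu_init() below.
 */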
rte_cpuset_t rte_lcore_cpuset(unsigned int lcore_id)
{
	return lcore_config[lcore_id].cpuset;
}
enum rte_lcore_role_t
rte_eal_lcore_role(unsigned int lcore_id)
{
	struct rte_config *cfg = rte_eal_get_configuration();

	if (lcore_id >= RTE_MAX_LCORE)
		return ROLE_OFF;
	return cfg->lcore_role[lcore_id];
}
int
rte_lcore_has_role(unsigned int lcore_id, enum rte_lcore_role_t role)
{
	struct rte_config *cfg = rte_eal_get_configuration();

	if (lcore_id >= RTE_MAX_LCORE)
		return -EINVAL;

	return cfg->lcore_role[lcore_id] == role;
}
int rte_lcore_is_enabled(unsigned int lcore_id)
{
	struct rte_config *cfg = rte_eal_get_configuration();

	if (lcore_id >= RTE_MAX_LCORE)
		return 0;
	return cfg->lcore_role[lcore_id] == ROLE_RTE;
}
unsigned int rte_get_next_lcore(unsigned int i, int skip_main, int wrap)
{
	i++;
	if (wrap)
		i %= RTE_MAX_LCORE;

	while (i < RTE_MAX_LCORE) {
		if (!rte_lcore_is_enabled(i) ||
		    (skip_main && (i == rte_get_main_lcore()))) {
			i++;
			if (wrap)
				i %= RTE_MAX_LCORE;
			continue;
		}
		break;
	}
	return i;
}
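/*
 * Usage sketch (illustrative): rte_get_next_lcore() is the building block
 * of the RTE_LCORE_FOREACH()/RTE_LCORE_FOREACH_WORKER() macros. Iterating
 * all enabled workers, skipping the main lcore, without wrap-around:
 *
 *	unsigned int lcore_id;
 *
 *	for (lcore_id = rte_get_next_lcore(-1, 1, 0);
 *	     lcore_id < RTE_MAX_LCORE;
 *	     lcore_id = rte_get_next_lcore(lcore_id, 1, 0))
 *		printf("worker lcore %u\n", lcore_id);
 */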
unsigned int
rte_lcore_to_socket_id(unsigned int lcore_id)
{
	return lcore_config[lcore_id].socket_id;
}
static int
socket_id_cmp(const void *a, const void *b)
{
	const int *lcore_id_a = a;
	const int *lcore_id_b = b;

	if (*lcore_id_a < *lcore_id_b)
		return -1;
	if (*lcore_id_a > *lcore_id_b)
		return 1;
	return 0;
}
/*
 * Parse /sys/devices/system/cpu to get the number of physical and logical
 * processors on the machine. The function will fill the cpu_info
 * structure.
 */
int
rte_eal_cpu_init(void)
{
	/* pointer to global configuration */
	struct rte_config *config = rte_eal_get_configuration();
	unsigned int lcore_id;
	unsigned int count = 0;
	unsigned int socket_id, prev_socket_id;
	int lcore_to_socket_id[RTE_MAX_LCORE];

	/*
	 * Parse the maximum set of logical cores, detect the subset of running
	 * ones and enable them by default.
	 */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		lcore_config[lcore_id].core_index = count;

		/* init cpuset for per lcore config */
		CPU_ZERO(&lcore_config[lcore_id].cpuset);

		/* find socket first */
		socket_id = eal_cpu_socket_id(lcore_id);
		lcore_to_socket_id[lcore_id] = socket_id;

		if (eal_cpu_detected(lcore_id) == 0) {
			config->lcore_role[lcore_id] = ROLE_OFF;
			lcore_config[lcore_id].core_index = -1;
			continue;
		}

		/* By default, lcore 1:1 map to cpu id */
		CPU_SET(lcore_id, &lcore_config[lcore_id].cpuset);

		/* By default, each detected core is enabled */
		config->lcore_role[lcore_id] = ROLE_RTE;
		lcore_config[lcore_id].core_role = ROLE_RTE;
		lcore_config[lcore_id].core_id = eal_cpu_core_id(lcore_id);
		lcore_config[lcore_id].socket_id = socket_id;
		RTE_LOG(DEBUG, EAL, "Detected lcore %u as core %u on socket %u\n",
			lcore_id, lcore_config[lcore_id].core_id,
			lcore_config[lcore_id].socket_id);
		count++;
	}
	for (; lcore_id < CPU_SETSIZE; lcore_id++) {
		if (eal_cpu_detected(lcore_id) == 0)
			continue;
		RTE_LOG(DEBUG, EAL, "Skipped lcore %u as core %u on socket %u\n",
			lcore_id, eal_cpu_core_id(lcore_id),
			eal_cpu_socket_id(lcore_id));
	}

	/* Set the count of enabled logical cores of the EAL configuration */
	config->lcore_count = count;
	RTE_LOG(DEBUG, EAL,
		"Maximum logical cores by configuration: %u\n",
		RTE_MAX_LCORE);
	RTE_LOG(INFO, EAL, "Detected CPU lcores: %u\n", config->lcore_count);

	/* sort all socket id's in ascending order */
	qsort(lcore_to_socket_id, RTE_DIM(lcore_to_socket_id),
		sizeof(lcore_to_socket_id[0]), socket_id_cmp);

	prev_socket_id = (unsigned int)-1;
	config->numa_node_count = 0;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		socket_id = lcore_to_socket_id[lcore_id];
		if (socket_id != prev_socket_id)
			config->numa_nodes[config->numa_node_count++] =
				socket_id;
		prev_socket_id = socket_id;
	}
	RTE_LOG(INFO, EAL, "Detected NUMA nodes: %u\n", config->numa_node_count);

	return 0;
}
unsigned int
rte_socket_count(void)
{
	const struct rte_config *config = rte_eal_get_configuration();
	return config->numa_node_count;
}
int
rte_socket_id_by_idx(unsigned int idx)
{
	const struct rte_config *config = rte_eal_get_configuration();
	if (idx >= config->numa_node_count) {
		rte_errno = EINVAL;
		return -1;
	}
	return config->numa_nodes[idx];
}
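/*
 * Usage sketch (illustrative): enumerating the detected NUMA nodes in
 * ascending socket id order.
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < rte_socket_count(); i++)
 *		printf("NUMA node %d\n", rte_socket_id_by_idx(i));
 */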
static rte_rwlock_t lcore_lock = RTE_RWLOCK_INITIALIZER;
struct lcore_callback {
	TAILQ_ENTRY(lcore_callback) next;
	char *name;
	rte_lcore_init_cb init;
	rte_lcore_uninit_cb uninit;
	void *arg;
};
static TAILQ_HEAD(lcore_callbacks_head, lcore_callback) lcore_callbacks =
	TAILQ_HEAD_INITIALIZER(lcore_callbacks);
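/*
 * Lcore callbacks let other components attach per-lcore state: the init
 * callback runs for every existing lcore at registration time and again
 * for each lcore later claimed by a non-EAL thread; uninit runs
 * symmetrically. lcore_lock serializes callback (un)registration and
 * lcore allocation/release against read-side iteration.
 */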
static int
callback_init(struct lcore_callback *callback, unsigned int lcore_id)
{
	if (callback->init == NULL)
		return 0;
	RTE_LOG(DEBUG, EAL, "Call init for lcore callback %s, lcore_id %u\n",
		callback->name, lcore_id);
	return callback->init(lcore_id, callback->arg);
}
static void
callback_uninit(struct lcore_callback *callback, unsigned int lcore_id)
{
	if (callback->uninit == NULL)
		return;
	RTE_LOG(DEBUG, EAL, "Call uninit for lcore callback %s, lcore_id %u\n",
		callback->name, lcore_id);
	callback->uninit(lcore_id, callback->arg);
}
static void
free_callback(struct lcore_callback *callback)
{
	free(callback->name);
	free(callback);
}
void *
rte_lcore_callback_register(const char *name, rte_lcore_init_cb init,
	rte_lcore_uninit_cb uninit, void *arg)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	struct lcore_callback *callback;
	unsigned int lcore_id;

	if (name == NULL)
		return NULL;
	callback = calloc(1, sizeof(*callback));
	if (callback == NULL)
		return NULL;
	if (asprintf(&callback->name, "%s-%p", name, arg) == -1) {
		free(callback);
		return NULL;
	}
	callback->init = init;
	callback->uninit = uninit;
	callback->arg = arg;
	rte_rwlock_write_lock(&lcore_lock);
	if (callback->init == NULL)
		goto no_init;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (cfg->lcore_role[lcore_id] == ROLE_OFF)
			continue;
		if (callback_init(callback, lcore_id) == 0)
			continue;
		/* Callback refused init for this lcore, uninitialize all
		 * previous lcores.
		 */
		while (lcore_id-- != 0) {
			if (cfg->lcore_role[lcore_id] == ROLE_OFF)
				continue;
			callback_uninit(callback, lcore_id);
		}
		free_callback(callback);
		callback = NULL;
		goto out;
	}
no_init:
	TAILQ_INSERT_TAIL(&lcore_callbacks, callback, next);
	RTE_LOG(DEBUG, EAL, "Registered new lcore callback %s (%sinit, %suninit).\n",
		callback->name, callback->init == NULL ? "NO " : "",
		callback->uninit == NULL ? "NO " : "");
out:
	rte_rwlock_write_unlock(&lcore_lock);
	return callback;
}
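/*
 * Usage sketch (illustrative; my_init and my_uninit are hypothetical):
 *
 *	static int my_init(unsigned int lcore_id, void *arg)
 *	{
 *		return 0; (a non-zero return refuses this lcore)
 *	}
 *
 *	static void my_uninit(unsigned int lcore_id, void *arg) { }
 *
 *	void *handle = rte_lcore_callback_register("my-module",
 *		my_init, my_uninit, NULL);
 *	...
 *	rte_lcore_callback_unregister(handle);
 */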
void
rte_lcore_callback_unregister(void *handle)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	struct lcore_callback *callback = handle;
	unsigned int lcore_id;

	if (callback == NULL)
		return;
	rte_rwlock_write_lock(&lcore_lock);
	if (callback->uninit == NULL)
		goto no_uninit;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (cfg->lcore_role[lcore_id] == ROLE_OFF)
			continue;
		callback_uninit(callback, lcore_id);
	}
no_uninit:
	TAILQ_REMOVE(&lcore_callbacks, callback, next);
	rte_rwlock_write_unlock(&lcore_lock);
	RTE_LOG(DEBUG, EAL, "Unregistered lcore callback %s-%p.\n",
		callback->name, callback->arg);
	free_callback(callback);
}
unsigned int
eal_lcore_non_eal_allocate(void)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	struct lcore_callback *callback;
	struct lcore_callback *prev;
	unsigned int lcore_id;

	rte_rwlock_write_lock(&lcore_lock);
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (cfg->lcore_role[lcore_id] != ROLE_OFF)
			continue;
		cfg->lcore_role[lcore_id] = ROLE_NON_EAL;
		cfg->lcore_count++;
		break;
	}
	if (lcore_id == RTE_MAX_LCORE) {
		RTE_LOG(DEBUG, EAL, "No lcore available.\n");
		goto out;
	}
	TAILQ_FOREACH(callback, &lcore_callbacks, next) {
		if (callback_init(callback, lcore_id) == 0)
			continue;
		/* Callback refused init for this lcore, call uninit for all
		 * previous callbacks.
		 */
		prev = TAILQ_PREV(callback, lcore_callbacks_head, next);
		while (prev != NULL) {
			callback_uninit(prev, lcore_id);
			prev = TAILQ_PREV(prev, lcore_callbacks_head, next);
		}
		RTE_LOG(DEBUG, EAL, "Initialization refused for lcore %u.\n",
			lcore_id);
		cfg->lcore_role[lcore_id] = ROLE_OFF;
		cfg->lcore_count--;
		lcore_id = RTE_MAX_LCORE;
		break;
	}
out:
	rte_rwlock_write_unlock(&lcore_lock);
	return lcore_id;
}
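/*
 * Note: this is the path taken when a non-EAL thread registers itself
 * with EAL (the rte_thread_register() path): the first lcore id still
 * marked ROLE_OFF is claimed, and every registered callback gets a
 * chance to veto it.
 */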
void
eal_lcore_non_eal_release(unsigned int lcore_id)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	struct lcore_callback *callback;

	rte_rwlock_write_lock(&lcore_lock);
	if (cfg->lcore_role[lcore_id] != ROLE_NON_EAL)
		goto out;
	TAILQ_FOREACH(callback, &lcore_callbacks, next)
		callback_uninit(callback, lcore_id);
	cfg->lcore_role[lcore_id] = ROLE_OFF;
	cfg->lcore_count--;
out:
	rte_rwlock_write_unlock(&lcore_lock);
}
int
rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	unsigned int lcore_id;
	int ret = 0;

	rte_rwlock_read_lock(&lcore_lock);
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (cfg->lcore_role[lcore_id] == ROLE_OFF)
			continue;
		ret = cb(lcore_id, arg);
		if (ret != 0)
			break;
	}
	rte_rwlock_read_unlock(&lcore_lock);
	return ret;
}
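/*
 * Usage sketch (illustrative; count_cb is hypothetical). The callback runs
 * under lcore_lock, so it must not itself register or unregister lcore
 * callbacks or lcores.
 *
 *	static int count_cb(unsigned int lcore_id, void *arg)
 *	{
 *		(*(unsigned int *)arg)++;
 *		return 0;
 *	}
 *
 *	unsigned int n = 0;
 *	rte_lcore_iterate(count_cb, &n);
 */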
static int
lcore_dump_cb(unsigned int lcore_id, void *arg)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	char cpuset[RTE_CPU_AFFINITY_STR_LEN];
	const char *role;
	FILE *f = arg;
	int ret;

	switch (cfg->lcore_role[lcore_id]) {
	case ROLE_RTE:
		role = "RTE";
		break;
	case ROLE_SERVICE:
		role = "SERVICE";
		break;
	case ROLE_NON_EAL:
		role = "NON_EAL";
		break;
	default:
		role = "UNKNOWN";
		break;
	}

	ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
		sizeof(cpuset));
	fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
		rte_lcore_to_socket_id(lcore_id), role, cpuset,
		ret == 0 ? "" : "...");
	return 0;
}
void
rte_lcore_dump(FILE *f)
{
	rte_lcore_iterate(lcore_dump_cb, f);
}
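/*
 * Example output of rte_lcore_dump(stdout) (illustrative):
 *
 *	lcore 0, socket 0, role RTE, cpuset 0
 *	lcore 1, socket 0, role RTE, cpuset 1
 */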