-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#include <unistd.h>
#include <limits.h>
#include <string.h>
-#include <dirent.h>
-#include <rte_log.h>
-#include <rte_eal.h>
-#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_spinlock.h>
+#include "eal_memcfg.h"
#include "eal_private.h"
#include "eal_thread.h"
+unsigned int rte_get_master_lcore(void)
+{
+	return rte_eal_get_configuration()->master_lcore;
+}
+
+unsigned int rte_lcore_count(void)
+{
+	return rte_eal_get_configuration()->lcore_count;
+}
+
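+/* A negative lcore_id requests the index of the caller's own lcore; -1 is
+ * returned for out-of-range ids or threads that have no lcore id.
+ */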
+int rte_lcore_index(int lcore_id)
+{
+	if (unlikely(lcore_id >= RTE_MAX_LCORE))
+		return -1;
+
+	if (lcore_id < 0) {
+		if (rte_lcore_id() == LCORE_ID_ANY)
+			return -1;
+
+		lcore_id = (int)rte_lcore_id();
+	}
+
+	return lcore_config[lcore_id].core_index;
+}
+
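+/* Same convention: a negative lcore_id refers to the caller's own lcore. */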
+int rte_lcore_to_cpu_id(int lcore_id)
+{
+	if (unlikely(lcore_id >= RTE_MAX_LCORE))
+		return -1;
+
+	if (lcore_id < 0) {
+		if (rte_lcore_id() == LCORE_ID_ANY)
+			return -1;
+
+		lcore_id = (int)rte_lcore_id();
+	}
+
+	return lcore_config[lcore_id].core_id;
+}
+
+rte_cpuset_t rte_lcore_cpuset(unsigned int lcore_id)
+{
+	return lcore_config[lcore_id].cpuset;
+}
+
+enum rte_lcore_role_t
+rte_eal_lcore_role(unsigned int lcore_id)
+{
+	struct rte_config *cfg = rte_eal_get_configuration();
+
+	if (lcore_id >= RTE_MAX_LCORE)
+		return ROLE_OFF;
+	return cfg->lcore_role[lcore_id];
+}
+
+int
+rte_lcore_has_role(unsigned int lcore_id, enum rte_lcore_role_t role)
+{
+	struct rte_config *cfg = rte_eal_get_configuration();
+
+	if (lcore_id >= RTE_MAX_LCORE)
+		return -EINVAL;
+
+	return cfg->lcore_role[lcore_id] == role;
+}
+
+int rte_lcore_is_enabled(unsigned int lcore_id)
+{
+	struct rte_config *cfg = rte_eal_get_configuration();
+
+	if (lcore_id >= RTE_MAX_LCORE)
+		return 0;
+	return cfg->lcore_role[lcore_id] == ROLE_RTE;
+}
+
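+/* Return the id of the next enabled lcore after i, optionally skipping the
+ * master lcore and optionally wrapping around at RTE_MAX_LCORE.
+ */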
+unsigned int rte_get_next_lcore(unsigned int i, int skip_master, int wrap)
+{
+	i++;
+	if (wrap)
+		i %= RTE_MAX_LCORE;
+
+	while (i < RTE_MAX_LCORE) {
+		if (!rte_lcore_is_enabled(i) ||
+		    (skip_master && (i == rte_get_master_lcore()))) {
+			i++;
+			if (wrap)
+				i %= RTE_MAX_LCORE;
+			continue;
+		}
+		break;
+	}
+	return i;
+}
+
+unsigned int
+rte_lcore_to_socket_id(unsigned int lcore_id)
+{
+	return lcore_config[lcore_id].socket_id;
+}
+
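+/* qsort() comparator used below to sort the per-lcore socket ids. */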
+static int
+socket_id_cmp(const void *a, const void *b)
+{
+	const int *lcore_id_a = a;
+	const int *lcore_id_b = b;
+
+	if (*lcore_id_a < *lcore_id_b)
+		return -1;
+	if (*lcore_id_a > *lcore_id_b)
+		return 1;
+	return 0;
+}
+
/*
* Parse /sys/devices/system/cpu to get the number of physical and logical
* processors on the machine. The function will fill the cpu_info
struct rte_config *config = rte_eal_get_configuration();
unsigned lcore_id;
unsigned count = 0;
+	unsigned int socket_id, prev_socket_id;
+	int lcore_to_socket_id[RTE_MAX_LCORE];
/*
* Parse the maximum set of logical cores, detect the subset of running
/* init cpuset for per lcore config */
CPU_ZERO(&lcore_config[lcore_id].cpuset);
-		/* in 1:1 mapping, record related cpu detected state */
-		lcore_config[lcore_id].detected = eal_cpu_detected(lcore_id);
-		if (lcore_config[lcore_id].detected == 0) {
+		/* find socket first */
+		socket_id = eal_cpu_socket_id(lcore_id);
+		lcore_to_socket_id[lcore_id] = socket_id;
+
+		if (eal_cpu_detected(lcore_id) == 0) {
config->lcore_role[lcore_id] = ROLE_OFF;
lcore_config[lcore_id].core_index = -1;
continue;
/* By default, each detected core is enabled */
config->lcore_role[lcore_id] = ROLE_RTE;
+		lcore_config[lcore_id].core_role = ROLE_RTE;
lcore_config[lcore_id].core_id = eal_cpu_core_id(lcore_id);
-		lcore_config[lcore_id].socket_id = eal_cpu_socket_id(lcore_id);
-		if (lcore_config[lcore_id].socket_id >= RTE_MAX_NUMA_NODES)
-#ifdef RTE_EAL_ALLOW_INV_SOCKET_ID
-			lcore_config[lcore_id].socket_id = 0;
-#else
-			rte_panic("Socket ID (%u) is greater than "
-				"RTE_MAX_NUMA_NODES (%d)\n",
-				lcore_config[lcore_id].socket_id,
-				RTE_MAX_NUMA_NODES);
-#endif
-
+		lcore_config[lcore_id].socket_id = socket_id;
RTE_LOG(DEBUG, EAL, "Detected lcore %u as "
"core %u on socket %u\n",
lcore_id, lcore_config[lcore_id].core_id,
lcore_config[lcore_id].socket_id);
count++;
}
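+	/* CPUs detected above RTE_MAX_LCORE are logged and left unused. */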
+	for (; lcore_id < CPU_SETSIZE; lcore_id++) {
+		if (eal_cpu_detected(lcore_id) == 0)
+			continue;
+		RTE_LOG(DEBUG, EAL, "Skipped lcore %u as core %u on socket %u\n",
+			lcore_id, eal_cpu_core_id(lcore_id),
+			eal_cpu_socket_id(lcore_id));
+	}
+
/* Set the count of enabled logical cores of the EAL configuration */
config->lcore_count = count;
RTE_LOG(DEBUG, EAL,
RTE_MAX_LCORE);
RTE_LOG(INFO, EAL, "Detected %u lcore(s)\n", config->lcore_count);
+	/* sort all socket id's in ascending order */
+	qsort(lcore_to_socket_id, RTE_DIM(lcore_to_socket_id),
+			sizeof(lcore_to_socket_id[0]), socket_id_cmp);
+
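+	/* walk the sorted list and count each distinct socket id as a NUMA node */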
+	prev_socket_id = -1;
+	config->numa_node_count = 0;
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		socket_id = lcore_to_socket_id[lcore_id];
+		if (socket_id != prev_socket_id)
+			config->numa_nodes[config->numa_node_count++] =
+					socket_id;
+		prev_socket_id = socket_id;
+	}
+	RTE_LOG(INFO, EAL, "Detected %u NUMA nodes\n", config->numa_node_count);
+
return 0;
}
+
+unsigned int
+rte_socket_count(void)
+{
+	const struct rte_config *config = rte_eal_get_configuration();
+	return config->numa_node_count;
+}
+
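+/* Translate a NUMA node index into its socket id; sets rte_errno to EINVAL
+ * for an out-of-range index.
+ */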
+int
+rte_socket_id_by_idx(unsigned int idx)
+{
+	const struct rte_config *config = rte_eal_get_configuration();
+	if (idx >= config->numa_node_count) {
+		rte_errno = EINVAL;
+		return -1;
+	}
+	return config->numa_nodes[idx];
+}
+
+static rte_spinlock_t lcore_lock = RTE_SPINLOCK_INITIALIZER;
+
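+/* Reserve the first lcore id still marked ROLE_OFF for a non-EAL thread;
+ * returns RTE_MAX_LCORE when no id is left.
+ */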
+unsigned int
+eal_lcore_non_eal_allocate(void)
+{
+	struct rte_config *cfg = rte_eal_get_configuration();
+	unsigned int lcore_id;
+
+	rte_spinlock_lock(&lcore_lock);
+	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+		if (cfg->lcore_role[lcore_id] != ROLE_OFF)
+			continue;
+		cfg->lcore_role[lcore_id] = ROLE_NON_EAL;
+		cfg->lcore_count++;
+		break;
+	}
+	if (lcore_id == RTE_MAX_LCORE)
+		RTE_LOG(DEBUG, EAL, "No lcore available.\n");
+	rte_spinlock_unlock(&lcore_lock);
+	return lcore_id;
+}
+
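+/* Return an lcore id reserved by eal_lcore_non_eal_allocate() to the pool;
+ * ids that are not ROLE_NON_EAL are left untouched.
+ */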
+void
+eal_lcore_non_eal_release(unsigned int lcore_id)
+{
+	struct rte_config *cfg = rte_eal_get_configuration();
+
+	rte_spinlock_lock(&lcore_lock);
+	if (cfg->lcore_role[lcore_id] == ROLE_NON_EAL) {
+		cfg->lcore_role[lcore_id] = ROLE_OFF;
+		cfg->lcore_count--;
+	}
+	rte_spinlock_unlock(&lcore_lock);
+}