return cfg->lcore_role[lcore_id] == role;
}
-int eal_cpuset_socket_id(rte_cpuset_t *cpusetp)
+static int
+eal_cpuset_socket_id(rte_cpuset_t *cpusetp)
{
unsigned cpu = 0;
int socket_id = SOCKET_ID_ANY;
break;
}
- } while (++cpu < RTE_MAX_LCORE);
+ } while (++cpu < CPU_SETSIZE);
return socket_id;
}
rte_thread_get_affinity(&cpuset);
- for (cpu = 0; cpu < RTE_MAX_LCORE; cpu++) {
+ for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
if (!CPU_ISSET(cpu, &cpuset))
continue;
static void *rte_thread_init(void *arg)
{
int ret;
+ rte_cpuset_t *cpuset = &internal_config.ctrl_cpuset;
struct rte_thread_ctrl_params *params = arg;
void *(*start_routine)(void *) = params->start_routine;
void *routine_arg = params->arg;
+ /* Store cpuset in TLS for quick access */
+ memmove(&RTE_PER_LCORE(_cpuset), cpuset, sizeof(rte_cpuset_t));
+
ret = pthread_barrier_wait(&params->configured);
if (ret == PTHREAD_BARRIER_SERIAL_THREAD) {
pthread_barrier_destroy(&params->configured);
return start_routine(routine_arg);
}
-__rte_experimental int
+int
rte_ctrl_thread_create(pthread_t *thread, const char *name,
const pthread_attr_t *attr,
void *(*start_routine)(void *), void *arg)