Add a helper to iterate all lcores.
The iterator callback is read-only wrt the lcores list.
Implement a dump function on top of this for debugging.
Signed-off-by: David Marchand <david.marchand@redhat.com>
Reviewed-by: Olivier Matz <olivier.matz@6wind.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
l[0].uninit, l[1].uninit);
goto cleanup_threads;
}
+ rte_lcore_dump(stdout);
/* Release all threads, and check their states. */
__atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
ret = 0;
}
if (ret < 0)
goto error;
+ rte_lcore_dump(stdout);
if (l[0].uninit != 2 || l[1].uninit != 1) {
printf("Error: threads reported having successfully registered and unregistered, but incorrect uninit calls, expected 2, 1, got %u, %u\n",
l[0].uninit, l[1].uninit);
}
printf("EAL threads count: %u, RTE_MAX_LCORE=%u\n", eal_threads_count,
RTE_MAX_LCORE);
+ rte_lcore_dump(stdout);
if (test_non_eal_lcores(eal_threads_count) < 0)
return TEST_FAILED;
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
-#include <rte_spinlock.h>
+#include <rte_rwlock.h>
#include "eal_memcfg.h"
#include "eal_private.h"
return config->numa_nodes[idx];
}
-static rte_spinlock_t lcore_lock = RTE_SPINLOCK_INITIALIZER;
+static rte_rwlock_t lcore_lock = RTE_RWLOCK_INITIALIZER;
struct lcore_callback {
TAILQ_ENTRY(lcore_callback) next;
char *name;
callback->init = init;
callback->uninit = uninit;
callback->arg = arg;
- rte_spinlock_lock(&lcore_lock);
+ rte_rwlock_write_lock(&lcore_lock);
if (callback->init == NULL)
goto no_init;
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
callback->name, callback->init == NULL ? "NO " : "",
callback->uninit == NULL ? "NO " : "");
out:
- rte_spinlock_unlock(&lcore_lock);
+ rte_rwlock_write_unlock(&lcore_lock);
return callback;
}
if (callback == NULL)
return;
- rte_spinlock_lock(&lcore_lock);
+ rte_rwlock_write_lock(&lcore_lock);
if (callback->uninit == NULL)
goto no_uninit;
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
}
no_uninit:
TAILQ_REMOVE(&lcore_callbacks, callback, next);
- rte_spinlock_unlock(&lcore_lock);
+ rte_rwlock_write_unlock(&lcore_lock);
RTE_LOG(DEBUG, EAL, "Unregistered lcore callback %s-%p.\n",
callback->name, callback->arg);
free_callback(callback);
struct lcore_callback *prev;
unsigned int lcore_id;
- rte_spinlock_lock(&lcore_lock);
+ rte_rwlock_write_lock(&lcore_lock);
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
if (cfg->lcore_role[lcore_id] != ROLE_OFF)
continue;
goto out;
}
out:
- rte_spinlock_unlock(&lcore_lock);
+ rte_rwlock_write_unlock(&lcore_lock);
return lcore_id;
}
struct rte_config *cfg = rte_eal_get_configuration();
struct lcore_callback *callback;
- rte_spinlock_lock(&lcore_lock);
+ rte_rwlock_write_lock(&lcore_lock);
if (cfg->lcore_role[lcore_id] != ROLE_NON_EAL)
goto out;
TAILQ_FOREACH(callback, &lcore_callbacks, next)
cfg->lcore_role[lcore_id] = ROLE_OFF;
cfg->lcore_count--;
out:
- rte_spinlock_unlock(&lcore_lock);
+ rte_rwlock_write_unlock(&lcore_lock);
+}
+
+/*
+ * Invoke cb(lcore_id, arg) for every lcore whose role is not ROLE_OFF,
+ * holding the lcore list read lock for the whole walk (callbacks must
+ * not modify lcore state). Iteration stops at the first callback that
+ * returns non-zero; that value is returned, or 0 if all callbacks
+ * returned 0.
+ */
+int
+rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg)
+{
+ struct rte_config *cfg = rte_eal_get_configuration();
+ unsigned int lcore_id;
+ int ret = 0;
+
+ rte_rwlock_read_lock(&lcore_lock);
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ /* Skip unused slots; RTE, SERVICE and NON_EAL lcores are visited. */
+ if (cfg->lcore_role[lcore_id] == ROLE_OFF)
+ continue;
+ ret = cb(lcore_id, arg);
+ if (ret != 0)
+ break;
+ }
+ rte_rwlock_read_unlock(&lcore_lock);
+ return ret;
+}
+
+/*
+ * rte_lcore_iterate() callback used by rte_lcore_dump(): print one line
+ * describing lcore_id (id, socket, role, cpuset) to the FILE * passed
+ * via arg. A trailing "..." marks a truncated cpuset string. Always
+ * returns 0 so the iteration covers every active lcore.
+ */
+static int
+lcore_dump_cb(unsigned int lcore_id, void *arg)
+{
+ struct rte_config *cfg = rte_eal_get_configuration();
+ char cpuset[RTE_CPU_AFFINITY_STR_LEN];
+ const char *role;
+ FILE *f = arg;
+ int ret;
+
+ /* Translate the role enum to a human readable label. */
+ switch (cfg->lcore_role[lcore_id]) {
+ case ROLE_RTE:
+ role = "RTE";
+ break;
+ case ROLE_SERVICE:
+ role = "SERVICE";
+ break;
+ case ROLE_NON_EAL:
+ role = "NON_EAL";
+ break;
+ default:
+ role = "UNKNOWN";
+ break;
+ }
+
+ /* ret != 0 means the cpuset string did not fit and was truncated. */
+ ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
+ sizeof(cpuset));
+ fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s\n", lcore_id,
+ rte_lcore_to_socket_id(lcore_id), role, cpuset,
+ ret == 0 ? "" : "...");
+ return 0;
+}
+
+/* Dump a one-line description of every active lcore to stream f. */
+void
+rte_lcore_dump(FILE *f)
+{
+ rte_lcore_iterate(lcore_dump_cb, f);
}
}
int
-eal_thread_dump_affinity(char *str, unsigned size)
+eal_thread_dump_affinity(rte_cpuset_t *cpuset, char *str, unsigned int size)
{
- rte_cpuset_t cpuset;
unsigned cpu;
int ret;
unsigned int out = 0;
- rte_thread_get_affinity(&cpuset);
-
for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
- if (!CPU_ISSET(cpu, &cpuset))
+ if (!CPU_ISSET(cpu, cpuset))
continue;
ret = snprintf(str + out,
return ret;
}
+/*
+ * Fetch the calling thread's CPU affinity and format it into str,
+ * delegating to eal_thread_dump_affinity(). Returns that function's
+ * result (0 on success, -1 if the output was truncated).
+ */
+int
+eal_thread_dump_current_affinity(char *str, unsigned int size)
+{
+ rte_cpuset_t cpuset;
+
+ rte_thread_get_affinity(&cpuset);
+ return eal_thread_dump_affinity(&cpuset, str, size);
+}
+
void
__rte_thread_init(unsigned int lcore_id, rte_cpuset_t *cpuset)
{
#define RTE_CPU_AFFINITY_STR_LEN 256
/**
- * Dump the current pthread cpuset.
+ * Dump the cpuset as a human readable string.
* This function is private to EAL.
*
* Note:
* If the dump size is greater than the size of given buffer,
* the string will be truncated and with '\0' at the end.
*
+ * @param cpuset
+ * The CPU affinity object to dump.
* @param str
* The string buffer the cpuset will dump to.
* @param size
* 0 for success, -1 if truncation happens.
*/
int
-eal_thread_dump_affinity(char *str, unsigned size);
+eal_thread_dump_affinity(rte_cpuset_t *cpuset, char *str, unsigned int size);
+
+/**
+ * Dump the current thread cpuset.
+ * This is a wrapper on eal_thread_dump_affinity().
+ */
+int
+eal_thread_dump_current_affinity(char *str, unsigned int size);
#endif /* EAL_THREAD_H */
__rte_thread_init(config->master_lcore,
&lcore_config[config->master_lcore].cpuset);
- ret = eal_thread_dump_affinity(cpuset, sizeof(cpuset));
+ ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset));
RTE_LOG(DEBUG, EAL, "Master lcore %u is ready (tid=%p;cpuset=[%s%s])\n",
config->master_lcore, thread_id, cpuset,
__rte_thread_init(lcore_id, &lcore_config[lcore_id].cpuset);
- ret = eal_thread_dump_affinity(cpuset, sizeof(cpuset));
+ ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset));
RTE_LOG(DEBUG, EAL, "lcore %u is ready (tid=%p;cpuset=[%s%s])\n",
lcore_id, thread_id, cpuset, ret == 0 ? "" : "...");
* If this step succeeds, the callbacks are put in the lcore callbacks list
* that will get called for each lcore allocation/release.
*
- * Note: callbacks execution is serialised under a lock protecting the lcores
- * and callbacks list.
+ * Note: callbacks execution is serialised under a write lock protecting the
+ * lcores and callbacks list.
*
* @param name
* A name serving as a small description for this callback.
void
rte_lcore_callback_unregister(void *handle);
+/**
+ * Callback prototype for iterating over lcores.
+ *
+ * @param lcore_id
+ * The lcore to consider.
+ * @param arg
+ * An opaque pointer coming from the caller.
+ * @return
+ * - 0 lets the iteration continue.
+ * - !0 makes the iteration stop.
+ */
+typedef int (*rte_lcore_iterate_cb)(unsigned int lcore_id, void *arg);
+
+/**
+ * Iterate on all active lcores (ROLE_RTE, ROLE_SERVICE and ROLE_NON_EAL).
+ * No modification on the lcore states is allowed in the callback.
+ *
+ * Note: as opposed to init/uninit callbacks, iteration callbacks can be
+ * invoked in parallel as they are run under a read lock protecting the lcores
+ * and callbacks list.
+ *
+ * @param cb
+ * The callback that gets passed each lcore.
+ * @param arg
+ * An opaque pointer passed to cb.
+ * @return
+ * Same return code as the callback last invocation (see rte_lcore_iterate_cb
+ * description).
+ */
+__rte_experimental
+int
+rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg);
+
+/**
+ * List all lcores.
+ *
+ * @param f
+ * The output stream where the dump should be sent.
+ */
+__rte_experimental
+void
+rte_lcore_dump(FILE *f);
+
/**
* Set core affinity of the current thread.
* Support both EAL and non-EAL thread and update TLS.
__rte_thread_init(config->master_lcore,
&lcore_config[config->master_lcore].cpuset);
- ret = eal_thread_dump_affinity(cpuset, sizeof(cpuset));
+ ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset));
RTE_LOG(DEBUG, EAL, "Master lcore %u is ready (tid=%zx;cpuset=[%s%s])\n",
config->master_lcore, (uintptr_t)thread_id, cpuset,
ret == 0 ? "" : "...");
__rte_thread_init(lcore_id, &lcore_config[lcore_id].cpuset);
- ret = eal_thread_dump_affinity(cpuset, sizeof(cpuset));
+ ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset));
RTE_LOG(DEBUG, EAL, "lcore %u is ready (tid=%zx;cpuset=[%s%s])\n",
lcore_id, (uintptr_t)thread_id, cpuset, ret == 0 ? "" : "...");
rte_eal_vfio_get_vf_token;
rte_lcore_callback_register;
rte_lcore_callback_unregister;
+ rte_lcore_dump;
+ rte_lcore_iterate;
rte_thread_register;
rte_thread_unregister;
};