#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>
-#ifdef RTE_LIBRTE_HASH
+#ifdef RTE_LIB_HASH
#include <rte_hash.h>
#include <rte_fbk_hash.h>
#include <rte_jhash.h>
-#endif /* RTE_LIBRTE_HASH */
+#endif /* RTE_LIB_HASH */
-#ifdef RTE_LIBRTE_LPM
+#ifdef RTE_LIB_LPM
#include <rte_lpm.h>
-#endif /* RTE_LIBRTE_LPM */
+#endif /* RTE_LIB_LPM */
#include <rte_string_fns.h>
#define MAX_LCORES (RTE_MAX_MEMZONE / (MAX_ITER_MULTI * 4U))
-static rte_atomic32_t obj_count = RTE_ATOMIC32_INIT(0);
-static rte_atomic32_t synchro = RTE_ATOMIC32_INIT(0);
+static uint32_t obj_count;
+static uint32_t synchro;
-#define WAIT_SYNCHRO_FOR_SLAVES() do{ \
- if (lcore_self != rte_get_master_lcore()) \
- while (rte_atomic32_read(&synchro) == 0); \
+#define WAIT_SYNCHRO_FOR_WORKERS() do { \
+ if (lcore_self != rte_get_main_lcore()) \
+ rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED); \
} while(0)
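Note: rte_wait_until_equal_32() comes from <rte_pause.h> and spins on a
relaxed atomic load with an architecture-specific pause hint, replacing the
old raw busy-loop. A minimal sketch of the equivalent open-coded wait,
assuming <rte_pause.h> is included:

    while (__atomic_load_n(&synchro, __ATOMIC_RELAXED) != 1)
        rte_pause(); /* hint the CPU that this is a spin-wait */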
/*
* rte_eal_init can only be called once
*/
static int
-test_eal_init_once(__attribute__((unused)) void *arg)
+test_eal_init_once(__rte_unused void *arg)
{
unsigned lcore_self = rte_lcore_id();
- WAIT_SYNCHRO_FOR_SLAVES();
+ WAIT_SYNCHRO_FOR_WORKERS();
- rte_atomic32_set(&obj_count, 1); /* silent the check in the caller */
+ __atomic_store_n(&obj_count, 1, __ATOMIC_RELAXED); /* silence the check in the caller */
if (rte_eal_init(0, NULL) != -1)
return -1;
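rte_eal_init() refuses re-initialization: once the EAL is up, any further
call returns -1 and sets rte_errno to EALREADY, which is exactly what this
case asserts. A minimal standalone check, assuming <rte_errno.h>:

    if (rte_eal_init(0, NULL) == -1 && rte_errno == EALREADY)
        printf("EAL already initialized, as expected\n");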
char ring_name[MAX_STRING_SIZE];
int i;
+ rp = rte_ring_lookup("fr_test_once");
+ if (rp != NULL)
+ rte_ring_free(rp);
+
for (i = 0; i < MAX_ITER_MULTI; i++) {
snprintf(ring_name, sizeof(ring_name),
"fr_test_%d_%d", lcore_id, i);
}
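The added lookup-and-free guards against a leftover "fr_test_once" ring
from a previous run. The loop then applies the same pattern to each
generated name; a sketch of the elided loop body, assuming rp is declared
earlier in ring_clean as struct rte_ring *rp:

    rp = rte_ring_lookup(ring_name);
    if (rp != NULL) /* free only rings this test actually created */
        rte_ring_free(rp);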
static int
-ring_create_lookup(__attribute__((unused)) void *arg)
+ring_create_lookup(__rte_unused void *arg)
{
unsigned lcore_self = rte_lcore_id();
struct rte_ring * rp;
char ring_name[MAX_STRING_SIZE];
int i;
- WAIT_SYNCHRO_FOR_SLAVES();
+ WAIT_SYNCHRO_FOR_WORKERS();
/* create the same ring simultaneously on all threads */
for (i = 0; i < MAX_ITER_ONCE; i++) {
rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
if (rp != NULL)
- rte_atomic32_inc(&obj_count);
+ __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
}
/* create/lookup new ring several times */
}
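The elided "create/lookup new ring several times" loop creates a per-lcore
ring and checks that a lookup returns the same handle; a hedged sketch
(the 4096 count mirrors the create-once call above, the rest is assumed):

    for (i = 0; i < MAX_ITER_MULTI; i++) {
        snprintf(ring_name, sizeof(ring_name),
                 "fr_test_%d_%d", lcore_self, i);
        rp = rte_ring_create(ring_name, 4096, SOCKET_ID_ANY, 0);
        if (rp == NULL || rte_ring_lookup(ring_name) != rp)
            return -1; /* creation failed or lookup mismatch */
    }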
static void
-my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
+my_obj_init(struct rte_mempool *mp, __rte_unused void *arg,
void *obj, unsigned i)
{
uint32_t *objnum = obj;
char mempool_name[MAX_STRING_SIZE];
int i;
- /* verify all ring created successful */
+ mp = rte_mempool_lookup("fr_test_once");
+ if (mp != NULL)
+ rte_mempool_free(mp);
+
for (i = 0; i < MAX_ITER_MULTI; i++) {
snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d",
lcore_id, i);
}
static int
-mempool_create_lookup(__attribute__((unused)) void *arg)
+mempool_create_lookup(__rte_unused void *arg)
{
unsigned lcore_self = rte_lcore_id();
struct rte_mempool * mp;
char mempool_name[MAX_STRING_SIZE];
int i;
- WAIT_SYNCHRO_FOR_SLAVES();
+ WAIT_SYNCHRO_FOR_WORKERS();
/* create the same mempool simultaneously on all threads */
for (i = 0; i < MAX_ITER_ONCE; i++) {
my_obj_init, NULL,
SOCKET_ID_ANY, 0);
if (mp != NULL)
- rte_atomic32_inc(&obj_count);
+ __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
}
/* create/lookup new mempool several times */
return 0;
}
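The rte_mempool_create() call feeding my_obj_init above is elided; its
argument order is (name, n, elt_size, cache_size, private_data_size,
mp_init, mp_init_arg, obj_init, obj_init_arg, socket_id, flags). A hedged
reconstruction, where MEMPOOL_SIZE and MEMPOOL_ELT_SIZE are assumed to be
constants defined elsewhere in this test:

    mp = rte_mempool_create("fr_test_once", MEMPOOL_SIZE,
                            MEMPOOL_ELT_SIZE, 0, 0,
                            NULL, NULL,        /* no pool constructor */
                            my_obj_init, NULL, /* per-object init */
                            SOCKET_ID_ANY, 0);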
-#ifdef RTE_LIBRTE_HASH
+#ifdef RTE_LIB_HASH
static void
hash_clean(unsigned lcore_id)
{
struct rte_hash *handle;
int i;
+ handle = rte_hash_find_existing("fr_test_once");
+ if (handle != NULL)
+ rte_hash_free(handle);
+
for (i = 0; i < MAX_ITER_MULTI; i++) {
snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_id, i);
}
static int
-hash_create_free(__attribute__((unused)) void *arg)
+hash_create_free(__rte_unused void *arg)
{
unsigned lcore_self = rte_lcore_id();
struct rte_hash *handle;
.socket_id = 0,
};
- WAIT_SYNCHRO_FOR_SLAVES();
+ WAIT_SYNCHRO_FOR_WORKERS();
/* create the same hash simultaneously on all threads */
hash_params.name = "fr_test_once";
for (i = 0; i < MAX_ITER_ONCE; i++) {
handle = rte_hash_create(&hash_params);
if (handle != NULL)
- rte_atomic32_inc(&obj_count);
+ __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
}
- /* create mutiple times simultaneously */
+ /* create multiple times simultaneously */
for (i = 0; i < MAX_ITER_MULTI; i++) {
snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
hash_params.name = hash_name;
struct rte_fbk_hash_table *handle;
int i;
+ handle = rte_fbk_hash_find_existing("fr_test_once");
+ if (handle != NULL)
+ rte_fbk_hash_free(handle);
+
for (i = 0; i < MAX_ITER_MULTI; i++) {
snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_id, i);
}
static int
-fbk_create_free(__attribute__((unused)) void *arg)
+fbk_create_free(__rte_unused void *arg)
{
unsigned lcore_self = rte_lcore_id();
struct rte_fbk_hash_table *handle;
.init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
};
- WAIT_SYNCHRO_FOR_SLAVES();
+ WAIT_SYNCHRO_FOR_WORKERS();
/* create the same fbk hash table simultaneously on all threads */
fbk_params.name = "fr_test_once";
for (i = 0; i < MAX_ITER_ONCE; i++) {
handle = rte_fbk_hash_create(&fbk_params);
if (handle != NULL)
- rte_atomic32_inc(&obj_count);
+ __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
}
- /* create mutiple fbk tables simultaneously */
+ /* create multiple fbk tables simultaneously */
for (i = 0; i < MAX_ITER_MULTI; i++) {
snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
fbk_params.name = fbk_name;
return 0;
}
-#endif /* RTE_LIBRTE_HASH */
+#endif /* RTE_LIB_HASH */
-#ifdef RTE_LIBRTE_LPM
+#ifdef RTE_LIB_LPM
static void
lpm_clean(unsigned int lcore_id)
{
struct rte_lpm *lpm;
int i;
+ lpm = rte_lpm_find_existing("fr_test_once");
+ if (lpm != NULL)
+ rte_lpm_free(lpm);
+
for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_id, i);
}
static int
-lpm_create_free(__attribute__((unused)) void *arg)
+lpm_create_free(__rte_unused void *arg)
{
unsigned lcore_self = rte_lcore_id();
struct rte_lpm *lpm;
char lpm_name[MAX_STRING_SIZE];
int i;
- WAIT_SYNCHRO_FOR_SLAVES();
+ WAIT_SYNCHRO_FOR_WORKERS();
/* create the same lpm simultaneously on all threads */
for (i = 0; i < MAX_ITER_ONCE; i++) {
lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, &config);
if (lpm != NULL)
- rte_atomic32_inc(&obj_count);
+ __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
}
- /* create mutiple fbk tables simultaneously */
+ /* create multiple LPM tables simultaneously */
for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, &config);
return 0;
}
-#endif /* RTE_LIBRTE_LPM */
+#endif /* RTE_LIB_LPM */
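The config handed to rte_lpm_create() is elided in this excerpt; struct
rte_lpm_config carries max_rules, number_tbl8s, and flags. A plausible
definition (the values are assumptions, sized small since reentrancy, not
capacity, is under test):

    struct rte_lpm_config config = {
        .max_rules = 4,
        .number_tbl8s = 256,
        .flags = 0,
    };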
struct test_case{
case_func_t func;
{ ring_create_lookup, NULL, ring_clean, "ring create/lookup" },
{ mempool_create_lookup, NULL, mempool_clean,
"mempool create/lookup" },
-#ifdef RTE_LIBRTE_HASH
+#ifdef RTE_LIB_HASH
{ hash_create_free, NULL, hash_clean, "hash create/free" },
{ fbk_create_free, NULL, fbk_clean, "fbk create/free" },
-#endif /* RTE_LIBRTE_HASH */
-#ifdef RTE_LIBRTE_LPM
+#endif /* RTE_LIB_HASH */
+#ifdef RTE_LIB_LPM
{ lpm_create_free, NULL, lpm_clean, "lpm create/free" },
-#endif /* RTE_LIBRTE_LPM */
+#endif /* RTE_LIB_LPM */
};
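For reference, the initializers above imply this shape for struct
test_case (everything beyond func is inferred and should be treated as an
assumption):

    typedef int (*case_func_t)(void *arg);
    typedef void (*case_clean_t)(unsigned int lcore_id);

    struct test_case {
        case_func_t func;   /* test body, run on main and workers */
        void *arg;
        case_clean_t clean; /* per-lcore cleanup after the run */
        char name[MAX_STRING_SIZE];
    };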
/**
static int
launch_test(struct test_case *pt_case)
{
+ unsigned int lcore_id;
+ unsigned int cores;
+ unsigned int count;
int ret = 0;
- unsigned lcore_id;
- unsigned cores_save = rte_lcore_count();
- unsigned cores = RTE_MIN(cores_save, MAX_LCORES);
- unsigned count;
if (pt_case->func == NULL)
return -1;
- rte_atomic32_set(&obj_count, 0);
- rte_atomic32_set(&synchro, 0);
+ __atomic_store_n(&obj_count, 0, __ATOMIC_RELAXED);
+ __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ cores = RTE_MIN(rte_lcore_count(), MAX_LCORES);
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (cores == 1)
break;
cores--;
rte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);
}
- rte_atomic32_set(&synchro, 1);
+ __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
if (pt_case->func(pt_case->arg) < 0)
ret = -1;
- cores = cores_save;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- if (cores == 1)
- break;
- cores--;
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
ret = -1;
+ }
+ RTE_LCORE_FOREACH(lcore_id) {
if (pt_case->clean != NULL)
pt_case->clean(lcore_id);
}
- count = rte_atomic32_read(&obj_count);
+ count = __atomic_load_n(&obj_count, __ATOMIC_RELAXED);
if (count != 1) {
printf("%s: common object allocated %d times (should be 1)\n",
pt_case->name, count);
uint32_t case_id;
struct test_case *pt_case = NULL;
+ if (RTE_EXEC_ENV_IS_WINDOWS)
+ return TEST_SKIPPED;
+
if (rte_lcore_count() < 2) {
printf("Not enough cores for func_reentrancy_autotest, expecting at least 2\n");
return TEST_SKIPPED;