git.droids-corp.org
/
dpdk.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
ethdev: add represented port action to flow API
[dpdk.git]
/
app
/
test
/
test_func_reentrancy.c
diff --git a/app/test/test_func_reentrancy.c b/app/test/test_func_reentrancy.c
index 2a0c134..838ab6f 100644 (file)
--- a/app/test/test_func_reentrancy.c
+++ b/app/test/test_func_reentrancy.c
@@ -57,8 +57,8 @@
typedef void (*case_clean_t)(unsigned lcore_id);
static rte_atomic32_t obj_count = RTE_ATOMIC32_INIT(0);
static rte_atomic32_t synchro = RTE_ATOMIC32_INIT(0);
static rte_atomic32_t obj_count = RTE_ATOMIC32_INIT(0);
static rte_atomic32_t synchro = RTE_ATOMIC32_INIT(0);
-#define WAIT_SYNCHRO_FOR_SLAVES() do { \
-	if (lcore_self != rte_get_master_lcore()) \
+#define WAIT_SYNCHRO_FOR_WORKERS() do { \
+	if (lcore_self != rte_get_main_lcore()) \
while (rte_atomic32_read(&synchro) == 0); \
} while(0)
while (rte_atomic32_read(&synchro) == 0); \
} while(0)
@@ -70,7 +70,7 @@ test_eal_init_once(__rte_unused void *arg)
{
unsigned lcore_self = rte_lcore_id();
{
unsigned lcore_self = rte_lcore_id();
-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();
rte_atomic32_set(&obj_count, 1); /* silent the check in the caller */
if (rte_eal_init(0, NULL) != -1)
rte_atomic32_set(&obj_count, 1); /* silent the check in the caller */
if (rte_eal_init(0, NULL) != -1)
@@ -89,6 +89,10 @@ ring_clean(unsigned int lcore_id)
char ring_name[MAX_STRING_SIZE];
int i;
char ring_name[MAX_STRING_SIZE];
int i;
+ rp = rte_ring_lookup("fr_test_once");
+ if (rp != NULL)
+ rte_ring_free(rp);
+
for (i = 0; i < MAX_ITER_MULTI; i++) {
snprintf(ring_name, sizeof(ring_name),
"fr_test_%d_%d", lcore_id, i);
for (i = 0; i < MAX_ITER_MULTI; i++) {
snprintf(ring_name, sizeof(ring_name),
"fr_test_%d_%d", lcore_id, i);
@@ -106,7 +110,7 @@ ring_create_lookup(__rte_unused void *arg)
char ring_name[MAX_STRING_SIZE];
int i;
char ring_name[MAX_STRING_SIZE];
int i;
-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();
/* create the same ring simultaneously on all threads */
for (i = 0; i < MAX_ITER_ONCE; i++) {
/* create the same ring simultaneously on all threads */
for (i = 0; i < MAX_ITER_ONCE; i++) {
@@ -148,7 +152,10 @@ mempool_clean(unsigned int lcore_id)
char mempool_name[MAX_STRING_SIZE];
int i;
char mempool_name[MAX_STRING_SIZE];
int i;
- /* verify all ring created successful */
+ mp = rte_mempool_lookup("fr_test_once");
+ if (mp != NULL)
+ rte_mempool_free(mp);
+
for (i = 0; i < MAX_ITER_MULTI; i++) {
snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d",
lcore_id, i);
for (i = 0; i < MAX_ITER_MULTI; i++) {
snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d",
lcore_id, i);
@@ -166,7 +173,7 @@ mempool_create_lookup(__rte_unused void *arg)
char mempool_name[MAX_STRING_SIZE];
int i;
char mempool_name[MAX_STRING_SIZE];
int i;
-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();
/* create the same mempool simultaneously on all threads */
for (i = 0; i < MAX_ITER_ONCE; i++) {
/* create the same mempool simultaneously on all threads */
for (i = 0; i < MAX_ITER_ONCE; i++) {
@@ -208,6 +215,10 @@ hash_clean(unsigned lcore_id)
struct rte_hash *handle;
int i;
struct rte_hash *handle;
int i;
+ handle = rte_hash_find_existing("fr_test_once");
+ if (handle != NULL)
+ rte_hash_free(handle);
+
for (i = 0; i < MAX_ITER_MULTI; i++) {
snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_id, i);
for (i = 0; i < MAX_ITER_MULTI; i++) {
snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_id, i);
@@ -232,7 +243,7 @@ hash_create_free(__rte_unused void *arg)
.socket_id = 0,
};
.socket_id = 0,
};
-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();
/* create the same hash simultaneously on all threads */
hash_params.name = "fr_test_once";
/* create the same hash simultaneously on all threads */
hash_params.name = "fr_test_once";
@@ -272,6 +283,10 @@ fbk_clean(unsigned lcore_id)
struct rte_fbk_hash_table *handle;
int i;
struct rte_fbk_hash_table *handle;
int i;
+ handle = rte_fbk_hash_find_existing("fr_test_once");
+ if (handle != NULL)
+ rte_fbk_hash_free(handle);
+
for (i = 0; i < MAX_ITER_MULTI; i++) {
snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_id, i);
for (i = 0; i < MAX_ITER_MULTI; i++) {
snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_id, i);
@@ -296,7 +311,7 @@ fbk_create_free(__rte_unused void *arg)
.init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
};
.init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
};
-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();
/* create the same fbk hash table simultaneously on all threads */
fbk_params.name = "fr_test_once";
/* create the same fbk hash table simultaneously on all threads */
fbk_params.name = "fr_test_once";
@@ -338,6 +353,10 @@ lpm_clean(unsigned int lcore_id)
struct rte_lpm *lpm;
int i;
struct rte_lpm *lpm;
int i;
+ lpm = rte_lpm_find_existing("fr_test_once");
+ if (lpm != NULL)
+ rte_lpm_free(lpm);
+
for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_id, i);
for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_id, i);
@@ -359,7 +378,7 @@ lpm_create_free(__rte_unused void *arg)
char lpm_name[MAX_STRING_SIZE];
int i;
char lpm_name[MAX_STRING_SIZE];
int i;
-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();
/* create the same lpm simultaneously on all threads */
for (i = 0; i < MAX_ITER_ONCE; i++) {
/* create the same lpm simultaneously on all threads */
for (i = 0; i < MAX_ITER_ONCE; i++) {
@@ -418,11 +437,10 @@ struct test_case test_cases[] = {
static int
launch_test(struct test_case *pt_case)
{
static int
launch_test(struct test_case *pt_case)
{
+ unsigned int lcore_id;
+ unsigned int cores;
+ unsigned int count;
int ret = 0;
int ret = 0;
- unsigned lcore_id;
- unsigned cores_save = rte_lcore_count();
- unsigned cores = RTE_MIN(cores_save, MAX_LCORES);
- unsigned count;
if (pt_case->func == NULL)
return -1;
if (pt_case->func == NULL)
return -1;
@@ -430,7 +448,8 @@ launch_test(struct test_case *pt_case)
rte_atomic32_set(&obj_count, 0);
rte_atomic32_set(&synchro, 0);
rte_atomic32_set(&obj_count, 0);
rte_atomic32_set(&synchro, 0);
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ cores = RTE_MIN(rte_lcore_count(), MAX_LCORES);
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (cores == 1)
break;
cores--;
if (cores == 1)
break;
cores--;
@@ -442,14 +461,12 @@ launch_test(struct test_case *pt_case)
if (pt_case->func(pt_case->arg) < 0)
ret = -1;
if (pt_case->func(pt_case->arg) < 0)
ret = -1;
- cores = cores_save;
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- if (cores == 1)
- break;
- cores--;
+ RTE_LCORE_FOREACH_WORKER(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
ret = -1;
if (rte_eal_wait_lcore(lcore_id) < 0)
ret = -1;
+ }
+ RTE_LCORE_FOREACH(lcore_id) {
if (pt_case->clean != NULL)
pt_case->clean(lcore_id);
}
if (pt_case->clean != NULL)
pt_case->clean(lcore_id);
}