X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest%2Ftest_func_reentrancy.c;h=838ab6f0f90b03303becc71a4e8bef7d58f832c1;hb=88caad251c8de3a84e353b0b2a27014bc303df87;hp=2a0c134a99be39345290d501535513fda6ebf3c7;hpb=a8d0d473a0a89b3c50813e3e144e9a5377429f24;p=dpdk.git

diff --git a/app/test/test_func_reentrancy.c b/app/test/test_func_reentrancy.c
index 2a0c134a99..838ab6f0f9 100644
--- a/app/test/test_func_reentrancy.c
+++ b/app/test/test_func_reentrancy.c
@@ -57,8 +57,8 @@ typedef void (*case_clean_t)(unsigned lcore_id);
 static rte_atomic32_t obj_count = RTE_ATOMIC32_INIT(0);
 static rte_atomic32_t synchro = RTE_ATOMIC32_INIT(0);
 
-#define WAIT_SYNCHRO_FOR_SLAVES() do{ \
-	if (lcore_self != rte_get_master_lcore()) \
+#define WAIT_SYNCHRO_FOR_WORKERS() do { \
+	if (lcore_self != rte_get_main_lcore()) \
 		while (rte_atomic32_read(&synchro) == 0); \
 } while(0)
 
@@ -70,7 +70,7 @@ test_eal_init_once(__rte_unused void *arg)
 {
 	unsigned lcore_self = rte_lcore_id();
 
-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();
 
 	rte_atomic32_set(&obj_count, 1); /* silent the check in the caller */
 	if (rte_eal_init(0, NULL) != -1)
@@ -89,6 +89,10 @@ ring_clean(unsigned int lcore_id)
 	char ring_name[MAX_STRING_SIZE];
 	int i;
 
+	rp = rte_ring_lookup("fr_test_once");
+	if (rp != NULL)
+		rte_ring_free(rp);
+
 	for (i = 0; i < MAX_ITER_MULTI; i++) {
 		snprintf(ring_name, sizeof(ring_name),
 				"fr_test_%d_%d", lcore_id, i);
@@ -106,7 +110,7 @@ ring_create_lookup(__rte_unused void *arg)
 	char ring_name[MAX_STRING_SIZE];
 	int i;
 
-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();
 
 	/* create the same ring simultaneously on all threads */
 	for (i = 0; i < MAX_ITER_ONCE; i++) {
@@ -148,7 +152,10 @@ mempool_clean(unsigned int lcore_id)
 	char mempool_name[MAX_STRING_SIZE];
 	int i;
 
-	/* verify all ring created successful */
+	mp = rte_mempool_lookup("fr_test_once");
+	if (mp != NULL)
+		rte_mempool_free(mp);
+
 	for (i = 0; i < MAX_ITER_MULTI; i++) {
 		snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d",
 			 lcore_id, i);
@@ -166,7 +173,7 @@ mempool_create_lookup(__rte_unused void *arg)
 	char mempool_name[MAX_STRING_SIZE];
 	int i;
 
-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();
 
 	/* create the same mempool simultaneously on all threads */
 	for (i = 0; i < MAX_ITER_ONCE; i++) {
@@ -208,6 +215,10 @@ hash_clean(unsigned lcore_id)
 	struct rte_hash *handle;
 	int i;
 
+	handle = rte_hash_find_existing("fr_test_once");
+	if (handle != NULL)
+		rte_hash_free(handle);
+
 	for (i = 0; i < MAX_ITER_MULTI; i++) {
 		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_id, i);
 
@@ -232,7 +243,7 @@ hash_create_free(__rte_unused void *arg)
 		.socket_id = 0,
 	};
 
-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();
 
 	/* create the same hash simultaneously on all threads */
 	hash_params.name = "fr_test_once";
@@ -272,6 +283,10 @@ fbk_clean(unsigned lcore_id)
 	struct rte_fbk_hash_table *handle;
 	int i;
 
+	handle = rte_fbk_hash_find_existing("fr_test_once");
+	if (handle != NULL)
+		rte_fbk_hash_free(handle);
+
 	for (i = 0; i < MAX_ITER_MULTI; i++) {
 		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_id, i);
 
@@ -296,7 +311,7 @@ fbk_create_free(__rte_unused void *arg)
 		.init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
 	};
 
-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();
 
 	/* create the same fbk hash table simultaneously on all threads */
 	fbk_params.name = "fr_test_once";
@@ -338,6 +353,10 @@ lpm_clean(unsigned int lcore_id)
 	struct rte_lpm *lpm;
 	int i;
 
+	lpm = rte_lpm_find_existing("fr_test_once");
+	if (lpm != NULL)
+		rte_lpm_free(lpm);
+
 	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
 		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_id, i);
 
@@ -359,7 +378,7 @@ lpm_create_free(__rte_unused void *arg)
 	char lpm_name[MAX_STRING_SIZE];
 	int i;
 
-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();
 
 	/* create the same lpm simultaneously on all threads */
 	for (i = 0; i < MAX_ITER_ONCE; i++) {
@@ -418,11 +437,10 @@ struct test_case test_cases[] = {
 static int
 launch_test(struct test_case *pt_case)
 {
+	unsigned int lcore_id;
+	unsigned int cores;
+	unsigned int count;
 	int ret = 0;
-	unsigned lcore_id;
-	unsigned cores_save = rte_lcore_count();
-	unsigned cores = RTE_MIN(cores_save, MAX_LCORES);
-	unsigned count;
 
 	if (pt_case->func == NULL)
 		return -1;
@@ -430,7 +448,8 @@ launch_test(struct test_case *pt_case)
 	rte_atomic32_set(&obj_count, 0);
 	rte_atomic32_set(&synchro, 0);
 
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+	cores = RTE_MIN(rte_lcore_count(), MAX_LCORES);
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
 		if (cores == 1)
 			break;
 		cores--;
@@ -442,14 +461,12 @@ launch_test(struct test_case *pt_case)
 	if (pt_case->func(pt_case->arg) < 0)
 		ret = -1;
 
-	cores = cores_save;
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
-		if (cores == 1)
-			break;
-		cores--;
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
 		if (rte_eal_wait_lcore(lcore_id) < 0)
 			ret = -1;
+	}
 
+	RTE_LCORE_FOREACH(lcore_id) {
 		if (pt_case->clean != NULL)
 			pt_case->clean(lcore_id);
 	}