X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest%2Ftest_spinlock.c;h=3f59372300c3087a3a4c29b3431b4b56244d1d49;hb=2c85598c5377ab9b7fdc1b9a91fe5619d15a37d3;hp=6795195aee5df667a970806d426428093b1fff7d;hpb=9119ad305db027b5c87e27b13fde9d60f9aa9a1f;p=dpdk.git

diff --git a/app/test/test_spinlock.c b/app/test/test_spinlock.c
index 6795195aee..3f59372300 100644
--- a/app/test/test_spinlock.c
+++ b/app/test/test_spinlock.c
@@ -17,7 +17,6 @@
 #include <rte_lcore.h>
 #include <rte_cycles.h>
 #include <rte_spinlock.h>
-#include <rte_atomic.h>
 
 #include "test.h"
 
@@ -28,7 +27,7 @@
  * - There is a global spinlock and a table of spinlocks (one per lcore).
  *
  * - The test function takes all of these locks and launches the
- *   ``test_spinlock_per_core()`` function on each core (except the master).
+ *   ``test_spinlock_per_core()`` function on each core (except the main).
  *
  *   - The function takes the global lock, display something, then releases
  *     the global lock.
@@ -49,10 +48,10 @@ static rte_spinlock_t sl_tab[RTE_MAX_LCORE];
 static rte_spinlock_recursive_t slr;
 static unsigned count = 0;
 
-static rte_atomic32_t synchro;
+static uint32_t synchro;
 
 static int
-test_spinlock_per_core(__attribute__((unused)) void *arg)
+test_spinlock_per_core(__rte_unused void *arg)
 {
 	rte_spinlock_lock(&sl);
 	printf("Global lock taken on core %u\n", rte_lcore_id());
@@ -66,7 +65,7 @@ test_spinlock_per_core(__attribute__((unused)) void *arg)
 }
 
 static int
-test_spinlock_recursive_per_core(__attribute__((unused)) void *arg)
+test_spinlock_recursive_per_core(__rte_unused void *arg)
 {
 	unsigned id = rte_lcore_id();
 
@@ -96,33 +95,33 @@ test_spinlock_recursive_per_core(__attribute__((unused)) void *arg)
 }
 
 static rte_spinlock_t lk = RTE_SPINLOCK_INITIALIZER;
-static uint64_t lock_count[RTE_MAX_LCORE] = {0};
+static uint64_t time_count[RTE_MAX_LCORE] = {0};
 
-#define TIME_MS 100
+#define MAX_LOOP 10000
 
 static int
 load_loop_fn(void *func_param)
 {
 	uint64_t time_diff = 0, begin;
 	uint64_t hz = rte_get_timer_hz();
-	uint64_t lcount = 0;
+	volatile uint64_t lcount = 0;
 	const int use_lock = *(int*)func_param;
 	const unsigned lcore = rte_lcore_id();
 
-	/* wait synchro for slaves */
-	if (lcore != rte_get_master_lcore())
-		while (rte_atomic32_read(&synchro) == 0);
+	/* wait synchro for workers */
+	if (lcore != rte_get_main_lcore())
+		rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
 
 	begin = rte_get_timer_cycles();
-	while (time_diff < hz * TIME_MS / 1000) {
+	while (lcount < MAX_LOOP) {
 		if (use_lock)
 			rte_spinlock_lock(&lk);
 		lcount++;
 		if (use_lock)
 			rte_spinlock_unlock(&lk);
-		time_diff = rte_get_timer_cycles() - begin;
 	}
-	lock_count[lcore] = lcount;
+	time_diff = rte_get_timer_cycles() - begin;
+	time_count[lcore] = time_diff * 1000000 / hz;
 	return 0;
 }
 
@@ -136,33 +135,36 @@ test_spinlock_perf(void)
 
 	printf("\nTest with no lock on single core...\n");
 	load_loop_fn(&lock);
-	printf("Core [%u] count = %"PRIu64"\n", lcore, lock_count[lcore]);
-	memset(lock_count, 0, sizeof(lock_count));
+	printf("Core [%u] Cost Time = %"PRIu64" us\n", lcore,
+			time_count[lcore]);
+	memset(time_count, 0, sizeof(time_count));
 
 	printf("\nTest with lock on single core...\n");
 	lock = 1;
 	load_loop_fn(&lock);
-	printf("Core [%u] count = %"PRIu64"\n", lcore, lock_count[lcore]);
-	memset(lock_count, 0, sizeof(lock_count));
+	printf("Core [%u] Cost Time = %"PRIu64" us\n", lcore,
+			time_count[lcore]);
+	memset(time_count, 0, sizeof(time_count));
 
 	printf("\nTest with lock on %u cores...\n", rte_lcore_count());
 
-	/* Clear synchro and start slaves */
-	rte_atomic32_set(&synchro, 0);
-	rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MASTER);
+	/* Clear synchro and start workers */
+	__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+	rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
 
-	/* start synchro and launch test on master */
-	rte_atomic32_set(&synchro, 1);
+	/* start synchro and launch test on main */
+	__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
 	load_loop_fn(&lock);
 
 	rte_eal_mp_wait_lcore();
 
 	RTE_LCORE_FOREACH(i) {
-		printf("Core [%u] count = %"PRIu64"\n", i, lock_count[i]);
-		total += lock_count[i];
+		printf("Core [%u] Cost Time = %"PRIu64" us\n", i,
+				time_count[i]);
+		total += time_count[i];
 	}
 
-	printf("Total count = %"PRIu64"\n", total);
+	printf("Total Cost Time = %"PRIu64" us\n", total);
 
 	return 0;
 }
@@ -175,7 +177,7 @@ test_spinlock_perf(void)
  * checked as the result later.
  */
 static int
-test_spinlock_try(__attribute__((unused)) void *arg)
+test_spinlock_try(__rte_unused void *arg)
 {
 	if (rte_spinlock_trylock(&sl_try) == 0) {
 		rte_spinlock_lock(&sl);
@@ -197,8 +199,8 @@ test_spinlock(void)
 	int ret = 0;
 	int i;
 
-	/* slave cores should be waiting: print it */
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	/* worker cores should be waiting: print it */
+	RTE_LCORE_FOREACH_WORKER(i) {
 		printf("lcore %d state: %d\n", i,
 		       (int) rte_eal_get_lcore_state(i));
 	}
@@ -211,19 +213,19 @@ test_spinlock(void)
 
 	rte_spinlock_lock(&sl);
 
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	RTE_LCORE_FOREACH_WORKER(i) {
 		rte_spinlock_lock(&sl_tab[i]);
 		rte_eal_remote_launch(test_spinlock_per_core, NULL, i);
 	}
 
-	/* slave cores should be busy: print it */
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	/* worker cores should be busy: print it */
+	RTE_LCORE_FOREACH_WORKER(i) {
 		printf("lcore %d state: %d\n", i,
 		       (int) rte_eal_get_lcore_state(i));
 	}
 	rte_spinlock_unlock(&sl);
 
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	RTE_LCORE_FOREACH_WORKER(i) {
 		rte_spinlock_unlock(&sl_tab[i]);
 		rte_delay_ms(10);
 	}
@@ -242,7 +244,7 @@ test_spinlock(void)
 	} else
 		rte_spinlock_recursive_unlock(&slr);
 
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	RTE_LCORE_FOREACH_WORKER(i) {
 		rte_eal_remote_launch(test_spinlock_recursive_per_core, NULL, i);
 	}
 	rte_spinlock_recursive_unlock(&slr);
@@ -250,12 +252,12 @@ test_spinlock(void)
 
 	/*
 	 * Test if it could return immediately from try-locking a locked object.
-	 * Here it will lock the spinlock object first, then launch all the slave
+	 * Here it will lock the spinlock object first, then launch all the worker
 	 * lcores to trylock the same spinlock object.
-	 * All the slave lcores should give up try-locking a locked object and
+	 * All the worker lcores should give up try-locking a locked object and
 	 * return immediately, and then increase the "count" initialized with zero
 	 * by one per times.
-	 * We can check if the "count" is finally equal to the number of all slave
+	 * We can check if the "count" is finally equal to the number of all worker
	 * lcores to see if the behavior of try-locking a locked spinlock object
 	 * is correct.
 	 */
@@ -263,7 +265,7 @@ test_spinlock(void)
 		return -1;
 	}
 	count = 0;
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	RTE_LCORE_FOREACH_WORKER(i) {
 		rte_eal_remote_launch(test_spinlock_try, NULL, i);
 	}
 	rte_eal_mp_wait_lcore();
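
Note on the change itself: the functional core of this patch is the replacement of the old rte_atomic32_t start flag with a plain uint32_t driven by the compiler's C11-style __atomic built-ins and rte_wait_until_equal_32(), together with the main/worker lcore renaming (rte_get_main_lcore(), SKIP_MAIN, RTE_LCORE_FOREACH_WORKER). Below is a minimal, self-contained sketch of that handshake pattern, assuming a DPDK 20.11+ EAL; the names worker_fn and go are illustrative and are not part of the patch.

#include <stdio.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_pause.h>

/* Start flag playing the same role as "synchro" in the test. */
static uint32_t go;

static int
worker_fn(__rte_unused void *arg)
{
	/* Workers spin here until the main lcore releases them,
	 * just as load_loop_fn() polls "synchro" in the test. */
	rte_wait_until_equal_32(&go, 1, __ATOMIC_RELAXED);
	printf("lcore %u released\n", rte_lcore_id());
	return 0;
}

int
main(int argc, char **argv)
{
	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* Clear the flag, then park worker_fn on every worker lcore;
	 * SKIP_MAIN leaves the main lcore free to run it directly. */
	__atomic_store_n(&go, 0, __ATOMIC_RELAXED);
	rte_eal_mp_remote_launch(worker_fn, NULL, SKIP_MAIN);

	/* Release all workers at once, then do the same work on main. */
	__atomic_store_n(&go, 1, __ATOMIC_RELAXED);
	worker_fn(NULL);

	rte_eal_mp_wait_lcore();
	rte_eal_cleanup();
	return 0;
}

Relaxed ordering is enough here (and in the patched test) because the flag only gates the start of otherwise independent work; if the workers had to observe data written by the main lcore before the release, the store and the wait would need release/acquire ordering instead.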