diff --git a/app/test/test_spinlock.c b/app/test/test_spinlock.c
index 78d8a0fb26..3f59372300 100644
--- a/app/test/test_spinlock.c
+++ b/app/test/test_spinlock.c
@@ -1,54 +1,19 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  version: DPDK.L.1.2.3-3
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
  */
 
 #include <stdio.h>
 #include <stdarg.h>
 #include <string.h>
+#include <inttypes.h>
 #include <unistd.h>
 #include <sys/queue.h>
 
-#include <cmdline_parse.h>
-
 #include <rte_common.h>
 #include <rte_memory.h>
-#include <rte_memzone.h>
 #include <rte_per_lcore.h>
 #include <rte_launch.h>
-#include <rte_tailq.h>
 #include <rte_eal.h>
-#include <rte_per_lcore.h>
 #include <rte_lcore.h>
 #include <rte_cycles.h>
 #include <rte_spinlock.h>
@@ -62,7 +27,7 @@
  * - There is a global spinlock and a table of spinlocks (one per lcore).
  *
  * - The test function takes all of these locks and launches the
- *   ``test_spinlock_per_core()`` function on each core (except the master).
+ *   ``test_spinlock_per_core()`` function on each core (except the main).
  *
  *   - The function takes the global lock, display something, then releases
  *     the global lock.
@@ -81,10 +46,12 @@
 static rte_spinlock_t sl, sl_try;
 static rte_spinlock_t sl_tab[RTE_MAX_LCORE];
 static rte_spinlock_recursive_t slr;
-static unsigned count;
+static unsigned count = 0;
+
+static uint32_t synchro;
 
 static int
-test_spinlock_per_core(__attribute__((unused)) void *arg)
+test_spinlock_per_core(__rte_unused void *arg)
 {
 	rte_spinlock_lock(&sl);
 	printf("Global lock taken on core %u\n", rte_lcore_id());
@@ -98,7 +65,7 @@ test_spinlock_per_core(__attribute__((unused)) void *arg)
 }
 
 static int
-test_spinlock_recursive_per_core(__attribute__((unused)) void *arg)
+test_spinlock_recursive_per_core(__rte_unused void *arg)
 {
 	unsigned id = rte_lcore_id();
 
@@ -127,72 +94,90 @@ test_spinlock_recursive_per_core(__attribute__((unused)) void *arg)
 	return 0;
 }
 
-static volatile int count1, count2;
 static rte_spinlock_t lk = RTE_SPINLOCK_INITIALIZER;
-static unsigned int max = 10000000; /* 10M */
-static volatile uint64_t looptime[RTE_MAX_LCORE];
+static uint64_t time_count[RTE_MAX_LCORE] = {0};
+
+#define MAX_LOOP 10000
 
 static int
-load_loop_fn(__attribute__((unused)) void *dummy)
+load_loop_fn(void *func_param)
 {
-	uint64_t end, begin;
-	begin = rte_get_hpet_cycles();
-	unsigned int i = 0;
-	for ( i = 0; i < max; i++) {
-		rte_spinlock_lock(&lk);
-		count1++;
-		rte_spinlock_unlock(&lk);
-		count2++;
+	uint64_t time_diff = 0, begin;
+	uint64_t hz = rte_get_timer_hz();
+	volatile uint64_t lcount = 0;
+	const int use_lock = *(int*)func_param;
+	const unsigned lcore = rte_lcore_id();
+
+	/* wait synchro for workers */
+	if (lcore != rte_get_main_lcore())
+		rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
+
+	begin = rte_get_timer_cycles();
+	while (lcount < MAX_LOOP) {
+		if (use_lock)
+			rte_spinlock_lock(&lk);
+		lcount++;
+		if (use_lock)
+			rte_spinlock_unlock(&lk);
 	}
-	end = rte_get_hpet_cycles();
-	looptime[rte_lcore_id()] = end - begin;
+	time_diff = rte_get_timer_cycles() - begin;
+	time_count[lcore] = time_diff * 1000000 / hz;
+
 	return 0;
 }
 
 static int
-test_spinlock_load(void)
+test_spinlock_perf(void)
 {
-	if (rte_lcore_count()<= 1) {
-		printf("no cores counted\n");
-		return -1;
-	}
-	printf ("Running %u tests.......\n", max);
-	printf ("Number of cores = %u\n", rte_lcore_count());
+	unsigned int i;
+	uint64_t total = 0;
+	int lock = 0;
+	const unsigned lcore = rte_lcore_id();
+
+	printf("\nTest with no lock on single core...\n");
+	load_loop_fn(&lock);
+	printf("Core [%u] Cost Time = %"PRIu64" us\n", lcore,
+	       time_count[lcore]);
+	memset(time_count, 0, sizeof(time_count));
+
+	printf("\nTest with lock on single core...\n");
+	lock = 1;
+	load_loop_fn(&lock);
+	printf("Core [%u] Cost Time = %"PRIu64" us\n", lcore,
+	       time_count[lcore]);
+	memset(time_count, 0, sizeof(time_count));
+
+	printf("\nTest with lock on %u cores...\n", rte_lcore_count());
+
+	/* Clear synchro and start workers */
+	__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+	rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
+
+	/* start synchro and launch test on main */
+	__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
+	load_loop_fn(&lock);
 
-	rte_eal_mp_remote_launch(load_loop_fn, NULL , CALL_MASTER);
 	rte_eal_mp_wait_lcore();
 
-	unsigned int k = 0;
-	uint64_t avgtime = 0;
-
-	RTE_LCORE_FOREACH(k) {
-		printf("Core [%u] time = %"PRIu64"\n", k, looptime[k]);
-		avgtime += looptime[k];
+	RTE_LCORE_FOREACH(i) {
+		printf("Core [%u] Cost Time = %"PRIu64" us\n", i,
+		       time_count[i]);
+		total += time_count[i];
 	}
 
-	avgtime = avgtime / rte_lcore_count();
-	printf("Average time = %"PRIu64"\n", avgtime);
+	printf("Total Cost Time = %"PRIu64" us\n", total);
%"PRIu64" us\n", total); - int check = 0; - check = max * rte_lcore_count(); - if (count1 == check && count2 != check) - printf("Passed Load test\n"); - else { - printf("Failed load test\n"); - return -1; - } return 0; } /* * Use rte_spinlock_trylock() to trylock a spinlock object, - * If it could not lock the object sucessfully, it would + * If it could not lock the object successfully, it would * return immediately and the variable of "count" would be * increased by one per times. the value of "count" could be * checked as the result later. */ static int -test_spinlock_try(__attribute__((unused)) void *arg) +test_spinlock_try(__rte_unused void *arg) { if (rte_spinlock_trylock(&sl_try) == 0) { rte_spinlock_lock(&sl); @@ -208,14 +193,14 @@ test_spinlock_try(__attribute__((unused)) void *arg) * Test rte_eal_get_lcore_state() in addition to spinlocks * as we have "waiting" then "running" lcores. */ -int +static int test_spinlock(void) { int ret = 0; int i; - /* slave cores should be waiting: print it */ - RTE_LCORE_FOREACH_SLAVE(i) { + /* worker cores should be waiting: print it */ + RTE_LCORE_FOREACH_WORKER(i) { printf("lcore %d state: %d\n", i, (int) rte_eal_get_lcore_state(i)); } @@ -228,28 +213,25 @@ test_spinlock(void) rte_spinlock_lock(&sl); - RTE_LCORE_FOREACH_SLAVE(i) { + RTE_LCORE_FOREACH_WORKER(i) { rte_spinlock_lock(&sl_tab[i]); rte_eal_remote_launch(test_spinlock_per_core, NULL, i); } - /* slave cores should be busy: print it */ - RTE_LCORE_FOREACH_SLAVE(i) { + /* worker cores should be busy: print it */ + RTE_LCORE_FOREACH_WORKER(i) { printf("lcore %d state: %d\n", i, (int) rte_eal_get_lcore_state(i)); } rte_spinlock_unlock(&sl); - RTE_LCORE_FOREACH_SLAVE(i) { + RTE_LCORE_FOREACH_WORKER(i) { rte_spinlock_unlock(&sl_tab[i]); - rte_delay_ms(100); + rte_delay_ms(10); } rte_eal_mp_wait_lcore(); - if (test_spinlock_load()<0) - return -1; - rte_spinlock_recursive_lock(&slr); /* @@ -262,7 +244,7 @@ test_spinlock(void) } else rte_spinlock_recursive_unlock(&slr); - RTE_LCORE_FOREACH_SLAVE(i) { + RTE_LCORE_FOREACH_WORKER(i) { rte_eal_remote_launch(test_spinlock_recursive_per_core, NULL, i); } rte_spinlock_recursive_unlock(&slr); @@ -270,12 +252,12 @@ test_spinlock(void) /* * Test if it could return immediately from try-locking a locked object. - * Here it will lock the spinlock object first, then launch all the slave + * Here it will lock the spinlock object first, then launch all the worker * lcores to trylock the same spinlock object. - * All the slave lcores should give up try-locking a locked object and + * All the worker lcores should give up try-locking a locked object and * return immediately, and then increase the "count" initialized with zero * by one per times. - * We can check if the "count" is finally equal to the number of all slave + * We can check if the "count" is finally equal to the number of all worker * lcores to see if the behavior of try-locking a locked spinlock object * is correct. */ @@ -283,7 +265,7 @@ test_spinlock(void) return -1; } count = 0; - RTE_LCORE_FOREACH_SLAVE(i) { + RTE_LCORE_FOREACH_WORKER(i) { rte_eal_remote_launch(test_spinlock_try, NULL, i); } rte_eal_mp_wait_lcore(); @@ -314,5 +296,10 @@ test_spinlock(void) rte_spinlock_recursive_unlock(&slr); rte_spinlock_recursive_unlock(&slr); + if (test_spinlock_perf() < 0) + return -1; + return ret; } + +REGISTER_TEST_COMMAND(spinlock_autotest, test_spinlock);