X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=app%2Ftest%2Ftest_spinlock.c;h=3f59372300c3087a3a4c29b3431b4b56244d1d49;hb=6622d9c97e2fb7b3b2867749a8f6f4ca1373ee79;hp=fb148cb86e7620638fc8308841c8dc3dc53fb673;hpb=e9d48c0072d36eb6423b45fba4ec49d0def6c36f;p=dpdk.git

diff --git a/app/test/test_spinlock.c b/app/test/test_spinlock.c
index fb148cb86e..3f59372300 100644
--- a/app/test/test_spinlock.c
+++ b/app/test/test_spinlock.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
  */
 
 #include <stdio.h>
@@ -38,20 +9,14 @@
 #include <sys/queue.h>
 #include <unistd.h>
 
-#include <cmdline_parse.h>
-
 #include <rte_common.h>
 #include <rte_memory.h>
-#include <rte_memzone.h>
 #include <rte_per_lcore.h>
 #include <rte_launch.h>
-#include <rte_tailq.h>
 #include <rte_eal.h>
-#include <rte_per_lcore.h>
 #include <rte_lcore.h>
 #include <rte_cycles.h>
 #include <rte_spinlock.h>
-#include <rte_atomic.h>
 
 #include "test.h"
 
@@ -62,7 +27,7 @@
  * - There is a global spinlock and a table of spinlocks (one per lcore).
  *
  * - The test function takes all of these locks and launches the
- *   ``test_spinlock_per_core()`` function on each core (except the master).
+ *   ``test_spinlock_per_core()`` function on each core (except the main).
  *
  * - The function takes the global lock, display something, then releases
  *   the global lock.
@@ -83,10 +48,10 @@
 static rte_spinlock_t sl_tab[RTE_MAX_LCORE];
 static rte_spinlock_recursive_t slr;
 static unsigned count = 0;
-static rte_atomic32_t synchro;
+static uint32_t synchro;
 
 static int
-test_spinlock_per_core(__attribute__((unused)) void *arg)
+test_spinlock_per_core(__rte_unused void *arg)
 {
 	rte_spinlock_lock(&sl);
 	printf("Global lock taken on core %u\n", rte_lcore_id());
@@ -100,7 +65,7 @@ test_spinlock_per_core(__attribute__((unused)) void *arg)
 }
 
 static int
-test_spinlock_recursive_per_core(__attribute__((unused)) void *arg)
+test_spinlock_recursive_per_core(__rte_unused void *arg)
 {
 	unsigned id = rte_lcore_id();
 
@@ -130,35 +95,33 @@ test_spinlock_recursive_per_core(__attribute__((unused)) void *arg)
 }
 
 static rte_spinlock_t lk = RTE_SPINLOCK_INITIALIZER;
-static uint64_t lock_count[RTE_MAX_LCORE] = {0};
+static uint64_t time_count[RTE_MAX_LCORE] = {0};
 
-#define TIME_S 5
+#define MAX_LOOP 10000
 
 static int
 load_loop_fn(void *func_param)
 {
 	uint64_t time_diff = 0, begin;
 	uint64_t hz = rte_get_timer_hz();
-	uint64_t lcount = 0;
+	volatile uint64_t lcount = 0;
 	const int use_lock = *(int*)func_param;
 	const unsigned lcore = rte_lcore_id();
 
-	/* wait synchro for slaves */
-	if (lcore != rte_get_master_lcore())
-		while (rte_atomic32_read(&synchro) == 0);
+	/* wait synchro for workers */
+	if (lcore != rte_get_main_lcore())
+		rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
 
 	begin = rte_get_timer_cycles();
-	while (time_diff / hz < TIME_S) {
+	while (lcount < MAX_LOOP) {
 		if (use_lock)
 			rte_spinlock_lock(&lk);
 		lcount++;
 		if (use_lock)
 			rte_spinlock_unlock(&lk);
-		/* delay to make lock duty cycle slighlty realistic */
-		rte_delay_us(1);
-		time_diff = rte_get_timer_cycles() - begin;
 	}
-	lock_count[lcore] = lcount;
+	time_diff = rte_get_timer_cycles() - begin;
+	time_count[lcore] = time_diff * 1000000 / hz;
 	return 0;
 }
 
@@ -172,46 +135,49 @@ test_spinlock_perf(void)
 
 	printf("\nTest with no lock on single core...\n");
 	load_loop_fn(&lock);
-	printf("Core [%u] count = %"PRIu64"\n", lcore, lock_count[lcore]);
-	memset(lock_count, 0, sizeof(lock_count));
+	printf("Core [%u] Cost Time = %"PRIu64" us\n", lcore,
+		time_count[lcore]);
+	memset(time_count, 0, sizeof(time_count));
 
 	printf("\nTest with lock on single core...\n");
 	lock = 1;
 	load_loop_fn(&lock);
-	printf("Core [%u] count = %"PRIu64"\n", lcore, lock_count[lcore]);
-	memset(lock_count, 0, sizeof(lock_count));
+	printf("Core [%u] Cost Time = %"PRIu64" us\n", lcore,
+		time_count[lcore]);
+	memset(time_count, 0, sizeof(time_count));
 
 	printf("\nTest with lock on %u cores...\n", rte_lcore_count());
 
-	/* Clear synchro and start slaves */
-	rte_atomic32_set(&synchro, 0);
-	rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MASTER);
+	/* Clear synchro and start workers */
+	__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
+	rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
 
-	/* start synchro and launch test on master */
-	rte_atomic32_set(&synchro, 1);
+	/* start synchro and launch test on main */
+	__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
 	load_loop_fn(&lock);
 
 	rte_eal_mp_wait_lcore();
 
 	RTE_LCORE_FOREACH(i) {
-		printf("Core [%u] count = %"PRIu64"\n", i, lock_count[i]);
-		total += lock_count[i];
+		printf("Core [%u] Cost Time = %"PRIu64" us\n", i,
+			time_count[i]);
+		total += time_count[i];
 	}
 
-	printf("Total count = %"PRIu64"\n", total);
+	printf("Total Cost Time = %"PRIu64" us\n", total);
 
 	return 0;
 }
 
 /*
  * Use rte_spinlock_trylock() to trylock a spinlock object,
- * If it could not lock the object sucessfully, it would
+ * If it could not lock the object successfully, it would
  * return immediately and the variable of "count" would be
  * increased by one per times. the value of "count" could be
  * checked as the result later.
  */
 static int
-test_spinlock_try(__attribute__((unused)) void *arg)
+test_spinlock_try(__rte_unused void *arg)
 {
 	if (rte_spinlock_trylock(&sl_try) == 0) {
 		rte_spinlock_lock(&sl);
@@ -227,14 +193,14 @@
  * Test rte_eal_get_lcore_state() in addition to spinlocks
  * as we have "waiting" then "running" lcores.
  */
-int
+static int
 test_spinlock(void)
 {
 	int ret = 0;
 	int i;
 
-	/* slave cores should be waiting: print it */
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	/* worker cores should be waiting: print it */
+	RTE_LCORE_FOREACH_WORKER(i) {
 		printf("lcore %d state: %d\n", i,
 		       (int) rte_eal_get_lcore_state(i));
 	}
@@ -247,21 +213,21 @@ test_spinlock(void)
 
 	rte_spinlock_lock(&sl);
 
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	RTE_LCORE_FOREACH_WORKER(i) {
 		rte_spinlock_lock(&sl_tab[i]);
 		rte_eal_remote_launch(test_spinlock_per_core, NULL, i);
 	}
 
-	/* slave cores should be busy: print it */
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	/* worker cores should be busy: print it */
+	RTE_LCORE_FOREACH_WORKER(i) {
 		printf("lcore %d state: %d\n", i,
 		       (int) rte_eal_get_lcore_state(i));
 	}
 	rte_spinlock_unlock(&sl);
 
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	RTE_LCORE_FOREACH_WORKER(i) {
 		rte_spinlock_unlock(&sl_tab[i]);
-		rte_delay_ms(100);
+		rte_delay_ms(10);
 	}
 
 	rte_eal_mp_wait_lcore();
@@ -278,7 +244,7 @@ test_spinlock(void)
 	} else
 		rte_spinlock_recursive_unlock(&slr);
 
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	RTE_LCORE_FOREACH_WORKER(i) {
 		rte_eal_remote_launch(test_spinlock_recursive_per_core, NULL, i);
 	}
 	rte_spinlock_recursive_unlock(&slr);
@@ -286,12 +252,12 @@ test_spinlock(void)
 
 	/*
 	 * Test if it could return immediately from try-locking a locked object.
-	 * Here it will lock the spinlock object first, then launch all the slave
+	 * Here it will lock the spinlock object first, then launch all the worker
 	 * lcores to trylock the same spinlock object.
-	 * All the slave lcores should give up try-locking a locked object and
+	 * All the worker lcores should give up try-locking a locked object and
 	 * return immediately, and then increase the "count" initialized with zero
 	 * by one per times.
-	 * We can check if the "count" is finally equal to the number of all slave
+	 * We can check if the "count" is finally equal to the number of all worker
 	 * lcores to see if the behavior of try-locking a locked spinlock object
 	 * is correct.
 	 */
@@ -299,7 +265,7 @@ test_spinlock(void)
 		return -1;
 	}
 	count = 0;
-	RTE_LCORE_FOREACH_SLAVE(i) {
+	RTE_LCORE_FOREACH_WORKER(i) {
 		rte_eal_remote_launch(test_spinlock_try, NULL, i);
 	}
 	rte_eal_mp_wait_lcore();
@@ -335,3 +301,5 @@ test_spinlock(void)
 
 	return ret;
 }
+
+REGISTER_TEST_COMMAND(spinlock_autotest, test_spinlock);
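
The functional core of this patch is the start gate in load_loop_fn(): instead of polling an rte_atomic32_t, worker lcores now spin on a plain uint32_t with relaxed memory ordering until the main lcore stores 1, and the benchmark times MAX_LOOP lock/unlock iterations rather than counting iterations over a fixed 5-second window. The stand-alone sketch below reproduces the same gate with C11 <stdatomic.h> and POSIX threads so it can be tried outside DPDK. Everything in it is illustrative, not part of the patch: NUM_WORKERS, the worker()/main() harness and the printf are assumptions, and the bare polling loop only approximates rte_wait_until_equal_32(), which may additionally use architecture-specific wait instructions.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_WORKERS 4      /* illustrative; the test uses all detected lcores */
#define MAX_LOOP 10000     /* same bound the patch introduces */

/* The start gate: a 32-bit variable accessed with relaxed atomics,
 * standing in for the patch's "static uint32_t synchro". */
static atomic_uint synchro;

static void *
worker(void *arg)
{
	volatile uint64_t lcount = 0;  /* volatile, as in the patched load_loop_fn */

	/* Spin until the coordinator stores 1; this mirrors
	 * rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED). */
	while (atomic_load_explicit(&synchro, memory_order_relaxed) != 1)
		;

	/* Stand-in for the timed lock/unlock loop. */
	while (lcount < MAX_LOOP)
		lcount++;

	printf("worker %ld finished\n", (long)(intptr_t)arg);
	return NULL;
}

int
main(void)
{
	pthread_t t[NUM_WORKERS];
	long i;

	/* Clear the gate, then start the workers... */
	atomic_store_explicit(&synchro, 0, memory_order_relaxed);
	for (i = 0; i < NUM_WORKERS; i++)
		pthread_create(&t[i], NULL, worker, (void *)(intptr_t)i);

	/* ...and open it once, releasing every worker at the same time,
	 * as the main lcore does before running its own copy of the loop. */
	atomic_store_explicit(&synchro, 1, memory_order_relaxed);

	for (i = 0; i < NUM_WORKERS; i++)
		pthread_join(t[i], NULL);
	return 0;
}

Build with something like "cc -pthread gate.c" (the file name is arbitrary). Relaxed ordering is the same choice the patch makes with __ATOMIC_RELAXED: the gate only aligns the start of the benchmark loops and publishes no other data, so the test does not rely on any acquire/release pairing around it.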