/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Arm Limited
 */

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_per_lcore.h>
#include <rte_ticketlock.h>

#include "test.h"

/*
 * Ticketlock test
 * ===============
 *
 * - There is a global ticketlock and a table of ticketlocks (one per lcore).
 *
 * - The test function takes all of these locks and launches the
 *   ``test_ticketlock_per_core()`` function on each core (except the main).
 *
 *   - The function takes the global lock, displays something, then releases
 *     the global lock.
 *   - The function takes the per-lcore lock, displays something, then
 *     releases the per-lcore lock.
 *
 * - The main function unlocks the per-lcore locks sequentially and
 *   waits between each lock. This triggers the display of a message
 *   for each core, in the correct order. The autotest script checks that
 *   this order is correct.
 *
 * - A load test is carried out, with all cores attempting to lock a single
 *   lock multiple times.
 */

static rte_ticketlock_t tl, tl_try;
static rte_ticketlock_t tl_tab[RTE_MAX_LCORE];
static rte_ticketlock_recursive_t tlr;
static unsigned int count;

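/* Synchronization flag used by the perf test to start all lcores together. */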
static uint32_t synchro;
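
/*
 * Worker body for the ordered-output test: take and release the global
 * lock, then take and release this lcore's entry in tl_tab[]. The main
 * lcore holds all of these locks when the workers are launched, so each
 * worker blocks until its per-lcore lock is released.
 */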
static int
test_ticketlock_per_core(__rte_unused void *arg)
{
	rte_ticketlock_lock(&tl);
	printf("Global lock taken on core %u\n", rte_lcore_id());
	rte_ticketlock_unlock(&tl);

	rte_ticketlock_lock(&tl_tab[rte_lcore_id()]);
	printf("Hello from core %u !\n", rte_lcore_id());
	rte_ticketlock_unlock(&tl_tab[rte_lcore_id()]);

	return 0;
}
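
/*
 * Worker body for the recursive-lock test: take the global recursive lock
 * three times, then release it three times, printing the nesting count at
 * each step.
 */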
static int
test_ticketlock_recursive_per_core(__rte_unused void *arg)
{
	unsigned int id = rte_lcore_id();

	rte_ticketlock_recursive_lock(&tlr);
	printf("Global recursive lock taken on core %u - count = %d\n",
	       id, tlr.count);
	rte_ticketlock_recursive_lock(&tlr);
	printf("Global recursive lock taken on core %u - count = %d\n",
	       id, tlr.count);
	rte_ticketlock_recursive_lock(&tlr);
	printf("Global recursive lock taken on core %u - count = %d\n",
	       id, tlr.count);

	printf("Hello from within recursive locks from core %u !\n", id);

	rte_ticketlock_recursive_unlock(&tlr);
	printf("Global recursive lock released on core %u - count = %d\n",
	       id, tlr.count);
	rte_ticketlock_recursive_unlock(&tlr);
	printf("Global recursive lock released on core %u - count = %d\n",
	       id, tlr.count);
	rte_ticketlock_recursive_unlock(&tlr);
	printf("Global recursive lock released on core %u - count = %d\n",
	       id, tlr.count);

	return 0;
}
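
/*
 * Load/perf test data: a single contended ticketlock, a shared iteration
 * counter (lcount), per-lcore iteration counters and per-lcore cost in
 * microseconds.
 */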
static rte_ticketlock_t lk = RTE_TICKETLOCK_INITIALIZER;
static uint64_t lcount __rte_cache_aligned;
static uint64_t lcore_count[RTE_MAX_LCORE] __rte_cache_aligned;
static uint64_t time_cost[RTE_MAX_LCORE];

#define MAX_LOOP 10000
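
/*
 * Per-lcore load loop: increment the per-lcore counter MAX_LOOP times,
 * optionally under the contended ticketlock, and record the elapsed time
 * in microseconds for this lcore.
 */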
static int
load_loop_fn(void *func_param)
{
	uint64_t time_diff = 0, begin;
	uint64_t hz = rte_get_timer_hz();
	const int use_lock = *(int *)func_param;
	const unsigned int lcore = rte_lcore_id();

	/* Workers wait for the synchro flag */
	if (lcore != rte_get_main_lcore())
		rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);

	begin = rte_rdtsc_precise();
	while (lcore_count[lcore] < MAX_LOOP) {
		if (use_lock)
			rte_ticketlock_lock(&lk);
		lcore_count[lcore]++;
		lcount++;
		if (use_lock)
			rte_ticketlock_unlock(&lk);
	}
	time_diff = rte_rdtsc_precise() - begin;
	time_cost[lcore] = time_diff * 1000000 / hz;
	return 0;
}
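
/*
 * Ticketlock performance test: run the load loop with no lock on a single
 * core, with the lock on a single core, and with the lock contended by all
 * cores, and report the per-core and total cost in microseconds.
 */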
static int
test_ticketlock_perf(void)
{
	unsigned int i;
	uint64_t tcount = 0;
	uint64_t total_time = 0;
	int lock = 0;
	const unsigned int lcore = rte_lcore_id();

	printf("\nTest with no lock on single core...\n");
	load_loop_fn(&lock);
	printf("Core [%u] cost time = %"PRIu64" us\n", lcore, time_cost[lcore]);
	memset(lcore_count, 0, sizeof(lcore_count));
	memset(time_cost, 0, sizeof(time_cost));

	printf("\nTest with lock on single core...\n");
	lock = 1;
	load_loop_fn(&lock);
	printf("Core [%u] cost time = %"PRIu64" us\n", lcore, time_cost[lcore]);
	memset(lcore_count, 0, sizeof(lcore_count));
	memset(time_cost, 0, sizeof(time_cost));

	lcount = 0; /* reset the shared counter before the contended run */
	printf("\nTest with lock on %u cores...\n", rte_lcore_count());

	/* Clear synchro and start workers */
	__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
	rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);

	/* Set synchro and run the test on the main lcore too */
	__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
	load_loop_fn(&lock);

	rte_eal_mp_wait_lcore();

	RTE_LCORE_FOREACH(i) {
		printf("Core [%u] cost time = %"PRIu64" us\n", i, time_cost[i]);
		tcount += lcore_count[i];
		total_time += time_cost[i];
	}

	if (tcount != lcount)
		return -1;

	printf("Total cost time = %"PRIu64" us\n", total_time);

	return 0;
}

/*
 * Use rte_ticketlock_trylock() to try to take a locked ticketlock object.
 * If the lock cannot be taken, the call returns immediately and the
 * "count" variable is incremented by one for each failed attempt.
 * The value of "count" is checked later as the test result.
 */
static int
test_ticketlock_try(__rte_unused void *arg)
{
	if (rte_ticketlock_trylock(&tl_try) == 0) {
		rte_ticketlock_lock(&tl);
		count++;
		rte_ticketlock_unlock(&tl);
	}

	return 0;
}

/*
 * Test rte_eal_get_lcore_state() in addition to ticketlocks
 * as we have "waiting" then "running" lcores.
 */
static int
test_ticketlock(void)
{
	int ret = 0;
	int i;

	/* Worker cores should be waiting: print their state */
	RTE_LCORE_FOREACH_WORKER(i) {
		printf("lcore %d state: %d\n", i,
		       (int) rte_eal_get_lcore_state(i));
	}

	rte_ticketlock_init(&tl);
	rte_ticketlock_init(&tl_try);
	rte_ticketlock_recursive_init(&tlr);
	RTE_LCORE_FOREACH_WORKER(i) {
		rte_ticketlock_init(&tl_tab[i]);
	}

	rte_ticketlock_lock(&tl);

	RTE_LCORE_FOREACH_WORKER(i) {
		rte_ticketlock_lock(&tl_tab[i]);
		rte_eal_remote_launch(test_ticketlock_per_core, NULL, i);
	}

	/* Worker cores should be busy: print their state */
	RTE_LCORE_FOREACH_WORKER(i) {
		printf("lcore %d state: %d\n", i,
		       (int) rte_eal_get_lcore_state(i));
	}

	rte_ticketlock_unlock(&tl);
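
	/*
	 * Release the per-lcore locks one by one, waiting between each
	 * release so the workers print their messages in lcore order.
	 */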
	RTE_LCORE_FOREACH_WORKER(i) {
		rte_ticketlock_unlock(&tl_tab[i]);
		rte_delay_ms(10); /* brief pause; the exact delay is not critical */
	}

	rte_eal_mp_wait_lcore();
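
	/*
	 * Recursive lock: take it on the main lcore, check that it can be
	 * nested, then let each worker nest it three levels deep.
	 */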
	rte_ticketlock_recursive_lock(&tlr);

	/*
	 * Try to acquire a lock that we already own
	 */
	if (!rte_ticketlock_recursive_trylock(&tlr)) {
		printf("rte_ticketlock_recursive_trylock failed on a lock that "
		       "we already own\n");
		ret = -1;
	} else
		rte_ticketlock_recursive_unlock(&tlr);

	RTE_LCORE_FOREACH_WORKER(i) {
		rte_eal_remote_launch(test_ticketlock_recursive_per_core,
				      NULL, i);
	}
	rte_ticketlock_recursive_unlock(&tlr);
	rte_eal_mp_wait_lcore();

	/*
	 * Test that try-locking a locked object returns immediately.
	 * The main lcore locks the ticketlock object first, then launches
	 * all the worker lcores to trylock the same ticketlock object.
	 * Each worker lcore should fail to take the already-locked object,
	 * return immediately, and increment "count" (initialized to zero)
	 * by one.
	 * At the end, "count" must equal the number of worker lcores if
	 * try-locking a locked ticketlock object behaves correctly.
	 */
	if (rte_ticketlock_trylock(&tl_try) == 0)
		return -1;

	count = 0;
	RTE_LCORE_FOREACH_WORKER(i) {
		rte_eal_remote_launch(test_ticketlock_try, NULL, i);
	}
	rte_eal_mp_wait_lcore();
	rte_ticketlock_unlock(&tl_try);
	if (rte_ticketlock_is_locked(&tl)) {
		printf("ticketlock is locked but it should not be\n");
		return -1;
	}
	rte_ticketlock_lock(&tl);
	if (count != (rte_lcore_count() - 1))
		ret = -1;

	rte_ticketlock_unlock(&tl);

	/*
	 * Test that a recursive ticketlock can be try-locked recursively.
	 * Use rte_ticketlock_recursive_trylock() to check that the same
	 * ticketlock object can be locked twice by its owner.
	 */
	if (rte_ticketlock_recursive_trylock(&tlr) == 0) {
		printf("It failed to do the first ticketlock_recursive_trylock "
		       "but it should be able to\n");
		return -1;
	}
	if (rte_ticketlock_recursive_trylock(&tlr) == 0) {
		printf("It failed to do the second ticketlock_recursive_trylock "
		       "but it should be able to\n");
		return -1;
	}
	rte_ticketlock_recursive_unlock(&tlr);
	rte_ticketlock_recursive_unlock(&tlr);
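
	/* Finally, run the lock performance measurement. */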
	if (test_ticketlock_perf() < 0)
		return -1;

	return ret;
}

REGISTER_TEST_COMMAND(ticketlock_autotest, test_ticketlock);