/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_cycles.h>
#include <rte_spinlock.h>
/*
 * rte_spinlock_test
 * =================
 *
 * - There is a global spinlock and a table of spinlocks (one per lcore).
 *
 * - The test function takes all of these locks and launches the
 *   ``test_spinlock_per_core()`` function on each core (except the main).
 *
 *   - The function takes the global lock, display something, then releases
 *     the global lock.
 *   - The function takes the per-lcore lock, display something, then releases
 *     the per-lcore lock.
 *
 * - The main function unlocks the per-lcore locks sequentially and
 *   waits between each lock. This triggers the display of a message
 *   for each core, in the correct order. The autotest script checks that
 *   this order is correct.
 *
 * - A load test is carried out, with all cores attempting to lock a single
 *   lock multiple times.
 */
46 static rte_spinlock_t sl, sl_try;
47 static rte_spinlock_t sl_tab[RTE_MAX_LCORE];
48 static rte_spinlock_recursive_t slr;
49 static unsigned count = 0;
51 static uint32_t synchro;
54 test_spinlock_per_core(__rte_unused void *arg)
56 rte_spinlock_lock(&sl);
57 printf("Global lock taken on core %u\n", rte_lcore_id());
58 rte_spinlock_unlock(&sl);
60 rte_spinlock_lock(&sl_tab[rte_lcore_id()]);
61 printf("Hello from core %u !\n", rte_lcore_id());
62 rte_spinlock_unlock(&sl_tab[rte_lcore_id()]);
68 test_spinlock_recursive_per_core(__rte_unused void *arg)
70 unsigned id = rte_lcore_id();
72 rte_spinlock_recursive_lock(&slr);
73 printf("Global recursive lock taken on core %u - count = %d\n",
75 rte_spinlock_recursive_lock(&slr);
76 printf("Global recursive lock taken on core %u - count = %d\n",
78 rte_spinlock_recursive_lock(&slr);
79 printf("Global recursive lock taken on core %u - count = %d\n",
82 printf("Hello from within recursive locks from core %u !\n", id);
84 rte_spinlock_recursive_unlock(&slr);
85 printf("Global recursive lock released on core %u - count = %d\n",
87 rte_spinlock_recursive_unlock(&slr);
88 printf("Global recursive lock released on core %u - count = %d\n",
90 rte_spinlock_recursive_unlock(&slr);
91 printf("Global recursive lock released on core %u - count = %d\n",
97 static rte_spinlock_t lk = RTE_SPINLOCK_INITIALIZER;
98 static uint64_t time_count[RTE_MAX_LCORE] = {0};
100 #define MAX_LOOP 10000
103 load_loop_fn(void *func_param)
105 uint64_t time_diff = 0, begin;
106 uint64_t hz = rte_get_timer_hz();
107 volatile uint64_t lcount = 0;
108 const int use_lock = *(int*)func_param;
109 const unsigned lcore = rte_lcore_id();
111 /* wait synchro for workers */
112 if (lcore != rte_get_main_lcore())
113 rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);
115 begin = rte_get_timer_cycles();
116 while (lcount < MAX_LOOP) {
118 rte_spinlock_lock(&lk);
121 rte_spinlock_unlock(&lk);
123 time_diff = rte_get_timer_cycles() - begin;
124 time_count[lcore] = time_diff * 1000000 / hz;
129 test_spinlock_perf(void)
134 const unsigned lcore = rte_lcore_id();
136 printf("\nTest with no lock on single core...\n");
138 printf("Core [%u] Cost Time = %"PRIu64" us\n", lcore,
140 memset(time_count, 0, sizeof(time_count));
142 printf("\nTest with lock on single core...\n");
145 printf("Core [%u] Cost Time = %"PRIu64" us\n", lcore,
147 memset(time_count, 0, sizeof(time_count));
149 printf("\nTest with lock on %u cores...\n", rte_lcore_count());
151 /* Clear synchro and start workers */
152 __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
153 rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
155 /* start synchro and launch test on main */
156 __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
159 rte_eal_mp_wait_lcore();
161 RTE_LCORE_FOREACH(i) {
162 printf("Core [%u] Cost Time = %"PRIu64" us\n", i,
164 total += time_count[i];
167 printf("Total Cost Time = %"PRIu64" us\n", total);
173 * Use rte_spinlock_trylock() to trylock a spinlock object,
174 * If it could not lock the object successfully, it would
175 * return immediately and the variable of "count" would be
176 * increased by one per times. the value of "count" could be
177 * checked as the result later.
180 test_spinlock_try(__rte_unused void *arg)
182 if (rte_spinlock_trylock(&sl_try) == 0) {
183 rte_spinlock_lock(&sl);
185 rte_spinlock_unlock(&sl);
193 * Test rte_eal_get_lcore_state() in addition to spinlocks
194 * as we have "waiting" then "running" lcores.
202 /* worker cores should be waiting: print it */
203 RTE_LCORE_FOREACH_WORKER(i) {
204 printf("lcore %d state: %d\n", i,
205 (int) rte_eal_get_lcore_state(i));
208 rte_spinlock_init(&sl);
209 rte_spinlock_init(&sl_try);
210 rte_spinlock_recursive_init(&slr);
211 for (i=0; i<RTE_MAX_LCORE; i++)
212 rte_spinlock_init(&sl_tab[i]);
214 rte_spinlock_lock(&sl);
216 RTE_LCORE_FOREACH_WORKER(i) {
217 rte_spinlock_lock(&sl_tab[i]);
218 rte_eal_remote_launch(test_spinlock_per_core, NULL, i);
221 /* worker cores should be busy: print it */
222 RTE_LCORE_FOREACH_WORKER(i) {
223 printf("lcore %d state: %d\n", i,
224 (int) rte_eal_get_lcore_state(i));
226 rte_spinlock_unlock(&sl);
228 RTE_LCORE_FOREACH_WORKER(i) {
229 rte_spinlock_unlock(&sl_tab[i]);
233 rte_eal_mp_wait_lcore();
235 rte_spinlock_recursive_lock(&slr);
238 * Try to acquire a lock that we already own
240 if(!rte_spinlock_recursive_trylock(&slr)) {
241 printf("rte_spinlock_recursive_trylock failed on a lock that "
245 rte_spinlock_recursive_unlock(&slr);
247 RTE_LCORE_FOREACH_WORKER(i) {
248 rte_eal_remote_launch(test_spinlock_recursive_per_core, NULL, i);
250 rte_spinlock_recursive_unlock(&slr);
251 rte_eal_mp_wait_lcore();
254 * Test if it could return immediately from try-locking a locked object.
255 * Here it will lock the spinlock object first, then launch all the worker
256 * lcores to trylock the same spinlock object.
257 * All the worker lcores should give up try-locking a locked object and
258 * return immediately, and then increase the "count" initialized with zero
260 * We can check if the "count" is finally equal to the number of all worker
261 * lcores to see if the behavior of try-locking a locked spinlock object
264 if (rte_spinlock_trylock(&sl_try) == 0) {
268 RTE_LCORE_FOREACH_WORKER(i) {
269 rte_eal_remote_launch(test_spinlock_try, NULL, i);
271 rte_eal_mp_wait_lcore();
272 rte_spinlock_unlock(&sl_try);
273 if (rte_spinlock_is_locked(&sl)) {
274 printf("spinlock is locked but it should not be\n");
277 rte_spinlock_lock(&sl);
278 if (count != ( rte_lcore_count() - 1)) {
281 rte_spinlock_unlock(&sl);
284 * Test if it can trylock recursively.
285 * Use rte_spinlock_recursive_trylock() to check if it can lock a spinlock
286 * object recursively. Here it will try to lock a spinlock object twice.
288 if (rte_spinlock_recursive_trylock(&slr) == 0) {
289 printf("It failed to do the first spinlock_recursive_trylock but it should able to do\n");
292 if (rte_spinlock_recursive_trylock(&slr) == 0) {
293 printf("It failed to do the second spinlock_recursive_trylock but it should able to do\n");
296 rte_spinlock_recursive_unlock(&slr);
297 rte_spinlock_recursive_unlock(&slr);
299 if (test_spinlock_perf() < 0)
/* Register this test as the "spinlock_autotest" command. */
REGISTER_TEST_COMMAND(spinlock_autotest, test_spinlock);