test/spinlock: remove delay for correct benchmarking
[dpdk.git] / app / test / test_spinlock.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4
5 #include <stdio.h>
6 #include <stdint.h>
7 #include <inttypes.h>
8 #include <string.h>
9 #include <unistd.h>
10 #include <sys/queue.h>
11
12 #include <rte_common.h>
13 #include <rte_memory.h>
14 #include <rte_per_lcore.h>
15 #include <rte_launch.h>
16 #include <rte_eal.h>
17 #include <rte_lcore.h>
18 #include <rte_cycles.h>
19 #include <rte_spinlock.h>
20 #include <rte_atomic.h>
21
22 #include "test.h"
23
24 /*
25  * Spinlock test
26  * =============
27  *
28  * - There is a global spinlock and a table of spinlocks (one per lcore).
29  *
30  * - The test function takes all of these locks and launches the
31  *   ``test_spinlock_per_core()`` function on each core (except the master).
32  *
 *   - The function takes the global lock, displays something, then releases
 *     the global lock.
 *   - The function takes the per-lcore lock, displays something, then releases
 *     the per-lcore lock.
37  *
38  * - The main function unlocks the per-lcore locks sequentially and
39  *   waits between each lock. This triggers the display of a message
40  *   for each core, in the correct order. The autotest script checks that
41  *   this order is correct.
42  *
43  * - A load test is carried out, with all cores attempting to lock a single lock
44  *   multiple times
45  */
46
/* Locks shared by all test cases: a global lock, a dedicated trylock
 * target, one lock per lcore, and a recursive lock. */
static rte_spinlock_t sl, sl_try;
static rte_spinlock_t sl_tab[RTE_MAX_LCORE];
static rte_spinlock_recursive_t slr;
/* Number of slave lcores whose rte_spinlock_trylock() on sl_try failed
 * (see test_spinlock_try()); increments are protected by sl. */
static unsigned count = 0;

/* Start flag: slaves in the perf test spin on this until the master
 * sets it, so all lcores begin measuring at the same time. */
static rte_atomic32_t synchro;
53
54 static int
55 test_spinlock_per_core(__attribute__((unused)) void *arg)
56 {
57         rte_spinlock_lock(&sl);
58         printf("Global lock taken on core %u\n", rte_lcore_id());
59         rte_spinlock_unlock(&sl);
60
61         rte_spinlock_lock(&sl_tab[rte_lcore_id()]);
62         printf("Hello from core %u !\n", rte_lcore_id());
63         rte_spinlock_unlock(&sl_tab[rte_lcore_id()]);
64
65         return 0;
66 }
67
68 static int
69 test_spinlock_recursive_per_core(__attribute__((unused)) void *arg)
70 {
71         unsigned id = rte_lcore_id();
72
73         rte_spinlock_recursive_lock(&slr);
74         printf("Global recursive lock taken on core %u - count = %d\n",
75                id, slr.count);
76         rte_spinlock_recursive_lock(&slr);
77         printf("Global recursive lock taken on core %u - count = %d\n",
78                id, slr.count);
79         rte_spinlock_recursive_lock(&slr);
80         printf("Global recursive lock taken on core %u - count = %d\n",
81                id, slr.count);
82
83         printf("Hello from within recursive locks from core %u !\n", id);
84
85         rte_spinlock_recursive_unlock(&slr);
86         printf("Global recursive lock released on core %u - count = %d\n",
87                id, slr.count);
88         rte_spinlock_recursive_unlock(&slr);
89         printf("Global recursive lock released on core %u - count = %d\n",
90                id, slr.count);
91         rte_spinlock_recursive_unlock(&slr);
92         printf("Global recursive lock released on core %u - count = %d\n",
93                id, slr.count);
94
95         return 0;
96 }
97
/* Lock contended by all lcores in the perf test. */
static rte_spinlock_t lk = RTE_SPINLOCK_INITIALIZER;
/* Per-lcore iteration counters filled in by load_loop_fn(). */
static uint64_t lock_count[RTE_MAX_LCORE] = {0};

/* Duration of each perf measurement, in milliseconds. */
#define TIME_MS 100
102
103 static int
104 load_loop_fn(void *func_param)
105 {
106         uint64_t time_diff = 0, begin;
107         uint64_t hz = rte_get_timer_hz();
108         uint64_t lcount = 0;
109         const int use_lock = *(int*)func_param;
110         const unsigned lcore = rte_lcore_id();
111
112         /* wait synchro for slaves */
113         if (lcore != rte_get_master_lcore())
114                 while (rte_atomic32_read(&synchro) == 0);
115
116         begin = rte_get_timer_cycles();
117         while (time_diff < hz * TIME_MS / 1000) {
118                 if (use_lock)
119                         rte_spinlock_lock(&lk);
120                 lcount++;
121                 if (use_lock)
122                         rte_spinlock_unlock(&lk);
123                 time_diff = rte_get_timer_cycles() - begin;
124         }
125         lock_count[lcore] = lcount;
126         return 0;
127 }
128
129 static int
130 test_spinlock_perf(void)
131 {
132         unsigned int i;
133         uint64_t total = 0;
134         int lock = 0;
135         const unsigned lcore = rte_lcore_id();
136
137         printf("\nTest with no lock on single core...\n");
138         load_loop_fn(&lock);
139         printf("Core [%u] count = %"PRIu64"\n", lcore, lock_count[lcore]);
140         memset(lock_count, 0, sizeof(lock_count));
141
142         printf("\nTest with lock on single core...\n");
143         lock = 1;
144         load_loop_fn(&lock);
145         printf("Core [%u] count = %"PRIu64"\n", lcore, lock_count[lcore]);
146         memset(lock_count, 0, sizeof(lock_count));
147
148         printf("\nTest with lock on %u cores...\n", rte_lcore_count());
149
150         /* Clear synchro and start slaves */
151         rte_atomic32_set(&synchro, 0);
152         rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MASTER);
153
154         /* start synchro and launch test on master */
155         rte_atomic32_set(&synchro, 1);
156         load_loop_fn(&lock);
157
158         rte_eal_mp_wait_lcore();
159
160         RTE_LCORE_FOREACH(i) {
161                 printf("Core [%u] count = %"PRIu64"\n", i, lock_count[i]);
162                 total += lock_count[i];
163         }
164
165         printf("Total count = %"PRIu64"\n", total);
166
167         return 0;
168 }
169
/*
 * Use rte_spinlock_trylock() to try to take a spinlock.
 * If the lock cannot be taken, the call returns immediately;
 * each lcore that fails to take it increments the shared
 * variable "count" by one, so the final value of "count"
 * can be checked as the result later.
 */
177 static int
178 test_spinlock_try(__attribute__((unused)) void *arg)
179 {
180         if (rte_spinlock_trylock(&sl_try) == 0) {
181                 rte_spinlock_lock(&sl);
182                 count ++;
183                 rte_spinlock_unlock(&sl);
184         }
185
186         return 0;
187 }
188
189
190 /*
191  * Test rte_eal_get_lcore_state() in addition to spinlocks
192  * as we have "waiting" then "running" lcores.
193  */
/*
 * Main entry point: exercises plain, recursive, and try-lock variants
 * across all lcores, and finishes with the perf test. Returns 0 on
 * success, -1 on any failure. The lock/launch/release ordering below is
 * deliberate — do not reorder.
 */
static int
test_spinlock(void)
{
	int ret = 0;
	int i;

	/* slave cores should be waiting: print it */
	RTE_LCORE_FOREACH_SLAVE(i) {
		printf("lcore %d state: %d\n", i,
		       (int) rte_eal_get_lcore_state(i));
	}

	rte_spinlock_init(&sl);
	rte_spinlock_init(&sl_try);
	rte_spinlock_recursive_init(&slr);
	for (i=0; i<RTE_MAX_LCORE; i++)
		rte_spinlock_init(&sl_tab[i]);

	/* Hold the global lock and every per-lcore lock before launching,
	 * so the slaves block inside test_spinlock_per_core(). */
	rte_spinlock_lock(&sl);

	RTE_LCORE_FOREACH_SLAVE(i) {
		rte_spinlock_lock(&sl_tab[i]);
		rte_eal_remote_launch(test_spinlock_per_core, NULL, i);
	}

	/* slave cores should be busy: print it */
	RTE_LCORE_FOREACH_SLAVE(i) {
		printf("lcore %d state: %d\n", i,
		       (int) rte_eal_get_lcore_state(i));
	}
	rte_spinlock_unlock(&sl);

	/* Release the per-lcore locks one at a time; the delay makes each
	 * slave print before the next lock is released, giving a
	 * deterministic message order the autotest script checks. */
	RTE_LCORE_FOREACH_SLAVE(i) {
		rte_spinlock_unlock(&sl_tab[i]);
		rte_delay_ms(10);
	}

	rte_eal_mp_wait_lcore();

	rte_spinlock_recursive_lock(&slr);

	/*
	 * Try to acquire a lock that we already own
	 */
	if(!rte_spinlock_recursive_trylock(&slr)) {
		printf("rte_spinlock_recursive_trylock failed on a lock that "
		       "we already own\n");
		ret = -1;
	} else
		rte_spinlock_recursive_unlock(&slr);

	/* Launch the slaves while still holding slr once; they block until
	 * the unlock below releases the last level. */
	RTE_LCORE_FOREACH_SLAVE(i) {
		rte_eal_remote_launch(test_spinlock_recursive_per_core, NULL, i);
	}
	rte_spinlock_recursive_unlock(&slr);
	rte_eal_mp_wait_lcore();

	/*
	 * Test if it could return immediately from try-locking a locked object.
	 * Here it will lock the spinlock object first, then launch all the slave
	 * lcores to trylock the same spinlock object.
	 * All the slave lcores should give up try-locking a locked object and
	 * return immediately, and then increase the "count" initialized with zero
	 * by one per times.
	 * We can check if the "count" is finally equal to the number of all slave
	 * lcores to see if the behavior of try-locking a locked spinlock object
	 * is correct.
	 */
	if (rte_spinlock_trylock(&sl_try) == 0) {
		return -1;
	}
	count = 0;
	RTE_LCORE_FOREACH_SLAVE(i) {
		rte_eal_remote_launch(test_spinlock_try, NULL, i);
	}
	rte_eal_mp_wait_lcore();
	rte_spinlock_unlock(&sl_try);
	/* sl was only taken transiently by the slaves; it must be free now. */
	if (rte_spinlock_is_locked(&sl)) {
		printf("spinlock is locked but it should not be\n");
		return -1;
	}
	/* Every slave must have failed its trylock exactly once. */
	rte_spinlock_lock(&sl);
	if (count != ( rte_lcore_count() - 1)) {
		ret = -1;
	}
	rte_spinlock_unlock(&sl);

	/*
	 * Test if it can trylock recursively.
	 * Use rte_spinlock_recursive_trylock() to check if it can lock a spinlock
	 * object recursively. Here it will try to lock a spinlock object twice.
	 */
	if (rte_spinlock_recursive_trylock(&slr) == 0) {
		printf("It failed to do the first spinlock_recursive_trylock but it should able to do\n");
		return -1;
	}
	if (rte_spinlock_recursive_trylock(&slr) == 0) {
		printf("It failed to do the second spinlock_recursive_trylock but it should able to do\n");
		return -1;
	}
	rte_spinlock_recursive_unlock(&slr);
	rte_spinlock_recursive_unlock(&slr);

	if (test_spinlock_perf() < 0)
		return -1;

	return ret;
}
302
303 REGISTER_TEST_COMMAND(spinlock_autotest, test_spinlock);