app/test: convert all tests to register system
[dpdk.git] / app/test/test_spinlock.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_cycles.h>
#include <rte_spinlock.h>
#include <rte_atomic.h>

#include "test.h"

/*
 * Spinlock test
 * =============
 *
 * - There is a global spinlock and a table of spinlocks (one per lcore).
 *
 * - The test function takes all of these locks and launches the
 *   ``test_spinlock_per_core()`` function on each core (except the master).
 *
 *   - The function takes the global lock, displays something, then releases
 *     the global lock.
 *   - The function takes the per-lcore lock, displays something, then
 *     releases the per-lcore lock.
 *
 * - The main function unlocks the per-lcore locks sequentially and
 *   waits between each unlock. This triggers the display of a message
 *   for each core, in the correct order. The autotest script checks that
 *   this order is correct.
 *
 * - A load test is carried out, with all cores attempting to lock a single
 *   lock multiple times.
 */
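
/*
 * For quick reference, the basic rte_spinlock calls exercised by this test
 * follow the pattern sketched below. This is an illustrative sketch only
 * (hence the #if 0 guard) and is not part of the test itself.
 */
#if 0
static void
spinlock_usage_sketch(void)
{
	rte_spinlock_t lock;

	rte_spinlock_init(&lock);

	rte_spinlock_lock(&lock);	/* spins until the lock is acquired */
	rte_spinlock_unlock(&lock);

	if (rte_spinlock_trylock(&lock)) {	/* 1 on success, 0 otherwise */
		/* ... critical section ... */
		rte_spinlock_unlock(&lock);
	}
}
#endif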

static rte_spinlock_t sl, sl_try;
static rte_spinlock_t sl_tab[RTE_MAX_LCORE];
static rte_spinlock_recursive_t slr;
static unsigned count = 0;

static rte_atomic32_t synchro;

static int
test_spinlock_per_core(__attribute__((unused)) void *arg)
{
	rte_spinlock_lock(&sl);
	printf("Global lock taken on core %u\n", rte_lcore_id());
	rte_spinlock_unlock(&sl);

	rte_spinlock_lock(&sl_tab[rte_lcore_id()]);
	printf("Hello from core %u !\n", rte_lcore_id());
	rte_spinlock_unlock(&sl_tab[rte_lcore_id()]);

	return 0;
}

static int
test_spinlock_recursive_per_core(__attribute__((unused)) void *arg)
{
	unsigned id = rte_lcore_id();

	rte_spinlock_recursive_lock(&slr);
	printf("Global recursive lock taken on core %u - count = %d\n",
	       id, slr.count);
	rte_spinlock_recursive_lock(&slr);
	printf("Global recursive lock taken on core %u - count = %d\n",
	       id, slr.count);
	rte_spinlock_recursive_lock(&slr);
	printf("Global recursive lock taken on core %u - count = %d\n",
	       id, slr.count);

	printf("Hello from within recursive locks from core %u !\n", id);

	rte_spinlock_recursive_unlock(&slr);
	printf("Global recursive lock released on core %u - count = %d\n",
	       id, slr.count);
	rte_spinlock_recursive_unlock(&slr);
	printf("Global recursive lock released on core %u - count = %d\n",
	       id, slr.count);
	rte_spinlock_recursive_unlock(&slr);
	printf("Global recursive lock released on core %u - count = %d\n",
	       id, slr.count);

	return 0;
}

static rte_spinlock_t lk = RTE_SPINLOCK_INITIALIZER;
static uint64_t lock_count[RTE_MAX_LCORE] = {0};

#define TIME_S 5

static int
load_loop_fn(void *func_param)
{
	uint64_t time_diff = 0, begin;
	uint64_t hz = rte_get_timer_hz();
	uint64_t lcount = 0;
	const int use_lock = *(int *)func_param;
	const unsigned lcore = rte_lcore_id();

	/* slaves wait for the master to raise synchro before starting */
	if (lcore != rte_get_master_lcore())
		while (rte_atomic32_read(&synchro) == 0)
			;

	begin = rte_get_timer_cycles();
	while (time_diff / hz < TIME_S) {
		if (use_lock)
			rte_spinlock_lock(&lk);
		lcount++;
		if (use_lock)
			rte_spinlock_unlock(&lk);
		/* delay to make the lock duty cycle slightly realistic */
		rte_delay_us(1);
		time_diff = rte_get_timer_cycles() - begin;
	}
	lock_count[lcore] = lcount;
	return 0;
}

static int
test_spinlock_perf(void)
{
	unsigned int i;
	uint64_t total = 0;
	int lock = 0;
	const unsigned lcore = rte_lcore_id();

	printf("\nTest with no lock on single core...\n");
	load_loop_fn(&lock);
	printf("Core [%u] count = %"PRIu64"\n", lcore, lock_count[lcore]);
	memset(lock_count, 0, sizeof(lock_count));

	printf("\nTest with lock on single core...\n");
	lock = 1;
	load_loop_fn(&lock);
	printf("Core [%u] count = %"PRIu64"\n", lcore, lock_count[lcore]);
	memset(lock_count, 0, sizeof(lock_count));

	printf("\nTest with lock on %u cores...\n", rte_lcore_count());

	/* Clear synchro and start slaves */
	rte_atomic32_set(&synchro, 0);
	rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MASTER);

	/* start synchro and launch test on master */
	rte_atomic32_set(&synchro, 1);
	load_loop_fn(&lock);

	rte_eal_mp_wait_lcore();

	RTE_LCORE_FOREACH(i) {
		printf("Core [%u] count = %"PRIu64"\n", i, lock_count[i]);
		total += lock_count[i];
	}

	printf("Total count = %"PRIu64"\n", total);

	return 0;
}

/*
 * Use rte_spinlock_trylock() to try to take a spinlock object.
 * If the lock cannot be taken, the call returns immediately and the
 * "count" variable is incremented. The final value of "count" is
 * checked later as the result of the test.
 */
static int
test_spinlock_try(__attribute__((unused)) void *arg)
{
	if (rte_spinlock_trylock(&sl_try) == 0) {
		rte_spinlock_lock(&sl);
		count++;
		rte_spinlock_unlock(&sl);
	}

	return 0;
}

/*
 * Test rte_eal_get_lcore_state() in addition to spinlocks
 * as we have "waiting" then "running" lcores.
 */
static int
test_spinlock(void)
{
	int ret = 0;
	int i;

	/* slave cores should be waiting: print it */
	RTE_LCORE_FOREACH_SLAVE(i) {
		printf("lcore %d state: %d\n", i,
		       (int) rte_eal_get_lcore_state(i));
	}

	rte_spinlock_init(&sl);
	rte_spinlock_init(&sl_try);
	rte_spinlock_recursive_init(&slr);
	for (i = 0; i < RTE_MAX_LCORE; i++)
		rte_spinlock_init(&sl_tab[i]);

	rte_spinlock_lock(&sl);

	RTE_LCORE_FOREACH_SLAVE(i) {
		rte_spinlock_lock(&sl_tab[i]);
		rte_eal_remote_launch(test_spinlock_per_core, NULL, i);
	}

	/* slave cores should be busy: print it */
	RTE_LCORE_FOREACH_SLAVE(i) {
		printf("lcore %d state: %d\n", i,
		       (int) rte_eal_get_lcore_state(i));
	}
	rte_spinlock_unlock(&sl);

	RTE_LCORE_FOREACH_SLAVE(i) {
		rte_spinlock_unlock(&sl_tab[i]);
		rte_delay_ms(100);
	}

	rte_eal_mp_wait_lcore();

	rte_spinlock_recursive_lock(&slr);

	/*
	 * Try to acquire a lock that we already own
	 */
	if (!rte_spinlock_recursive_trylock(&slr)) {
		printf("rte_spinlock_recursive_trylock failed on a lock that "
		       "we already own\n");
		ret = -1;
	} else
		rte_spinlock_recursive_unlock(&slr);

	RTE_LCORE_FOREACH_SLAVE(i) {
		rte_eal_remote_launch(test_spinlock_recursive_per_core, NULL, i);
	}
	rte_spinlock_recursive_unlock(&slr);
	rte_eal_mp_wait_lcore();

	/*
	 * Test that try-locking an already locked object returns immediately.
	 * The master takes the spinlock first (via trylock, which should
	 * succeed), then launches all the slave lcores to trylock the same
	 * spinlock object. Each slave should give up immediately and
	 * increment "count" (initialized to zero). If "count" ends up equal
	 * to the number of slave lcores, the behavior of try-locking a
	 * locked spinlock object is correct.
	 */
	if (rte_spinlock_trylock(&sl_try) == 0) {
		return -1;
	}
	count = 0;
	RTE_LCORE_FOREACH_SLAVE(i) {
		rte_eal_remote_launch(test_spinlock_try, NULL, i);
	}
	rte_eal_mp_wait_lcore();
	rte_spinlock_unlock(&sl_try);
	if (rte_spinlock_is_locked(&sl)) {
		printf("spinlock is locked but it should not be\n");
		return -1;
	}
	rte_spinlock_lock(&sl);
	if (count != (rte_lcore_count() - 1)) {
		ret = -1;
	}
	rte_spinlock_unlock(&sl);

	/*
	 * Test that the recursive trylock works.
	 * Use rte_spinlock_recursive_trylock() to check that a recursive
	 * spinlock object can be locked recursively: try to lock the same
	 * object twice in a row.
	 */
	if (rte_spinlock_recursive_trylock(&slr) == 0) {
		printf("The first spinlock_recursive_trylock failed but it should have succeeded\n");
		return -1;
	}
	if (rte_spinlock_recursive_trylock(&slr) == 0) {
		printf("The second spinlock_recursive_trylock failed but it should have succeeded\n");
		return -1;
	}
	rte_spinlock_recursive_unlock(&slr);
	rte_spinlock_recursive_unlock(&slr);

	if (test_spinlock_perf() < 0)
		return -1;

	return ret;
}

static struct test_command spinlock_cmd = {
	.command = "spinlock_autotest",
	.callback = test_spinlock,
};
REGISTER_TEST_COMMAND(spinlock_cmd);
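
/*
 * Context for the "register system" mentioned in the commit title: each test
 * file now declares a struct test_command and registers it with
 * REGISTER_TEST_COMMAND() instead of being wired into a central command
 * table. A minimal sketch of how such a macro might be implemented follows;
 * the list names and the constructor-based mechanism are assumptions for
 * illustration, not the actual definitions from test.h.
 */
#if 0
struct test_command {
	TAILQ_ENTRY(test_command) next;		/* hypothetical list linkage */
	const char *command;			/* autotest command name */
	int (*callback)(void);			/* test entry point */
};

TAILQ_HEAD(test_command_list, test_command);
static struct test_command_list test_commands =
	TAILQ_HEAD_INITIALIZER(test_commands);

/* Hypothetical: append the command to the list at load time. */
#define REGISTER_TEST_COMMAND(cmd)					\
	static void __attribute__((constructor, used))			\
	test_register_##cmd(void)					\
	{								\
		TAILQ_INSERT_TAIL(&test_commands, &cmd, next);		\
	}
#endif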