/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <sys/queue.h>

#include <cmdline_parse.h>

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_cycles.h>
#include <rte_spinlock.h>
#include <rte_atomic.h>

#include "test.h"

/*
 * Spinlock test
 * =============
 *
 * - There is a global spinlock and a table of spinlocks (one per lcore).
 *
 * - The test function takes all of these locks and launches the
 *   ``test_spinlock_per_core()`` function on each core (except the master).
 *
 *   - The function takes the global lock, displays something, then releases
 *     the global lock.
 *   - The function takes the per-lcore lock, displays something, then
 *     releases the per-lcore lock.
 *
 * - The main function unlocks the per-lcore locks sequentially and
 *   waits between each unlock. This triggers the display of a message
 *   for each core, in the correct order. The autotest script checks that
 *   this order is correct.
 *
 * - A load test is carried out, with all cores attempting to lock a single
 *   lock multiple times.
 */

static rte_spinlock_t sl, sl_try;
static rte_spinlock_t sl_tab[RTE_MAX_LCORE];
static rte_spinlock_recursive_t slr;
static unsigned count = 0;

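/* used to synchronize the start of all lcores in the load test below */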
static rte_atomic32_t synchro;

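/*
 * Take and release the global lock, then the calling lcore's own lock
 * from the per-lcore table, printing a message while each one is held.
 */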
static int
test_spinlock_per_core(__attribute__((unused)) void *arg)
{
        rte_spinlock_lock(&sl);
        printf("Global lock taken on core %u\n", rte_lcore_id());
        rte_spinlock_unlock(&sl);

        rte_spinlock_lock(&sl_tab[rte_lcore_id()]);
        printf("Hello from core %u !\n", rte_lcore_id());
        rte_spinlock_unlock(&sl_tab[rte_lcore_id()]);

        return 0;
}

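/*
 * Take the recursive lock three times, then release it three times,
 * printing the nesting count at each step.
 */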
static int
test_spinlock_recursive_per_core(__attribute__((unused)) void *arg)
{
        unsigned id = rte_lcore_id();

        rte_spinlock_recursive_lock(&slr);
        printf("Global recursive lock taken on core %u - count = %d\n",
               id, slr.count);
        rte_spinlock_recursive_lock(&slr);
        printf("Global recursive lock taken on core %u - count = %d\n",
               id, slr.count);
        rte_spinlock_recursive_lock(&slr);
        printf("Global recursive lock taken on core %u - count = %d\n",
               id, slr.count);

        printf("Hello from within recursive locks from core %u !\n", id);

        rte_spinlock_recursive_unlock(&slr);
        printf("Global recursive lock released on core %u - count = %d\n",
               id, slr.count);
        rte_spinlock_recursive_unlock(&slr);
        printf("Global recursive lock released on core %u - count = %d\n",
               id, slr.count);
        rte_spinlock_recursive_unlock(&slr);
        printf("Global recursive lock released on core %u - count = %d\n",
               id, slr.count);

        return 0;
}

static rte_spinlock_t lk = RTE_SPINLOCK_INITIALIZER;
static uint64_t lock_count[RTE_MAX_LCORE] = {0};

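/* duration of each load loop, in seconds */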
#define TIME_S 5

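/*
 * Count how many increment iterations an lcore completes in TIME_S
 * seconds, taking and releasing lk around each increment when
 * *func_param is non-zero. Slave lcores first spin on the synchro flag
 * so that all lcores start at roughly the same time.
 */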
static int
load_loop_fn(void *func_param)
{
        uint64_t time_diff = 0, begin;
        uint64_t hz = rte_get_timer_hz();
        uint64_t lcount = 0;
        const int use_lock = *(int *)func_param;
        const unsigned lcore = rte_lcore_id();

        /* wait synchro for slaves */
        if (lcore != rte_get_master_lcore())
                while (rte_atomic32_read(&synchro) == 0)
                        ;

        begin = rte_get_timer_cycles();
        while (time_diff / hz < TIME_S) {
                if (use_lock)
                        rte_spinlock_lock(&lk);
                lcount++;
                if (use_lock)
                        rte_spinlock_unlock(&lk);
                /* delay to make the lock duty cycle slightly realistic */
                rte_delay_us(1);
                time_diff = rte_get_timer_cycles() - begin;
        }
        lock_count[lcore] = lcount;
        return 0;
}

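/*
 * Run the load loop without the lock, with the lock on a single core,
 * and finally with the lock contended by every lcore, printing the
 * per-core and total iteration counts for comparison.
 */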
static int
test_spinlock_perf(void)
{
        unsigned int i;
        uint64_t total = 0;
        int lock = 0;
        const unsigned lcore = rte_lcore_id();

        printf("\nTest with no lock on single core...\n");
        load_loop_fn(&lock);
        printf("Core [%u] count = %"PRIu64"\n", lcore, lock_count[lcore]);
        memset(lock_count, 0, sizeof(lock_count));

        printf("\nTest with lock on single core...\n");
        lock = 1;
        load_loop_fn(&lock);
        printf("Core [%u] count = %"PRIu64"\n", lcore, lock_count[lcore]);
        memset(lock_count, 0, sizeof(lock_count));

        printf("\nTest with lock on %u cores...\n", rte_lcore_count());

        /* Clear synchro and start slaves */
        rte_atomic32_set(&synchro, 0);
        rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MASTER);

        /* start synchro and launch test on master */
        rte_atomic32_set(&synchro, 1);
        load_loop_fn(&lock);

        rte_eal_mp_wait_lcore();

        RTE_LCORE_FOREACH(i) {
                printf("Core [%u] count = %"PRIu64"\n", i, lock_count[i]);
                total += lock_count[i];
        }

        printf("Total count = %"PRIu64"\n", total);

        return 0;
}

/*
 * Use rte_spinlock_trylock() to try to take a spinlock object.
 * If the lock cannot be taken, the call returns immediately and the
 * "count" variable is incremented by one; its value is checked as the
 * test result later.
 */
static int
test_spinlock_try(__attribute__((unused)) void *arg)
{
        if (rte_spinlock_trylock(&sl_try) == 0) {
                /* sl protects the increment of the shared counter */
                rte_spinlock_lock(&sl);
                count++;
                rte_spinlock_unlock(&sl);
        }

        return 0;
}

/*
 * Test rte_eal_get_lcore_state() in addition to spinlocks
 * as we have "waiting" then "running" lcores.
 */
int
test_spinlock(void)
{
        int ret = 0;
        int i;

        /* slave cores should be waiting: print it */
        RTE_LCORE_FOREACH_SLAVE(i) {
                printf("lcore %d state: %d\n", i,
                       (int) rte_eal_get_lcore_state(i));
        }

        rte_spinlock_init(&sl);
        rte_spinlock_init(&sl_try);
        rte_spinlock_recursive_init(&slr);
        for (i = 0; i < RTE_MAX_LCORE; i++)
                rte_spinlock_init(&sl_tab[i]);

        rte_spinlock_lock(&sl);

        RTE_LCORE_FOREACH_SLAVE(i) {
                rte_spinlock_lock(&sl_tab[i]);
                rte_eal_remote_launch(test_spinlock_per_core, NULL, i);
        }

        /* slave cores should be busy: print it */
        RTE_LCORE_FOREACH_SLAVE(i) {
                printf("lcore %d state: %d\n", i,
                       (int) rte_eal_get_lcore_state(i));
        }
        rte_spinlock_unlock(&sl);

        RTE_LCORE_FOREACH_SLAVE(i) {
                rte_spinlock_unlock(&sl_tab[i]);
                rte_delay_ms(100);
        }

        rte_eal_mp_wait_lcore();

        rte_spinlock_recursive_lock(&slr);

        /*
         * Try to acquire a lock that we already own
         */
        if (!rte_spinlock_recursive_trylock(&slr)) {
                printf("rte_spinlock_recursive_trylock failed on a lock that "
                       "we already own\n");
                ret = -1;
        } else
                rte_spinlock_recursive_unlock(&slr);

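        /*
         * Launch the recursive lock test on all slave lcores while still
         * holding slr once; they block until it is released below.
         */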
        RTE_LCORE_FOREACH_SLAVE(i) {
                rte_eal_remote_launch(test_spinlock_recursive_per_core, NULL, i);
        }
        rte_spinlock_recursive_unlock(&slr);
        rte_eal_mp_wait_lcore();

        /*
         * Test that try-locking an already locked object returns
         * immediately. Lock the spinlock object first, then launch all the
         * slave lcores to trylock the same object. Every slave lcore should
         * give up immediately and increment "count" (initialized to zero)
         * by one. If "count" finally equals the number of slave lcores,
         * try-locking a locked spinlock object behaves correctly.
         */
        if (rte_spinlock_trylock(&sl_try) == 0) {
                return -1;
        }
        count = 0;
        RTE_LCORE_FOREACH_SLAVE(i) {
                rte_eal_remote_launch(test_spinlock_try, NULL, i);
        }
        rte_eal_mp_wait_lcore();
        rte_spinlock_unlock(&sl_try);
        if (rte_spinlock_is_locked(&sl)) {
                printf("spinlock is locked but it should not be\n");
                return -1;
        }
        rte_spinlock_lock(&sl);
        if (count != (rte_lcore_count() - 1)) {
                ret = -1;
        }
        rte_spinlock_unlock(&sl);

        /*
         * Test that the recursive lock can be taken with trylock more than
         * once: rte_spinlock_recursive_trylock() is called twice on the
         * same object, and both calls must succeed.
         */
        if (rte_spinlock_recursive_trylock(&slr) == 0) {
                printf("The first spinlock_recursive_trylock failed but it "
                       "should have succeeded\n");
                return -1;
        }
        if (rte_spinlock_recursive_trylock(&slr) == 0) {
                printf("The second spinlock_recursive_trylock failed but it "
                       "should have succeeded\n");
                return -1;
        }
        rte_spinlock_recursive_unlock(&slr);
        rte_spinlock_recursive_unlock(&slr);

        if (test_spinlock_perf() < 0)
                return -1;

        return ret;
}