first public release
[dpdk.git] / app / test / test_spinlock.c
1 /*-
2  *   BSD LICENSE
3  * 
4  *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  * 
7  *   Redistribution and use in source and binary forms, with or without 
8  *   modification, are permitted provided that the following conditions 
9  *   are met:
10  * 
11  *     * Redistributions of source code must retain the above copyright 
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright 
14  *       notice, this list of conditions and the following disclaimer in 
15  *       the documentation and/or other materials provided with the 
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its 
18  *       contributors may be used to endorse or promote products derived 
19  *       from this software without specific prior written permission.
20  * 
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  * 
33  *  version: DPDK.L.1.2.3-3
34  */
35
36 #include <stdio.h>
37 #include <stdint.h>
38 #include <inttypes.h>
39 #include <unistd.h>
40 #include <sys/queue.h>
41
42 #include <cmdline_parse.h>
43
44 #include <rte_common.h>
45 #include <rte_memory.h>
46 #include <rte_memzone.h>
47 #include <rte_per_lcore.h>
48 #include <rte_launch.h>
49 #include <rte_tailq.h>
50 #include <rte_eal.h>
51 #include <rte_per_lcore.h>
52 #include <rte_lcore.h>
53 #include <rte_cycles.h>
54 #include <rte_spinlock.h>
55
56 #include "test.h"
57
58 /*
59  * Spinlock test
60  * =============
61  *
62  * - There is a global spinlock and a table of spinlocks (one per lcore).
63  *
64  * - The test function takes all of these locks and launches the
65  *   ``test_spinlock_per_core()`` function on each core (except the master).
66  *
67  *   - The function takes the global lock, displays a message, then releases
68  *     the global lock.
69  *   - The function takes the per-lcore lock, displays a message, then releases
70  *     the per-lcore lock.
71  *
72  * - The main function unlocks the per-lcore locks sequentially and
73  *   waits between each lock. This triggers the display of a message
74  *   for each core, in the correct order. The autotest script checks that
75  *   this order is correct.
76  *
77  * - A load test is carried out, with all cores attempting to lock a single lock
78  *   multiple times
79  */
80
/* Global lock and trylock-test lock shared by all lcores. */
static rte_spinlock_t sl, sl_try;
/* One lock per lcore; the master unlocks them one by one to sequence output. */
static rte_spinlock_t sl_tab[RTE_MAX_LCORE];
/* Recursive lock for the nested-locking test. */
static rte_spinlock_recursive_t slr;
/* Number of lcores whose trylock on sl_try failed; guarded by sl. */
static unsigned count;
85
86 static int
87 test_spinlock_per_core(__attribute__((unused)) void *arg)
88 {
89         rte_spinlock_lock(&sl);
90         printf("Global lock taken on core %u\n", rte_lcore_id());
91         rte_spinlock_unlock(&sl);
92
93         rte_spinlock_lock(&sl_tab[rte_lcore_id()]);
94         printf("Hello from core %u !\n", rte_lcore_id());
95         rte_spinlock_unlock(&sl_tab[rte_lcore_id()]);
96
97         return 0;
98 }
99
100 static int
101 test_spinlock_recursive_per_core(__attribute__((unused)) void *arg)
102 {
103         unsigned id = rte_lcore_id();
104
105         rte_spinlock_recursive_lock(&slr);
106         printf("Global recursive lock taken on core %u - count = %d\n",
107                id, slr.count);
108         rte_spinlock_recursive_lock(&slr);
109         printf("Global recursive lock taken on core %u - count = %d\n",
110                id, slr.count);
111         rte_spinlock_recursive_lock(&slr);
112         printf("Global recursive lock taken on core %u - count = %d\n",
113                id, slr.count);
114
115         printf("Hello from within recursive locks from core %u !\n", id);
116
117         rte_spinlock_recursive_unlock(&slr);
118         printf("Global recursive lock released on core %u - count = %d\n",
119                id, slr.count);
120         rte_spinlock_recursive_unlock(&slr);
121         printf("Global recursive lock released on core %u - count = %d\n",
122                id, slr.count);
123         rte_spinlock_recursive_unlock(&slr);
124         printf("Global recursive lock released on core %u - count = %d\n",
125                id, slr.count);
126
127         return 0;
128 }
129
/* count1 is incremented under lk; count2 is deliberately incremented
 * without the lock so the load test can observe lost updates. */
static volatile int count1, count2;
/* Lock contended by all lcores during the load test. */
static rte_spinlock_t lk = RTE_SPINLOCK_INITIALIZER;
/* Iterations per lcore in the load test. */
static unsigned int max = 10000000; /* 10M */
/* Per-lcore elapsed HPET cycles for the load loop. */
static volatile uint64_t looptime[RTE_MAX_LCORE];
134
135 static int
136 load_loop_fn(__attribute__((unused)) void *dummy)
137 {
138         uint64_t end, begin;
139         begin = rte_get_hpet_cycles();
140         unsigned int i = 0;
141         for ( i = 0; i < max; i++) {
142                 rte_spinlock_lock(&lk);
143                 count1++;
144                 rte_spinlock_unlock(&lk);
145                 count2++;
146         }
147         end = rte_get_hpet_cycles();
148         looptime[rte_lcore_id()] = end - begin;
149         return 0;
150 }
151
152 static int
153 test_spinlock_load(void)
154 {
155         if (rte_lcore_count()<= 1) {
156                 printf("no cores counted\n");
157                 return -1;
158         }
159         printf ("Running %u tests.......\n", max);
160         printf ("Number of cores = %u\n", rte_lcore_count());
161
162         rte_eal_mp_remote_launch(load_loop_fn, NULL , CALL_MASTER);
163         rte_eal_mp_wait_lcore();
164
165         unsigned int k = 0;
166         uint64_t avgtime = 0;
167
168         RTE_LCORE_FOREACH(k) {
169                 printf("Core [%u] time = %"PRIu64"\n", k, looptime[k]);
170                 avgtime += looptime[k];
171         }
172
173         avgtime = avgtime / rte_lcore_count();
174         printf("Average time = %"PRIu64"\n", avgtime);
175
176         int check = 0;
177         check =  max * rte_lcore_count();
178         if (count1 == check && count2 != check)
179                 printf("Passed Load test\n");
180         else {
181                 printf("Failed load test\n");
182                 return -1;
183         }
184         return 0;
185 }
186
187 /*
188  * Use rte_spinlock_trylock() to trylock a spinlock object,
189  * If it could not lock the object sucessfully, it would
190  * return immediately and the variable of "count" would be
191  * increased by one per times. the value of "count" could be
192  * checked as the result later.
193  */
194 static int
195 test_spinlock_try(__attribute__((unused)) void *arg)
196 {
197         if (rte_spinlock_trylock(&sl_try) == 0) {
198                 rte_spinlock_lock(&sl);
199                 count ++;
200                 rte_spinlock_unlock(&sl);
201         }
202
203         return 0;
204 }
205
206
/*
 * Top-level spinlock autotest. Also exercises rte_eal_get_lcore_state()
 * since lcores are observed both "waiting" and "running".
 *
 * Sequence:
 *  1. Print slave lcore states (expected: waiting).
 *  2. Init all locks; take the global lock and every per-lcore lock,
 *     then launch test_spinlock_per_core on each slave.
 *  3. Print slave states again (expected: busy), release the global
 *     lock, then release the per-lcore locks one by one with a delay —
 *     this forces the per-core messages out in lcore order (checked by
 *     the autotest script).
 *  4. Run the contention load test.
 *  5. Exercise the recursive lock: re-trylock a lock we own, run the
 *     recursive worker on all slaves.
 *  6. Hold sl_try and have every slave trylock it; each failure bumps
 *     "count", which must end up equal to the slave count.
 *  7. Trylock the recursive lock twice from the same lcore.
 *
 * Returns 0 on success, -1 on any failure.
 */
int
test_spinlock(void)
{
	int ret = 0;
	int i;

	/* slave cores should be waiting: print it */
	RTE_LCORE_FOREACH_SLAVE(i) {
		printf("lcore %d state: %d\n", i,
		       (int) rte_eal_get_lcore_state(i));
	}

	rte_spinlock_init(&sl);
	rte_spinlock_init(&sl_try);
	rte_spinlock_recursive_init(&slr);
	for (i=0; i<RTE_MAX_LCORE; i++)
		rte_spinlock_init(&sl_tab[i]);

	/* hold the global lock so workers block on it right after launch */
	rte_spinlock_lock(&sl);

	/* take each slave's lock BEFORE launching it, so its second
	 * message is held back until we release the lock below */
	RTE_LCORE_FOREACH_SLAVE(i) {
		rte_spinlock_lock(&sl_tab[i]);
		rte_eal_remote_launch(test_spinlock_per_core, NULL, i);
	}

	/* slave cores should be busy: print it */
	RTE_LCORE_FOREACH_SLAVE(i) {
		printf("lcore %d state: %d\n", i,
		       (int) rte_eal_get_lcore_state(i));
	}
	rte_spinlock_unlock(&sl);

	/* release the per-lcore locks sequentially; the delay makes the
	 * "Hello from core N" messages appear in lcore order */
	RTE_LCORE_FOREACH_SLAVE(i) {
		rte_spinlock_unlock(&sl_tab[i]);
		rte_delay_ms(100);
	}

	rte_eal_mp_wait_lcore();

	if (test_spinlock_load()<0)
		return -1;

	rte_spinlock_recursive_lock(&slr);

	/*
	 * Try to acquire a lock that we already own: a recursive lock
	 * must allow this from the owning lcore.
	 */
	if(!rte_spinlock_recursive_trylock(&slr)) {
		printf("rte_spinlock_recursive_trylock failed on a lock that "
		       "we already own\n");
		ret = -1;
	} else
		rte_spinlock_recursive_unlock(&slr);

	/* launch the recursive workers while still holding slr once; they
	 * all block until the unlock below releases the last level */
	RTE_LCORE_FOREACH_SLAVE(i) {
		rte_eal_remote_launch(test_spinlock_recursive_per_core, NULL, i);
	}
	rte_spinlock_recursive_unlock(&slr);
	rte_eal_mp_wait_lcore();

	/*
	 * Check that try-locking an already-locked object returns
	 * immediately: lock sl_try here, then have every slave lcore
	 * trylock it. Each slave should fail, return at once, and
	 * increment "count" (initialized to zero). "count" must therefore
	 * end up equal to the number of slave lcores.
	 */
	if (rte_spinlock_trylock(&sl_try) == 0) {
		return -1;
	}
	count = 0;
	RTE_LCORE_FOREACH_SLAVE(i) {
		rte_eal_remote_launch(test_spinlock_try, NULL, i);
	}
	rte_eal_mp_wait_lcore();
	rte_spinlock_unlock(&sl_try);
	/* NOTE(review): this inspects sl (the guard used inside
	 * test_spinlock_try), not sl_try — presumably verifying the guard
	 * was fully released by every worker; confirm intent. */
	if (rte_spinlock_is_locked(&sl)) {
		printf("spinlock is locked but it should not be\n");
		return -1;
	}
	rte_spinlock_lock(&sl);
	if (count != ( rte_lcore_count() - 1)) {
		ret = -1;
	}
	rte_spinlock_unlock(&sl);

	/*
	 * Test if it can trylock recursively.
	 * Use rte_spinlock_recursive_trylock() to check if it can lock a spinlock
	 * object recursively. Here it will try to lock a spinlock object twice.
	 */
	if (rte_spinlock_recursive_trylock(&slr) == 0) {
		printf("It failed to do the first spinlock_recursive_trylock but it should able to do\n");
		return -1;
	}
	if (rte_spinlock_recursive_trylock(&slr) == 0) {
		printf("It failed to do the second spinlock_recursive_trylock but it should able to do\n");
		return -1;
	}
	rte_spinlock_recursive_unlock(&slr);
	rte_spinlock_recursive_unlock(&slr);

	return ret;
}