/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Arm Limited
 */

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/queue.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_per_lcore.h>
#include <rte_ticketlock.h>

#include "test.h"

/*
 * Ticketlock test
 * ===============
 *
 * - There is a global ticketlock and a table of ticketlocks (one per lcore).
 *
 * - The test function takes all of these locks and launches the
 *   ``test_ticketlock_per_core()`` function on each core (except the main).
 *
 *   - The function takes the global lock, displays something, then releases
 *     the global lock.
 *   - The function takes the per-lcore lock, displays something, then releases
 *     the per-lcore lock.
 *
 * - The main function unlocks the per-lcore locks sequentially and
 *   waits between each unlock. This triggers the display of a message
 *   for each core, in the correct order. The autotest script checks that
 *   this order is correct.
 *
 * - A load test is carried out, with all cores attempting to take a single
 *   lock multiple times.
 *
 * A minimal sketch of the basic ticketlock API follows this comment.
 */
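
/*
 * For reference, a minimal usage sketch of the ticketlock API exercised in
 * this file (kept inside a comment so it is not compiled; the variable name
 * is illustrative). A ticketlock hands out the lock in FIFO order of the
 * acquisition attempts:
 *
 *   static rte_ticketlock_t example_lock = RTE_TICKETLOCK_INITIALIZER;
 *
 *   rte_ticketlock_lock(&example_lock);    // spin until our ticket is served
 *   ... critical section ...
 *   rte_ticketlock_unlock(&example_lock);  // serve the next ticket
 *
 *   if (rte_ticketlock_trylock(&example_lock)) {  // non-zero only on success
 *           ... critical section ...
 *           rte_ticketlock_unlock(&example_lock);
 *   }
 */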

static rte_ticketlock_t tl, tl_try;
static rte_ticketlock_t tl_tab[RTE_MAX_LCORE];
static rte_ticketlock_recursive_t tlr;
static unsigned int count;

/* set to 1 by the main lcore to release the waiting worker lcores */
static uint32_t synchro;

static int
test_ticketlock_per_core(__rte_unused void *arg)
{
	rte_ticketlock_lock(&tl);
	printf("Global lock taken on core %u\n", rte_lcore_id());
	rte_ticketlock_unlock(&tl);

	rte_ticketlock_lock(&tl_tab[rte_lcore_id()]);
	printf("Hello from core %u !\n", rte_lcore_id());
	rte_ticketlock_unlock(&tl_tab[rte_lcore_id()]);

	return 0;
}

static int
test_ticketlock_recursive_per_core(__rte_unused void *arg)
{
	unsigned int id = rte_lcore_id();

	rte_ticketlock_recursive_lock(&tlr);
	printf("Global recursive lock taken on core %u - count = %d\n",
	       id, tlr.count);
	rte_ticketlock_recursive_lock(&tlr);
	printf("Global recursive lock taken on core %u - count = %d\n",
	       id, tlr.count);
	rte_ticketlock_recursive_lock(&tlr);
	printf("Global recursive lock taken on core %u - count = %d\n",
	       id, tlr.count);

	printf("Hello from within recursive locks from core %u !\n", id);

	rte_ticketlock_recursive_unlock(&tlr);
	printf("Global recursive lock released on core %u - count = %d\n",
	       id, tlr.count);
	rte_ticketlock_recursive_unlock(&tlr);
	printf("Global recursive lock released on core %u - count = %d\n",
	       id, tlr.count);
	rte_ticketlock_recursive_unlock(&tlr);
	printf("Global recursive lock released on core %u - count = %d\n",
	       id, tlr.count);

	return 0;
}

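/*
 * State for the lock/perf test below: one contended lock, a global counter,
 * per-lcore counters and per-lcore elapsed times (in microseconds). The
 * counters are cache-line aligned, presumably to limit false sharing
 * between lcores.
 */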
static rte_ticketlock_t lk = RTE_TICKETLOCK_INITIALIZER;
static uint64_t lcount __rte_cache_aligned;
static uint64_t lcore_count[RTE_MAX_LCORE] __rte_cache_aligned;
static uint64_t time_cost[RTE_MAX_LCORE];

#define MAX_LOOP 10000

static int
load_loop_fn(void *func_param)
{
	uint64_t time_diff = 0, begin;
	uint64_t hz = rte_get_timer_hz();
	const int use_lock = *(int *)func_param;
	const unsigned int lcore = rte_lcore_id();

	/* worker lcores wait here until the main lcore raises synchro */
	if (lcore != rte_get_main_lcore())
		rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED);

	begin = rte_rdtsc_precise();
	while (lcore_count[lcore] < MAX_LOOP) {
		if (use_lock)
			rte_ticketlock_lock(&lk);
		lcore_count[lcore]++;
		lcount++;
		if (use_lock)
			rte_ticketlock_unlock(&lk);
	}
	time_diff = rte_rdtsc_precise() - begin;
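	/* convert the elapsed TSC cycles to microseconds: us = cycles * 1000000 / hz */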
	time_cost[lcore] = time_diff * 1000000 / hz;
	return 0;
}

static int
test_ticketlock_perf(void)
{
	unsigned int i;
	uint64_t tcount = 0;
	uint64_t total_time = 0;
	int lock = 0;
	const unsigned int lcore = rte_lcore_id();

	printf("\nTest with no lock on single core...\n");
	load_loop_fn(&lock);
	printf("Core [%u] cost time = %"PRIu64" us\n", lcore, time_cost[lcore]);
	memset(lcore_count, 0, sizeof(lcore_count));
	memset(time_cost, 0, sizeof(time_cost));

	printf("\nTest with lock on single core...\n");
	lock = 1;
	load_loop_fn(&lock);
	printf("Core [%u] cost time = %"PRIu64" us\n", lcore, time_cost[lcore]);
	memset(lcore_count, 0, sizeof(lcore_count));
	memset(time_cost, 0, sizeof(time_cost));

	lcount = 0;
	printf("\nTest with lock on %u cores...\n", rte_lcore_count());

	/* Clear synchro and start workers */
	__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);
	rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);

	/* start synchro and launch test on main */
	__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);
	load_loop_fn(&lock);

	rte_eal_mp_wait_lcore();

	RTE_LCORE_FOREACH(i) {
		printf("Core [%u] cost time = %"PRIu64" us\n", i, time_cost[i]);
		tcount += lcore_count[i];
		total_time += time_cost[i];
	}

	if (tcount != lcount)
		return -1;

	printf("Total cost time = %"PRIu64" us\n", total_time);

	return 0;
}

/*
 * Use rte_ticketlock_trylock() to try to take a ticketlock object.
 * If the lock cannot be taken, the call returns immediately and the
 * "count" variable is incremented by one. The final value of "count"
 * is checked later as the test result.
 */
static int
test_ticketlock_try(__rte_unused void *arg)
{
	if (rte_ticketlock_trylock(&tl_try) == 0) {
		rte_ticketlock_lock(&tl);
		count++;
		rte_ticketlock_unlock(&tl);
	}

	return 0;
}


/*
 * Test rte_eal_get_lcore_state() in addition to ticketlocks
 * as we have "waiting" then "running" lcores.
 */
static int
test_ticketlock(void)
{
	int ret = 0;
	int i;

	/* worker cores should be waiting: print it */
	RTE_LCORE_FOREACH_WORKER(i) {
		printf("lcore %d state: %d\n", i,
		       (int) rte_eal_get_lcore_state(i));
	}

	rte_ticketlock_init(&tl);
	rte_ticketlock_init(&tl_try);
	rte_ticketlock_recursive_init(&tlr);
	RTE_LCORE_FOREACH_WORKER(i) {
		rte_ticketlock_init(&tl_tab[i]);
	}

	rte_ticketlock_lock(&tl);

	RTE_LCORE_FOREACH_WORKER(i) {
		rte_ticketlock_lock(&tl_tab[i]);
		rte_eal_remote_launch(test_ticketlock_per_core, NULL, i);
	}

	/* worker cores should be busy: print it */
	RTE_LCORE_FOREACH_WORKER(i) {
		printf("lcore %d state: %d\n", i,
		       (int) rte_eal_get_lcore_state(i));
	}
	rte_ticketlock_unlock(&tl);

	RTE_LCORE_FOREACH_WORKER(i) {
		rte_ticketlock_unlock(&tl_tab[i]);
		rte_delay_ms(10);
	}

	rte_eal_mp_wait_lcore();

	rte_ticketlock_recursive_lock(&tlr);

	/*
	 * Try to acquire a lock that we already own
	 */
	if (!rte_ticketlock_recursive_trylock(&tlr)) {
		printf("rte_ticketlock_recursive_trylock failed on a lock that "
		       "we already own\n");
		ret = -1;
	} else
		rte_ticketlock_recursive_unlock(&tlr);

	RTE_LCORE_FOREACH_WORKER(i) {
		rte_eal_remote_launch(test_ticketlock_recursive_per_core,
					NULL, i);
	}
	rte_ticketlock_recursive_unlock(&tlr);
	rte_eal_mp_wait_lcore();

	/*
	 * Test that try-locking an already locked object returns immediately.
	 * Lock the ticketlock object first, then launch all the worker lcores
	 * to trylock the same object.
	 * Every worker lcore should fail to take the locked object, return
	 * immediately and increment "count" (initialized to zero) by one.
	 * Checking that "count" finally equals the number of worker lcores
	 * verifies that try-locking a locked ticketlock behaves correctly.
	 */
	if (rte_ticketlock_trylock(&tl_try) == 0)
		return -1;

	count = 0;
	RTE_LCORE_FOREACH_WORKER(i) {
		rte_eal_remote_launch(test_ticketlock_try, NULL, i);
	}
	rte_eal_mp_wait_lcore();
	rte_ticketlock_unlock(&tl_try);
	if (rte_ticketlock_is_locked(&tl)) {
		printf("ticketlock is locked but it should not be\n");
		return -1;
	}
	rte_ticketlock_lock(&tl);
	if (count != (rte_lcore_count() - 1))
		ret = -1;

	rte_ticketlock_unlock(&tl);

	/*
	 * Test recursive trylocking.
	 * Use rte_ticketlock_recursive_trylock() to check that a recursive
	 * ticketlock object can be taken recursively: here the same object
	 * is try-locked twice.
	 */
	if (rte_ticketlock_recursive_trylock(&tlr) == 0) {
		printf("The first rte_ticketlock_recursive_trylock() failed "
		       "but it should have succeeded\n");
		return -1;
	}
	if (rte_ticketlock_recursive_trylock(&tlr) == 0) {
		printf("The second rte_ticketlock_recursive_trylock() failed "
		       "but it should have succeeded\n");
		return -1;
	}
	rte_ticketlock_recursive_unlock(&tlr);
	rte_ticketlock_recursive_unlock(&tlr);

	if (test_ticketlock_perf() < 0)
		return -1;

	return ret;
}

REGISTER_TEST_COMMAND(ticketlock_autotest, test_ticketlock);
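
/*
 * The command name registered above is the one exposed by the dpdk-test
 * binary; assuming a standard build, the test can be run interactively
 * from the test application's prompt, for example:
 *
 *   RTE>> ticketlock_autotest
 */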