/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_lcore.h>

#include "test.h"
/*
 * Atomic Variables
 * ================
 *
 * - The main test function performs several subtests. The first one
 *   checks that the usual inc/dec/add/sub functions are working
 *   correctly:
 *
 *   - Initialize 16-bit, 32-bit and 64-bit atomic variables to specific
 *     values.
 *
 *   - These variables are incremented and decremented on each core at
 *     the same time in ``test_atomic_usual()``.
 *
 *   - The function checks that once all lcores have finished, each
 *     atomic variable still holds its initial value.
 *
 * - The second test verifies the behavior of the "test and set" functions.
 *
 *   - Initialize 16-bit, 32-bit and 64-bit atomic variables to zero.
 *
 *   - Invoke ``test_atomic_tas()`` on each lcore. Before doing anything
 *     else, the cores wait for the synchronization flag with ``while
 *     (rte_atomic32_read(&synchro) == 0)``, which is set by the main test
 *     function. Then all cores perform a ``rte_atomicXX_test_and_set()``
 *     at the same time. If it succeeds, the core increments another
 *     atomic counter.
 *
 *   - The main function checks that the atomic counter was incremented
 *     exactly three times (once for the 16-bit, once for the 32-bit and
 *     once for the 64-bit variable).
 *
 * - Test "add/sub and return"
 *
 *   - Initialize 16-bit, 32-bit and 64-bit atomic variables to zero.
 *
 *   - Invoke ``test_atomic_addsub_and_return()`` on each lcore. Before
 *     doing anything else, the cores wait for the synchronization flag.
 *     Each lcore then performs this sequence several times::
 *
 *       tmp = rte_atomicXX_add_return(&a, 1);
 *       atomic_add(&count, tmp);
 *       tmp = rte_atomicXX_sub_return(&a, 1);
 *       atomic_sub(&count, tmp+1);
 *
 *   - At the end of the test, the *count* value must be 0.
 */
#define NUM_ATOMIC_TYPES 3

#define N 10000 /* iterations per lcore in each loop test */

static rte_atomic16_t a16;
static rte_atomic32_t a32;
static rte_atomic64_t a64;
static rte_atomic64_t count;
static rte_atomic32_t synchro;
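
/*
 * All worker functions below follow the same pattern: each lcore spins on
 * the "synchro" flag until the main lcore releases it, so that all lcores
 * start operating on the atomic variables at the same time, and each
 * reports its result through the shared "count" counter.
 */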
static int
test_atomic_usual(__attribute__((unused)) void *arg)
{
	unsigned i;

	while (rte_atomic32_read(&synchro) == 0)
		;

	for (i = 0; i < N; i++)
		rte_atomic16_inc(&a16);
	for (i = 0; i < N; i++)
		rte_atomic16_dec(&a16);
	for (i = 0; i < (N / 5); i++)
		rte_atomic16_add(&a16, 5);
	for (i = 0; i < (N / 5); i++)
		rte_atomic16_sub(&a16, 5);

	for (i = 0; i < N; i++)
		rte_atomic32_inc(&a32);
	for (i = 0; i < N; i++)
		rte_atomic32_dec(&a32);
	for (i = 0; i < (N / 5); i++)
		rte_atomic32_add(&a32, 5);
	for (i = 0; i < (N / 5); i++)
		rte_atomic32_sub(&a32, 5);

	for (i = 0; i < N; i++)
		rte_atomic64_inc(&a64);
	for (i = 0; i < N; i++)
		rte_atomic64_dec(&a64);
	for (i = 0; i < (N / 5); i++)
		rte_atomic64_add(&a64, 5);
	for (i = 0; i < (N / 5); i++)
		rte_atomic64_sub(&a64, 5);

	return 0;
}
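
/*
 * Every lcore races to test-and-set each variable. A variable starts at
 * zero, so only the first lcore to reach it wins the set and increments
 * "count"; the main function therefore expects "count" to end up equal
 * to NUM_ATOMIC_TYPES.
 */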
static int
test_atomic_tas(__attribute__((unused)) void *arg)
{
	while (rte_atomic32_read(&synchro) == 0)
		;

	if (rte_atomic16_test_and_set(&a16))
		rte_atomic64_inc(&count);
	if (rte_atomic32_test_and_set(&a32))
		rte_atomic64_inc(&count);
	if (rte_atomic64_test_and_set(&a64))
		rte_atomic64_inc(&count);

	return 0;
}
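
/*
 * rte_atomicXX_add_return() yields the value just after the increment and
 * rte_atomicXX_sub_return() the value just after the decrement, so each
 * pair adds the post-increment value to "count" and subtracts the
 * pre-decrement value (tmp+1). Every upward step through a given value is
 * eventually matched by a downward step through the same value, so the
 * contributions cancel and "count" must end at zero for any interleaving
 * of lcores.
 */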
static int
test_atomic_addsub_and_return(__attribute__((unused)) void *arg)
{
	uint32_t tmp32;
	uint16_t tmp16;
	uint64_t tmp64;
	unsigned i;

	while (rte_atomic32_read(&synchro) == 0)
		;

	for (i = 0; i < N; i++) {
		tmp16 = rte_atomic16_add_return(&a16, 1);
		rte_atomic64_add(&count, tmp16);
		tmp16 = rte_atomic16_sub_return(&a16, 1);
		rte_atomic64_sub(&count, tmp16+1);

		tmp32 = rte_atomic32_add_return(&a32, 1);
		rte_atomic64_add(&count, tmp32);
		tmp32 = rte_atomic32_sub_return(&a32, 1);
		rte_atomic64_sub(&count, tmp32+1);

		tmp64 = rte_atomic64_add_return(&a64, 1);
		rte_atomic64_add(&count, tmp64);
		tmp64 = rte_atomic64_sub_return(&a64, 1);
		rte_atomic64_sub(&count, tmp64+1);
	}

	return 0;
}
/*
 * rte_atomicXX_inc_and_test() atomically increments a counter by one and
 * then tests whether the counter equals zero, returning true if it does
 * and false otherwise; the 16-, 32- and 64-bit variants behave the same
 * way for their respective widths.
 * Each lcore checks whether a counter reached zero after its atomic
 * increment; if so, it increments the shared "count" variable, which is
 * verified by the main function afterwards.
 */
static int
test_atomic_inc_and_test(__attribute__((unused)) void *arg)
{
	while (rte_atomic32_read(&synchro) == 0)
		;
	if (rte_atomic16_inc_and_test(&a16)) {
		rte_atomic64_inc(&count);
	}
	if (rte_atomic32_inc_and_test(&a32)) {
		rte_atomic64_inc(&count);
	}
	if (rte_atomic64_inc_and_test(&a64)) {
		rte_atomic64_inc(&count);
	}
	return 0;
}
/*
 * rte_atomicXX_dec_and_test() atomically decrements a counter by one and
 * then tests whether the counter equals zero, returning true if it does
 * and false otherwise.
 * This test checks whether a counter reached zero after its atomic
 * decrement; if so, the lcore increments the shared "count" variable,
 * which is verified by the main function afterwards.
 */
static int
test_atomic_dec_and_test(__attribute__((unused)) void *arg)
{
	while (rte_atomic32_read(&synchro) == 0)
		;
	if (rte_atomic16_dec_and_test(&a16))
		rte_atomic64_inc(&count);
	if (rte_atomic32_dec_and_test(&a32))
		rte_atomic64_inc(&count);
	if (rte_atomic64_dec_and_test(&a64))
		rte_atomic64_inc(&count);
	return 0;
}
static int
test_atomic(void)
{
	rte_atomic16_init(&a16);
	rte_atomic32_init(&a32);
	rte_atomic64_init(&a64);
	rte_atomic64_init(&count);
	rte_atomic32_init(&synchro);

	rte_atomic16_set(&a16, 1UL << 10);
	rte_atomic32_set(&a32, 1UL << 10);
	rte_atomic64_set(&a64, 1ULL << 33);
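
	/*
	 * Each phase below follows the same scheme: launch the worker on all
	 * slave lcores (SKIP_MASTER), release them simultaneously by setting
	 * "synchro", wait for every lcore to finish, then check the result
	 * from the master lcore.
	 */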
	printf("usual inc/dec/add/sub functions\n");

	rte_eal_mp_remote_launch(test_atomic_usual, NULL, SKIP_MASTER);
	rte_atomic32_set(&synchro, 1);
	rte_eal_mp_wait_lcore();
	rte_atomic32_set(&synchro, 0);

	if (rte_atomic16_read(&a16) != 1UL << 10) {
		printf("Atomic16 usual functions failed\n");
		return -1;
	}

	if (rte_atomic32_read(&a32) != 1UL << 10) {
		printf("Atomic32 usual functions failed\n");
		return -1;
	}

	if (rte_atomic64_read(&a64) != 1ULL << 33) {
		printf("Atomic64 usual functions failed\n");
		return -1;
	}
	printf("test and set\n");

	rte_atomic64_set(&a64, 0);
	rte_atomic32_set(&a32, 0);
	rte_atomic16_set(&a16, 0);
	rte_atomic64_set(&count, 0);
	rte_eal_mp_remote_launch(test_atomic_tas, NULL, SKIP_MASTER);
	rte_atomic32_set(&synchro, 1);
	rte_eal_mp_wait_lcore();
	rte_atomic32_set(&synchro, 0);

	if (rte_atomic64_read(&count) != NUM_ATOMIC_TYPES) {
		printf("Atomic test and set failed\n");
		return -1;
	}
	printf("add/sub and return\n");

	rte_atomic64_set(&a64, 0);
	rte_atomic32_set(&a32, 0);
	rte_atomic16_set(&a16, 0);
	rte_atomic64_set(&count, 0);
	rte_eal_mp_remote_launch(test_atomic_addsub_and_return, NULL,
				 SKIP_MASTER);
	rte_atomic32_set(&synchro, 1);
	rte_eal_mp_wait_lcore();
	rte_atomic32_set(&synchro, 0);

	if (rte_atomic64_read(&count) != 0) {
		printf("Atomic add/sub+return failed\n");
		return -1;
	}
	/*
	 * Set a64, a32 and a16 to minus the number of slave lcores, then
	 * launch all slave lcores to atomically increment each of them by
	 * one and test the result.
	 * Each lcore increments a64 exactly once and checks whether it
	 * became 0, but only one lcore can observe the 0; the same holds
	 * for a32 and a16.
	 * The "count" variable, initialized to zero, is incremented by one
	 * whenever a64, a32 or a16 reads 0 after being atomically
	 * incremented and tested.
	 * If "count" finally equals NUM_ATOMIC_TYPES, all slave lcores
	 * performed "atomic inc and test" correctly.
	 */
	printf("inc and test\n");

	rte_atomic64_clear(&a64);
	rte_atomic32_clear(&a32);
	rte_atomic16_clear(&a16);
	rte_atomic32_clear(&synchro);
	rte_atomic64_clear(&count);

	rte_atomic64_set(&a64, (int64_t)(1 - (int64_t)rte_lcore_count()));
	rte_atomic32_set(&a32, (int32_t)(1 - (int32_t)rte_lcore_count()));
	rte_atomic16_set(&a16, (int16_t)(1 - (int16_t)rte_lcore_count()));
	rte_eal_mp_remote_launch(test_atomic_inc_and_test, NULL, SKIP_MASTER);
	rte_atomic32_set(&synchro, 1);
	rte_eal_mp_wait_lcore();
	rte_atomic32_clear(&synchro);

	if (rte_atomic64_read(&count) != NUM_ATOMIC_TYPES) {
		printf("Atomic inc and test failed %d\n", (int)count.cnt);
		return -1;
	}
	/*
	 * Same as above, but this time we set the values to the number of
	 * slave lcores and decrement instead of increment.
	 */
	printf("dec and test\n");

	rte_atomic32_clear(&synchro);
	rte_atomic64_clear(&count);

	rte_atomic64_set(&a64, (int64_t)(rte_lcore_count() - 1));
	rte_atomic32_set(&a32, (int32_t)(rte_lcore_count() - 1));
	rte_atomic16_set(&a16, (int16_t)(rte_lcore_count() - 1));
	rte_eal_mp_remote_launch(test_atomic_dec_and_test, NULL, SKIP_MASTER);
	rte_atomic32_set(&synchro, 1);
	rte_eal_mp_wait_lcore();
	rte_atomic32_clear(&synchro);

	if (rte_atomic64_read(&count) != NUM_ATOMIC_TYPES) {
		printf("Atomic dec and test failed\n");
		return -1;
	}

	return 0;
}
static struct test_command atomic_cmd = {
	.command = "atomic_autotest",
	.callback = test_atomic,
};
REGISTER_TEST_COMMAND(atomic_cmd);