diff --git a/app/test/test_atomic.c b/app/test/test_atomic.c
index 6afca31c60..de3030d221 100644
--- a/app/test/test_atomic.c
+++ b/app/test/test_atomic.c
@@ -1,53 +1,22 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Intel Corporation nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2019 Arm Limited
  */
 
 #include <stdio.h>
 #include <stdint.h>
 #include <unistd.h>
+#include <inttypes.h>
 #include <sys/queue.h>
 
-#include <rte_common.h>
-
 #include <rte_memory.h>
-#include <rte_memzone.h>
 #include <rte_per_lcore.h>
 #include <rte_launch.h>
 #include <rte_atomic.h>
-#include <rte_tailq.h>
 #include <rte_eal.h>
-#include <rte_cycles.h>
 #include <rte_lcore.h>
+#include <rte_random.h>
+#include <rte_hash_crc.h>
 
 #include "test.h"
 
@@ -55,7 +24,7 @@
  * Atomic Variables
  * ================
  *
- * - The main test function performs three subtests. The first test
+ * - The main test function performs several subtests. The first
  *   checks that the usual inc/dec/add/sub functions are working
  *   correctly:
  *
@@ -68,7 +37,7 @@
  *   - The function checks that once all lcores finish their function,
  *     the values of the atomic variables are still the same.
  *
- * - The second test verifies the behavior of "test and set" functions.
+ * - Test "test and set" functions.
  *
  *   - Initialize 16-bit, 32-bit and 64-bit atomic variables to zero.
  *
@@ -82,7 +51,7 @@
  *   - The main function checks that the atomic counter was incremented
  *     three times only (once each for the 16-bit, 32-bit and 64-bit values).
  *
- * - Test "add/sub and return"
+ * - Test "add/sub and return" functions
  *
  *   - Initialize 16-bit, 32-bit and 64-bit atomic variables to zero.
  *
@@ -96,16 +65,46 @@
  *       atomic_sub(&count, tmp+1);
  *
  *   - At the end of the test, the *count* value must be 0.
+ *
+ * - Test "128-bit compare and swap" (aarch64 and x86_64 only)
+ *
+ *   - Initialize 128-bit atomic variables to zero.
+ *
+ *   - Invoke ``test_atomic128_cmp_exchange()`` on each lcore. Before doing
+ *     anything else, the cores wait for a synchronization event. Each
+ *     lcore then performs these compare-and-swap (CAS) operations several
+ *     times::
+ *
+ *       Acquired CAS update counter.val[0] + 2; counter.val[1] + 1;
+ *       Released CAS update counter.val[0] + 2; counter.val[1] + 1;
+ *       Acquired_Released CAS update counter.val[0] + 2; counter.val[1] + 1;
+ *       Relaxed CAS update counter.val[0] + 2; counter.val[1] + 1;
+ *
+ *   - At the end of the test, the first and second 64-bit values of
+ *     *count128* must differ by the total number of iterations.
+ *
+ * - Test "atomic exchange" functions
+ *
+ *   - Create a 64-bit token that can be tested for data integrity.
+ *
+ *   - Invoke ``test_atomic_exchange`` on each lcore. Before doing
+ *     anything else, the cores wait for a synchronization event.
+ *     Each core then does the following for N iterations:
+ *
+ *       Generate a new token with a data integrity check
+ *       Exchange the new token for the previously generated token
+ *       Increment a counter if a corrupt token was received
+ *
+ *   - At the end of the test, the number of corrupted tokens must be 0.
  */
 
 #define NUM_ATOMIC_TYPES 3
 
-#define N 10000
+#define N 1000000
 
 static rte_atomic16_t a16;
 static rte_atomic32_t a32;
 static rte_atomic64_t a64;
-static rte_atomic32_t count;
+static rte_atomic64_t count;
 static rte_atomic32_t synchro;
 
 static int
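The invariant behind the first subtest is worth spelling out: every lcore
performs N increments and N decrements of the same variable, so any
interleaving must leave its value unchanged. A minimal standalone sketch of
that property, using C11 <stdatomic.h> and POSIX threads in place of the
rte_atomic/lcore APIs (the thread count and all names below are
illustrative, not part of the patch):

    #include <stdatomic.h>
    #include <pthread.h>
    #include <stdio.h>

    #define LOOPS 10000
    #define NWORKERS 4

    static atomic_int a;    /* stands in for a16/a32/a64 */

    static void *worker(void *arg)
    {
        (void)arg;
        for (int i = 0; i < LOOPS; i++)
            atomic_fetch_add(&a, 1);    /* like rte_atomicNN_inc() */
        for (int i = 0; i < LOOPS; i++)
            atomic_fetch_sub(&a, 1);    /* like rte_atomicNN_dec() */
        return NULL;
    }

    int main(void)
    {
        pthread_t t[NWORKERS];
        int i;

        atomic_init(&a, 1 << 10);    /* same seed value as the test */
        for (i = 0; i < NWORKERS; i++)
            pthread_create(&t[i], NULL, worker, NULL);
        for (i = 0; i < NWORKERS; i++)
            pthread_join(t[i], NULL);
        /* every increment is paired with a decrement, so the final
         * value must equal the initial one */
        printf("a = %d (expected %d)\n", atomic_load(&a), 1 << 10);
        return 0;
    }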
@@ -153,11 +152,11 @@ test_atomic_tas(__attribute__((unused)) void *arg)
 		;
 
 	if (rte_atomic16_test_and_set(&a16))
-		rte_atomic32_inc(&count);
+		rte_atomic64_inc(&count);
 	if (rte_atomic32_test_and_set(&a32))
-		rte_atomic32_inc(&count);
+		rte_atomic64_inc(&count);
 	if (rte_atomic64_test_and_set(&a64))
-		rte_atomic32_inc(&count);
+		rte_atomic64_inc(&count);
 
 	return 0;
 }
@@ -175,22 +174,22 @@ test_atomic_addsub_and_return(__attribute__((unused)) void *arg)
 
 	for (i = 0; i < N; i++) {
 		tmp16 = rte_atomic16_add_return(&a16, 1);
-		rte_atomic32_add(&count, tmp16);
+		rte_atomic64_add(&count, tmp16);
 
 		tmp16 = rte_atomic16_sub_return(&a16, 1);
-		rte_atomic32_sub(&count, tmp16+1);
+		rte_atomic64_sub(&count, tmp16+1);
 
 		tmp32 = rte_atomic32_add_return(&a32, 1);
-		rte_atomic32_add(&count, tmp32);
+		rte_atomic64_add(&count, tmp32);
 
 		tmp32 = rte_atomic32_sub_return(&a32, 1);
-		rte_atomic32_sub(&count, tmp32+1);
+		rte_atomic64_sub(&count, tmp32+1);
 
 		tmp64 = rte_atomic64_add_return(&a64, 1);
-		rte_atomic32_add(&count, tmp64);
+		rte_atomic64_add(&count, tmp64);
 
 		tmp64 = rte_atomic64_sub_return(&a64, 1);
-		rte_atomic32_sub(&count, tmp64+1);
+		rte_atomic64_sub(&count, tmp64+1);
 	}
 
 	return 0;
@@ -213,13 +212,13 @@ test_atomic_inc_and_test(__attribute__((unused)) void *arg)
 		;
 
 	if (rte_atomic16_inc_and_test(&a16)) {
-		rte_atomic32_inc(&count);
+		rte_atomic64_inc(&count);
 	}
 	if (rte_atomic32_inc_and_test(&a32)) {
-		rte_atomic32_inc(&count);
+		rte_atomic64_inc(&count);
 	}
 	if (rte_atomic64_inc_and_test(&a64)) {
-		rte_atomic32_inc(&count);
+		rte_atomic64_inc(&count);
 	}
 
 	return 0;
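The hunks above only widen *count* to 64 bits; the test-and-set logic is
untouched. The point of that subtest is that rte_atomicNN_test_and_set()
returns 1 only to the single caller that flips the variable from 0 to 1, so
no matter how many lcores race, the counter can grow by exactly one per
variable width. A standalone analogue of that race using C11 atomic_flag
(all names illustrative, not DPDK code):

    #include <stdatomic.h>
    #include <pthread.h>
    #include <stdio.h>

    static atomic_flag flag = ATOMIC_FLAG_INIT;
    static atomic_int winners;

    static void *worker(void *arg)
    {
        (void)arg;
        /* atomic_flag_test_and_set() returns the *previous* value, so
         * exactly one thread observes "false" and wins; note that
         * rte_atomicNN_test_and_set() uses the opposite convention and
         * returns 1 to the winner. */
        if (!atomic_flag_test_and_set(&flag))
            atomic_fetch_add(&winners, 1);
        return NULL;
    }

    int main(void)
    {
        pthread_t t[8];
        int i;

        for (i = 0; i < 8; i++)
            pthread_create(&t[i], NULL, worker, NULL);
        for (i = 0; i < 8; i++)
            pthread_join(t[i], NULL);
        printf("winners = %d (expected 1)\n", atomic_load(&winners));
        return 0;
    }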
@@ -240,24 +239,215 @@ test_atomic_dec_and_test(__attribute__((unused)) void *arg)
 		;
 
 	if (rte_atomic16_dec_and_test(&a16))
-		rte_atomic32_inc(&count);
+		rte_atomic64_inc(&count);
 	if (rte_atomic32_dec_and_test(&a32))
-		rte_atomic32_inc(&count);
+		rte_atomic64_inc(&count);
 	if (rte_atomic64_dec_and_test(&a64))
-		rte_atomic32_inc(&count);
+		rte_atomic64_inc(&count);
 
 	return 0;
 }
 
-int
+#if defined(RTE_ARCH_X86_64) || defined(RTE_ARCH_ARM64)
+static rte_int128_t count128;
+
+/*
+ * rte_atomic128_cmp_exchange() should update the 128-bit counter's first
+ * 64 bits by 2 and its second 64 bits by 1 in this test. It should return
+ * true if the compare-exchange operation succeeds.
+ * The test repeats the 128-bit compare-and-swap operation for N rounds; in
+ * each iteration it runs the operation with four different memory models.
+ */
+static int
+test_atomic128_cmp_exchange(__attribute__((unused)) void *arg)
+{
+	rte_int128_t expected;
+	int success;
+	unsigned int i;
+
+	while (rte_atomic32_read(&synchro) == 0)
+		;
+
+	expected = count128;
+
+	for (i = 0; i < N; i++) {
+		do {
+			rte_int128_t desired;
+
+			desired.val[0] = expected.val[0] + 2;
+			desired.val[1] = expected.val[1] + 1;
+
+			success = rte_atomic128_cmp_exchange(&count128,
+				&expected, &desired, 1,
+				__ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
+		} while (success == 0);
+
+		do {
+			rte_int128_t desired;
+
+			desired.val[0] = expected.val[0] + 2;
+			desired.val[1] = expected.val[1] + 1;
+
+			success = rte_atomic128_cmp_exchange(&count128,
+				&expected, &desired, 1,
+				__ATOMIC_RELEASE, __ATOMIC_RELAXED);
+		} while (success == 0);
+
+		do {
+			rte_int128_t desired;
+
+			desired.val[0] = expected.val[0] + 2;
+			desired.val[1] = expected.val[1] + 1;
+
+			success = rte_atomic128_cmp_exchange(&count128,
+				&expected, &desired, 1,
+				__ATOMIC_ACQ_REL, __ATOMIC_RELAXED);
+		} while (success == 0);
+
+		do {
+			rte_int128_t desired;
+
+			desired.val[0] = expected.val[0] + 2;
+			desired.val[1] = expected.val[1] + 1;
+
+			success = rte_atomic128_cmp_exchange(&count128,
+				&expected, &desired, 1,
+				__ATOMIC_RELAXED, __ATOMIC_RELAXED);
+		} while (success == 0);
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * Helper definitions/variables/functions for
+ * atomic exchange tests
+ */
+typedef union {
+	uint16_t u16;
+	uint8_t u8[2];
+} test16_t;
+
+typedef union {
+	uint32_t u32;
+	uint16_t u16[2];
+	uint8_t u8[4];
+} test32_t;
+
+typedef union {
+	uint64_t u64;
+	uint32_t u32[2];
+	uint16_t u16[4];
+	uint8_t u8[8];
+} test64_t;
+
+const uint8_t CRC8_POLY = 0x91;
+uint8_t crc8_table[256];
+
+volatile uint16_t token16;
+volatile uint32_t token32;
+volatile uint64_t token64;
+
+static void
+build_crc8_table(void)
+{
+	uint8_t val;
+	int i, j;
+
+	for (i = 0; i < 256; i++) {
+		val = i;
+		for (j = 0; j < 8; j++) {
+			if (val & 1)
+				val ^= CRC8_POLY;
+			val >>= 1;
+		}
+		crc8_table[i] = val;
+	}
+}
+
+static uint8_t
+get_crc8(uint8_t *message, int length)
+{
+	uint8_t crc = 0;
+	int i;
+
+	for (i = 0; i < length; i++)
+		crc = crc8_table[crc ^ message[i]];
+	return crc;
+}
+
+/*
+ * The atomic exchange test sets up a token in memory and
+ * then spins up multiple lcores whose job is to generate
+ * new tokens, exchange that new token for the old one held
+ * in memory, and then verify that the old token is still
+ * valid (i.e. the exchange did not corrupt the token).
+ *
+ * A token is made up of random data and 8 bits of CRC
+ * covering that random data. The following is an example
+ * of a 64-bit token.
+ *
+ *   +------------+------------+
+ *   | 63      56 | 55       0 |
+ *   +------------+------------+
+ *   |    CRC8    |    Data    |
+ *   +------------+------------+
+ */
+static int
+test_atomic_exchange(__attribute__((unused)) void *arg)
+{
+	int i;
+	test16_t nt16, ot16; /* new token, old token */
+	test32_t nt32, ot32;
+	test64_t nt64, ot64;
+
+	/* Wait until all of the other threads have been dispatched */
+	while (rte_atomic32_read(&synchro) == 0)
+		;
+
+	/*
+	 * Let the battle begin! Every thread attempts to steal the current
+	 * token with an atomic exchange operation and install its own newly
+	 * generated token. If the old token is valid (i.e. it has the
+	 * appropriate CRC8 checksum for the data) then the test iteration
+	 * has passed. If the token is invalid, increment the counter.
+	 */
+	for (i = 0; i < N; i++) {
+
+		/* Test 64-bit Atomic Exchange */
+		nt64.u64 = rte_rand();
+		nt64.u8[7] = get_crc8(&nt64.u8[0], sizeof(nt64) - 1);
+		ot64.u64 = rte_atomic64_exchange(&token64, nt64.u64);
+		if (ot64.u8[7] != get_crc8(&ot64.u8[0], sizeof(ot64) - 1))
+			rte_atomic64_inc(&count);
+
+		/* Test 32-bit Atomic Exchange */
+		nt32.u32 = (uint32_t)rte_rand();
+		nt32.u8[3] = get_crc8(&nt32.u8[0], sizeof(nt32) - 1);
+		ot32.u32 = rte_atomic32_exchange(&token32, nt32.u32);
+		if (ot32.u8[3] != get_crc8(&ot32.u8[0], sizeof(ot32) - 1))
+			rte_atomic64_inc(&count);
+
+		/* Test 16-bit Atomic Exchange */
+		nt16.u16 = (uint16_t)rte_rand();
+		nt16.u8[1] = get_crc8(&nt16.u8[0], sizeof(nt16) - 1);
+		ot16.u16 = rte_atomic16_exchange(&token16, nt16.u16);
+		if (ot16.u8[1] != get_crc8(&ot16.u8[0], sizeof(ot16) - 1))
+			rte_atomic64_inc(&count);
+	}
+
+	return 0;
+}
+static int
 test_atomic(void)
 {
 	rte_atomic16_init(&a16);
 	rte_atomic32_init(&a32);
 	rte_atomic64_init(&a64);
-	rte_atomic32_init(&count);
+	rte_atomic64_init(&count);
 	rte_atomic32_init(&synchro);
 
 	rte_atomic16_set(&a16, 1UL << 10);
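The retry loops above are the canonical weak-CAS idiom: when
rte_atomic128_cmp_exchange() fails, it refreshes *expected* with the value it
actually found, so the next attempt rebuilds *desired* from fresh data. A
rough standalone equivalent of one such loop, written against GCC's __atomic
builtins on a 16-byte union (a sketch only: it assumes GCC or clang on
x86_64 with -mcx16, or aarch64, and may need -latomic; it is not the DPDK
implementation):

    #include <stdint.h>
    #include <stdio.h>

    typedef union {
        __int128 i128;      /* whole-value view for the builtin */
        uint64_t val[2];    /* same layout as rte_int128_t */
    } int128_u;

    static int128_u counter;    /* shared in a real multi-threaded run */

    int main(void)
    {
        int128_u expected = counter;    /* snapshot, as the test does */
        int128_u desired;
        int ok;

        do {
            desired.val[0] = expected.val[0] + 2;
            desired.val[1] = expected.val[1] + 1;
            /* weak CAS, acquire on success, relaxed on failure;
             * on failure 'expected' is refreshed automatically */
            ok = __atomic_compare_exchange(&counter.i128,
                    &expected.i128, &desired.i128, 1,
                    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
        } while (!ok);

        printf("counter = { %llu, %llu }\n",
               (unsigned long long)counter.val[0],
               (unsigned long long)counter.val[1]);
        return 0;
    }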
@@ -291,13 +481,13 @@ test_atomic(void)
 	rte_atomic64_set(&a64, 0);
 	rte_atomic32_set(&a32, 0);
 	rte_atomic16_set(&a16, 0);
-	rte_atomic32_set(&count, 0);
+	rte_atomic64_set(&count, 0);
 
 	rte_eal_mp_remote_launch(test_atomic_tas, NULL, SKIP_MASTER);
 	rte_atomic32_set(&synchro, 1);
 	rte_eal_mp_wait_lcore();
 	rte_atomic32_set(&synchro, 0);
 
-	if (rte_atomic32_read(&count) != NUM_ATOMIC_TYPES) {
+	if (rte_atomic64_read(&count) != NUM_ATOMIC_TYPES) {
 		printf("Atomic test and set failed\n");
 		return -1;
 	}
@@ -307,14 +497,14 @@ test_atomic(void)
 	rte_atomic64_set(&a64, 0);
 	rte_atomic32_set(&a32, 0);
 	rte_atomic16_set(&a16, 0);
-	rte_atomic32_set(&count, 0);
+	rte_atomic64_set(&count, 0);
 
 	rte_eal_mp_remote_launch(test_atomic_addsub_and_return, NULL,
 				 SKIP_MASTER);
 	rte_atomic32_set(&synchro, 1);
 	rte_eal_mp_wait_lcore();
 	rte_atomic32_set(&synchro, 0);
 
-	if (rte_atomic32_read(&count) != 0) {
+	if (rte_atomic64_read(&count) != 0) {
 		printf("Atomic add/sub+return failed\n");
 		return -1;
 	}
@@ -338,7 +528,7 @@ test_atomic(void)
 	rte_atomic32_clear(&a32);
 	rte_atomic16_clear(&a16);
 	rte_atomic32_clear(&synchro);
-	rte_atomic32_clear(&count);
+	rte_atomic64_clear(&count);
 
 	rte_atomic64_set(&a64, (int64_t)(1 - (int64_t)rte_lcore_count()));
 	rte_atomic32_set(&a32, (int32_t)(1 - (int32_t)rte_lcore_count()));
@@ -348,7 +538,7 @@ test_atomic(void)
 	rte_eal_mp_wait_lcore();
 	rte_atomic32_clear(&synchro);
 
-	if (rte_atomic32_read(&count) != NUM_ATOMIC_TYPES) {
+	if (rte_atomic64_read(&count) != NUM_ATOMIC_TYPES) {
 		printf("Atomic inc and test failed %d\n", (int)count.cnt);
 		return -1;
 	}
@@ -360,7 +550,7 @@ test_atomic(void)
 	printf("dec and test\n");
 
 	rte_atomic32_clear(&synchro);
-	rte_atomic32_clear(&count);
+	rte_atomic64_clear(&count);
 
 	rte_atomic64_set(&a64, (int64_t)(rte_lcore_count() - 1));
 	rte_atomic32_set(&a32, (int32_t)(rte_lcore_count() - 1));
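Each block above repeats the same choreography: reset the variables and
*synchro*, launch the worker on every slave lcore, release them all at once
by setting *synchro*, wait for completion, then check the counter. A
condensed standalone analogue with POSIX threads, where an atomic flag
stands in for *synchro* (all names illustrative):

    #include <stdatomic.h>
    #include <pthread.h>
    #include <stdio.h>

    #define NWORKERS 4

    static atomic_int synchro;    /* 0 = hold, 1 = go */
    static atomic_int count;

    static void *subtest(void *arg)
    {
        (void)arg;
        while (atomic_load(&synchro) == 0)
            ;                           /* spin until released */
        atomic_fetch_add(&count, 1);    /* the test body goes here */
        return NULL;
    }

    int main(void)
    {
        pthread_t t[NWORKERS];
        int i;

        atomic_store(&synchro, 0);      /* "rte_atomic32_clear(&synchro)" */
        atomic_store(&count, 0);
        for (i = 0; i < NWORKERS; i++)  /* "rte_eal_mp_remote_launch()" */
            pthread_create(&t[i], NULL, subtest, NULL);
        atomic_store(&synchro, 1);      /* start all workers together */
        for (i = 0; i < NWORKERS; i++)  /* "rte_eal_mp_wait_lcore()" */
            pthread_join(t[i], NULL);
        printf("count = %d (expected %d)\n",
               atomic_load(&count), NWORKERS);
        return 0;
    }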
@@ -370,11 +560,75 @@ test_atomic(void)
 	rte_eal_mp_wait_lcore();
 	rte_atomic32_clear(&synchro);
 
-	if (rte_atomic32_read(&count) != NUM_ATOMIC_TYPES) {
+	if (rte_atomic64_read(&count) != NUM_ATOMIC_TYPES) {
 		printf("Atomic dec and test failed\n");
 		return -1;
 	}
 
+#if defined(RTE_ARCH_X86_64) || defined(RTE_ARCH_ARM64)
+	/*
+	 * This case tests the functionality of the rte_atomic128_cmp_exchange
+	 * API. It calls rte_atomic128_cmp_exchange with four kinds of memory
+	 * models successively on each slave core. Each time the 128-bit
+	 * compare-and-swap operation succeeds, it updates the global 128-bit
+	 * counter: +2 for the first 64 bits and +1 for the second 64 bits.
+	 * Each slave core iterates this test N times.
+	 * At the end of the test, verify that the first and second 64 bits
+	 * of the 128-bit counter differ by the total number of iterations.
+	 * If they do, the test passes.
+	 */
+	printf("128-bit compare and swap test\n");
+	uint64_t iterations = 0;
+
+	rte_atomic32_clear(&synchro);
+	count128.val[0] = 0;
+	count128.val[1] = 0;
+
+	rte_eal_mp_remote_launch(test_atomic128_cmp_exchange, NULL,
+				 SKIP_MASTER);
+	rte_atomic32_set(&synchro, 1);
+	rte_eal_mp_wait_lcore();
+	rte_atomic32_clear(&synchro);
+
+	iterations = count128.val[0] - count128.val[1];
+	if (iterations != 4*N*(rte_lcore_count()-1)) {
+		printf("128-bit compare and swap failed\n");
+		return -1;
+	}
+#endif
+
+	/*
+	 * Test 16/32/64-bit atomic exchange.
+	 */
+	test64_t t;
+
+	printf("exchange test\n");
+
+	rte_atomic32_clear(&synchro);
+	rte_atomic64_clear(&count);
+
+	/* Generate the CRC8 lookup table */
+	build_crc8_table();
+
+	/* Create the initial tokens used by the test */
+	t.u64 = rte_rand();
+	token16 = (get_crc8(&t.u8[0], sizeof(token16) - 1) << 8)
+		| (t.u16[0] & 0x00ff);
+	token32 = ((uint32_t)get_crc8(&t.u8[0], sizeof(token32) - 1) << 24)
+		| (t.u32[0] & 0x00ffffff);
+	token64 = ((uint64_t)get_crc8(&t.u8[0], sizeof(token64) - 1) << 56)
+		| (t.u64 & 0x00ffffffffffffff);
+
+	rte_eal_mp_remote_launch(test_atomic_exchange, NULL, SKIP_MASTER);
+	rte_atomic32_set(&synchro, 1);
+	rte_eal_mp_wait_lcore();
+	rte_atomic32_clear(&synchro);
+
+	if (rte_atomic64_read(&count) > 0) {
+		printf("Atomic exchange test failed\n");
+		return -1;
+	}
+
 	return 0;
 }
-
+REGISTER_TEST_COMMAND(atomic_autotest, test_atomic);
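The REGISTER_TEST_COMMAND() line at the end hooks the test into the DPDK
test harness, so it can be invoked as *atomic_autotest* from the test
binary's interactive prompt. For readers who want to exercise the token
scheme in isolation, the CRC8 pieces are self-contained; below is a minimal
harness (the CRC8 code is lightly adapted from the patch, main() is
illustrative, and a little-endian byte layout is assumed, as in the test
itself):

    #include <stdint.h>
    #include <stdio.h>

    static const uint8_t CRC8_POLY = 0x91;
    static uint8_t crc8_table[256];

    static void build_crc8_table(void)    /* as in the patch */
    {
        for (int i = 0; i < 256; i++) {
            uint8_t val = i;
            for (int j = 0; j < 8; j++) {
                if (val & 1)
                    val ^= CRC8_POLY;
                val >>= 1;
            }
            crc8_table[i] = val;
        }
    }

    static uint8_t get_crc8(const uint8_t *message, int length)
    {
        uint8_t crc = 0;
        for (int i = 0; i < length; i++)
            crc = crc8_table[crc ^ message[i]];
        return crc;
    }

    int main(void)
    {
        union { uint64_t u64; uint8_t u8[8]; } token;

        build_crc8_table();
        /* 56 bits of payload; CRC8 of those bytes goes in the top byte */
        token.u64 = 0x0123456789abcdefULL & 0x00ffffffffffffffULL;
        token.u8[7] = get_crc8(&token.u8[0], sizeof(token) - 1);
        /* a reader can now detect a torn or corrupted token */
        printf("token %s\n",
               token.u8[7] == get_crc8(&token.u8[0], sizeof(token) - 1)
               ? "intact" : "corrupt");
        return 0;
    }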