X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=app%2Ftest%2Ftest_func_reentrancy.c;h=838ab6f0f90b03303becc71a4e8bef7d58f832c1;hb=0f4531903acb42d894ff5bd4373a222400837315;hp=45928fa56301b2842974653053ab203e49363468;hpb=942405f9e2f2c22aa817be374ccfe939a72df2ce;p=dpdk.git

diff --git a/app/test/test_func_reentrancy.c b/app/test/test_func_reentrancy.c
index 45928fa563..838ab6f0f9 100644
--- a/app/test/test_func_reentrancy.c
+++ b/app/test/test_func_reentrancy.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
  */

 #include <string.h>
@@ -44,10 +15,8 @@
 #include <rte_log.h>
 #include <rte_debug.h>
 #include <rte_memory.h>
-#include <rte_memzone.h>
 #include <rte_launch.h>
 #include <rte_cycles.h>
-#include <rte_tailq.h>
 #include <rte_eal.h>
 #include <rte_per_lcore.h>
 #include <rte_lcore.h>
@@ -58,15 +27,15 @@
 #include <rte_spinlock.h>
 #include <rte_malloc.h>

-#ifdef RTE_LIBRTE_HASH
+#ifdef RTE_LIB_HASH
 #include <rte_hash.h>
 #include <rte_fbk_hash.h>
 #include <rte_jhash.h>
-#endif /* RTE_LIBRTE_HASH */
+#endif /* RTE_LIB_HASH */

-#ifdef RTE_LIBRTE_LPM
+#ifdef RTE_LIB_LPM
 #include <rte_lpm.h>
-#endif /* RTE_LIBRTE_LPM */
+#endif /* RTE_LIB_LPM */

 #include "test.h"
@@ -76,18 +45,20 @@
 typedef int (*case_func_t)(void* arg);
 typedef void (*case_clean_t)(unsigned lcore_id);

 #define MAX_STRING_SIZE     (256)
-#define MAX_ITER_TIMES      (16)
-#define MAX_LPM_ITER_TIMES  (8)
+#define MAX_ITER_MULTI      (16)
+#define MAX_ITER_ONCE       (4)
+#define MAX_LPM_ITER_TIMES  (6)

-#define MEMPOOL_ELT_SIZE    (0)
+#define MEMPOOL_ELT_SIZE    (sizeof(uint32_t))
 #define MEMPOOL_SIZE        (4)

-#define MAX_LCORES	RTE_MAX_MEMZONE / (MAX_ITER_TIMES * 4U)
+#define MAX_LCORES	(RTE_MAX_MEMZONE / (MAX_ITER_MULTI * 4U))

+static rte_atomic32_t obj_count = RTE_ATOMIC32_INIT(0);
 static rte_atomic32_t synchro = RTE_ATOMIC32_INIT(0);

-#define WAIT_SYNCHRO_FOR_SLAVES()   do{ \
-	if (lcore_self != rte_get_master_lcore())                  \
+#define WAIT_SYNCHRO_FOR_WORKERS() do { \
+	if (lcore_self != rte_get_main_lcore())                  \
 		while (rte_atomic32_read(&synchro) == 0); \
 } while(0)
@@ -95,12 +66,13 @@ static rte_atomic32_t synchro = RTE_ATOMIC32_INIT(0);
  * rte_eal_init only init once
  */
 static int
-test_eal_init_once(__attribute__((unused)) void *arg)
+test_eal_init_once(__rte_unused void *arg)
 {
 	unsigned lcore_self = rte_lcore_id();

-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();

+	rte_atomic32_set(&obj_count, 1); /* silent the check in the caller */
 	if (rte_eal_init(0, NULL) != -1)
 		return -1;

@@ -110,36 +82,53 @@ test_eal_init_once(__attribute__((unused)) void *arg)
 /*
  * ring create/lookup reentrancy test
  */
+static void
+ring_clean(unsigned int lcore_id)
+{
+	struct rte_ring *rp;
+	char ring_name[MAX_STRING_SIZE];
+	int i;
+
+	rp = rte_ring_lookup("fr_test_once");
+	if (rp != NULL)
+		rte_ring_free(rp);
+
+	for (i = 0; i < MAX_ITER_MULTI; i++) {
+		snprintf(ring_name, sizeof(ring_name),
+			 "fr_test_%d_%d", lcore_id, i);
+		rp = rte_ring_lookup(ring_name);
+		if (rp != NULL)
+			rte_ring_free(rp);
+	}
+}
+
 static int
-ring_create_lookup(__attribute__((unused)) void *arg)
+ring_create_lookup(__rte_unused void *arg)
 {
 	unsigned lcore_self = rte_lcore_id();
 	struct rte_ring * rp;
 	char ring_name[MAX_STRING_SIZE];
 	int i;

-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();

 	/* create the same ring simultaneously on all threads */
-	for (i = 0; i < MAX_ITER_TIMES; i++) {
+	for (i = 0; i < MAX_ITER_ONCE; i++) {
 		rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
-		if ((NULL == rp) && (rte_ring_lookup("fr_test_once") == NULL))
-			return -1;
+		if (rp != NULL)
+			rte_atomic32_inc(&obj_count);
 	}

 	/* create/lookup new ring several times */
-	for (i = 0; i < MAX_ITER_TIMES; i++) {
+	for (i = 0; i < MAX_ITER_MULTI; i++) {
 		snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
 		rp = rte_ring_create(ring_name, 4096, SOCKET_ID_ANY, 0);
 		if (NULL == rp)
 			return -1;
 		if (rte_ring_lookup(ring_name) != rp)
 			return -1;
-	}

-	/* verify all ring created sucessful */
-	for (i = 0; i < MAX_ITER_TIMES; i++) {
-		snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
+		/* verify all ring created successful */
 		if (rte_ring_lookup(ring_name) == NULL)
 			return -1;
 	}
@@ -148,7 +137,7 @@ ring_create_lookup(__attribute__((unused)) void *arg)
 }

 static void
-my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
+my_obj_init(struct rte_mempool *mp, __rte_unused void *arg,
 	    void *obj, unsigned i)
 {
 	uint32_t *objnum = obj;
@@ -156,29 +145,49 @@ my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
 	*objnum = i;
 }

+static void
+mempool_clean(unsigned int lcore_id)
+{
+	struct rte_mempool *mp;
+	char mempool_name[MAX_STRING_SIZE];
+	int i;
+
+	mp = rte_mempool_lookup("fr_test_once");
+	if (mp != NULL)
+		rte_mempool_free(mp);
+
+	for (i = 0; i < MAX_ITER_MULTI; i++) {
+		snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d",
+			 lcore_id, i);
+		mp = rte_mempool_lookup(mempool_name);
+		if (mp != NULL)
+			rte_mempool_free(mp);
+	}
+}
+
 static int
-mempool_create_lookup(__attribute__((unused)) void *arg)
+mempool_create_lookup(__rte_unused void *arg)
 {
 	unsigned lcore_self = rte_lcore_id();
 	struct rte_mempool * mp;
 	char mempool_name[MAX_STRING_SIZE];
 	int i;

-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();

 	/* create the same mempool simultaneously on all threads */
-	for (i = 0; i < MAX_ITER_TIMES; i++) {
+	for (i = 0; i < MAX_ITER_ONCE; i++) {
 		mp = rte_mempool_create("fr_test_once", MEMPOOL_SIZE,
 					MEMPOOL_ELT_SIZE, 0, 0,
 					NULL, NULL,
 					my_obj_init, NULL,
 					SOCKET_ID_ANY, 0);
-		if ((NULL == mp) && (rte_mempool_lookup("fr_test_once") == NULL))
-			return -1;
+		if (mp != NULL)
+			rte_atomic32_inc(&obj_count);
 	}

 	/* create/lookup new ring several times */
-	for (i = 0; i < MAX_ITER_TIMES; i++) {
+	for (i = 0; i < MAX_ITER_MULTI; i++) {
 		snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
 		mp = rte_mempool_create(mempool_name, MEMPOOL_SIZE,
 					MEMPOOL_ELT_SIZE, 0, 0,
@@ -189,11 +198,8 @@ mempool_create_lookup(__attribute__((unused)) void *arg)
 			return -1;
 		if (rte_mempool_lookup(mempool_name) != mp)
 			return -1;
-	}

-	/* verify all ring created sucessful */
-	for (i = 0; i < MAX_ITER_TIMES; i++) {
-		snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
+		/* verify all ring created successful */
 		if (rte_mempool_lookup(mempool_name) == NULL)
 			return -1;
 	}
@@ -201,7 +207,7 @@ mempool_create_lookup(__attribute__((unused)) void *arg)
 	return 0;
 }

-#ifdef RTE_LIBRTE_HASH
+#ifdef RTE_LIB_HASH
 static void
 hash_clean(unsigned lcore_id)
 {
@@ -209,7 +215,11 @@ hash_clean(unsigned lcore_id)
 	struct rte_hash *handle;
 	int i;

-	for (i = 0; i < MAX_ITER_TIMES; i++) {
+	handle = rte_hash_find_existing("fr_test_once");
+	if (handle != NULL)
+		rte_hash_free(handle);
+
+	for (i = 0; i < MAX_ITER_MULTI; i++) {
 		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_id, i);

 		if ((handle = rte_hash_find_existing(hash_name)) != NULL)
@@ -218,7 +228,7 @@ hash_clean(unsigned lcore_id)
 }

 static int
-hash_create_free(__attribute__((unused)) void *arg)
+hash_create_free(__rte_unused void *arg)
 {
 	unsigned lcore_self = rte_lcore_id();
 	struct rte_hash *handle;
@@ -227,25 +237,24 @@ hash_create_free(__attribute__((unused)) void *arg)
 	struct rte_hash_parameters hash_params = {
 		.name = NULL,
 		.entries = 16,
-		.bucket_entries = 4,
 		.key_len = 4,
-		.hash_func = (rte_hash_function)rte_jhash2,
+		.hash_func = (rte_hash_function)rte_jhash_32b,
 		.hash_func_init_val = 0,
 		.socket_id = 0,
 	};

-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();

 	/* create the same hash simultaneously on all threads */
 	hash_params.name = "fr_test_once";
-	for (i = 0; i < MAX_ITER_TIMES; i++) {
+	for (i = 0; i < MAX_ITER_ONCE; i++) {
 		handle = rte_hash_create(&hash_params);
-		if ((NULL == handle) &&
-			(rte_hash_find_existing("fr_test_once") == NULL))
-			return -1;
+		if (handle != NULL)
+			rte_atomic32_inc(&obj_count);
 	}

 	/* create mutiple times simultaneously */
-	for (i = 0; i < MAX_ITER_TIMES; i++) {
+	for (i = 0; i < MAX_ITER_MULTI; i++) {
 		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
 		hash_params.name = hash_name;
@@ -258,12 +267,8 @@ hash_create_free(__attribute__((unused)) void *arg)
 			return -1;

 		rte_hash_free(handle);
-	}
-
-	/* verify free correct */
-	for (i = 0; i < MAX_ITER_TIMES; i++) {
-		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
+		/* verify free correct */
 		if (NULL != rte_hash_find_existing(hash_name))
			return -1;
 	}
@@ -278,7 +283,11 @@ fbk_clean(unsigned lcore_id)
 	struct rte_fbk_hash_table *handle;
 	int i;

-	for (i = 0; i < MAX_ITER_TIMES; i++) {
+	handle = rte_fbk_hash_find_existing("fr_test_once");
+	if (handle != NULL)
+		rte_fbk_hash_free(handle);
+
+	for (i = 0; i < MAX_ITER_MULTI; i++) {
 		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_id, i);

 		if ((handle = rte_fbk_hash_find_existing(fbk_name)) != NULL)
@@ -287,7 +296,7 @@ fbk_clean(unsigned lcore_id)
 }

 static int
-fbk_create_free(__attribute__((unused)) void *arg)
+fbk_create_free(__rte_unused void *arg)
 {
 	unsigned lcore_self = rte_lcore_id();
 	struct rte_fbk_hash_table *handle;
@@ -302,18 +311,18 @@ fbk_create_free(__attribute__((unused)) void *arg)
 		.init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
 	};

-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();

 	/* create the same fbk hash table simultaneously on all threads */
 	fbk_params.name = "fr_test_once";
-	for (i = 0; i < MAX_ITER_TIMES; i++) {
+	for (i = 0; i < MAX_ITER_ONCE; i++) {
 		handle = rte_fbk_hash_create(&fbk_params);
-		if ((NULL == handle) && (rte_fbk_hash_find_existing("fr_test_once") == NULL))
-			return -1;
+		if (handle != NULL)
+			rte_atomic32_inc(&obj_count);
 	}

 	/* create mutiple fbk tables simultaneously */
-	for (i = 0; i < MAX_ITER_TIMES; i++) {
+	for (i = 0; i < MAX_ITER_MULTI; i++) {
 		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
 		fbk_params.name = fbk_name;
@@ -326,28 +335,28 @@ fbk_create_free(__attribute__((unused)) void *arg)
 			return -1;

 		rte_fbk_hash_free(handle);
-	}
-
-	/* verify free correct */
-	for (i = 0; i < MAX_ITER_TIMES; i++) {
-		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
+		/* verify free correct */
 		if (NULL != rte_fbk_hash_find_existing(fbk_name))
 			return -1;
 	}

 	return 0;
 }
-#endif /* RTE_LIBRTE_HASH */
+#endif /* RTE_LIB_HASH */

-#ifdef RTE_LIBRTE_LPM
+#ifdef RTE_LIB_LPM
 static void
-lpm_clean(unsigned lcore_id)
+lpm_clean(unsigned int lcore_id)
 {
 	char lpm_name[MAX_STRING_SIZE];
 	struct rte_lpm *lpm;
 	int i;

+	lpm = rte_lpm_find_existing("fr_test_once");
+	if (lpm != NULL)
+		rte_lpm_free(lpm);
+
 	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
 		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_id, i);

@@ -357,26 +366,31 @@ lpm_clean(unsigned lcore_id)
 }

 static int
-lpm_create_free(__attribute__((unused)) void *arg)
+lpm_create_free(__rte_unused void *arg)
 {
 	unsigned lcore_self = rte_lcore_id();
 	struct rte_lpm *lpm;
+	struct rte_lpm_config config;
+
+	config.max_rules = 4;
+	config.number_tbl8s = 256;
+	config.flags = 0;
 	char lpm_name[MAX_STRING_SIZE];
 	int i;

-	WAIT_SYNCHRO_FOR_SLAVES();
+	WAIT_SYNCHRO_FOR_WORKERS();

 	/* create the same lpm simultaneously on all threads */
-	for (i = 0; i < MAX_ITER_TIMES; i++) {
-		lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, 4, RTE_LPM_HEAP);
-		if ((NULL == lpm) &&
-			(rte_lpm_find_existing("fr_test_once") == NULL))
-			return -1;
+	for (i = 0; i < MAX_ITER_ONCE; i++) {
+		lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, &config);
+		if (lpm != NULL)
+			rte_atomic32_inc(&obj_count);
 	}

 	/* create mutiple fbk tables simultaneously */
 	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
 		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
-		lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, 4, RTE_LPM_HEAP);
+		lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, &config);
 		if (NULL == lpm)
 			return -1;
@@ -385,18 +399,15 @@ lpm_create_free(__attribute__((unused)) void *arg)
 			return -1;

 		rte_lpm_free(lpm);
-	}

-	/* verify free correct */
-	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
-		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
+		/* verify free correct */
 		if (NULL != rte_lpm_find_existing(lpm_name))
 			return -1;
 	}

 	return 0;
 }
-#endif /* RTE_LIBRTE_LPM */
+#endif /* RTE_LIB_LPM */

 struct test_case{
 	case_func_t    func;
@@ -408,15 +419,16 @@ struct test_case{
 /* All test cases in the test suite */
 struct test_case test_cases[] = {
 	{ test_eal_init_once, NULL, NULL, "eal init once" },
-	{ ring_create_lookup, NULL, NULL, "ring create/lookup" },
-	{ mempool_create_lookup, NULL, NULL, "mempool create/lookup" },
-#ifdef RTE_LIBRTE_HASH
+	{ ring_create_lookup, NULL, ring_clean, "ring create/lookup" },
+	{ mempool_create_lookup, NULL, mempool_clean,
+			"mempool create/lookup" },
+#ifdef RTE_LIB_HASH
 	{ hash_create_free, NULL, hash_clean, "hash create/free" },
 	{ fbk_create_free, NULL, fbk_clean, "fbk create/free" },
-#endif /* RTE_LIBRTE_HASH */
-#ifdef RTE_LIBRTE_LPM
+#endif /* RTE_LIB_HASH */
+#ifdef RTE_LIB_LPM
 	{ lpm_create_free, NULL, lpm_clean, "lpm create/free" },
-#endif /* RTE_LIBRTE_LPM */
+#endif /* RTE_LIB_LPM */
 };

 /**
@@ -425,17 +437,19 @@ struct test_case test_cases[] = {
 static int
 launch_test(struct test_case *pt_case)
 {
+	unsigned int lcore_id;
+	unsigned int cores;
+	unsigned int count;
 	int ret = 0;
-	unsigned lcore_id;
-	unsigned cores_save = rte_lcore_count();
-	unsigned cores = RTE_MIN(cores_save, MAX_LCORES);

 	if (pt_case->func == NULL)
 		return -1;

+	rte_atomic32_set(&obj_count, 0);
 	rte_atomic32_set(&synchro, 0);

-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+	cores = RTE_MIN(rte_lcore_count(), MAX_LCORES);
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
 		if (cores == 1)
 			break;
 		cores--;
@@ -447,18 +461,23 @@ launch_test(struct test_case *pt_case)
 	if (pt_case->func(pt_case->arg) < 0)
 		ret = -1;

-	cores = cores_save;
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
-		if (cores == 1)
-			break;
-		cores--;
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
 		if (rte_eal_wait_lcore(lcore_id) < 0)
 			ret = -1;
+	}

+	RTE_LCORE_FOREACH(lcore_id) {
 		if (pt_case->clean != NULL)
 			pt_case->clean(lcore_id);
 	}

+	count = rte_atomic32_read(&obj_count);
+	if (count != 1) {
+		printf("%s: common object allocated %d times (should be 1)\n",
+			pt_case->name, count);
+		ret = -1;
+	}
+
 	return ret;
 }
@@ -471,14 +490,14 @@ test_func_reentrancy(void)
 	uint32_t case_id;
 	struct test_case *pt_case = NULL;

-	if (rte_lcore_count() <= 1) {
-		printf("Not enough lcore for testing\n");
-		return -1;
+	if (rte_lcore_count() < 2) {
+		printf("Not enough cores for func_reentrancy_autotest, expecting at least 2\n");
+		return TEST_SKIPPED;
 	}
 	else if (rte_lcore_count() > MAX_LCORES)
 		printf("Too many lcores, some cores will be disabled\n");

-	for (case_id = 0; case_id < sizeof(test_cases)/sizeof(struct test_case); case_id ++) {
+	for (case_id = 0; case_id < RTE_DIM(test_cases); case_id++) {
 		pt_case = &test_cases[case_id];
 		if (pt_case->func == NULL)
 			continue;
@@ -493,8 +512,4 @@ test_func_reentrancy(void)
 	return 0;
 }

-static struct test_command func_reentrancy_cmd = {
-	.command = "func_reentrancy_autotest",
-	.callback = test_func_reentrancy,
-};
-REGISTER_TEST_COMMAND(func_reentrancy_cmd);
+REGISTER_TEST_COMMAND(func_reentrancy_autotest, test_func_reentrancy);
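
The pass criterion this rework introduces is worth spelling out: every lcore races to create the same named object ("fr_test_once"), the library under test is expected to let exactly one rte_*_create() call succeed, and launch_test() now fails the case unless obj_count, incremented once per successful creation, reads exactly 1 after the workers are joined. That is also why test_eal_init_once() presets obj_count to 1: it creates no object, so it satisfies the caller's check by hand. Below is a minimal standalone sketch of the same invariant, using C11 atomics and POSIX threads instead of DPDK lcores; registry_create() is a made-up stand-in for rte_ring_create() and friends, not a DPDK API.

/*
 * Illustrative analogue of the patch's obj_count check; not part of
 * the patch. Build: cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NUM_THREADS 4

static atomic_int obj_count;                       /* successful creations */
static atomic_flag name_taken = ATOMIC_FLAG_INIT;  /* toy name registry */

/* Succeeds for exactly one caller, as rte_ring_create("fr_test_once")
 * is expected to when all lcores race on the same name. */
static int
registry_create(void)
{
	return atomic_flag_test_and_set(&name_taken) ? -1 : 0;
}

static void *
worker(void *arg)
{
	(void)arg;
	if (registry_create() == 0)        /* only the winner increments */
		atomic_fetch_add(&obj_count, 1);
	return NULL;
}

int
main(void)
{
	pthread_t t[NUM_THREADS];
	int i;

	for (i = 0; i < NUM_THREADS; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (i = 0; i < NUM_THREADS; i++)
		pthread_join(t[i], NULL);

	/* Same pass criterion as the patched launch_test(). */
	printf("obj_count = %d (should be 1)\n", atomic_load(&obj_count));
	return atomic_load(&obj_count) == 1 ? 0 : 1;
}

However many threads race, the sketch prints obj_count = 1; a count of 0 would mean the creation path lost an object, and a count above 1 would mean the name registry is not reentrant, which is exactly the failure mode the patched launch_test() reports.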