/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <string.h>
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_pause.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>

#ifdef RTE_LIB_HASH
#include <rte_hash.h>
#include <rte_fbk_hash.h>
#include <rte_jhash.h>
#endif /* RTE_LIB_HASH */

#ifdef RTE_LIB_LPM
#include <rte_lpm.h>
#endif /* RTE_LIB_LPM */

#include <rte_string_fns.h>

#include "test.h"

typedef int (*case_func_t)(void *arg);
typedef void (*case_clean_t)(unsigned int lcore_id);

#define MAX_STRING_SIZE     (256)
#define MAX_ITER_MULTI      (16)
#define MAX_ITER_ONCE       (4)
#define MAX_LPM_ITER_TIMES  (6)

#define MEMPOOL_ELT_SIZE    (sizeof(uint32_t))
#define MEMPOOL_SIZE        (4)

#define MAX_LCORES	(RTE_MAX_MEMZONE / (MAX_ITER_MULTI * 4U))
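
/*
 * MAX_LCORES above bounds the number of lcores allowed into the test:
 * every lcore creates up to MAX_ITER_MULTI named objects, each backed by
 * memzones, so the cap keeps the total under RTE_MAX_MEMZONE (the 4U is
 * presumably a per-object memzone budget).
 */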

static uint32_t obj_count;
static uint32_t synchro;
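
/*
 * obj_count counts how many lcores managed to create the shared
 * "fr_test_once" object; a reentrant create path must let exactly one
 * creation succeed. synchro is the start flag: workers spin on it until
 * the main lcore releases them, so all lcores hit the tested API at once.
 */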

#define WAIT_SYNCHRO_FOR_WORKERS()   do { \
	if (lcore_self != rte_get_main_lcore()) \
		rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED); \
} while (0)

/*
 * EAL may be initialized only once: a second rte_eal_init() call must fail
 */
static int
test_eal_init_once(__rte_unused void *arg)
{
	unsigned int lcore_self = rte_lcore_id();

	WAIT_SYNCHRO_FOR_WORKERS();

	/* silence the object-count check in the caller */
	__atomic_store_n(&obj_count, 1, __ATOMIC_RELAXED);
	if (rte_eal_init(0, NULL) != -1)
		return -1;

	return 0;
}

/*
 * ring create/lookup reentrancy test
 */
static void
ring_clean(unsigned int lcore_id)
{
	struct rte_ring *rp;
	char ring_name[MAX_STRING_SIZE];
	int i;

	rp = rte_ring_lookup("fr_test_once");
	rte_ring_free(rp);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(ring_name, sizeof(ring_name),
			 "fr_test_%d_%d", lcore_id, i);
		rp = rte_ring_lookup(ring_name);
		rte_ring_free(rp);
	}
}

static int
ring_create_lookup(__rte_unused void *arg)
{
	unsigned int lcore_self = rte_lcore_id();
	struct rte_ring *rp;
	char ring_name[MAX_STRING_SIZE];
	int i;

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same ring simultaneously on all threads */
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
		if (rp != NULL)
			__atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
	}

	/* create/lookup new rings several times */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
		rp = rte_ring_create(ring_name, 4096, SOCKET_ID_ANY, 0);
		if (rp == NULL)
			return -1;
		if (rte_ring_lookup(ring_name) != rp)
			return -1;

		/* verify that the ring was created successfully */
		if (rte_ring_lookup(ring_name) == NULL)
			return -1;
	}

	return 0;
}
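
/*
 * mempool create/lookup reentrancy test
 */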
static void
my_obj_init(struct rte_mempool *mp, __rte_unused void *arg,
	    void *obj, unsigned int i)
{
	uint32_t *objnum = obj;

	memset(obj, 0, mp->elt_size);
	*objnum = i;
}

static void
mempool_clean(unsigned int lcore_id)
{
	struct rte_mempool *mp;
	char mempool_name[MAX_STRING_SIZE];
	int i;

	mp = rte_mempool_lookup("fr_test_once");
	rte_mempool_free(mp);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d",
			 lcore_id, i);
		mp = rte_mempool_lookup(mempool_name);
		rte_mempool_free(mp);
	}
}

static int
mempool_create_lookup(__rte_unused void *arg)
{
	unsigned int lcore_self = rte_lcore_id();
	struct rte_mempool *mp;
	char mempool_name[MAX_STRING_SIZE];
	int i;

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same mempool simultaneously on all threads */
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		mp = rte_mempool_create("fr_test_once", MEMPOOL_SIZE,
					MEMPOOL_ELT_SIZE, 0, 0,
					NULL, NULL,
					my_obj_init, NULL,
					SOCKET_ID_ANY, 0);
		if (mp != NULL)
			__atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
	}

	/* create/lookup new mempools several times */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
		mp = rte_mempool_create(mempool_name, MEMPOOL_SIZE,
					MEMPOOL_ELT_SIZE, 0, 0,
					NULL, NULL,
					my_obj_init, NULL,
					SOCKET_ID_ANY, 0);
		if (mp == NULL)
			return -1;
		if (rte_mempool_lookup(mempool_name) != mp)
			return -1;

		/* verify that the mempool was created successfully */
		if (rte_mempool_lookup(mempool_name) == NULL)
			return -1;
	}

	return 0;
}
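
/*
 * hash create/free reentrancy test (built only with RTE_LIB_HASH)
 */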
#ifdef RTE_LIB_HASH
static void
hash_clean(unsigned int lcore_id)
{
	char hash_name[MAX_STRING_SIZE];
	struct rte_hash *handle;
	int i;

	handle = rte_hash_find_existing("fr_test_once");
	rte_hash_free(handle);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_id, i);

		if ((handle = rte_hash_find_existing(hash_name)) != NULL)
			rte_hash_free(handle);
	}
}

static int
hash_create_free(__rte_unused void *arg)
{
	unsigned int lcore_self = rte_lcore_id();
	struct rte_hash *handle;
	char hash_name[MAX_STRING_SIZE];
	int i;
	struct rte_hash_parameters hash_params = {
		.name = NULL,
		.entries = 16,
		.key_len = 4,
		.hash_func = (rte_hash_function)rte_jhash_32b,
		.hash_func_init_val = 0,
		.socket_id = 0,
	};

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same hash simultaneously on all threads */
	hash_params.name = "fr_test_once";
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		handle = rte_hash_create(&hash_params);
		if (handle != NULL)
			__atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
	}

	/* create multiple times simultaneously */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
		hash_params.name = hash_name;

		handle = rte_hash_create(&hash_params);
		if (handle == NULL)
			return -1;

		/* verify the handle can be looked up, then free it */
		if (handle != rte_hash_find_existing(hash_name))
			return -1;

		rte_hash_free(handle);

		/* verify the free took effect */
		if (rte_hash_find_existing(hash_name) != NULL)
			return -1;
	}

	return 0;
}
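
/*
 * fbk hash create/free reentrancy test (built only with RTE_LIB_HASH)
 */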
static void
fbk_clean(unsigned int lcore_id)
{
	char fbk_name[MAX_STRING_SIZE];
	struct rte_fbk_hash_table *handle;
	int i;

	handle = rte_fbk_hash_find_existing("fr_test_once");
	if (handle != NULL)
		rte_fbk_hash_free(handle);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_id, i);

		if ((handle = rte_fbk_hash_find_existing(fbk_name)) != NULL)
			rte_fbk_hash_free(handle);
	}
}

static int
fbk_create_free(__rte_unused void *arg)
{
	unsigned int lcore_self = rte_lcore_id();
	struct rte_fbk_hash_table *handle;
	char fbk_name[MAX_STRING_SIZE];
	int i;
	struct rte_fbk_hash_params fbk_params = {
		.name = NULL,
		.entries = 4,
		.entries_per_bucket = 4,
		.socket_id = 0,
		.hash_func = rte_jhash_1word,
		.init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
	};

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same fbk hash table simultaneously on all threads */
	fbk_params.name = "fr_test_once";
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		handle = rte_fbk_hash_create(&fbk_params);
		if (handle != NULL)
			__atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
	}

	/* create multiple fbk tables simultaneously */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
		fbk_params.name = fbk_name;

		handle = rte_fbk_hash_create(&fbk_params);
		if (handle == NULL)
			return -1;

		/* verify the table can be looked up, then free it */
		if (handle != rte_fbk_hash_find_existing(fbk_name))
			return -1;

		rte_fbk_hash_free(handle);

		/* verify the free took effect */
		if (rte_fbk_hash_find_existing(fbk_name) != NULL)
			return -1;
	}

	return 0;
}
#endif /* RTE_LIB_HASH */
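
/*
 * lpm create/free reentrancy test (built only with RTE_LIB_LPM)
 */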
#ifdef RTE_LIB_LPM
static void
lpm_clean(unsigned int lcore_id)
{
	char lpm_name[MAX_STRING_SIZE];
	struct rte_lpm *lpm;
	int i;

	lpm = rte_lpm_find_existing("fr_test_once");
	if (lpm != NULL)
		rte_lpm_free(lpm);

	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_id, i);

		if ((lpm = rte_lpm_find_existing(lpm_name)) != NULL)
			rte_lpm_free(lpm);
	}
}

static int
lpm_create_free(__rte_unused void *arg)
{
	unsigned int lcore_self = rte_lcore_id();
	struct rte_lpm *lpm;
	struct rte_lpm_config config;
	char lpm_name[MAX_STRING_SIZE];
	int i;

	config.max_rules = 4;
	config.number_tbl8s = 256;
	config.flags = 0;

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same lpm simultaneously on all threads */
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, &config);
		if (lpm != NULL)
			__atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
	}

	/* create multiple lpm tables simultaneously */
	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
		lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, &config);
		if (lpm == NULL)
			return -1;

		/* verify the table can be looked up, then free it */
		if (lpm != rte_lpm_find_existing(lpm_name))
			return -1;

		rte_lpm_free(lpm);

		/* verify the free took effect */
		if (rte_lpm_find_existing(lpm_name) != NULL)
			return -1;
	}

	return 0;
}
#endif /* RTE_LIB_LPM */
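
/*
 * A test case: the function run on every lcore, its argument, and a
 * per-lcore cleanup hook invoked after the run.
 */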
struct test_case {
	case_func_t  func;
	void        *arg;
	case_clean_t clean;
	char         name[MAX_STRING_SIZE];
};

/* All test cases in the test suite */
struct test_case test_cases[] = {
	{ test_eal_init_once, NULL, NULL, "eal init once" },
	{ ring_create_lookup, NULL, ring_clean, "ring create/lookup" },
	{ mempool_create_lookup, NULL, mempool_clean,
			"mempool create/lookup" },
#ifdef RTE_LIB_HASH
	{ hash_create_free, NULL, hash_clean, "hash create/free" },
	{ fbk_create_free, NULL, fbk_clean, "fbk create/free" },
#endif /* RTE_LIB_HASH */
#ifdef RTE_LIB_LPM
	{ lpm_create_free, NULL, lpm_clean, "lpm create/free" },
#endif /* RTE_LIB_LPM */
};

/*
 * launch the test case on the main lcore and all worker lcores in parallel
 */
static int
launch_test(struct test_case *pt_case)
{
	unsigned int lcore_id;
	unsigned int cores;
	unsigned int count;
	int ret = 0;

	if (pt_case->func == NULL)
		return -1;

	__atomic_store_n(&obj_count, 0, __ATOMIC_RELAXED);
	__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);

	cores = RTE_MIN(rte_lcore_count(), MAX_LCORES);
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (cores == 1)
			break;
		cores--;
		rte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);
	}

	/* release the workers, then run the case on the main lcore too */
	__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);

	if (pt_case->func(pt_case->arg) < 0)
		ret = -1;

	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			ret = -1;
	}

	RTE_LCORE_FOREACH(lcore_id) {
		if (pt_case->clean != NULL)
			pt_case->clean(lcore_id);
	}

	/* exactly one lcore must have created the shared object */
	count = __atomic_load_n(&obj_count, __ATOMIC_RELAXED);
	if (count != 1) {
		printf("%s: common object allocated %d times (should be 1)\n",
			pt_case->name, count);
		ret = -1;
	}

	return ret;
}

/*
 * Main entry of func_reentrancy test
 */
static int
test_func_reentrancy(void)
{
	uint32_t case_id;
	struct test_case *pt_case = NULL;

	if (RTE_EXEC_ENV_IS_WINDOWS)
		return TEST_SKIPPED;

	if (rte_lcore_count() < 2) {
		printf("Not enough cores for func_reentrancy_autotest, expecting at least 2\n");
		return TEST_SKIPPED;
	}
	else if (rte_lcore_count() > MAX_LCORES)
		printf("Too many lcores, some cores will be disabled\n");

	for (case_id = 0; case_id < RTE_DIM(test_cases); case_id++) {
		pt_case = &test_cases[case_id];
		if (pt_case->func == NULL)
			continue;

		if (launch_test(pt_case) < 0) {
			printf("Func-ReEnt CASE %"PRIu32": %s FAIL\n", case_id, pt_case->name);
			return -1;
		}
		printf("Func-ReEnt CASE %"PRIu32": %s PASS\n", case_id, pt_case->name);
	}

	return 0;
}

REGISTER_TEST_COMMAND(func_reentrancy_autotest, test_func_reentrancy);