/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>

#ifdef RTE_LIB_HASH
#include <rte_hash.h>
#include <rte_fbk_hash.h>
#include <rte_jhash.h>
#endif /* RTE_LIB_HASH */

#ifdef RTE_LIB_LPM
#include <rte_lpm.h>
#endif /* RTE_LIB_LPM */

#include <rte_string_fns.h>

#include "test.h"
typedef int (*case_func_t)(void* arg);
typedef void (*case_clean_t)(unsigned lcore_id);
#define MAX_STRING_SIZE (256)
#define MAX_ITER_MULTI (16)
#define MAX_ITER_ONCE (4)
#define MAX_LPM_ITER_TIMES (6)

#define MEMPOOL_ELT_SIZE (sizeof(uint32_t))
#define MEMPOOL_SIZE (4)
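/* Bound the number of participating lcores so the objects created per lcore
 * cannot exhaust the memzone table. */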
#define MAX_LCORES (RTE_MAX_MEMZONE / (MAX_ITER_MULTI * 4U))
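/*
 * obj_count tracks how many lcores managed to create the shared
 * "fr_test_once" object; exactly one creation should succeed per case.
 * synchro is the start flag the main lcore raises so that all workers
 * enter the create/lookup calls at the same time.
 */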
static rte_atomic32_t obj_count = RTE_ATOMIC32_INIT(0);
static rte_atomic32_t synchro = RTE_ATOMIC32_INIT(0);
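/* Worker lcores spin here until the main lcore sets synchro; the main lcore
 * itself never waits. */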
#define WAIT_SYNCHRO_FOR_WORKERS()   do { \
	if (lcore_self != rte_get_main_lcore())                  \
		while (rte_atomic32_read(&synchro) == 0);        \
} while(0)
/*
 * rte_eal_init() can only init once; any further call must fail
 */
static int
test_eal_init_once(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();

	WAIT_SYNCHRO_FOR_WORKERS();

	rte_atomic32_set(&obj_count, 1); /* silence the check in the caller */
	if (rte_eal_init(0, NULL) != -1)
		return -1;

	return 0;
}

/*
 * ring create/lookup reentrancy test
 */
static void
ring_clean(unsigned int lcore_id)
{
	struct rte_ring *rp;
	char ring_name[MAX_STRING_SIZE];
	int i;

	rp = rte_ring_lookup("fr_test_once");
	if (rp != NULL)
		rte_ring_free(rp);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(ring_name, sizeof(ring_name),
			 "fr_test_%d_%d", lcore_id, i);
		rp = rte_ring_lookup(ring_name);
		if (rp != NULL)
			rte_ring_free(rp);
	}
}
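/*
 * All lcores race to create a ring with the same name; the test expects
 * exactly one of the concurrent creations to succeed.
 */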
static int
ring_create_lookup(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_ring *rp;
	char ring_name[MAX_STRING_SIZE];
	int i;

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same ring simultaneously on all threads */
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
		if (rp != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create/lookup new rings several times */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
		rp = rte_ring_create(ring_name, 4096, SOCKET_ID_ANY, 0);
		if (rp == NULL)
			return -1;
		if (rte_ring_lookup(ring_name) != rp)
			return -1;

		/* verify all rings are created successfully */
		if (rte_ring_lookup(ring_name) == NULL)
			return -1;
	}

	return 0;
}
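/* mempool element constructor: zero the object and record its index */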
static void
my_obj_init(struct rte_mempool *mp, __rte_unused void *arg,
	    void *obj, unsigned i)
{
	uint32_t *objnum = obj;

	memset(obj, 0, mp->elt_size);
	*objnum = i;
}
static void
mempool_clean(unsigned int lcore_id)
{
	struct rte_mempool *mp;
	char mempool_name[MAX_STRING_SIZE];
	int i;

	mp = rte_mempool_lookup("fr_test_once");
	if (mp != NULL)
		rte_mempool_free(mp);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d",
			 lcore_id, i);
		mp = rte_mempool_lookup(mempool_name);
		if (mp != NULL)
			rte_mempool_free(mp);
	}
}
static int
mempool_create_lookup(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_mempool *mp;
	char mempool_name[MAX_STRING_SIZE];
	int i;

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same mempool simultaneously on all threads */
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		mp = rte_mempool_create("fr_test_once", MEMPOOL_SIZE,
					MEMPOOL_ELT_SIZE, 0, 0,
					NULL, NULL,
					my_obj_init, NULL,
					SOCKET_ID_ANY, 0);
		if (mp != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create/lookup new mempools several times */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
		mp = rte_mempool_create(mempool_name, MEMPOOL_SIZE,
					MEMPOOL_ELT_SIZE, 0, 0,
					NULL, NULL,
					my_obj_init, NULL,
					SOCKET_ID_ANY, 0);
		if (mp == NULL)
			return -1;
		if (rte_mempool_lookup(mempool_name) != mp)
			return -1;

		/* verify all mempools are created successfully */
		if (rte_mempool_lookup(mempool_name) == NULL)
			return -1;
	}

	return 0;
}
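/*
 * hash create/free reentrancy test
 */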
#ifdef RTE_LIB_HASH
static void
hash_clean(unsigned lcore_id)
{
	char hash_name[MAX_STRING_SIZE];
	struct rte_hash *handle;
	int i;

	handle = rte_hash_find_existing("fr_test_once");
	if (handle != NULL)
		rte_hash_free(handle);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_id, i);

		if ((handle = rte_hash_find_existing(hash_name)) != NULL)
			rte_hash_free(handle);
	}
}
static int
hash_create_free(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_hash *handle;
	char hash_name[MAX_STRING_SIZE];
	int i;
	struct rte_hash_parameters hash_params = {
		.name = NULL,
		.entries = 16,
		.key_len = 4,
		.hash_func = (rte_hash_function)rte_jhash_32b,
		.hash_func_init_val = 0,
		.socket_id = 0,
	};

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same hash simultaneously on all threads */
	hash_params.name = "fr_test_once";
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		handle = rte_hash_create(&hash_params);
		if (handle != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create multiple hashes simultaneously */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
		hash_params.name = hash_name;

		handle = rte_hash_create(&hash_params);
		if (handle == NULL)
			return -1;

		/* verify the hash exists, then free it */
		if (handle != rte_hash_find_existing(hash_name))
			return -1;

		rte_hash_free(handle);

		/* verify the free worked */
		if (NULL != rte_hash_find_existing(hash_name))
			return -1;
	}

	return 0;
}
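/*
 * fbk hash create/free reentrancy test
 */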
static void
fbk_clean(unsigned lcore_id)
{
	char fbk_name[MAX_STRING_SIZE];
	struct rte_fbk_hash_table *handle;
	int i;

	handle = rte_fbk_hash_find_existing("fr_test_once");
	if (handle != NULL)
		rte_fbk_hash_free(handle);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_id, i);

		if ((handle = rte_fbk_hash_find_existing(fbk_name)) != NULL)
			rte_fbk_hash_free(handle);
	}
}
static int
fbk_create_free(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_fbk_hash_table *handle;
	char fbk_name[MAX_STRING_SIZE];
	int i;
	struct rte_fbk_hash_params fbk_params = {
		.name = NULL,
		.entries = 4,
		.entries_per_bucket = 4,
		.socket_id = 0,
		.hash_func = rte_jhash_1word,
		.init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
	};

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same fbk hash table simultaneously on all threads */
	fbk_params.name = "fr_test_once";
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		handle = rte_fbk_hash_create(&fbk_params);
		if (handle != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create multiple fbk tables simultaneously */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
		fbk_params.name = fbk_name;

		handle = rte_fbk_hash_create(&fbk_params);
		if (handle == NULL)
			return -1;

		/* verify the table exists, then free it */
		if (handle != rte_fbk_hash_find_existing(fbk_name))
			return -1;

		rte_fbk_hash_free(handle);

		/* verify the free worked */
		if (NULL != rte_fbk_hash_find_existing(fbk_name))
			return -1;
	}

	return 0;
}
#endif /* RTE_LIB_HASH */
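/*
 * lpm create/free reentrancy test
 */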
#ifdef RTE_LIB_LPM
static void
lpm_clean(unsigned int lcore_id)
{
	char lpm_name[MAX_STRING_SIZE];
	struct rte_lpm *lpm;
	int i;

	lpm = rte_lpm_find_existing("fr_test_once");
	if (lpm != NULL)
		rte_lpm_free(lpm);

	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_id, i);

		if ((lpm = rte_lpm_find_existing(lpm_name)) != NULL)
			rte_lpm_free(lpm);
	}
}
static int
lpm_create_free(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_lpm *lpm;
	struct rte_lpm_config config;
	char lpm_name[MAX_STRING_SIZE];
	int i;

	config.max_rules = 4;
	config.number_tbl8s = 256;
	config.flags = 0;

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same lpm simultaneously on all threads */
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, &config);
		if (lpm != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create multiple lpm tables simultaneously */
	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
		lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, &config);
		if (lpm == NULL)
			return -1;

		/* verify the table exists, then free it */
		if (lpm != rte_lpm_find_existing(lpm_name))
			return -1;

		rte_lpm_free(lpm);

		/* verify the free worked */
		if (NULL != rte_lpm_find_existing(lpm_name))
			return -1;
	}

	return 0;
}
#endif /* RTE_LIB_LPM */
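/*
 * A test case: the function run on every lcore, its argument, an optional
 * per-lcore cleanup hook and a printable name.
 */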
struct test_case {
	case_func_t	func;
	void		*arg;
	case_clean_t	clean;
	char		name[MAX_STRING_SIZE];
};

/* All test cases in the test suite */
struct test_case test_cases[] = {
	{ test_eal_init_once, NULL, NULL, "eal init once" },
	{ ring_create_lookup, NULL, ring_clean, "ring create/lookup" },
	{ mempool_create_lookup, NULL, mempool_clean,
			"mempool create/lookup" },
#ifdef RTE_LIB_HASH
	{ hash_create_free, NULL, hash_clean, "hash create/free" },
	{ fbk_create_free, NULL, fbk_clean, "fbk create/free" },
#endif /* RTE_LIB_HASH */
#ifdef RTE_LIB_LPM
	{ lpm_create_free, NULL, lpm_clean, "lpm create/free" },
#endif /* RTE_LIB_LPM */
};
/*
 * launch the test case on the main lcore and on every worker lcore
 */
static int
launch_test(struct test_case *pt_case)
{
	unsigned int lcore_id;
	unsigned int cores;
	unsigned int count;
	int ret = 0;

	if (pt_case->func == NULL)
		return -1;

	rte_atomic32_set(&obj_count, 0);
	rte_atomic32_set(&synchro, 0);

	cores = RTE_MIN(rte_lcore_count(), MAX_LCORES);
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (cores == 1)
			break;
		cores--;
		rte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);
	}

	rte_atomic32_set(&synchro, 1);

	if (pt_case->func(pt_case->arg) < 0)
		ret = -1;

	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			ret = -1;
	}

	RTE_LCORE_FOREACH(lcore_id) {
		if (pt_case->clean != NULL)
			pt_case->clean(lcore_id);
	}

	count = rte_atomic32_read(&obj_count);
	if (count != 1) {
		printf("%s: common object allocated %d times (should be 1)\n",
			pt_case->name, count);
		ret = -1;
	}

	return ret;
}
/*
 * Main entry of func_reentrancy test
 */
static int
test_func_reentrancy(void)
{
	uint32_t case_id;
	struct test_case *pt_case = NULL;

	if (rte_lcore_count() < 2) {
		printf("Not enough cores for func_reentrancy_autotest, expecting at least 2\n");
		return TEST_SKIPPED;
	}
	else if (rte_lcore_count() > MAX_LCORES)
		printf("Too many lcores, some cores will be disabled\n");

	for (case_id = 0; case_id < RTE_DIM(test_cases); case_id++) {
		pt_case = &test_cases[case_id];
		if (pt_case->func == NULL)
			continue;

		if (launch_test(pt_case) < 0) {
			printf("Func-ReEnt CASE %"PRIu32": %s FAIL\n", case_id, pt_case->name);
			return -1;
		}
		printf("Func-ReEnt CASE %"PRIu32": %s PASS\n", case_id, pt_case->name);
	}

	return 0;
}
REGISTER_TEST_COMMAND(func_reentrancy_autotest, test_func_reentrancy);