/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/queue.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>

#ifdef RTE_LIB_HASH
#include <rte_hash.h>
#include <rte_fbk_hash.h>
#include <rte_jhash.h>
#endif /* RTE_LIB_HASH */

#ifdef RTE_LIB_LPM
#include <rte_lpm.h>
#endif /* RTE_LIB_LPM */

#include <rte_string_fns.h>

#include "test.h"
typedef int (*case_func_t)(void* arg);
typedef void (*case_clean_t)(unsigned lcore_id);

#define MAX_STRING_SIZE     (256)
#define MAX_ITER_MULTI      (16)
#define MAX_ITER_ONCE       (4)
#define MAX_LPM_ITER_TIMES  (6)

#define MEMPOOL_ELT_SIZE    (sizeof(uint32_t))
#define MEMPOOL_SIZE        (4)

#define MAX_LCORES	(RTE_MAX_MEMZONE / (MAX_ITER_MULTI * 4U))

static uint32_t obj_count;
static uint32_t synchro;
#define WAIT_SYNCHRO_FOR_WORKERS()   do { \
	if (lcore_self != rte_get_main_lcore()) \
		rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED); \
} while (0)
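
/*
 * Worker lcores block in WAIT_SYNCHRO_FOR_WORKERS() at the top of each test
 * case until the main lcore stores 1 to 'synchro' from launch_test() below,
 * so that every lcore reaches the create/lookup calls at the same time.
 */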
/*
 * rte_eal_init() can only succeed once; any later call must fail
 */
static int
test_eal_init_once(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();

	WAIT_SYNCHRO_FOR_WORKERS();

	__atomic_store_n(&obj_count, 1, __ATOMIC_RELAXED); /* silence the check in the caller */
	if (rte_eal_init(0, NULL) != -1)
		return -1;

	return 0;
}
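
/*
 * EAL was already initialized by the test harness before this case runs,
 * so every rte_eal_init(0, NULL) call above is a re-initialization attempt
 * and is expected to return -1 on all lcores.
 */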
/*
 * ring create/lookup reentrancy test
 */
static void
ring_clean(unsigned int lcore_id)
{
	struct rte_ring *rp;
	char ring_name[MAX_STRING_SIZE];
	int i;

	rp = rte_ring_lookup("fr_test_once");
	rte_ring_free(rp);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(ring_name, sizeof(ring_name),
			 "fr_test_%d_%d", lcore_id, i);
		rp = rte_ring_lookup(ring_name);
		rte_ring_free(rp);
	}
}
static int
ring_create_lookup(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_ring *rp;
	char ring_name[MAX_STRING_SIZE];
	int i;

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same ring simultaneously on all threads */
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
		if (rp != NULL)
			__atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
	}

	/* create/lookup new rings several times */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
		rp = rte_ring_create(ring_name, 4096, SOCKET_ID_ANY, 0);
		if (rp == NULL)
			return -1;
		if (rte_ring_lookup(ring_name) != rp)
			return -1;

		/* verify that the ring was created successfully */
		if (rte_ring_lookup(ring_name) == NULL)
			return -1;
	}

	return 0;
}
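
/*
 * The invariant being exercised: with e.g. 4 lcores each making
 * MAX_ITER_ONCE create calls, 16 calls race on the shared name but only
 * one may win, since the name refers to a single shared object:
 *
 *	rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
 *	// non-NULL on exactly one call across all lcores; every other
 *	// call fails and leaves obj_count untouched
 *
 * launch_test() checks afterwards that obj_count ended up at exactly 1.
 */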
static void
my_obj_init(struct rte_mempool *mp, __rte_unused void *arg,
	    void *obj, unsigned i)
{
	uint32_t *objnum = obj;

	/* zero the element, then stamp it with its index */
	memset(obj, 0, mp->elt_size);
	*objnum = i;
}
static void
mempool_clean(unsigned int lcore_id)
{
	struct rte_mempool *mp;
	char mempool_name[MAX_STRING_SIZE];
	int i;

	mp = rte_mempool_lookup("fr_test_once");
	if (mp != NULL)
		rte_mempool_free(mp);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d",
			 lcore_id, i);
		mp = rte_mempool_lookup(mempool_name);
		if (mp != NULL)
			rte_mempool_free(mp);
	}
}
static int
mempool_create_lookup(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_mempool *mp;
	char mempool_name[MAX_STRING_SIZE];
	int i;

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same mempool simultaneously on all threads */
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		mp = rte_mempool_create("fr_test_once", MEMPOOL_SIZE,
					MEMPOOL_ELT_SIZE, 0, 0,
					NULL, NULL,
					my_obj_init, NULL,
					SOCKET_ID_ANY, 0);
		if (mp != NULL)
			__atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
	}

	/* create/lookup new mempools several times */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
		mp = rte_mempool_create(mempool_name, MEMPOOL_SIZE,
					MEMPOOL_ELT_SIZE, 0, 0,
					NULL, NULL,
					my_obj_init, NULL,
					SOCKET_ID_ANY, 0);
		if (mp == NULL)
			return -1;
		if (rte_mempool_lookup(mempool_name) != mp)
			return -1;

		/* verify that the mempool was created successfully */
		if (rte_mempool_lookup(mempool_name) == NULL)
			return -1;
	}

	return 0;
}
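
/*
 * A minimal sketch (not part of the test) of how a losing lcore could tell
 * why its create failed; the EEXIST value is an assumption based on the
 * documented rte_mempool_create() error codes:
 *
 *	mp = rte_mempool_create("fr_test_once", MEMPOOL_SIZE,
 *				MEMPOOL_ELT_SIZE, 0, 0, NULL, NULL,
 *				my_obj_init, NULL, SOCKET_ID_ANY, 0);
 *	if (mp == NULL && rte_errno == EEXIST)
 *		;	// the name is already taken by the winning lcore
 */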
#ifdef RTE_LIB_HASH
static void
hash_clean(unsigned lcore_id)
{
	char hash_name[MAX_STRING_SIZE];
	struct rte_hash *handle;
	int i;

	handle = rte_hash_find_existing("fr_test_once");
	if (handle != NULL)
		rte_hash_free(handle);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_id, i);

		if ((handle = rte_hash_find_existing(hash_name)) != NULL)
			rte_hash_free(handle);
	}
}
static int
hash_create_free(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_hash *handle;
	char hash_name[MAX_STRING_SIZE];
	int i;
	struct rte_hash_parameters hash_params = {
		.name = NULL,
		.entries = 16,
		.key_len = 4,
		.hash_func = (rte_hash_function)rte_jhash_32b,
		.hash_func_init_val = 0,
		.socket_id = 0,
	};

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same hash simultaneously on all threads */
	hash_params.name = "fr_test_once";
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		handle = rte_hash_create(&hash_params);
		if (handle != NULL)
			__atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
	}

	/* create multiple times simultaneously */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
		hash_params.name = hash_name;

		handle = rte_hash_create(&hash_params);
		if (handle == NULL)
			return -1;

		/* verify it exists, then free it */
		if (handle != rte_hash_find_existing(hash_name))
			return -1;

		rte_hash_free(handle);

		/* verify that the free succeeded */
		if (NULL != rte_hash_find_existing(hash_name))
			return -1;
	}

	return 0;
}
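
/*
 * Unlike the ring/mempool cases above, this loop also exercises teardown:
 * after rte_hash_free(), rte_hash_find_existing() must return NULL again,
 * showing that concurrent create/free cycles on per-lcore names do not
 * corrupt the shared list of hash tables.
 */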
static void
fbk_clean(unsigned lcore_id)
{
	char fbk_name[MAX_STRING_SIZE];
	struct rte_fbk_hash_table *handle;
	int i;

	handle = rte_fbk_hash_find_existing("fr_test_once");
	if (handle != NULL)
		rte_fbk_hash_free(handle);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_id, i);

		if ((handle = rte_fbk_hash_find_existing(fbk_name)) != NULL)
			rte_fbk_hash_free(handle);
	}
}
static int
fbk_create_free(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_fbk_hash_table *handle;
	char fbk_name[MAX_STRING_SIZE];
	int i;
	struct rte_fbk_hash_params fbk_params = {
		.name = NULL,
		.entries = 4,
		.entries_per_bucket = 4,
		.socket_id = 0,
		.hash_func = rte_jhash_1word,
		.init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
	};

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same fbk hash table simultaneously on all threads */
	fbk_params.name = "fr_test_once";
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		handle = rte_fbk_hash_create(&fbk_params);
		if (handle != NULL)
			__atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
	}

	/* create multiple fbk tables simultaneously */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
		fbk_params.name = fbk_name;

		handle = rte_fbk_hash_create(&fbk_params);
		if (handle == NULL)
			return -1;

		/* verify it exists, then free it */
		if (handle != rte_fbk_hash_find_existing(fbk_name))
			return -1;

		rte_fbk_hash_free(handle);

		/* verify that the free succeeded */
		if (NULL != rte_fbk_hash_find_existing(fbk_name))
			return -1;
	}

	return 0;
}
#endif /* RTE_LIB_HASH */
#ifdef RTE_LIB_LPM
static void
lpm_clean(unsigned int lcore_id)
{
	char lpm_name[MAX_STRING_SIZE];
	struct rte_lpm *lpm;
	int i;

	lpm = rte_lpm_find_existing("fr_test_once");
	if (lpm != NULL)
		rte_lpm_free(lpm);

	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_id, i);

		if ((lpm = rte_lpm_find_existing(lpm_name)) != NULL)
			rte_lpm_free(lpm);
	}
}
static int
lpm_create_free(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_lpm *lpm;
	struct rte_lpm_config config;
	char lpm_name[MAX_STRING_SIZE];
	int i;

	config.max_rules = 4;
	config.number_tbl8s = 256;
	config.flags = 0;

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same lpm simultaneously on all threads */
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, &config);
		if (lpm != NULL)
			__atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
	}

	/* create multiple LPM tables simultaneously */
	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
		lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, &config);
		if (lpm == NULL)
			return -1;

		/* verify it exists, then free it */
		if (lpm != rte_lpm_find_existing(lpm_name))
			return -1;

		rte_lpm_free(lpm);

		/* verify that the free succeeded */
		if (NULL != rte_lpm_find_existing(lpm_name))
			return -1;
	}

	return 0;
}
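
/*
 * Note: this case iterates MAX_LPM_ITER_TIMES (6) times instead of
 * MAX_ITER_MULTI (16); presumably because each LPM table reserves far more
 * memory than a ring or hash table, the lower count keeps the test inside
 * the available memzone/memory budget.
 */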
#endif /* RTE_LIB_LPM */
struct test_case {
	case_func_t  func;
	void        *arg;
	case_clean_t clean;
	char         name[MAX_STRING_SIZE];
};
/* All test cases in the test suite */
struct test_case test_cases[] = {
	{ test_eal_init_once, NULL, NULL, "eal init once" },
	{ ring_create_lookup, NULL, ring_clean, "ring create/lookup" },
	{ mempool_create_lookup, NULL, mempool_clean,
			"mempool create/lookup" },
#ifdef RTE_LIB_HASH
	{ hash_create_free, NULL, hash_clean, "hash create/free" },
	{ fbk_create_free, NULL, fbk_clean, "fbk create/free" },
#endif /* RTE_LIB_HASH */
#ifdef RTE_LIB_LPM
	{ lpm_create_free, NULL, lpm_clean, "lpm create/free" },
#endif /* RTE_LIB_LPM */
};
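
/*
 * Extending the suite is a one-line change. A hypothetical sketch (neither
 * 'stack_create_free' nor 'stack_clean' exists in this file; the names are
 * for illustration only):
 *
 *	{ stack_create_free, NULL, stack_clean, "stack create/free" },
 */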
/*
 * launch the test case on the main lcore and all available worker lcores
 */
static int
launch_test(struct test_case *pt_case)
{
	unsigned int lcore_id;
	unsigned int cores;
	unsigned int count;
	int ret = 0;

	if (pt_case->func == NULL)
		return -1;

	__atomic_store_n(&obj_count, 0, __ATOMIC_RELAXED);
	__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);

	cores = RTE_MIN(rte_lcore_count(), MAX_LCORES);
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (cores == 1)
			break;

		cores--;
		rte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);
	}

	/* release the workers, then run the same case on this (main) lcore */
	__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);

	if (pt_case->func(pt_case->arg) < 0)
		ret = -1;

	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			ret = -1;
	}

	RTE_LCORE_FOREACH(lcore_id) {
		if (pt_case->clean != NULL)
			pt_case->clean(lcore_id);
	}

	count = __atomic_load_n(&obj_count, __ATOMIC_RELAXED);
	if (count != 1) {
		printf("%s: common object allocated %d times (should be 1)\n",
			pt_case->name, count);
		ret = -1;
	}

	return ret;
}
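
/*
 * Execution order for one case, with main lcore M and workers W1..Wn
 * (this restates the code above, it is not a separate API):
 *
 *	M: obj_count = 0, synchro = 0
 *	M: rte_eal_remote_launch(func) on W1..Wn
 *	      -> workers block in WAIT_SYNCHRO_FOR_WORKERS()
 *	M: synchro = 1        -> all lcores race into the create calls
 *	M: runs func itself, waits for W1..Wn, then clean() on every lcore
 *	M: the case passes only if every func returned 0 and obj_count == 1
 */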
/*
 * Main entry of the func_reentrancy test
 */
static int
test_func_reentrancy(void)
{
	uint32_t case_id;
	struct test_case *pt_case = NULL;

	if (rte_lcore_count() < 2) {
		printf("Not enough cores for func_reentrancy_autotest, expecting at least 2\n");
		return TEST_SKIPPED;
	}
	else if (rte_lcore_count() > MAX_LCORES)
		printf("Too many lcores, some cores will be disabled\n");

	for (case_id = 0; case_id < RTE_DIM(test_cases); case_id++) {
		pt_case = &test_cases[case_id];
		if (pt_case->func == NULL)
			continue;

		if (launch_test(pt_case) < 0) {
			printf("Func-ReEnt CASE %"PRIu32": %s FAIL\n", case_id, pt_case->name);
			return -1;
		}
		printf("Func-ReEnt CASE %"PRIu32": %s PASS\n", case_id, pt_case->name);
	}

	return 0;
}
REGISTER_TEST_COMMAND(func_reentrancy_autotest, test_func_reentrancy);
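
/*
 * Typically run from the dpdk-test binary's interactive prompt (binary
 * name and prompt follow the usual DPDK test-app conventions):
 *
 *	$ ./dpdk-test
 *	RTE>> func_reentrancy_autotest
 */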