/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>

#ifdef RTE_LIBRTE_HASH
#include <rte_hash.h>
#include <rte_fbk_hash.h>
#include <rte_jhash.h>
#endif /* RTE_LIBRTE_HASH */

#ifdef RTE_LIBRTE_LPM
#include <rte_lpm.h>
#endif /* RTE_LIBRTE_LPM */

#include <rte_string_fns.h>

#include "test.h"
44 typedef int (*case_func_t)(void* arg);
45 typedef void (*case_clean_t)(unsigned lcore_id);
47 #define MAX_STRING_SIZE (256)
48 #define MAX_ITER_TIMES (16)
49 #define MAX_LPM_ITER_TIMES (8)
51 #define MEMPOOL_ELT_SIZE (sizeof(uint32_t))
52 #define MEMPOOL_SIZE (4)
54 #define MAX_LCORES RTE_MAX_MEMZONE / (MAX_ITER_TIMES * 4U)
56 static rte_atomic32_t obj_count = RTE_ATOMIC32_INIT(0);
57 static rte_atomic32_t synchro = RTE_ATOMIC32_INIT(0);
59 #define WAIT_SYNCHRO_FOR_SLAVES() do{ \
60 if (lcore_self != rte_get_master_lcore()) \
61 while (rte_atomic32_read(&synchro) == 0); \
65 * rte_eal_init only init once
68 test_eal_init_once(__attribute__((unused)) void *arg)
70 unsigned lcore_self = rte_lcore_id();
72 WAIT_SYNCHRO_FOR_SLAVES();
74 rte_atomic32_set(&obj_count, 1); /* silent the check in the caller */
75 if (rte_eal_init(0, NULL) != -1)
82 * ring create/lookup reentrancy test
85 ring_create_lookup(__attribute__((unused)) void *arg)
87 unsigned lcore_self = rte_lcore_id();
89 char ring_name[MAX_STRING_SIZE];
92 WAIT_SYNCHRO_FOR_SLAVES();
94 /* create the same ring simultaneously on all threads */
95 for (i = 0; i < MAX_ITER_TIMES; i++) {
96 rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
98 rte_atomic32_inc(&obj_count);
101 /* create/lookup new ring several times */
102 for (i = 0; i < MAX_ITER_TIMES; i++) {
103 snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
104 rp = rte_ring_create(ring_name, 4096, SOCKET_ID_ANY, 0);
107 if (rte_ring_lookup(ring_name) != rp)
111 /* verify all ring created successful */
112 for (i = 0; i < MAX_ITER_TIMES; i++) {
113 snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
114 if (rte_ring_lookup(ring_name) == NULL)
122 my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
123 void *obj, unsigned i)
125 uint32_t *objnum = obj;
126 memset(obj, 0, mp->elt_size);
131 mempool_create_lookup(__attribute__((unused)) void *arg)
133 unsigned lcore_self = rte_lcore_id();
134 struct rte_mempool * mp;
135 char mempool_name[MAX_STRING_SIZE];
138 WAIT_SYNCHRO_FOR_SLAVES();
140 /* create the same mempool simultaneously on all threads */
141 for (i = 0; i < MAX_ITER_TIMES; i++) {
142 mp = rte_mempool_create("fr_test_once", MEMPOOL_SIZE,
143 MEMPOOL_ELT_SIZE, 0, 0,
148 rte_atomic32_inc(&obj_count);
151 /* create/lookup new ring several times */
152 for (i = 0; i < MAX_ITER_TIMES; i++) {
153 snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
154 mp = rte_mempool_create(mempool_name, MEMPOOL_SIZE,
155 MEMPOOL_ELT_SIZE, 0, 0,
161 if (rte_mempool_lookup(mempool_name) != mp)
165 /* verify all ring created successful */
166 for (i = 0; i < MAX_ITER_TIMES; i++) {
167 snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
168 if (rte_mempool_lookup(mempool_name) == NULL)
175 #ifdef RTE_LIBRTE_HASH
177 hash_clean(unsigned lcore_id)
179 char hash_name[MAX_STRING_SIZE];
180 struct rte_hash *handle;
183 for (i = 0; i < MAX_ITER_TIMES; i++) {
184 snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_id, i);
186 if ((handle = rte_hash_find_existing(hash_name)) != NULL)
187 rte_hash_free(handle);
192 hash_create_free(__attribute__((unused)) void *arg)
194 unsigned lcore_self = rte_lcore_id();
195 struct rte_hash *handle;
196 char hash_name[MAX_STRING_SIZE];
198 struct rte_hash_parameters hash_params = {
202 .hash_func = (rte_hash_function)rte_jhash_32b,
203 .hash_func_init_val = 0,
207 WAIT_SYNCHRO_FOR_SLAVES();
209 /* create the same hash simultaneously on all threads */
210 hash_params.name = "fr_test_once";
211 for (i = 0; i < MAX_ITER_TIMES; i++) {
212 handle = rte_hash_create(&hash_params);
214 rte_atomic32_inc(&obj_count);
217 /* create mutiple times simultaneously */
218 for (i = 0; i < MAX_ITER_TIMES; i++) {
219 snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
220 hash_params.name = hash_name;
222 handle = rte_hash_create(&hash_params);
226 /* verify correct existing and then free all */
227 if (handle != rte_hash_find_existing(hash_name))
230 rte_hash_free(handle);
233 /* verify free correct */
234 for (i = 0; i < MAX_ITER_TIMES; i++) {
235 snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
237 if (NULL != rte_hash_find_existing(hash_name))
245 fbk_clean(unsigned lcore_id)
247 char fbk_name[MAX_STRING_SIZE];
248 struct rte_fbk_hash_table *handle;
251 for (i = 0; i < MAX_ITER_TIMES; i++) {
252 snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_id, i);
254 if ((handle = rte_fbk_hash_find_existing(fbk_name)) != NULL)
255 rte_fbk_hash_free(handle);
260 fbk_create_free(__attribute__((unused)) void *arg)
262 unsigned lcore_self = rte_lcore_id();
263 struct rte_fbk_hash_table *handle;
264 char fbk_name[MAX_STRING_SIZE];
266 struct rte_fbk_hash_params fbk_params = {
269 .entries_per_bucket = 4,
271 .hash_func = rte_jhash_1word,
272 .init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
275 WAIT_SYNCHRO_FOR_SLAVES();
277 /* create the same fbk hash table simultaneously on all threads */
278 fbk_params.name = "fr_test_once";
279 for (i = 0; i < MAX_ITER_TIMES; i++) {
280 handle = rte_fbk_hash_create(&fbk_params);
282 rte_atomic32_inc(&obj_count);
285 /* create mutiple fbk tables simultaneously */
286 for (i = 0; i < MAX_ITER_TIMES; i++) {
287 snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
288 fbk_params.name = fbk_name;
290 handle = rte_fbk_hash_create(&fbk_params);
294 /* verify correct existing and then free all */
295 if (handle != rte_fbk_hash_find_existing(fbk_name))
298 rte_fbk_hash_free(handle);
301 /* verify free correct */
302 for (i = 0; i < MAX_ITER_TIMES; i++) {
303 snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
305 if (NULL != rte_fbk_hash_find_existing(fbk_name))
311 #endif /* RTE_LIBRTE_HASH */
313 #ifdef RTE_LIBRTE_LPM
315 lpm_clean(unsigned lcore_id)
317 char lpm_name[MAX_STRING_SIZE];
321 for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
322 snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_id, i);
324 if ((lpm = rte_lpm_find_existing(lpm_name)) != NULL)
330 lpm_create_free(__attribute__((unused)) void *arg)
332 unsigned lcore_self = rte_lcore_id();
334 struct rte_lpm_config config;
336 config.max_rules = 4;
337 config.number_tbl8s = 256;
339 char lpm_name[MAX_STRING_SIZE];
342 WAIT_SYNCHRO_FOR_SLAVES();
344 /* create the same lpm simultaneously on all threads */
345 for (i = 0; i < MAX_ITER_TIMES; i++) {
346 lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, &config);
348 rte_atomic32_inc(&obj_count);
351 /* create mutiple fbk tables simultaneously */
352 for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
353 snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
354 lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, &config);
358 /* verify correct existing and then free all */
359 if (lpm != rte_lpm_find_existing(lpm_name))
365 /* verify free correct */
366 for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
367 snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
368 if (NULL != rte_lpm_find_existing(lpm_name))
374 #endif /* RTE_LIBRTE_LPM */
380 char name[MAX_STRING_SIZE];
383 /* All test cases in the test suite */
384 struct test_case test_cases[] = {
385 { test_eal_init_once, NULL, NULL, "eal init once" },
386 { ring_create_lookup, NULL, NULL, "ring create/lookup" },
387 { mempool_create_lookup, NULL, NULL, "mempool create/lookup" },
388 #ifdef RTE_LIBRTE_HASH
389 { hash_create_free, NULL, hash_clean, "hash create/free" },
390 { fbk_create_free, NULL, fbk_clean, "fbk create/free" },
391 #endif /* RTE_LIBRTE_HASH */
392 #ifdef RTE_LIBRTE_LPM
393 { lpm_create_free, NULL, lpm_clean, "lpm create/free" },
394 #endif /* RTE_LIBRTE_LPM */
398 * launch test case in two separate thread
401 launch_test(struct test_case *pt_case)
405 unsigned cores_save = rte_lcore_count();
406 unsigned cores = RTE_MIN(cores_save, MAX_LCORES);
409 if (pt_case->func == NULL)
412 rte_atomic32_set(&obj_count, 0);
413 rte_atomic32_set(&synchro, 0);
415 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
419 rte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);
422 rte_atomic32_set(&synchro, 1);
424 if (pt_case->func(pt_case->arg) < 0)
428 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
432 if (rte_eal_wait_lcore(lcore_id) < 0)
435 if (pt_case->clean != NULL)
436 pt_case->clean(lcore_id);
439 count = rte_atomic32_read(&obj_count);
441 printf("%s: common object allocated %d times (should be 1)\n",
442 pt_case->name, count);
450 * Main entry of func_reentrancy test
453 test_func_reentrancy(void)
456 struct test_case *pt_case = NULL;
458 if (rte_lcore_count() <= 1) {
459 printf("Not enough lcore for testing\n");
462 else if (rte_lcore_count() > MAX_LCORES)
463 printf("Too many lcores, some cores will be disabled\n");
465 for (case_id = 0; case_id < sizeof(test_cases)/sizeof(struct test_case); case_id ++) {
466 pt_case = &test_cases[case_id];
467 if (pt_case->func == NULL)
470 if (launch_test(pt_case) < 0) {
471 printf("Func-ReEnt CASE %"PRIu32": %s FAIL\n", case_id, pt_case->name);
474 printf("Func-ReEnt CASE %"PRIu32": %s PASS\n", case_id, pt_case->name);
480 REGISTER_TEST_COMMAND(func_reentrancy_autotest, test_func_reentrancy);