1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
12 #include <sys/queue.h>
14 #include <rte_common.h>
16 #include <rte_debug.h>
17 #include <rte_memory.h>
18 #include <rte_launch.h>
19 #include <rte_cycles.h>
21 #include <rte_per_lcore.h>
22 #include <rte_lcore.h>
23 #include <rte_atomic.h>
24 #include <rte_branch_prediction.h>
26 #include <rte_mempool.h>
27 #include <rte_spinlock.h>
28 #include <rte_malloc.h>
30 #ifdef RTE_LIBRTE_HASH
32 #include <rte_fbk_hash.h>
33 #include <rte_jhash.h>
34 #endif /* RTE_LIBRTE_HASH */
38 #endif /* RTE_LIBRTE_LPM */
40 #include <rte_string_fns.h>
/* NOTE(review): every line in this extract carries a stray leading number
 * (the original file's line numbering) and several interior lines are
 * missing — this looks like a numbered-paste artifact of DPDK's
 * func_reentrancy autotest; confirm against the original source file. */
/* Per-lcore test body: returns negative on failure (invoked by launch_test
 * on the master and via rte_eal_remote_launch on the slaves). */
44 typedef int (*case_func_t)(void* arg);
/* Optional per-lcore cleanup hook run after the test body completes. */
45 typedef void (*case_clean_t)(unsigned lcore_id);
/* Size of the object-name buffers built with snprintf() below. */
47 #define MAX_STRING_SIZE (256)
/* Iterations for the per-lcore unique-name create/lookup loops. */
48 #define MAX_ITER_MULTI (16)
/* Iterations for the shared-name ("fr_test_once") creation race. */
49 #define MAX_ITER_ONCE (4)
/* The LPM test uses fewer iterations than the other object types. */
50 #define MAX_LPM_ITER_TIMES (6)
/* Mempool element: a single uint32_t (see my_obj_init). */
52 #define MEMPOOL_ELT_SIZE (sizeof(uint32_t))
53 #define MEMPOOL_SIZE (4)
/* Cap the number of participating lcores — presumably so the per-lcore
 * object creation stays within the memzone budget; TODO confirm. */
55 #define MAX_LCORES (RTE_MAX_MEMZONE / (MAX_ITER_MULTI * 4U))
/* Counts successful creations of the shared-name object; launch_test
 * expects exactly one winner across all lcores. */
57 static rte_atomic32_t obj_count = RTE_ATOMIC32_INIT(0);
/* Start gate: slaves spin until the master sets this to 1. */
58 static rte_atomic32_t synchro = RTE_ATOMIC32_INIT(0);
/* Busy-wait start barrier: slave lcores spin on `synchro`; the master
 * lcore falls straight through. Relies on `lcore_self` being defined in
 * the calling function. NOTE(review): the `} while(0)` tail of this
 * macro is not visible in this extract. */
60 #define WAIT_SYNCHRO_FOR_SLAVES() do{ \
61 if (lcore_self != rte_get_master_lcore()) \
62 while (rte_atomic32_read(&synchro) == 0); \
/* Verify that rte_eal_init() can only succeed once per process: a second
 * call (here with argc=0, argv=NULL) must return -1 on every lcore.
 * NOTE(review): the function's return statements and closing brace are
 * not visible in this extract. */
66 * rte_eal_init only init once
69 test_eal_init_once(__rte_unused void *arg)
71 unsigned lcore_self = rte_lcore_id();
73 WAIT_SYNCHRO_FOR_SLAVES();
/* This case creates no shared object, so pre-load the counter the
 * caller checks (launch_test expects obj_count == 1). */
75 rte_atomic32_set(&obj_count, 1); /* silent the check in the caller */
/* A repeated EAL init must fail with -1; anything else is a test bug. */
76 if (rte_eal_init(0, NULL) != -1)
/* Per-lcore cleanup: look up every ring this lcore created under the
 * "fr_test_<lcore>_<i>" naming scheme and free it if found.
 * NOTE(review): the rte_ring_free() call that should follow the lookup
 * is not visible in this extract — confirm against the original. */
83 * ring create/lookup reentrancy test
86 ring_clean(unsigned int lcore_id)
89 char ring_name[MAX_STRING_SIZE];
92 for (i = 0; i < MAX_ITER_MULTI; i++) {
93 snprintf(ring_name, sizeof(ring_name),
94 "fr_test_%d_%d", lcore_id, i);
/* Creation may have failed on this iteration, so the lookup can
 * legitimately return NULL. */
95 rp = rte_ring_lookup(ring_name);
/* Reentrancy test for the ring library: all lcores race to create one
 * shared-name ring, then each lcore creates and looks up its own
 * uniquely named rings. NOTE(review): the success checks between the
 * create and lookup calls are not fully visible in this extract. */
102 ring_create_lookup(__rte_unused void *arg)
104 unsigned lcore_self = rte_lcore_id();
105 struct rte_ring * rp;
106 char ring_name[MAX_STRING_SIZE];
109 WAIT_SYNCHRO_FOR_SLAVES();
/* Exactly one lcore should win this race; each winner bumps obj_count,
 * which launch_test later checks equals 1. */
111 /* create the same ring simultaneously on all threads */
112 for (i = 0; i < MAX_ITER_ONCE; i++) {
113 rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
/* Presumably guarded by a `rp != NULL` test not visible here — confirm. */
115 rte_atomic32_inc(&obj_count);
118 /* create/lookup new ring several times */
119 for (i = 0; i < MAX_ITER_MULTI; i++) {
120 snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
121 rp = rte_ring_create(ring_name, 4096, SOCKET_ID_ANY, 0);
/* A fresh lookup by name must return the exact handle just created. */
124 if (rte_ring_lookup(ring_name) != rp)
127 /* verify all ring created successful */
128 if (rte_ring_lookup(ring_name) == NULL)
/* Mempool per-object init callback: zero each element.
 * NOTE(review): `objnum` is declared but the statement that stores the
 * object index `i` through it is not visible in this extract — confirm. */
136 my_obj_init(struct rte_mempool *mp, __rte_unused void *arg,
137 void *obj, unsigned i)
139 uint32_t *objnum = obj;
140 memset(obj, 0, mp->elt_size);
/* Per-lcore cleanup: free every mempool this lcore created under the
 * "fr_test_<lcore>_<i>" naming scheme, if it exists. */
145 mempool_clean(unsigned int lcore_id)
147 struct rte_mempool *mp;
148 char mempool_name[MAX_STRING_SIZE];
/* (Comment says "ring" but this loop frees mempools — copy/paste slip
 * in the original.) */
151 /* verify all ring created successful */
152 for (i = 0; i < MAX_ITER_MULTI; i++) {
153 snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d",
/* Presumably guarded by `mp != NULL` on a line not visible here;
 * rte_mempool_free(NULL) is a no-op in any case. */
155 mp = rte_mempool_lookup(mempool_name);
157 rte_mempool_free(mp);
/* Reentrancy test for the mempool library: same structure as
 * ring_create_lookup — one shared-name creation race, then per-lcore
 * unique-name create/lookup rounds. NOTE(review): the rte_mempool_create
 * argument lists are truncated in this extract (constructor callbacks,
 * socket id and flags are not visible). */
162 mempool_create_lookup(__rte_unused void *arg)
164 unsigned lcore_self = rte_lcore_id();
165 struct rte_mempool * mp;
166 char mempool_name[MAX_STRING_SIZE];
169 WAIT_SYNCHRO_FOR_SLAVES();
/* Exactly one lcore should win; obj_count is checked by launch_test. */
171 /* create the same mempool simultaneously on all threads */
172 for (i = 0; i < MAX_ITER_ONCE; i++) {
173 mp = rte_mempool_create("fr_test_once", MEMPOOL_SIZE,
174 MEMPOOL_ELT_SIZE, 0, 0,
/* Presumably guarded by `mp != NULL` on a line not visible here. */
179 rte_atomic32_inc(&obj_count);
/* (Comment says "ring" but these are mempools — slip in the original.) */
182 /* create/lookup new ring several times */
183 for (i = 0; i < MAX_ITER_MULTI; i++) {
184 snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
185 mp = rte_mempool_create(mempool_name, MEMPOOL_SIZE,
186 MEMPOOL_ELT_SIZE, 0, 0,
/* Lookup by name must return the exact handle just created. */
192 if (rte_mempool_lookup(mempool_name) != mp)
195 /* verify all ring created successful */
196 if (rte_mempool_lookup(mempool_name) == NULL)
203 #ifdef RTE_LIBRTE_HASH
/* Per-lcore cleanup: free every cuckoo-hash table this lcore created
 * under the "fr_test_<lcore>_<i>" naming scheme, if it still exists. */
205 hash_clean(unsigned lcore_id)
207 char hash_name[MAX_STRING_SIZE];
208 struct rte_hash *handle;
211 for (i = 0; i < MAX_ITER_MULTI; i++) {
212 snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_id, i);
/* Only free tables that were actually created. */
214 if ((handle = rte_hash_find_existing(hash_name)) != NULL)
215 rte_hash_free(handle);
/* Reentrancy test for rte_hash: shared-name creation race, then
 * per-lcore unique-name create / find / free rounds.
 * NOTE(review): several initializer fields (.entries, .key_len,
 * .socket_id) and the error branches are not visible in this extract. */
220 hash_create_free(__rte_unused void *arg)
222 unsigned lcore_self = rte_lcore_id();
223 struct rte_hash *handle;
224 char hash_name[MAX_STRING_SIZE];
226 struct rte_hash_parameters hash_params = {
230 .hash_func = (rte_hash_function)rte_jhash_32b,
231 .hash_func_init_val = 0,
235 WAIT_SYNCHRO_FOR_SLAVES();
/* All lcores race on the same name; only one create should succeed. */
237 /* create the same hash simultaneously on all threads */
238 hash_params.name = "fr_test_once";
239 for (i = 0; i < MAX_ITER_ONCE; i++) {
240 handle = rte_hash_create(&hash_params);
/* Presumably guarded by `handle != NULL` on a line not visible here. */
242 rte_atomic32_inc(&obj_count);
245 /* create mutiple times simultaneously */
246 for (i = 0; i < MAX_ITER_MULTI; i++) {
247 snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
248 hash_params.name = hash_name;
250 handle = rte_hash_create(&hash_params);
/* find_existing must return the handle just created... */
254 /* verify correct existing and then free all */
255 if (handle != rte_hash_find_existing(hash_name))
258 rte_hash_free(handle);
/* ...and must fail after the table has been freed. */
260 /* verify free correct */
261 if (NULL != rte_hash_find_existing(hash_name))
/* Per-lcore cleanup: free every four-byte-key (FBK) hash table this
 * lcore created under the "fr_test_<lcore>_<i>" naming scheme. */
269 fbk_clean(unsigned lcore_id)
271 char fbk_name[MAX_STRING_SIZE];
272 struct rte_fbk_hash_table *handle;
275 for (i = 0; i < MAX_ITER_MULTI; i++) {
276 snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_id, i);
/* Only free tables that were actually created. */
278 if ((handle = rte_fbk_hash_find_existing(fbk_name)) != NULL)
279 rte_fbk_hash_free(handle);
/* Reentrancy test for the FBK hash: shared-name creation race, then
 * per-lcore unique-name create / find / free rounds.
 * NOTE(review): some initializer fields (.entries, .socket_id) and the
 * error branches are not visible in this extract. */
284 fbk_create_free(__rte_unused void *arg)
286 unsigned lcore_self = rte_lcore_id();
287 struct rte_fbk_hash_table *handle;
288 char fbk_name[MAX_STRING_SIZE];
290 struct rte_fbk_hash_params fbk_params = {
293 .entries_per_bucket = 4,
295 .hash_func = rte_jhash_1word,
296 .init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
299 WAIT_SYNCHRO_FOR_SLAVES();
/* All lcores race on the same name; only one create should succeed. */
301 /* create the same fbk hash table simultaneously on all threads */
302 fbk_params.name = "fr_test_once";
303 for (i = 0; i < MAX_ITER_ONCE; i++) {
304 handle = rte_fbk_hash_create(&fbk_params);
/* Presumably guarded by `handle != NULL` on a line not visible here. */
306 rte_atomic32_inc(&obj_count);
309 /* create mutiple fbk tables simultaneously */
310 for (i = 0; i < MAX_ITER_MULTI; i++) {
311 snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
312 fbk_params.name = fbk_name;
314 handle = rte_fbk_hash_create(&fbk_params);
/* find_existing must return the handle just created... */
318 /* verify correct existing and then free all */
319 if (handle != rte_fbk_hash_find_existing(fbk_name))
322 rte_fbk_hash_free(handle);
/* ...and must fail after the table has been freed. */
324 /* verify free correct */
325 if (NULL != rte_fbk_hash_find_existing(fbk_name))
331 #endif /* RTE_LIBRTE_HASH */
333 #ifdef RTE_LIBRTE_LPM
/* Per-lcore cleanup: free every LPM table this lcore created under the
 * "fr_test_<lcore>_<i>" naming scheme. NOTE(review): the rte_lpm_free()
 * call that should follow the successful lookup is not visible here. */
335 lpm_clean(unsigned int lcore_id)
337 char lpm_name[MAX_STRING_SIZE];
341 for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
342 snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_id, i);
/* Only free tables that were actually created. */
344 if ((lpm = rte_lpm_find_existing(lpm_name)) != NULL)
/* Reentrancy test for the LPM library: shared-name creation race, then
 * per-lcore unique-name create / find / free rounds (MAX_LPM_ITER_TIMES
 * iterations rather than MAX_ITER_MULTI). NOTE(review): config.flags
 * initialization, the rte_lpm_free() calls and the error branches are
 * not visible in this extract. */
350 lpm_create_free(__rte_unused void *arg)
352 unsigned lcore_self = rte_lcore_id();
354 struct rte_lpm_config config;
/* Deliberately tiny table: reentrancy, not capacity, is under test. */
356 config.max_rules = 4;
357 config.number_tbl8s = 256;
359 char lpm_name[MAX_STRING_SIZE];
362 WAIT_SYNCHRO_FOR_SLAVES();
/* All lcores race on the same name; only one create should succeed. */
364 /* create the same lpm simultaneously on all threads */
365 for (i = 0; i < MAX_ITER_ONCE; i++) {
366 lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, &config);
/* Presumably guarded by `lpm != NULL` on a line not visible here. */
368 rte_atomic32_inc(&obj_count);
/* (Comment says "fbk tables" but these are LPM tables — copy/paste
 * slip in the original.) */
371 /* create mutiple fbk tables simultaneously */
372 for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
373 snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
374 lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, &config);
/* find_existing must return the handle just created... */
378 /* verify correct existing and then free all */
379 if (lpm != rte_lpm_find_existing(lpm_name))
/* ...and must fail after the table has been freed. */
384 /* verify free correct */
385 if (NULL != rte_lpm_find_existing(lpm_name))
391 #endif /* RTE_LIBRTE_LPM */
/* Tail of `struct test_case` (its opening and the func/arg/clean fields
 * are not visible in this extract): human-readable case name. */
397 char name[MAX_STRING_SIZE];
/* Table of { func, arg, clean, name } entries driven by
 * test_func_reentrancy(); hash/fbk and lpm cases are compiled in only
 * when the corresponding library is enabled. */
400 /* All test cases in the test suite */
401 struct test_case test_cases[] = {
402 { test_eal_init_once, NULL, NULL, "eal init once" },
403 { ring_create_lookup, NULL, ring_clean, "ring create/lookup" },
404 { mempool_create_lookup, NULL, mempool_clean,
405 "mempool create/lookup" },
406 #ifdef RTE_LIBRTE_HASH
407 { hash_create_free, NULL, hash_clean, "hash create/free" },
408 { fbk_create_free, NULL, fbk_clean, "fbk create/free" },
409 #endif /* RTE_LIBRTE_HASH */
410 #ifdef RTE_LIBRTE_LPM
411 { lpm_create_free, NULL, lpm_clean, "lpm create/free" },
412 #endif /* RTE_LIBRTE_LPM */
/* Run one test case on every lcore simultaneously: reset the counters,
 * launch the case on each slave, release the start gate, run it on the
 * master too, then join the slaves, invoke the per-lcore cleanup hook,
 * and verify the shared-name object was created exactly once.
 * NOTE(review): the declarations of `lcore_id`/`count`, the `ret` error
 * accounting and the return statements are not visible in this extract. */
416 * launch test case in two separate thread
419 launch_test(struct test_case *pt_case)
/* Cap participants at MAX_LCORES; `cores_save` presumably bounds the
 * slave-launch loop below — confirm against the original. */
423 unsigned cores_save = rte_lcore_count();
424 unsigned cores = RTE_MIN(cores_save, MAX_LCORES);
427 if (pt_case->func == NULL)
/* Reset shared state before every case. */
430 rte_atomic32_set(&obj_count, 0);
431 rte_atomic32_set(&synchro, 0);
433 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
437 rte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);
/* Open the gate: slaves spinning in WAIT_SYNCHRO_FOR_SLAVES() start. */
440 rte_atomic32_set(&synchro, 1);
/* The master participates in the same race. */
442 if (pt_case->func(pt_case->arg) < 0)
/* Join every slave and run its cleanup hook regardless of outcome. */
446 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
450 if (rte_eal_wait_lcore(lcore_id) < 0)
453 if (pt_case->clean != NULL)
454 pt_case->clean(lcore_id);
/* Exactly one lcore must have won the shared-name creation race. */
457 count = rte_atomic32_read(&obj_count);
459 printf("%s: common object allocated %d times (should be 1)\n",
460 pt_case->name, count);
/* Test entry point: requires at least two lcores, then runs every entry
 * of test_cases[] through launch_test(), reporting PASS/FAIL per case.
 * NOTE(review): the return statements and the `case_id` declaration are
 * not visible in this extract. */
468 * Main entry of func_reentrancy test
471 test_func_reentrancy(void)
474 struct test_case *pt_case = NULL;
/* Reentrancy needs concurrency: bail out below two lcores. */
476 if (rte_lcore_count() < 2) {
477 printf("Not enough cores for func_reentrancy_autotest, expecting at least 2\n");
/* Extra lcores beyond MAX_LCORES are ignored by launch_test. */
480 else if (rte_lcore_count() > MAX_LCORES)
481 printf("Too many lcores, some cores will be disabled\n");
483 for (case_id = 0; case_id < RTE_DIM(test_cases); case_id++) {
484 pt_case = &test_cases[case_id];
485 if (pt_case->func == NULL)
488 if (launch_test(pt_case) < 0) {
489 printf("Func-ReEnt CASE %"PRIu32": %s FAIL\n", case_id, pt_case->name);
492 printf("Func-ReEnt CASE %"PRIu32": %s PASS\n", case_id, pt_case->name);
/* Register with the DPDK test harness under this autotest name. */
498 REGISTER_TEST_COMMAND(func_reentrancy_autotest, test_func_reentrancy);