/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>

#ifdef RTE_LIBRTE_HASH
#include <rte_hash.h>
#include <rte_fbk_hash.h>
#include <rte_jhash.h>
#endif /* RTE_LIBRTE_HASH */

#ifdef RTE_LIBRTE_LPM
#include <rte_lpm.h>
#endif /* RTE_LIBRTE_LPM */

#include <rte_string_fns.h>

#include "test.h"
typedef int (*case_func_t)(void *arg);
typedef void (*case_clean_t)(unsigned lcore_id);

#define MAX_STRING_SIZE     (256)
#define MAX_ITER_TIMES      (16)
#define MAX_LPM_ITER_TIMES  (8)

#define MEMPOOL_ELT_SIZE    (sizeof(uint32_t))
#define MEMPOOL_SIZE        (4)
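/*
 * Rough upper bound on how many lcores may run a case at once: each lcore
 * creates up to MAX_ITER_TIMES named objects and every creation reserves at
 * least one memzone, so the divisor (MAX_ITER_TIMES * 4U) appears to budget
 * roughly four memzones per iteration within RTE_MAX_MEMZONE.
 */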
#define MAX_LCORES	RTE_MAX_MEMZONE / (MAX_ITER_TIMES * 4U)
static rte_atomic32_t obj_count = RTE_ATOMIC32_INIT(0);
static rte_atomic32_t synchro = RTE_ATOMIC32_INIT(0);
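/*
 * Start-line barrier: worker (slave) lcores spin on the synchro flag until the
 * master lcore sets it in launch_test(), so every lcore reaches the create
 * calls at roughly the same time and the reentrancy race is actually exercised.
 */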
#define WAIT_SYNCHRO_FOR_SLAVES()   do { \
	if (lcore_self != rte_get_master_lcore())          \
		while (rte_atomic32_read(&synchro) == 0); \
} while(0)
/*
 * rte_eal_init only initializes once; a second call must fail.
 */
static int
test_eal_init_once(__attribute__((unused)) void *arg)
{
	unsigned lcore_self = rte_lcore_id();

	WAIT_SYNCHRO_FOR_SLAVES();

	rte_atomic32_set(&obj_count, 1); /* silence the obj_count check in the caller */
	if (rte_eal_init(0, NULL) != -1)
		return -1;

	return 0;
}
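/*
 * Reentrancy pattern used by the create/lookup cases below: every lcore tries
 * to create an object under the shared name "fr_test_once". Only one of those
 * concurrent creations should succeed; each success bumps obj_count, and
 * launch_test() fails the case unless the final count is exactly 1.
 */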
/*
 * ring create/lookup reentrancy test
 */
static int
ring_create_lookup(__attribute__((unused)) void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_ring *rp;
	char ring_name[MAX_STRING_SIZE];
	int i;

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same ring simultaneously on all threads */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
		if (rp != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create/lookup new rings several times */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
		rp = rte_ring_create(ring_name, 4096, SOCKET_ID_ANY, 0);
		if (rp == NULL)
			return -1;
		if (rte_ring_lookup(ring_name) != rp)
			return -1;
	}

	/* verify that all rings were created successfully */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
		if (rte_ring_lookup(ring_name) == NULL)
			return -1;
	}

	return 0;
}
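/*
 * Per-object constructor handed to rte_mempool_create() below: it zeroes each
 * element and stores the element's index in it.
 */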
static void
my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
	    void *obj, unsigned i)
{
	uint32_t *objnum = obj;

	memset(obj, 0, mp->elt_size);
	*objnum = i;
}
static int
mempool_create_lookup(__attribute__((unused)) void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_mempool *mp;
	char mempool_name[MAX_STRING_SIZE];
	int i;

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same mempool simultaneously on all threads */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		mp = rte_mempool_create("fr_test_once", MEMPOOL_SIZE,
					MEMPOOL_ELT_SIZE, 0, 0,
					NULL, NULL,
					my_obj_init, NULL,
					SOCKET_ID_ANY, 0);
		if (mp != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create/lookup new mempools several times */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
		mp = rte_mempool_create(mempool_name, MEMPOOL_SIZE,
					MEMPOOL_ELT_SIZE, 0, 0,
					NULL, NULL,
					my_obj_init, NULL,
					SOCKET_ID_ANY, 0);
		if (mp == NULL)
			return -1;
		if (rte_mempool_lookup(mempool_name) != mp)
			return -1;
	}

	/* verify that all mempools were created successfully */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
		if (rte_mempool_lookup(mempool_name) == NULL)
			return -1;
	}

	return 0;
}
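/*
 * The *_clean callbacks below are invoked by launch_test() on the master
 * lcore, once per worker lcore, after the workers have been joined; they free
 * whatever per-lcore objects a failed or aborted case may have left behind.
 */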
#ifdef RTE_LIBRTE_HASH
static void
hash_clean(unsigned lcore_id)
{
	char hash_name[MAX_STRING_SIZE];
	struct rte_hash *handle;
	int i;

	for (i = 0; i < MAX_ITER_TIMES; i++) {
		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_id, i);

		if ((handle = rte_hash_find_existing(hash_name)) != NULL)
			rte_hash_free(handle);
	}
}
static int
hash_create_free(__attribute__((unused)) void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_hash *handle;
	char hash_name[MAX_STRING_SIZE];
	int i;
	struct rte_hash_parameters hash_params = {
		.name = NULL,
		.entries = 16,
		.key_len = 4,
		.hash_func = (rte_hash_function)rte_jhash_32b,
		.hash_func_init_val = 0,
		.socket_id = 0,
	};

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same hash simultaneously on all threads */
	hash_params.name = "fr_test_once";
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		handle = rte_hash_create(&hash_params);
		if (handle != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create multiple hash tables simultaneously */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
		hash_params.name = hash_name;

		handle = rte_hash_create(&hash_params);
		if (handle == NULL)
			return -1;

		/* verify it can be looked up, then free it */
		if (handle != rte_hash_find_existing(hash_name))
			return -1;

		rte_hash_free(handle);
	}

	/* verify that the tables were really freed */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);

		if (rte_hash_find_existing(hash_name) != NULL)
			return -1;
	}

	return 0;
}
static void
fbk_clean(unsigned lcore_id)
{
	char fbk_name[MAX_STRING_SIZE];
	struct rte_fbk_hash_table *handle;
	int i;

	for (i = 0; i < MAX_ITER_TIMES; i++) {
		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_id, i);

		if ((handle = rte_fbk_hash_find_existing(fbk_name)) != NULL)
			rte_fbk_hash_free(handle);
	}
}
static int
fbk_create_free(__attribute__((unused)) void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_fbk_hash_table *handle;
	char fbk_name[MAX_STRING_SIZE];
	int i;
	struct rte_fbk_hash_params fbk_params = {
		.name = NULL,
		.entries = 4,
		.entries_per_bucket = 4,
		.socket_id = 0,
		.hash_func = rte_jhash_1word,
		.init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
	};

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same fbk hash table simultaneously on all threads */
	fbk_params.name = "fr_test_once";
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		handle = rte_fbk_hash_create(&fbk_params);
		if (handle != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create multiple fbk tables simultaneously */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
		fbk_params.name = fbk_name;

		handle = rte_fbk_hash_create(&fbk_params);
		if (handle == NULL)
			return -1;

		/* verify it can be looked up, then free it */
		if (handle != rte_fbk_hash_find_existing(fbk_name))
			return -1;

		rte_fbk_hash_free(handle);
	}

	/* verify that the tables were really freed */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);

		if (rte_fbk_hash_find_existing(fbk_name) != NULL)
			return -1;
	}

	return 0;
}
#endif /* RTE_LIBRTE_HASH */

#ifdef RTE_LIBRTE_LPM
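/*
 * The per-lcore LPM loops use MAX_LPM_ITER_TIMES (8) rather than
 * MAX_ITER_TIMES, presumably because each LPM table reserves far more memory
 * than the rings, mempools and hash tables above.
 */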
static void
lpm_clean(unsigned lcore_id)
{
	char lpm_name[MAX_STRING_SIZE];
	struct rte_lpm *lpm;
	int i;

	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_id, i);

		if ((lpm = rte_lpm_find_existing(lpm_name)) != NULL)
			rte_lpm_free(lpm);
	}
}
static int
lpm_create_free(__attribute__((unused)) void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_lpm *lpm;
	struct rte_lpm_config config;
	char lpm_name[MAX_STRING_SIZE];
	int i;

	config.max_rules = 4;
	config.number_tbl8s = 256;
	config.flags = 0;

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same lpm simultaneously on all threads */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, &config);
		if (lpm != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create multiple LPM tables simultaneously */
	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
		lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, &config);
		if (lpm == NULL)
			return -1;

		/* verify it can be looked up, then free it */
		if (lpm != rte_lpm_find_existing(lpm_name))
			return -1;

		rte_lpm_free(lpm);
	}

	/* verify that the tables were really freed */
	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
		if (rte_lpm_find_existing(lpm_name) != NULL)
			return -1;
	}

	return 0;
}
#endif /* RTE_LIBRTE_LPM */

struct test_case {
	case_func_t    func;
	void          *arg;
	case_clean_t   clean;
	char           name[MAX_STRING_SIZE];
};
/* All test cases in the test suite */
struct test_case test_cases[] = {
	{ test_eal_init_once, NULL, NULL, "eal init once" },
	{ ring_create_lookup, NULL, NULL, "ring create/lookup" },
	{ mempool_create_lookup, NULL, NULL, "mempool create/lookup" },
#ifdef RTE_LIBRTE_HASH
	{ hash_create_free, NULL, hash_clean, "hash create/free" },
	{ fbk_create_free, NULL, fbk_clean, "fbk create/free" },
#endif /* RTE_LIBRTE_HASH */
#ifdef RTE_LIBRTE_LPM
	{ lpm_create_free, NULL, lpm_clean, "lpm create/free" },
#endif /* RTE_LIBRTE_LPM */
};
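/*
 * Each entry above provides the worker function, its argument, an optional
 * per-lcore cleanup callback and a printable case name; launch_test() below
 * runs the worker on every participating lcore and then invokes the cleanup
 * once per worker lcore.
 */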
/*
 * Launch one test case on the master lcore and on every available worker lcore.
 */
static int
launch_test(struct test_case *pt_case)
{
	int ret = 0;
	unsigned lcore_id;
	unsigned cores_save = rte_lcore_count();
	unsigned cores = RTE_MIN(cores_save, MAX_LCORES);
	unsigned count;

	if (pt_case->func == NULL)
		return -1;

	rte_atomic32_set(&obj_count, 0);
	rte_atomic32_set(&synchro, 0);

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (cores == 1)
			break;
		cores--;
		rte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);
	}

	rte_atomic32_set(&synchro, 1);

	if (pt_case->func(pt_case->arg) < 0)
		ret = -1;

	cores = cores_save;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (cores == 1)
			break;
		cores--;
		if (rte_eal_wait_lcore(lcore_id) < 0)
			ret = -1;
		if (pt_case->clean != NULL)
			pt_case->clean(lcore_id);
	}

	count = rte_atomic32_read(&obj_count);
	if (count != 1) {
		printf("%s: common object allocated %d times (should be 1)\n",
			pt_case->name, count);
		ret = -1;
	}

	return ret;
}
/*
 * Main entry of func_reentrancy test
 */
static int
test_func_reentrancy(void)
{
	uint32_t case_id;
	struct test_case *pt_case = NULL;

	if (rte_lcore_count() <= 1) {
		printf("Not enough lcores for testing\n");
		return -1;
	}
	else if (rte_lcore_count() > MAX_LCORES)
		printf("Too many lcores, some cores will be disabled\n");

	for (case_id = 0; case_id < sizeof(test_cases) / sizeof(struct test_case); case_id++) {
		pt_case = &test_cases[case_id];
		if (pt_case->func == NULL)
			continue;

		if (launch_test(pt_case) < 0) {
			printf("Func-ReEnt CASE %"PRIu32": %s FAIL\n", case_id, pt_case->name);
			return -1;
		}
		printf("Func-ReEnt CASE %"PRIu32": %s PASS\n", case_id, pt_case->name);
	}

	return 0;
}

REGISTER_TEST_COMMAND(func_reentrancy_autotest, test_func_reentrancy);