/*-
 *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_tailq.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>

#ifdef RTE_LIBRTE_HASH
#include <rte_hash.h>
#include <rte_fbk_hash.h>
#include <rte_jhash.h>
#endif /* RTE_LIBRTE_HASH */

#ifdef RTE_LIBRTE_LPM
#include <rte_lpm.h>
#endif /* RTE_LIBRTE_LPM */

#include <rte_string_fns.h>

#include <cmdline_parse.h>
typedef int (*case_func_t)(void* arg);
typedef void (*case_clean_t)(unsigned lcore_id);

#define MAX_STRING_SIZE     (256)
#define MAX_ITER_TIMES      (16)
#define MAX_LPM_ITER_TIMES  (8)

#define MEMPOOL_ELT_SIZE    (0)
#define MEMPOOL_SIZE        (4)
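/*
 * Cap the number of lcores taking part in each case: every lcore creates up
 * to MAX_ITER_TIMES objects, and the divisor presumably budgets a few
 * memzones per created object so a full run stays below RTE_MAX_MEMZONE.
 */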
#define MAX_LCORES  (RTE_MAX_MEMZONE / (MAX_ITER_TIMES * 4U))
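/*
 * Start flag shared by all lcores: launch_test() clears it, launches the
 * case on every slave lcore and only then sets it to 1.
 * WAIT_SYNCHRO_FOR_SLAVES() makes each slave spin until that happens, so all
 * lcores exercise the API under test at roughly the same time.
 */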
static rte_atomic32_t synchro = RTE_ATOMIC32_INIT(0);

#define WAIT_SYNCHRO_FOR_SLAVES()   do { \
	if (lcore_self != rte_get_master_lcore())         \
		while (rte_atomic32_read(&synchro) == 0); \
} while(0)
/*
 * rte_eal_init() should succeed only once: a second call must return -1.
 */
test_eal_init_once(__attribute__((unused)) void *arg)
	unsigned lcore_self = rte_lcore_id();

	WAIT_SYNCHRO_FOR_SLAVES();

	if (rte_eal_init(0, NULL) != -1)
/*
 * ring create/lookup reentrancy test
 */
ring_create_lookup(__attribute__((unused)) void *arg)
	unsigned lcore_self = rte_lcore_id();
	struct rte_ring * rp;
	char ring_name[MAX_STRING_SIZE];

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same ring simultaneously on all threads */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
		if ((NULL == rp) && (rte_ring_lookup("fr_test_once") == NULL))

	/* create/lookup new rings several times */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rte_snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
		rp = rte_ring_create(ring_name, 4096, SOCKET_ID_ANY, 0);
		if (rte_ring_lookup(ring_name) != rp)

	/* verify all rings were created successfully */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rte_snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
		if (rte_ring_lookup(ring_name) == NULL)
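/* mempool object-init callback: zero each element as it is populated */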
my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
	    void *obj, unsigned i)
	uint32_t *objnum = obj;
	memset(obj, 0, mp->elt_size);

mempool_create_lookup(__attribute__((unused)) void *arg)
	unsigned lcore_self = rte_lcore_id();
	struct rte_mempool * mp;
	char mempool_name[MAX_STRING_SIZE];

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same mempool simultaneously on all threads */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		mp = rte_mempool_create("fr_test_once", MEMPOOL_SIZE,
					MEMPOOL_ELT_SIZE, 0, 0,
		if ((NULL == mp) && (rte_mempool_lookup("fr_test_once") == NULL))

	/* create/lookup new mempools several times */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rte_snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
		mp = rte_mempool_create(mempool_name, MEMPOOL_SIZE,
					MEMPOOL_ELT_SIZE, 0, 0,
		if (rte_mempool_lookup(mempool_name) != mp)

	/* verify all mempools were created successfully */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rte_snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
		if (rte_mempool_lookup(mempool_name) == NULL)
#ifdef RTE_LIBRTE_HASH
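/* free any hash tables left behind on the given lcore by hash_create_free() */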
hash_clean(unsigned lcore_id)
	char hash_name[MAX_STRING_SIZE];
	struct rte_hash *handle;

	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rte_snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_id, i);

		if ((handle = rte_hash_find_existing(hash_name)) != NULL)
			rte_hash_free(handle);

hash_create_free(__attribute__((unused)) void *arg)
	unsigned lcore_self = rte_lcore_id();
	struct rte_hash *handle;
	char hash_name[MAX_STRING_SIZE];

	struct rte_hash_parameters hash_params = {
		.hash_func = (rte_hash_function)rte_jhash2,
		.hash_func_init_val = 0,

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same hash simultaneously on all threads */
	hash_params.name = "fr_test_once";
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		handle = rte_hash_create(&hash_params);
		if ((NULL == handle) && (rte_hash_find_existing("fr_test_once") == NULL))

	/* create multiple hash tables simultaneously */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rte_snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
		hash_params.name = hash_name;

		handle = rte_hash_create(&hash_params);

		/* verify the table can be looked up, then free it */
		if (handle != rte_hash_find_existing(hash_name))

		rte_hash_free(handle);

	/* verify that the frees really removed the tables */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rte_snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);

		if (NULL != rte_hash_find_existing(hash_name))
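/* free any fbk hash tables left behind on the given lcore by fbk_create_free() */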
fbk_clean(unsigned lcore_id)
	char fbk_name[MAX_STRING_SIZE];
	struct rte_fbk_hash_table *handle;

	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rte_snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_id, i);

		if ((handle = rte_fbk_hash_find_existing(fbk_name)) != NULL)
			rte_fbk_hash_free(handle);

fbk_create_free(__attribute__((unused)) void *arg)
	unsigned lcore_self = rte_lcore_id();
	struct rte_fbk_hash_table *handle;
	char fbk_name[MAX_STRING_SIZE];

	struct rte_fbk_hash_params fbk_params = {
		.entries_per_bucket = 4,
		.hash_func = rte_jhash_1word,
		.init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same fbk hash table simultaneously on all threads */
	fbk_params.name = "fr_test_once";
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		handle = rte_fbk_hash_create(&fbk_params);
		if ((NULL == handle) && (rte_fbk_hash_find_existing("fr_test_once") == NULL))

	/* create multiple fbk tables simultaneously */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rte_snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
		fbk_params.name = fbk_name;

		handle = rte_fbk_hash_create(&fbk_params);

		/* verify the table can be looked up, then free it */
		if (handle != rte_fbk_hash_find_existing(fbk_name))

		rte_fbk_hash_free(handle);

	/* verify that the frees really removed the tables */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rte_snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);

		if (NULL != rte_fbk_hash_find_existing(fbk_name))

#endif /* RTE_LIBRTE_HASH */
#ifdef RTE_LIBRTE_LPM
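/* free any LPM tables left behind on the given lcore by lpm_create_free() */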
lpm_clean(unsigned lcore_id)
	char lpm_name[MAX_STRING_SIZE];

	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		rte_snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_id, i);

		if ((lpm = rte_lpm_find_existing(lpm_name)) != NULL)

lpm_create_free(__attribute__((unused)) void *arg)
	unsigned lcore_self = rte_lcore_id();
	char lpm_name[MAX_STRING_SIZE];

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same LPM table simultaneously on all threads */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, 4, RTE_LPM_HEAP);
		if ((NULL == lpm) && (rte_lpm_find_existing("fr_test_once") == NULL))

	/* create multiple LPM tables simultaneously */
	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		rte_snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
		lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, 4, RTE_LPM_HEAP);

		/* verify the table can be looked up, then free it */
		if (lpm != rte_lpm_find_existing(lpm_name))

	/* verify that the frees really removed the tables */
	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		rte_snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
		if (NULL != rte_lpm_find_existing(lpm_name))

#endif /* RTE_LIBRTE_LPM */
struct test_case {
	case_func_t  func;
	void        *arg;
	case_clean_t clean;
	char         name[MAX_STRING_SIZE];
};
/* All test cases in the test suite */
struct test_case test_cases[] = {
	{ test_eal_init_once, NULL, NULL, "eal init once" },
	{ ring_create_lookup, NULL, NULL, "ring create/lookup" },
	{ mempool_create_lookup, NULL, NULL, "mempool create/lookup" },
#ifdef RTE_LIBRTE_HASH
	{ hash_create_free, NULL, hash_clean, "hash create/free" },
	{ fbk_create_free, NULL, fbk_clean, "fbk create/free" },
#endif /* RTE_LIBRTE_HASH */
#ifdef RTE_LIBRTE_LPM
	{ lpm_create_free, NULL, lpm_clean, "lpm create/free" },
#endif /* RTE_LIBRTE_LPM */
};
/*
 * Launch the test case concurrently on the master lcore and the slave lcores.
 */
launch_test(struct test_case *pt_case)
	unsigned cores_save = rte_lcore_count();
	unsigned cores = RTE_MIN(cores_save, MAX_LCORES);

	if (pt_case->func == NULL)

	rte_atomic32_set(&synchro, 0);

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		rte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);
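	/* all slaves are launched and spinning on synchro: release them and
	 * run the same case on the master lcore as well */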
	rte_atomic32_set(&synchro, 1);

	if (pt_case->func(pt_case->arg) < 0)

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)

		if (pt_case->clean != NULL)
			pt_case->clean(lcore_id);
/*
 * Main entry of the func_reentrancy test
 */
test_func_reentrancy(void)
	struct test_case *pt_case = NULL;

	if (rte_lcore_count() <= 1) {
		printf("Not enough lcores for testing\n");
	else if (rte_lcore_count() > MAX_LCORES)
		printf("Too many lcores, some cores will be disabled\n");

	for (case_id = 0; case_id < sizeof(test_cases)/sizeof(struct test_case); case_id++) {
		pt_case = &test_cases[case_id];
		if (pt_case->func == NULL)

		if (launch_test(pt_case) < 0) {
			printf("Func-ReEnt CASE %"PRIu32": %s FAIL\n", case_id, pt_case->name);

		printf("Func-ReEnt CASE %"PRIu32": %s PASS\n", case_id, pt_case->name);