/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_fbk_hash.h>
#include <rte_jhash.h>
#include <rte_lpm.h>
#include <rte_string_fns.h>

#include <cmdline_parse.h>

#include "test.h"
typedef int (*case_func_t)(void *arg);
typedef void (*case_clean_t)(unsigned lcore_id);

#define MAX_STRING_SIZE     (256)
#define MAX_ITER_TIMES      (16)
#define MAX_LPM_ITER_TIMES  (8)

#define MEMPOOL_ELT_SIZE    (0)
#define MEMPOOL_SIZE        (4)

#define MAX_LCORES (RTE_MAX_MEMZONE / (MAX_ITER_TIMES * 4U))

static rte_atomic32_t synchro = RTE_ATOMIC32_INIT(0);

#define WAIT_SYNCHRO_FOR_SLAVES() do { \
	if (lcore_self != rte_get_master_lcore()) \
		while (rte_atomic32_read(&synchro) == 0); \
} while(0)
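
/*
 * Every test case below runs concurrently on the master lcore and on all
 * slave lcores. Slaves spin on the synchro flag until launch_test()
 * releases them, so all lcores enter the test body at roughly the same
 * time and exercise the create/lookup/free paths simultaneously.
 */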
/*
 * rte_eal_init() can only initialize once; a second call must fail
 */
static int
test_eal_init_once(__attribute__((unused)) void *arg)
{
	unsigned lcore_self = rte_lcore_id();

	WAIT_SYNCHRO_FOR_SLAVES();

	if (rte_eal_init(0, NULL) != -1)
		return -1;

	return 0;
}
/*
 * ring create/lookup reentrancy test
 */
static int
ring_create_lookup(__attribute__((unused)) void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_ring *rp;
	char ring_name[MAX_STRING_SIZE];
	int i;

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same ring simultaneously on all threads */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
		if ((NULL == rp) && (rte_ring_lookup("fr_test_once") == NULL))
			return -1;
	}

	/* create/lookup a new ring several times */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rte_snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
		rp = rte_ring_create(ring_name, 4096, SOCKET_ID_ANY, 0);
		if (NULL == rp)
			return -1;
		if (rte_ring_lookup(ring_name) != rp)
			return -1;
	}

	/* verify that all rings were created successfully */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rte_snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
		if (rte_ring_lookup(ring_name) == NULL)
			return -1;
	}

	return 0;
}
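
/*
 * Object initializer callback passed to rte_mempool_create(): zero each
 * element and stamp it with its index.
 */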
static void
my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
	    void *obj, unsigned i)
{
	uint32_t *objnum = obj;
	memset(obj, 0, mp->elt_size);
	*objnum = i;
}
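
/*
 * mempool create/lookup reentrancy test
 */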
static int
mempool_create_lookup(__attribute__((unused)) void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_mempool *mp;
	char mempool_name[MAX_STRING_SIZE];
	int i;

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same mempool simultaneously on all threads */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		mp = rte_mempool_create("fr_test_once", MEMPOOL_SIZE,
					MEMPOOL_ELT_SIZE, 0, 0,
					NULL, NULL,
					my_obj_init, NULL,
					SOCKET_ID_ANY, 0);
		if ((NULL == mp) && (rte_mempool_lookup("fr_test_once") == NULL))
			return -1;
	}

	/* create/lookup a new mempool several times */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rte_snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
		mp = rte_mempool_create(mempool_name, MEMPOOL_SIZE,
					MEMPOOL_ELT_SIZE, 0, 0,
					NULL, NULL,
					my_obj_init, NULL,
					SOCKET_ID_ANY, 0);
		if (NULL == mp)
			return -1;
		if (rte_mempool_lookup(mempool_name) != mp)
			return -1;
	}

	/* verify that all mempools were created successfully */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rte_snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
		if (rte_mempool_lookup(mempool_name) == NULL)
			return -1;
	}

	return 0;
}
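
/*
 * Per-lcore cleanup: free any hash table left behind by the given lcore.
 */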
static void
hash_clean(unsigned lcore_id)
{
	char hash_name[MAX_STRING_SIZE];
	struct rte_hash *handle;
	int i;

	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rte_snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_id, i);

		if ((handle = rte_hash_find_existing(hash_name)) != NULL)
			rte_hash_free(handle);
	}
}
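
/*
 * hash create/free reentrancy test
 */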
static int
hash_create_free(__attribute__((unused)) void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_hash *handle;
	char hash_name[MAX_STRING_SIZE];
	int i;
	struct rte_hash_parameters hash_params = {
		.name = NULL,
		.entries = 16,
		.bucket_entries = 4,
		.key_len = 4,
		.hash_func = (rte_hash_function)rte_jhash2,
		.hash_func_init_val = 0,
		.socket_id = 0,
	};

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same hash simultaneously on all threads */
	hash_params.name = "fr_test_once";
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		handle = rte_hash_create(&hash_params);
		if ((NULL == handle) && (rte_hash_find_existing("fr_test_once") == NULL))
			return -1;
	}

	/* create multiple times simultaneously */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rte_snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
		hash_params.name = hash_name;

		handle = rte_hash_create(&hash_params);
		if (NULL == handle)
			return -1;

		/* verify that it exists, then free it */
		if (handle != rte_hash_find_existing(hash_name))
			return -1;

		rte_hash_free(handle);
	}

	/* verify that all tables were freed */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rte_snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);

		if (NULL != rte_hash_find_existing(hash_name))
			return -1;
	}

	return 0;
}
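
/*
 * Per-lcore cleanup: free any four-byte-key (fbk) hash table left behind
 * by the given lcore.
 */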
static void
fbk_clean(unsigned lcore_id)
{
	char fbk_name[MAX_STRING_SIZE];
	struct rte_fbk_hash_table *handle;
	int i;

	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rte_snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_id, i);

		if ((handle = rte_fbk_hash_find_existing(fbk_name)) != NULL)
			rte_fbk_hash_free(handle);
	}
}
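
/*
 * fbk hash create/free reentrancy test
 */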
static int
fbk_create_free(__attribute__((unused)) void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_fbk_hash_table *handle;
	char fbk_name[MAX_STRING_SIZE];
	int i;
	struct rte_fbk_hash_params fbk_params = {
		.name = NULL,
		.entries = 4,
		.entries_per_bucket = 4,
		.socket_id = 0,
		.hash_func = rte_jhash_1word,
		.init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
	};

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same fbk hash table simultaneously on all threads */
	fbk_params.name = "fr_test_once";
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		handle = rte_fbk_hash_create(&fbk_params);
		if ((NULL == handle) && (rte_fbk_hash_find_existing("fr_test_once") == NULL))
			return -1;
	}

	/* create multiple fbk tables simultaneously */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rte_snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
		fbk_params.name = fbk_name;

		handle = rte_fbk_hash_create(&fbk_params);
		if (NULL == handle)
			return -1;

		/* verify that it exists, then free it */
		if (handle != rte_fbk_hash_find_existing(fbk_name))
			return -1;

		rte_fbk_hash_free(handle);
	}

	/* verify that all tables were freed */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rte_snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);

		if (NULL != rte_fbk_hash_find_existing(fbk_name))
			return -1;
	}

	return 0;
}
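
/*
 * Per-lcore cleanup: free any LPM table left behind by the given lcore.
 */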
static void
lpm_clean(unsigned lcore_id)
{
	char lpm_name[MAX_STRING_SIZE];
	struct rte_lpm *lpm;
	int i;

	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		rte_snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_id, i);

		if ((lpm = rte_lpm_find_existing(lpm_name)) != NULL)
			rte_lpm_free(lpm);
	}
}
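
/*
 * lpm create/free reentrancy test
 */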
static int
lpm_create_free(__attribute__((unused)) void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_lpm *lpm;
	char lpm_name[MAX_STRING_SIZE];
	int i;

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same lpm simultaneously on all threads */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, 4, RTE_LPM_HEAP);
		if ((NULL == lpm) && (rte_lpm_find_existing("fr_test_once") == NULL))
			return -1;
	}

	/* create multiple lpm tables simultaneously */
	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		rte_snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
		lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, 4, RTE_LPM_HEAP);
		if (NULL == lpm)
			return -1;

		/* verify that it exists, then free it */
		if (lpm != rte_lpm_find_existing(lpm_name))
			return -1;

		rte_lpm_free(lpm);
	}

	/* verify that all tables were freed */
	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		rte_snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
		if (NULL != rte_lpm_find_existing(lpm_name))
			return -1;
	}

	return 0;
}
/* A single test case: worker function, its argument, an optional
 * per-lcore cleanup callback, and a printable name. */
struct test_case {
	case_func_t  func;
	void        *arg;
	case_clean_t clean;
	char         name[MAX_STRING_SIZE];
};

/* All test cases in the test suite */
struct test_case test_cases[] = {
	{ test_eal_init_once,    NULL, NULL,       "eal init once" },
	{ ring_create_lookup,    NULL, NULL,       "ring create/lookup" },
	{ mempool_create_lookup, NULL, NULL,       "mempool create/lookup" },
	{ hash_create_free,      NULL, hash_clean, "hash create/free" },
	{ fbk_create_free,       NULL, fbk_clean,  "fbk create/free" },
	{ lpm_create_free,       NULL, lpm_clean,  "lpm create/free" },
};
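
/*
 * Ring and mempool creation reserve memzones, so MAX_LCORES (derived from
 * RTE_MAX_MEMZONE above) bounds how many lcores launch_test() lets take
 * part; this keeps the memzone table from being exhausted mid-case.
 */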
/*
 * launch the test case on the master lcore and on all slave lcores
 */
static int
launch_test(struct test_case *pt_case)
{
	int ret = 0;
	unsigned lcore_id;
	unsigned cores_save = rte_lcore_count();
	unsigned cores = RTE_MIN(cores_save, MAX_LCORES);

	if (pt_case->func == NULL)
		return -1;

	rte_atomic32_set(&synchro, 0);

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (cores == 1)
			break;
		cores--;
		rte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);
	}

	/* release the spinning slaves, then run the case locally as well */
	rte_atomic32_set(&synchro, 1);

	if (pt_case->func(pt_case->arg) < 0)
		ret = -1;

	cores = cores_save;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (cores == 1)
			break;
		cores--;

		if (rte_eal_wait_lcore(lcore_id) < 0)
			ret = -1;

		if (pt_case->clean != NULL)
			pt_case->clean(lcore_id);
	}

	return ret;
}
/*
 * Main entry of func_reentrancy test
 */
int
test_func_reentrancy(void)
{
	uint32_t case_id;
	struct test_case *pt_case = NULL;

	if (rte_lcore_count() <= 1) {
		printf("Not enough lcores for testing\n");
		return -1;
	}
	else if (rte_lcore_count() > MAX_LCORES)
		printf("Too many lcores, some cores will be disabled\n");

	for (case_id = 0; case_id < sizeof(test_cases)/sizeof(struct test_case); case_id++) {
		pt_case = &test_cases[case_id];
		if (pt_case->func == NULL)
			continue;

		if (launch_test(pt_case) < 0) {
			printf("Func-ReEnt CASE %"PRIu32": %s FAIL\n", case_id, pt_case->name);
			return -1;
		}
		printf("Func-ReEnt CASE %"PRIu32": %s PASS\n", case_id, pt_case->name);
	}

	return 0;
}
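
/*
 * Note: in the DPDK test application this entry point is normally wired to
 * an autotest command (e.g. "func_reentrancy_autotest"); the exact
 * registration lives in the test harness, not in this file.
 */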