/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>

#ifdef RTE_LIB_HASH
#include <rte_hash.h>
#include <rte_fbk_hash.h>
#include <rte_jhash.h>
#endif /* RTE_LIB_HASH */

#ifdef RTE_LIB_LPM
#include <rte_lpm.h>
#endif /* RTE_LIB_LPM */

#include <rte_string_fns.h>

#include "test.h"

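/*
 * Functional reentrancy test: every participating lcore runs the same
 * create/lookup/free sequence at the same time. For each case, all lcores
 * race to create one object with the shared name "fr_test_once", and exactly
 * one creation must succeed. Each lcore then creates, looks up and frees
 * objects under per-lcore names to check that the APIs are safe to call
 * concurrently.
 */
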
typedef int (*case_func_t)(void *arg);
typedef void (*case_clean_t)(unsigned int lcore_id);

#define MAX_STRING_SIZE                     (256)
#define MAX_ITER_MULTI                      (16)
#define MAX_ITER_ONCE                       (4)
#define MAX_LPM_ITER_TIMES                  (6)

#define MEMPOOL_ELT_SIZE                    (sizeof(uint32_t))
#define MEMPOOL_SIZE                        (4)

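/*
 * Cap the number of lcores taking part: each lcore can create up to
 * MAX_ITER_MULTI named objects per case and a single object may consume
 * several memzones, so stay well below RTE_MAX_MEMZONE.
 */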
#define MAX_LCORES      (RTE_MAX_MEMZONE / (MAX_ITER_MULTI * 4U))

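/*
 * obj_count counts how many lcores succeeded in creating the shared
 * "fr_test_once" object (launch_test() expects exactly one success);
 * synchro is the start flag that releases the worker lcores.
 */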
static uint32_t obj_count;
static uint32_t synchro;

#define WAIT_SYNCHRO_FOR_WORKERS()   do { \
        if (lcore_self != rte_get_main_lcore())                  \
                rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED); \
} while (0)

/*
 * EAL may only be initialized once: a second rte_eal_init() call must fail.
 */
static int
test_eal_init_once(__rte_unused void *arg)
{
        unsigned int lcore_self = rte_lcore_id();

        WAIT_SYNCHRO_FOR_WORKERS();

        __atomic_store_n(&obj_count, 1, __ATOMIC_RELAXED); /* silence the check in the caller */
        if (rte_eal_init(0, NULL) != -1)
                return -1;

        return 0;
}

/*
 * ring create/lookup reentrancy test
 */
static void
ring_clean(unsigned int lcore_id)
{
        struct rte_ring *rp;
        char ring_name[MAX_STRING_SIZE];
        int i;

        rp = rte_ring_lookup("fr_test_once");
        if (rp != NULL)
                rte_ring_free(rp);

        for (i = 0; i < MAX_ITER_MULTI; i++) {
                snprintf(ring_name, sizeof(ring_name),
                                "fr_test_%d_%d", lcore_id, i);
                rp = rte_ring_lookup(ring_name);
                if (rp != NULL)
                        rte_ring_free(rp);
        }
}

static int
ring_create_lookup(__rte_unused void *arg)
{
        unsigned int lcore_self = rte_lcore_id();
        struct rte_ring *rp;
        char ring_name[MAX_STRING_SIZE];
        int i;

        WAIT_SYNCHRO_FOR_WORKERS();

        /* create the same ring simultaneously on all threads */
        for (i = 0; i < MAX_ITER_ONCE; i++) {
                rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
                if (rp != NULL)
                        __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
        }

        /* create/lookup new rings several times */
        for (i = 0; i < MAX_ITER_MULTI; i++) {
                snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
                rp = rte_ring_create(ring_name, 4096, SOCKET_ID_ANY, 0);
                if (rp == NULL)
                        return -1;
                if (rte_ring_lookup(ring_name) != rp)
                        return -1;

                /* verify the ring was created successfully */
                if (rte_ring_lookup(ring_name) == NULL)
                        return -1;
        }

        return 0;
}

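/* mempool per-object constructor: zero the element and store its index */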
static void
my_obj_init(struct rte_mempool *mp, __rte_unused void *arg,
            void *obj, unsigned int i)
{
        uint32_t *objnum = obj;
        memset(obj, 0, mp->elt_size);
        *objnum = i;
}

static void
mempool_clean(unsigned int lcore_id)
{
        struct rte_mempool *mp;
        char mempool_name[MAX_STRING_SIZE];
        int i;

        mp = rte_mempool_lookup("fr_test_once");
        if (mp != NULL)
                rte_mempool_free(mp);

        for (i = 0; i < MAX_ITER_MULTI; i++) {
                snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d",
                         lcore_id, i);
                mp = rte_mempool_lookup(mempool_name);
                if (mp != NULL)
                        rte_mempool_free(mp);
        }
}

static int
mempool_create_lookup(__rte_unused void *arg)
{
        unsigned int lcore_self = rte_lcore_id();
        struct rte_mempool *mp;
        char mempool_name[MAX_STRING_SIZE];
        int i;

        WAIT_SYNCHRO_FOR_WORKERS();

        /* create the same mempool simultaneously on all threads */
        for (i = 0; i < MAX_ITER_ONCE; i++) {
                mp = rte_mempool_create("fr_test_once", MEMPOOL_SIZE,
                                        MEMPOOL_ELT_SIZE, 0, 0,
                                        NULL, NULL,
                                        my_obj_init, NULL,
                                        SOCKET_ID_ANY, 0);
                if (mp != NULL)
                        __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
        }

        /* create/lookup new mempools several times */
        for (i = 0; i < MAX_ITER_MULTI; i++) {
                snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
                mp = rte_mempool_create(mempool_name, MEMPOOL_SIZE,
                                                MEMPOOL_ELT_SIZE, 0, 0,
                                                NULL, NULL,
                                                my_obj_init, NULL,
                                                SOCKET_ID_ANY, 0);
                if (mp == NULL)
                        return -1;
                if (rte_mempool_lookup(mempool_name) != mp)
                        return -1;

                /* verify the mempool was created successfully */
                if (rte_mempool_lookup(mempool_name) == NULL)
                        return -1;
        }

        return 0;
}

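/*
 * The hash, fbk-hash and LPM cases below follow the same pattern as the
 * ring and mempool cases: all lcores race to create a shared table, then
 * each lcore creates, looks up and frees tables under per-lcore names.
 */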
#ifdef RTE_LIB_HASH
static void
hash_clean(unsigned int lcore_id)
{
        char hash_name[MAX_STRING_SIZE];
        struct rte_hash *handle;
        int i;

        handle = rte_hash_find_existing("fr_test_once");
        if (handle != NULL)
                rte_hash_free(handle);

        for (i = 0; i < MAX_ITER_MULTI; i++) {
                snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_id, i);

                if ((handle = rte_hash_find_existing(hash_name)) != NULL)
                        rte_hash_free(handle);
        }
}

static int
hash_create_free(__rte_unused void *arg)
{
        unsigned int lcore_self = rte_lcore_id();
        struct rte_hash *handle;
        char hash_name[MAX_STRING_SIZE];
        int i;
        struct rte_hash_parameters hash_params = {
                .name = NULL,
                .entries = 16,
                .key_len = 4,
                .hash_func = (rte_hash_function)rte_jhash_32b,
                .hash_func_init_val = 0,
                .socket_id = 0,
        };

        WAIT_SYNCHRO_FOR_WORKERS();

        /* create the same hash simultaneously on all threads */
        hash_params.name = "fr_test_once";
        for (i = 0; i < MAX_ITER_ONCE; i++) {
                handle = rte_hash_create(&hash_params);
                if (handle != NULL)
                        __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
        }

        /* create/free multiple hash tables simultaneously */
        for (i = 0; i < MAX_ITER_MULTI; i++) {
                snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
                hash_params.name = hash_name;

                handle = rte_hash_create(&hash_params);
                if (handle == NULL)
                        return -1;

                /* verify the lookup finds the table just created, then free it */
                if (handle != rte_hash_find_existing(hash_name))
                        return -1;

                rte_hash_free(handle);

                /* verify the table is gone after the free */
                if (rte_hash_find_existing(hash_name) != NULL)
                        return -1;
        }

        return 0;
}

static void
fbk_clean(unsigned int lcore_id)
{
        char fbk_name[MAX_STRING_SIZE];
        struct rte_fbk_hash_table *handle;
        int i;

        handle = rte_fbk_hash_find_existing("fr_test_once");
        if (handle != NULL)
                rte_fbk_hash_free(handle);

        for (i = 0; i < MAX_ITER_MULTI; i++) {
                snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_id, i);

                if ((handle = rte_fbk_hash_find_existing(fbk_name)) != NULL)
                        rte_fbk_hash_free(handle);
        }
}

static int
fbk_create_free(__rte_unused void *arg)
{
        unsigned int lcore_self = rte_lcore_id();
        struct rte_fbk_hash_table *handle;
        char fbk_name[MAX_STRING_SIZE];
        int i;
        struct rte_fbk_hash_params fbk_params = {
                .name = NULL,
                .entries = 4,
                .entries_per_bucket = 4,
                .socket_id = 0,
                .hash_func = rte_jhash_1word,
                .init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
        };

        WAIT_SYNCHRO_FOR_WORKERS();

        /* create the same fbk hash table simultaneously on all threads */
        fbk_params.name = "fr_test_once";
        for (i = 0; i < MAX_ITER_ONCE; i++) {
                handle = rte_fbk_hash_create(&fbk_params);
                if (handle != NULL)
                        __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
        }

        /* create/free multiple fbk tables simultaneously */
        for (i = 0; i < MAX_ITER_MULTI; i++) {
                snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
                fbk_params.name = fbk_name;

                handle = rte_fbk_hash_create(&fbk_params);
                if (handle == NULL)
                        return -1;

                /* verify the lookup finds the table just created, then free it */
                if (handle != rte_fbk_hash_find_existing(fbk_name))
                        return -1;

                rte_fbk_hash_free(handle);

                /* verify the table is gone after the free */
                if (rte_fbk_hash_find_existing(fbk_name) != NULL)
                        return -1;
        }

        return 0;
}
#endif /* RTE_LIB_HASH */

#ifdef RTE_LIB_LPM
static void
lpm_clean(unsigned int lcore_id)
{
        char lpm_name[MAX_STRING_SIZE];
        struct rte_lpm *lpm;
        int i;

        lpm = rte_lpm_find_existing("fr_test_once");
        if (lpm != NULL)
                rte_lpm_free(lpm);

        for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
                snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_id, i);

                if ((lpm = rte_lpm_find_existing(lpm_name)) != NULL)
                        rte_lpm_free(lpm);
        }
}

static int
lpm_create_free(__rte_unused void *arg)
{
        unsigned int lcore_self = rte_lcore_id();
        struct rte_lpm *lpm;
        struct rte_lpm_config config = {
                .max_rules = 4,
                .number_tbl8s = 256,
                .flags = 0,
        };
        char lpm_name[MAX_STRING_SIZE];
        int i;

        WAIT_SYNCHRO_FOR_WORKERS();

        /* create the same lpm simultaneously on all threads */
        for (i = 0; i < MAX_ITER_ONCE; i++) {
                lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, &config);
                if (lpm != NULL)
                        __atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
        }

        /* create/free multiple lpm tables simultaneously */
        for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
                snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
                lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, &config);
                if (lpm == NULL)
                        return -1;

                /* verify the lookup finds the table just created, then free it */
                if (lpm != rte_lpm_find_existing(lpm_name))
                        return -1;

                rte_lpm_free(lpm);

                /* verify the table is gone after the free */
                if (rte_lpm_find_existing(lpm_name) != NULL)
                        return -1;
        }

        return 0;
}
#endif /* RTE_LIB_LPM */

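/*
 * A test case: the function run on every lcore, its argument, an optional
 * per-lcore cleanup callback, and a display name.
 */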
struct test_case {
        case_func_t    func;
        void           *arg;
        case_clean_t   clean;
        char           name[MAX_STRING_SIZE];
};

/* All test cases in the test suite */
struct test_case test_cases[] = {
        { test_eal_init_once,     NULL,  NULL,         "eal init once" },
        { ring_create_lookup,     NULL,  ring_clean,   "ring create/lookup" },
        { mempool_create_lookup,  NULL,  mempool_clean,
                        "mempool create/lookup" },
#ifdef RTE_LIB_HASH
        { hash_create_free,       NULL,  hash_clean,   "hash create/free" },
        { fbk_create_free,        NULL,  fbk_clean,    "fbk create/free" },
#endif /* RTE_LIB_HASH */
#ifdef RTE_LIB_LPM
        { lpm_create_free,        NULL,  lpm_clean,    "lpm create/free" },
#endif /* RTE_LIB_LPM */
};

/**
 * Launch a test case on all participating lcores (workers plus main).
 */
static int
launch_test(struct test_case *pt_case)
{
        unsigned int lcore_id;
        unsigned int cores;
        unsigned int count;
        int ret = 0;

        if (pt_case->func == NULL)
                return -1;

        __atomic_store_n(&obj_count, 0, __ATOMIC_RELAXED);
        __atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);

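        /* start the test body on every worker lcore, capped at MAX_LCORES */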
        cores = RTE_MIN(rte_lcore_count(), MAX_LCORES);
        RTE_LCORE_FOREACH_WORKER(lcore_id) {
                if (cores == 1)
                        break;
                cores--;
                rte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);
        }

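        /* release the workers, then run the same body on the main lcore */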
        __atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);

        if (pt_case->func(pt_case->arg) < 0)
                ret = -1;

        RTE_LCORE_FOREACH_WORKER(lcore_id) {
                if (rte_eal_wait_lcore(lcore_id) < 0)
                        ret = -1;
        }

        RTE_LCORE_FOREACH(lcore_id) {
                if (pt_case->clean != NULL)
                        pt_case->clean(lcore_id);
        }

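        /* exactly one lcore must have created the shared "fr_test_once" object */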
        count = __atomic_load_n(&obj_count, __ATOMIC_RELAXED);
        if (count != 1) {
                printf("%s: common object allocated %d times (should be 1)\n",
                        pt_case->name, count);
                ret = -1;
        }

        return ret;
}

/**
 * Main entry of func_reentrancy test
 */
static int
test_func_reentrancy(void)
{
        uint32_t case_id;
        struct test_case *pt_case = NULL;

        if (RTE_EXEC_ENV_IS_WINDOWS)
                return TEST_SKIPPED;

        if (rte_lcore_count() < 2) {
                printf("Not enough cores for func_reentrancy_autotest, expecting at least 2\n");
                return TEST_SKIPPED;
        }
        if (rte_lcore_count() > MAX_LCORES)
                printf("Too many lcores, some of them will not be used\n");

        for (case_id = 0; case_id < RTE_DIM(test_cases); case_id++) {
                pt_case = &test_cases[case_id];
                if (pt_case->func == NULL)
                        continue;

                if (launch_test(pt_case) < 0) {
                        printf("Func-ReEnt CASE %"PRIu32": %s FAIL\n", case_id, pt_case->name);
                        return -1;
                }
                printf("Func-ReEnt CASE %"PRIu32": %s PASS\n", case_id, pt_case->name);
        }

        return 0;
}

REGISTER_TEST_COMMAND(func_reentrancy_autotest, test_func_reentrancy);
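
/*
 * Example invocation, assuming the standard dpdk-test runner (the binary
 * path depends on the build directory):
 *   echo func_reentrancy_autotest | ./build/app/test/dpdk-test -l 0-3
 */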