test_func_reentrancy.c — DPDK unit test exercising reentrancy of EAL init and of
ring/mempool/hash/fbk-hash/LPM create/lookup/free APIs (dpdk.git, test/test/test_func_reentrancy.c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4
5 #include <string.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <stdint.h>
9 #include <inttypes.h>
10 #include <stdarg.h>
11 #include <errno.h>
12 #include <sys/queue.h>
13
14 #include <rte_common.h>
15 #include <rte_log.h>
16 #include <rte_debug.h>
17 #include <rte_memory.h>
18 #include <rte_launch.h>
19 #include <rte_cycles.h>
20 #include <rte_eal.h>
21 #include <rte_per_lcore.h>
22 #include <rte_lcore.h>
23 #include <rte_atomic.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_ring.h>
26 #include <rte_mempool.h>
27 #include <rte_spinlock.h>
28 #include <rte_malloc.h>
29
30 #ifdef RTE_LIBRTE_HASH
31 #include <rte_hash.h>
32 #include <rte_fbk_hash.h>
33 #include <rte_jhash.h>
34 #endif /* RTE_LIBRTE_HASH */
35
36 #ifdef RTE_LIBRTE_LPM
37 #include <rte_lpm.h>
38 #endif /* RTE_LIBRTE_LPM */
39
40 #include <rte_string_fns.h>
41
42 #include "test.h"
43
44 typedef int (*case_func_t)(void* arg);
45 typedef void (*case_clean_t)(unsigned lcore_id);
46
47 #define MAX_STRING_SIZE                     (256)
48 #define MAX_ITER_TIMES                      (16)
49 #define MAX_LPM_ITER_TIMES                  (8)
50
51 #define MEMPOOL_ELT_SIZE                    (sizeof(uint32_t))
52 #define MEMPOOL_SIZE                        (4)
53
54 #define MAX_LCORES      RTE_MAX_MEMZONE / (MAX_ITER_TIMES * 4U)
55
56 static rte_atomic32_t obj_count = RTE_ATOMIC32_INIT(0);
57 static rte_atomic32_t synchro = RTE_ATOMIC32_INIT(0);
58
59 #define WAIT_SYNCHRO_FOR_SLAVES()   do{ \
60         if (lcore_self != rte_get_master_lcore())                  \
61                 while (rte_atomic32_read(&synchro) == 0);        \
62 } while(0)
63
64 /*
65  * rte_eal_init only init once
66  */
67 static int
68 test_eal_init_once(__attribute__((unused)) void *arg)
69 {
70         unsigned lcore_self =  rte_lcore_id();
71
72         WAIT_SYNCHRO_FOR_SLAVES();
73
74         rte_atomic32_set(&obj_count, 1); /* silent the check in the caller */
75         if (rte_eal_init(0, NULL) != -1)
76                 return -1;
77
78         return 0;
79 }
80
81 /*
82  * ring create/lookup reentrancy test
83  */
84 static int
85 ring_create_lookup(__attribute__((unused)) void *arg)
86 {
87         unsigned lcore_self = rte_lcore_id();
88         struct rte_ring * rp;
89         char ring_name[MAX_STRING_SIZE];
90         int i;
91
92         WAIT_SYNCHRO_FOR_SLAVES();
93
94         /* create the same ring simultaneously on all threads */
95         for (i = 0; i < MAX_ITER_TIMES; i++) {
96                 rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
97                 if (rp != NULL)
98                         rte_atomic32_inc(&obj_count);
99         }
100
101         /* create/lookup new ring several times */
102         for (i = 0; i < MAX_ITER_TIMES; i++) {
103                 snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
104                 rp = rte_ring_create(ring_name, 4096, SOCKET_ID_ANY, 0);
105                 if (NULL == rp)
106                         return -1;
107                 if (rte_ring_lookup(ring_name) != rp)
108                         return -1;
109         }
110
111         /* verify all ring created successful */
112         for (i = 0; i < MAX_ITER_TIMES; i++) {
113                 snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
114                 if (rte_ring_lookup(ring_name) == NULL)
115                         return -1;
116         }
117
118         return 0;
119 }
120
121 static void
122 my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
123             void *obj, unsigned i)
124 {
125         uint32_t *objnum = obj;
126         memset(obj, 0, mp->elt_size);
127         *objnum = i;
128 }
129
130 static int
131 mempool_create_lookup(__attribute__((unused)) void *arg)
132 {
133         unsigned lcore_self = rte_lcore_id();
134         struct rte_mempool * mp;
135         char mempool_name[MAX_STRING_SIZE];
136         int i;
137
138         WAIT_SYNCHRO_FOR_SLAVES();
139
140         /* create the same mempool simultaneously on all threads */
141         for (i = 0; i < MAX_ITER_TIMES; i++) {
142                 mp = rte_mempool_create("fr_test_once",  MEMPOOL_SIZE,
143                                         MEMPOOL_ELT_SIZE, 0, 0,
144                                         NULL, NULL,
145                                         my_obj_init, NULL,
146                                         SOCKET_ID_ANY, 0);
147                 if (mp != NULL)
148                         rte_atomic32_inc(&obj_count);
149         }
150
151         /* create/lookup new ring several times */
152         for (i = 0; i < MAX_ITER_TIMES; i++) {
153                 snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
154                 mp = rte_mempool_create(mempool_name, MEMPOOL_SIZE,
155                                                 MEMPOOL_ELT_SIZE, 0, 0,
156                                                 NULL, NULL,
157                                                 my_obj_init, NULL,
158                                                 SOCKET_ID_ANY, 0);
159                 if (NULL == mp)
160                         return -1;
161                 if (rte_mempool_lookup(mempool_name) != mp)
162                         return -1;
163         }
164
165         /* verify all ring created successful */
166         for (i = 0; i < MAX_ITER_TIMES; i++) {
167                 snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
168                 if (rte_mempool_lookup(mempool_name) == NULL)
169                         return -1;
170         }
171
172         return 0;
173 }
174
175 #ifdef RTE_LIBRTE_HASH
176 static void
177 hash_clean(unsigned lcore_id)
178 {
179         char hash_name[MAX_STRING_SIZE];
180         struct rte_hash *handle;
181         int i;
182
183         for (i = 0; i < MAX_ITER_TIMES; i++) {
184                 snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d",  lcore_id, i);
185
186                 if ((handle = rte_hash_find_existing(hash_name)) != NULL)
187                         rte_hash_free(handle);
188         }
189 }
190
191 static int
192 hash_create_free(__attribute__((unused)) void *arg)
193 {
194         unsigned lcore_self = rte_lcore_id();
195         struct rte_hash *handle;
196         char hash_name[MAX_STRING_SIZE];
197         int i;
198         struct rte_hash_parameters hash_params = {
199                 .name = NULL,
200                 .entries = 16,
201                 .key_len = 4,
202                 .hash_func = (rte_hash_function)rte_jhash_32b,
203                 .hash_func_init_val = 0,
204                 .socket_id = 0,
205         };
206
207         WAIT_SYNCHRO_FOR_SLAVES();
208
209         /* create the same hash simultaneously on all threads */
210         hash_params.name = "fr_test_once";
211         for (i = 0; i < MAX_ITER_TIMES; i++) {
212                 handle = rte_hash_create(&hash_params);
213                 if (handle != NULL)
214                         rte_atomic32_inc(&obj_count);
215         }
216
217         /* create mutiple times simultaneously */
218         for (i = 0; i < MAX_ITER_TIMES; i++) {
219                 snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
220                 hash_params.name = hash_name;
221
222                 handle = rte_hash_create(&hash_params);
223                 if (NULL == handle)
224                         return -1;
225
226                 /* verify correct existing and then free all */
227                 if (handle != rte_hash_find_existing(hash_name))
228                         return -1;
229
230                 rte_hash_free(handle);
231         }
232
233         /* verify free correct */
234         for (i = 0; i < MAX_ITER_TIMES; i++) {
235                 snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d",  lcore_self, i);
236
237                 if (NULL != rte_hash_find_existing(hash_name))
238                         return -1;
239         }
240
241         return 0;
242 }
243
244 static void
245 fbk_clean(unsigned lcore_id)
246 {
247         char fbk_name[MAX_STRING_SIZE];
248         struct rte_fbk_hash_table *handle;
249         int i;
250
251         for (i = 0; i < MAX_ITER_TIMES; i++) {
252                 snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d",  lcore_id, i);
253
254                 if ((handle = rte_fbk_hash_find_existing(fbk_name)) != NULL)
255                         rte_fbk_hash_free(handle);
256         }
257 }
258
259 static int
260 fbk_create_free(__attribute__((unused)) void *arg)
261 {
262         unsigned lcore_self = rte_lcore_id();
263         struct rte_fbk_hash_table *handle;
264         char fbk_name[MAX_STRING_SIZE];
265         int i;
266         struct rte_fbk_hash_params fbk_params = {
267                 .name = NULL,
268                 .entries = 4,
269                 .entries_per_bucket = 4,
270                 .socket_id = 0,
271                 .hash_func = rte_jhash_1word,
272                 .init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
273         };
274
275         WAIT_SYNCHRO_FOR_SLAVES();
276
277         /* create the same fbk hash table simultaneously on all threads */
278         fbk_params.name = "fr_test_once";
279         for (i = 0; i < MAX_ITER_TIMES; i++) {
280                 handle = rte_fbk_hash_create(&fbk_params);
281                 if (handle != NULL)
282                         rte_atomic32_inc(&obj_count);
283         }
284
285         /* create mutiple fbk tables simultaneously */
286         for (i = 0; i < MAX_ITER_TIMES; i++) {
287                 snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
288                 fbk_params.name = fbk_name;
289
290                 handle = rte_fbk_hash_create(&fbk_params);
291                 if (NULL == handle)
292                         return -1;
293
294                 /* verify correct existing and then free all */
295                 if (handle != rte_fbk_hash_find_existing(fbk_name))
296                         return -1;
297
298                 rte_fbk_hash_free(handle);
299         }
300
301         /* verify free correct */
302         for (i = 0; i < MAX_ITER_TIMES; i++) {
303                 snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d",  lcore_self, i);
304
305                 if (NULL != rte_fbk_hash_find_existing(fbk_name))
306                         return -1;
307         }
308
309         return 0;
310 }
311 #endif /* RTE_LIBRTE_HASH */
312
313 #ifdef RTE_LIBRTE_LPM
314 static void
315 lpm_clean(unsigned lcore_id)
316 {
317         char lpm_name[MAX_STRING_SIZE];
318         struct rte_lpm *lpm;
319         int i;
320
321         for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
322                 snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d",  lcore_id, i);
323
324                 if ((lpm = rte_lpm_find_existing(lpm_name)) != NULL)
325                         rte_lpm_free(lpm);
326         }
327 }
328
329 static int
330 lpm_create_free(__attribute__((unused)) void *arg)
331 {
332         unsigned lcore_self = rte_lcore_id();
333         struct rte_lpm *lpm;
334         struct rte_lpm_config config;
335
336         config.max_rules = 4;
337         config.number_tbl8s = 256;
338         config.flags = 0;
339         char lpm_name[MAX_STRING_SIZE];
340         int i;
341
342         WAIT_SYNCHRO_FOR_SLAVES();
343
344         /* create the same lpm simultaneously on all threads */
345         for (i = 0; i < MAX_ITER_TIMES; i++) {
346                 lpm = rte_lpm_create("fr_test_once",  SOCKET_ID_ANY, &config);
347                 if (lpm != NULL)
348                         rte_atomic32_inc(&obj_count);
349         }
350
351         /* create mutiple fbk tables simultaneously */
352         for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
353                 snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
354                 lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, &config);
355                 if (NULL == lpm)
356                         return -1;
357
358                 /* verify correct existing and then free all */
359                 if (lpm != rte_lpm_find_existing(lpm_name))
360                         return -1;
361
362                 rte_lpm_free(lpm);
363         }
364
365         /* verify free correct */
366         for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
367                 snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d",  lcore_self, i);
368                 if (NULL != rte_lpm_find_existing(lpm_name))
369                         return -1;
370         }
371
372         return 0;
373 }
374 #endif /* RTE_LIBRTE_LPM */
375
376 struct test_case{
377         case_func_t    func;
378         void*          arg;
379         case_clean_t   clean;
380         char           name[MAX_STRING_SIZE];
381 };
382
383 /* All test cases in the test suite */
384 struct test_case test_cases[] = {
385         { test_eal_init_once,     NULL,  NULL,         "eal init once" },
386         { ring_create_lookup,     NULL,  NULL,         "ring create/lookup" },
387         { mempool_create_lookup,  NULL,  NULL,         "mempool create/lookup" },
388 #ifdef RTE_LIBRTE_HASH
389         { hash_create_free,       NULL,  hash_clean,   "hash create/free" },
390         { fbk_create_free,        NULL,  fbk_clean,    "fbk create/free" },
391 #endif /* RTE_LIBRTE_HASH */
392 #ifdef RTE_LIBRTE_LPM
393         { lpm_create_free,        NULL,  lpm_clean,    "lpm create/free" },
394 #endif /* RTE_LIBRTE_LPM */
395 };
396
397 /**
398  * launch test case in two separate thread
399  */
400 static int
401 launch_test(struct test_case *pt_case)
402 {
403         int ret = 0;
404         unsigned lcore_id;
405         unsigned cores_save = rte_lcore_count();
406         unsigned cores = RTE_MIN(cores_save, MAX_LCORES);
407         unsigned count;
408
409         if (pt_case->func == NULL)
410                 return -1;
411
412         rte_atomic32_set(&obj_count, 0);
413         rte_atomic32_set(&synchro, 0);
414
415         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
416                 if (cores == 1)
417                         break;
418                 cores--;
419                 rte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);
420         }
421
422         rte_atomic32_set(&synchro, 1);
423
424         if (pt_case->func(pt_case->arg) < 0)
425                 ret = -1;
426
427         cores = cores_save;
428         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
429                 if (cores == 1)
430                         break;
431                 cores--;
432                 if (rte_eal_wait_lcore(lcore_id) < 0)
433                         ret = -1;
434
435                 if (pt_case->clean != NULL)
436                         pt_case->clean(lcore_id);
437         }
438
439         count = rte_atomic32_read(&obj_count);
440         if (count != 1) {
441                 printf("%s: common object allocated %d times (should be 1)\n",
442                         pt_case->name, count);
443                 ret = -1;
444         }
445
446         return ret;
447 }
448
449 /**
450  * Main entry of func_reentrancy test
451  */
452 static int
453 test_func_reentrancy(void)
454 {
455         uint32_t case_id;
456         struct test_case *pt_case = NULL;
457
458         if (rte_lcore_count() <= 1) {
459                 printf("Not enough lcore for testing\n");
460                 return -1;
461         }
462         else if (rte_lcore_count() > MAX_LCORES)
463                 printf("Too many lcores, some cores will be disabled\n");
464
465         for (case_id = 0; case_id < sizeof(test_cases)/sizeof(struct test_case); case_id ++) {
466                 pt_case = &test_cases[case_id];
467                 if (pt_case->func == NULL)
468                         continue;
469
470                 if (launch_test(pt_case) < 0) {
471                         printf("Func-ReEnt CASE %"PRIu32": %s FAIL\n", case_id, pt_case->name);
472                         return -1;
473                 }
474                 printf("Func-ReEnt CASE %"PRIu32": %s PASS\n", case_id, pt_case->name);
475         }
476
477         return 0;
478 }
479
/* Register the suite with the test harness as "func_reentrancy_autotest". */
REGISTER_TEST_COMMAND(func_reentrancy_autotest, test_func_reentrancy);