test/lpm: report errors in RCU perf tests
app/test/test_lpm_perf.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 * Copyright(c) 2020 Arm Limited
 */

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <pthread.h>

#include <rte_cycles.h>
#include <rte_random.h>
#include <rte_branch_prediction.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_lpm.h>

#include "test.h"
#include "test_xmmt_ops.h"

static struct rte_lpm *lpm;
static struct rte_rcu_qsbr *rv;
static volatile uint8_t writer_done;
static volatile uint32_t thr_id;
static uint64_t gwrite_cycles;
/* LPM APIs are not thread-safe; a mutex serializes the writers */
static pthread_mutex_t lpm_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Report quiescent state every 1024 lookups. Larger critical
 * sections in the readers will result in the writer polling the
 * quiescent state multiple times.
 */
#define QSBR_REPORTING_INTERVAL 1024

#define TEST_LPM_ASSERT(cond) do {                                            \
        if (!(cond)) {                                                        \
                printf("Error at line %d:\n", __LINE__);                      \
                return -1;                                                    \
        }                                                                     \
} while (0)

#define ITERATIONS (1 << 10)
#define RCU_ITERATIONS 10
#define BATCH_SIZE (1 << 12)
#define BULK_SIZE 32

#define MAX_RULE_NUM (1200000)

struct route_rule {
        uint32_t ip;
        uint8_t depth;
};

static struct route_rule large_route_table[MAX_RULE_NUM];
/* Route table for routes with depth > 24 */
static struct route_rule large_ldepth_route_table[MAX_RULE_NUM];

static uint32_t num_route_entries;
static uint32_t num_ldepth_route_entries;
#define NUM_ROUTE_ENTRIES num_route_entries
#define NUM_LDEPTH_ROUTE_ENTRIES num_ldepth_route_entries

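/* Each RCU perf iteration adds and then deletes every route in the
 * >24-depth table, so TOTAL_WRITES is the number of adds (and of
 * deletes) used below to average the measured cycles per operation.
 */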
#define TOTAL_WRITES (RCU_ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES)

enum {
        IP_CLASS_A,
        IP_CLASS_B,
        IP_CLASS_C
};

/* struct route_rule_count defines the total number of rules in the
 * a/b/c tables below. Each entry of a[]/b[]/c[] is the number of
 * common IP addresses of class A/B/C at that depth, not including
 * the ones for private local networks.
 */
struct route_rule_count {
        uint32_t a[RTE_LPM_MAX_DEPTH];
        uint32_t b[RTE_LPM_MAX_DEPTH];
        uint32_t c[RTE_LPM_MAX_DEPTH];
};

/* The following per-depth counts for each common IP class were taken
 * from the previous large constant table in app/test/test_lpm_routes.h.
 * To achieve comparable performance, they keep the same depth and IP
 * address coverage as that table. These numbers do not include any
 * private local IP addresses. As the previous large constant rule
 * table was dumped from a real router, there are no addresses in
 * class D or E.
 */
static struct route_rule_count rule_count = {
        .a = { /* IP class A in which the most significant bit is 0 */
                    0, /* depth =  1 */
                    0, /* depth =  2 */
                    1, /* depth =  3 */
                    0, /* depth =  4 */
                    2, /* depth =  5 */
                    1, /* depth =  6 */
                    3, /* depth =  7 */
                  185, /* depth =  8 */
                   26, /* depth =  9 */
                   16, /* depth = 10 */
                   39, /* depth = 11 */
                  144, /* depth = 12 */
                  233, /* depth = 13 */
                  528, /* depth = 14 */
                  866, /* depth = 15 */
                 3856, /* depth = 16 */
                 3268, /* depth = 17 */
                 5662, /* depth = 18 */
                17301, /* depth = 19 */
                22226, /* depth = 20 */
                11147, /* depth = 21 */
                16746, /* depth = 22 */
                17120, /* depth = 23 */
                77578, /* depth = 24 */
                  401, /* depth = 25 */
                  656, /* depth = 26 */
                 1107, /* depth = 27 */
                 1121, /* depth = 28 */
                 2316, /* depth = 29 */
                  717, /* depth = 30 */
                   10, /* depth = 31 */
                   66  /* depth = 32 */
        },
        .b = { /* IP class B in which the most significant 2 bits are 10 */
                    0, /* depth =  1 */
                    0, /* depth =  2 */
                    0, /* depth =  3 */
                    0, /* depth =  4 */
                    1, /* depth =  5 */
                    1, /* depth =  6 */
                    1, /* depth =  7 */
                    3, /* depth =  8 */
                    3, /* depth =  9 */
                   30, /* depth = 10 */
                   25, /* depth = 11 */
                  168, /* depth = 12 */
                  305, /* depth = 13 */
                  569, /* depth = 14 */
                 1129, /* depth = 15 */
                50800, /* depth = 16 */
                 1645, /* depth = 17 */
                 1820, /* depth = 18 */
                 3506, /* depth = 19 */
                 3258, /* depth = 20 */
                 3424, /* depth = 21 */
                 4971, /* depth = 22 */
                 6885, /* depth = 23 */
                39771, /* depth = 24 */
                  424, /* depth = 25 */
                  170, /* depth = 26 */
                  433, /* depth = 27 */
                   92, /* depth = 28 */
                  366, /* depth = 29 */
                  377, /* depth = 30 */
                    2, /* depth = 31 */
                  200  /* depth = 32 */
        },
        .c = { /* IP class C in which the most significant 3 bits are 110 */
                     0, /* depth =  1 */
                     0, /* depth =  2 */
                     0, /* depth =  3 */
                     0, /* depth =  4 */
                     0, /* depth =  5 */
                     0, /* depth =  6 */
                     0, /* depth =  7 */
                    12, /* depth =  8 */
                     8, /* depth =  9 */
                     9, /* depth = 10 */
                    33, /* depth = 11 */
                    69, /* depth = 12 */
                   237, /* depth = 13 */
                  1007, /* depth = 14 */
                  1717, /* depth = 15 */
                 14663, /* depth = 16 */
                  8070, /* depth = 17 */
                 16185, /* depth = 18 */
                 48261, /* depth = 19 */
                 36870, /* depth = 20 */
                 33960, /* depth = 21 */
                 50638, /* depth = 22 */
                 61422, /* depth = 23 */
                466549, /* depth = 24 */
                  1829, /* depth = 25 */
                  4824, /* depth = 26 */
                  4927, /* depth = 27 */
                  5914, /* depth = 28 */
                 10254, /* depth = 29 */
                  4905, /* depth = 30 */
                     1, /* depth = 31 */
                   716  /* depth = 32 */
        }
};

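/* Generate rule_count[depth] prefixes of the given depth for one IP
 * class, spreading them evenly across the class's address space with
 * a fixed stride so that the generated rules cover roughly the same
 * range as the old constant table did.
 */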
static void generate_random_rule_prefix(uint32_t ip_class, uint8_t depth)
{
/* IP address class A, the most significant bit is 0 */
#define IP_HEAD_MASK_A                  0x00000000
#define IP_HEAD_BIT_NUM_A               1

/* IP address class B, the most significant 2 bits are 10 */
#define IP_HEAD_MASK_B                  0x80000000
#define IP_HEAD_BIT_NUM_B               2

/* IP address class C, the most significant 3 bits are 110 */
#define IP_HEAD_MASK_C                  0xC0000000
#define IP_HEAD_BIT_NUM_C               3

        uint32_t class_depth;
        uint32_t range;
        uint32_t mask;
        uint32_t step;
        uint32_t start;
        uint32_t fixed_bit_num;
        uint32_t ip_head_mask;
        uint32_t rule_num;
        uint32_t k;
        struct route_rule *ptr_rule, *ptr_ldepth_rule;

        if (ip_class == IP_CLASS_A) {        /* IP Address class A */
                fixed_bit_num = IP_HEAD_BIT_NUM_A;
                ip_head_mask = IP_HEAD_MASK_A;
                rule_num = rule_count.a[depth - 1];
        } else if (ip_class == IP_CLASS_B) { /* IP Address class B */
                fixed_bit_num = IP_HEAD_BIT_NUM_B;
                ip_head_mask = IP_HEAD_MASK_B;
                rule_num = rule_count.b[depth - 1];
        } else {                             /* IP Address class C */
                fixed_bit_num = IP_HEAD_BIT_NUM_C;
                ip_head_mask = IP_HEAD_MASK_C;
                rule_num = rule_count.c[depth - 1];
        }

        if (rule_num == 0)
                return;

        /* number of remaining bits, excluding the most significant
         * fixed bits of this IP address class
         */
        class_depth = depth - fixed_bit_num;

        /* range is the maximum number of rules for this depth and
         * this IP address class
         */
        range = 1 << class_depth;

        /* mask covering only the generated bits, i.e. the depth most
         * significant bits minus the fixed class bits
         */
        mask = range - 1;

        /* Widen the IP address coverage of the generated rules */
        if (range <= rule_num)
                step = 1;
        else
                step = round((double)range / rule_num);

        /* Generate only the remaining bits; the fixed class bits are
         * ORed in via ip_head_mask below
         */
        start = lrand48() & mask;
        ptr_rule = &large_route_table[num_route_entries];
        ptr_ldepth_rule = &large_ldepth_route_table[num_ldepth_route_entries];
        for (k = 0; k < rule_num; k++) {
                ptr_rule->ip = (start << (RTE_LPM_MAX_DEPTH - depth))
                        | ip_head_mask;
                ptr_rule->depth = depth;
                /* If the depth of the route is more than 24, store it
                 * in another table as well.
                 */
                if (depth > 24) {
                        ptr_ldepth_rule->ip = ptr_rule->ip;
                        ptr_ldepth_rule->depth = ptr_rule->depth;
                        ptr_ldepth_rule++;
                        num_ldepth_route_entries++;
                }
                ptr_rule++;
                start = (start + step) & mask;
        }
        num_route_entries += rule_num;
}

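/* Insert a rule at a random position in large_route_table; the rule
 * previously at that position is appended to the end, so no generated
 * rule is lost and the table grows by exactly one entry.
 */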
static void insert_rule_in_random_pos(uint32_t ip, uint8_t depth)
{
        uint32_t pos;
        int try_count = 0;
        struct route_rule tmp;

        do {
                pos = lrand48();
                try_count++;
        } while ((try_count < 10) && (pos > num_route_entries));

        if ((pos > num_route_entries) || (pos >= MAX_RULE_NUM))
                pos = num_route_entries >> 1;

        tmp = large_route_table[pos];
        large_route_table[pos].ip = ip;
        large_route_table[pos].depth = depth;
        if (num_route_entries < MAX_RULE_NUM)
                large_route_table[num_route_entries++] = tmp;
}

static void generate_large_route_rule_table(void)
{
        uint32_t ip_class;
        uint8_t  depth;

        num_route_entries = 0;
        num_ldepth_route_entries = 0;
        memset(large_route_table, 0, sizeof(large_route_table));

        for (ip_class = IP_CLASS_A; ip_class <= IP_CLASS_C; ip_class++) {
                for (depth = 1; depth <= RTE_LPM_MAX_DEPTH; depth++) {
                        generate_random_rule_prefix(ip_class, depth);
                }
        }

        /* Add the following rules to match the previous large constant
         * table: four rules with private local IP addresses and one
         * all-zeros prefix with depth = 8.
         */
        insert_rule_in_random_pos(RTE_IPV4(0, 0, 0, 0), 8);
        insert_rule_in_random_pos(RTE_IPV4(10, 2, 23, 147), 32);
        insert_rule_in_random_pos(RTE_IPV4(192, 168, 100, 10), 24);
        insert_rule_in_random_pos(RTE_IPV4(192, 168, 25, 100), 24);
        insert_rule_in_random_pos(RTE_IPV4(192, 168, 129, 124), 32);
}

static void
print_route_distribution(const struct route_rule *table, uint32_t n)
{
        unsigned int i, j;

        printf("Route distribution per prefix width:\n");
        printf("DEPTH    QUANTITY (PERCENT)\n");
        printf("---------------------------\n");

        /* Count rules per depth. */
        for (i = 1; i <= 32; i++) {
                unsigned int depth_counter = 0;
                double percent_hits;

                for (j = 0; j < n; j++)
                        if (table[j].depth == (uint8_t) i)
                                depth_counter++;

                percent_hits = ((double)depth_counter)/((double)n) * 100;
                printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
        }
        printf("\n");
}

/* Worker cores enabled for the test */
static uint16_t enabled_core_ids[RTE_MAX_LCORE];
static unsigned int num_cores;

/* Simple way to allocate thread ids in 0 to RTE_MAX_LCORE space */
static inline uint32_t
alloc_thread_id(void)
{
        uint32_t tmp_thr_id;

        tmp_thr_id = __atomic_fetch_add(&thr_id, 1, __ATOMIC_RELAXED);
        if (tmp_thr_id >= RTE_MAX_LCORE)
                printf("Invalid thread id %u\n", tmp_thr_id);

        return tmp_thr_id;
}

/*
 * Reader thread using rte_lpm data structure without RCU.
 */
static int
test_lpm_reader(void *arg)
{
        int i;
        uint32_t ip_batch[QSBR_REPORTING_INTERVAL];
        uint32_t next_hop_return = 0;

        RTE_SET_USED(arg);
        do {
                for (i = 0; i < QSBR_REPORTING_INTERVAL; i++)
                        ip_batch[i] = rte_rand();

                for (i = 0; i < QSBR_REPORTING_INTERVAL; i++)
                        rte_lpm_lookup(lpm, ip_batch[i], &next_hop_return);

        } while (!writer_done);

        return 0;
}

/*
 * Reader thread using rte_lpm data structure with RCU.
 */
static int
test_lpm_rcu_qsbr_reader(void *arg)
{
        int i;
        uint32_t thread_id = alloc_thread_id();
        uint32_t ip_batch[QSBR_REPORTING_INTERVAL];
        uint32_t next_hop_return = 0;

        RTE_SET_USED(arg);
        /* Register this thread to report quiescent state */
        rte_rcu_qsbr_thread_register(rv, thread_id);
        rte_rcu_qsbr_thread_online(rv, thread_id);

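        /* While online, this thread is tracked by the QSBR variable: the
         * writer's grace period completes only after every online reader
         * has called rte_rcu_qsbr_quiescent() below.
         */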
        do {
                for (i = 0; i < QSBR_REPORTING_INTERVAL; i++)
                        ip_batch[i] = rte_rand();

                for (i = 0; i < QSBR_REPORTING_INTERVAL; i++)
                        rte_lpm_lookup(lpm, ip_batch[i], &next_hop_return);

                /* Update quiescent state */
                rte_rcu_qsbr_quiescent(rv, thread_id);
        } while (!writer_done);

        rte_rcu_qsbr_thread_offline(rv, thread_id);
        rte_rcu_qsbr_thread_unregister(rv, thread_id);

        return 0;
}

/*
 * Writer thread using rte_lpm data structure with RCU.
 */
static int
test_lpm_rcu_qsbr_writer(void *arg)
{
        unsigned int i, j, si, ei;
        uint64_t begin, total_cycles;
        uint8_t core_id = (uint8_t)((uintptr_t)arg);
        uint32_t next_hop_add = 0xAA;

        /* 2 writer threads are used */
        if (core_id % 2 == 0) {
                si = 0;
                ei = NUM_LDEPTH_ROUTE_ENTRIES / 2;
        } else {
                si = NUM_LDEPTH_ROUTE_ENTRIES / 2;
                ei = NUM_LDEPTH_ROUTE_ENTRIES;
        }
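        /* Each writer works on a disjoint half of the >24-depth routes, so
         * the two writers never add or delete the same prefix. The mutex is
         * still needed because the LPM add/delete APIs themselves are not
         * multi-writer safe.
         */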

        /* Measure add/delete. */
        begin = rte_rdtsc_precise();
        for (i = 0; i < RCU_ITERATIONS; i++) {
                /* Add all the entries */
                for (j = si; j < ei; j++) {
                        pthread_mutex_lock(&lpm_mutex);
                        if (rte_lpm_add(lpm, large_ldepth_route_table[j].ip,
                                        large_ldepth_route_table[j].depth,
                                        next_hop_add) != 0) {
                                printf("Failed to add iteration %u, route# %u\n",
                                        i, j);
                                goto error;
                        }
                        pthread_mutex_unlock(&lpm_mutex);
                }

                /* Delete all the entries */
                for (j = si; j < ei; j++) {
                        pthread_mutex_lock(&lpm_mutex);
                        if (rte_lpm_delete(lpm, large_ldepth_route_table[j].ip,
                                large_ldepth_route_table[j].depth) != 0) {
                                printf("Failed to delete iteration %u, route# %u\n",
                                        i, j);
                                goto error;
                        }
                        pthread_mutex_unlock(&lpm_mutex);
                }
        }

        total_cycles = rte_rdtsc_precise() - begin;

        __atomic_fetch_add(&gwrite_cycles, total_cycles, __ATOMIC_RELAXED);

        return 0;

error:
        pthread_mutex_unlock(&lpm_mutex);
        return -1;
}

/*
 * Perf test:
 * 2 writers, rest are readers
 */
static int
test_lpm_rcu_perf_multi_writer(void)
{
        struct rte_lpm_config config;
        size_t sz;
        unsigned int i;
        uint16_t core_id;
        struct rte_lpm_rcu_config rcu_cfg = {0};

        if (rte_lcore_count() < 3) {
                printf("Not enough cores for lpm_rcu_perf_autotest, expecting at least 3\n");
                return TEST_SKIPPED;
        }

        num_cores = 0;
        RTE_LCORE_FOREACH_WORKER(core_id) {
                enabled_core_ids[num_cores] = core_id;
                num_cores++;
        }

        printf("\nPerf test: 2 writers, %u readers, RCU integration enabled\n",
                num_cores - 2);

        /* Create LPM table */
        config.max_rules = NUM_LDEPTH_ROUTE_ENTRIES;
        config.number_tbl8s = NUM_LDEPTH_ROUTE_ENTRIES;
        config.flags = 0;
        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm != NULL);

        /* Init RCU variable */
        sz = rte_rcu_qsbr_get_memsize(num_cores);
        rv = (struct rte_rcu_qsbr *)rte_zmalloc("rcu0", sz,
                                                RTE_CACHE_LINE_SIZE);
        if (rv == NULL) {
                printf("Failed to allocate RCU QSBR variable\n");
                goto error;
        }
        rte_rcu_qsbr_init(rv, num_cores);

        rcu_cfg.v = rv;
        /* Assign the RCU variable to LPM */
        if (rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg) != 0) {
                printf("RCU variable assignment failed\n");
                goto error;
        }
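        /* In the default RCU mode, rte_lpm_rcu_qsbr_add() creates a defer
         * queue internally: tbl8 groups freed by rte_lpm_delete() are only
         * reclaimed once all registered readers have reported a quiescent
         * state, so readers never dereference freed memory.
         */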

        writer_done = 0;
        __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);

        __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);

        /* Launch reader threads */
        for (i = 2; i < num_cores; i++)
                rte_eal_remote_launch(test_lpm_rcu_qsbr_reader, NULL,
                                        enabled_core_ids[i]);

        /* Launch writer threads */
        for (i = 0; i < 2; i++)
                rte_eal_remote_launch(test_lpm_rcu_qsbr_writer,
                                        (void *)(uintptr_t)i,
                                        enabled_core_ids[i]);

        /* Wait for writer threads */
        for (i = 0; i < 2; i++)
                if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
                        goto error;

        printf("Total LPM Adds: %u\n", TOTAL_WRITES);
        printf("Total LPM Deletes: %u\n", TOTAL_WRITES);
        printf("Average LPM Add/Del: %"PRIu64" cycles\n",
                __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED)
                / TOTAL_WRITES);

        /* Wait and check return value from reader threads */
        writer_done = 1;
        for (i = 2; i < num_cores; i++)
                if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
                        goto error;

        rte_lpm_free(lpm);
        rte_free(rv);
        lpm = NULL;
        rv = NULL;

        /* Test without RCU integration */
        printf("\nPerf test: 2 writers, %u readers, RCU integration disabled\n",
                num_cores - 2);

        /* Create LPM table */
        config.max_rules = NUM_LDEPTH_ROUTE_ENTRIES;
        config.number_tbl8s = NUM_LDEPTH_ROUTE_ENTRIES;
        config.flags = 0;
        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm != NULL);

        writer_done = 0;
        __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
        __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);

        /* Launch reader threads */
        for (i = 2; i < num_cores; i++)
                rte_eal_remote_launch(test_lpm_reader, NULL,
                                        enabled_core_ids[i]);

        /* Launch writer threads */
        for (i = 0; i < 2; i++)
                rte_eal_remote_launch(test_lpm_rcu_qsbr_writer,
                                        (void *)(uintptr_t)i,
                                        enabled_core_ids[i]);

        /* Wait for writer threads */
        for (i = 0; i < 2; i++)
                if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
                        goto error;

        printf("Total LPM Adds: %u\n", TOTAL_WRITES);
        printf("Total LPM Deletes: %u\n", TOTAL_WRITES);
        printf("Average LPM Add/Del: %"PRIu64" cycles\n",
                __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED)
                / TOTAL_WRITES);

        writer_done = 1;
        /* Wait and check return value from reader threads */
        for (i = 2; i < num_cores; i++)
                if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
                        goto error;

        rte_lpm_free(lpm);

        return 0;

error:
        writer_done = 1;
        /* Wait until all readers have exited */
        rte_eal_mp_wait_lcore();

        rte_lpm_free(lpm);
        rte_free(rv);

        return -1;
}

/*
 * Perf test:
 * Single writer, rest are readers
 */
static int
test_lpm_rcu_perf(void)
{
        struct rte_lpm_config config;
        uint64_t begin, total_cycles;
        size_t sz;
        unsigned int i, j;
        uint16_t core_id;
        uint32_t next_hop_add = 0xAA;
        struct rte_lpm_rcu_config rcu_cfg = {0};

        if (rte_lcore_count() < 2) {
                printf("Not enough cores for lpm_rcu_perf_autotest, expecting at least 2\n");
                return TEST_SKIPPED;
        }

        num_cores = 0;
        RTE_LCORE_FOREACH_WORKER(core_id) {
                enabled_core_ids[num_cores] = core_id;
                num_cores++;
        }

        printf("\nPerf test: 1 writer, %u readers, RCU integration enabled\n",
                num_cores);

        /* Create LPM table */
        config.max_rules = NUM_LDEPTH_ROUTE_ENTRIES;
        config.number_tbl8s = NUM_LDEPTH_ROUTE_ENTRIES;
        config.flags = 0;
        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm != NULL);

        /* Init RCU variable */
        sz = rte_rcu_qsbr_get_memsize(num_cores);
        rv = (struct rte_rcu_qsbr *)rte_zmalloc("rcu0", sz,
                                                RTE_CACHE_LINE_SIZE);
        if (rv == NULL) {
                printf("Failed to allocate RCU QSBR variable\n");
                goto error;
        }
        rte_rcu_qsbr_init(rv, num_cores);

        rcu_cfg.v = rv;
        /* Assign the RCU variable to LPM */
        if (rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg) != 0) {
                printf("RCU variable assignment failed\n");
                goto error;
        }

        writer_done = 0;
        __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);

        /* Launch reader threads */
        for (i = 0; i < num_cores; i++)
                rte_eal_remote_launch(test_lpm_rcu_qsbr_reader, NULL,
                                        enabled_core_ids[i]);

        /* Measure add/delete. */
        begin = rte_rdtsc_precise();
        for (i = 0; i < RCU_ITERATIONS; i++) {
                /* Add all the entries */
                for (j = 0; j < NUM_LDEPTH_ROUTE_ENTRIES; j++)
                        if (rte_lpm_add(lpm, large_ldepth_route_table[j].ip,
                                        large_ldepth_route_table[j].depth,
                                        next_hop_add) != 0) {
                                printf("Failed to add iteration %u, route# %u\n",
                                        i, j);
                                goto error;
                        }

                /* Delete all the entries */
                for (j = 0; j < NUM_LDEPTH_ROUTE_ENTRIES; j++)
                        if (rte_lpm_delete(lpm, large_ldepth_route_table[j].ip,
                                large_ldepth_route_table[j].depth) != 0) {
                                printf("Failed to delete iteration %u, route# %u\n",
                                        i, j);
                                goto error;
                        }
        }
        total_cycles = rte_rdtsc_precise() - begin;

        printf("Total LPM Adds: %u\n", TOTAL_WRITES);
        printf("Total LPM Deletes: %u\n", TOTAL_WRITES);
        printf("Average LPM Add/Del: %g cycles\n",
                (double)total_cycles / TOTAL_WRITES);

        writer_done = 1;
        /* Wait and check return value from reader threads */
        for (i = 0; i < num_cores; i++)
                if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
                        goto error;

        rte_lpm_free(lpm);
        rte_free(rv);
        lpm = NULL;
        rv = NULL;

        /* Test without RCU integration */
        printf("\nPerf test: 1 writer, %u readers, RCU integration disabled\n",
                num_cores);

        /* Create LPM table */
        config.max_rules = NUM_LDEPTH_ROUTE_ENTRIES;
        config.number_tbl8s = NUM_LDEPTH_ROUTE_ENTRIES;
        config.flags = 0;
        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm != NULL);

        writer_done = 0;
        __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);

        /* Launch reader threads */
        for (i = 0; i < num_cores; i++)
                rte_eal_remote_launch(test_lpm_reader, NULL,
                                        enabled_core_ids[i]);

        /* Measure add/delete. */
        begin = rte_rdtsc_precise();
        for (i = 0; i < RCU_ITERATIONS; i++) {
                /* Add all the entries */
                for (j = 0; j < NUM_LDEPTH_ROUTE_ENTRIES; j++)
                        if (rte_lpm_add(lpm, large_ldepth_route_table[j].ip,
                                        large_ldepth_route_table[j].depth,
                                        next_hop_add) != 0) {
                                printf("Failed to add iteration %u, route# %u\n",
                                        i, j);
                                goto error;
                        }

                /* Delete all the entries */
                for (j = 0; j < NUM_LDEPTH_ROUTE_ENTRIES; j++)
                        if (rte_lpm_delete(lpm, large_ldepth_route_table[j].ip,
                                large_ldepth_route_table[j].depth) != 0) {
                                printf("Failed to delete iteration %u, route# %u\n",
                                        i, j);
                                goto error;
                        }
        }
        total_cycles = rte_rdtsc_precise() - begin;

        printf("Total LPM Adds: %u\n", TOTAL_WRITES);
        printf("Total LPM Deletes: %u\n", TOTAL_WRITES);
        printf("Average LPM Add/Del: %g cycles\n",
                (double)total_cycles / TOTAL_WRITES);

        writer_done = 1;
        /* Wait and check return value from reader threads */
        for (i = 0; i < num_cores; i++)
                if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
                        printf("Warning: lcore %u not finished.\n",
                                enabled_core_ids[i]);

        rte_lpm_free(lpm);

        return 0;

error:
        writer_done = 1;
        /* Wait until all readers have exited */
        rte_eal_mp_wait_lcore();

        rte_lpm_free(lpm);
        rte_free(rv);

        return -1;
}

static int
test_lpm_perf(void)
{
        struct rte_lpm_config config;
        uint64_t begin, total_time, lpm_used_entries = 0;
        unsigned int i, j;
        uint32_t next_hop_add = 0xAA, next_hop_return = 0;
        int status = 0;
        uint64_t cache_line_counter = 0;
        int64_t count = 0;

        config.max_rules = 2000000;
        config.number_tbl8s = 2048;
        config.flags = 0;

        rte_srand(rte_rdtsc());

        generate_large_route_rule_table();

        printf("No. routes = %u\n", (unsigned int) NUM_ROUTE_ENTRIES);

        print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);

        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm != NULL);

        /* Measure add. */
        begin = rte_rdtsc();

        for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
                if (rte_lpm_add(lpm, large_route_table[i].ip,
                                large_route_table[i].depth, next_hop_add) == 0)
                        status++;
        }
        /* End Timer. */
        total_time = rte_rdtsc() - begin;

        printf("Unique added entries = %d\n", status);
        /* Obtain add statistics. */
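        /* Walk tbl24 counting valid entries; once per 32-entry window, if
         * any new valid entries appeared since the last check, count one
         * more cache line as touched. This only approximates the cache
         * footprint of the populated table.
         */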
        for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
                if (lpm->tbl24[i].valid)
                        lpm_used_entries++;

                if (i % 32 == 0) {
                        if ((uint64_t)count < lpm_used_entries) {
                                cache_line_counter++;
                                count = lpm_used_entries;
                        }
                }
        }

        printf("Used table 24 entries = %u (%g%%)\n",
                        (unsigned int) lpm_used_entries,
                        (lpm_used_entries * 100.0) / RTE_LPM_TBL24_NUM_ENTRIES);
        printf("64 byte Cache entries used = %u (%u bytes)\n",
                        (unsigned int) cache_line_counter,
                        (unsigned int) cache_line_counter * 64);

        printf("Average LPM Add: %g cycles\n",
                        (double)total_time / NUM_ROUTE_ENTRIES);

        /* Measure single Lookup */
        total_time = 0;
        count = 0;

        for (i = 0; i < ITERATIONS; i++) {
                static uint32_t ip_batch[BATCH_SIZE];

                for (j = 0; j < BATCH_SIZE; j++)
                        ip_batch[j] = rte_rand();

                /* Lookup per batch */
                begin = rte_rdtsc();

                for (j = 0; j < BATCH_SIZE; j++) {
                        if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
                                count++;
                }

                total_time += rte_rdtsc() - begin;
        }
        printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
                        (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
                        (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));

        /* Measure bulk Lookup */
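        /* rte_lpm_lookup_bulk() reports per-address hit/miss through the
         * RTE_LPM_LOOKUP_SUCCESS flag in each next_hops[] entry, so misses
         * are counted by testing that flag.
         */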
        total_time = 0;
        count = 0;
        for (i = 0; i < ITERATIONS; i++) {
                static uint32_t ip_batch[BATCH_SIZE];
                uint32_t next_hops[BULK_SIZE];

                /* Create array of random IP addresses */
                for (j = 0; j < BATCH_SIZE; j++)
                        ip_batch[j] = rte_rand();

                /* Lookup per batch */
                begin = rte_rdtsc();
                for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
                        unsigned int k;
                        rte_lpm_lookup_bulk(lpm, &ip_batch[j], next_hops, BULK_SIZE);
                        for (k = 0; k < BULK_SIZE; k++)
                                if (unlikely(!(next_hops[k] & RTE_LPM_LOOKUP_SUCCESS)))
                                        count++;
                }

                total_time += rte_rdtsc() - begin;
        }
        printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
                        (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
                        (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));

        /* Measure LookupX4 */
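        /* rte_lpm_lookupx4() takes a default value (UINT32_MAX here) that
         * is written for addresses with no matching route, so misses are
         * counted by comparing against that default.
         */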
        total_time = 0;
        count = 0;
        for (i = 0; i < ITERATIONS; i++) {
                static uint32_t ip_batch[BATCH_SIZE];
                uint32_t next_hops[4];

                /* Create array of random IP addresses */
                for (j = 0; j < BATCH_SIZE; j++)
                        ip_batch[j] = rte_rand();

                /* Lookup per batch */
                begin = rte_rdtsc();
                for (j = 0; j < BATCH_SIZE; j += RTE_DIM(next_hops)) {
                        unsigned int k;
                        xmm_t ipx4;

                        ipx4 = vect_loadu_sil128((xmm_t *)(ip_batch + j));
                        rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT32_MAX);
                        for (k = 0; k < RTE_DIM(next_hops); k++)
                                if (unlikely(next_hops[k] == UINT32_MAX))
                                        count++;
                }

                total_time += rte_rdtsc() - begin;
        }
        printf("LPM LookupX4: %.1f cycles (fails = %.1f%%)\n",
                        (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
                        (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));

        /* Measure Delete */
        status = 0;
        begin = rte_rdtsc();

        for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
                status += rte_lpm_delete(lpm, large_route_table[i].ip,
                                large_route_table[i].depth);
        }

        total_time = rte_rdtsc() - begin;

        printf("Average LPM Delete: %g cycles\n",
                        (double)total_time / NUM_ROUTE_ENTRIES);

        rte_lpm_delete_all(lpm);
        rte_lpm_free(lpm);

        if (test_lpm_rcu_perf() < 0)
                return -1;

        if (test_lpm_rcu_perf_multi_writer() < 0)
                return -1;

        return 0;
}

REGISTER_TEST_COMMAND(lpm_perf_autotest, test_lpm_perf);