/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 * Copyright(c) 2020 Arm Limited
 */

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <inttypes.h>
#include <pthread.h>

#include <rte_cycles.h>
#include <rte_random.h>
#include <rte_branch_prediction.h>
#include <rte_ip.h>
#include <rte_malloc.h>
#include <rte_lcore.h>
#include <rte_launch.h>
#include <rte_lpm.h>
#include <rte_rcu_qsbr.h>

#include "test.h"
#include "test_xmmt_ops.h"
static struct rte_lpm *lpm;
static struct rte_rcu_qsbr *rv;
static volatile uint8_t writer_done;
static volatile uint32_t thr_id;
static uint64_t gwrite_cycles;
static uint32_t num_writers;

/* LPM APIs are not thread-safe; use a mutex for writer-side safety. */
static pthread_mutex_t lpm_mutex = PTHREAD_MUTEX_INITIALIZER;
/* Report a quiescent state every 1024 lookups. Larger reader critical
 * sections will cause the writer to poll the RCU state multiple times.
 */
#define QSBR_REPORTING_INTERVAL 1024
/* Check the condition and return an error if true. */
#define TEST_LPM_ASSERT(cond) do {                              \
    if (!(cond)) {                                              \
        printf("Error at line %d:\n", __LINE__);                \
        return -1;                                              \
    }                                                           \
} while (0)
#define ITERATIONS (1 << 10)
#define RCU_ITERATIONS 10
#define BATCH_SIZE (1 << 12)
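/* Burst size for the rte_lpm_lookup_bulk() loops below. The value is
 * assumed to be 32 here so that BATCH_SIZE divides evenly into bursts.
 */
#define BULK_SIZE 32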
#define MAX_RULE_NUM (1200000)

struct route_rule {
    uint32_t ip;
    uint8_t depth;
};

static struct route_rule large_route_table[MAX_RULE_NUM];
/* Route table for routes with depth > 24 */
struct route_rule large_ldepth_route_table[MAX_RULE_NUM];
static uint32_t num_route_entries;
static uint32_t num_ldepth_route_entries;
#define NUM_ROUTE_ENTRIES num_route_entries
#define NUM_LDEPTH_ROUTE_ENTRIES num_ldepth_route_entries

#define TOTAL_WRITES (RCU_ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES)
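/* Each writer pass adds and then deletes every route in its slice, and
 * the passes repeat RCU_ITERATIONS times, so adds and deletes each
 * total RCU_ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES across all writers.
 */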
enum {
    IP_CLASS_A,
    IP_CLASS_B,
    IP_CLASS_C
};

/* struct route_rule_count defines the total number of rules for the
 * following a/b/c arrays. Each item in a[]/b[]/c[] is the number of
 * rules for common IP address classes A/B/C at that depth, not
 * including the ones for private local networks.
 */
struct route_rule_count {
    uint32_t a[RTE_LPM_MAX_DEPTH];
    uint32_t b[RTE_LPM_MAX_DEPTH];
    uint32_t c[RTE_LPM_MAX_DEPTH];
};
/* The following counts for each depth of each common IP class were
 * taken from the previous large constant table in
 * app/test/test_lpm_routes.h. To keep the performance comparable, they
 * preserve the same depth and IP address coverage as that table. These
 * numbers don't include any private local IP addresses; the previous
 * large constant rule table was dumped from a real router.
 */
static struct route_rule_count rule_count = {
    .a = { /* IP class A in which the most significant bit is 0 */
        144,        /* depth = 12 */
        233,        /* depth = 13 */
        528,        /* depth = 14 */
        866,        /* depth = 15 */
        3856,       /* depth = 16 */
        3268,       /* depth = 17 */
        5662,       /* depth = 18 */
        17301,      /* depth = 19 */
        22226,      /* depth = 20 */
        11147,      /* depth = 21 */
        16746,      /* depth = 22 */
        17120,      /* depth = 23 */
        77578,      /* depth = 24 */
        401,        /* depth = 25 */
        656,        /* depth = 26 */
        1107,       /* depth = 27 */
        1121,       /* depth = 28 */
        2316,       /* depth = 29 */
        717,        /* depth = 30 */
    },
    .b = { /* IP class B in which the most significant 2 bits are 10 */
        168,        /* depth = 12 */
        305,        /* depth = 13 */
        569,        /* depth = 14 */
        1129,       /* depth = 15 */
        50800,      /* depth = 16 */
        1645,       /* depth = 17 */
        1820,       /* depth = 18 */
        3506,       /* depth = 19 */
        3258,       /* depth = 20 */
        3424,       /* depth = 21 */
        4971,       /* depth = 22 */
        6885,       /* depth = 23 */
        39771,      /* depth = 24 */
        424,        /* depth = 25 */
        170,        /* depth = 26 */
        433,        /* depth = 27 */
        366,        /* depth = 29 */
        377,        /* depth = 30 */
    },
    .c = { /* IP class C in which the most significant 3 bits are 110 */
        237,        /* depth = 13 */
        1007,       /* depth = 14 */
        1717,       /* depth = 15 */
        14663,      /* depth = 16 */
        8070,       /* depth = 17 */
        16185,      /* depth = 18 */
        48261,      /* depth = 19 */
        36870,      /* depth = 20 */
        33960,      /* depth = 21 */
        50638,      /* depth = 22 */
        61422,      /* depth = 23 */
        466549,     /* depth = 24 */
        1829,       /* depth = 25 */
        4824,       /* depth = 26 */
        4927,       /* depth = 27 */
        5914,       /* depth = 28 */
        10254,      /* depth = 29 */
        4905,       /* depth = 30 */
    },
};
static void generate_random_rule_prefix(uint32_t ip_class, uint8_t depth)
{
/* IP address class A, the most significant bit is 0 */
#define IP_HEAD_MASK_A      0x00000000
#define IP_HEAD_BIT_NUM_A   1

/* IP address class B, the most significant 2 bits are 10 */
#define IP_HEAD_MASK_B      0x80000000
#define IP_HEAD_BIT_NUM_B   2

/* IP address class C, the most significant 3 bits are 110 */
#define IP_HEAD_MASK_C      0xC0000000
#define IP_HEAD_BIT_NUM_C   3

    uint32_t class_depth;
    uint32_t range;
    uint32_t mask;
    uint32_t step;
    uint32_t start;
    uint32_t fixed_bit_num;
    uint32_t ip_head_mask;
    uint32_t rule_num;
    uint32_t k;
    struct route_rule *ptr_rule, *ptr_ldepth_rule;
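    /* Example: a class B rule of depth 16 keeps the two fixed head
     * bits "10" and randomizes the remaining 14 prefix bits, giving
     * addresses of the form 10xxxxxx xxxxxxxx/16.
     */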
    if (ip_class == IP_CLASS_A) {         /* IP Address class A */
        fixed_bit_num = IP_HEAD_BIT_NUM_A;
        ip_head_mask = IP_HEAD_MASK_A;
        rule_num = rule_count.a[depth - 1];
    } else if (ip_class == IP_CLASS_B) {  /* IP Address class B */
        fixed_bit_num = IP_HEAD_BIT_NUM_B;
        ip_head_mask = IP_HEAD_MASK_B;
        rule_num = rule_count.b[depth - 1];
    } else {                              /* IP Address class C */
        fixed_bit_num = IP_HEAD_BIT_NUM_C;
        ip_head_mask = IP_HEAD_MASK_C;
        rule_num = rule_count.c[depth - 1];
    }

    if (rule_num == 0)
        return;
    /* the number of remaining bits, which doesn't include the most
     * significant fixed bits for this IP address class
     */
    class_depth = depth - fixed_bit_num;

    /* range is the maximum number of rules for this depth and
     * this IP address class
     */
    range = 1 << class_depth;
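    /* For example, a class B rule (fixed_bit_num = 2) of depth 16 has
     * class_depth = 14, so range = 1 << 14 = 16384 possible prefixes
     * at this depth.
     */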
    /* only mask the most significant generated bits, excluding the
     * fixed bits for the IP address class
     */
    mask = range - 1;

    /* Widen coverage of IP addresses in generated rules */
    if (range <= rule_num)
        step = 1;
    else
        step = round((double)range / rule_num);
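    /* Stepping by range/rule_num spreads the generated prefixes evenly
     * across the whole range instead of clustering them around the
     * random starting point.
     */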
    /* Only generate the remaining bits, not the most significant
     * fixed bits for the IP address class
     */
    start = lrand48() & mask;
    ptr_rule = &large_route_table[num_route_entries];
    ptr_ldepth_rule = &large_ldepth_route_table[num_ldepth_route_entries];
    for (k = 0; k < rule_num; k++) {
        ptr_rule->ip = (start << (RTE_LPM_MAX_DEPTH - depth))
            | ip_head_mask;
        ptr_rule->depth = depth;
        /* If the depth of the route is more than 24, store it
         * in another table as well.
         */
        if (depth > 24) {
            ptr_ldepth_rule->ip = ptr_rule->ip;
            ptr_ldepth_rule->depth = ptr_rule->depth;
            ptr_ldepth_rule++;
            num_ldepth_route_entries++;
        }
        ptr_rule++;
        start = (start + step) & mask;
    }
    num_route_entries += rule_num;
}
static void insert_rule_in_random_pos(uint32_t ip, uint8_t depth)
{
    uint32_t pos;
    int try_count = 0;
    struct route_rule tmp;

    do {
        pos = lrand48();
        try_count++;
    } while ((try_count < 10) && (pos > num_route_entries));

    if ((pos > num_route_entries) || (pos >= MAX_RULE_NUM))
        pos = num_route_entries >> 1;

    tmp = large_route_table[pos];
    large_route_table[pos].ip = ip;
    large_route_table[pos].depth = depth;
    if (num_route_entries < MAX_RULE_NUM)
        large_route_table[num_route_entries++] = tmp;
}
static void generate_large_route_rule_table(void)
{
    uint32_t ip_class;
    uint8_t depth;

    num_route_entries = 0;
    num_ldepth_route_entries = 0;
    memset(large_route_table, 0, sizeof(large_route_table));

    for (ip_class = IP_CLASS_A; ip_class <= IP_CLASS_C; ip_class++) {
        for (depth = 1; depth <= RTE_LPM_MAX_DEPTH; depth++) {
            generate_random_rule_prefix(ip_class, depth);
        }
    }

    /* Add the following rules to keep the table the same as the
     * previous large constant table: 4 rules with private local IP
     * addresses and 1 all-zeros prefix.
     */
    insert_rule_in_random_pos(RTE_IPV4(0, 0, 0, 0), 8);
    insert_rule_in_random_pos(RTE_IPV4(10, 2, 23, 147), 32);
    insert_rule_in_random_pos(RTE_IPV4(192, 168, 100, 10), 24);
    insert_rule_in_random_pos(RTE_IPV4(192, 168, 25, 100), 24);
    insert_rule_in_random_pos(RTE_IPV4(192, 168, 129, 124), 32);
}
static void
print_route_distribution(const struct route_rule *table, uint32_t n)
{
    unsigned int i, j;

    printf("Route distribution per prefix width:\n");
    printf("DEPTH    QUANTITY (PERCENT)\n");
    printf("---------------------------\n");

    /* Count rules per depth. */
    for (i = 1; i <= 32; i++) {
        unsigned int depth_counter = 0;
        double percent_hits;

        for (j = 0; j < n; j++)
            if (table[j].depth == (uint8_t) i)
                depth_counter++;

        percent_hits = ((double)depth_counter)/((double)n) * 100;
        printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
    }
    printf("\n");
}
static uint16_t enabled_core_ids[RTE_MAX_LCORE];
static unsigned int num_cores;
/* Simple way to allocate thread ids in 0 to RTE_MAX_LCORE space */
static inline uint32_t
alloc_thread_id(void)
{
    uint32_t tmp_thr_id;

    tmp_thr_id = __atomic_fetch_add(&thr_id, 1, __ATOMIC_RELAXED);
    if (tmp_thr_id >= RTE_MAX_LCORE)
        printf("Invalid thread id %u\n", tmp_thr_id);

    return tmp_thr_id;
}
/*
 * Reader thread using rte_lpm data structure without RCU.
 */
static int
test_lpm_reader(void *arg)
{
    int i;
    uint32_t ip_batch[QSBR_REPORTING_INTERVAL];
    uint32_t next_hop_return = 0;

    RTE_SET_USED(arg);

    do {
        for (i = 0; i < QSBR_REPORTING_INTERVAL; i++)
            ip_batch[i] = rte_rand();

        for (i = 0; i < QSBR_REPORTING_INTERVAL; i++)
            rte_lpm_lookup(lpm, ip_batch[i], &next_hop_return);
    } while (!writer_done);

    return 0;
}
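/* The RCU variant below differs only in registering with the QSBR
 * variable and reporting a quiescent state after each lookup batch.
 */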
/*
 * Reader thread using rte_lpm data structure with RCU.
 */
static int
test_lpm_rcu_qsbr_reader(void *arg)
{
    int i;
    uint32_t thread_id = alloc_thread_id();
    uint32_t ip_batch[QSBR_REPORTING_INTERVAL];
    uint32_t next_hop_return = 0;

    RTE_SET_USED(arg);

    /* Register this thread to report quiescent state */
    rte_rcu_qsbr_thread_register(rv, thread_id);
    rte_rcu_qsbr_thread_online(rv, thread_id);

    do {
        for (i = 0; i < QSBR_REPORTING_INTERVAL; i++)
            ip_batch[i] = rte_rand();

        for (i = 0; i < QSBR_REPORTING_INTERVAL; i++)
            rte_lpm_lookup(lpm, ip_batch[i], &next_hop_return);

        /* Update quiescent state */
        rte_rcu_qsbr_quiescent(rv, thread_id);
    } while (!writer_done);

    rte_rcu_qsbr_thread_offline(rv, thread_id);
    rte_rcu_qsbr_thread_unregister(rv, thread_id);

    return 0;
}
/*
 * Writer thread using rte_lpm data structure with RCU.
 */
static int
test_lpm_rcu_qsbr_writer(void *arg)
{
    unsigned int i, j, si, ei;
    uint64_t begin, total_cycles;
    uint32_t next_hop_add = 0xAA;
    uint8_t pos_core = (uint8_t)((uintptr_t)arg);

    si = (pos_core * NUM_LDEPTH_ROUTE_ENTRIES) / num_writers;
    ei = ((pos_core + 1) * NUM_LDEPTH_ROUTE_ENTRIES) / num_writers;
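    /* Example split: with num_writers == 2 and N ldepth routes,
     * writer 0 covers [0, N/2) and writer 1 covers [N/2, N), so
     * concurrent writers never touch the same route.
     */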
    /* Measure add/delete. */
    begin = rte_rdtsc_precise();
    for (i = 0; i < RCU_ITERATIONS; i++) {
        /* Add all the entries */
        for (j = si; j < ei; j++) {
            pthread_mutex_lock(&lpm_mutex);
            if (rte_lpm_add(lpm, large_ldepth_route_table[j].ip,
                    large_ldepth_route_table[j].depth,
                    next_hop_add) != 0) {
                printf("Failed to add iteration %d, route# %d\n",
                    i, j);
                goto error;
            }
            pthread_mutex_unlock(&lpm_mutex);
        }

        /* Delete all the entries */
        for (j = si; j < ei; j++) {
            pthread_mutex_lock(&lpm_mutex);
            if (rte_lpm_delete(lpm, large_ldepth_route_table[j].ip,
                    large_ldepth_route_table[j].depth) != 0) {
                printf("Failed to delete iteration %d, route# %d\n",
                    i, j);
                goto error;
            }
            pthread_mutex_unlock(&lpm_mutex);
        }
    }
    total_cycles = rte_rdtsc_precise() - begin;

    __atomic_fetch_add(&gwrite_cycles, total_cycles, __ATOMIC_RELAXED);

    return 0;

error:
    pthread_mutex_unlock(&lpm_mutex);
    return -1;
}
/*
 * Test with 1 and then 2 writer threads; the remaining cores are
 * readers.
 */
static int
test_lpm_rcu_perf_multi_writer(uint8_t use_rcu)
{
    struct rte_lpm_config config;
    size_t sz;
    unsigned int i, j;
    uint16_t core_id;
    struct rte_lpm_rcu_config rcu_cfg = {0};
    int (*reader_f)(void *arg) = NULL;

    if (rte_lcore_count() < 3) {
        printf("Not enough cores for lpm_rcu_perf_autotest, expecting at least 3\n");
        return TEST_SKIPPED;
    }

    num_cores = 0;
    RTE_LCORE_FOREACH_WORKER(core_id) {
        enabled_core_ids[num_cores] = core_id;
        num_cores++;
    }

    for (j = 1; j < 3; j++) {
        if (use_rcu)
            printf("\nPerf test: %d writer(s), %d reader(s),"
                " RCU integration enabled\n", j, num_cores - j);
        else
            printf("\nPerf test: %d writer(s), %d reader(s),"
                " RCU integration disabled\n", j, num_cores - j);
        num_writers = j;

        /* Create LPM table */
        config.max_rules = NUM_LDEPTH_ROUTE_ENTRIES;
        config.number_tbl8s = NUM_LDEPTH_ROUTE_ENTRIES;
        config.flags = 0;
        lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
        TEST_LPM_ASSERT(lpm != NULL);

        /* Init RCU variable */
        if (use_rcu) {
            sz = rte_rcu_qsbr_get_memsize(num_cores);
            rv = (struct rte_rcu_qsbr *)rte_zmalloc("rcu0", sz,
                            RTE_CACHE_LINE_SIZE);
            rte_rcu_qsbr_init(rv, num_cores);

            rcu_cfg.v = rv;
            /* Assign the RCU variable to LPM */
            if (rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg) != 0) {
                printf("RCU variable assignment failed\n");
                goto error;
            }

            reader_f = test_lpm_rcu_qsbr_reader;
        } else
            reader_f = test_lpm_reader;
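        /* In the default (defer queue) mode configured above,
         * rte_lpm_delete() defers freeing of tbl8 groups until the
         * registered readers report a quiescent state, so readers
         * never dereference freed memory.
         */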
546 __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
548 __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
550 /* Launch reader threads */
551 for (i = j; i < num_cores; i++)
552 rte_eal_remote_launch(reader_f, NULL,
553 enabled_core_ids[i]);
555 /* Launch writer threads */
556 for (i = 0; i < j; i++)
557 rte_eal_remote_launch(test_lpm_rcu_qsbr_writer,
558 (void *)(uintptr_t)i,
559 enabled_core_ids[i]);
        /* Wait for writer threads */
        for (i = 0; i < j; i++)
            if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
                goto error;

        printf("Total LPM Adds: %d\n", TOTAL_WRITES);
        printf("Total LPM Deletes: %d\n", TOTAL_WRITES);
        printf("Average LPM Add/Del: %"PRIu64" cycles\n",
            __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED)
            / TOTAL_WRITES);

        writer_done = 1;
        /* Wait until all readers have exited */
        for (i = j; i < num_cores; i++)
            rte_eal_wait_lcore(enabled_core_ids[i]);

        rte_lpm_free(lpm);
        rte_free(rv);
        lpm = NULL;
        rv = NULL;
    }

    return 0;

error:
    writer_done = 1;
    /* Wait until all readers have exited */
    rte_eal_mp_wait_lcore();

    rte_lpm_free(lpm);
    rte_free(rv);

    return -1;
}
static int
test_lpm_perf(void)
{
    struct rte_lpm_config config;
    uint64_t begin, total_time, lpm_used_entries = 0;
    unsigned int i, j;
    uint32_t next_hop_add = 0xAA, next_hop_return = 0;
    int status = 0;
    uint64_t cache_line_counter = 0;
    int64_t count = 0;

    config.max_rules = 2000000;
    config.number_tbl8s = 2048;
    config.flags = 0;

    rte_srand(rte_rdtsc());

    generate_large_route_rule_table();

    printf("No. routes = %u\n", (unsigned int) NUM_ROUTE_ENTRIES);

    print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);

    lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
    TEST_LPM_ASSERT(lpm != NULL);
    /* Measure add. */
    begin = rte_rdtsc();

    for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
        if (rte_lpm_add(lpm, large_route_table[i].ip,
                large_route_table[i].depth, next_hop_add) == 0)
            status++;
    }
    /* End Timer. */
    total_time = rte_rdtsc() - begin;

    printf("Unique added entries = %d\n", status);
    /* Obtain add statistics. */
    for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
        if (lpm->tbl24[i].valid)
            lpm_used_entries++;

        if (i % 32 == 0) {
            if ((uint64_t)count < lpm_used_entries) {
                cache_line_counter++;
                count = lpm_used_entries;
            }
        }
    }

    printf("Used table 24 entries = %u (%g%%)\n",
        (unsigned int) lpm_used_entries,
        (lpm_used_entries * 100.0) / RTE_LPM_TBL24_NUM_ENTRIES);
    printf("64 byte Cache entries used = %u (%u bytes)\n",
        (unsigned int) cache_line_counter,
        (unsigned int) cache_line_counter * 64);

    printf("Average LPM Add: %g cycles\n",
        (double)total_time / NUM_ROUTE_ENTRIES);
    /* Measure single Lookup */
    total_time = 0;
    count = 0;

    for (i = 0; i < ITERATIONS; i++) {
        static uint32_t ip_batch[BATCH_SIZE];

        for (j = 0; j < BATCH_SIZE; j++)
            ip_batch[j] = rte_rand();

        /* Lookup per batch */
        begin = rte_rdtsc();

        for (j = 0; j < BATCH_SIZE; j++) {
            if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
                count++;
        }

        total_time += rte_rdtsc() - begin;
    }
    printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
        (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
        (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
    /* Measure bulk Lookup */
    total_time = 0;
    count = 0;
    for (i = 0; i < ITERATIONS; i++) {
        static uint32_t ip_batch[BATCH_SIZE];
        uint32_t next_hops[BULK_SIZE];

        /* Create array of random IP addresses */
        for (j = 0; j < BATCH_SIZE; j++)
            ip_batch[j] = rte_rand();

        /* Lookup per batch */
        begin = rte_rdtsc();
        for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
            unsigned int k;

            rte_lpm_lookup_bulk(lpm, &ip_batch[j], next_hops, BULK_SIZE);
            for (k = 0; k < BULK_SIZE; k++)
                if (unlikely(!(next_hops[k] & RTE_LPM_LOOKUP_SUCCESS)))
                    count++;
        }

        total_time += rte_rdtsc() - begin;
    }
    printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
        (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
        (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
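    /* rte_lpm_lookupx4() below takes four packed IPv4 addresses in one
     * vector register plus a default value that is returned for
     * addresses that miss the table; the test passes UINT32_MAX and
     * counts those as failures.
     */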
    /* Measure LookupX4 */
    total_time = 0;
    count = 0;
    for (i = 0; i < ITERATIONS; i++) {
        static uint32_t ip_batch[BATCH_SIZE];
        uint32_t next_hops[4];

        /* Create array of random IP addresses */
        for (j = 0; j < BATCH_SIZE; j++)
            ip_batch[j] = rte_rand();

        /* Lookup per batch */
        begin = rte_rdtsc();
        for (j = 0; j < BATCH_SIZE; j += RTE_DIM(next_hops)) {
            unsigned int k;
            xmm_t ipx4;

            ipx4 = vect_loadu_sil128((xmm_t *)(ip_batch + j));
            rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT32_MAX);
            for (k = 0; k < RTE_DIM(next_hops); k++)
                if (unlikely(next_hops[k] == UINT32_MAX))
                    count++;
        }

        total_time += rte_rdtsc() - begin;
    }
    printf("LPM LookupX4: %.1f cycles (fails = %.1f%%)\n",
        (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
        (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
    /* Measure Delete */
    status = 0;
    begin = rte_rdtsc();

    for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
        /* rte_lpm_delete(lpm, ip, depth) */
        status += rte_lpm_delete(lpm, large_route_table[i].ip,
                large_route_table[i].depth);
    }

    total_time = rte_rdtsc() - begin;

    printf("Average LPM Delete: %g cycles\n",
        (double)total_time / NUM_ROUTE_ENTRIES);
    rte_lpm_delete_all(lpm);
    rte_lpm_free(lpm);

    if (test_lpm_rcu_perf_multi_writer(0) < 0)
        return -1;

    if (test_lpm_rcu_perf_multi_writer(1) < 0)
        return -1;

    return 0;
}

REGISTER_TEST_COMMAND(lpm_perf_autotest, test_lpm_perf);