1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
3 * Copyright(c) 2020 Arm Limited
8 #ifdef RTE_EXEC_ENV_WINDOWS
12 printf("lpm_perf not supported on Windows, skipping test\n");
22 #include <rte_cycles.h>
23 #include <rte_random.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_malloc.h>
29 #include "test_xmmt_ops.h"
/* Shared state for the RCU/LPM perf tests below. */
32 static struct rte_rcu_qsbr *rv;
/* Set by the main lcore to make reader lcores exit their lookup loops. */
33 static volatile uint8_t writer_done;
/* Monotonic counter used to hand out per-thread ids (see alloc_thread_id). */
34 static volatile uint32_t thr_id;
/* Sum of cycles spent by all writers doing add/delete (atomic accumulation). */
35 static uint64_t gwrite_cycles;
/* Number of writer lcores in the current test round. */
36 static uint32_t num_writers;
37 /* LPM APIs are not thread safe, use mutex to provide thread safety */
38 static pthread_mutex_t lpm_mutex = PTHREAD_MUTEX_INITIALIZER;
40 /* Report quiescent state interval every 1024 lookups. Larger critical
41 * sections in reader will result in writer polling multiple times.
43 #define QSBR_REPORTING_INTERVAL 1024
/* Print the failing line number and abort the test (macro body partly
 * elided in this listing).
 */
45 #define TEST_LPM_ASSERT(cond) do { \
47 printf("Error at line %d: \n", __LINE__); \
/* Number of lookup batches per measurement. */
52 #define ITERATIONS (1 << 10)
/* Add/delete passes performed by each RCU writer thread. */
53 #define RCU_ITERATIONS 10
/* Number of IP addresses looked up per batch. */
54 #define BATCH_SIZE (1 << 12)
/* Upper bound on the number of generated routes. */
57 #define MAX_RULE_NUM (1200000)
/* Full generated route table (all prefix depths). */
64 static struct route_rule large_route_table[MAX_RULE_NUM];
65 /* Route table for routes with depth > 24 */
66 struct route_rule large_ldepth_route_table[MAX_RULE_NUM];
/* Current fill levels of the two tables above. */
68 static uint32_t num_route_entries;
69 static uint32_t num_ldepth_route_entries;
70 #define NUM_ROUTE_ENTRIES num_route_entries
71 #define NUM_LDEPTH_ROUTE_ENTRIES num_ldepth_route_entries
/* Total adds (and deletes) performed across all writers in one test run. */
73 #define TOTAL_WRITES (RCU_ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES)
81 /* struct route_rule_count holds, for each common IP address class (A/B/C),
82 * the number of rules to generate per prefix depth: index [depth - 1] is the
83 * rule count at that depth. Private local-network addresses are excluded.
85 struct route_rule_count {
86 uint32_t a[RTE_LPM_MAX_DEPTH];
87 uint32_t b[RTE_LPM_MAX_DEPTH];
88 uint32_t c[RTE_LPM_MAX_DEPTH];
91 /* The per-depth counts below were taken from the previous large constant
92 * table in app/test/test_lpm_routes.h. To match similar performance, they
93 * keep the same depth and IP address coverage as that constant table. The
94 * numbers do not include any private local IP addresses. As the previous
95 * large const rule table was dumped from a real router, there are no
96 * artificially constructed entries in these distributions.
99 static struct route_rule_count rule_count = {
100 .a = { /* IP class A in which the most significant bit is 0 */
112 144, /* depth = 12 */
113 233, /* depth = 13 */
114 528, /* depth = 14 */
115 866, /* depth = 15 */
116 3856, /* depth = 16 */
117 3268, /* depth = 17 */
118 5662, /* depth = 18 */
119 17301, /* depth = 19 */
120 22226, /* depth = 20 */
121 11147, /* depth = 21 */
122 16746, /* depth = 22 */
123 17120, /* depth = 23 */
124 77578, /* depth = 24 */
125 401, /* depth = 25 */
126 656, /* depth = 26 */
127 1107, /* depth = 27 */
128 1121, /* depth = 28 */
129 2316, /* depth = 29 */
130 717, /* depth = 30 */
134 .b = { /* IP class B in which the most 2 significant bits are 10 */
146 168, /* depth = 12 */
147 305, /* depth = 13 */
148 569, /* depth = 14 */
149 1129, /* depth = 15 */
150 50800, /* depth = 16 */
151 1645, /* depth = 17 */
152 1820, /* depth = 18 */
153 3506, /* depth = 19 */
154 3258, /* depth = 20 */
155 3424, /* depth = 21 */
156 4971, /* depth = 22 */
157 6885, /* depth = 23 */
158 39771, /* depth = 24 */
159 424, /* depth = 25 */
160 170, /* depth = 26 */
161 433, /* depth = 27 */
163 366, /* depth = 29 */
164 377, /* depth = 30 */
168 .c = { /* IP class C in which the most 3 significant bits are 110 */
181 237, /* depth = 13 */
182 1007, /* depth = 14 */
183 1717, /* depth = 15 */
184 14663, /* depth = 16 */
185 8070, /* depth = 17 */
186 16185, /* depth = 18 */
187 48261, /* depth = 19 */
188 36870, /* depth = 20 */
189 33960, /* depth = 21 */
190 50638, /* depth = 22 */
191 61422, /* depth = 23 */
192 466549, /* depth = 24 */
193 1829, /* depth = 25 */
194 4824, /* depth = 26 */
195 4927, /* depth = 27 */
196 5914, /* depth = 28 */
197 10254, /* depth = 29 */
198 4905, /* depth = 30 */
/* Generate rule_count.{a,b,c}[depth - 1] random route rules of the given
 * prefix depth within the given IP address class and append them to
 * large_route_table[]; rules with depth > 24 are mirrored into
 * large_ldepth_route_table[] as well. Rules are spread across the class's
 * address range using a fixed step so coverage is wide. (Several lines of
 * the original body are elided in this listing.)
 */
204 static void generate_random_rule_prefix(uint32_t ip_class, uint8_t depth)
206 /* IP address class A, the most significant bit is 0 */
207 #define IP_HEAD_MASK_A 0x00000000
208 #define IP_HEAD_BIT_NUM_A 1
210 /* IP address class B, the most significant 2 bits are 10 */
211 #define IP_HEAD_MASK_B 0x80000000
212 #define IP_HEAD_BIT_NUM_B 2
214 /* IP address class C, the most significant 3 bits are 110 */
215 #define IP_HEAD_MASK_C 0xC0000000
216 #define IP_HEAD_BIT_NUM_C 3
218 uint32_t class_depth;
223 uint32_t fixed_bit_num;
224 uint32_t ip_head_mask;
227 struct route_rule *ptr_rule, *ptr_ldepth_rule;
/* Select the class-specific fixed prefix bits and the per-depth rule count. */
229 if (ip_class == IP_CLASS_A) { /* IP Address class A */
230 fixed_bit_num = IP_HEAD_BIT_NUM_A;
231 ip_head_mask = IP_HEAD_MASK_A;
232 rule_num = rule_count.a[depth - 1];
233 } else if (ip_class == IP_CLASS_B) { /* IP Address class B */
234 fixed_bit_num = IP_HEAD_BIT_NUM_B;
235 ip_head_mask = IP_HEAD_MASK_B;
236 rule_num = rule_count.b[depth - 1];
237 } else { /* IP Address class C */
238 fixed_bit_num = IP_HEAD_BIT_NUM_C;
239 ip_head_mask = IP_HEAD_MASK_C;
240 rule_num = rule_count.c[depth - 1];
246 /* the number of rest bits which don't include the most significant
247 * fixed bits for this IP address class
249 class_depth = depth - fixed_bit_num;
251 /* range is the maximum number of rules for this depth and
252 * this IP address class
254 range = 1 << class_depth;
256 /* only mask the most depth significant generated bits
257 * except fixed bits for IP address class
261 /* Widen coverage of IP address in generated rules */
262 if (range <= rule_num)
/* NOTE(review): step presumably set to 1 on the elided branch above;
 * otherwise stride across the range so rules cover it evenly.
 */
265 step = round((double)range / rule_num);
267 /* Only generate rest bits except the most significant
268 * fixed bits for IP address class
270 start = lrand48() & mask;
271 ptr_rule = &large_route_table[num_route_entries];
272 ptr_ldepth_rule = &large_ldepth_route_table[num_ldepth_route_entries];
273 for (k = 0; k < rule_num; k++) {
/* Shift generated bits into the top of the address; the class head
 * mask is presumably OR-ed in on the elided continuation line.
 */
274 ptr_rule->ip = (start << (RTE_LPM_MAX_DEPTH - depth))
276 ptr_rule->depth = depth;
277 /* If the depth of the route is more than 24, store it
278 * in another table as well.
281 ptr_ldepth_rule->ip = ptr_rule->ip;
282 ptr_ldepth_rule->depth = ptr_rule->depth;
284 num_ldepth_route_entries++;
287 start = (start + step) & mask;
289 num_route_entries += rule_num;
/* Insert one extra rule at a random position inside large_route_table[],
 * then push the displaced entry to the end of the table if there is room.
 * (The random-position loop body is elided in this listing.)
 */
292 static void insert_rule_in_random_pos(uint32_t ip, uint8_t depth)
296 struct route_rule tmp;
/* Retry (up to 10 times) until pos falls inside the populated region. */
301 } while ((try_count < 10) && (pos > num_route_entries));
/* Fall back to the middle of the table if no valid position was found. */
303 if ((pos > num_route_entries) || (pos >= MAX_RULE_NUM))
304 pos = num_route_entries >> 1;
/* Overwrite the chosen slot and append the displaced rule at the end. */
306 tmp = large_route_table[pos];
307 large_route_table[pos].ip = ip;
308 large_route_table[pos].depth = depth;
309 if (num_route_entries < MAX_RULE_NUM)
310 large_route_table[num_route_entries++] = tmp;
/* (Re)build the full random route table: reset both tables, generate rules
 * for every depth of every address class, then mix in a handful of fixed
 * rules matching the previous constant table.
 */
313 static void generate_large_route_rule_table(void)
318 num_route_entries = 0;
319 num_ldepth_route_entries = 0;
320 memset(large_route_table, 0, sizeof(large_route_table));
/* Generate rules for every (class, depth) combination. */
322 for (ip_class = IP_CLASS_A; ip_class <= IP_CLASS_C; ip_class++) {
323 for (depth = 1; depth <= RTE_LPM_MAX_DEPTH; depth++) {
324 generate_random_rule_prefix(ip_class, depth);
328 /* Add following rules to keep same as previous large constant table,
329 * they are 4 rules with private local IP address and 1 all-zeros prefix
332 insert_rule_in_random_pos(RTE_IPV4(0, 0, 0, 0), 8);
333 insert_rule_in_random_pos(RTE_IPV4(10, 2, 23, 147), 32);
334 insert_rule_in_random_pos(RTE_IPV4(192, 168, 100, 10), 24);
335 insert_rule_in_random_pos(RTE_IPV4(192, 168, 25, 100), 24);
336 insert_rule_in_random_pos(RTE_IPV4(192, 168, 129, 124), 32);
/* Print, for each prefix depth 1..32, how many of the n rules in table[]
 * have that depth and what percentage of the total that represents.
 */
340 print_route_distribution(const struct route_rule *table, uint32_t n)
344 printf("Route distribution per prefix width: \n");
345 printf("DEPTH QUANTITY (PERCENT)\n");
346 printf("--------------------------- \n");
/* Count and report rules per depth. */
349 for (i = 1; i <= 32; i++) {
350 unsigned depth_counter = 0;
353 for (j = 0; j < n; j++)
354 if (table[j].depth == (uint8_t) i)
357 percent_hits = ((double)depth_counter)/((double)n) * 100;
358 printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
363 /* Check condition and return an error if true. */
/* Worker lcore ids enabled for this test, filled by the perf entry point. */
364 static uint16_t enabled_core_ids[RTE_MAX_LCORE];
365 static unsigned int num_cores;
367 /* Simple way to allocate thread ids in 0 to RTE_MAX_LCORE space */
368 static inline uint32_t
369 alloc_thread_id(void)
/* Atomically claim the next id; ids beyond RTE_MAX_LCORE are reported but
 * (per the visible code) not otherwise rejected. Return statement is
 * elided in this listing — presumably returns tmp_thr_id.
 */
373 tmp_thr_id = __atomic_fetch_add(&thr_id, 1, __ATOMIC_RELAXED);
374 if (tmp_thr_id >= RTE_MAX_LCORE)
375 printf("Invalid thread id %u\n", tmp_thr_id);
381 * Reader thread using rte_lpm data structure without RCU: repeatedly look
 * up batches of random IPv4 addresses until the writer signals completion.
384 test_lpm_reader(void *arg)
387 uint32_t ip_batch[QSBR_REPORTING_INTERVAL];
388 uint32_t next_hop_return = 0;
/* Fill a batch of random addresses, then look them all up; loop until
 * writer_done is set by the main thread.
 */
392 for (i = 0; i < QSBR_REPORTING_INTERVAL; i++)
393 ip_batch[i] = rte_rand();
395 for (i = 0; i < QSBR_REPORTING_INTERVAL; i++)
396 rte_lpm_lookup(lpm, ip_batch[i], &next_hop_return);
398 } while (!writer_done);
404 * Reader thread using rte_lpm data structure with RCU: same lookup loop as
 * test_lpm_reader, but registered with the QSBR variable and reporting a
 * quiescent state after each batch of QSBR_REPORTING_INTERVAL lookups.
407 test_lpm_rcu_qsbr_reader(void *arg)
410 uint32_t thread_id = alloc_thread_id();
411 uint32_t ip_batch[QSBR_REPORTING_INTERVAL];
412 uint32_t next_hop_return = 0;
415 /* Register this thread to report quiescent state */
416 rte_rcu_qsbr_thread_register(rv, thread_id);
417 rte_rcu_qsbr_thread_online(rv, thread_id);
420 for (i = 0; i < QSBR_REPORTING_INTERVAL; i++)
421 ip_batch[i] = rte_rand();
423 for (i = 0; i < QSBR_REPORTING_INTERVAL; i++)
424 rte_lpm_lookup(lpm, ip_batch[i], &next_hop_return);
426 /* Update quiescent state */
427 rte_rcu_qsbr_quiescent(rv, thread_id);
428 } while (!writer_done);
/* Deregister before exiting so the writer no longer waits on this thread. */
430 rte_rcu_qsbr_thread_offline(rv, thread_id);
431 rte_rcu_qsbr_thread_unregister(rv, thread_id);
437 * Writer thread using rte_lpm data structure with RCU. Each of the
 * num_writers writers owns the slice [si, ei) of the depth>24 route table
 * (arg carries the writer index) and, for RCU_ITERATIONS rounds, adds all
 * its routes and then deletes them, serialized by lpm_mutex. The cycles
 * spent are accumulated atomically into gwrite_cycles.
440 test_lpm_rcu_qsbr_writer(void *arg)
442 unsigned int i, j, si, ei;
443 uint64_t begin, total_cycles;
444 uint32_t next_hop_add = 0xAA;
445 uint8_t pos_core = (uint8_t)((uintptr_t)arg);
/* Partition the depth>24 table evenly among the writers. */
447 si = (pos_core * NUM_LDEPTH_ROUTE_ENTRIES) / num_writers;
448 ei = ((pos_core + 1) * NUM_LDEPTH_ROUTE_ENTRIES) / num_writers;
450 /* Measure add/delete. */
451 begin = rte_rdtsc_precise();
452 for (i = 0; i < RCU_ITERATIONS; i++) {
453 /* Add all the entries */
454 for (j = si; j < ei; j++) {
456 pthread_mutex_lock(&lpm_mutex);
457 if (rte_lpm_add(lpm, large_ldepth_route_table[j].ip,
458 large_ldepth_route_table[j].depth,
459 next_hop_add) != 0) {
460 printf("Failed to add iteration %d, route# %d\n",
465 pthread_mutex_unlock(&lpm_mutex);
468 /* Delete all the entries */
469 for (j = si; j < ei; j++) {
471 pthread_mutex_lock(&lpm_mutex);
472 if (rte_lpm_delete(lpm, large_ldepth_route_table[j].ip,
473 large_ldepth_route_table[j].depth) != 0) {
474 printf("Failed to delete iteration %d, route# %d\n",
479 pthread_mutex_unlock(&lpm_mutex);
483 total_cycles = rte_rdtsc_precise() - begin;
/* Fold this writer's cycle count into the global total. */
485 __atomic_fetch_add(&gwrite_cycles, total_cycles, __ATOMIC_RELAXED);
/* Error path (elided in this listing): release the mutex before exiting. */
491 pthread_mutex_unlock(&lpm_mutex);
497 * Perf test driver: runs two rounds (1 writer, then 2 writers), with the
 * remaining worker lcores acting as readers. use_rcu selects between
 * RCU-integrated readers (test_lpm_rcu_qsbr_reader) and plain readers.
500 test_lpm_rcu_perf_multi_writer(uint8_t use_rcu)
502 struct rte_lpm_config config;
506 struct rte_lpm_rcu_config rcu_cfg = {0};
507 int (*reader_f)(void *arg) = NULL;
/* Need at least one main lcore, one writer and one reader. */
509 if (rte_lcore_count() < 3) {
510 printf("Not enough cores for lpm_rcu_perf_autotest, expecting at least 3\n");
/* Collect the available worker lcore ids. */
515 RTE_LCORE_FOREACH_WORKER(core_id) {
516 enabled_core_ids[num_cores] = core_id;
/* Round j uses j writers (1 then 2) and num_cores - j readers. */
520 for (j = 1; j < 3; j++) {
522 printf("\nPerf test: %d writer(s), %d reader(s),"
523 " RCU integration enabled\n", j, num_cores - j);
525 printf("\nPerf test: %d writer(s), %d reader(s),"
526 " RCU integration disabled\n", j, num_cores - j);
530 /* Create LPM table */
531 config.max_rules = NUM_LDEPTH_ROUTE_ENTRIES;
532 config.number_tbl8s = NUM_LDEPTH_ROUTE_ENTRIES;
534 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
535 TEST_LPM_ASSERT(lpm != NULL);
537 /* Init RCU variable */
539 sz = rte_rcu_qsbr_get_memsize(num_cores);
540 rv = (struct rte_rcu_qsbr *)rte_zmalloc("rcu0", sz,
541 RTE_CACHE_LINE_SIZE);
542 rte_rcu_qsbr_init(rv, num_cores);
545 /* Assign the RCU variable to LPM */
546 if (rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg) != 0) {
547 printf("RCU variable assignment failed\n");
551 reader_f = test_lpm_rcu_qsbr_reader;
553 reader_f = test_lpm_reader;
/* Reset the per-round counters before launching threads. */
556 __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
558 __atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
560 /* Launch reader threads */
561 for (i = j; i < num_cores; i++)
562 rte_eal_remote_launch(reader_f, NULL,
563 enabled_core_ids[i]);
565 /* Launch writer threads */
566 for (i = 0; i < j; i++)
567 rte_eal_remote_launch(test_lpm_rcu_qsbr_writer,
568 (void *)(uintptr_t)i,
569 enabled_core_ids[i]);
571 /* Wait for writer threads */
572 for (i = 0; i < j; i++)
573 if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
/* Report aggregate write statistics for this round. */
576 printf("Total LPM Adds: %d\n", TOTAL_WRITES);
577 printf("Total LPM Deletes: %d\n", TOTAL_WRITES);
578 printf("Average LPM Add/Del: %"PRIu64" cycles\n",
579 __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED)
583 /* Wait until all readers have exited */
584 for (i = j; i < num_cores; i++)
585 rte_eal_wait_lcore(enabled_core_ids[i]);
597 /* Error path (elided): wait until all readers have exited */
598 rte_eal_mp_wait_lcore();
/* Body of the single-threaded LPM perf test (function signature elided in
 * this listing): builds a large random route table, measures add, single
 * lookup, bulk lookup, x4 lookup and delete performance, then runs the
 * multi-writer RCU perf tests.
 */
609 struct rte_lpm_config config;
611 config.max_rules = 2000000;
612 config.number_tbl8s = 2048;
614 uint64_t begin, total_time, lpm_used_entries = 0;
616 uint32_t next_hop_add = 0xAA, next_hop_return = 0;
618 uint64_t cache_line_counter = 0;
/* Seed the PRNG so each run uses a different random rule set. */
621 rte_srand(rte_rdtsc());
623 generate_large_route_rule_table();
625 printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);
627 print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);
629 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
630 TEST_LPM_ASSERT(lpm != NULL);
/* Measure add: insert every generated route, counting unique successes. */
635 for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
636 if (rte_lpm_add(lpm, large_route_table[i].ip,
637 large_route_table[i].depth, next_hop_add) == 0)
641 total_time = rte_rdtsc() - begin;
643 printf("Unique added entries = %d\n", status);
644 /* Obtain add statistics. */
645 for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
646 if (lpm->tbl24[i].valid)
/* Track how many 64-byte cache lines of tbl24 hold valid entries. */
650 if ((uint64_t)count < lpm_used_entries) {
651 cache_line_counter++;
652 count = lpm_used_entries;
657 printf("Used table 24 entries = %u (%g%%)\n",
658 (unsigned) lpm_used_entries,
659 (lpm_used_entries * 100.0) / RTE_LPM_TBL24_NUM_ENTRIES);
660 printf("64 byte Cache entries used = %u (%u bytes)\n",
661 (unsigned) cache_line_counter, (unsigned) cache_line_counter * 64);
663 printf("Average LPM Add: %g cycles\n",
664 (double)total_time / NUM_ROUTE_ENTRIES);
666 /* Measure single Lookup */
670 for (i = 0; i < ITERATIONS; i++) {
671 static uint32_t ip_batch[BATCH_SIZE];
/* Create array of random IP addresses */
673 for (j = 0; j < BATCH_SIZE; j++)
674 ip_batch[j] = rte_rand();
676 /* Lookup per batch */
679 for (j = 0; j < BATCH_SIZE; j++) {
680 if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
684 total_time += rte_rdtsc() - begin;
687 printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
688 (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
689 (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
691 /* Measure bulk Lookup */
694 for (i = 0; i < ITERATIONS; i++) {
695 static uint32_t ip_batch[BATCH_SIZE];
696 uint32_t next_hops[BULK_SIZE];
698 /* Create array of random IP addresses */
699 for (j = 0; j < BATCH_SIZE; j++)
700 ip_batch[j] = rte_rand();
702 /* Lookup per batch */
704 for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
706 rte_lpm_lookup_bulk(lpm, &ip_batch[j], next_hops, BULK_SIZE);
707 for (k = 0; k < BULK_SIZE; k++)
708 if (unlikely(!(next_hops[k] & RTE_LPM_LOOKUP_SUCCESS)))
712 total_time += rte_rdtsc() - begin;
714 printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
715 (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
716 (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
718 /* Measure LookupX4 */
721 for (i = 0; i < ITERATIONS; i++) {
722 static uint32_t ip_batch[BATCH_SIZE];
723 uint32_t next_hops[4];
725 /* Create array of random IP addresses */
726 for (j = 0; j < BATCH_SIZE; j++)
727 ip_batch[j] = rte_rand();
729 /* Lookup per batch */
731 for (j = 0; j < BATCH_SIZE; j += RTE_DIM(next_hops)) {
/* NOTE(review): two alternative ipx4 loads appear on consecutive
 * lines; presumably one is selected by elided preprocessor
 * conditionals — confirm against the full source.
 */
735 ipx4 = vect_loadu_sil128((xmm_t *)(ip_batch + j));
736 ipx4 = *(xmm_t *)(ip_batch + j);
737 rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT32_MAX);
738 for (k = 0; k < RTE_DIM(next_hops); k++)
739 if (unlikely(next_hops[k] == UINT32_MAX))
743 total_time += rte_rdtsc() - begin;
745 printf("LPM LookupX4: %.1f cycles (fails = %.1f%%)\n",
746 (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
747 (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
/* Measure delete: remove every route that was added above. */
753 for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
754 /* rte_lpm_delete(lpm, ip, depth) */
755 status += rte_lpm_delete(lpm, large_route_table[i].ip,
756 large_route_table[i].depth);
759 total_time = rte_rdtsc() - begin;
761 printf("Average LPM Delete: %g cycles\n",
762 (double)total_time / NUM_ROUTE_ENTRIES);
764 rte_lpm_delete_all(lpm);
/* Run the multi-writer perf tests without, then with, RCU integration. */
767 if (test_lpm_rcu_perf_multi_writer(0) < 0)
770 if (test_lpm_rcu_perf_multi_writer(1) < 0)
776 #endif /* !RTE_EXEC_ENV_WINDOWS */
/* Expose the test to the DPDK test framework as "lpm_perf_autotest". */
778 REGISTER_TEST_COMMAND(lpm_perf_autotest, test_lpm_perf);