/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 * Copyright(c) 2020 Arm Limited
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <inttypes.h>
#include <pthread.h>

#include <rte_cycles.h>
#include <rte_random.h>
#include <rte_branch_prediction.h>
#include <rte_ip.h>
#include <rte_lpm.h>
#include <rte_malloc.h>
#include <rte_rcu_qsbr.h>

#include "test.h"
#include "test_xmmt_ops.h"
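
/* File-scope LPM table; its declaration is implied by the reader and writer
 * threads below, which reference it without receiving it as an argument.
 */
static struct rte_lpm *lpm;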
static struct rte_rcu_qsbr *rv;
static volatile uint8_t writer_done;
static volatile uint32_t thr_id;
static uint64_t gwrite_cycles;
static uint64_t gwrites;
/* LPM APIs are not thread-safe; writers use this mutex for mutual exclusion. */
static pthread_mutex_t lpm_mutex = PTHREAD_MUTEX_INITIALIZER;
/* Report quiescent state interval every 1024 lookups. Larger critical
 * sections in the reader will result in the writer polling multiple times.
 */
#define QSBR_REPORTING_INTERVAL 1024
#define TEST_LPM_ASSERT(cond) do { \
	if (!(cond)) { \
		printf("Error at line %d:\n", __LINE__); \
		return -1; \
	} \
} while (0)
#define ITERATIONS (1 << 10)
#define RCU_ITERATIONS 10
#define BATCH_SIZE (1 << 12)

#define MAX_RULE_NUM (1200000)
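
/* Bulk-lookup batch size used below; 32 is assumed here, any divisor of
 * BATCH_SIZE works.
 */
#define BULK_SIZE 32

/* One route entry: IPv4 prefix and its depth (prefix length). */
struct route_rule {
	uint32_t ip;
	uint8_t depth;
};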
static struct route_rule large_route_table[MAX_RULE_NUM];
/* Route table for routes with depth > 24 */
static struct route_rule large_ldepth_route_table[MAX_RULE_NUM];
static uint32_t num_route_entries;
static uint32_t num_ldepth_route_entries;
#define NUM_ROUTE_ENTRIES num_route_entries
#define NUM_LDEPTH_ROUTE_ENTRIES num_ldepth_route_entries
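
/* IP address classes for rule generation; this enum is assumed from the
 * ip_class parameter used below.
 */
enum {
	IP_CLASS_A,
	IP_CLASS_B,
	IP_CLASS_C
};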
/* struct route_rule_count defines the total number of rules for the
 * following a/b/c arrays. Each entry in a[]/b[]/c[] is the number of rules
 * of the common IP address classes A/B/C at that depth, not including the
 * ones for private local networks.
 */
struct route_rule_count {
	uint32_t a[RTE_LPM_MAX_DEPTH];
	uint32_t b[RTE_LPM_MAX_DEPTH];
	uint32_t c[RTE_LPM_MAX_DEPTH];
};
/* The per-depth counts below for each common IP class were taken from the
 * previous large constant table in app/test/test_lpm_routes.h. To achieve
 * comparable performance, they keep the same depth and IP address coverage
 * as that constant table. These numbers do not include any private local IP
 * addresses. As the previous large constant rule table was dumped from a
 * real router, there are no conflicting IP addresses among these rules.
 */
static struct route_rule_count rule_count = {
	.a = { /* IP class A in which the most significant bit is 0 */
		144,	/* depth = 12 */
		233,	/* depth = 13 */
		528,	/* depth = 14 */
		866,	/* depth = 15 */
		3856,	/* depth = 16 */
		3268,	/* depth = 17 */
		5662,	/* depth = 18 */
		17301,	/* depth = 19 */
		22226,	/* depth = 20 */
		11147,	/* depth = 21 */
		16746,	/* depth = 22 */
		17120,	/* depth = 23 */
		77578,	/* depth = 24 */
		401,	/* depth = 25 */
		656,	/* depth = 26 */
		1107,	/* depth = 27 */
		1121,	/* depth = 28 */
		2316,	/* depth = 29 */
		717,	/* depth = 30 */
	.b = { /* IP class B in which the most significant 2 bits are 10 */
		168,	/* depth = 12 */
		305,	/* depth = 13 */
		569,	/* depth = 14 */
		1129,	/* depth = 15 */
		50800,	/* depth = 16 */
		1645,	/* depth = 17 */
		1820,	/* depth = 18 */
		3506,	/* depth = 19 */
		3258,	/* depth = 20 */
		3424,	/* depth = 21 */
		4971,	/* depth = 22 */
		6885,	/* depth = 23 */
		39771,	/* depth = 24 */
		424,	/* depth = 25 */
		170,	/* depth = 26 */
		433,	/* depth = 27 */
		366,	/* depth = 29 */
		377,	/* depth = 30 */
	.c = { /* IP class C in which the most significant 3 bits are 110 */
		237,	/* depth = 13 */
		1007,	/* depth = 14 */
		1717,	/* depth = 15 */
		14663,	/* depth = 16 */
		8070,	/* depth = 17 */
		16185,	/* depth = 18 */
		48261,	/* depth = 19 */
		36870,	/* depth = 20 */
		33960,	/* depth = 21 */
		50638,	/* depth = 22 */
		61422,	/* depth = 23 */
		466549,	/* depth = 24 */
		1829,	/* depth = 25 */
		4824,	/* depth = 26 */
		4927,	/* depth = 27 */
		5914,	/* depth = 28 */
		10254,	/* depth = 29 */
		4905,	/* depth = 30 */
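
/* Generate the configured number of pseudo-random prefixes of the given
 * depth for one IP class and append them to large_route_table (and, for
 * depth > 24, to large_ldepth_route_table as well).
 */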
static void generate_random_rule_prefix(uint32_t ip_class, uint8_t depth)
{
/* IP address class A, the most significant bit is 0 */
#define IP_HEAD_MASK_A 0x00000000
#define IP_HEAD_BIT_NUM_A 1

/* IP address class B, the most significant 2 bits are 10 */
#define IP_HEAD_MASK_B 0x80000000
#define IP_HEAD_BIT_NUM_B 2

/* IP address class C, the most significant 3 bits are 110 */
#define IP_HEAD_MASK_C 0xC0000000
#define IP_HEAD_BIT_NUM_C 3

	uint32_t class_depth;
	uint32_t range, mask, step, start;
	uint32_t fixed_bit_num;
	uint32_t ip_head_mask;
	uint32_t rule_num;
	uint32_t k;
	struct route_rule *ptr_rule, *ptr_ldepth_rule;

	if (ip_class == IP_CLASS_A) { /* IP Address class A */
		fixed_bit_num = IP_HEAD_BIT_NUM_A;
		ip_head_mask = IP_HEAD_MASK_A;
		rule_num = rule_count.a[depth - 1];
	} else if (ip_class == IP_CLASS_B) { /* IP Address class B */
		fixed_bit_num = IP_HEAD_BIT_NUM_B;
		ip_head_mask = IP_HEAD_MASK_B;
		rule_num = rule_count.b[depth - 1];
	} else { /* IP Address class C */
		fixed_bit_num = IP_HEAD_BIT_NUM_C;
		ip_head_mask = IP_HEAD_MASK_C;
		rule_num = rule_count.c[depth - 1];
	}
	/* the number of remaining bits, not counting the most significant
	 * fixed bits of this IP address class
	 */
	class_depth = depth - fixed_bit_num;

	/* range is the maximum number of rules for this depth and
	 * this IP address class
	 */
	range = 1 << class_depth;

	/* mask covers only the generated bits of this depth, excluding the
	 * fixed bits of the IP address class
	 */
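	mask = range - 1;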
	/* Widen coverage of IP addresses in generated rules */
	if (range <= rule_num)
		step = 1;
	else
		step = round((double)range / rule_num);

	/* Only generate the remaining bits, not the most significant
	 * fixed bits of the IP address class
	 */
	start = lrand48() & mask;
	ptr_rule = &large_route_table[num_route_entries];
	ptr_ldepth_rule = &large_ldepth_route_table[num_ldepth_route_entries];
	for (k = 0; k < rule_num; k++) {
		ptr_rule->ip = (start << (RTE_LPM_MAX_DEPTH - depth))
			| ip_head_mask;
		ptr_rule->depth = depth;
		/* If the depth of the route is more than 24, store it
		 * in another table as well.
		 */
		if (depth > 24) {
			ptr_ldepth_rule->ip = ptr_rule->ip;
			ptr_ldepth_rule->depth = ptr_rule->depth;
			ptr_ldepth_rule++;
			num_ldepth_route_entries++;
		}
		ptr_rule++;
		start = (start + step) & mask;
	}
	num_route_entries += rule_num;
}
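
/* Overwrite a pseudo-randomly chosen slot of large_route_table with the
 * given rule and move the displaced rule to the end of the table.
 */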
static void insert_rule_in_random_pos(uint32_t ip, uint8_t depth)
{
	uint32_t pos;
	uint32_t try_count = 0;
	struct route_rule tmp;

	do {
		pos = lrand48();
		try_count++;
	} while ((try_count < 10) && (pos > num_route_entries));
	if ((pos > num_route_entries) || (pos >= MAX_RULE_NUM))
		pos = num_route_entries >> 1;

	tmp = large_route_table[pos];
	large_route_table[pos].ip = ip;
	large_route_table[pos].depth = depth;
	if (num_route_entries < MAX_RULE_NUM)
		large_route_table[num_route_entries++] = tmp;
}
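
/* Build the full random rule table: generate rules for every class and
 * depth, then inject a few fixed private/default routes at random positions.
 */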
static void generate_large_route_rule_table(void)
{
	uint32_t ip_class;
	uint8_t depth;

	num_route_entries = 0;
	num_ldepth_route_entries = 0;
	memset(large_route_table, 0, sizeof(large_route_table));

	for (ip_class = IP_CLASS_A; ip_class <= IP_CLASS_C; ip_class++) {
		for (depth = 1; depth <= RTE_LPM_MAX_DEPTH; depth++) {
			generate_random_rule_prefix(ip_class, depth);
		}
	}

	/* Add the following rules to match the previous large constant table:
	 * four rules with private local IP addresses and one all-zero prefix.
	 */
	insert_rule_in_random_pos(RTE_IPV4(0, 0, 0, 0), 8);
	insert_rule_in_random_pos(RTE_IPV4(10, 2, 23, 147), 32);
	insert_rule_in_random_pos(RTE_IPV4(192, 168, 100, 10), 24);
	insert_rule_in_random_pos(RTE_IPV4(192, 168, 25, 100), 24);
	insert_rule_in_random_pos(RTE_IPV4(192, 168, 129, 124), 32);
}
static void
print_route_distribution(const struct route_rule *table, uint32_t n)
{
	unsigned int i, j;

	printf("Route distribution per prefix width:\n");
	printf("DEPTH    QUANTITY (PERCENT)\n");
	printf("---------------------------\n");

	for (i = 1; i <= 32; i++) {
		unsigned int depth_counter = 0;
		double percent_hits;

		for (j = 0; j < n; j++)
			if (table[j].depth == (uint8_t)i)
				depth_counter++;

		percent_hits = ((double)depth_counter) / ((double)n) * 100;
		printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
	}
}
/* Cores enabled for the test threads and how many of them there are. */
static uint16_t enabled_core_ids[RTE_MAX_LCORE];
static unsigned int num_cores;
/* Simple way to allocate thread ids in 0 to RTE_MAX_LCORE space */
static inline uint32_t
alloc_thread_id(void)
{
	uint32_t tmp_thr_id;

	tmp_thr_id = __atomic_fetch_add(&thr_id, 1, __ATOMIC_RELAXED);
	if (tmp_thr_id >= RTE_MAX_LCORE)
		printf("Invalid thread id %u\n", tmp_thr_id);

	return tmp_thr_id;
}
/*
 * Reader thread using the rte_lpm data structure without RCU.
 */
static int
test_lpm_reader(void *arg)
{
	uint32_t i;
	uint32_t ip_batch[QSBR_REPORTING_INTERVAL];
	uint32_t next_hop_return = 0;

	RTE_SET_USED(arg);
	do {
		for (i = 0; i < QSBR_REPORTING_INTERVAL; i++)
			ip_batch[i] = rte_rand();

		for (i = 0; i < QSBR_REPORTING_INTERVAL; i++)
			rte_lpm_lookup(lpm, ip_batch[i], &next_hop_return);

	} while (!writer_done);

	return 0;
}

/*
 * Reader thread using the rte_lpm data structure with RCU.
 */
static int
test_lpm_rcu_qsbr_reader(void *arg)
{
	uint32_t i;
	uint32_t thread_id = alloc_thread_id();
	uint32_t ip_batch[QSBR_REPORTING_INTERVAL];
	uint32_t next_hop_return = 0;

	RTE_SET_USED(arg);
	/* Register this thread to report quiescent state */
	rte_rcu_qsbr_thread_register(rv, thread_id);
	rte_rcu_qsbr_thread_online(rv, thread_id);

	do {
		for (i = 0; i < QSBR_REPORTING_INTERVAL; i++)
			ip_batch[i] = rte_rand();

		for (i = 0; i < QSBR_REPORTING_INTERVAL; i++)
			rte_lpm_lookup(lpm, ip_batch[i], &next_hop_return);

		/* Update quiescent state */
		rte_rcu_qsbr_quiescent(rv, thread_id);
	} while (!writer_done);

	rte_rcu_qsbr_thread_offline(rv, thread_id);
	rte_rcu_qsbr_thread_unregister(rv, thread_id);

	return 0;
}
/*
 * Writer thread using the rte_lpm data structure with RCU.
 */
static int
test_lpm_rcu_qsbr_writer(void *arg)
{
	unsigned int i, j, si, ei;
	uint64_t begin, total_cycles;
	uint8_t core_id = (uint8_t)((uintptr_t)arg);
	uint32_t next_hop_add = 0xAA;

	/* 2 writer threads are used */
	if (core_id % 2 == 0) {
		si = 0;
		ei = NUM_LDEPTH_ROUTE_ENTRIES / 2;
	} else {
		si = NUM_LDEPTH_ROUTE_ENTRIES / 2;
		ei = NUM_LDEPTH_ROUTE_ENTRIES;
	}
	/* Measure add/delete. */
	begin = rte_rdtsc_precise();
	for (i = 0; i < RCU_ITERATIONS; i++) {
		/* Add all the entries */
		for (j = si; j < ei; j++) {
			pthread_mutex_lock(&lpm_mutex);
			if (rte_lpm_add(lpm, large_ldepth_route_table[j].ip,
					large_ldepth_route_table[j].depth,
					next_hop_add) != 0) {
				printf("Failed to add iteration %d, route# %d\n",
					i, j);
			}
			pthread_mutex_unlock(&lpm_mutex);
		}

		/* Delete all the entries */
		for (j = si; j < ei; j++) {
			pthread_mutex_lock(&lpm_mutex);
			if (rte_lpm_delete(lpm, large_ldepth_route_table[j].ip,
					large_ldepth_route_table[j].depth) != 0) {
				printf("Failed to delete iteration %d, route# %d\n",
					i, j);
			}
			pthread_mutex_unlock(&lpm_mutex);
		}
	}
	total_cycles = rte_rdtsc_precise() - begin;

	__atomic_fetch_add(&gwrite_cycles, total_cycles, __ATOMIC_RELAXED);
	/* Each writer does one add and one delete per entry of its half of the
	 * table in every iteration.
	 */
	__atomic_fetch_add(&gwrites,
			2 * (ei - si) * RCU_ITERATIONS,
			__ATOMIC_RELAXED);

	return 0;
}
/*
 * Perf test: 2 writers, rest are readers.
 */
static int
test_lpm_rcu_perf_multi_writer(void)
{
	struct rte_lpm_config config;
	struct rte_lpm_rcu_config rcu_cfg = {0};
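	/* Remaining locals used below; types inferred from their use. */
	size_t sz;
	unsigned int i;
	uint16_t core_id;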
	if (rte_lcore_count() < 3) {
		printf("Not enough cores for lpm_rcu_perf_autotest, expecting at least 3\n");
		return TEST_SKIPPED;
	}

	num_cores = 0;
	RTE_LCORE_FOREACH_SLAVE(core_id) {
		enabled_core_ids[num_cores] = core_id;
		num_cores++;
	}

	printf("\nPerf test: 2 writers, %d readers, RCU integration enabled\n",
		num_cores - 2);
	/* Create LPM table */
	config.max_rules = NUM_LDEPTH_ROUTE_ENTRIES;
	config.number_tbl8s = NUM_LDEPTH_ROUTE_ENTRIES;
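	config.flags = 0;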
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);
	/* Init RCU variable */
	sz = rte_rcu_qsbr_get_memsize(num_cores);
	rv = (struct rte_rcu_qsbr *)rte_zmalloc("rcu0", sz,
			RTE_CACHE_LINE_SIZE);
	rte_rcu_qsbr_init(rv, num_cores);
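
	rcu_cfg.v = rv;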
	/* Assign the RCU variable to LPM */
	if (rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg) != 0) {
		printf("RCU variable assignment failed\n");
		return -1;
	}
	writer_done = 0;
	__atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
	__atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);

	__atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
	/* Launch reader threads */
	for (i = 2; i < num_cores; i++)
		rte_eal_remote_launch(test_lpm_rcu_qsbr_reader, NULL,
					enabled_core_ids[i]);

	/* Launch writer threads */
	for (i = 0; i < 2; i++)
		rte_eal_remote_launch(test_lpm_rcu_qsbr_writer,
					(void *)(uintptr_t)i,
					enabled_core_ids[i]);

	/* Wait for writer threads */
	for (i = 0; i < 2; i++)
		if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
			printf("Warning: lcore %u not finished.\n",
				enabled_core_ids[i]);
	printf("Total LPM Adds: %d\n",
		RCU_ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);
	printf("Total LPM Deletes: %d\n",
		RCU_ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);
	printf("Average LPM Add/Del: %"PRIu64" cycles\n",
		__atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
		__atomic_load_n(&gwrites, __ATOMIC_RELAXED)
		);

	writer_done = 1;
	/* Wait and check return value from reader threads */
	for (i = 2; i < num_cores; i++)
		if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
			printf("Warning: lcore %u not finished.\n",
				enabled_core_ids[i]);

	/* Test without RCU integration */
	printf("\nPerf test: 2 writers, %d readers, RCU integration disabled\n",
		num_cores - 2);
	/* Create LPM table */
	config.max_rules = NUM_LDEPTH_ROUTE_ENTRIES;
	config.number_tbl8s = NUM_LDEPTH_ROUTE_ENTRIES;
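	config.flags = 0;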
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);
	writer_done = 0;
	__atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
	__atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
	__atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
	/* Launch reader threads */
	for (i = 2; i < num_cores; i++)
		rte_eal_remote_launch(test_lpm_reader, NULL,
					enabled_core_ids[i]);

	/* Launch writer threads */
	for (i = 0; i < 2; i++)
		rte_eal_remote_launch(test_lpm_rcu_qsbr_writer,
					(void *)(uintptr_t)i,
					enabled_core_ids[i]);

	/* Wait for writer threads */
	for (i = 0; i < 2; i++)
		if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
			printf("Warning: lcore %u not finished.\n",
				enabled_core_ids[i]);
	printf("Total LPM Adds: %d\n",
		RCU_ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);
	printf("Total LPM Deletes: %d\n",
		RCU_ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);
	printf("Average LPM Add/Del: %"PRIu64" cycles\n",
		__atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
		__atomic_load_n(&gwrites, __ATOMIC_RELAXED)
		);

	writer_done = 1;
	/* Wait and check return value from reader threads */
	for (i = 2; i < num_cores; i++)
		if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
			printf("Warning: lcore %u not finished.\n",
				enabled_core_ids[i]);
	/* Wait until all readers have exited */
	rte_eal_mp_wait_lcore();
/*
 * Perf test: single writer, rest are readers.
 */
static int
test_lpm_rcu_perf(void)
{
	struct rte_lpm_config config;
	uint64_t begin, total_cycles;
	uint32_t next_hop_add = 0xAA;
	struct rte_lpm_rcu_config rcu_cfg = {0};
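	/* Remaining locals used below; types inferred from their use. */
	size_t sz;
	unsigned int i, j;
	uint16_t core_id;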
	if (rte_lcore_count() < 2) {
		printf("Not enough cores for lpm_rcu_perf_autotest, expecting at least 2\n");
		return TEST_SKIPPED;
	}

	num_cores = 0;
	RTE_LCORE_FOREACH_SLAVE(core_id) {
		enabled_core_ids[num_cores] = core_id;
		num_cores++;
	}

	printf("\nPerf test: 1 writer, %d readers, RCU integration enabled\n",
		num_cores);
	/* Create LPM table */
	config.max_rules = NUM_LDEPTH_ROUTE_ENTRIES;
	config.number_tbl8s = NUM_LDEPTH_ROUTE_ENTRIES;
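	config.flags = 0;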
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);
	/* Init RCU variable */
	sz = rte_rcu_qsbr_get_memsize(num_cores);
	rv = (struct rte_rcu_qsbr *)rte_zmalloc("rcu0", sz,
			RTE_CACHE_LINE_SIZE);
	rte_rcu_qsbr_init(rv, num_cores);
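
	rcu_cfg.v = rv;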
	/* Assign the RCU variable to LPM */
	if (rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg) != 0) {
		printf("RCU variable assignment failed\n");
		return -1;
	}
	writer_done = 0;
	__atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
	/* Launch reader threads */
	for (i = 0; i < num_cores; i++)
		rte_eal_remote_launch(test_lpm_rcu_qsbr_reader, NULL,
					enabled_core_ids[i]);
	/* Measure add/delete. */
	begin = rte_rdtsc_precise();
	for (i = 0; i < RCU_ITERATIONS; i++) {
		/* Add all the entries */
		for (j = 0; j < NUM_LDEPTH_ROUTE_ENTRIES; j++)
			if (rte_lpm_add(lpm, large_ldepth_route_table[j].ip,
					large_ldepth_route_table[j].depth,
					next_hop_add) != 0) {
				printf("Failed to add iteration %d, route# %d\n",
					i, j);
			}

		/* Delete all the entries */
		for (j = 0; j < NUM_LDEPTH_ROUTE_ENTRIES; j++)
			if (rte_lpm_delete(lpm, large_ldepth_route_table[j].ip,
					large_ldepth_route_table[j].depth) != 0) {
				printf("Failed to delete iteration %d, route# %d\n",
					i, j);
			}
	}
	total_cycles = rte_rdtsc_precise() - begin;
	printf("Total LPM Adds: %d\n",
		RCU_ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);
	printf("Total LPM Deletes: %d\n",
		RCU_ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);
	printf("Average LPM Add/Del: %g cycles\n",
		(double)total_cycles / (NUM_LDEPTH_ROUTE_ENTRIES * RCU_ITERATIONS));

	writer_done = 1;
	/* Wait and check return value from reader threads */
	for (i = 0; i < num_cores; i++)
		if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
			printf("Warning: lcore %u not finished.\n",
				enabled_core_ids[i]);

	/* Test without RCU integration */
	printf("\nPerf test: 1 writer, %d readers, RCU integration disabled\n",
		num_cores);
	/* Create LPM table */
	config.max_rules = NUM_LDEPTH_ROUTE_ENTRIES;
	config.number_tbl8s = NUM_LDEPTH_ROUTE_ENTRIES;
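	config.flags = 0;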
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);
	writer_done = 0;
	__atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);
	/* Launch reader threads */
	for (i = 0; i < num_cores; i++)
		rte_eal_remote_launch(test_lpm_reader, NULL,
					enabled_core_ids[i]);
	/* Measure add/delete. */
	begin = rte_rdtsc_precise();
	for (i = 0; i < RCU_ITERATIONS; i++) {
		/* Add all the entries */
		for (j = 0; j < NUM_LDEPTH_ROUTE_ENTRIES; j++)
			if (rte_lpm_add(lpm, large_ldepth_route_table[j].ip,
					large_ldepth_route_table[j].depth,
					next_hop_add) != 0) {
				printf("Failed to add iteration %d, route# %d\n",
					i, j);
			}

		/* Delete all the entries */
		for (j = 0; j < NUM_LDEPTH_ROUTE_ENTRIES; j++)
			if (rte_lpm_delete(lpm, large_ldepth_route_table[j].ip,
					large_ldepth_route_table[j].depth) != 0) {
				printf("Failed to delete iteration %d, route# %d\n",
					i, j);
			}
	}
	total_cycles = rte_rdtsc_precise() - begin;
	printf("Total LPM Adds: %d\n",
		RCU_ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);
	printf("Total LPM Deletes: %d\n",
		RCU_ITERATIONS * NUM_LDEPTH_ROUTE_ENTRIES);
	printf("Average LPM Add/Del: %g cycles\n",
		(double)total_cycles / (NUM_LDEPTH_ROUTE_ENTRIES * RCU_ITERATIONS));

	writer_done = 1;
	/* Wait and check return value from reader threads */
	for (i = 0; i < num_cores; i++)
		if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
			printf("Warning: lcore %u not finished.\n",
				enabled_core_ids[i]);
	/* Wait until all readers have exited */
	rte_eal_mp_wait_lcore();
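
/*
 * Single-core performance test: add, lookup (single, bulk, x4) and delete.
 * (Signature reconstructed from the REGISTER_TEST_COMMAND entry below.)
 */
static int
test_lpm_perf(void)
{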
	struct rte_lpm_config config;

	config.max_rules = 2000000;
	config.number_tbl8s = 2048;
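	config.flags = 0;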
	uint64_t begin, total_time, lpm_used_entries = 0;
	uint32_t next_hop_add = 0xAA, next_hop_return = 0;
	uint64_t cache_line_counter = 0;
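	/* Remaining locals used below; types inferred from their use. */
	unsigned int i, j, k;
	int status = 0;
	int64_t count = 0;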
	rte_srand(rte_rdtsc());

	generate_large_route_rule_table();

	printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);

	print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);
	/* Measure add */
	begin = rte_rdtsc();

	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
		if (rte_lpm_add(lpm, large_route_table[i].ip,
				large_route_table[i].depth, next_hop_add) == 0)
			status++;
	}

	total_time = rte_rdtsc() - begin;
	printf("Unique added entries = %d\n", status);

	/* Obtain add statistics. */
	for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
		if (lpm->tbl24[i].valid)
			lpm_used_entries++;

		if (i % 32 == 0) {
			if ((uint64_t)count < lpm_used_entries) {
				cache_line_counter++;
				count = lpm_used_entries;
			}
		}
	}
	printf("Used table 24 entries = %u (%g%%)\n",
		(unsigned) lpm_used_entries,
		(lpm_used_entries * 100.0) / RTE_LPM_TBL24_NUM_ENTRIES);
	printf("64 byte Cache entries used = %u (%u bytes)\n",
		(unsigned) cache_line_counter, (unsigned) cache_line_counter * 64);

	printf("Average LPM Add: %g cycles\n",
		(double)total_time / NUM_ROUTE_ENTRIES);
	/* Measure single Lookup */
	total_time = 0;
	count = 0;

	for (i = 0; i < ITERATIONS; i++) {
		static uint32_t ip_batch[BATCH_SIZE];

		for (j = 0; j < BATCH_SIZE; j++)
			ip_batch[j] = rte_rand();

		/* Lookup per batch */
		begin = rte_rdtsc();

		for (j = 0; j < BATCH_SIZE; j++) {
			if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
				count++;
		}

		total_time += rte_rdtsc() - begin;
	}

	printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
		(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
		(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
	/* Measure bulk Lookup */
	total_time = 0;
	count = 0;

	for (i = 0; i < ITERATIONS; i++) {
		static uint32_t ip_batch[BATCH_SIZE];
		uint32_t next_hops[BULK_SIZE];

		/* Create array of random IP addresses */
		for (j = 0; j < BATCH_SIZE; j++)
			ip_batch[j] = rte_rand();

		/* Lookup per batch */
		begin = rte_rdtsc();
		for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
			rte_lpm_lookup_bulk(lpm, &ip_batch[j], next_hops, BULK_SIZE);
			for (k = 0; k < BULK_SIZE; k++)
				if (unlikely(!(next_hops[k] & RTE_LPM_LOOKUP_SUCCESS)))
					count++;
		}

		total_time += rte_rdtsc() - begin;
	}

	printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
		(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
		(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
	/* Measure LookupX4 */
	total_time = 0;
	count = 0;

	for (i = 0; i < ITERATIONS; i++) {
		static uint32_t ip_batch[BATCH_SIZE];
		uint32_t next_hops[4];
		xmm_t ipx4;

		/* Create array of random IP addresses */
		for (j = 0; j < BATCH_SIZE; j++)
			ip_batch[j] = rte_rand();

		/* Lookup per batch */
		begin = rte_rdtsc();
		for (j = 0; j < BATCH_SIZE; j += RTE_DIM(next_hops)) {
			/* Unaligned-safe vector load of four IPv4 addresses */
			ipx4 = vect_loadu_sil128((xmm_t *)(ip_batch + j));
			rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT32_MAX);
			for (k = 0; k < RTE_DIM(next_hops); k++)
				if (unlikely(next_hops[k] == UINT32_MAX))
					count++;
		}

		total_time += rte_rdtsc() - begin;
	}

	printf("LPM LookupX4: %.1f cycles (fails = %.1f%%)\n",
		(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
		(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
	/* Measure Delete */
	begin = rte_rdtsc();

	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
		/* rte_lpm_delete(lpm, ip, depth) */
		status += rte_lpm_delete(lpm, large_route_table[i].ip,
				large_route_table[i].depth);
	}

	total_time = rte_rdtsc() - begin;

	printf("Average LPM Delete: %g cycles\n",
		(double)total_time / NUM_ROUTE_ENTRIES);
	rte_lpm_delete_all(lpm);
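	rte_lpm_free(lpm);

	/* Assumption: the single-writer RCU perf variant is invoked here as
	 * well, alongside the multi-writer variant below.
	 */
	test_lpm_rcu_perf();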
	test_lpm_rcu_perf_multi_writer();
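
	return 0;
}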
REGISTER_TEST_COMMAND(lpm_perf_autotest, test_lpm_perf);