app/test: rework command registration
[dpdk.git] app/test/test_lpm_perf.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#include <rte_cycles.h>
#include <rte_random.h>
#include <rte_branch_prediction.h>
#include <rte_lpm.h>

#include "test.h"
#include "test_lpm_routes.h"
#include "test_xmmt_ops.h"

#define TEST_LPM_ASSERT(cond) do {                                            \
	if (!(cond)) {                                                        \
		printf("Error at line %d: \n", __LINE__);                     \
		return -1;                                                    \
	}                                                                     \
} while(0)

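/*
 * Each lookup benchmark below runs ITERATIONS batches of BATCH_SIZE random
 * IPv4 addresses; the bulk variant issues them BULK_SIZE addresses at a time.
 */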
#define ITERATIONS (1 << 10)
#define BATCH_SIZE (1 << 12)
#define BULK_SIZE 32

static void
print_route_distribution(const struct route_rule *table, uint32_t n)
{
	unsigned i, j;

	printf("Route distribution per prefix width: \n");
	printf("DEPTH    QUANTITY (PERCENT)\n");
	printf("--------------------------- \n");

	/* Count depths. */
	for (i = 1; i <= 32; i++) {
		unsigned depth_counter = 0;
		double percent_hits;

		for (j = 0; j < n; j++)
			if (table[j].depth == (uint8_t) i)
				depth_counter++;

		percent_hits = ((double)depth_counter)/((double)n) * 100;
		printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
	}
	printf("\n");
}

static int
test_lpm_perf(void)
{
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = 1000000;
	config.number_tbl8s = 256;
	config.flags = 0;
	uint64_t begin, total_time, lpm_used_entries = 0;
	unsigned i, j;
	uint32_t next_hop_add = 0xAA, next_hop_return = 0;
	int status = 0;
	uint64_t cache_line_counter = 0;
	int64_t count = 0;

	rte_srand(rte_rdtsc());

	printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);

	print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	/* Measure add. */
	begin = rte_rdtsc();

	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
		if (rte_lpm_add(lpm, large_route_table[i].ip,
				large_route_table[i].depth, next_hop_add) == 0)
			status++;
	}
	/* End Timer. */
	total_time = rte_rdtsc() - begin;

	printf("Unique added entries = %d\n", status);
	/* Obtain add statistics. */
	for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
		if (lpm->tbl24[i].valid)
			lpm_used_entries++;

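		/* Count one 64-byte cache line for every block of 32
		 * consecutive tbl24 entries that gained at least one
		 * valid entry. */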
		if (i % 32 == 0) {
			if ((uint64_t)count < lpm_used_entries) {
				cache_line_counter++;
				count = lpm_used_entries;
			}
		}
	}

	printf("Used table 24 entries = %u (%g%%)\n",
			(unsigned) lpm_used_entries,
			(lpm_used_entries * 100.0) / RTE_LPM_TBL24_NUM_ENTRIES);
	printf("64 byte Cache entries used = %u (%u bytes)\n",
			(unsigned) cache_line_counter, (unsigned) cache_line_counter * 64);

	printf("Average LPM Add: %g cycles\n",
			(double)total_time / NUM_ROUTE_ENTRIES);

	/* Measure single Lookup */
	total_time = 0;
	count = 0;

	for (i = 0; i < ITERATIONS; i++) {
		static uint32_t ip_batch[BATCH_SIZE];

		for (j = 0; j < BATCH_SIZE; j++)
			ip_batch[j] = rte_rand();

		/* Lookup per batch */
		begin = rte_rdtsc();

		for (j = 0; j < BATCH_SIZE; j++) {
			if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
				count++;
		}

		total_time += rte_rdtsc() - begin;

	}
	printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));

	/* Measure bulk Lookup */
	total_time = 0;
	count = 0;
	for (i = 0; i < ITERATIONS; i++) {
		static uint32_t ip_batch[BATCH_SIZE];
		uint32_t next_hops[BULK_SIZE];

		/* Create array of random IP addresses */
		for (j = 0; j < BATCH_SIZE; j++)
			ip_batch[j] = rte_rand();

		/* Lookup per batch */
		begin = rte_rdtsc();
		for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
			unsigned k;
			rte_lpm_lookup_bulk(lpm, &ip_batch[j], next_hops, BULK_SIZE);
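			/* The bulk API flags each result with
			 * RTE_LPM_LOOKUP_SUCCESS on a hit. */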
			for (k = 0; k < BULK_SIZE; k++)
				if (unlikely(!(next_hops[k] & RTE_LPM_LOOKUP_SUCCESS)))
					count++;
		}

		total_time += rte_rdtsc() - begin;
	}
	printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));

	/* Measure LookupX4 */
	total_time = 0;
	count = 0;
	for (i = 0; i < ITERATIONS; i++) {
		static uint32_t ip_batch[BATCH_SIZE];
		uint32_t next_hops[4];

		/* Create array of random IP addresses */
		for (j = 0; j < BATCH_SIZE; j++)
			ip_batch[j] = rte_rand();

		/* Lookup per batch */
		begin = rte_rdtsc();
		for (j = 0; j < BATCH_SIZE; j += RTE_DIM(next_hops)) {
			unsigned k;
			xmm_t ipx4;

			/* Gather four addresses with one unaligned vector load. */
			ipx4 = vect_loadu_sil128((xmm_t *)(ip_batch + j));
			rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT32_MAX);
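			/* UINT32_MAX is the default next hop written back
			 * for any of the four addresses that missed. */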
			for (k = 0; k < RTE_DIM(next_hops); k++)
				if (unlikely(next_hops[k] == UINT32_MAX))
					count++;
		}

		total_time += rte_rdtsc() - begin;
	}
	printf("LPM LookupX4: %.1f cycles (fails = %.1f%%)\n",
			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));

	/* Measure Delete */
	status = 0;
	begin = rte_rdtsc();

	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
		/* rte_lpm_delete(lpm, ip, depth) */
		status += rte_lpm_delete(lpm, large_route_table[i].ip,
				large_route_table[i].depth);
	}

	total_time = rte_rdtsc() - begin;

	printf("Average LPM Delete: %g cycles\n",
			(double)total_time / NUM_ROUTE_ENTRIES);

	rte_lpm_delete_all(lpm);
	rte_lpm_free(lpm);

	return 0;
}

REGISTER_TEST_COMMAND(lpm_perf_autotest, test_lpm_perf);