SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_scaling.c
SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm.c
+SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm_perf.c
SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm6.c
+SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm6_perf.c
SRCS-y += test_debug.c
SRCS-y += test_errno.c
"Func" : default_autotest,
"Report" : None,
},
+ {
+ "Name" : "LPM6 autotest",
+ "Command" : "lpm6_autotest",
+ "Func" : default_autotest,
+ "Report" : None,
+ },
{
"Name" : "IVSHMEM autotest",
"Command" : "ivshmem_autotest",
},
]
},
-{
- "Prefix" : "lpm6",
- "Memory" : "512",
- "Tests" :
- [
- {
- "Name" : "LPM6 autotest",
- "Command" : "lpm6_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
{
"Prefix": "timer_perf",
"Memory" : per_sockets(512),
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
-#include <errno.h>
-#include <sys/queue.h>
-#include <rte_common.h>
-#include <rte_cycles.h>
-#include <rte_memory.h>
-#include <rte_random.h>
-#include <rte_branch_prediction.h>
-#include <rte_ip.h>
-#include <time.h>
+#include <rte_lpm.h>
#include "test.h"
-
-#include "rte_lpm.h"
#include "test_lpm_routes.h"
#include "test_xmmt_ops.h"
static int32_t test15(void);
static int32_t test16(void);
static int32_t test17(void);
-static int32_t perf_test(void);
rte_lpm_test tests[] = {
/* Test Cases */
test15,
test16,
test17,
- perf_test,
};
#define NUM_LPM_TESTS (sizeof(tests)/sizeof(tests[0]))
}
/*
- * Lookup performance test
- */
-
-#define ITERATIONS (1 << 10)
-#define BATCH_SIZE (1 << 12)
-#define BULK_SIZE 32
-
-static void
-print_route_distribution(const struct route_rule *table, uint32_t n)
-{
- unsigned i, j;
-
- printf("Route distribution per prefix width: \n");
- printf("DEPTH QUANTITY (PERCENT)\n");
- printf("--------------------------- \n");
-
- /* Count depths. */
- for (i = 1; i <= 32; i++) {
- unsigned depth_counter = 0;
- double percent_hits;
-
- for (j = 0; j < n; j++)
- if (table[j].depth == (uint8_t) i)
- depth_counter++;
-
- percent_hits = ((double)depth_counter)/((double)n) * 100;
- printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
- }
- printf("\n");
-}
-
-int32_t
-perf_test(void)
-{
- struct rte_lpm *lpm = NULL;
- struct rte_lpm_config config;
-
- config.max_rules = 1000000;
- config.number_tbl8s = NUMBER_TBL8S;
- config.flags = 0;
- uint64_t begin, total_time, lpm_used_entries = 0;
- unsigned i, j;
- uint32_t next_hop_add = 0xAA, next_hop_return = 0;
- int status = 0;
- uint64_t cache_line_counter = 0;
- int64_t count = 0;
-
- rte_srand(rte_rdtsc());
-
- printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);
-
- print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);
-
- lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
- TEST_LPM_ASSERT(lpm != NULL);
-
- /* Measue add. */
- begin = rte_rdtsc();
-
- for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
- if (rte_lpm_add(lpm, large_route_table[i].ip,
- large_route_table[i].depth, next_hop_add) == 0)
- status++;
- }
- /* End Timer. */
- total_time = rte_rdtsc() - begin;
-
- printf("Unique added entries = %d\n", status);
- /* Obtain add statistics. */
- for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
- if (lpm->tbl24[i].valid)
- lpm_used_entries++;
-
- if (i % 32 == 0) {
- if ((uint64_t)count < lpm_used_entries) {
- cache_line_counter++;
- count = lpm_used_entries;
- }
- }
- }
-
- printf("Used table 24 entries = %u (%g%%)\n",
- (unsigned) lpm_used_entries,
- (lpm_used_entries * 100.0) / RTE_LPM_TBL24_NUM_ENTRIES);
- printf("64 byte Cache entries used = %u (%u bytes)\n",
- (unsigned) cache_line_counter, (unsigned) cache_line_counter * 64);
-
- printf("Average LPM Add: %g cycles\n",
- (double)total_time / NUM_ROUTE_ENTRIES);
-
- /* Measure single Lookup */
- total_time = 0;
- count = 0;
-
- for (i = 0; i < ITERATIONS; i++) {
- static uint32_t ip_batch[BATCH_SIZE];
-
- for (j = 0; j < BATCH_SIZE; j++)
- ip_batch[j] = rte_rand();
-
- /* Lookup per batch */
- begin = rte_rdtsc();
-
- for (j = 0; j < BATCH_SIZE; j++) {
- if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
- count++;
- }
-
- total_time += rte_rdtsc() - begin;
-
- }
- printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
- (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
- (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
-
- /* Measure bulk Lookup */
- total_time = 0;
- count = 0;
- for (i = 0; i < ITERATIONS; i++) {
- static uint32_t ip_batch[BATCH_SIZE];
- uint32_t next_hops[BULK_SIZE];
-
- /* Create array of random IP addresses */
- for (j = 0; j < BATCH_SIZE; j++)
- ip_batch[j] = rte_rand();
-
- /* Lookup per batch */
- begin = rte_rdtsc();
- for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
- unsigned k;
- rte_lpm_lookup_bulk(lpm, &ip_batch[j], next_hops, BULK_SIZE);
- for (k = 0; k < BULK_SIZE; k++)
- if (unlikely(!(next_hops[k] & RTE_LPM_LOOKUP_SUCCESS)))
- count++;
- }
-
- total_time += rte_rdtsc() - begin;
- }
- printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
- (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
- (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
-
- /* Measure LookupX4 */
- total_time = 0;
- count = 0;
- for (i = 0; i < ITERATIONS; i++) {
- static uint32_t ip_batch[BATCH_SIZE];
- uint32_t next_hops[4];
-
- /* Create array of random IP addresses */
- for (j = 0; j < BATCH_SIZE; j++)
- ip_batch[j] = rte_rand();
-
- /* Lookup per batch */
- begin = rte_rdtsc();
- for (j = 0; j < BATCH_SIZE; j += RTE_DIM(next_hops)) {
- unsigned k;
- xmm_t ipx4;
-
- ipx4 = vect_loadu_sil128((xmm_t *)(ip_batch + j));
- ipx4 = *(xmm_t *)(ip_batch + j);
- rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT32_MAX);
- for (k = 0; k < RTE_DIM(next_hops); k++)
- if (unlikely(next_hops[k] == UINT32_MAX))
- count++;
- }
-
- total_time += rte_rdtsc() - begin;
- }
- printf("LPM LookupX4: %.1f cycles (fails = %.1f%%)\n",
- (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
- (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
-
- /* Delete */
- status = 0;
- begin = rte_rdtsc();
-
- for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
- /* rte_lpm_delete(lpm, ip, depth) */
- status += rte_lpm_delete(lpm, large_route_table[i].ip,
- large_route_table[i].depth);
- }
-
- total_time += rte_rdtsc() - begin;
-
- printf("Average LPM Delete: %g cycles\n",
- (double)total_time / NUM_ROUTE_ENTRIES);
-
- rte_lpm_delete_all(lpm);
- rte_lpm_free(lpm);
-
- return PASS;
-}
-
-/*
- * Do all unit and performance tests.
+ * Do all unit tests.
*/
static int
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
-#include <errno.h>
-#include <sys/queue.h>
-
-#include <time.h>
-
-#include "test.h"
-#include <rte_common.h>
-#include <rte_cycles.h>
#include <rte_memory.h>
-#include <rte_random.h>
-#include <rte_branch_prediction.h>
-#include <rte_ip.h>
+#include <rte_lpm6.h>
-#include "rte_lpm6.h"
+#include "test.h"
#include "test_lpm6_routes.h"
#define TEST_LPM_ASSERT(cond) do { \
static int32_t test25(void);
static int32_t test26(void);
static int32_t test27(void);
-static int32_t perf_test(void);
rte_lpm6_test tests6[] = {
/* Test Cases */
test25,
test26,
test27,
- perf_test,
};
#define NUM_LPM6_TESTS (sizeof(tests6)/sizeof(tests6[0]))
}
/*
- * Lookup performance test
- */
-
-#define ITERATIONS (1 << 10)
-#define BATCH_SIZE 100000
-
-static void
-print_route_distribution(const struct rules_tbl_entry *table, uint32_t n)
-{
- unsigned i, j;
-
- printf("Route distribution per prefix width: \n");
- printf("DEPTH QUANTITY (PERCENT)\n");
- printf("--------------------------- \n");
-
- /* Count depths. */
- for(i = 1; i <= 128; i++) {
- unsigned depth_counter = 0;
- double percent_hits;
-
- for (j = 0; j < n; j++)
- if (table[j].depth == (uint8_t) i)
- depth_counter++;
-
- percent_hits = ((double)depth_counter)/((double)n) * 100;
- printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
- }
- printf("\n");
-}
-
-int32_t
-perf_test(void)
-{
- struct rte_lpm6 *lpm = NULL;
- struct rte_lpm6_config config;
- uint64_t begin, total_time;
- unsigned i, j;
- uint8_t next_hop_add = 0xAA, next_hop_return = 0;
- int status = 0;
- int64_t count = 0;
-
- config.max_rules = 1000000;
- config.number_tbl8s = NUMBER_TBL8S;
- config.flags = 0;
-
- rte_srand(rte_rdtsc());
-
- printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);
-
- print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);
-
- lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
- TEST_LPM_ASSERT(lpm != NULL);
-
- /* Measure add. */
- begin = rte_rdtsc();
-
- for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
- if (rte_lpm6_add(lpm, large_route_table[i].ip,
- large_route_table[i].depth, next_hop_add) == 0)
- status++;
- }
- /* End Timer. */
- total_time = rte_rdtsc() - begin;
-
- printf("Unique added entries = %d\n", status);
- printf("Average LPM Add: %g cycles\n",
- (double)total_time / NUM_ROUTE_ENTRIES);
-
- /* Measure single Lookup */
- total_time = 0;
- count = 0;
-
- for (i = 0; i < ITERATIONS; i ++) {
- begin = rte_rdtsc();
-
- for (j = 0; j < NUM_IPS_ENTRIES; j ++) {
- if (rte_lpm6_lookup(lpm, large_ips_table[j].ip,
- &next_hop_return) != 0)
- count++;
- }
-
- total_time += rte_rdtsc() - begin;
-
- }
- printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
- (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
- (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
-
- /* Measure bulk Lookup */
- total_time = 0;
- count = 0;
-
- uint8_t ip_batch[NUM_IPS_ENTRIES][16];
- int16_t next_hops[NUM_IPS_ENTRIES];
-
- for (i = 0; i < NUM_IPS_ENTRIES; i++)
- memcpy(ip_batch[i], large_ips_table[i].ip, 16);
-
- for (i = 0; i < ITERATIONS; i ++) {
-
- /* Lookup per batch */
- begin = rte_rdtsc();
- rte_lpm6_lookup_bulk_func(lpm, ip_batch, next_hops, NUM_IPS_ENTRIES);
- total_time += rte_rdtsc() - begin;
-
- for (j = 0; j < NUM_IPS_ENTRIES; j++)
- if (next_hops[j] < 0)
- count++;
- }
- printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
- (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
- (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
-
- /* Delete */
- status = 0;
- begin = rte_rdtsc();
-
- for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
- /* rte_lpm_delete(lpm, ip, depth) */
- status += rte_lpm6_delete(lpm, large_route_table[i].ip,
- large_route_table[i].depth);
- }
-
- total_time += rte_rdtsc() - begin;
-
- printf("Average LPM Delete: %g cycles\n",
- (double)total_time / NUM_ROUTE_ENTRIES);
-
- rte_lpm6_delete_all(lpm);
- rte_lpm6_free(lpm);
-
- return PASS;
-}
-
-/*
- * Do all unit and performance tests.
+ * Do all unit tests.
*/
static int
test_lpm6(void)
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_cycles.h>
+#include <rte_random.h>
+#include <rte_memory.h>
+#include <rte_lpm6.h>
+
+#include "test.h"
+#include "test_lpm6_routes.h"
+
+#define TEST_LPM_ASSERT(cond) do { \
+ if (!(cond)) { \
+ printf("Error at line %d: \n", __LINE__); \
+ return -1; \
+ } \
+} while(0)
+
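+/*
+ * ITERATIONS is the number of measurement rounds; BATCH_SIZE is the number
+ * of lookups per round used to normalise the cycle totals (expected to match
+ * NUM_IPS_ENTRIES from test_lpm6_routes.h); NUMBER_TBL8S sizes the tbl8
+ * group pool of the LPM6 table.
+ */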
+#define ITERATIONS (1 << 10)
+#define BATCH_SIZE 100000
+#define NUMBER_TBL8S (1 << 16)
+
+static void
+print_route_distribution(const struct rules_tbl_entry *table, uint32_t n)
+{
+ unsigned i, j;
+
+ printf("Route distribution per prefix width: \n");
+ printf("DEPTH QUANTITY (PERCENT)\n");
+ printf("--------------------------- \n");
+
+ /* Count depths. */
+	for (i = 1; i <= 128; i++) {
+ unsigned depth_counter = 0;
+ double percent_hits;
+
+ for (j = 0; j < n; j++)
+ if (table[j].depth == (uint8_t) i)
+ depth_counter++;
+
+ percent_hits = ((double)depth_counter)/((double)n) * 100;
+ printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
+ }
+ printf("\n");
+}
+
+static int
+test_lpm6_perf(void)
+{
+ struct rte_lpm6 *lpm = NULL;
+ struct rte_lpm6_config config;
+ uint64_t begin, total_time;
+ unsigned i, j;
+ uint8_t next_hop_add = 0xAA, next_hop_return = 0;
+ int status = 0;
+ int64_t count = 0;
+
+ config.max_rules = 1000000;
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
+
+ rte_srand(rte_rdtsc());
+
+ printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);
+
+ print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);
+
+ lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+ TEST_LPM_ASSERT(lpm != NULL);
+
+ /* Measure add. */
+ begin = rte_rdtsc();
+
+ for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
+ if (rte_lpm6_add(lpm, large_route_table[i].ip,
+ large_route_table[i].depth, next_hop_add) == 0)
+ status++;
+ }
+ /* End Timer. */
+ total_time = rte_rdtsc() - begin;
+
+ printf("Unique added entries = %d\n", status);
+ printf("Average LPM Add: %g cycles\n",
+ (double)total_time / NUM_ROUTE_ENTRIES);
+
+ /* Measure single Lookup */
+ total_time = 0;
+ count = 0;
+
+	for (i = 0; i < ITERATIONS; i++) {
+ begin = rte_rdtsc();
+
+		for (j = 0; j < NUM_IPS_ENTRIES; j++) {
+ if (rte_lpm6_lookup(lpm, large_ips_table[j].ip,
+ &next_hop_return) != 0)
+ count++;
+ }
+
+ total_time += rte_rdtsc() - begin;
+
+ }
+ printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
+ (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
+ (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
+
+ /* Measure bulk Lookup */
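+	/* rte_lpm6_lookup_bulk_func() resolves the whole batch per call and
+	 * reports a failed lookup as a negative next hop (checked below). */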
+ total_time = 0;
+ count = 0;
+
+ uint8_t ip_batch[NUM_IPS_ENTRIES][16];
+ int16_t next_hops[NUM_IPS_ENTRIES];
+
+ for (i = 0; i < NUM_IPS_ENTRIES; i++)
+ memcpy(ip_batch[i], large_ips_table[i].ip, 16);
+
+	for (i = 0; i < ITERATIONS; i++) {
+
+ /* Lookup per batch */
+ begin = rte_rdtsc();
+ rte_lpm6_lookup_bulk_func(lpm, ip_batch, next_hops, NUM_IPS_ENTRIES);
+ total_time += rte_rdtsc() - begin;
+
+ for (j = 0; j < NUM_IPS_ENTRIES; j++)
+ if (next_hops[j] < 0)
+ count++;
+ }
+ printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
+ (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
+ (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
+
+ /* Delete */
+ status = 0;
+ begin = rte_rdtsc();
+
+ for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
+		/* rte_lpm6_delete(lpm, ip, depth) */
+ status += rte_lpm6_delete(lpm, large_route_table[i].ip,
+ large_route_table[i].depth);
+ }
+
+	total_time = rte_rdtsc() - begin;
+
+ printf("Average LPM Delete: %g cycles\n",
+ (double)total_time / NUM_ROUTE_ENTRIES);
+
+ rte_lpm6_delete_all(lpm);
+ rte_lpm6_free(lpm);
+
+ return 0;
+}
+
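+/* Register the performance test as its own autotest command so it can be
+ * run independently of the LPM6 unit tests. */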
+static struct test_command lpm6_perf_cmd = {
+ .command = "lpm6_perf_autotest",
+ .callback = test_lpm6_perf,
+};
+REGISTER_TEST_COMMAND(lpm6_perf_cmd);
--- /dev/null
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include <rte_cycles.h>
+#include <rte_random.h>
+#include <rte_branch_prediction.h>
+#include <rte_lpm.h>
+
+#include "test.h"
+#include "test_lpm_routes.h"
+#include "test_xmmt_ops.h"
+
+#define TEST_LPM_ASSERT(cond) do { \
+ if (!(cond)) { \
+ printf("Error at line %d: \n", __LINE__); \
+ return -1; \
+ } \
+} while(0)
+
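+/*
+ * ITERATIONS measurement rounds of BATCH_SIZE random lookups each; bulk
+ * lookups are issued BULK_SIZE addresses at a time.
+ */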
+#define ITERATIONS (1 << 10)
+#define BATCH_SIZE (1 << 12)
+#define BULK_SIZE 32
+
+static void
+print_route_distribution(const struct route_rule *table, uint32_t n)
+{
+ unsigned i, j;
+
+ printf("Route distribution per prefix width: \n");
+ printf("DEPTH QUANTITY (PERCENT)\n");
+ printf("--------------------------- \n");
+
+ /* Count depths. */
+ for (i = 1; i <= 32; i++) {
+ unsigned depth_counter = 0;
+ double percent_hits;
+
+ for (j = 0; j < n; j++)
+ if (table[j].depth == (uint8_t) i)
+ depth_counter++;
+
+ percent_hits = ((double)depth_counter)/((double)n) * 100;
+ printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
+ }
+ printf("\n");
+}
+
+static int
+test_lpm_perf(void)
+{
+ struct rte_lpm *lpm = NULL;
+ struct rte_lpm_config config;
+
+ config.max_rules = 1000000;
+ config.number_tbl8s = 256;
+ config.flags = 0;
+ uint64_t begin, total_time, lpm_used_entries = 0;
+ unsigned i, j;
+ uint32_t next_hop_add = 0xAA, next_hop_return = 0;
+ int status = 0;
+ uint64_t cache_line_counter = 0;
+ int64_t count = 0;
+
+ rte_srand(rte_rdtsc());
+
+ printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);
+
+ print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);
+
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
+ TEST_LPM_ASSERT(lpm != NULL);
+
+	/* Measure add. */
+ begin = rte_rdtsc();
+
+ for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
+ if (rte_lpm_add(lpm, large_route_table[i].ip,
+ large_route_table[i].depth, next_hop_add) == 0)
+ status++;
+ }
+ /* End Timer. */
+ total_time = rte_rdtsc() - begin;
+
+ printf("Unique added entries = %d\n", status);
+ /* Obtain add statistics. */
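+	/* Count 64-byte cache lines in use: every 32 tbl24 entries, check
+	 * whether new valid entries were seen since the last check; if so,
+	 * another cache line holds used entries. */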
+ for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
+ if (lpm->tbl24[i].valid)
+ lpm_used_entries++;
+
+ if (i % 32 == 0) {
+ if ((uint64_t)count < lpm_used_entries) {
+ cache_line_counter++;
+ count = lpm_used_entries;
+ }
+ }
+ }
+
+ printf("Used table 24 entries = %u (%g%%)\n",
+ (unsigned) lpm_used_entries,
+ (lpm_used_entries * 100.0) / RTE_LPM_TBL24_NUM_ENTRIES);
+ printf("64 byte Cache entries used = %u (%u bytes)\n",
+ (unsigned) cache_line_counter, (unsigned) cache_line_counter * 64);
+
+ printf("Average LPM Add: %g cycles\n",
+ (double)total_time / NUM_ROUTE_ENTRIES);
+
+ /* Measure single Lookup */
+ total_time = 0;
+ count = 0;
+
+ for (i = 0; i < ITERATIONS; i++) {
+ static uint32_t ip_batch[BATCH_SIZE];
+
+ for (j = 0; j < BATCH_SIZE; j++)
+ ip_batch[j] = rte_rand();
+
+ /* Lookup per batch */
+ begin = rte_rdtsc();
+
+ for (j = 0; j < BATCH_SIZE; j++) {
+ if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
+ count++;
+ }
+
+ total_time += rte_rdtsc() - begin;
+
+ }
+ printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
+ (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
+ (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
+
+ /* Measure bulk Lookup */
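+	/* Each rte_lpm_lookup_bulk() call resolves BULK_SIZE addresses; a miss
+	 * leaves RTE_LPM_LOOKUP_SUCCESS clear in the corresponding result. */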
+ total_time = 0;
+ count = 0;
+ for (i = 0; i < ITERATIONS; i++) {
+ static uint32_t ip_batch[BATCH_SIZE];
+ uint32_t next_hops[BULK_SIZE];
+
+ /* Create array of random IP addresses */
+ for (j = 0; j < BATCH_SIZE; j++)
+ ip_batch[j] = rte_rand();
+
+ /* Lookup per batch */
+ begin = rte_rdtsc();
+ for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
+ unsigned k;
+ rte_lpm_lookup_bulk(lpm, &ip_batch[j], next_hops, BULK_SIZE);
+ for (k = 0; k < BULK_SIZE; k++)
+ if (unlikely(!(next_hops[k] & RTE_LPM_LOOKUP_SUCCESS)))
+ count++;
+ }
+
+ total_time += rte_rdtsc() - begin;
+ }
+ printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
+ (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
+ (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
+
+ /* Measure LookupX4 */
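+	/* rte_lpm_lookupx4() looks up four packed IPv4 addresses per call and
+	 * writes the supplied default (UINT32_MAX here) for misses. */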
+ total_time = 0;
+ count = 0;
+ for (i = 0; i < ITERATIONS; i++) {
+ static uint32_t ip_batch[BATCH_SIZE];
+ uint32_t next_hops[4];
+
+ /* Create array of random IP addresses */
+ for (j = 0; j < BATCH_SIZE; j++)
+ ip_batch[j] = rte_rand();
+
+ /* Lookup per batch */
+ begin = rte_rdtsc();
+ for (j = 0; j < BATCH_SIZE; j += RTE_DIM(next_hops)) {
+ unsigned k;
+ xmm_t ipx4;
+
+			ipx4 = vect_loadu_sil128((xmm_t *)(ip_batch + j));
+ rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT32_MAX);
+ for (k = 0; k < RTE_DIM(next_hops); k++)
+ if (unlikely(next_hops[k] == UINT32_MAX))
+ count++;
+ }
+
+ total_time += rte_rdtsc() - begin;
+ }
+ printf("LPM LookupX4: %.1f cycles (fails = %.1f%%)\n",
+ (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
+ (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
+
+ /* Delete */
+ status = 0;
+ begin = rte_rdtsc();
+
+ for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
+ /* rte_lpm_delete(lpm, ip, depth) */
+ status += rte_lpm_delete(lpm, large_route_table[i].ip,
+ large_route_table[i].depth);
+ }
+
+	total_time = rte_rdtsc() - begin;
+
+ printf("Average LPM Delete: %g cycles\n",
+ (double)total_time / NUM_ROUTE_ENTRIES);
+
+ rte_lpm_delete_all(lpm);
+ rte_lpm_free(lpm);
+
+ return 0;
+}
+
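+/* Register the performance test as its own autotest command so it can be
+ * run independently of the LPM unit tests. */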
+static struct test_command lpm_perf_cmd = {
+ .command = "lpm_perf_autotest",
+ .callback = test_lpm_perf,
+};
+REGISTER_TEST_COMMAND(lpm_perf_cmd);