X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest%2Ftest_lpm.c;h=8b4ded98220a1d7e5a3f97fb5f650f2010afe1ed;hb=e45ae7065e92ade35d6a3883a986a4210c78cc24;hp=3a9400feab8c6a58d51df01714962dd7d3dcecdb;hpb=af75078fece3615088e561357c1e97603e43a5fe;p=dpdk.git diff --git a/app/test/test_lpm.c b/app/test/test_lpm.c index 3a9400feab..8b4ded9822 100644 --- a/app/test/test_lpm.c +++ b/app/test/test_lpm.c @@ -1,36 +1,34 @@ /*- * BSD LICENSE - * - * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions * are met: - * - * * Redistributions of source code must retain the above copyright + * + * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - * version: DPDK.L.1.2.3-3 */ #include @@ -38,7 +36,6 @@ #include #include #include -#include #include #include @@ -48,16 +45,11 @@ #include #include -#ifdef RTE_LIBRTE_LPM +#include "test.h" #include "rte_lpm.h" #include "test_lpm_routes.h" -#include "test.h" - -#define ITERATIONS (1 << 20) -#define BATCH_SIZE (1 << 13) - #define TEST_LPM_ASSERT(cond) do { \ if (!(cond)) { \ printf("Error at line %d: \n", __LINE__); \ @@ -65,8 +57,6 @@ } \ } while(0) - - typedef int32_t (* rte_lpm_test)(void); static int32_t test0(void); @@ -87,7 +77,7 @@ static int32_t test14(void); static int32_t test15(void); static int32_t test16(void); static int32_t test17(void); -static int32_t test18(void); +static int32_t perf_test(void); rte_lpm_test tests[] = { /* Test Cases */ @@ -109,7 +99,7 @@ rte_lpm_test tests[] = { test15, test16, test17, - test18 + perf_test, }; #define NUM_LPM_TESTS (sizeof(tests)/sizeof(tests[0])) @@ -118,8 +108,6 @@ rte_lpm_test tests[] = { #define PASS 0 /* - * TEST 0 - * * Check that rte_lpm_create fails gracefully for incorrect user input * arguments */ @@ -129,31 +117,22 @@ test0(void) struct rte_lpm *lpm = NULL; /* rte_lpm_create: lpm name == NULL */ - lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, MAX_RULES, 0); TEST_LPM_ASSERT(lpm == NULL); /* rte_lpm_create: max_rules = 0 */ /* Note: __func__ inserts the function name, in this case "test0". */ - lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 0, RTE_LPM_HEAP); - TEST_LPM_ASSERT(lpm == NULL); - - /* rte_lpm_create: mem_location is not RTE_LPM_HEAP or not MEMZONE */ - /* Note: __func__ inserts the function name, in this case "test0". */ - lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 2); - TEST_LPM_ASSERT(lpm == NULL); - - lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, -1); + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 0, 0); TEST_LPM_ASSERT(lpm == NULL); /* socket_id < -1 is invalid */ - lpm = rte_lpm_create(__func__, -2, MAX_RULES, RTE_LPM_HEAP); + lpm = rte_lpm_create(__func__, -2, MAX_RULES, 0); TEST_LPM_ASSERT(lpm == NULL); return PASS; } -/* TEST 1 - * +/* * Create lpm table then delete lpm table 100 times * Use a slightly different rules size each time * */ @@ -165,8 +144,7 @@ test1(void) /* rte_lpm_free: Free NULL */ for (i = 0; i < 100; i++) { - lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES - i, - RTE_LPM_HEAP); + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES - i, 0); TEST_LPM_ASSERT(lpm != NULL); rte_lpm_free(lpm); @@ -176,8 +154,7 @@ test1(void) return PASS; } -/* TEST 2 - * +/* * Call rte_lpm_free for NULL pointer user input. Note: free has no return and * therefore it is impossible to check for failure but this test is added to * increase function coverage metrics and to validate that freeing null does @@ -188,7 +165,7 @@ test2(void) { struct rte_lpm *lpm = NULL; - lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0); TEST_LPM_ASSERT(lpm != NULL); rte_lpm_free(lpm); @@ -196,8 +173,7 @@ test2(void) return PASS; } -/* TEST 3 - * +/* * Check that rte_lpm_add fails gracefully for incorrect user input arguments */ int32_t @@ -213,7 +189,7 @@ test3(void) TEST_LPM_ASSERT(status < 0); /*Create vaild lpm to use in rest of test. 
*/ - lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0); TEST_LPM_ASSERT(lpm != NULL); /* rte_lpm_add: depth < 1 */ @@ -229,8 +205,7 @@ test3(void) return PASS; } -/* TEST 4 - * +/* * Check that rte_lpm_delete fails gracefully for incorrect user input * arguments */ @@ -247,7 +222,7 @@ test4(void) TEST_LPM_ASSERT(status < 0); /*Create vaild lpm to use in rest of test. */ - lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0); TEST_LPM_ASSERT(lpm != NULL); /* rte_lpm_delete: depth < 1 */ @@ -263,8 +238,7 @@ test4(void) return PASS; } -/* TEST 5 - * +/* * Check that rte_lpm_lookup fails gracefully for incorrect user input * arguments */ @@ -282,7 +256,7 @@ test5(void) TEST_LPM_ASSERT(status < 0); /*Create vaild lpm to use in rest of test. */ - lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0); TEST_LPM_ASSERT(lpm != NULL); /* rte_lpm_lookup: depth < 1 */ @@ -296,8 +270,7 @@ test5(void) -/* TEST 6 - * +/* * Call add, lookup and delete for a single rule with depth <= 24 */ int32_t @@ -308,7 +281,7 @@ test6(void) uint8_t depth = 24, next_hop_add = 100, next_hop_return = 0; int32_t status = 0; - lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0); TEST_LPM_ASSERT(lpm != NULL); status = rte_lpm_add(lpm, ip, depth, next_hop_add); @@ -328,20 +301,21 @@ test6(void) return PASS; } -/* TEST 7 - * +/* * Call add, lookup and delete for a single rule with depth > 24 */ int32_t test7(void) { + __m128i ipx4; + uint16_t hop[4]; struct rte_lpm *lpm = NULL; uint32_t ip = IPv4(0, 0, 0, 0); uint8_t depth = 32, next_hop_add = 100, next_hop_return = 0; int32_t status = 0; - lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0); TEST_LPM_ASSERT(lpm != NULL); status = rte_lpm_add(lpm, ip, depth, next_hop_add); @@ -350,6 +324,13 @@ test7(void) status = rte_lpm_lookup(lpm, ip, &next_hop_return); TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add)); + ipx4 = _mm_set_epi32(ip, ip + 0x100, ip - 0x100, ip); + rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX); + TEST_LPM_ASSERT(hop[0] == next_hop_add); + TEST_LPM_ASSERT(hop[1] == UINT16_MAX); + TEST_LPM_ASSERT(hop[2] == UINT16_MAX); + TEST_LPM_ASSERT(hop[3] == next_hop_add); + status = rte_lpm_delete(lpm, ip, depth); TEST_LPM_ASSERT(status == 0); @@ -361,8 +342,7 @@ test7(void) return PASS; } -/* TEST 8 - * +/* * Use rte_lpm_add to add rules which effect only the second half of the lpm * table. Use all possible depths ranging from 1..32. Set the next hop = to the * depth. Check lookup hit for on every add and check for lookup miss on the @@ -374,12 +354,14 @@ test7(void) int32_t test8(void) { + __m128i ipx4; + uint16_t hop[4]; struct rte_lpm *lpm = NULL; uint32_t ip1 = IPv4(127, 255, 255, 255), ip2 = IPv4(128, 0, 0, 0); uint8_t depth, next_hop_add, next_hop_return; int32_t status = 0; - lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0); TEST_LPM_ASSERT(lpm != NULL); /* Loop with rte_lpm_add. 
*/ @@ -397,6 +379,13 @@ test8(void) status = rte_lpm_lookup(lpm, ip2, &next_hop_return); TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add)); + + ipx4 = _mm_set_epi32(ip2, ip1, ip2, ip1); + rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX); + TEST_LPM_ASSERT(hop[0] == UINT16_MAX); + TEST_LPM_ASSERT(hop[1] == next_hop_add); + TEST_LPM_ASSERT(hop[2] == UINT16_MAX); + TEST_LPM_ASSERT(hop[3] == next_hop_add); } /* Loop with rte_lpm_delete. */ @@ -418,6 +407,18 @@ test8(void) status = rte_lpm_lookup(lpm, ip1, &next_hop_return); TEST_LPM_ASSERT(status == -ENOENT); + + ipx4 = _mm_set_epi32(ip1, ip1, ip2, ip2); + rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX); + if (depth != 1) { + TEST_LPM_ASSERT(hop[0] == next_hop_add); + TEST_LPM_ASSERT(hop[1] == next_hop_add); + } else { + TEST_LPM_ASSERT(hop[0] == UINT16_MAX); + TEST_LPM_ASSERT(hop[1] == UINT16_MAX); + } + TEST_LPM_ASSERT(hop[2] == UINT16_MAX); + TEST_LPM_ASSERT(hop[3] == UINT16_MAX); } rte_lpm_free(lpm); @@ -425,8 +426,7 @@ test8(void) return PASS; } -/* TEST 9 - * +/* * - Add & lookup to hit invalid TBL24 entry * - Add & lookup to hit valid TBL24 entry not extended * - Add & lookup to hit valid extended TBL24 entry with invalid TBL8 entry @@ -447,7 +447,7 @@ test9(void) depth = 24; next_hop_add = 100; - lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0); TEST_LPM_ASSERT(lpm != NULL); status = rte_lpm_add(lpm, ip, depth, next_hop_add); @@ -584,8 +584,7 @@ test9(void) } -/* TEST 10 - * +/* * - Add rule that covers a TBL24 range previously invalid & lookup (& delete & * lookup) * - Add rule that extends a TBL24 invalid entry & lookup (& delete & lookup) @@ -608,7 +607,7 @@ test10(void) /* Add rule that covers a TBL24 range previously invalid & lookup * (& delete & lookup) */ - lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0); TEST_LPM_ASSERT(lpm != NULL); ip = IPv4(128, 0, 0, 0); @@ -752,7 +751,6 @@ test10(void) ip = IPv4(128, 0, 0, 0); depth = 24; - next_hop_add = 100; status = rte_lpm_delete(lpm, ip, depth); TEST_LPM_ASSERT(status < 0); @@ -766,7 +764,6 @@ test10(void) ip = IPv4(128, 0, 0, 0); depth = 32; - next_hop_add = 100; status = rte_lpm_delete(lpm, ip, depth); TEST_LPM_ASSERT(status < 0); @@ -779,8 +776,7 @@ test10(void) return PASS; } -/* TEST 11 - * +/* * Add two rules, lookup to hit the more specific one, lookup to hit the less * specific one delete the less specific rule and lookup previous values again; * add a more specific rule than the existing rule, lookup again @@ -795,7 +791,7 @@ test11(void) uint8_t depth, next_hop_add, next_hop_return; int32_t status = 0; - lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0); TEST_LPM_ASSERT(lpm != NULL); ip = IPv4(128, 0, 0, 0); @@ -844,8 +840,7 @@ test11(void) return PASS; } -/* TEST 12 - * +/* * Add an extended rule (i.e. depth greater than 24, lookup (hit), delete, * lookup (miss) in a for loop of 1000 times. This will check tbl8 extension * and contraction. 
@@ -855,12 +850,14 @@ test11(void) int32_t test12(void) { + __m128i ipx4; + uint16_t hop[4]; struct rte_lpm *lpm = NULL; uint32_t ip, i; uint8_t depth, next_hop_add, next_hop_return; int32_t status = 0; - lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0); TEST_LPM_ASSERT(lpm != NULL); ip = IPv4(128, 0, 0, 0); @@ -875,6 +872,13 @@ test12(void) TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add)); + ipx4 = _mm_set_epi32(ip, ip + 1, ip, ip - 1); + rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX); + TEST_LPM_ASSERT(hop[0] == UINT16_MAX); + TEST_LPM_ASSERT(hop[1] == next_hop_add); + TEST_LPM_ASSERT(hop[2] == UINT16_MAX); + TEST_LPM_ASSERT(hop[3] == next_hop_add); + status = rte_lpm_delete(lpm, ip, depth); TEST_LPM_ASSERT(status == 0); @@ -887,8 +891,7 @@ test12(void) return PASS; } -/* TEST 13 - * +/* * Add a rule to tbl24, lookup (hit), then add a rule that will extend this * tbl24 entry, lookup (hit). delete the rule that caused the tbl24 extension, * lookup (miss) and repeat for loop of 1000 times. This will check tbl8 @@ -904,7 +907,7 @@ test13(void) uint8_t depth, next_hop_add_1, next_hop_add_2, next_hop_return; int32_t status = 0; - lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0); TEST_LPM_ASSERT(lpm != NULL); ip = IPv4(128, 0, 0, 0); @@ -949,8 +952,7 @@ test13(void) return PASS; } -/* TEST 14 - * +/* * Fore TBL8 extension exhaustion. Add 256 rules that require a tbl8 extension. * No more tbl8 extensions will be allowed. Now add one more rule that required * a tbl8 extension and get fail. @@ -968,15 +970,15 @@ test14(void) int32_t status = 0; /* Add enough space for 256 rules for every depth */ - lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 256 * 32, RTE_LPM_HEAP); + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 256 * 32, 0); TEST_LPM_ASSERT(lpm != NULL); - ip = IPv4(0, 0, 0, 0); depth = 32; next_hop_add = 100; + ip = IPv4(0, 0, 0, 0); /* Add 256 rules that require a tbl8 extension */ - for (ip = 0; ip <= IPv4(0, 0, 255, 0); ip += 256) { + for (; ip <= IPv4(0, 0, 255, 0); ip += 256) { status = rte_lpm_add(lpm, ip, depth, next_hop_add); TEST_LPM_ASSERT(status == 0); @@ -998,220 +1000,21 @@ test14(void) return PASS; } -/* TEST test15 - * - * Lookup performance test using Mae West Routing Table - */ -static inline uint32_t -depth_to_mask(uint8_t depth) { - return (int)0x80000000 >> (depth - 1); -} - -static uint32_t -rule_table_check_for_duplicates(const struct route_rule *table, uint32_t n){ - unsigned i, j, count; - - count = 0; - for (i = 0; i < (n - 1); i++) { - uint8_t depth1 = table[i].depth; - uint32_t ip1_masked = table[i].ip & depth_to_mask(depth1); - - for (j = (i + 1); j tbl24[i].valid) - lpm_used_entries++; - - if (i % 32 == 0){ - if (count < lpm_used_entries) { - cache_line_counter++; - count = lpm_used_entries; - } - } - } - - printf("Number of table 24 entries = %u\n", - (unsigned) RTE_LPM_TBL24_NUM_ENTRIES); - printf("Used table 24 entries = %u\n", - (unsigned) lpm_used_entries); - printf("Percentage of table 24 entries used = %u\n", - (unsigned) div64((lpm_used_entries * 100) , - RTE_LPM_TBL24_NUM_ENTRIES)); - printf("64 byte Cache entries used = %u \n", - (unsigned) cache_line_counter); - printf("Cache Required = %u bytes\n\n", - (unsigned) cache_line_counter * 64); - - printf("Average LPM Add: %u cycles\n", avg_ticks); - - /* Lookup */ - - /* Choose random seed. 
*/ - rte_srand(0); - total_time = 0; - status = 0; - for (i = 0; i < (ITERATIONS / BATCH_SIZE); i ++) { - static uint32_t ip_batch[BATCH_SIZE]; - uint64_t begin_batch, end_batch; - - /* Generate a batch of random numbers */ - for (j = 0; j < BATCH_SIZE; j ++) { - ip_batch[j] = rte_rand(); - } - - /* Lookup per batch */ - begin_batch = rte_rdtsc(); - - for (j = 0; j < BATCH_SIZE; j ++) { - status += rte_lpm_lookup(lpm, ip_batch[j], - &next_hop_return); - } - - end_batch = rte_rdtsc(); - printf("status = %d\r", next_hop_return); - TEST_LPM_ASSERT(status < 1); - - /* Accumulate batch time */ - total_time += (end_batch - begin_batch); - - TEST_LPM_ASSERT((status < -ENOENT) || - (next_hop_return == next_hop_add)); - } - - avg_ticks = (uint32_t) div64(total_time, ITERATIONS); - printf("Average LPM Lookup: %u cycles\n", avg_ticks); - - /* Delete */ - status = 0; - begin = rte_rdtsc(); - - for (i = 0; i < NUM_ROUTE_ENTRIES; i++) { - /* rte_lpm_delete(lpm, ip, depth) */ - status += rte_lpm_delete(lpm, mae_west_tbl[i].ip, - mae_west_tbl[i].depth); - } - - end = rte_rdtsc(); - - TEST_LPM_ASSERT(status == 0); - - avg_ticks = (uint32_t) div64((end - begin), NUM_ROUTE_ENTRIES); - - printf("Average LPM Delete: %u cycles\n", avg_ticks); - - rte_lpm_delete_all(lpm); - rte_lpm_free(lpm); - - return PASS; -} - - - /* - * Sequence of operations for find existing fbk hash table + * Sequence of operations for find existing lpm table * * - create table * - find existing table: hit * - find non-existing table: miss * */ -int32_t test16(void) +int32_t +test15(void) { struct rte_lpm *lpm = NULL, *result = NULL; /* Create lpm */ - lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, 256 * 32, RTE_LPM_HEAP); + lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, 256 * 32, 0); TEST_LPM_ASSERT(lpm != NULL); /* Try to find existing lpm */ @@ -1233,18 +1036,16 @@ int32_t test16(void) * test failure condition of overloading the tbl8 so no more will fit * Check we get an error return value in that case */ -static int32_t -test17(void) +int32_t +test16(void) { uint32_t ip; struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, - 256 * 32, RTE_LPM_HEAP); - - printf("Testing filling tbl8's\n"); + 256 * 32, 0); - /* ip loops through all positibilities for top 24 bits of address */ + /* ip loops through all possibilities for top 24 bits of address */ for (ip = 0; ip < 0xFFFFFF; ip++){ - /* add an entrey within a different tbl8 each time, since + /* add an entry within a different tbl8 each time, since * depth >24 and the top 24 bits are different */ if (rte_lpm_add(lpm, (ip << 8) + 0xF0, 30, 0) < 0) break; @@ -1261,7 +1062,6 @@ test17(void) } /* - * Test 18 * Test for overwriting of tbl8: * - add rule /32 and lookup * - add new rule /24 and lookup @@ -1269,7 +1069,7 @@ test17(void) * - lookup /32 and /24 rule to ensure the table has not been overwritten. 
*/ int32_t -test18(void) +test17(void) { struct rte_lpm *lpm = NULL; const uint32_t ip_10_32 = IPv4(10, 10, 10, 2); @@ -1284,30 +1084,46 @@ test18(void) uint8_t next_hop_return = 0; int32_t status = 0; - lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0); TEST_LPM_ASSERT(lpm != NULL); - status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32, next_hop_ip_10_32); - TEST_LPM_ASSERT(status == 0); + if ((status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32, + next_hop_ip_10_32)) < 0) + return -1; status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return); + uint8_t test_hop_10_32 = next_hop_return; TEST_LPM_ASSERT(status == 0); TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32); - status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24, next_hop_ip_10_24); - TEST_LPM_ASSERT(status == 0); + if ((status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24, + next_hop_ip_10_24)) < 0) + return -1; status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return); + uint8_t test_hop_10_24 = next_hop_return; TEST_LPM_ASSERT(status == 0); TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24); - status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25, next_hop_ip_20_25); - TEST_LPM_ASSERT(status == 0); + if ((status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25, + next_hop_ip_20_25)) < 0) + return -1; status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return); + uint8_t test_hop_20_25 = next_hop_return; TEST_LPM_ASSERT(status == 0); TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25); + if (test_hop_10_32 == test_hop_10_24) { + printf("Next hop return equal\n"); + return -1; + } + + if (test_hop_10_24 == test_hop_20_25){ + printf("Next hop return equal\n"); + return -1; + } + status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return); TEST_LPM_ASSERT(status == 0); TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32); @@ -1318,48 +1134,221 @@ test18(void) rte_lpm_free(lpm); - printf("%s PASSED\n", __func__); return PASS; } - /* - * Do all unit and performance tests. + * Lookup performance test */ -int -test_lpm(void) +#define ITERATIONS (1 << 10) +#define BATCH_SIZE (1 << 12) +#define BULK_SIZE 32 + +static void +print_route_distribution(const struct route_rule *table, uint32_t n) +{ + unsigned i, j; + + printf("Route distribution per prefix width: \n"); + printf("DEPTH QUANTITY (PERCENT)\n"); + printf("--------------------------- \n"); + + /* Count depths. */ + for(i = 1; i <= 32; i++) { + unsigned depth_counter = 0; + double percent_hits; + + for (j = 0; j < n; j++) + if (table[j].depth == (uint8_t) i) + depth_counter++; + + percent_hits = ((double)depth_counter)/((double)n) * 100; + printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits); + } + printf("\n"); +} + +int32_t +perf_test(void) { - unsigned test_num; - int status, global_status; + struct rte_lpm *lpm = NULL; + uint64_t begin, total_time, lpm_used_entries = 0; + unsigned i, j; + uint8_t next_hop_add = 0xAA, next_hop_return = 0; + int status = 0; + uint64_t cache_line_counter = 0; + int64_t count = 0; - printf("Running LPM tests...\n" - "Total number of test = %u\n", (unsigned) NUM_LPM_TESTS); + rte_srand(rte_rdtsc()); - global_status = 0; + printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES); - for (test_num = 0; test_num < NUM_LPM_TESTS; test_num++) { + print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES); - status = tests[test_num](); + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 1000000, 0); + TEST_LPM_ASSERT(lpm != NULL); - printf("LPM Test %u: %s\n", test_num, - (status < 0) ? 
"FAIL" : "PASS"); + /* Measue add. */ + begin = rte_rdtsc(); - if (status < 0) { - global_status = status; + for (i = 0; i < NUM_ROUTE_ENTRIES; i++) { + if (rte_lpm_add(lpm, large_route_table[i].ip, + large_route_table[i].depth, next_hop_add) == 0) + status++; + } + /* End Timer. */ + total_time = rte_rdtsc() - begin; + + printf("Unique added entries = %d\n", status); + /* Obtain add statistics. */ + for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) { + if (lpm->tbl24[i].valid) + lpm_used_entries++; + + if (i % 32 == 0){ + if ((uint64_t)count < lpm_used_entries) { + cache_line_counter++; + count = lpm_used_entries; + } } } - return global_status; + printf("Used table 24 entries = %u (%g%%)\n", + (unsigned) lpm_used_entries, + (lpm_used_entries * 100.0) / RTE_LPM_TBL24_NUM_ENTRIES); + printf("64 byte Cache entries used = %u (%u bytes)\n", + (unsigned) cache_line_counter, (unsigned) cache_line_counter * 64); + + printf("Average LPM Add: %g cycles\n", (double)total_time / NUM_ROUTE_ENTRIES); + + /* Measure single Lookup */ + total_time = 0; + count = 0; + + for (i = 0; i < ITERATIONS; i ++) { + static uint32_t ip_batch[BATCH_SIZE]; + + for (j = 0; j < BATCH_SIZE; j ++) + ip_batch[j] = rte_rand(); + + /* Lookup per batch */ + begin = rte_rdtsc(); + + for (j = 0; j < BATCH_SIZE; j ++) { + if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0) + count++; + } + + total_time += rte_rdtsc() - begin; + + } + printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n", + (double)total_time / ((double)ITERATIONS * BATCH_SIZE), + (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE)); + + /* Measure bulk Lookup */ + total_time = 0; + count = 0; + for (i = 0; i < ITERATIONS; i ++) { + static uint32_t ip_batch[BATCH_SIZE]; + uint16_t next_hops[BULK_SIZE]; + + /* Create array of random IP addresses */ + for (j = 0; j < BATCH_SIZE; j ++) + ip_batch[j] = rte_rand(); + + /* Lookup per batch */ + begin = rte_rdtsc(); + for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) { + unsigned k; + rte_lpm_lookup_bulk(lpm, &ip_batch[j], next_hops, BULK_SIZE); + for (k = 0; k < BULK_SIZE; k++) + if (unlikely(!(next_hops[k] & RTE_LPM_LOOKUP_SUCCESS))) + count++; + } + + total_time += rte_rdtsc() - begin; + } + printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n", + (double)total_time / ((double)ITERATIONS * BATCH_SIZE), + (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE)); + + /* Measure LookupX4 */ + total_time = 0; + count = 0; + for (i = 0; i < ITERATIONS; i++) { + static uint32_t ip_batch[BATCH_SIZE]; + uint16_t next_hops[4]; + + /* Create array of random IP addresses */ + for (j = 0; j < BATCH_SIZE; j++) + ip_batch[j] = rte_rand(); + + /* Lookup per batch */ + begin = rte_rdtsc(); + for (j = 0; j < BATCH_SIZE; j += RTE_DIM(next_hops)) { + unsigned k; + __m128i ipx4; + + ipx4 = _mm_loadu_si128((__m128i *)(ip_batch + j)); + ipx4 = *(__m128i *)(ip_batch + j); + rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT16_MAX); + for (k = 0; k < RTE_DIM(next_hops); k++) + if (unlikely(next_hops[k] == UINT16_MAX)) + count++; + } + + total_time += rte_rdtsc() - begin; + } + printf("LPM LookupX4: %.1f cycles (fails = %.1f%%)\n", + (double)total_time / ((double)ITERATIONS * BATCH_SIZE), + (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE)); + + /* Delete */ + status = 0; + begin = rte_rdtsc(); + + for (i = 0; i < NUM_ROUTE_ENTRIES; i++) { + /* rte_lpm_delete(lpm, ip, depth) */ + status += rte_lpm_delete(lpm, large_route_table[i].ip, + large_route_table[i].depth); + } + + total_time += rte_rdtsc() - begin; + + printf("Average LPM Delete: 
%g cycles\n", + (double)total_time / NUM_ROUTE_ENTRIES); + + rte_lpm_delete_all(lpm); + rte_lpm_free(lpm); + + return PASS; } -#else +/* + * Do all unit and performance tests. + */ -int +static int test_lpm(void) { - printf("The LPM library is not included in this build\n"); - return 0; + unsigned i; + int status, global_status = 0; + + for (i = 0; i < NUM_LPM_TESTS; i++) { + status = tests[i](); + if (status < 0) { + printf("ERROR: LPM Test %s: FAIL\n", RTE_STR(tests[i])); + global_status = status; + } + } + + return global_status; } -#endif +static struct test_command lpm_cmd = { + .command = "lpm_autotest", + .callback = test_lpm, +}; +REGISTER_TEST_COMMAND(lpm_cmd);