4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 #include <sys/queue.h>
40 #include <rte_common.h>
41 #include <rte_cycles.h>
42 #include <rte_memory.h>
43 #include <rte_random.h>
44 #include <rte_branch_prediction.h>
53 #include "test_lpm_routes.h"
/*
 * Test assertion macro: in the full file this wraps the print in
 * `if (!(cond)) { ...; return -1; }` inside a do/while(0).
 * NOTE(review): this extract is garbled -- original file line numbers are
 * fused into each line and the macro's if/return/while(0) tail is missing;
 * code bytes left untouched.
 */
55 #define TEST_LPM_ASSERT(cond) do { \
57 printf("Error at line %d: \n", __LINE__); \
/*
 * Test-harness plumbing: an LPM test is a function taking no arguments and
 * returning int32_t (0 = pass, negative = fail). The tests[] table collects
 * the test functions (its initializer entries are elided in this extract)
 * and NUM_LPM_TESTS derives the count from the table itself so new tests
 * need only be appended to the array.
 * NOTE(review): garbled extract -- fused line numbers; array body missing.
 */
62 typedef int32_t (* rte_lpm_test)(void);
64 static int32_t test0(void);
65 static int32_t test1(void);
66 static int32_t test2(void);
67 static int32_t test3(void);
68 static int32_t test4(void);
69 static int32_t test5(void);
70 static int32_t test6(void);
71 static int32_t test7(void);
72 static int32_t test8(void);
73 static int32_t test9(void);
74 static int32_t test10(void);
75 static int32_t test11(void);
76 static int32_t test12(void);
77 static int32_t test13(void);
78 static int32_t test14(void);
79 static int32_t test15(void);
80 static int32_t test16(void);
81 static int32_t test17(void);
82 static int32_t perf_test(void);
84 rte_lpm_test tests[] = {
/* NUM_LPM_TESTS is computed from the array, so it always matches tests[]. */
107 #define NUM_LPM_TESTS (sizeof(tests)/sizeof(tests[0]))
/* Default rule capacity used by most tests when creating an LPM table. */
109 #define MAX_RULES 256
/*
 * test0: negative-path checks for rte_lpm_create(). Each call below passes
 * one invalid argument (NULL name, zero max_rules, socket_id < -1) and the
 * test asserts that creation fails by returning NULL.
 * NOTE(review): garbled extract -- the function signature, braces and return
 * are missing; code bytes left untouched.
 */
113 * Check that rte_lpm_create fails gracefully for incorrect user input
119 struct rte_lpm *lpm = NULL;
121 /* rte_lpm_create: lpm name == NULL */
122 lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, MAX_RULES, 0);
123 TEST_LPM_ASSERT(lpm == NULL);
125 /* rte_lpm_create: max_rules = 0 */
126 /* Note: __func__ inserts the function name, in this case "test0". */
127 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 0, 0);
128 TEST_LPM_ASSERT(lpm == NULL);
130 /* socket_id < -1 is invalid */
131 lpm = rte_lpm_create(__func__, -2, MAX_RULES, 0);
132 TEST_LPM_ASSERT(lpm == NULL);
/*
 * test1: create/free churn -- create an LPM table 100 times with a slightly
 * different max_rules each iteration (MAX_RULES - i) and assert each create
 * succeeds; the matching rte_lpm_free() call is elided in this extract.
 * NOTE(review): garbled extract -- signature, braces, loop body close and
 * return are missing; code bytes left untouched.
 */
138 * Create lpm table then delete lpm table 100 times
139 * Use a slightly different rules size each time
144 struct rte_lpm *lpm = NULL;
147 /* rte_lpm_free: Free NULL */
148 for (i = 0; i < 100; i++) {
149 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES - i, 0);
150 TEST_LPM_ASSERT(lpm != NULL);
/* rte_lpm_free() has a void return, so success cannot be asserted. */
155 /* Can not test free so return success */
/*
 * test2: coverage test for rte_lpm_free() with a NULL argument -- free has
 * no return value so the only check possible is that nothing crashes. A
 * valid heap-allocated table (RTE_LPM_HEAP flag) is created first.
 * NOTE(review): garbled extract -- signature, the free calls and return are
 * missing; code bytes left untouched.
 */
160 * Call rte_lpm_free for NULL pointer user input. Note: free has no return and
161 * therefore it is impossible to check for failure but this test is added to
162 * increase function coverage metrics and to validate that freeing null does
168 struct rte_lpm *lpm = NULL;
170 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
171 TEST_LPM_ASSERT(lpm != NULL);
/*
 * test3: negative-path checks for rte_lpm_add() -- NULL lpm pointer, depth
 * below the minimum (0) and depth above MAX_DEPTH must all return an error
 * (negative status).
 * NOTE(review): garbled extract -- signature, braces and return are missing;
 * code bytes left untouched.
 */
179 * Check that rte_lpm_add fails gracefully for incorrect user input arguments
184 struct rte_lpm *lpm = NULL;
185 uint32_t ip = IPv4(0, 0, 0, 0);
186 uint8_t depth = 24, next_hop = 100;
189 /* rte_lpm_add: lpm == NULL */
190 status = rte_lpm_add(NULL, ip, depth, next_hop);
191 TEST_LPM_ASSERT(status < 0);
193 /* Create valid lpm to use in rest of test. */
194 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
195 TEST_LPM_ASSERT(lpm != NULL);
197 /* rte_lpm_add: depth < 1 */
198 status = rte_lpm_add(lpm, ip, 0, next_hop);
199 TEST_LPM_ASSERT(status < 0);
201 /* rte_lpm_add: depth > MAX_DEPTH */
202 status = rte_lpm_add(lpm, ip, (MAX_DEPTH + 1), next_hop);
203 TEST_LPM_ASSERT(status < 0);
/*
 * test4: negative-path checks for rte_lpm_delete() -- mirrors test3: NULL
 * lpm pointer, depth 0 and depth > MAX_DEPTH must each fail.
 * NOTE(review): garbled extract -- signature, braces, depth declaration and
 * return are missing; code bytes left untouched.
 */
211 * Check that rte_lpm_delete fails gracefully for incorrect user input
217 struct rte_lpm *lpm = NULL;
218 uint32_t ip = IPv4(0, 0, 0, 0);
222 /* rte_lpm_delete: lpm == NULL */
223 status = rte_lpm_delete(NULL, ip, depth);
224 TEST_LPM_ASSERT(status < 0);
226 /* Create valid lpm to use in rest of test. */
227 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
228 TEST_LPM_ASSERT(lpm != NULL);
230 /* rte_lpm_delete: depth < 1 */
231 status = rte_lpm_delete(lpm, ip, 0);
232 TEST_LPM_ASSERT(status < 0);
234 /* rte_lpm_delete: depth > MAX_DEPTH */
235 status = rte_lpm_delete(lpm, ip, (MAX_DEPTH + 1));
236 TEST_LPM_ASSERT(status < 0);
/*
 * test5: negative-path checks for rte_lpm_lookup() -- NULL lpm and NULL
 * next-hop output pointer must fail. The whole body is compiled only when
 * RTE_LIBRTE_LPM_DEBUG is defined, since release builds skip argument
 * validation in the lookup fast path.
 * NOTE(review): garbled extract -- signature, #endif and return are missing;
 * code bytes left untouched.
 */
244 * Check that rte_lpm_lookup fails gracefully for incorrect user input
250 #if defined(RTE_LIBRTE_LPM_DEBUG)
251 struct rte_lpm *lpm = NULL;
252 uint32_t ip = IPv4(0, 0, 0, 0);
253 uint8_t next_hop_return = 0;
256 /* rte_lpm_lookup: lpm == NULL */
257 status = rte_lpm_lookup(NULL, ip, &next_hop_return);
258 TEST_LPM_ASSERT(status < 0);
260 /* Create valid lpm to use in rest of test. */
261 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
262 TEST_LPM_ASSERT(lpm != NULL);
264 /* rte_lpm_lookup: next_hop pointer == NULL */
265 status = rte_lpm_lookup(lpm, ip, NULL);
266 TEST_LPM_ASSERT(status < 0);
/*
 * test6: basic add/lookup/delete round trip for a single rule whose depth
 * (24) stays within the tbl24 stage -- lookup hits after add, then returns
 * -ENOENT after delete.
 * NOTE(review): garbled extract -- signature, braces and return are missing;
 * code bytes left untouched.
 */
276 * Call add, lookup and delete for a single rule with depth <= 24
281 struct rte_lpm *lpm = NULL;
282 uint32_t ip = IPv4(0, 0, 0, 0);
283 uint8_t depth = 24, next_hop_add = 100, next_hop_return = 0;
286 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
287 TEST_LPM_ASSERT(lpm != NULL);
289 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
290 TEST_LPM_ASSERT(status == 0);
292 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
293 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
295 status = rte_lpm_delete(lpm, ip, depth);
296 TEST_LPM_ASSERT(status == 0);
298 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
299 TEST_LPM_ASSERT(status == -ENOENT);
/*
 * test7: same round trip as test6 but with depth 32, which forces the rule
 * into a tbl8 extension (depth > 24) -- exercises the second lookup stage.
 * NOTE(review): garbled extract -- signature, braces and return are missing;
 * code bytes left untouched.
 */
307 * Call add, lookup and delete for a single rule with depth > 24
313 struct rte_lpm *lpm = NULL;
314 uint32_t ip = IPv4(0, 0, 0, 0);
315 uint8_t depth = 32, next_hop_add = 100, next_hop_return = 0;
318 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
319 TEST_LPM_ASSERT(lpm != NULL);
321 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
322 TEST_LPM_ASSERT(status == 0);
324 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
325 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
327 status = rte_lpm_delete(lpm, ip, depth);
328 TEST_LPM_ASSERT(status == 0);
330 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
331 TEST_LPM_ASSERT(status == -ENOENT);
/*
 * test8: nested-prefix stress on the upper half of the address space.
 * ip2 (128.0.0.0) gets one rule per depth 1..32 with next_hop == depth;
 * ip1 (127.255.255.255, just below the covered range) must always miss.
 * Rules are then deleted from depth 32 down to 1 and after each delete the
 * lookup must fall back to the next-shallower remaining rule (depth - 1).
 * NOTE(review): garbled extract -- signature, braces, loop closes and return
 * are missing; code bytes left untouched.
 */
339 * Use rte_lpm_add to add rules which affect only the second half of the lpm
340 * table. Use all possible depths ranging from 1..32. Set the next hop = to the
341 * depth. Check lookup hit on every add and check for lookup miss on the
342 * first half of the lpm table after each add. Finally delete all rules going
343 * backwards (i.e. from depth = 32 ..1) and carry out a lookup after each
344 * delete. The lookup should return the next_hop_add value related to the
345 * previous depth value (i.e. depth -1).
350 struct rte_lpm *lpm = NULL;
351 uint32_t ip1 = IPv4(127, 255, 255, 255), ip2 = IPv4(128, 0, 0, 0);
352 uint8_t depth, next_hop_add, next_hop_return;
355 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
356 TEST_LPM_ASSERT(lpm != NULL);
358 /* Loop with rte_lpm_add. */
359 for (depth = 1; depth <= 32; depth++) {
360 /* Let the next_hop_add value = depth. Just for change. */
361 next_hop_add = depth;
363 status = rte_lpm_add(lpm, ip2, depth, next_hop_add);
364 TEST_LPM_ASSERT(status == 0);
366 /* Check IP in first half of tbl24 which should be empty. */
367 status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
368 TEST_LPM_ASSERT(status == -ENOENT);
370 status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
371 TEST_LPM_ASSERT((status == 0) &&
372 (next_hop_return == next_hop_add));
375 /* Loop with rte_lpm_delete. */
376 for (depth = 32; depth >= 1; depth--) {
/* Expected hop after delete is the rule at the next-shallower depth. */
377 next_hop_add = (uint8_t) (depth - 1);
379 status = rte_lpm_delete(lpm, ip2, depth);
380 TEST_LPM_ASSERT(status == 0);
382 status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
385 TEST_LPM_ASSERT((status == 0) &&
386 (next_hop_return == next_hop_add));
/* NOTE(review): the branch selecting between the hit-assert above and the
 * miss-assert below (the depth == 1 final-delete case) is elided here. */
389 TEST_LPM_ASSERT(status == -ENOENT);
392 status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
393 TEST_LPM_ASSERT(status == -ENOENT);
/*
 * test9: four scenarios covering each tbl24/tbl8 entry state on lookup:
 * an invalid tbl24 entry, a valid non-extended tbl24 entry, an extended
 * tbl24 entry whose tbl8 slot is invalid, and an extended tbl24 entry with
 * a valid tbl8 slot. Each scenario does add/lookup/delete/lookup and the
 * table is cleared between scenarios with rte_lpm_delete_all().
 * NOTE(review): garbled extract -- the depth/next_hop assignments between
 * steps are elided, as are signature, braces and return; code untouched.
 */
402 * - Add & lookup to hit invalid TBL24 entry
403 * - Add & lookup to hit valid TBL24 entry not extended
404 * - Add & lookup to hit valid extended TBL24 entry with invalid TBL8 entry
405 * - Add & lookup to hit valid extended TBL24 entry with valid TBL8 entry
411 struct rte_lpm *lpm = NULL;
412 uint32_t ip, ip_1, ip_2;
413 uint8_t depth, depth_1, depth_2, next_hop_add, next_hop_add_1,
414 next_hop_add_2, next_hop_return;
417 /* Add & lookup to hit invalid TBL24 entry */
418 ip = IPv4(128, 0, 0, 0);
422 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
423 TEST_LPM_ASSERT(lpm != NULL);
425 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
426 TEST_LPM_ASSERT(status == 0);
428 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
429 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
431 status = rte_lpm_delete(lpm, ip, depth);
432 TEST_LPM_ASSERT(status == 0);
434 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
435 TEST_LPM_ASSERT(status == -ENOENT);
437 rte_lpm_delete_all(lpm);
439 /* Add & lookup to hit valid TBL24 entry not extended */
440 ip = IPv4(128, 0, 0, 0);
444 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
445 TEST_LPM_ASSERT(status == 0);
447 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
448 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
453 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
454 TEST_LPM_ASSERT(status == 0);
456 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
457 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
461 status = rte_lpm_delete(lpm, ip, depth);
462 TEST_LPM_ASSERT(status == 0);
466 status = rte_lpm_delete(lpm, ip, depth);
467 TEST_LPM_ASSERT(status == 0);
469 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
470 TEST_LPM_ASSERT(status == -ENOENT);
472 rte_lpm_delete_all(lpm);
474 /* Add & lookup to hit valid extended TBL24 entry with invalid TBL8 entry */
476 ip = IPv4(128, 0, 0, 0);
480 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
481 TEST_LPM_ASSERT(status == 0);
483 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
484 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
486 ip = IPv4(128, 0, 0, 5);
490 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
491 TEST_LPM_ASSERT(status == 0);
493 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
494 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
496 status = rte_lpm_delete(lpm, ip, depth);
497 TEST_LPM_ASSERT(status == 0);
499 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
500 TEST_LPM_ASSERT(status == -ENOENT);
502 ip = IPv4(128, 0, 0, 0);
506 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
507 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
509 status = rte_lpm_delete(lpm, ip, depth);
510 TEST_LPM_ASSERT(status == 0);
512 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
513 TEST_LPM_ASSERT(status == -ENOENT);
515 rte_lpm_delete_all(lpm);
517 /* Add & lookup to hit valid extended TBL24 entry with valid TBL8 entry */
519 ip_1 = IPv4(128, 0, 0, 0);
521 next_hop_add_1 = 101;
523 ip_2 = IPv4(128, 0, 0, 5);
525 next_hop_add_2 = 102;
529 status = rte_lpm_add(lpm, ip_1, depth_1, next_hop_add_1);
530 TEST_LPM_ASSERT(status == 0);
532 status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
533 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
535 status = rte_lpm_add(lpm, ip_2, depth_2, next_hop_add_2);
536 TEST_LPM_ASSERT(status == 0);
538 status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
539 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2));
541 status = rte_lpm_delete(lpm, ip_2, depth_2);
542 TEST_LPM_ASSERT(status == 0);
/* After deleting the more-specific rule, lookup falls back to rule 1. */
544 status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
545 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
547 status = rte_lpm_delete(lpm, ip_1, depth_1);
548 TEST_LPM_ASSERT(status == 0);
550 status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
551 TEST_LPM_ASSERT(status == -ENOENT);
/*
 * test10: add/delete transition coverage for tbl24 and tbl8 entry states:
 * covering a previously invalid tbl24 range, extending an invalid entry,
 * extending a valid entry (then looking up both rules), updating the next
 * hop in tbl24 and in tbl8, and deleting rules that are not present in
 * either table. Each scenario ends with rte_lpm_delete_all().
 * NOTE(review): garbled extract -- the depth/next_hop assignments between
 * steps are elided, as are signature, braces and return; code untouched.
 */
560 * - Add rule that covers a TBL24 range previously invalid & lookup (& delete &
562 * - Add rule that extends a TBL24 invalid entry & lookup (& delete & lookup)
563 * - Add rule that extends a TBL24 valid entry & lookup for both rules (&
565 * - Add rule that updates the next hop in TBL24 & lookup (& delete & lookup)
566 * - Add rule that updates the next hop in TBL8 & lookup (& delete & lookup)
567 * - Delete a rule that is not present in the TBL24 & lookup
568 * - Delete a rule that is not present in the TBL8 & lookup
575 struct rte_lpm *lpm = NULL;
577 uint8_t depth, next_hop_add, next_hop_return;
580 /* Add rule that covers a TBL24 range previously invalid & lookup
581 * (& delete & lookup) */
582 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
583 TEST_LPM_ASSERT(lpm != NULL);
585 ip = IPv4(128, 0, 0, 0);
589 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
590 TEST_LPM_ASSERT(status == 0);
592 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
593 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
595 status = rte_lpm_delete(lpm, ip, depth);
596 TEST_LPM_ASSERT(status == 0);
598 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
599 TEST_LPM_ASSERT(status == -ENOENT);
601 rte_lpm_delete_all(lpm);
/* Add rule that extends a TBL24 invalid entry (& delete & lookup). */
603 ip = IPv4(128, 0, 0, 0);
607 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
608 TEST_LPM_ASSERT(status == 0);
610 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
611 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
613 status = rte_lpm_delete(lpm, ip, depth);
614 TEST_LPM_ASSERT(status == 0);
616 rte_lpm_delete_all(lpm);
618 /* Add rule that extends a TBL24 valid entry & lookup for both rules
619 * (& delete & lookup) */
621 ip = IPv4(128, 0, 0, 0);
625 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
626 TEST_LPM_ASSERT(status == 0);
628 ip = IPv4(128, 0, 0, 10);
632 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
633 TEST_LPM_ASSERT(status == 0);
635 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
636 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
638 ip = IPv4(128, 0, 0, 0);
641 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
642 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
644 ip = IPv4(128, 0, 0, 0);
647 status = rte_lpm_delete(lpm, ip, depth);
648 TEST_LPM_ASSERT(status == 0);
650 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
651 TEST_LPM_ASSERT(status == -ENOENT);
653 ip = IPv4(128, 0, 0, 10);
656 status = rte_lpm_delete(lpm, ip, depth);
657 TEST_LPM_ASSERT(status == 0);
659 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
660 TEST_LPM_ASSERT(status == -ENOENT);
662 rte_lpm_delete_all(lpm);
664 /* Add rule that updates the next hop in TBL24 & lookup
665 * (& delete & lookup) */
667 ip = IPv4(128, 0, 0, 0);
671 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
672 TEST_LPM_ASSERT(status == 0);
674 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
675 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
/* Re-add the same prefix with a different next hop (update path). */
679 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
680 TEST_LPM_ASSERT(status == 0);
682 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
683 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
685 status = rte_lpm_delete(lpm, ip, depth);
686 TEST_LPM_ASSERT(status == 0);
688 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
689 TEST_LPM_ASSERT(status == -ENOENT);
691 rte_lpm_delete_all(lpm);
693 /* Add rule that updates the next hop in TBL8 & lookup
694 * (& delete & lookup) */
696 ip = IPv4(128, 0, 0, 0);
700 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
701 TEST_LPM_ASSERT(status == 0);
703 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
704 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
708 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
709 TEST_LPM_ASSERT(status == 0);
711 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
712 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
714 status = rte_lpm_delete(lpm, ip, depth);
715 TEST_LPM_ASSERT(status == 0);
717 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
718 TEST_LPM_ASSERT(status == -ENOENT);
720 rte_lpm_delete_all(lpm);
722 /* Delete a rule that is not present in the TBL24 & lookup */
724 ip = IPv4(128, 0, 0, 0);
727 status = rte_lpm_delete(lpm, ip, depth);
728 TEST_LPM_ASSERT(status < 0);
730 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
731 TEST_LPM_ASSERT(status == -ENOENT);
733 rte_lpm_delete_all(lpm);
735 /* Delete a rule that is not present in the TBL8 & lookup */
737 ip = IPv4(128, 0, 0, 0);
740 status = rte_lpm_delete(lpm, ip, depth);
741 TEST_LPM_ASSERT(status < 0);
743 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
744 TEST_LPM_ASSERT(status == -ENOENT);
/*
 * test11: overlapping-prefix precedence -- add a less-specific and a
 * more-specific rule for the same range, verify lookups hit the expected
 * rule, then delete each and verify misses.
 * NOTE(review): garbled extract -- depth/next_hop assignments between steps,
 * signature, braces and return are elided; code bytes left untouched.
 */
752 * Add two rules, lookup to hit the more specific one, lookup to hit the less
753 * specific one delete the less specific rule and lookup previous values again;
754 * add a more specific rule than the existing rule, lookup again
761 struct rte_lpm *lpm = NULL;
763 uint8_t depth, next_hop_add, next_hop_return;
766 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
767 TEST_LPM_ASSERT(lpm != NULL);
769 ip = IPv4(128, 0, 0, 0);
773 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
774 TEST_LPM_ASSERT(status == 0);
776 ip = IPv4(128, 0, 0, 10);
780 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
781 TEST_LPM_ASSERT(status == 0);
783 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
784 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
786 ip = IPv4(128, 0, 0, 0);
789 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
790 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
792 ip = IPv4(128, 0, 0, 0);
795 status = rte_lpm_delete(lpm, ip, depth);
796 TEST_LPM_ASSERT(status == 0);
798 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
799 TEST_LPM_ASSERT(status == -ENOENT);
801 ip = IPv4(128, 0, 0, 10);
804 status = rte_lpm_delete(lpm, ip, depth);
805 TEST_LPM_ASSERT(status == 0);
807 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
808 TEST_LPM_ASSERT(status == -ENOENT);
/*
 * test12: tbl8 allocate/free cycling -- add an extended rule (depth > 24),
 * look it up, delete it and verify the miss, 1000 times in a loop, to
 * check that tbl8 groups are correctly allocated and released each cycle.
 * NOTE(review): garbled extract -- depth/next_hop assignments, signature,
 * braces, loop close and return are elided; code bytes left untouched.
 */
816 * Add an extended rule (i.e. depth greater than 24, lookup (hit), delete,
817 * lookup (miss) in a for loop of 1000 times. This will check tbl8 extension
825 struct rte_lpm *lpm = NULL;
827 uint8_t depth, next_hop_add, next_hop_return;
830 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
831 TEST_LPM_ASSERT(lpm != NULL);
833 ip = IPv4(128, 0, 0, 0);
837 for (i = 0; i < 1000; i++) {
838 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
839 TEST_LPM_ASSERT(status == 0);
841 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
842 TEST_LPM_ASSERT((status == 0) &&
843 (next_hop_return == next_hop_add));
845 status = rte_lpm_delete(lpm, ip, depth);
846 TEST_LPM_ASSERT(status == 0);
848 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
849 TEST_LPM_ASSERT(status == -ENOENT);
/*
 * test13: tbl8 extension and contraction against a persistent tbl24 rule.
 * A base rule (next_hop_add_1) stays in tbl24; the loop repeatedly adds a
 * deeper rule (next_hop_add_2) that extends the entry into tbl8, verifies
 * the hit, deletes it, and verifies lookup falls back to the base rule.
 * NOTE(review): garbled extract -- depth assignments, signature, braces,
 * loop close and return are elided; code bytes left untouched.
 */
858 * Add a rule to tbl24, lookup (hit), then add a rule that will extend this
859 * tbl24 entry, lookup (hit). delete the rule that caused the tbl24 extension,
860 * lookup (miss) and repeat for loop of 1000 times. This will check tbl8
861 * extension and contraction.
868 struct rte_lpm *lpm = NULL;
870 uint8_t depth, next_hop_add_1, next_hop_add_2, next_hop_return;
873 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
874 TEST_LPM_ASSERT(lpm != NULL);
876 ip = IPv4(128, 0, 0, 0);
878 next_hop_add_1 = 100;
880 status = rte_lpm_add(lpm, ip, depth, next_hop_add_1);
881 TEST_LPM_ASSERT(status == 0);
883 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
884 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
887 next_hop_add_2 = 101;
889 for (i = 0; i < 1000; i++) {
890 status = rte_lpm_add(lpm, ip, depth, next_hop_add_2);
891 TEST_LPM_ASSERT(status == 0);
893 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
894 TEST_LPM_ASSERT((status == 0) &&
895 (next_hop_return == next_hop_add_2));
897 status = rte_lpm_delete(lpm, ip, depth);
898 TEST_LPM_ASSERT(status == 0);
/* With the deeper rule removed, the base tbl24 rule must win again. */
900 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
901 TEST_LPM_ASSERT((status == 0) &&
902 (next_hop_return == next_hop_add_1));
907 status = rte_lpm_delete(lpm, ip, depth);
908 TEST_LPM_ASSERT(status == 0);
910 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
911 TEST_LPM_ASSERT(status == -ENOENT);
/*
 * test14: tbl8 group exhaustion -- add 256 depth-32 rules, each landing in
 * a distinct /24 so each consumes its own tbl8 group; once all groups are
 * used, one further extension-requiring add must fail.
 * NOTE(review): garbled extract -- depth/next_hop assignments, signature,
 * braces, loop close and return are elided; code bytes left untouched.
 */
919 * Force TBL8 extension exhaustion. Add 256 rules that require a tbl8 extension.
920 * No more tbl8 extensions will be allowed. Now add one more rule that required
921 * a tbl8 extension and get fail.
927 /* We only use depth = 32 in the loop below so we must make sure
928 * that we have enough storage for all rules at that depth*/
930 struct rte_lpm *lpm = NULL;
932 uint8_t depth, next_hop_add, next_hop_return;
935 /* Add enough space for 256 rules for every depth */
936 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 256 * 32, 0);
937 TEST_LPM_ASSERT(lpm != NULL);
941 ip = IPv4(0, 0, 0, 0);
943 /* Add 256 rules that require a tbl8 extension */
944 for (; ip <= IPv4(0, 0, 255, 0); ip += 256) {
945 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
946 TEST_LPM_ASSERT(status == 0);
948 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
949 TEST_LPM_ASSERT((status == 0) &&
950 (next_hop_return == next_hop_add));
953 /* All tbl8 extensions have been used above. Try to add one more and
955 ip = IPv4(1, 0, 0, 0);
958 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
959 TEST_LPM_ASSERT(status < 0);
/*
 * test15: rte_lpm_find_existing() -- create a named table, find it by that
 * name (must return the same handle), then look up a name that was never
 * created (must return NULL).
 * NOTE(review): garbled extract -- signature, braces and return are elided;
 * code bytes left untouched.
 */
967 * Sequence of operations for find existing lpm table
970 * - find existing table: hit
971 * - find non-existing table: miss
977 struct rte_lpm *lpm = NULL, *result = NULL;
980 lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, 256 * 32, 0);
981 TEST_LPM_ASSERT(lpm != NULL);
983 /* Try to find existing lpm */
984 result = rte_lpm_find_existing("lpm_find_existing");
985 TEST_LPM_ASSERT(result == lpm);
987 /* Try to find non-existing lpm */
988 result = rte_lpm_find_existing("lpm_find_non_existing");
989 TEST_LPM_ASSERT(result == NULL);
992 rte_lpm_delete_all(lpm);
/*
 * test16: tbl8 overload by iteration count -- keep adding depth-30 rules
 * with distinct top-24 bits until rte_lpm_add() fails, then check that the
 * failure happened exactly when all RTE_LPM_TBL8_NUM_GROUPS were consumed.
 * NOTE(review): garbled extract -- signature, loop break/close and return
 * paths are elided; code bytes left untouched.
 */
999 * test failure condition of overloading the tbl8 so no more will fit
1000 * Check we get an error return value in that case
1006 struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY,
1009 /* ip loops through all possibilities for top 24 bits of address */
1010 for (ip = 0; ip < 0xFFFFFF; ip++){
1011 /* add an entry within a different tbl8 each time, since
1012 * depth >24 and the top 24 bits are different */
1013 if (rte_lpm_add(lpm, (ip << 8) + 0xF0, 30, 0) < 0)
/* On exit, ip is the number of successful additions before failure. */
1017 if (ip != RTE_LPM_TBL8_NUM_GROUPS) {
1018 printf("Error, unexpected failure with filling tbl8 groups\n");
1019 printf("Failed after %u additions, expected after %u\n",
1020 (unsigned)ip, (unsigned)RTE_LPM_TBL8_NUM_GROUPS);
/*
 * test17: tbl8 overwrite regression -- add a /32, a covering /24, and an
 * unrelated /25; verify each lookup independently, check the three next
 * hops are distinct, then re-check the /32 and /24 lookups to prove the
 * later adds did not overwrite earlier tbl8 entries.
 * NOTE(review): garbled extract -- signature, braces, some assignments and
 * the failure-return paths after the printf()s are elided; code untouched.
 */
1028 * Test for overwriting of tbl8:
1029 * - add rule /32 and lookup
1030 * - add new rule /24 and lookup
1031 * - add third rule /25 and lookup
1032 * - lookup /32 and /24 rule to ensure the table has not been overwritten.
1037 struct rte_lpm *lpm = NULL;
1038 const uint32_t ip_10_32 = IPv4(10, 10, 10, 2);
1039 const uint32_t ip_10_24 = IPv4(10, 10, 10, 0);
1040 const uint32_t ip_20_25 = IPv4(10, 10, 20, 2);
1041 const uint8_t d_ip_10_32 = 32,
1044 const uint8_t next_hop_ip_10_32 = 100,
1045 next_hop_ip_10_24 = 105,
1046 next_hop_ip_20_25 = 111;
1047 uint8_t next_hop_return = 0;
1050 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
1051 TEST_LPM_ASSERT(lpm != NULL);
1053 if ((status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32,
1054 next_hop_ip_10_32)) < 0)
1057 status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
1058 uint8_t test_hop_10_32 = next_hop_return;
1059 TEST_LPM_ASSERT(status == 0);
1060 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
1062 if ((status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24,
1063 next_hop_ip_10_24)) < 0)
1066 status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
1067 uint8_t test_hop_10_24 = next_hop_return;
1068 TEST_LPM_ASSERT(status == 0);
1069 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
1071 if ((status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25,
1072 next_hop_ip_20_25)) < 0)
1075 status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
1076 uint8_t test_hop_20_25 = next_hop_return;
1077 TEST_LPM_ASSERT(status == 0);
1078 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);
/* The three saved hops must differ or the entries were overwritten. */
1080 if (test_hop_10_32 == test_hop_10_24) {
1081 printf("Next hop return equal\n");
1085 if (test_hop_10_24 == test_hop_20_25){
1086 printf("Next hop return equal\n");
1090 status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
1091 TEST_LPM_ASSERT(status == 0);
1092 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
1094 status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
1095 TEST_LPM_ASSERT(status == 0);
1096 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
/*
 * Performance-test parameters and a helper that prints, per prefix depth
 * 1..32, how many entries of the route table use that depth and what
 * percentage of the whole that represents.
 * NOTE(review): garbled extract -- the helper's braces, the counter
 * increment inside the inner loop, and trailing lines are elided.
 */
1104 * Lookup performance test
1107 #define ITERATIONS (1 << 10)
1108 #define BATCH_SIZE (1 << 12)
1109 #define BULK_SIZE 32
1112 print_route_distribution(const struct route_rule *table, uint32_t n)
1116 printf("Route distribution per prefix width: \n");
1117 printf("DEPTH QUANTITY (PERCENT)\n");
1118 printf("--------------------------- \n");
1121 for(i = 1; i <= 32; i++) {
1122 unsigned depth_counter = 0;
1123 double percent_hits;
1125 for (j = 0; j < n; j++)
1126 if (table[j].depth == (uint8_t) i)
1129 percent_hits = ((double)depth_counter)/((double)n) * 100;
1130 printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
/*
 * perf_test: benchmark the LPM library against the large route table from
 * test_lpm_routes.h. Measures average cycles for add, single lookup of
 * random addresses (batched), bulk lookup (rte_lpm_lookup_bulk in groups
 * of BULK_SIZE), and delete; also reports tbl24 occupancy and an estimate
 * of 64-byte cache lines touched.
 * NOTE(review): garbled extract -- signature, several loop/brace closes,
 * counter increments and the return are elided; code bytes left untouched.
 */
1138 struct rte_lpm *lpm = NULL;
1139 uint64_t begin, total_time, lpm_used_entries = 0;
1141 uint8_t next_hop_add = 0xAA, next_hop_return = 0;
1143 uint64_t cache_line_counter = 0;
/* Seed the PRNG from the TSC so each run uses different lookup targets. */
1146 rte_srand(rte_rdtsc());
1148 printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);
1150 print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);
1152 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 1000000, 0);
1153 TEST_LPM_ASSERT(lpm != NULL);
/* Measure add: time the insertion of the whole route table. */
1156 begin = rte_rdtsc();
1158 for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
1159 if (rte_lpm_add(lpm, large_route_table[i].ip,
1160 large_route_table[i].depth, next_hop_add) == 0)
1164 total_time = rte_rdtsc() - begin;
1166 printf("Unique added entries = %d\n", status);
1167 /* Obtain add statistics. */
1168 for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
1169 if (lpm->tbl24[i].valid)
/* A new 64-byte cache line is counted each time usage grows past the
 * previous high-water mark within the scan. */
1173 if ((uint64_t)count < lpm_used_entries) {
1174 cache_line_counter++;
1175 count = lpm_used_entries;
1180 printf("Used table 24 entries = %u (%g%%)\n",
1181 (unsigned) lpm_used_entries,
1182 (lpm_used_entries * 100.0) / RTE_LPM_TBL24_NUM_ENTRIES);
1183 printf("64 byte Cache entries used = %u (%u bytes)\n",
1184 (unsigned) cache_line_counter, (unsigned) cache_line_counter * 64);
1186 printf("Average LPM Add: %g cycles\n", (double)total_time / NUM_ROUTE_ENTRIES);
1188 /* Measure single Lookup */
1192 for (i = 0; i < ITERATIONS; i ++) {
1193 static uint32_t ip_batch[BATCH_SIZE];
1195 for (j = 0; j < BATCH_SIZE; j ++)
1196 ip_batch[j] = rte_rand();
1198 /* Lookup per batch */
1199 begin = rte_rdtsc();
1201 for (j = 0; j < BATCH_SIZE; j ++) {
1202 if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
1206 total_time += rte_rdtsc() - begin;
1209 printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
1210 (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
1211 (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
1213 /* Measure bulk Lookup */
1216 for (i = 0; i < ITERATIONS; i ++) {
1217 static uint32_t ip_batch[BATCH_SIZE];
1218 uint16_t next_hops[BULK_SIZE];
1220 /* Create array of random IP addresses */
1221 for (j = 0; j < BATCH_SIZE; j ++)
1222 ip_batch[j] = rte_rand();
1224 /* Lookup per batch */
1225 begin = rte_rdtsc();
1226 for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
1228 rte_lpm_lookup_bulk(lpm, &ip_batch[j], next_hops, BULK_SIZE);
1229 for (k = 0; k < BULK_SIZE; k++)
1230 if (unlikely(!(next_hops[k] & RTE_LPM_LOOKUP_SUCCESS)))
1234 total_time += rte_rdtsc() - begin;
1236 printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
1237 (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
1238 (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
/* Measure delete: remove every route and time the whole pass. */
1242 begin = rte_rdtsc();
1244 for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
1245 /* rte_lpm_delete(lpm, ip, depth) */
1246 status += rte_lpm_delete(lpm, large_route_table[i].ip,
1247 large_route_table[i].depth);
1250 total_time += rte_rdtsc() - begin;
1252 printf("Average LPM Delete: %g cycles\n",
1253 (double)total_time / NUM_ROUTE_ENTRIES);
1255 rte_lpm_delete_all(lpm);
/*
 * test_lpm: test driver -- run every entry in tests[] in order, report each
 * failure, and return the last failing status (0 when all pass). The #else
 * branch provides a stub when the LPM library is compiled out.
 * NOTE(review): garbled extract -- signature, braces, the failure condition
 * around the printf and the stub's return are elided; code left untouched.
 */
1262 * Do all unit and performance tests.
1269 int status, global_status = 0;
1271 for (i = 0; i < NUM_LPM_TESTS; i++) {
1272 status = tests[i]();
1274 printf("ERROR: LPM Test %s: FAIL\n", RTE_STR(tests[i]));
1275 global_status = status;
1279 return global_status;
1282 #else /* RTE_LIBRTE_LPM */
1287 printf("The LPM library is not included in this build\n");
1291 #endif /* RTE_LIBRTE_LPM */