4 * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 #include <sys/queue.h>
40 #include <cmdline_parse.h>
42 #include <rte_common.h>
43 #include <rte_cycles.h>
44 #include <rte_memory.h>
45 #include <rte_random.h>
46 #include <rte_branch_prediction.h>
53 #include "test_lpm_routes.h"
/* Number of random lookups performed by the performance test (test15)
 * and the batch size used to amortize the rdtsc timing overhead. */
57 #define ITERATIONS (1 << 20)
58 #define BATCH_SIZE (1 << 13)
/* Test assertion helper: on failure, prints the failing source line.
 * NOTE(review): the rest of the macro body is not visible in this view;
 * presumably it returns a negative status - confirm against full file. */
60 #define TEST_LPM_ASSERT(cond) do { \
62 printf("Error at line %d: \n", __LINE__); \
/* Signature shared by every LPM test case: returns 0 on pass,
 * a negative value on failure. */
69 typedef int32_t (* rte_lpm_test)(void);
/* Forward declarations of all test cases registered in tests[] below. */
71 static int32_t test0(void);
72 static int32_t test1(void);
73 static int32_t test2(void);
74 static int32_t test3(void);
75 static int32_t test4(void);
76 static int32_t test5(void);
77 static int32_t test6(void);
78 static int32_t test7(void);
79 static int32_t test8(void);
80 static int32_t test9(void);
81 static int32_t test10(void);
82 static int32_t test11(void);
83 static int32_t test12(void);
84 static int32_t test13(void);
85 static int32_t test14(void);
86 static int32_t test15(void);
87 static int32_t test16(void);
88 static int32_t test17(void);
89 static int32_t test18(void);
/* Table of test functions executed in order by the test driver.
 * NOTE(review): the initializer list itself is not visible in this view. */
91 rte_lpm_test tests[] = {
/* Element count of tests[]; valid only on the actual array, not a pointer. */
114 #define NUM_LPM_TESTS (sizeof(tests)/sizeof(tests[0]))
/* Default rule capacity used when creating LPM tables in the unit tests. */
116 #define MAX_RULES 256
122 * Check that rte_lpm_create fails gracefully for incorrect user input
/* test0 body (signature/braces not visible in this view): every call below
 * passes one invalid argument and must yield a NULL table. */
128 struct rte_lpm *lpm = NULL;
130 /* rte_lpm_create: lpm name == NULL */
131 lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
132 TEST_LPM_ASSERT(lpm == NULL);
134 /* rte_lpm_create: max_rules = 0 */
135 /* Note: __func__ inserts the function name, in this case "test0". */
136 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 0, RTE_LPM_HEAP);
137 TEST_LPM_ASSERT(lpm == NULL);
139 /* rte_lpm_create: mem_location is not RTE_LPM_HEAP or not MEMZONE */
140 /* Note: __func__ inserts the function name, in this case "test0". */
141 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 2);
142 TEST_LPM_ASSERT(lpm == NULL);
/* mem_location = -1 is likewise outside the valid enum range. */
144 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, -1);
145 TEST_LPM_ASSERT(lpm == NULL);
147 /* socket_id < -1 is invalid */
148 lpm = rte_lpm_create(__func__, -2, MAX_RULES, RTE_LPM_HEAP);
149 TEST_LPM_ASSERT(lpm == NULL);
156 * Create lpm table then delete lpm table 100 times
157 * Use a slightly different rules size each time
/* test1 body (signature/braces not visible): exercises create/free cycling.
 * NOTE(review): the rte_lpm_free() call inside the loop is not visible here. */
162 struct rte_lpm *lpm = NULL;
165 /* rte_lpm_free: Free NULL */
166 for (i = 0; i < 100; i++) {
/* Vary capacity each iteration to exercise different allocation sizes. */
167 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES - i,
169 TEST_LPM_ASSERT(lpm != NULL);
174 /* Can not test free so return success */
180 * Call rte_lpm_free for NULL pointer user input. Note: free has no return and
181 * therefore it is impossible to check for failure but this test is added to
182 * increase function coverage metrics and to validate that freeing null does
/* test2 body (signature/braces not visible): create a valid table, then
 * presumably free it and free NULL - the free calls are outside this view. */
188 struct rte_lpm *lpm = NULL;
190 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
191 TEST_LPM_ASSERT(lpm != NULL);
200 * Check that rte_lpm_add fails gracefully for incorrect user input arguments
/* test3 body (signature/braces not visible). */
205 struct rte_lpm *lpm = NULL;
206 uint32_t ip = IPv4(0, 0, 0, 0);
207 uint8_t depth = 24, next_hop = 100;
210 /* rte_lpm_add: lpm == NULL */
211 status = rte_lpm_add(NULL, ip, depth, next_hop);
212 TEST_LPM_ASSERT(status < 0);
214 /* Create valid lpm to use in rest of test. */
215 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
216 TEST_LPM_ASSERT(lpm != NULL);
218 /* rte_lpm_add: depth < 1 */
219 status = rte_lpm_add(lpm, ip, 0, next_hop);
220 TEST_LPM_ASSERT(status < 0);
222 /* rte_lpm_add: depth > MAX_DEPTH */
223 status = rte_lpm_add(lpm, ip, (MAX_DEPTH + 1), next_hop);
224 TEST_LPM_ASSERT(status < 0);
233 * Check that rte_lpm_delete fails gracefully for incorrect user input
/* test4 body (signature/braces not visible); mirrors test3 for delete. */
239 struct rte_lpm *lpm = NULL;
240 uint32_t ip = IPv4(0, 0, 0, 0);
244 /* rte_lpm_delete: lpm == NULL */
245 status = rte_lpm_delete(NULL, ip, depth);
246 TEST_LPM_ASSERT(status < 0);
248 /* Create valid lpm to use in rest of test. */
249 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
250 TEST_LPM_ASSERT(lpm != NULL);
252 /* rte_lpm_delete: depth < 1 */
253 status = rte_lpm_delete(lpm, ip, 0);
254 TEST_LPM_ASSERT(status < 0);
256 /* rte_lpm_delete: depth > MAX_DEPTH */
257 status = rte_lpm_delete(lpm, ip, (MAX_DEPTH + 1));
258 TEST_LPM_ASSERT(status < 0);
267 * Check that rte_lpm_lookup fails gracefully for incorrect user input
/* test5 body (signature/braces not visible). Argument checking in
 * rte_lpm_lookup is compiled in only with RTE_LIBRTE_LPM_DEBUG, hence
 * the conditional compilation. */
273 #if defined(RTE_LIBRTE_LPM_DEBUG)
274 struct rte_lpm *lpm = NULL;
275 uint32_t ip = IPv4(0, 0, 0, 0);
276 uint8_t next_hop_return = 0;
279 /* rte_lpm_lookup: lpm == NULL */
280 status = rte_lpm_lookup(NULL, ip, &next_hop_return);
281 TEST_LPM_ASSERT(status < 0);
283 /* Create valid lpm to use in rest of test. */
284 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
285 TEST_LPM_ASSERT(lpm != NULL);
/* rte_lpm_lookup: next_hop pointer == NULL */
288 status = rte_lpm_lookup(lpm, ip, NULL);
289 TEST_LPM_ASSERT(status < 0);
300 * Call add, lookup and delete for a single rule with depth <= 24
/* test6 body (signature/braces not visible): depth 24 stays entirely in
 * the tbl24 table (no tbl8 extension). */
305 struct rte_lpm *lpm = NULL;
306 uint32_t ip = IPv4(0, 0, 0, 0);
307 uint8_t depth = 24, next_hop_add = 100, next_hop_return = 0;
310 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
311 TEST_LPM_ASSERT(lpm != NULL);
313 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
314 TEST_LPM_ASSERT(status == 0);
316 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
317 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
319 status = rte_lpm_delete(lpm, ip, depth);
320 TEST_LPM_ASSERT(status == 0);
/* After delete, the lookup must miss. */
322 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
323 TEST_LPM_ASSERT(status == -ENOENT);
332 * Call add, lookup and delete for a single rule with depth > 24
/* test7 body (signature/braces not visible): depth 32 forces a tbl8
 * extension of the tbl24 entry. */
338 struct rte_lpm *lpm = NULL;
339 uint32_t ip = IPv4(0, 0, 0, 0);
340 uint8_t depth = 32, next_hop_add = 100, next_hop_return = 0;
343 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
344 TEST_LPM_ASSERT(lpm != NULL);
346 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
347 TEST_LPM_ASSERT(status == 0);
349 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
350 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
352 status = rte_lpm_delete(lpm, ip, depth);
353 TEST_LPM_ASSERT(status == 0);
/* After delete, the lookup must miss. */
355 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
356 TEST_LPM_ASSERT(status == -ENOENT);
365 * Use rte_lpm_add to add rules which effect only the second half of the lpm
366 * table. Use all possible depths ranging from 1..32. Set the next hop = to the
367 * depth. Check lookup hit for on every add and check for lookup miss on the
368 * first half of the lpm table after each add. Finally delete all rules going
369 * backwards (i.e. from depth = 32 ..1) and carry out a lookup after each
370 * delete. The lookup should return the next_hop_add value related to the
371 * previous depth value (i.e. depth -1).
/* test8 body (signature/braces not visible). ip1 is the last address of the
 * lower half of the IPv4 space, ip2 the first address of the upper half, so
 * rules on ip2 can never match ip1 at any depth >= 1. */
376 struct rte_lpm *lpm = NULL;
377 uint32_t ip1 = IPv4(127, 255, 255, 255), ip2 = IPv4(128, 0, 0, 0);
378 uint8_t depth, next_hop_add, next_hop_return;
381 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
382 TEST_LPM_ASSERT(lpm != NULL);
384 /* Loop with rte_lpm_add. */
385 for (depth = 1; depth <= 32; depth++) {
386 /* Let the next_hop_add value = depth. Just for change. */
387 next_hop_add = depth;
389 status = rte_lpm_add(lpm, ip2, depth, next_hop_add);
390 TEST_LPM_ASSERT(status == 0);
392 /* Check IP in first half of tbl24 which should be empty. */
393 status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
394 TEST_LPM_ASSERT(status == -ENOENT);
396 status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
397 TEST_LPM_ASSERT((status == 0) &&
398 (next_hop_return == next_hop_add));
401 /* Loop with rte_lpm_delete. */
402 for (depth = 32; depth >= 1; depth--) {
/* After deleting depth d, the longest remaining match is depth d-1,
 * whose next hop was set to (d - 1) in the add loop above. */
403 next_hop_add = (uint8_t) (depth - 1);
405 status = rte_lpm_delete(lpm, ip2, depth);
406 TEST_LPM_ASSERT(status == 0);
408 status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
/* NOTE(review): the branch distinguishing depth > 1 (hit expected) from
 * depth == 1 (miss expected) is not visible in this view. */
411 TEST_LPM_ASSERT((status == 0) &&
412 (next_hop_return == next_hop_add));
415 TEST_LPM_ASSERT(status == -ENOENT);
/* First half of the table must have stayed empty throughout. */
418 status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
419 TEST_LPM_ASSERT(status == -ENOENT);
429 * - Add & lookup to hit invalid TBL24 entry
430 * - Add & lookup to hit valid TBL24 entry not extended
431 * - Add & lookup to hit valid extended TBL24 entry with invalid TBL8 entry
432 * - Add & lookup to hit valid extended TBL24 entry with valid TBL8 entry
/* test9 body (signature/braces not visible). Each scenario below ends with
 * rte_lpm_delete_all() so scenarios are independent. NOTE(review): several
 * depth/next_hop assignments fall on lines not visible in this view. */
438 struct rte_lpm *lpm = NULL;
439 uint32_t ip, ip_1, ip_2;
440 uint8_t depth, depth_1, depth_2, next_hop_add, next_hop_add_1,
441 next_hop_add_2, next_hop_return;
444 /* Add & lookup to hit invalid TBL24 entry */
445 ip = IPv4(128, 0, 0, 0);
449 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
450 TEST_LPM_ASSERT(lpm != NULL);
452 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
453 TEST_LPM_ASSERT(status == 0);
455 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
456 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
458 status = rte_lpm_delete(lpm, ip, depth);
459 TEST_LPM_ASSERT(status == 0);
461 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
462 TEST_LPM_ASSERT(status == -ENOENT);
464 rte_lpm_delete_all(lpm);
466 /* Add & lookup to hit valid TBL24 entry not extended */
467 ip = IPv4(128, 0, 0, 0);
471 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
472 TEST_LPM_ASSERT(status == 0);
474 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
475 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
480 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
481 TEST_LPM_ASSERT(status == 0);
483 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
484 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
488 status = rte_lpm_delete(lpm, ip, depth);
489 TEST_LPM_ASSERT(status == 0);
493 status = rte_lpm_delete(lpm, ip, depth);
494 TEST_LPM_ASSERT(status == 0);
496 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
497 TEST_LPM_ASSERT(status == -ENOENT);
499 rte_lpm_delete_all(lpm);
501 /* Add & lookup to hit valid extended TBL24 entry with invalid TBL8
503 ip = IPv4(128, 0, 0, 0);
507 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
508 TEST_LPM_ASSERT(status == 0);
510 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
511 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
513 ip = IPv4(128, 0, 0, 5);
517 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
518 TEST_LPM_ASSERT(status == 0);
520 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
521 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
523 status = rte_lpm_delete(lpm, ip, depth);
524 TEST_LPM_ASSERT(status == 0);
526 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
527 TEST_LPM_ASSERT(status == -ENOENT);
529 ip = IPv4(128, 0, 0, 0);
533 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
534 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
536 status = rte_lpm_delete(lpm, ip, depth);
537 TEST_LPM_ASSERT(status == 0);
539 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
540 TEST_LPM_ASSERT(status == -ENOENT);
542 rte_lpm_delete_all(lpm);
544 /* Add & lookup to hit valid extended TBL24 entry with valid TBL8
/* Two rules sharing the same /24 prefix but different depths/next hops,
 * so both live in the same tbl8 group. */
546 ip_1 = IPv4(128, 0, 0, 0);
548 next_hop_add_1 = 101;
550 ip_2 = IPv4(128, 0, 0, 5);
552 next_hop_add_2 = 102;
556 status = rte_lpm_add(lpm, ip_1, depth_1, next_hop_add_1);
557 TEST_LPM_ASSERT(status == 0);
559 status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
560 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
562 status = rte_lpm_add(lpm, ip_2, depth_2, next_hop_add_2);
563 TEST_LPM_ASSERT(status == 0);
565 status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
566 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2));
/* After removing the more specific rule, ip_2 must fall back to rule 1. */
568 status = rte_lpm_delete(lpm, ip_2, depth_2);
569 TEST_LPM_ASSERT(status == 0);
571 status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
572 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
574 status = rte_lpm_delete(lpm, ip_1, depth_1);
575 TEST_LPM_ASSERT(status == 0);
577 status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
578 TEST_LPM_ASSERT(status == -ENOENT);
588 * - Add rule that covers a TBL24 range previously invalid & lookup (& delete &
590 * - Add rule that extends a TBL24 invalid entry & lookup (& delete & lookup)
591 * - Add rule that extends a TBL24 valid entry & lookup for both rules (&
593 * - Add rule that updates the next hop in TBL24 & lookup (& delete & lookup)
594 * - Add rule that updates the next hop in TBL8 & lookup (& delete & lookup)
595 * - Delete a rule that is not present in the TBL24 & lookup
596 * - Delete a rule that is not present in the TBL8 & lookup
/* test10 body (signature/braces not visible). Scenarios separated by
 * rte_lpm_delete_all(). NOTE(review): many depth/next_hop assignments fall
 * on lines not visible in this view. */
603 struct rte_lpm *lpm = NULL;
605 uint8_t depth, next_hop_add, next_hop_return;
608 /* Add rule that covers a TBL24 range previously invalid & lookup
609 * (& delete & lookup) */
610 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
611 TEST_LPM_ASSERT(lpm != NULL);
613 ip = IPv4(128, 0, 0, 0);
617 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
618 TEST_LPM_ASSERT(status == 0);
620 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
621 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
623 status = rte_lpm_delete(lpm, ip, depth);
624 TEST_LPM_ASSERT(status == 0);
626 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
627 TEST_LPM_ASSERT(status == -ENOENT);
629 rte_lpm_delete_all(lpm);
/* Add rule that extends a TBL24 invalid entry & lookup (& delete). */
631 ip = IPv4(128, 0, 0, 0);
635 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
636 TEST_LPM_ASSERT(status == 0);
638 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
639 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
641 status = rte_lpm_delete(lpm, ip, depth);
642 TEST_LPM_ASSERT(status == 0);
644 rte_lpm_delete_all(lpm);
646 /* Add rule that extends a TBL24 valid entry & lookup for both rules
647 * (& delete & lookup) */
649 ip = IPv4(128, 0, 0, 0);
653 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
654 TEST_LPM_ASSERT(status == 0);
656 ip = IPv4(128, 0, 0, 10);
660 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
661 TEST_LPM_ASSERT(status == 0);
663 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
664 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
666 ip = IPv4(128, 0, 0, 0);
669 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
670 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
672 ip = IPv4(128, 0, 0, 0);
675 status = rte_lpm_delete(lpm, ip, depth);
676 TEST_LPM_ASSERT(status == 0);
678 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
679 TEST_LPM_ASSERT(status == -ENOENT);
681 ip = IPv4(128, 0, 0, 10);
684 status = rte_lpm_delete(lpm, ip, depth);
685 TEST_LPM_ASSERT(status == 0);
687 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
688 TEST_LPM_ASSERT(status == -ENOENT);
690 rte_lpm_delete_all(lpm);
692 /* Add rule that updates the next hop in TBL24 & lookup
693 * (& delete & lookup) */
695 ip = IPv4(128, 0, 0, 0);
699 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
700 TEST_LPM_ASSERT(status == 0);
702 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
703 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
/* Re-add same prefix with a new next hop (assigned on a hidden line):
 * must overwrite, not duplicate. */
707 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
708 TEST_LPM_ASSERT(status == 0);
710 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
711 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
713 status = rte_lpm_delete(lpm, ip, depth);
714 TEST_LPM_ASSERT(status == 0);
716 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
717 TEST_LPM_ASSERT(status == -ENOENT);
719 rte_lpm_delete_all(lpm);
721 /* Add rule that updates the next hop in TBL8 & lookup
722 * (& delete & lookup) */
724 ip = IPv4(128, 0, 0, 0);
728 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
729 TEST_LPM_ASSERT(status == 0);
731 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
732 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
736 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
737 TEST_LPM_ASSERT(status == 0);
739 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
740 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
742 status = rte_lpm_delete(lpm, ip, depth);
743 TEST_LPM_ASSERT(status == 0);
745 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
746 TEST_LPM_ASSERT(status == -ENOENT);
748 rte_lpm_delete_all(lpm);
750 /* Delete a rule that is not present in the TBL24 & lookup */
752 ip = IPv4(128, 0, 0, 0);
756 status = rte_lpm_delete(lpm, ip, depth);
757 TEST_LPM_ASSERT(status < 0);
759 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
760 TEST_LPM_ASSERT(status == -ENOENT);
762 rte_lpm_delete_all(lpm);
764 /* Delete a rule that is not present in the TBL8 & lookup */
766 ip = IPv4(128, 0, 0, 0);
770 status = rte_lpm_delete(lpm, ip, depth);
771 TEST_LPM_ASSERT(status < 0);
773 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
774 TEST_LPM_ASSERT(status == -ENOENT);
783 * Add two rules, lookup to hit the more specific one, lookup to hit the less
784 * specific one delete the less specific rule and lookup previous values again;
785 * add a more specific rule than the existing rule, lookup again
/* test11 body (signature/braces not visible). NOTE(review): depth and
 * next_hop_add assignments fall on lines not visible in this view. */
792 struct rte_lpm *lpm = NULL;
794 uint8_t depth, next_hop_add, next_hop_return;
797 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
798 TEST_LPM_ASSERT(lpm != NULL);
800 ip = IPv4(128, 0, 0, 0);
804 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
805 TEST_LPM_ASSERT(status == 0);
807 ip = IPv4(128, 0, 0, 10);
811 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
812 TEST_LPM_ASSERT(status == 0);
814 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
815 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
817 ip = IPv4(128, 0, 0, 0);
820 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
821 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
823 ip = IPv4(128, 0, 0, 0);
826 status = rte_lpm_delete(lpm, ip, depth);
827 TEST_LPM_ASSERT(status == 0);
829 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
830 TEST_LPM_ASSERT(status == -ENOENT);
832 ip = IPv4(128, 0, 0, 10);
835 status = rte_lpm_delete(lpm, ip, depth);
836 TEST_LPM_ASSERT(status == 0);
838 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
839 TEST_LPM_ASSERT(status == -ENOENT);
848 * Add an extended rule (i.e. depth greater than 24, lookup (hit), delete,
849 * lookup (miss) in a for loop of 1000 times. This will check tbl8 extension
/* test12 body (signature/braces not visible): repeatedly allocating and
 * releasing a tbl8 group must not leak groups. */
857 struct rte_lpm *lpm = NULL;
859 uint8_t depth, next_hop_add, next_hop_return;
862 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
863 TEST_LPM_ASSERT(lpm != NULL);
865 ip = IPv4(128, 0, 0, 0);
869 for (i = 0; i < 1000; i++) {
870 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
871 TEST_LPM_ASSERT(status == 0);
873 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
874 TEST_LPM_ASSERT((status == 0) &&
875 (next_hop_return == next_hop_add));
877 status = rte_lpm_delete(lpm, ip, depth);
878 TEST_LPM_ASSERT(status == 0);
880 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
881 TEST_LPM_ASSERT(status == -ENOENT);
891 * Add a rule to tbl24, lookup (hit), then add a rule that will extend this
892 * tbl24 entry, lookup (hit). delete the rule that caused the tbl24 extension,
893 * lookup (miss) and repeat for loop of 1000 times. This will check tbl8
894 * extension and contraction.
/* test13 body (signature/braces not visible). */
901 struct rte_lpm *lpm = NULL;
903 uint8_t depth, next_hop_add_1, next_hop_add_2, next_hop_return;
906 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
907 TEST_LPM_ASSERT(lpm != NULL);
/* Base rule; its depth assignment is on a line not visible here. */
909 ip = IPv4(128, 0, 0, 0);
911 next_hop_add_1 = 100;
913 status = rte_lpm_add(lpm, ip, depth, next_hop_add_1);
914 TEST_LPM_ASSERT(status == 0);
916 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
917 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
920 next_hop_add_2 = 101;
922 for (i = 0; i < 1000; i++) {
923 status = rte_lpm_add(lpm, ip, depth, next_hop_add_2);
924 TEST_LPM_ASSERT(status == 0);
926 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
927 TEST_LPM_ASSERT((status == 0) &&
928 (next_hop_return == next_hop_add_2));
930 status = rte_lpm_delete(lpm, ip, depth);
931 TEST_LPM_ASSERT(status == 0);
/* After deleting the extending rule, lookup falls back to the base rule. */
933 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
934 TEST_LPM_ASSERT((status == 0) &&
935 (next_hop_return == next_hop_add_1));
940 status = rte_lpm_delete(lpm, ip, depth);
941 TEST_LPM_ASSERT(status == 0);
943 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
944 TEST_LPM_ASSERT(status == -ENOENT);
953 * Force TBL8 extension exhaustion. Add 256 rules that require a tbl8 extension.
954 * No more tbl8 extensions will be allowed. Now add one more rule that required
955 * a tbl8 extension and get fail.
/* test14 body (signature/braces not visible). */
961 /* We only use depth = 32 in the loop below so we must make sure
962 * that we have enough storage for all rules at that depth*/
964 struct rte_lpm *lpm = NULL;
966 uint8_t depth, next_hop_add, next_hop_return;
969 /* Add enough space for 256 rules for every depth */
970 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 256 * 32, RTE_LPM_HEAP);
971 TEST_LPM_ASSERT(lpm != NULL);
973 ip = IPv4(0, 0, 0, 0);
977 /* Add 256 rules that require a tbl8 extension */
/* Each /24 gets its own tbl8 group; step of 256 moves to the next /24. */
978 for (ip = 0; ip <= IPv4(0, 0, 255, 0); ip += 256) {
979 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
980 TEST_LPM_ASSERT(status == 0);
982 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
983 TEST_LPM_ASSERT((status == 0) &&
984 (next_hop_return == next_hop_add));
987 /* All tbl8 extensions have been used above. Try to add one more and
989 ip = IPv4(1, 0, 0, 0);
992 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
993 TEST_LPM_ASSERT(status < 0);
1002 * Lookup performance test using Mae West Routing Table
/* Convert a prefix depth (1..32) to a 32-bit network mask via arithmetic
 * right shift of a negative value (sign extension fills the high bits).
 * NOTE(review): right-shifting a negative signed int is implementation-
 * defined in C; works on the two's-complement/arithmetic-shift targets
 * DPDK supports, but (uint32_t)(~0u << (32 - depth)) would be portable. */
1004 static inline uint32_t
1005 depth_to_mask(uint8_t depth) {
1006 return (int)0x80000000 >> (depth - 1);
/* Count duplicate rules in a route table: two rules are duplicates when
 * they share the same depth and the same masked network address. O(n^2)
 * pairwise scan; acceptable for a one-off test-setup report.
 * NOTE(review): count increment and return are on lines not visible here. */
1010 rule_table_check_for_duplicates(const struct route_rule *table, uint32_t n){
1011 unsigned i, j, count;
1014 for (i = 0; i < (n - 1); i++) {
1015 uint8_t depth1 = table[i].depth;
1016 uint32_t ip1_masked = table[i].ip & depth_to_mask(depth1);
1018 for (j = (i + 1); j <n; j ++) {
1019 uint8_t depth2 = table[j].depth;
1020 uint32_t ip2_masked = table[j].ip &
1021 depth_to_mask(depth2);
1023 if ((depth1 == depth2) && (ip1_masked == ip2_masked)){
1024 printf("Rule %u is a duplicate of rule %u\n",
/* Print, for each prefix depth 1..32, how many rules in the table use that
 * depth and what percentage of the table they represent. */
1035 rule_table_characterisation(const struct route_rule *table, uint32_t n){
1038 printf("DEPTH QUANTITY (PERCENT)\n");
1039 printf("--------------------------------- \n");
1041 for(i = 1; i <= 32; i++) {
1042 unsigned depth_counter = 0;
1043 double percent_hits;
1045 for (j = 0; j < n; j++) {
1046 if (table[j].depth == (uint8_t) i)
1050 percent_hits = ((double)depth_counter)/((double)n) * 100;
1052 printf("%u - %5u (%.2f)\n",
1053 i, depth_counter, percent_hits);
/* Integer division rounded to the nearest value (instead of truncation):
 * (2*a + b) / (2*b) == round(a / b) for positive operands. Caller must
 * ensure 2*dividend does not overflow uint64_t and divisor != 0. */
1059 static inline uint64_t
1060 div64(uint64_t dividend, uint64_t divisor)
1062 return ((2 * dividend) + divisor) / (2 * divisor);
/* test15 body (signature/braces not visible): performance characterisation
 * using the Mae West routing table - measures average cycles for add,
 * random lookup, and delete, plus tbl24 occupancy statistics. */
1068 struct rte_lpm *lpm = NULL;
1069 uint64_t begin, end, total_time, lpm_used_entries = 0;
1070 unsigned avg_ticks, i, j;
1071 uint8_t next_hop_add = 0, next_hop_return = 0;
1074 printf("Using Mae West routing table from www.oiforum.com\n");
1075 printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);
1076 printf("No. duplicate routes = %u\n\n", (unsigned)
1077 rule_table_check_for_duplicates(mae_west_tbl, NUM_ROUTE_ENTRIES));
1078 printf("Route distribution per prefix width: \n");
1079 rule_table_characterisation(mae_west_tbl,
1080 (uint32_t) NUM_ROUTE_ENTRIES);
1083 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 1000000,
1085 TEST_LPM_ASSERT(lpm != NULL);
/* Time the bulk add of the whole routing table. */
1091 begin = rte_rdtsc();
1093 for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
1094 /* rte_lpm_add(lpm, ip, depth, next_hop_add) */
1095 status += rte_lpm_add(lpm, mae_west_tbl[i].ip,
1096 mae_west_tbl[i].depth, next_hop_add);
1101 TEST_LPM_ASSERT(status == 0);
1103 /* Calculate average cycles per add. */
1104 avg_ticks = (uint32_t) div64((end - begin),
1105 (uint64_t) NUM_ROUTE_ENTRIES);
1107 uint64_t cache_line_counter = 0;
1110 /* Obtain add statistics. */
1111 for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
1112 if (lpm->tbl24[i].valid)
/* A growing used-entry count inside this stride means this 64-byte
 * cache line contains at least one valid entry. */
1116 if (count < lpm_used_entries) {
1117 cache_line_counter++;
1118 count = lpm_used_entries;
1123 printf("Number of table 24 entries = %u\n",
1124 (unsigned) RTE_LPM_TBL24_NUM_ENTRIES);
1125 printf("Used table 24 entries = %u\n",
1126 (unsigned) lpm_used_entries);
1127 printf("Percentage of table 24 entries used = %u\n",
1128 (unsigned) div64((lpm_used_entries * 100) ,
1129 RTE_LPM_TBL24_NUM_ENTRIES));
1130 printf("64 byte Cache entries used = %u \n",
1131 (unsigned) cache_line_counter);
1132 printf("Cache Required = %u bytes\n\n",
1133 (unsigned) cache_line_counter * 64);
1135 printf("Average LPM Add: %u cycles\n", avg_ticks);
1139 /* Choose random seed. */
/* Lookup timing: random addresses, generated outside the timed region
 * so rte_rand() cost is excluded from the measurement. */
1143 for (i = 0; i < (ITERATIONS / BATCH_SIZE); i ++) {
1144 static uint32_t ip_batch[BATCH_SIZE];
1145 uint64_t begin_batch, end_batch;
1147 /* Generate a batch of random numbers */
1148 for (j = 0; j < BATCH_SIZE; j ++) {
1149 ip_batch[j] = rte_rand();
1152 /* Lookup per batch */
1153 begin_batch = rte_rdtsc();
1155 for (j = 0; j < BATCH_SIZE; j ++) {
1156 status += rte_lpm_lookup(lpm, ip_batch[j],
1160 end_batch = rte_rdtsc();
1161 printf("status = %d\r", next_hop_return);
/* status accumulates 0 or -ENOENT per lookup, so it must be <= 0. */
1162 TEST_LPM_ASSERT(status < 1);
1164 /* Accumulate batch time */
1165 total_time += (end_batch - begin_batch);
1167 TEST_LPM_ASSERT((status < -ENOENT) ||
1168 (next_hop_return == next_hop_add));
1171 avg_ticks = (uint32_t) div64(total_time, ITERATIONS);
1172 printf("Average LPM Lookup: %u cycles\n", avg_ticks);
/* Time the bulk delete of the whole routing table. */
1176 begin = rte_rdtsc();
1178 for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
1179 /* rte_lpm_delete(lpm, ip, depth) */
1180 status += rte_lpm_delete(lpm, mae_west_tbl[i].ip,
1181 mae_west_tbl[i].depth);
1186 TEST_LPM_ASSERT(status == 0);
1188 avg_ticks = (uint32_t) div64((end - begin), NUM_ROUTE_ENTRIES);
1190 printf("Average LPM Delete: %u cycles\n", avg_ticks);
1192 rte_lpm_delete_all(lpm);
/* NOTE(review): "fbk hash table" below looks like a copy-paste from the
 * hash tests; this test exercises rte_lpm_find_existing. */
1201 * Sequence of operations for find existing fbk hash table
1204 * - find existing table: hit
1205 * - find non-existing table: miss
1208 int32_t test16(void)
1210 struct rte_lpm *lpm = NULL, *result = NULL;
/* Create with a fixed name so it can be looked up by name below. */
1213 lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, 256 * 32, RTE_LPM_HEAP);
1214 TEST_LPM_ASSERT(lpm != NULL);
1216 /* Try to find existing lpm */
1217 result = rte_lpm_find_existing("lpm_find_existing");
1218 TEST_LPM_ASSERT(result == lpm);
1220 /* Try to find non-existing lpm */
1221 result = rte_lpm_find_existing("lpm_find_non_existing");
1222 TEST_LPM_ASSERT(result == NULL);
1225 rte_lpm_delete_all(lpm);
1232 * test failure condition of overloading the tbl8 so no more will fit
1233 * Check we get an error return value in that case
/* test17 body (signature/braces not visible). */
1239 struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY,
1240 256 * 32, RTE_LPM_HEAP);
1242 printf("Testing filling tbl8's\n");
1244 /* ip loops through all possibilities for top 24 bits of address */
1245 for (ip = 0; ip < 0xFFFFFF; ip++){
1246 /* add an entry within a different tbl8 each time, since
1247 * depth >24 and the top 24 bits are different */
1248 if (rte_lpm_add(lpm, (ip << 8) + 0xF0, 30, 0) < 0)
/* The add loop must fail exactly when all tbl8 groups are consumed. */
1252 if (ip != RTE_LPM_TBL8_NUM_GROUPS) {
1253 printf("Error, unexpected failure with filling tbl8 groups\n");
1254 printf("Failed after %u additions, expected after %u\n",
1255 (unsigned)ip, (unsigned)RTE_LPM_TBL8_NUM_GROUPS);
1264 * Test for overwriting of tbl8:
1265 * - add rule /32 and lookup
1266 * - add new rule /24 and lookup
1267 * - add third rule /25 and lookup
1268 * - lookup /32 and /24 rule to ensure the table has not been overwritten.
/* test18 body (signature/braces not visible). The /25 rule lives in a
 * different tbl8 group (10.10.20.x) than the /32 and /24 (10.10.10.x),
 * so adding it must not disturb the first group. */
1273 struct rte_lpm *lpm = NULL;
1274 const uint32_t ip_10_32 = IPv4(10, 10, 10, 2);
1275 const uint32_t ip_10_24 = IPv4(10, 10, 10, 0);
1276 const uint32_t ip_20_25 = IPv4(10, 10, 20, 2);
1277 const uint8_t d_ip_10_32 = 32,
1280 const uint8_t next_hop_ip_10_32 = 100,
1281 next_hop_ip_10_24 = 105,
1282 next_hop_ip_20_25 = 111;
1283 uint8_t next_hop_return = 0;
1286 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
1287 TEST_LPM_ASSERT(lpm != NULL);
1289 status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32, next_hop_ip_10_32);
1290 TEST_LPM_ASSERT(status == 0);
1292 status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
1293 TEST_LPM_ASSERT(status == 0);
1294 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
1296 status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24, next_hop_ip_10_24);
1297 TEST_LPM_ASSERT(status == 0);
1299 status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
1300 TEST_LPM_ASSERT(status == 0);
1301 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
1303 status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25, next_hop_ip_20_25);
1304 TEST_LPM_ASSERT(status == 0);
1306 status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
1307 TEST_LPM_ASSERT(status == 0);
1308 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);
/* Re-check the earlier rules: they must be untouched by the /25 add. */
1310 status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
1311 TEST_LPM_ASSERT(status == 0);
1312 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
1314 status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
1315 TEST_LPM_ASSERT(status == 0);
1316 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
1320 printf("%s PASSED\n", __func__);
1326 * Do all unit and performance tests.
/* Test driver (signature/braces not visible): runs every entry of tests[]
 * and reports per-test PASS/FAIL plus an overall status. */
1333 int status, global_status;
1335 printf("Running LPM tests...\n"
1336 "Total number of test = %u\n", (unsigned) NUM_LPM_TESTS);
1340 for (test_num = 0; test_num < NUM_LPM_TESTS; test_num++) {
1342 status = tests[test_num]();
1344 printf("LPM Test %u: %s\n", test_num,
1345 (status < 0) ? "FAIL" : "PASS");
/* NOTE(review): the guard keeping only the first failure in
 * global_status is on a line not visible in this view. */
1348 global_status = status;
1352 return global_status;
/* Fallback branch when RTE_LIBRTE_LPM is not compiled in (the enclosing
 * #ifdef/#else lines are not visible in this view). */
1360 printf("The LPM library is not included in this build\n");