4 * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 #include <sys/queue.h>
40 #include <cmdline_parse.h>
42 #include <rte_common.h>
43 #include <rte_cycles.h>
44 #include <rte_memory.h>
45 #include <rte_random.h>
46 #include <rte_branch_prediction.h>
53 #include "test_lpm_routes.h"
/* Number of random lookups performed by the performance test, and the
 * batch size used to amortise the rte_rdtsc() timing overhead. */
#define ITERATIONS (1 << 20)
#define BATCH_SIZE (1 << 13)

/*
 * Fail the enclosing test function, printing the offending line number,
 * when `cond` does not hold.  The do { } while(0) wrapper makes the macro
 * expand to a single statement, so it is safe in unbraced if/else bodies.
 */
#define TEST_LPM_ASSERT(cond) do {                                            \
	if (!(cond)) {                                                        \
		printf("Error at line %d: \n", __LINE__);                     \
		return -1;                                                    \
	}                                                                     \
} while(0)
/* Signature shared by every test case below: returns 0 (PASS) on
 * success, negative on failure. */
typedef int32_t (* rte_lpm_test)(void);

static int32_t test0(void);
static int32_t test1(void);
static int32_t test2(void);
static int32_t test3(void);
static int32_t test4(void);
static int32_t test5(void);
static int32_t test6(void);
static int32_t test7(void);
static int32_t test8(void);
static int32_t test9(void);
static int32_t test10(void);
static int32_t test11(void);
static int32_t test12(void);
static int32_t test13(void);
static int32_t test14(void);
static int32_t test15(void);
static int32_t test16(void);
static int32_t test17(void);
static int32_t test18(void);

/* Table of all test cases, executed in order by test_lpm().
 * NOTE(review): entries restored from the forward declarations above —
 * confirm ordering against the original file. */
rte_lpm_test tests[] = {
	test0,
	test1,
	test2,
	test3,
	test4,
	test5,
	test6,
	test7,
	test8,
	test9,
	test10,
	test11,
	test12,
	test13,
	test14,
	test15,
	test16,
	test17,
	test18
};

#define NUM_LPM_TESTS (sizeof(tests)/sizeof(tests[0]))

/* NOTE(review): MAX_DEPTH and PASS restored — they are referenced by the
 * test bodies (e.g. test3/test4 boundary checks) but their defines were
 * missing from this chunk. */
#define MAX_DEPTH 32
#define MAX_RULES 256
#define PASS 0
120 * Check that rte_lpm_create fails gracefully for incorrect user input
126 struct rte_lpm *lpm = NULL;
128 /* rte_lpm_create: lpm name == NULL */
129 lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, MAX_RULES, 0);
130 TEST_LPM_ASSERT(lpm == NULL);
132 /* rte_lpm_create: max_rules = 0 */
133 /* Note: __func__ inserts the function name, in this case "test0". */
134 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 0, 0);
135 TEST_LPM_ASSERT(lpm == NULL);
137 /* socket_id < -1 is invalid */
138 lpm = rte_lpm_create(__func__, -2, MAX_RULES, 0);
139 TEST_LPM_ASSERT(lpm == NULL);
145 * Create lpm table then delete lpm table 100 times
146 * Use a slightly different rules size each time
151 struct rte_lpm *lpm = NULL;
154 /* rte_lpm_free: Free NULL */
155 for (i = 0; i < 100; i++) {
156 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES - i, 0);
157 TEST_LPM_ASSERT(lpm != NULL);
162 /* Can not test free so return success */
167 * Call rte_lpm_free for NULL pointer user input. Note: free has no return and
168 * therefore it is impossible to check for failure but this test is added to
169 * increase function coverage metrics and to validate that freeing null does
175 struct rte_lpm *lpm = NULL;
177 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
178 TEST_LPM_ASSERT(lpm != NULL);
186 * Check that rte_lpm_add fails gracefully for incorrect user input arguments
191 struct rte_lpm *lpm = NULL;
192 uint32_t ip = IPv4(0, 0, 0, 0);
193 uint8_t depth = 24, next_hop = 100;
196 /* rte_lpm_add: lpm == NULL */
197 status = rte_lpm_add(NULL, ip, depth, next_hop);
198 TEST_LPM_ASSERT(status < 0);
200 /*Create vaild lpm to use in rest of test. */
201 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
202 TEST_LPM_ASSERT(lpm != NULL);
204 /* rte_lpm_add: depth < 1 */
205 status = rte_lpm_add(lpm, ip, 0, next_hop);
206 TEST_LPM_ASSERT(status < 0);
208 /* rte_lpm_add: depth > MAX_DEPTH */
209 status = rte_lpm_add(lpm, ip, (MAX_DEPTH + 1), next_hop);
210 TEST_LPM_ASSERT(status < 0);
218 * Check that rte_lpm_delete fails gracefully for incorrect user input
224 struct rte_lpm *lpm = NULL;
225 uint32_t ip = IPv4(0, 0, 0, 0);
229 /* rte_lpm_delete: lpm == NULL */
230 status = rte_lpm_delete(NULL, ip, depth);
231 TEST_LPM_ASSERT(status < 0);
233 /*Create vaild lpm to use in rest of test. */
234 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
235 TEST_LPM_ASSERT(lpm != NULL);
237 /* rte_lpm_delete: depth < 1 */
238 status = rte_lpm_delete(lpm, ip, 0);
239 TEST_LPM_ASSERT(status < 0);
241 /* rte_lpm_delete: depth > MAX_DEPTH */
242 status = rte_lpm_delete(lpm, ip, (MAX_DEPTH + 1));
243 TEST_LPM_ASSERT(status < 0);
251 * Check that rte_lpm_lookup fails gracefully for incorrect user input
257 #if defined(RTE_LIBRTE_LPM_DEBUG)
258 struct rte_lpm *lpm = NULL;
259 uint32_t ip = IPv4(0, 0, 0, 0);
260 uint8_t next_hop_return = 0;
263 /* rte_lpm_lookup: lpm == NULL */
264 status = rte_lpm_lookup(NULL, ip, &next_hop_return);
265 TEST_LPM_ASSERT(status < 0);
267 /*Create vaild lpm to use in rest of test. */
268 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
269 TEST_LPM_ASSERT(lpm != NULL);
271 /* rte_lpm_lookup: depth < 1 */
272 status = rte_lpm_lookup(lpm, ip, NULL);
273 TEST_LPM_ASSERT(status < 0);
283 * Call add, lookup and delete for a single rule with depth <= 24
288 struct rte_lpm *lpm = NULL;
289 uint32_t ip = IPv4(0, 0, 0, 0);
290 uint8_t depth = 24, next_hop_add = 100, next_hop_return = 0;
293 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
294 TEST_LPM_ASSERT(lpm != NULL);
296 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
297 TEST_LPM_ASSERT(status == 0);
299 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
300 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
302 status = rte_lpm_delete(lpm, ip, depth);
303 TEST_LPM_ASSERT(status == 0);
305 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
306 TEST_LPM_ASSERT(status == -ENOENT);
314 * Call add, lookup and delete for a single rule with depth > 24
320 struct rte_lpm *lpm = NULL;
321 uint32_t ip = IPv4(0, 0, 0, 0);
322 uint8_t depth = 32, next_hop_add = 100, next_hop_return = 0;
325 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
326 TEST_LPM_ASSERT(lpm != NULL);
328 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
329 TEST_LPM_ASSERT(status == 0);
331 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
332 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
334 status = rte_lpm_delete(lpm, ip, depth);
335 TEST_LPM_ASSERT(status == 0);
337 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
338 TEST_LPM_ASSERT(status == -ENOENT);
346 * Use rte_lpm_add to add rules which effect only the second half of the lpm
347 * table. Use all possible depths ranging from 1..32. Set the next hop = to the
348 * depth. Check lookup hit for on every add and check for lookup miss on the
349 * first half of the lpm table after each add. Finally delete all rules going
350 * backwards (i.e. from depth = 32 ..1) and carry out a lookup after each
351 * delete. The lookup should return the next_hop_add value related to the
352 * previous depth value (i.e. depth -1).
357 struct rte_lpm *lpm = NULL;
358 uint32_t ip1 = IPv4(127, 255, 255, 255), ip2 = IPv4(128, 0, 0, 0);
359 uint8_t depth, next_hop_add, next_hop_return;
362 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
363 TEST_LPM_ASSERT(lpm != NULL);
365 /* Loop with rte_lpm_add. */
366 for (depth = 1; depth <= 32; depth++) {
367 /* Let the next_hop_add value = depth. Just for change. */
368 next_hop_add = depth;
370 status = rte_lpm_add(lpm, ip2, depth, next_hop_add);
371 TEST_LPM_ASSERT(status == 0);
373 /* Check IP in first half of tbl24 which should be empty. */
374 status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
375 TEST_LPM_ASSERT(status == -ENOENT);
377 status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
378 TEST_LPM_ASSERT((status == 0) &&
379 (next_hop_return == next_hop_add));
382 /* Loop with rte_lpm_delete. */
383 for (depth = 32; depth >= 1; depth--) {
384 next_hop_add = (uint8_t) (depth - 1);
386 status = rte_lpm_delete(lpm, ip2, depth);
387 TEST_LPM_ASSERT(status == 0);
389 status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
392 TEST_LPM_ASSERT((status == 0) &&
393 (next_hop_return == next_hop_add));
396 TEST_LPM_ASSERT(status == -ENOENT);
399 status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
400 TEST_LPM_ASSERT(status == -ENOENT);
409 * - Add & lookup to hit invalid TBL24 entry
410 * - Add & lookup to hit valid TBL24 entry not extended
411 * - Add & lookup to hit valid extended TBL24 entry with invalid TBL8 entry
412 * - Add & lookup to hit valid extended TBL24 entry with valid TBL8 entry
418 struct rte_lpm *lpm = NULL;
419 uint32_t ip, ip_1, ip_2;
420 uint8_t depth, depth_1, depth_2, next_hop_add, next_hop_add_1,
421 next_hop_add_2, next_hop_return;
424 /* Add & lookup to hit invalid TBL24 entry */
425 ip = IPv4(128, 0, 0, 0);
429 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
430 TEST_LPM_ASSERT(lpm != NULL);
432 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
433 TEST_LPM_ASSERT(status == 0);
435 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
436 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
438 status = rte_lpm_delete(lpm, ip, depth);
439 TEST_LPM_ASSERT(status == 0);
441 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
442 TEST_LPM_ASSERT(status == -ENOENT);
444 rte_lpm_delete_all(lpm);
446 /* Add & lookup to hit valid TBL24 entry not extended */
447 ip = IPv4(128, 0, 0, 0);
451 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
452 TEST_LPM_ASSERT(status == 0);
454 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
455 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
460 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
461 TEST_LPM_ASSERT(status == 0);
463 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
464 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
468 status = rte_lpm_delete(lpm, ip, depth);
469 TEST_LPM_ASSERT(status == 0);
473 status = rte_lpm_delete(lpm, ip, depth);
474 TEST_LPM_ASSERT(status == 0);
476 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
477 TEST_LPM_ASSERT(status == -ENOENT);
479 rte_lpm_delete_all(lpm);
481 /* Add & lookup to hit valid extended TBL24 entry with invalid TBL8
483 ip = IPv4(128, 0, 0, 0);
487 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
488 TEST_LPM_ASSERT(status == 0);
490 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
491 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
493 ip = IPv4(128, 0, 0, 5);
497 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
498 TEST_LPM_ASSERT(status == 0);
500 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
501 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
503 status = rte_lpm_delete(lpm, ip, depth);
504 TEST_LPM_ASSERT(status == 0);
506 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
507 TEST_LPM_ASSERT(status == -ENOENT);
509 ip = IPv4(128, 0, 0, 0);
513 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
514 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
516 status = rte_lpm_delete(lpm, ip, depth);
517 TEST_LPM_ASSERT(status == 0);
519 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
520 TEST_LPM_ASSERT(status == -ENOENT);
522 rte_lpm_delete_all(lpm);
524 /* Add & lookup to hit valid extended TBL24 entry with valid TBL8
526 ip_1 = IPv4(128, 0, 0, 0);
528 next_hop_add_1 = 101;
530 ip_2 = IPv4(128, 0, 0, 5);
532 next_hop_add_2 = 102;
536 status = rte_lpm_add(lpm, ip_1, depth_1, next_hop_add_1);
537 TEST_LPM_ASSERT(status == 0);
539 status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
540 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
542 status = rte_lpm_add(lpm, ip_2, depth_2, next_hop_add_2);
543 TEST_LPM_ASSERT(status == 0);
545 status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
546 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2));
548 status = rte_lpm_delete(lpm, ip_2, depth_2);
549 TEST_LPM_ASSERT(status == 0);
551 status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
552 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
554 status = rte_lpm_delete(lpm, ip_1, depth_1);
555 TEST_LPM_ASSERT(status == 0);
557 status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
558 TEST_LPM_ASSERT(status == -ENOENT);
567 * - Add rule that covers a TBL24 range previously invalid & lookup (& delete &
569 * - Add rule that extends a TBL24 invalid entry & lookup (& delete & lookup)
570 * - Add rule that extends a TBL24 valid entry & lookup for both rules (&
572 * - Add rule that updates the next hop in TBL24 & lookup (& delete & lookup)
573 * - Add rule that updates the next hop in TBL8 & lookup (& delete & lookup)
574 * - Delete a rule that is not present in the TBL24 & lookup
575 * - Delete a rule that is not present in the TBL8 & lookup
582 struct rte_lpm *lpm = NULL;
584 uint8_t depth, next_hop_add, next_hop_return;
587 /* Add rule that covers a TBL24 range previously invalid & lookup
588 * (& delete & lookup) */
589 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
590 TEST_LPM_ASSERT(lpm != NULL);
592 ip = IPv4(128, 0, 0, 0);
596 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
597 TEST_LPM_ASSERT(status == 0);
599 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
600 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
602 status = rte_lpm_delete(lpm, ip, depth);
603 TEST_LPM_ASSERT(status == 0);
605 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
606 TEST_LPM_ASSERT(status == -ENOENT);
608 rte_lpm_delete_all(lpm);
610 ip = IPv4(128, 0, 0, 0);
614 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
615 TEST_LPM_ASSERT(status == 0);
617 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
618 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
620 status = rte_lpm_delete(lpm, ip, depth);
621 TEST_LPM_ASSERT(status == 0);
623 rte_lpm_delete_all(lpm);
625 /* Add rule that extends a TBL24 valid entry & lookup for both rules
626 * (& delete & lookup) */
628 ip = IPv4(128, 0, 0, 0);
632 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
633 TEST_LPM_ASSERT(status == 0);
635 ip = IPv4(128, 0, 0, 10);
639 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
640 TEST_LPM_ASSERT(status == 0);
642 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
643 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
645 ip = IPv4(128, 0, 0, 0);
648 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
649 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
651 ip = IPv4(128, 0, 0, 0);
654 status = rte_lpm_delete(lpm, ip, depth);
655 TEST_LPM_ASSERT(status == 0);
657 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
658 TEST_LPM_ASSERT(status == -ENOENT);
660 ip = IPv4(128, 0, 0, 10);
663 status = rte_lpm_delete(lpm, ip, depth);
664 TEST_LPM_ASSERT(status == 0);
666 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
667 TEST_LPM_ASSERT(status == -ENOENT);
669 rte_lpm_delete_all(lpm);
671 /* Add rule that updates the next hop in TBL24 & lookup
672 * (& delete & lookup) */
674 ip = IPv4(128, 0, 0, 0);
678 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
679 TEST_LPM_ASSERT(status == 0);
681 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
682 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
686 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
687 TEST_LPM_ASSERT(status == 0);
689 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
690 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
692 status = rte_lpm_delete(lpm, ip, depth);
693 TEST_LPM_ASSERT(status == 0);
695 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
696 TEST_LPM_ASSERT(status == -ENOENT);
698 rte_lpm_delete_all(lpm);
700 /* Add rule that updates the next hop in TBL8 & lookup
701 * (& delete & lookup) */
703 ip = IPv4(128, 0, 0, 0);
707 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
708 TEST_LPM_ASSERT(status == 0);
710 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
711 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
715 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
716 TEST_LPM_ASSERT(status == 0);
718 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
719 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
721 status = rte_lpm_delete(lpm, ip, depth);
722 TEST_LPM_ASSERT(status == 0);
724 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
725 TEST_LPM_ASSERT(status == -ENOENT);
727 rte_lpm_delete_all(lpm);
729 /* Delete a rule that is not present in the TBL24 & lookup */
731 ip = IPv4(128, 0, 0, 0);
735 status = rte_lpm_delete(lpm, ip, depth);
736 TEST_LPM_ASSERT(status < 0);
738 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
739 TEST_LPM_ASSERT(status == -ENOENT);
741 rte_lpm_delete_all(lpm);
743 /* Delete a rule that is not present in the TBL8 & lookup */
745 ip = IPv4(128, 0, 0, 0);
749 status = rte_lpm_delete(lpm, ip, depth);
750 TEST_LPM_ASSERT(status < 0);
752 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
753 TEST_LPM_ASSERT(status == -ENOENT);
761 * Add two rules, lookup to hit the more specific one, lookup to hit the less
762 * specific one delete the less specific rule and lookup previous values again;
763 * add a more specific rule than the existing rule, lookup again
770 struct rte_lpm *lpm = NULL;
772 uint8_t depth, next_hop_add, next_hop_return;
775 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
776 TEST_LPM_ASSERT(lpm != NULL);
778 ip = IPv4(128, 0, 0, 0);
782 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
783 TEST_LPM_ASSERT(status == 0);
785 ip = IPv4(128, 0, 0, 10);
789 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
790 TEST_LPM_ASSERT(status == 0);
792 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
793 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
795 ip = IPv4(128, 0, 0, 0);
798 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
799 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
801 ip = IPv4(128, 0, 0, 0);
804 status = rte_lpm_delete(lpm, ip, depth);
805 TEST_LPM_ASSERT(status == 0);
807 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
808 TEST_LPM_ASSERT(status == -ENOENT);
810 ip = IPv4(128, 0, 0, 10);
813 status = rte_lpm_delete(lpm, ip, depth);
814 TEST_LPM_ASSERT(status == 0);
816 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
817 TEST_LPM_ASSERT(status == -ENOENT);
825 * Add an extended rule (i.e. depth greater than 24, lookup (hit), delete,
826 * lookup (miss) in a for loop of 1000 times. This will check tbl8 extension
834 struct rte_lpm *lpm = NULL;
836 uint8_t depth, next_hop_add, next_hop_return;
839 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
840 TEST_LPM_ASSERT(lpm != NULL);
842 ip = IPv4(128, 0, 0, 0);
846 for (i = 0; i < 1000; i++) {
847 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
848 TEST_LPM_ASSERT(status == 0);
850 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
851 TEST_LPM_ASSERT((status == 0) &&
852 (next_hop_return == next_hop_add));
854 status = rte_lpm_delete(lpm, ip, depth);
855 TEST_LPM_ASSERT(status == 0);
857 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
858 TEST_LPM_ASSERT(status == -ENOENT);
867 * Add a rule to tbl24, lookup (hit), then add a rule that will extend this
868 * tbl24 entry, lookup (hit). delete the rule that caused the tbl24 extension,
869 * lookup (miss) and repeat for loop of 1000 times. This will check tbl8
870 * extension and contraction.
877 struct rte_lpm *lpm = NULL;
879 uint8_t depth, next_hop_add_1, next_hop_add_2, next_hop_return;
882 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
883 TEST_LPM_ASSERT(lpm != NULL);
885 ip = IPv4(128, 0, 0, 0);
887 next_hop_add_1 = 100;
889 status = rte_lpm_add(lpm, ip, depth, next_hop_add_1);
890 TEST_LPM_ASSERT(status == 0);
892 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
893 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
896 next_hop_add_2 = 101;
898 for (i = 0; i < 1000; i++) {
899 status = rte_lpm_add(lpm, ip, depth, next_hop_add_2);
900 TEST_LPM_ASSERT(status == 0);
902 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
903 TEST_LPM_ASSERT((status == 0) &&
904 (next_hop_return == next_hop_add_2));
906 status = rte_lpm_delete(lpm, ip, depth);
907 TEST_LPM_ASSERT(status == 0);
909 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
910 TEST_LPM_ASSERT((status == 0) &&
911 (next_hop_return == next_hop_add_1));
916 status = rte_lpm_delete(lpm, ip, depth);
917 TEST_LPM_ASSERT(status == 0);
919 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
920 TEST_LPM_ASSERT(status == -ENOENT);
928 * Fore TBL8 extension exhaustion. Add 256 rules that require a tbl8 extension.
929 * No more tbl8 extensions will be allowed. Now add one more rule that required
930 * a tbl8 extension and get fail.
936 /* We only use depth = 32 in the loop below so we must make sure
937 * that we have enough storage for all rules at that depth*/
939 struct rte_lpm *lpm = NULL;
941 uint8_t depth, next_hop_add, next_hop_return;
944 /* Add enough space for 256 rules for every depth */
945 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 256 * 32, 0);
946 TEST_LPM_ASSERT(lpm != NULL);
948 ip = IPv4(0, 0, 0, 0);
952 /* Add 256 rules that require a tbl8 extension */
953 for (ip = 0; ip <= IPv4(0, 0, 255, 0); ip += 256) {
954 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
955 TEST_LPM_ASSERT(status == 0);
957 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
958 TEST_LPM_ASSERT((status == 0) &&
959 (next_hop_return == next_hop_add));
962 /* All tbl8 extensions have been used above. Try to add one more and
964 ip = IPv4(1, 0, 0, 0);
967 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
968 TEST_LPM_ASSERT(status < 0);
/*
 * Lookup performance test using the Mae West routing table.
 */

/*
 * Convert a prefix depth (1..32) to a network mask with `depth` leading
 * one-bits.  The previous implementation right-shifted a negative signed
 * int, which is implementation-defined behavior in C (C11 6.5.7); the
 * unsigned left shift below is fully defined and yields the same mask.
 */
static inline uint32_t
depth_to_mask(uint8_t depth) {
	/* depth must be in [1, 32]; depth == 32 shifts by 0. */
	return (uint32_t)(~0u << (32 - depth));
}
985 rule_table_check_for_duplicates(const struct route_rule *table, uint32_t n){
986 unsigned i, j, count;
989 for (i = 0; i < (n - 1); i++) {
990 uint8_t depth1 = table[i].depth;
991 uint32_t ip1_masked = table[i].ip & depth_to_mask(depth1);
993 for (j = (i + 1); j <n; j ++) {
994 uint8_t depth2 = table[j].depth;
995 uint32_t ip2_masked = table[j].ip &
996 depth_to_mask(depth2);
998 if ((depth1 == depth2) && (ip1_masked == ip2_masked)){
999 printf("Rule %u is a duplicate of rule %u\n",
1010 rule_table_characterisation(const struct route_rule *table, uint32_t n){
1013 printf("DEPTH QUANTITY (PERCENT)\n");
1014 printf("--------------------------------- \n");
1016 for(i = 1; i <= 32; i++) {
1017 unsigned depth_counter = 0;
1018 double percent_hits;
1020 for (j = 0; j < n; j++) {
1021 if (table[j].depth == (uint8_t) i)
1025 percent_hits = ((double)depth_counter)/((double)n) * 100;
1027 printf("%u - %5u (%.2f)\n",
1028 i, depth_counter, percent_hits);
/*
 * Unsigned 64-bit division rounded to the nearest integer rather than
 * truncated: adds half the divisor to the dividend before dividing.
 * Caveat: (2 * dividend) can wrap for dividends above UINT64_MAX/2; the
 * cycle counts divided here stay far below that.
 */
static inline uint64_t
div64(uint64_t dividend, uint64_t divisor)
{
	return ((2 * dividend) + divisor) / (2 * divisor);
}
1043 struct rte_lpm *lpm = NULL;
1044 uint64_t begin, end, total_time, lpm_used_entries = 0;
1045 unsigned avg_ticks, i, j;
1046 uint8_t next_hop_add = 0, next_hop_return = 0;
1049 printf("Using Mae West routing table from www.oiforum.com\n");
1050 printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);
1051 printf("No. duplicate routes = %u\n\n", (unsigned)
1052 rule_table_check_for_duplicates(mae_west_tbl, NUM_ROUTE_ENTRIES));
1053 printf("Route distribution per prefix width: \n");
1054 rule_table_characterisation(mae_west_tbl,
1055 (uint32_t) NUM_ROUTE_ENTRIES);
1058 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 1000000,
1060 TEST_LPM_ASSERT(lpm != NULL);
1066 begin = rte_rdtsc();
1068 for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
1069 /* rte_lpm_add(lpm, ip, depth, next_hop_add) */
1070 status += rte_lpm_add(lpm, mae_west_tbl[i].ip,
1071 mae_west_tbl[i].depth, next_hop_add);
1076 TEST_LPM_ASSERT(status == 0);
1078 /* Calculate average cycles per add. */
1079 avg_ticks = (uint32_t) div64((end - begin),
1080 (uint64_t) NUM_ROUTE_ENTRIES);
1082 uint64_t cache_line_counter = 0;
1085 /* Obtain add statistics. */
1086 for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
1087 if (lpm->tbl24[i].valid)
1091 if (count < lpm_used_entries) {
1092 cache_line_counter++;
1093 count = lpm_used_entries;
1098 printf("Number of table 24 entries = %u\n",
1099 (unsigned) RTE_LPM_TBL24_NUM_ENTRIES);
1100 printf("Used table 24 entries = %u\n",
1101 (unsigned) lpm_used_entries);
1102 printf("Percentage of table 24 entries used = %u\n",
1103 (unsigned) div64((lpm_used_entries * 100) ,
1104 RTE_LPM_TBL24_NUM_ENTRIES));
1105 printf("64 byte Cache entries used = %u \n",
1106 (unsigned) cache_line_counter);
1107 printf("Cache Required = %u bytes\n\n",
1108 (unsigned) cache_line_counter * 64);
1110 printf("Average LPM Add: %u cycles\n", avg_ticks);
1114 /* Choose random seed. */
1118 for (i = 0; i < (ITERATIONS / BATCH_SIZE); i ++) {
1119 static uint32_t ip_batch[BATCH_SIZE];
1120 uint64_t begin_batch, end_batch;
1122 /* Generate a batch of random numbers */
1123 for (j = 0; j < BATCH_SIZE; j ++) {
1124 ip_batch[j] = rte_rand();
1127 /* Lookup per batch */
1128 begin_batch = rte_rdtsc();
1130 for (j = 0; j < BATCH_SIZE; j ++) {
1131 status += rte_lpm_lookup(lpm, ip_batch[j],
1135 end_batch = rte_rdtsc();
1136 printf("status = %d\r", next_hop_return);
1137 TEST_LPM_ASSERT(status < 1);
1139 /* Accumulate batch time */
1140 total_time += (end_batch - begin_batch);
1142 TEST_LPM_ASSERT((status < -ENOENT) ||
1143 (next_hop_return == next_hop_add));
1146 avg_ticks = (uint32_t) div64(total_time, ITERATIONS);
1147 printf("Average LPM Lookup: %u cycles\n", avg_ticks);
1151 begin = rte_rdtsc();
1153 for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
1154 /* rte_lpm_delete(lpm, ip, depth) */
1155 status += rte_lpm_delete(lpm, mae_west_tbl[i].ip,
1156 mae_west_tbl[i].depth);
1161 TEST_LPM_ASSERT(status == 0);
1163 avg_ticks = (uint32_t) div64((end - begin), NUM_ROUTE_ENTRIES);
1165 printf("Average LPM Delete: %u cycles\n", avg_ticks);
1167 rte_lpm_delete_all(lpm);
1176 * Sequence of operations for find existing fbk hash table
1179 * - find existing table: hit
1180 * - find non-existing table: miss
1183 int32_t test16(void)
1185 struct rte_lpm *lpm = NULL, *result = NULL;
1188 lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, 256 * 32, 0);
1189 TEST_LPM_ASSERT(lpm != NULL);
1191 /* Try to find existing lpm */
1192 result = rte_lpm_find_existing("lpm_find_existing");
1193 TEST_LPM_ASSERT(result == lpm);
1195 /* Try to find non-existing lpm */
1196 result = rte_lpm_find_existing("lpm_find_non_existing");
1197 TEST_LPM_ASSERT(result == NULL);
1200 rte_lpm_delete_all(lpm);
1207 * test failure condition of overloading the tbl8 so no more will fit
1208 * Check we get an error return value in that case
1214 struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY,
1215 256 * 32, RTE_LPM_HEAP);
1217 printf("Testing filling tbl8's\n");
1219 /* ip loops through all positibilities for top 24 bits of address */
1220 for (ip = 0; ip < 0xFFFFFF; ip++){
1221 /* add an entrey within a different tbl8 each time, since
1222 * depth >24 and the top 24 bits are different */
1223 if (rte_lpm_add(lpm, (ip << 8) + 0xF0, 30, 0) < 0)
1227 if (ip != RTE_LPM_TBL8_NUM_GROUPS) {
1228 printf("Error, unexpected failure with filling tbl8 groups\n");
1229 printf("Failed after %u additions, expected after %u\n",
1230 (unsigned)ip, (unsigned)RTE_LPM_TBL8_NUM_GROUPS);
1239 * Test for overwriting of tbl8:
1240 * - add rule /32 and lookup
1241 * - add new rule /24 and lookup
1242 * - add third rule /25 and lookup
1243 * - lookup /32 and /24 rule to ensure the table has not been overwritten.
1248 struct rte_lpm *lpm = NULL;
1249 const uint32_t ip_10_32 = IPv4(10, 10, 10, 2);
1250 const uint32_t ip_10_24 = IPv4(10, 10, 10, 0);
1251 const uint32_t ip_20_25 = IPv4(10, 10, 20, 2);
1252 const uint8_t d_ip_10_32 = 32,
1255 const uint8_t next_hop_ip_10_32 = 100,
1256 next_hop_ip_10_24 = 105,
1257 next_hop_ip_20_25 = 111;
1258 uint8_t next_hop_return = 0;
1261 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
1262 TEST_LPM_ASSERT(lpm != NULL);
1264 status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32, next_hop_ip_10_32);
1265 TEST_LPM_ASSERT(status == 0);
1267 status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
1268 TEST_LPM_ASSERT(status == 0);
1269 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
1271 status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24, next_hop_ip_10_24);
1272 TEST_LPM_ASSERT(status == 0);
1274 status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
1275 TEST_LPM_ASSERT(status == 0);
1276 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
1278 status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25, next_hop_ip_20_25);
1279 TEST_LPM_ASSERT(status == 0);
1281 status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
1282 TEST_LPM_ASSERT(status == 0);
1283 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);
1285 status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
1286 TEST_LPM_ASSERT(status == 0);
1287 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
1289 status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
1290 TEST_LPM_ASSERT(status == 0);
1291 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
1295 printf("%s PASSED\n", __func__);
1301 * Do all unit and performance tests.
1308 int status, global_status;
1310 printf("Running LPM tests...\n"
1311 "Total number of test = %u\n", (unsigned) NUM_LPM_TESTS);
1315 for (test_num = 0; test_num < NUM_LPM_TESTS; test_num++) {
1317 status = tests[test_num]();
1319 printf("LPM Test %u: %s\n", test_num,
1320 (status < 0) ? "FAIL" : "PASS");
1323 global_status = status;
1327 return global_status;
/* NOTE(review): trailing fragment — in the full file this printf is the body
 * of the stub test_lpm() in the #else branch of the RTE_LIBRTE_LPM build
 * conditional; the surrounding function wrapper and #else/#endif are missing
 * from this chunk, so the statement is left untouched. */
1335 printf("The LPM library is not included in this build\n");