4 * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 #include <sys/queue.h>
40 #include <cmdline_parse.h>
42 #include <rte_common.h>
43 #include <rte_cycles.h>
44 #include <rte_memory.h>
45 #include <rte_random.h>
46 #include <rte_branch_prediction.h>
53 #include "test_lpm_routes.h"
/* Assertion helper used by every test below: on failure it prints the failing
 * source line. NOTE(review): the macro body is truncated in this excerpt —
 * the condition test and the do/while(0) terminator are not visible. */
57 #define TEST_LPM_ASSERT(cond) do { \
59 printf("Error at line %d: \n", __LINE__); \
/* Signature shared by every LPM unit/perf test; each returns 0 on pass. */
64 typedef int32_t (* rte_lpm_test)(void);
66 static int32_t test0(void);
67 static int32_t test1(void);
68 static int32_t test2(void);
69 static int32_t test3(void);
70 static int32_t test4(void);
71 static int32_t test5(void);
72 static int32_t test6(void);
73 static int32_t test7(void);
74 static int32_t test8(void);
75 static int32_t test9(void);
76 static int32_t test10(void);
77 static int32_t test11(void);
78 static int32_t test12(void);
79 static int32_t test13(void);
80 static int32_t test14(void);
81 static int32_t test15(void);
82 static int32_t test16(void);
83 static int32_t test17(void);
84 static int32_t perf_test(void);
/* Registry of all tests, iterated by the test_lpm() driver at the bottom of
 * the file. NOTE(review): the initializer list is missing from this excerpt. */
86 rte_lpm_test tests[] = {
109 #define NUM_LPM_TESTS (sizeof(tests)/sizeof(tests[0]))
/* Rule capacity passed to rte_lpm_create() throughout these tests. */
111 #define MAX_RULES 256
/* test0: every rte_lpm_create() call below uses invalid arguments and must
 * fail by returning NULL. NOTE(review): the function signature, status
 * declaration and return statement are missing from this excerpt. */
115 * Check that rte_lpm_create fails gracefully for incorrect user input
121 struct rte_lpm *lpm = NULL;
123 /* rte_lpm_create: lpm name == NULL */
124 lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, MAX_RULES, 0);
125 TEST_LPM_ASSERT(lpm == NULL);
127 /* rte_lpm_create: max_rules = 0 */
128 /* Note: __func__ inserts the function name, in this case "test0". */
129 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 0, 0);
130 TEST_LPM_ASSERT(lpm == NULL);
132 /* socket_id < -1 is invalid */
133 lpm = rte_lpm_create(__func__, -2, MAX_RULES, 0);
134 TEST_LPM_ASSERT(lpm == NULL);
/* test1: create/free stress — 100 create+free cycles with a slightly
 * different max_rules each iteration. NOTE(review): the loop body's
 * rte_lpm_free() call and the closing brace are missing from this excerpt. */
140 * Create lpm table then delete lpm table 100 times
141 * Use a slightly different rules size each time
146 struct rte_lpm *lpm = NULL;
149 /* rte_lpm_free: Free NULL */
150 for (i = 0; i < 100; i++) {
151 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES - i, 0);
152 TEST_LPM_ASSERT(lpm != NULL);
157 /* Can not test free so return success */
/* test2: exercise rte_lpm_free() on a valid table (and on NULL, per the
 * comment) purely for coverage — free returns void, so failure cannot be
 * observed. NOTE(review): the rte_lpm_free() calls themselves are missing
 * from this excerpt. */
162 * Call rte_lpm_free for NULL pointer user input. Note: free has no return and
163 * therefore it is impossible to check for failure but this test is added to
164 * increase function coverage metrics and to validate that freeing null does
170 struct rte_lpm *lpm = NULL;
172 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
173 TEST_LPM_ASSERT(lpm != NULL);
/* test3: rte_lpm_add() argument validation — NULL table, depth below 1 and
 * depth above MAX_DEPTH must all return a negative status. */
181 * Check that rte_lpm_add fails gracefully for incorrect user input arguments
186 struct rte_lpm *lpm = NULL;
187 uint32_t ip = IPv4(0, 0, 0, 0);
188 uint8_t depth = 24, next_hop = 100;
191 /* rte_lpm_add: lpm == NULL */
192 status = rte_lpm_add(NULL, ip, depth, next_hop);
193 TEST_LPM_ASSERT(status < 0);
195 /* Create valid lpm to use in rest of test. */
196 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
197 TEST_LPM_ASSERT(lpm != NULL);
199 /* rte_lpm_add: depth < 1 */
200 status = rte_lpm_add(lpm, ip, 0, next_hop);
201 TEST_LPM_ASSERT(status < 0);
203 /* rte_lpm_add: depth > MAX_DEPTH */
204 status = rte_lpm_add(lpm, ip, (MAX_DEPTH + 1), next_hop);
205 TEST_LPM_ASSERT(status < 0);
/* test4: rte_lpm_delete() argument validation — mirrors test3 for the
 * delete path (NULL table, out-of-range depth). */
213 * Check that rte_lpm_delete fails gracefully for incorrect user input
219 struct rte_lpm *lpm = NULL;
220 uint32_t ip = IPv4(0, 0, 0, 0);
224 /* rte_lpm_delete: lpm == NULL */
225 status = rte_lpm_delete(NULL, ip, depth);
226 TEST_LPM_ASSERT(status < 0);
228 /* Create valid lpm to use in rest of test. */
229 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
230 TEST_LPM_ASSERT(lpm != NULL);
232 /* rte_lpm_delete: depth < 1 */
233 status = rte_lpm_delete(lpm, ip, 0);
234 TEST_LPM_ASSERT(status < 0);
236 /* rte_lpm_delete: depth > MAX_DEPTH */
237 status = rte_lpm_delete(lpm, ip, (MAX_DEPTH + 1));
238 TEST_LPM_ASSERT(status < 0);
/* test5: rte_lpm_lookup() argument validation. Only compiled when
 * RTE_LIBRTE_LPM_DEBUG is defined, because the release build skips these
 * parameter checks in the lookup fast path. */
246 * Check that rte_lpm_lookup fails gracefully for incorrect user input
252 #if defined(RTE_LIBRTE_LPM_DEBUG)
253 struct rte_lpm *lpm = NULL;
254 uint32_t ip = IPv4(0, 0, 0, 0);
255 uint8_t next_hop_return = 0;
258 /* rte_lpm_lookup: lpm == NULL */
259 status = rte_lpm_lookup(NULL, ip, &next_hop_return);
260 TEST_LPM_ASSERT(status < 0);
262 /* Create valid lpm to use in rest of test. */
263 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
264 TEST_LPM_ASSERT(lpm != NULL);
266 /* rte_lpm_lookup: depth < 1 */
267 status = rte_lpm_lookup(lpm, ip, NULL);
268 TEST_LPM_ASSERT(status < 0);
/* test6: happy-path add/lookup/delete for one rule at depth 24 (a pure
 * tbl24 entry, no tbl8 extension). After delete, lookup must miss with
 * -ENOENT. */
278 * Call add, lookup and delete for a single rule with depth <= 24
283 struct rte_lpm *lpm = NULL;
284 uint32_t ip = IPv4(0, 0, 0, 0);
285 uint8_t depth = 24, next_hop_add = 100, next_hop_return = 0;
288 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
289 TEST_LPM_ASSERT(lpm != NULL);
291 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
292 TEST_LPM_ASSERT(status == 0);
294 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
295 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
297 status = rte_lpm_delete(lpm, ip, depth);
298 TEST_LPM_ASSERT(status == 0);
300 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
301 TEST_LPM_ASSERT(status == -ENOENT);
/* test7: same as test6 but with depth 32, which forces allocation of a
 * tbl8 group for the /32 rule. */
309 * Call add, lookup and delete for a single rule with depth > 24
315 struct rte_lpm *lpm = NULL;
316 uint32_t ip = IPv4(0, 0, 0, 0);
317 uint8_t depth = 32, next_hop_add = 100, next_hop_return = 0;
320 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
321 TEST_LPM_ASSERT(lpm != NULL);
323 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
324 TEST_LPM_ASSERT(status == 0);
326 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
327 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
329 status = rte_lpm_delete(lpm, ip, depth);
330 TEST_LPM_ASSERT(status == 0);
332 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
333 TEST_LPM_ASSERT(status == -ENOENT);
/* test8: layered prefixes over one address. Adds 128.0.0.0 at every depth
 * 1..32 (next hop == depth), checking after each add that the lower half of
 * the address space (ip1) still misses. Then deletes from depth 32 down to 1,
 * expecting each lookup to fall back to the next-shallower rule. */
341 * Use rte_lpm_add to add rules which effect only the second half of the lpm
342 * table. Use all possible depths ranging from 1..32. Set the next hop = to the
343 * depth. Check lookup hit on every add and check for lookup miss on the
344 * first half of the lpm table after each add. Finally delete all rules going
345 * backwards (i.e. from depth = 32 ..1) and carry out a lookup after each
346 * delete. The lookup should return the next_hop_add value related to the
347 * previous depth value (i.e. depth -1).
352 struct rte_lpm *lpm = NULL;
353 uint32_t ip1 = IPv4(127, 255, 255, 255), ip2 = IPv4(128, 0, 0, 0);
354 uint8_t depth, next_hop_add, next_hop_return;
357 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
358 TEST_LPM_ASSERT(lpm != NULL);
360 /* Loop with rte_lpm_add. */
361 for (depth = 1; depth <= 32; depth++) {
362 /* Let the next_hop_add value = depth. Just for change. */
363 next_hop_add = depth;
365 status = rte_lpm_add(lpm, ip2, depth, next_hop_add);
366 TEST_LPM_ASSERT(status == 0);
368 /* Check IP in first half of tbl24 which should be empty. */
369 status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
370 TEST_LPM_ASSERT(status == -ENOENT);
372 status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
373 TEST_LPM_ASSERT((status == 0) &&
374 (next_hop_return == next_hop_add));
377 /* Loop with rte_lpm_delete. */
378 for (depth = 32; depth >= 1; depth--) {
379 next_hop_add = (uint8_t) (depth - 1);
381 status = rte_lpm_delete(lpm, ip2, depth);
382 TEST_LPM_ASSERT(status == 0);
384 status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
/* NOTE(review): lines are missing here — the two assertions below were
 * presumably separated by an if (depth > 1) / else branch, so that the final
 * delete (depth == 1) expects -ENOENT instead of a hit. Confirm against the
 * full source. */
387 TEST_LPM_ASSERT((status == 0) &&
388 (next_hop_return == next_hop_add));
391 TEST_LPM_ASSERT(status == -ENOENT);
394 status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
395 TEST_LPM_ASSERT(status == -ENOENT);
/* test9: walks the four lookup code paths of the tbl24/tbl8 structure in
 * turn. NOTE(review): several assignment lines (depth/next_hop values before
 * each scenario) are missing from this excerpt; each scenario sets up its own
 * ip/depth/next_hop before the add shown. */
404 * - Add & lookup to hit invalid TBL24 entry
405 * - Add & lookup to hit valid TBL24 entry not extended
406 * - Add & lookup to hit valid extended TBL24 entry with invalid TBL8 entry
407 * - Add & lookup to hit valid extended TBL24 entry with valid TBL8 entry
413 struct rte_lpm *lpm = NULL;
414 uint32_t ip, ip_1, ip_2;
415 uint8_t depth, depth_1, depth_2, next_hop_add, next_hop_add_1,
416 next_hop_add_2, next_hop_return;
419 /* Add & lookup to hit invalid TBL24 entry */
420 ip = IPv4(128, 0, 0, 0);
424 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
425 TEST_LPM_ASSERT(lpm != NULL);
427 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
428 TEST_LPM_ASSERT(status == 0);
430 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
431 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
433 status = rte_lpm_delete(lpm, ip, depth);
434 TEST_LPM_ASSERT(status == 0);
436 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
437 TEST_LPM_ASSERT(status == -ENOENT);
439 rte_lpm_delete_all(lpm);
441 /* Add & lookup to hit valid TBL24 entry not extended */
442 ip = IPv4(128, 0, 0, 0);
446 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
447 TEST_LPM_ASSERT(status == 0);
449 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
450 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
455 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
456 TEST_LPM_ASSERT(status == 0);
458 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
459 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
463 status = rte_lpm_delete(lpm, ip, depth);
464 TEST_LPM_ASSERT(status == 0);
468 status = rte_lpm_delete(lpm, ip, depth);
469 TEST_LPM_ASSERT(status == 0);
471 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
472 TEST_LPM_ASSERT(status == -ENOENT);
474 rte_lpm_delete_all(lpm);
476 /* Add & lookup to hit valid extended TBL24 entry with invalid TBL8
478 ip = IPv4(128, 0, 0, 0);
482 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
483 TEST_LPM_ASSERT(status == 0);
485 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
486 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
488 ip = IPv4(128, 0, 0, 5);
492 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
493 TEST_LPM_ASSERT(status == 0);
495 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
496 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
498 status = rte_lpm_delete(lpm, ip, depth);
499 TEST_LPM_ASSERT(status == 0);
501 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
502 TEST_LPM_ASSERT(status == -ENOENT);
504 ip = IPv4(128, 0, 0, 0);
508 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
509 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
511 status = rte_lpm_delete(lpm, ip, depth);
512 TEST_LPM_ASSERT(status == 0);
514 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
515 TEST_LPM_ASSERT(status == -ENOENT);
517 rte_lpm_delete_all(lpm);
519 /* Add & lookup to hit valid extended TBL24 entry with valid TBL8
521 ip_1 = IPv4(128, 0, 0, 0);
523 next_hop_add_1 = 101;
525 ip_2 = IPv4(128, 0, 0, 5);
527 next_hop_add_2 = 102;
/* Deleting the more-specific ip_2 rule must make its lookups fall back to
 * the covering ip_1 rule (next_hop_add_1), proving the tbl8 entry was
 * restored rather than invalidated. */
531 status = rte_lpm_add(lpm, ip_1, depth_1, next_hop_add_1);
532 TEST_LPM_ASSERT(status == 0);
534 status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
535 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
537 status = rte_lpm_add(lpm, ip_2, depth_2, next_hop_add_2);
538 TEST_LPM_ASSERT(status == 0);
540 status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
541 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2));
543 status = rte_lpm_delete(lpm, ip_2, depth_2);
544 TEST_LPM_ASSERT(status == 0);
546 status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
547 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
549 status = rte_lpm_delete(lpm, ip_1, depth_1);
550 TEST_LPM_ASSERT(status == 0);
552 status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
553 TEST_LPM_ASSERT(status == -ENOENT);
/* test10: exercises every add/delete interaction with the tbl24/tbl8
 * tables, one scenario per section below, resetting the table with
 * rte_lpm_delete_all() between scenarios. NOTE(review): the ip/depth/
 * next_hop assignments preceding several adds are missing from this
 * excerpt — each section sets its own values in the full source. */
562 * - Add rule that covers a TBL24 range previously invalid & lookup (& delete &
564 * - Add rule that extends a TBL24 invalid entry & lookup (& delete & lookup)
565 * - Add rule that extends a TBL24 valid entry & lookup for both rules (&
567 * - Add rule that updates the next hop in TBL24 & lookup (& delete & lookup)
568 * - Add rule that updates the next hop in TBL8 & lookup (& delete & lookup)
569 * - Delete a rule that is not present in the TBL24 & lookup
570 * - Delete a rule that is not present in the TBL8 & lookup
577 struct rte_lpm *lpm = NULL;
579 uint8_t depth, next_hop_add, next_hop_return;
582 /* Add rule that covers a TBL24 range previously invalid & lookup
583 * (& delete & lookup) */
584 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
585 TEST_LPM_ASSERT(lpm != NULL);
587 ip = IPv4(128, 0, 0, 0);
591 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
592 TEST_LPM_ASSERT(status == 0);
594 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
595 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
597 status = rte_lpm_delete(lpm, ip, depth);
598 TEST_LPM_ASSERT(status == 0);
600 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
601 TEST_LPM_ASSERT(status == -ENOENT);
603 rte_lpm_delete_all(lpm);
605 ip = IPv4(128, 0, 0, 0);
609 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
610 TEST_LPM_ASSERT(status == 0);
612 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
613 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
615 status = rte_lpm_delete(lpm, ip, depth);
616 TEST_LPM_ASSERT(status == 0);
618 rte_lpm_delete_all(lpm);
620 /* Add rule that extends a TBL24 valid entry & lookup for both rules
621 * (& delete & lookup) */
623 ip = IPv4(128, 0, 0, 0);
627 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
628 TEST_LPM_ASSERT(status == 0);
630 ip = IPv4(128, 0, 0, 10);
634 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
635 TEST_LPM_ASSERT(status == 0);
637 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
638 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
640 ip = IPv4(128, 0, 0, 0);
643 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
644 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
646 ip = IPv4(128, 0, 0, 0);
649 status = rte_lpm_delete(lpm, ip, depth);
650 TEST_LPM_ASSERT(status == 0);
652 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
653 TEST_LPM_ASSERT(status == -ENOENT);
655 ip = IPv4(128, 0, 0, 10);
658 status = rte_lpm_delete(lpm, ip, depth);
659 TEST_LPM_ASSERT(status == 0);
661 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
662 TEST_LPM_ASSERT(status == -ENOENT);
664 rte_lpm_delete_all(lpm);
666 /* Add rule that updates the next hop in TBL24 & lookup
667 * (& delete & lookup) */
669 ip = IPv4(128, 0, 0, 0);
673 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
674 TEST_LPM_ASSERT(status == 0);
676 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
677 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
681 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
682 TEST_LPM_ASSERT(status == 0);
684 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
685 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
687 status = rte_lpm_delete(lpm, ip, depth);
688 TEST_LPM_ASSERT(status == 0);
690 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
691 TEST_LPM_ASSERT(status == -ENOENT);
693 rte_lpm_delete_all(lpm);
695 /* Add rule that updates the next hop in TBL8 & lookup
696 * (& delete & lookup) */
698 ip = IPv4(128, 0, 0, 0);
702 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
703 TEST_LPM_ASSERT(status == 0);
705 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
706 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
710 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
711 TEST_LPM_ASSERT(status == 0);
713 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
714 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
716 status = rte_lpm_delete(lpm, ip, depth);
717 TEST_LPM_ASSERT(status == 0);
719 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
720 TEST_LPM_ASSERT(status == -ENOENT);
722 rte_lpm_delete_all(lpm);
724 /* Delete a rule that is not present in the TBL24 & lookup */
726 ip = IPv4(128, 0, 0, 0);
729 status = rte_lpm_delete(lpm, ip, depth);
730 TEST_LPM_ASSERT(status < 0);
732 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
733 TEST_LPM_ASSERT(status == -ENOENT);
735 rte_lpm_delete_all(lpm);
737 /* Delete a rule that is not present in the TBL8 & lookup */
739 ip = IPv4(128, 0, 0, 0);
742 status = rte_lpm_delete(lpm, ip, depth);
743 TEST_LPM_ASSERT(status < 0);
745 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
746 TEST_LPM_ASSERT(status == -ENOENT);
/* test11: overlapping rules — lookup must prefer the more specific prefix,
 * and deleting the covering rule must leave the specific one intact.
 * NOTE(review): the depth/next_hop assignments between sections are missing
 * from this excerpt. */
754 * Add two rules, lookup to hit the more specific one, lookup to hit the less
755 * specific one delete the less specific rule and lookup previous values again;
756 * add a more specific rule than the existing rule, lookup again
763 struct rte_lpm *lpm = NULL;
765 uint8_t depth, next_hop_add, next_hop_return;
768 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
769 TEST_LPM_ASSERT(lpm != NULL);
771 ip = IPv4(128, 0, 0, 0);
775 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
776 TEST_LPM_ASSERT(status == 0);
778 ip = IPv4(128, 0, 0, 10);
782 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
783 TEST_LPM_ASSERT(status == 0);
785 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
786 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
788 ip = IPv4(128, 0, 0, 0);
791 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
792 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
794 ip = IPv4(128, 0, 0, 0);
797 status = rte_lpm_delete(lpm, ip, depth);
798 TEST_LPM_ASSERT(status == 0);
800 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
801 TEST_LPM_ASSERT(status == -ENOENT);
803 ip = IPv4(128, 0, 0, 10);
806 status = rte_lpm_delete(lpm, ip, depth);
807 TEST_LPM_ASSERT(status == 0);
809 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
810 TEST_LPM_ASSERT(status == -ENOENT);
/* test12: churn test — 1000 iterations of add(/depth>24)/lookup/delete/
 * lookup on the same rule, verifying tbl8 groups are allocated and fully
 * released each cycle (no leak across iterations). */
818 * Add an extended rule (i.e. depth greater than 24, lookup (hit), delete,
819 * lookup (miss) in a for loop of 1000 times. This will check tbl8 extension
827 struct rte_lpm *lpm = NULL;
829 uint8_t depth, next_hop_add, next_hop_return;
832 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
833 TEST_LPM_ASSERT(lpm != NULL);
835 ip = IPv4(128, 0, 0, 0);
839 for (i = 0; i < 1000; i++) {
840 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
841 TEST_LPM_ASSERT(status == 0);
843 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
844 TEST_LPM_ASSERT((status == 0) &&
845 (next_hop_return == next_hop_add));
847 status = rte_lpm_delete(lpm, ip, depth);
848 TEST_LPM_ASSERT(status == 0);
850 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
851 TEST_LPM_ASSERT(status == -ENOENT);
/* test13: tbl8 extend/contract churn — a persistent /24-style rule stays in
 * place while a deeper rule over the same address is added and removed 1000
 * times; after each delete the lookup must fall back to the persistent
 * rule's next hop (next_hop_add_1). */
860 * Add a rule to tbl24, lookup (hit), then add a rule that will extend this
861 * tbl24 entry, lookup (hit). delete the rule that caused the tbl24 extension,
862 * lookup (miss) and repeat for loop of 1000 times. This will check tbl8
863 * extension and contraction.
870 struct rte_lpm *lpm = NULL;
872 uint8_t depth, next_hop_add_1, next_hop_add_2, next_hop_return;
875 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
876 TEST_LPM_ASSERT(lpm != NULL);
878 ip = IPv4(128, 0, 0, 0);
880 next_hop_add_1 = 100;
882 status = rte_lpm_add(lpm, ip, depth, next_hop_add_1);
883 TEST_LPM_ASSERT(status == 0);
885 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
886 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
889 next_hop_add_2 = 101;
891 for (i = 0; i < 1000; i++) {
892 status = rte_lpm_add(lpm, ip, depth, next_hop_add_2);
893 TEST_LPM_ASSERT(status == 0);
895 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
896 TEST_LPM_ASSERT((status == 0) &&
897 (next_hop_return == next_hop_add_2));
899 status = rte_lpm_delete(lpm, ip, depth);
900 TEST_LPM_ASSERT(status == 0);
902 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
903 TEST_LPM_ASSERT((status == 0) &&
904 (next_hop_return == next_hop_add_1));
909 status = rte_lpm_delete(lpm, ip, depth);
910 TEST_LPM_ASSERT(status == 0);
912 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
913 TEST_LPM_ASSERT(status == -ENOENT);
/* test14: tbl8-group exhaustion — consume every available tbl8 group with
 * /32 rules, then verify that one more tbl8-requiring add fails with a
 * negative status instead of corrupting the table. */
921 * Force TBL8 extension exhaustion. Add 256 rules that require a tbl8 extension.
922 * No more tbl8 extensions will be allowed. Now add one more rule that required
923 * a tbl8 extension and get fail.
929 /* We only use depth = 32 in the loop below so we must make sure
930 * that we have enough storage for all rules at that depth*/
932 struct rte_lpm *lpm = NULL;
934 uint8_t depth, next_hop_add, next_hop_return;
937 /* Add enough space for 256 rules for every depth */
938 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 256 * 32, 0);
939 TEST_LPM_ASSERT(lpm != NULL);
941 ip = IPv4(0, 0, 0, 0);
945 /* Add 256 rules that require a tbl8 extension */
946 for (; ip <= IPv4(0, 0, 255, 0); ip += 256) {
947 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
948 TEST_LPM_ASSERT(status == 0);
950 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
951 TEST_LPM_ASSERT((status == 0) &&
952 (next_hop_return == next_hop_add));
955 /* All tbl8 extensions have been used above. Try to add one more and
957 ip = IPv4(1, 0, 0, 0);
960 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
961 TEST_LPM_ASSERT(status < 0);
/* test15: rte_lpm_find_existing() — a created table must be findable by its
 * exact name, and an unknown name must return NULL. */
969 * Sequence of operations for find existing lpm table
972 * - find existing table: hit
973 * - find non-existing table: miss
979 struct rte_lpm *lpm = NULL, *result = NULL;
982 lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, 256 * 32, 0);
983 TEST_LPM_ASSERT(lpm != NULL);
985 /* Try to find existing lpm */
986 result = rte_lpm_find_existing("lpm_find_existing");
987 TEST_LPM_ASSERT(result == lpm);
989 /* Try to find non-existing lpm */
990 result = rte_lpm_find_existing("lpm_find_non_existing");
991 TEST_LPM_ASSERT(result == NULL);
994 rte_lpm_delete_all(lpm);
/* test16: fill every tbl8 group with distinct /30 rules and verify the add
 * that runs out of groups fails after exactly RTE_LPM_TBL8_NUM_GROUPS
 * successful additions. NOTE(review): the rte_lpm_create() argument list at
 * line 1008 continues on lines not visible in this excerpt. */
1008 struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY,
1011 /* ip loops through all possibilities for top 24 bits of address */
1012 for (ip = 0; ip < 0xFFFFFF; ip++){
1013 /* add an entry within a different tbl8 each time, since
1014 * depth >24 and the top 24 bits are different */
1015 if (rte_lpm_add(lpm, (ip << 8) + 0xF0, 30, 0) < 0)
1019 if (ip != RTE_LPM_TBL8_NUM_GROUPS) {
1020 printf("Error, unexpected failure with filling tbl8 groups\n");
1021 printf("Failed after %u additions, expected after %u\n",
1022 (unsigned)ip, (unsigned)RTE_LPM_TBL8_NUM_GROUPS);
/* test17: regression test for tbl8 overwrite — adding a /24 and a /25 after
 * a /32 must not clobber the earlier entries; all three next hops must be
 * distinct and still retrievable at the end. */
1030 * Test for overwriting of tbl8:
1031 * - add rule /32 and lookup
1032 * - add new rule /24 and lookup
1033 * - add third rule /25 and lookup
1034 * - lookup /32 and /24 rule to ensure the table has not been overwritten.
1039 struct rte_lpm *lpm = NULL;
1040 const uint32_t ip_10_32 = IPv4(10, 10, 10, 2);
1041 const uint32_t ip_10_24 = IPv4(10, 10, 10, 0);
1042 const uint32_t ip_20_25 = IPv4(10, 10, 20, 2);
1043 const uint8_t d_ip_10_32 = 32,
1046 const uint8_t next_hop_ip_10_32 = 100,
1047 next_hop_ip_10_24 = 105,
1048 next_hop_ip_20_25 = 111;
1049 uint8_t next_hop_return = 0;
1052 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
1053 TEST_LPM_ASSERT(lpm != NULL);
1055 if ((status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32,
1056 next_hop_ip_10_32)) < 0)
1059 status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
1060 uint8_t test_hop_10_32 = next_hop_return;
1061 TEST_LPM_ASSERT(status == 0);
1062 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
1064 if ((status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24,
1065 next_hop_ip_10_24)) < 0)
1068 status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
1069 uint8_t test_hop_10_24 = next_hop_return;
1070 TEST_LPM_ASSERT(status == 0);
1071 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
1073 if ((status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25,
1074 next_hop_ip_20_25)) < 0)
1077 status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
1078 uint8_t test_hop_20_25 = next_hop_return;
1079 TEST_LPM_ASSERT(status == 0);
1080 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);
1082 if (test_hop_10_32 == test_hop_10_24) {
1083 printf("Next hop return equal\n");
1087 if (test_hop_10_24 == test_hop_20_25){
1088 printf("Next hop return equal\n");
1092 status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
1093 TEST_LPM_ASSERT(status == 0);
1094 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
1096 status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
1097 TEST_LPM_ASSERT(status == 0);
1098 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
/* Sizing for perf_test(): ITERATIONS batches of BATCH_SIZE random lookups,
 * with bulk lookups done BULK_SIZE addresses at a time. */
1106 * Lookup performance test
1109 #define ITERATIONS (1 << 10)
1110 #define BATCH_SIZE (1 << 12)
1111 #define BULK_SIZE 32
/* Prints, per prefix depth 1..32, how many of the n routes in table use that
 * depth and what percentage of the total that is. NOTE(review): the loop that
 * increments depth_counter is partially missing from this excerpt, and the
 * declaration of i/j (presumably unsigned, matching the %u conversions) is
 * not visible. */
1114 print_route_distribution(const struct route_rule *table, uint32_t n)
1118 printf("Route distribution per prefix width: \n");
1119 printf("DEPTH QUANTITY (PERCENT)\n");
1120 printf("--------------------------- \n");
1123 for(i = 1; i <= 32; i++) {
1124 unsigned depth_counter = 0;
1125 double percent_hits;
1127 for (j = 0; j < n; j++)
1128 if (table[j].depth == (uint8_t) i)
1131 percent_hits = ((double)depth_counter)/((double)n) * 100;
1132 printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
/* perf_test: measures average cycles for add, single lookup, bulk lookup and
 * delete over large_route_table, and reports tbl24 occupancy / cache-line
 * usage. Uses rte_rdtsc() for timing, so results are in TSC cycles.
 * NOTE(review): loop headers, count/total_time resets and several closing
 * braces are missing from this excerpt. */
1140 struct rte_lpm *lpm = NULL;
1141 uint64_t begin, total_time, lpm_used_entries = 0;
1143 uint8_t next_hop_add = 0xAA, next_hop_return = 0;
1145 uint64_t cache_line_counter = 0;
1148 rte_srand(rte_rdtsc());
1150 printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);
1152 print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);
1154 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 1000000, 0);
1155 TEST_LPM_ASSERT(lpm != NULL);
1158 begin = rte_rdtsc();
1160 for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
1161 if (rte_lpm_add(lpm, large_route_table[i].ip,
1162 large_route_table[i].depth, next_hop_add) == 0)
1166 total_time = rte_rdtsc() - begin;
1168 printf("Unique added entries = %d\n", status);
1169 /* Obtain add statistics. */
1170 for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
1171 if (lpm->tbl24[i].valid)
/* A 64-byte cache line holds a run of tbl24 entries; count a new line each
 * time the running entry count crosses into it. */
1175 if ((uint64_t)count < lpm_used_entries) {
1176 cache_line_counter++;
1177 count = lpm_used_entries;
1182 printf("Used table 24 entries = %u (%g%%)\n",
1183 (unsigned) lpm_used_entries,
1184 (lpm_used_entries * 100.0) / RTE_LPM_TBL24_NUM_ENTRIES);
1185 printf("64 byte Cache entries used = %u (%u bytes)\n",
1186 (unsigned) cache_line_counter, (unsigned) cache_line_counter * 64);
1188 printf("Average LPM Add: %g cycles\n", (double)total_time / NUM_ROUTE_ENTRIES);
1190 /* Measure single Lookup */
1194 for (i = 0; i < ITERATIONS; i ++) {
1195 static uint32_t ip_batch[BATCH_SIZE];
1197 for (j = 0; j < BATCH_SIZE; j ++)
1198 ip_batch[j] = rte_rand();
1200 /* Lookup per batch */
1201 begin = rte_rdtsc();
1203 for (j = 0; j < BATCH_SIZE; j ++) {
1204 if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
1208 total_time += rte_rdtsc() - begin;
1211 printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
1212 (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
1213 (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
1215 /* Measure bulk Lookup */
1218 for (i = 0; i < ITERATIONS; i ++) {
1219 static uint32_t ip_batch[BATCH_SIZE];
1220 uint16_t next_hops[BULK_SIZE];
1222 /* Create array of random IP addresses */
1223 for (j = 0; j < BATCH_SIZE; j ++)
1224 ip_batch[j] = rte_rand();
1226 /* Lookup per batch */
1227 begin = rte_rdtsc();
1228 for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
1230 rte_lpm_lookup_bulk(lpm, &ip_batch[j], next_hops, BULK_SIZE);
1231 for (k = 0; k < BULK_SIZE; k++)
1232 if (unlikely(!(next_hops[k] & RTE_LPM_LOOKUP_SUCCESS)))
1236 total_time += rte_rdtsc() - begin;
1238 printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
1239 (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
1240 (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
1244 begin = rte_rdtsc();
1246 for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
1247 /* rte_lpm_delete(lpm, ip, depth) */
1248 status += rte_lpm_delete(lpm, large_route_table[i].ip,
1249 large_route_table[i].depth);
1252 total_time += rte_rdtsc() - begin;
1254 printf("Average LPM Delete: %g cycles\n",
1255 (double)total_time / NUM_ROUTE_ENTRIES);
1257 rte_lpm_delete_all(lpm);
/* test_lpm: driver — runs every entry in tests[] and returns the last
 * failing status (0 if all pass). The final printf is the stub body used
 * when the LPM library is compiled out. NOTE(review): the surrounding
 * function braces and the #else that separates the stub are not visible
 * in this excerpt. */
1264 * Do all unit and performance tests.
1271 int status, global_status = 0;
1273 for (i = 0; i < NUM_LPM_TESTS; i++) {
1274 status = tests[i]();
1276 printf("ERROR: LPM Test %s: FAIL\n", RTE_STR(tests[i]));
1277 global_status = status;
1281 return global_status;
1289 printf("The LPM library is not included in this build\n");