4 * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 * version: DPDK.L.1.2.3-3
40 #include <sys/queue.h>
41 #include <cmdline_parse.h>
43 #include <rte_common.h>
44 #include <rte_cycles.h>
45 #include <rte_memory.h>
46 #include <rte_random.h>
47 #include <rte_branch_prediction.h>
54 #include "test_lpm_routes.h"
58 #define ITERATIONS (1 << 20)
59 #define BATCH_SIZE (1 << 13)
61 #define TEST_LPM_ASSERT(cond) do { \
63 printf("Error at line %d: \n", __LINE__); \
70 typedef int32_t (* rte_lpm_test)(void);
72 static int32_t test0(void);
73 static int32_t test1(void);
74 static int32_t test2(void);
75 static int32_t test3(void);
76 static int32_t test4(void);
77 static int32_t test5(void);
78 static int32_t test6(void);
79 static int32_t test7(void);
80 static int32_t test8(void);
81 static int32_t test9(void);
82 static int32_t test10(void);
83 static int32_t test11(void);
84 static int32_t test12(void);
85 static int32_t test13(void);
86 static int32_t test14(void);
87 static int32_t test15(void);
88 static int32_t test16(void);
89 static int32_t test17(void);
90 static int32_t test18(void);
92 rte_lpm_test tests[] = {
115 #define NUM_LPM_TESTS (sizeof(tests)/sizeof(tests[0]))
117 #define MAX_RULES 256
123 * Check that rte_lpm_create fails gracefully for incorrect user input
129 struct rte_lpm *lpm = NULL;
131 /* rte_lpm_create: lpm name == NULL */
132 lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
133 TEST_LPM_ASSERT(lpm == NULL);
135 /* rte_lpm_create: max_rules = 0 */
136 /* Note: __func__ inserts the function name, in this case "test0". */
137 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 0, RTE_LPM_HEAP);
138 TEST_LPM_ASSERT(lpm == NULL);
140 /* rte_lpm_create: mem_location is not RTE_LPM_HEAP or not MEMZONE */
141 /* Note: __func__ inserts the function name, in this case "test0". */
142 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 2);
143 TEST_LPM_ASSERT(lpm == NULL);
145 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, -1);
146 TEST_LPM_ASSERT(lpm == NULL);
148 /* socket_id < -1 is invalid */
149 lpm = rte_lpm_create(__func__, -2, MAX_RULES, RTE_LPM_HEAP);
150 TEST_LPM_ASSERT(lpm == NULL);
157 * Create lpm table then delete lpm table 100 times
158 * Use a slightly different rules size each time
163 struct rte_lpm *lpm = NULL;
166 /* rte_lpm_free: Free NULL */
167 for (i = 0; i < 100; i++) {
168 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES - i,
170 TEST_LPM_ASSERT(lpm != NULL);
175 /* Can not test free so return success */
181 * Call rte_lpm_free for NULL pointer user input. Note: free has no return and
182 * therefore it is impossible to check for failure but this test is added to
183 * increase function coverage metrics and to validate that freeing null does
189 struct rte_lpm *lpm = NULL;
191 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
192 TEST_LPM_ASSERT(lpm != NULL);
201 * Check that rte_lpm_add fails gracefully for incorrect user input arguments
206 struct rte_lpm *lpm = NULL;
207 uint32_t ip = IPv4(0, 0, 0, 0);
208 uint8_t depth = 24, next_hop = 100;
211 /* rte_lpm_add: lpm == NULL */
212 status = rte_lpm_add(NULL, ip, depth, next_hop);
213 TEST_LPM_ASSERT(status < 0);
215 /* Create valid lpm to use in rest of test. */
216 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
217 TEST_LPM_ASSERT(lpm != NULL);
219 /* rte_lpm_add: depth < 1 */
220 status = rte_lpm_add(lpm, ip, 0, next_hop);
221 TEST_LPM_ASSERT(status < 0);
223 /* rte_lpm_add: depth > MAX_DEPTH */
224 status = rte_lpm_add(lpm, ip, (MAX_DEPTH + 1), next_hop);
225 TEST_LPM_ASSERT(status < 0);
234 * Check that rte_lpm_delete fails gracefully for incorrect user input
240 struct rte_lpm *lpm = NULL;
241 uint32_t ip = IPv4(0, 0, 0, 0);
245 /* rte_lpm_delete: lpm == NULL */
246 status = rte_lpm_delete(NULL, ip, depth);
247 TEST_LPM_ASSERT(status < 0);
249 /* Create valid lpm to use in rest of test. */
250 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
251 TEST_LPM_ASSERT(lpm != NULL);
253 /* rte_lpm_delete: depth < 1 */
254 status = rte_lpm_delete(lpm, ip, 0);
255 TEST_LPM_ASSERT(status < 0);
257 /* rte_lpm_delete: depth > MAX_DEPTH */
258 status = rte_lpm_delete(lpm, ip, (MAX_DEPTH + 1));
259 TEST_LPM_ASSERT(status < 0);
268 * Check that rte_lpm_lookup fails gracefully for incorrect user input
274 #if defined(RTE_LIBRTE_LPM_DEBUG)
275 struct rte_lpm *lpm = NULL;
276 uint32_t ip = IPv4(0, 0, 0, 0);
277 uint8_t next_hop_return = 0;
280 /* rte_lpm_lookup: lpm == NULL */
281 status = rte_lpm_lookup(NULL, ip, &next_hop_return);
282 TEST_LPM_ASSERT(status < 0);
284 /* Create valid lpm to use in rest of test. */
285 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
286 TEST_LPM_ASSERT(lpm != NULL);
288 /* rte_lpm_lookup: depth < 1 */
289 status = rte_lpm_lookup(lpm, ip, NULL);
290 TEST_LPM_ASSERT(status < 0);
301 * Call add, lookup and delete for a single rule with depth <= 24
306 struct rte_lpm *lpm = NULL;
307 uint32_t ip = IPv4(0, 0, 0, 0);
308 uint8_t depth = 24, next_hop_add = 100, next_hop_return = 0;
311 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
312 TEST_LPM_ASSERT(lpm != NULL);
314 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
315 TEST_LPM_ASSERT(status == 0);
317 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
318 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
320 status = rte_lpm_delete(lpm, ip, depth);
321 TEST_LPM_ASSERT(status == 0);
323 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
324 TEST_LPM_ASSERT(status == -ENOENT);
333 * Call add, lookup and delete for a single rule with depth > 24
339 struct rte_lpm *lpm = NULL;
340 uint32_t ip = IPv4(0, 0, 0, 0);
341 uint8_t depth = 32, next_hop_add = 100, next_hop_return = 0;
344 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
345 TEST_LPM_ASSERT(lpm != NULL);
347 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
348 TEST_LPM_ASSERT(status == 0);
350 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
351 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
353 status = rte_lpm_delete(lpm, ip, depth);
354 TEST_LPM_ASSERT(status == 0);
356 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
357 TEST_LPM_ASSERT(status == -ENOENT);
366 * Use rte_lpm_add to add rules which effect only the second half of the lpm
367 * table. Use all possible depths ranging from 1..32. Set the next hop = to the
368 * depth. Check for a lookup hit on every add and for a lookup miss on the
369 * first half of the lpm table after each add. Finally delete all rules going
370 * backwards (i.e. from depth = 32 ..1) and carry out a lookup after each
371 * delete. The lookup should return the next_hop_add value related to the
372 * previous depth value (i.e. depth -1).
377 struct rte_lpm *lpm = NULL;
378 uint32_t ip1 = IPv4(127, 255, 255, 255), ip2 = IPv4(128, 0, 0, 0);
379 uint8_t depth, next_hop_add, next_hop_return;
382 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
383 TEST_LPM_ASSERT(lpm != NULL);
385 /* Loop with rte_lpm_add. */
386 for (depth = 1; depth <= 32; depth++) {
387 /* Let the next_hop_add value = depth. Just for change. */
388 next_hop_add = depth;
390 status = rte_lpm_add(lpm, ip2, depth, next_hop_add);
391 TEST_LPM_ASSERT(status == 0);
393 /* Check IP in first half of tbl24 which should be empty. */
394 status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
395 TEST_LPM_ASSERT(status == -ENOENT);
397 status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
398 TEST_LPM_ASSERT((status == 0) &&
399 (next_hop_return == next_hop_add));
402 /* Loop with rte_lpm_delete. */
403 for (depth = 32; depth >= 1; depth--) {
404 next_hop_add = (uint8_t) (depth - 1);
406 status = rte_lpm_delete(lpm, ip2, depth);
407 TEST_LPM_ASSERT(status == 0);
409 status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
412 TEST_LPM_ASSERT((status == 0) &&
413 (next_hop_return == next_hop_add));
416 TEST_LPM_ASSERT(status == -ENOENT);
419 status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
420 TEST_LPM_ASSERT(status == -ENOENT);
430 * - Add & lookup to hit invalid TBL24 entry
431 * - Add & lookup to hit valid TBL24 entry not extended
432 * - Add & lookup to hit valid extended TBL24 entry with invalid TBL8 entry
433 * - Add & lookup to hit valid extended TBL24 entry with valid TBL8 entry
439 struct rte_lpm *lpm = NULL;
440 uint32_t ip, ip_1, ip_2;
441 uint8_t depth, depth_1, depth_2, next_hop_add, next_hop_add_1,
442 next_hop_add_2, next_hop_return;
445 /* Add & lookup to hit invalid TBL24 entry */
446 ip = IPv4(128, 0, 0, 0);
450 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
451 TEST_LPM_ASSERT(lpm != NULL);
453 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
454 TEST_LPM_ASSERT(status == 0);
456 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
457 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
459 status = rte_lpm_delete(lpm, ip, depth);
460 TEST_LPM_ASSERT(status == 0);
462 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
463 TEST_LPM_ASSERT(status == -ENOENT);
465 rte_lpm_delete_all(lpm);
467 /* Add & lookup to hit valid TBL24 entry not extended */
468 ip = IPv4(128, 0, 0, 0);
472 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
473 TEST_LPM_ASSERT(status == 0);
475 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
476 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
481 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
482 TEST_LPM_ASSERT(status == 0);
484 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
485 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
489 status = rte_lpm_delete(lpm, ip, depth);
490 TEST_LPM_ASSERT(status == 0);
494 status = rte_lpm_delete(lpm, ip, depth);
495 TEST_LPM_ASSERT(status == 0);
497 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
498 TEST_LPM_ASSERT(status == -ENOENT);
500 rte_lpm_delete_all(lpm);
502 /* Add & lookup to hit valid extended TBL24 entry with invalid TBL8
504 ip = IPv4(128, 0, 0, 0);
508 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
509 TEST_LPM_ASSERT(status == 0);
511 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
512 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
514 ip = IPv4(128, 0, 0, 5);
518 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
519 TEST_LPM_ASSERT(status == 0);
521 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
522 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
524 status = rte_lpm_delete(lpm, ip, depth);
525 TEST_LPM_ASSERT(status == 0);
527 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
528 TEST_LPM_ASSERT(status == -ENOENT);
530 ip = IPv4(128, 0, 0, 0);
534 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
535 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
537 status = rte_lpm_delete(lpm, ip, depth);
538 TEST_LPM_ASSERT(status == 0);
540 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
541 TEST_LPM_ASSERT(status == -ENOENT);
543 rte_lpm_delete_all(lpm);
545 /* Add & lookup to hit valid extended TBL24 entry with valid TBL8
547 ip_1 = IPv4(128, 0, 0, 0);
549 next_hop_add_1 = 101;
551 ip_2 = IPv4(128, 0, 0, 5);
553 next_hop_add_2 = 102;
557 status = rte_lpm_add(lpm, ip_1, depth_1, next_hop_add_1);
558 TEST_LPM_ASSERT(status == 0);
560 status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
561 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
563 status = rte_lpm_add(lpm, ip_2, depth_2, next_hop_add_2);
564 TEST_LPM_ASSERT(status == 0);
566 status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
567 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2));
569 status = rte_lpm_delete(lpm, ip_2, depth_2);
570 TEST_LPM_ASSERT(status == 0);
572 status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
573 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
575 status = rte_lpm_delete(lpm, ip_1, depth_1);
576 TEST_LPM_ASSERT(status == 0);
578 status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
579 TEST_LPM_ASSERT(status == -ENOENT);
589 * - Add rule that covers a TBL24 range previously invalid & lookup (& delete &
591 * - Add rule that extends a TBL24 invalid entry & lookup (& delete & lookup)
592 * - Add rule that extends a TBL24 valid entry & lookup for both rules (&
594 * - Add rule that updates the next hop in TBL24 & lookup (& delete & lookup)
595 * - Add rule that updates the next hop in TBL8 & lookup (& delete & lookup)
596 * - Delete a rule that is not present in the TBL24 & lookup
597 * - Delete a rule that is not present in the TBL8 & lookup
604 struct rte_lpm *lpm = NULL;
606 uint8_t depth, next_hop_add, next_hop_return;
609 /* Add rule that covers a TBL24 range previously invalid & lookup
610 * (& delete & lookup) */
611 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
612 TEST_LPM_ASSERT(lpm != NULL);
614 ip = IPv4(128, 0, 0, 0);
618 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
619 TEST_LPM_ASSERT(status == 0);
621 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
622 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
624 status = rte_lpm_delete(lpm, ip, depth);
625 TEST_LPM_ASSERT(status == 0);
627 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
628 TEST_LPM_ASSERT(status == -ENOENT);
630 rte_lpm_delete_all(lpm);
632 ip = IPv4(128, 0, 0, 0);
636 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
637 TEST_LPM_ASSERT(status == 0);
639 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
640 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
642 status = rte_lpm_delete(lpm, ip, depth);
643 TEST_LPM_ASSERT(status == 0);
645 rte_lpm_delete_all(lpm);
647 /* Add rule that extends a TBL24 valid entry & lookup for both rules
648 * (& delete & lookup) */
650 ip = IPv4(128, 0, 0, 0);
654 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
655 TEST_LPM_ASSERT(status == 0);
657 ip = IPv4(128, 0, 0, 10);
661 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
662 TEST_LPM_ASSERT(status == 0);
664 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
665 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
667 ip = IPv4(128, 0, 0, 0);
670 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
671 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
673 ip = IPv4(128, 0, 0, 0);
676 status = rte_lpm_delete(lpm, ip, depth);
677 TEST_LPM_ASSERT(status == 0);
679 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
680 TEST_LPM_ASSERT(status == -ENOENT);
682 ip = IPv4(128, 0, 0, 10);
685 status = rte_lpm_delete(lpm, ip, depth);
686 TEST_LPM_ASSERT(status == 0);
688 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
689 TEST_LPM_ASSERT(status == -ENOENT);
691 rte_lpm_delete_all(lpm);
693 /* Add rule that updates the next hop in TBL24 & lookup
694 * (& delete & lookup) */
696 ip = IPv4(128, 0, 0, 0);
700 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
701 TEST_LPM_ASSERT(status == 0);
703 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
704 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
708 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
709 TEST_LPM_ASSERT(status == 0);
711 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
712 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
714 status = rte_lpm_delete(lpm, ip, depth);
715 TEST_LPM_ASSERT(status == 0);
717 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
718 TEST_LPM_ASSERT(status == -ENOENT);
720 rte_lpm_delete_all(lpm);
722 /* Add rule that updates the next hop in TBL8 & lookup
723 * (& delete & lookup) */
725 ip = IPv4(128, 0, 0, 0);
729 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
730 TEST_LPM_ASSERT(status == 0);
732 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
733 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
737 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
738 TEST_LPM_ASSERT(status == 0);
740 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
741 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
743 status = rte_lpm_delete(lpm, ip, depth);
744 TEST_LPM_ASSERT(status == 0);
746 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
747 TEST_LPM_ASSERT(status == -ENOENT);
749 rte_lpm_delete_all(lpm);
751 /* Delete a rule that is not present in the TBL24 & lookup */
753 ip = IPv4(128, 0, 0, 0);
757 status = rte_lpm_delete(lpm, ip, depth);
758 TEST_LPM_ASSERT(status < 0);
760 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
761 TEST_LPM_ASSERT(status == -ENOENT);
763 rte_lpm_delete_all(lpm);
765 /* Delete a rule that is not present in the TBL8 & lookup */
767 ip = IPv4(128, 0, 0, 0);
771 status = rte_lpm_delete(lpm, ip, depth);
772 TEST_LPM_ASSERT(status < 0);
774 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
775 TEST_LPM_ASSERT(status == -ENOENT);
784 * Add two rules, lookup to hit the more specific one, lookup to hit the less
785 * specific one delete the less specific rule and lookup previous values again;
786 * add a more specific rule than the existing rule, lookup again
793 struct rte_lpm *lpm = NULL;
795 uint8_t depth, next_hop_add, next_hop_return;
798 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
799 TEST_LPM_ASSERT(lpm != NULL);
801 ip = IPv4(128, 0, 0, 0);
805 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
806 TEST_LPM_ASSERT(status == 0);
808 ip = IPv4(128, 0, 0, 10);
812 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
813 TEST_LPM_ASSERT(status == 0);
815 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
816 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
818 ip = IPv4(128, 0, 0, 0);
821 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
822 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
824 ip = IPv4(128, 0, 0, 0);
827 status = rte_lpm_delete(lpm, ip, depth);
828 TEST_LPM_ASSERT(status == 0);
830 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
831 TEST_LPM_ASSERT(status == -ENOENT);
833 ip = IPv4(128, 0, 0, 10);
836 status = rte_lpm_delete(lpm, ip, depth);
837 TEST_LPM_ASSERT(status == 0);
839 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
840 TEST_LPM_ASSERT(status == -ENOENT);
849 * Add an extended rule (i.e. depth greater than 24, lookup (hit), delete,
850 * lookup (miss) in a for loop of 1000 times. This will check tbl8 extension
858 struct rte_lpm *lpm = NULL;
860 uint8_t depth, next_hop_add, next_hop_return;
863 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
864 TEST_LPM_ASSERT(lpm != NULL);
866 ip = IPv4(128, 0, 0, 0);
870 for (i = 0; i < 1000; i++) {
871 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
872 TEST_LPM_ASSERT(status == 0);
874 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
875 TEST_LPM_ASSERT((status == 0) &&
876 (next_hop_return == next_hop_add));
878 status = rte_lpm_delete(lpm, ip, depth);
879 TEST_LPM_ASSERT(status == 0);
881 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
882 TEST_LPM_ASSERT(status == -ENOENT);
892 * Add a rule to tbl24, lookup (hit), then add a rule that will extend this
893 * tbl24 entry, lookup (hit). delete the rule that caused the tbl24 extension,
894 * lookup (miss) and repeat for loop of 1000 times. This will check tbl8
895 * extension and contraction.
902 struct rte_lpm *lpm = NULL;
904 uint8_t depth, next_hop_add_1, next_hop_add_2, next_hop_return;
907 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
908 TEST_LPM_ASSERT(lpm != NULL);
910 ip = IPv4(128, 0, 0, 0);
912 next_hop_add_1 = 100;
914 status = rte_lpm_add(lpm, ip, depth, next_hop_add_1);
915 TEST_LPM_ASSERT(status == 0);
917 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
918 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
921 next_hop_add_2 = 101;
923 for (i = 0; i < 1000; i++) {
924 status = rte_lpm_add(lpm, ip, depth, next_hop_add_2);
925 TEST_LPM_ASSERT(status == 0);
927 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
928 TEST_LPM_ASSERT((status == 0) &&
929 (next_hop_return == next_hop_add_2));
931 status = rte_lpm_delete(lpm, ip, depth);
932 TEST_LPM_ASSERT(status == 0);
934 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
935 TEST_LPM_ASSERT((status == 0) &&
936 (next_hop_return == next_hop_add_1));
941 status = rte_lpm_delete(lpm, ip, depth);
942 TEST_LPM_ASSERT(status == 0);
944 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
945 TEST_LPM_ASSERT(status == -ENOENT);
954 * Force TBL8 extension exhaustion. Add 256 rules that require a tbl8 extension.
955 * No more tbl8 extensions will be allowed. Now add one more rule that requires
956 * a tbl8 extension and expect it to fail.
962 /* We only use depth = 32 in the loop below so we must make sure
963 * that we have enough storage for all rules at that depth*/
965 struct rte_lpm *lpm = NULL;
967 uint8_t depth, next_hop_add, next_hop_return;
970 /* Add enough space for 256 rules for every depth */
971 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 256 * 32, RTE_LPM_HEAP);
972 TEST_LPM_ASSERT(lpm != NULL);
974 ip = IPv4(0, 0, 0, 0);
978 /* Add 256 rules that require a tbl8 extension */
979 for (ip = 0; ip <= IPv4(0, 0, 255, 0); ip += 256) {
980 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
981 TEST_LPM_ASSERT(status == 0);
983 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
984 TEST_LPM_ASSERT((status == 0) &&
985 (next_hop_return == next_hop_add));
988 /* All tbl8 extensions have been used above. Try to add one more and
990 ip = IPv4(1, 0, 0, 0);
993 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
994 TEST_LPM_ASSERT(status < 0);
1003 * Lookup performance test using Mae West Routing Table
1005 static inline uint32_t
1006 depth_to_mask(uint8_t depth) {
1007 return (int)0x80000000 >> (depth - 1);
1011 rule_table_check_for_duplicates(const struct route_rule *table, uint32_t n){
1012 unsigned i, j, count;
1015 for (i = 0; i < (n - 1); i++) {
1016 uint8_t depth1 = table[i].depth;
1017 uint32_t ip1_masked = table[i].ip & depth_to_mask(depth1);
1019 for (j = (i + 1); j <n; j ++) {
1020 uint8_t depth2 = table[j].depth;
1021 uint32_t ip2_masked = table[j].ip &
1022 depth_to_mask(depth2);
1024 if ((depth1 == depth2) && (ip1_masked == ip2_masked)){
1025 printf("Rule %u is a duplicate of rule %u\n",
1036 rule_table_characterisation(const struct route_rule *table, uint32_t n){
1039 printf("DEPTH QUANTITY (PERCENT)\n");
1040 printf("--------------------------------- \n");
1042 for(i = 1; i <= 32; i++) {
1043 unsigned depth_counter = 0;
1044 double percent_hits;
1046 for (j = 0; j < n; j++) {
1047 if (table[j].depth == (uint8_t) i)
1051 percent_hits = ((double)depth_counter)/((double)n) * 100;
1053 printf("%u - %5u (%.2f)\n",
1054 i, depth_counter, percent_hits);
1060 static inline uint64_t
1061 div64(uint64_t dividend, uint64_t divisor)
1063 return ((2 * dividend) + divisor) / (2 * divisor);
1069 struct rte_lpm *lpm = NULL;
1070 uint64_t begin, end, total_time, lpm_used_entries = 0;
1071 unsigned avg_ticks, i, j;
1072 uint8_t next_hop_add = 0, next_hop_return = 0;
1075 printf("Using Mae West routing table from www.oiforum.com\n");
1076 printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);
1077 printf("No. duplicate routes = %u\n\n", (unsigned)
1078 rule_table_check_for_duplicates(mae_west_tbl, NUM_ROUTE_ENTRIES));
1079 printf("Route distribution per prefix width: \n");
1080 rule_table_characterisation(mae_west_tbl,
1081 (uint32_t) NUM_ROUTE_ENTRIES);
1084 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 1000000,
1086 TEST_LPM_ASSERT(lpm != NULL);
1092 begin = rte_rdtsc();
1094 for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
1095 /* rte_lpm_add(lpm, ip, depth, next_hop_add) */
1096 status += rte_lpm_add(lpm, mae_west_tbl[i].ip,
1097 mae_west_tbl[i].depth, next_hop_add);
1102 TEST_LPM_ASSERT(status == 0);
1104 /* Calculate average cycles per add. */
1105 avg_ticks = (uint32_t) div64((end - begin),
1106 (uint64_t) NUM_ROUTE_ENTRIES);
1108 uint64_t cache_line_counter = 0;
1111 /* Obtain add statistics. */
1112 for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
1113 if (lpm->tbl24[i].valid)
1117 if (count < lpm_used_entries) {
1118 cache_line_counter++;
1119 count = lpm_used_entries;
1124 printf("Number of table 24 entries = %u\n",
1125 (unsigned) RTE_LPM_TBL24_NUM_ENTRIES);
1126 printf("Used table 24 entries = %u\n",
1127 (unsigned) lpm_used_entries);
1128 printf("Percentage of table 24 entries used = %u\n",
1129 (unsigned) div64((lpm_used_entries * 100) ,
1130 RTE_LPM_TBL24_NUM_ENTRIES));
1131 printf("64 byte Cache entries used = %u \n",
1132 (unsigned) cache_line_counter);
1133 printf("Cache Required = %u bytes\n\n",
1134 (unsigned) cache_line_counter * 64);
1136 printf("Average LPM Add: %u cycles\n", avg_ticks);
1140 /* Choose random seed. */
1144 for (i = 0; i < (ITERATIONS / BATCH_SIZE); i ++) {
1145 static uint32_t ip_batch[BATCH_SIZE];
1146 uint64_t begin_batch, end_batch;
1148 /* Generate a batch of random numbers */
1149 for (j = 0; j < BATCH_SIZE; j ++) {
1150 ip_batch[j] = rte_rand();
1153 /* Lookup per batch */
1154 begin_batch = rte_rdtsc();
1156 for (j = 0; j < BATCH_SIZE; j ++) {
1157 status += rte_lpm_lookup(lpm, ip_batch[j],
1161 end_batch = rte_rdtsc();
1162 printf("status = %d\r", next_hop_return);
1163 TEST_LPM_ASSERT(status < 1);
1165 /* Accumulate batch time */
1166 total_time += (end_batch - begin_batch);
1168 TEST_LPM_ASSERT((status < -ENOENT) ||
1169 (next_hop_return == next_hop_add));
1172 avg_ticks = (uint32_t) div64(total_time, ITERATIONS);
1173 printf("Average LPM Lookup: %u cycles\n", avg_ticks);
1177 begin = rte_rdtsc();
1179 for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
1180 /* rte_lpm_delete(lpm, ip, depth) */
1181 status += rte_lpm_delete(lpm, mae_west_tbl[i].ip,
1182 mae_west_tbl[i].depth);
1187 TEST_LPM_ASSERT(status == 0);
1189 avg_ticks = (uint32_t) div64((end - begin), NUM_ROUTE_ENTRIES);
1191 printf("Average LPM Delete: %u cycles\n", avg_ticks);
1193 rte_lpm_delete_all(lpm);
1202 * Sequence of operations for find existing fbk hash table
1205 * - find existing table: hit
1206 * - find non-existing table: miss
1209 int32_t test16(void)
1211 struct rte_lpm *lpm = NULL, *result = NULL;
1214 lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, 256 * 32, RTE_LPM_HEAP);
1215 TEST_LPM_ASSERT(lpm != NULL);
1217 /* Try to find existing lpm */
1218 result = rte_lpm_find_existing("lpm_find_existing");
1219 TEST_LPM_ASSERT(result == lpm);
1221 /* Try to find non-existing lpm */
1222 result = rte_lpm_find_existing("lpm_find_non_existing");
1223 TEST_LPM_ASSERT(result == NULL);
1226 rte_lpm_delete_all(lpm);
1233 * test failure condition of overloading the tbl8 so no more will fit
1234 * Check we get an error return value in that case
1240 struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY,
1241 256 * 32, RTE_LPM_HEAP);
1243 printf("Testing filling tbl8's\n");
1245 /* ip loops through all possibilities for top 24 bits of address */
1246 for (ip = 0; ip < 0xFFFFFF; ip++){
1247 /* add an entry within a different tbl8 each time, since
1248 * depth >24 and the top 24 bits are different */
1249 if (rte_lpm_add(lpm, (ip << 8) + 0xF0, 30, 0) < 0)
1253 if (ip != RTE_LPM_TBL8_NUM_GROUPS) {
1254 printf("Error, unexpected failure with filling tbl8 groups\n");
1255 printf("Failed after %u additions, expected after %u\n",
1256 (unsigned)ip, (unsigned)RTE_LPM_TBL8_NUM_GROUPS);
1265 * Test for overwriting of tbl8:
1266 * - add rule /32 and lookup
1267 * - add new rule /24 and lookup
1268 * - add third rule /25 and lookup
1269 * - lookup /32 and /24 rule to ensure the table has not been overwritten.
1274 struct rte_lpm *lpm = NULL;
1275 const uint32_t ip_10_32 = IPv4(10, 10, 10, 2);
1276 const uint32_t ip_10_24 = IPv4(10, 10, 10, 0);
1277 const uint32_t ip_20_25 = IPv4(10, 10, 20, 2);
1278 const uint8_t d_ip_10_32 = 32,
1281 const uint8_t next_hop_ip_10_32 = 100,
1282 next_hop_ip_10_24 = 105,
1283 next_hop_ip_20_25 = 111;
1284 uint8_t next_hop_return = 0;
1287 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
1288 TEST_LPM_ASSERT(lpm != NULL);
1290 status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32, next_hop_ip_10_32);
1291 TEST_LPM_ASSERT(status == 0);
1293 status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
1294 TEST_LPM_ASSERT(status == 0);
1295 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
1297 status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24, next_hop_ip_10_24);
1298 TEST_LPM_ASSERT(status == 0);
1300 status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
1301 TEST_LPM_ASSERT(status == 0);
1302 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
1304 status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25, next_hop_ip_20_25);
1305 TEST_LPM_ASSERT(status == 0);
1307 status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
1308 TEST_LPM_ASSERT(status == 0);
1309 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);
1311 status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
1312 TEST_LPM_ASSERT(status == 0);
1313 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
1315 status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
1316 TEST_LPM_ASSERT(status == 0);
1317 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
1321 printf("%s PASSED\n", __func__);
1327 * Do all unit and performance tests.
1334 int status, global_status;
1336 printf("Running LPM tests...\n"
1337 "Total number of test = %u\n", (unsigned) NUM_LPM_TESTS);
1341 for (test_num = 0; test_num < NUM_LPM_TESTS; test_num++) {
1343 status = tests[test_num]();
1345 printf("LPM Test %u: %s\n", test_num,
1346 (status < 0) ? "FAIL" : "PASS");
1349 global_status = status;
1353 return global_status;
1361 printf("The LPM library is not included in this build\n");