4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 #include <sys/queue.h>
40 #include <rte_common.h>
41 #include <rte_cycles.h>
42 #include <rte_memory.h>
43 #include <rte_random.h>
44 #include <rte_branch_prediction.h>
53 #include "test_lpm_routes.h"
/*
 * Assert helper for the LPM unit tests: on failure print the failing line
 * and make the enclosing test function return -1 (FAIL).
 * NOTE(review): the conditional and loop lines were dropped in extraction;
 * reconstructed as the standard do { } while(0) multi-statement macro.
 */
#define TEST_LPM_ASSERT(cond) do {                                            \
	if (!(cond)) {                                                        \
		printf("Error at line %d: \n", __LINE__);                     \
		return -1;                                                    \
	}                                                                     \
} while(0)
62 typedef int32_t (* rte_lpm_test)(void);
64 static int32_t test0(void);
65 static int32_t test1(void);
66 static int32_t test2(void);
67 static int32_t test3(void);
68 static int32_t test4(void);
69 static int32_t test5(void);
70 static int32_t test6(void);
71 static int32_t test7(void);
72 static int32_t test8(void);
73 static int32_t test9(void);
74 static int32_t test10(void);
75 static int32_t test11(void);
76 static int32_t test12(void);
77 static int32_t test13(void);
78 static int32_t test14(void);
79 static int32_t test15(void);
80 static int32_t test16(void);
81 static int32_t test17(void);
82 static int32_t perf_test(void);
84 rte_lpm_test tests[] = {
107 #define NUM_LPM_TESTS (sizeof(tests)/sizeof(tests[0]))
109 #define MAX_RULES 256
113 * Check that rte_lpm_create fails gracefully for incorrect user input
119 struct rte_lpm *lpm = NULL;
121 /* rte_lpm_create: lpm name == NULL */
122 lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, MAX_RULES, 0);
123 TEST_LPM_ASSERT(lpm == NULL);
125 /* rte_lpm_create: max_rules = 0 */
126 /* Note: __func__ inserts the function name, in this case "test0". */
127 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 0, 0);
128 TEST_LPM_ASSERT(lpm == NULL);
130 /* socket_id < -1 is invalid */
131 lpm = rte_lpm_create(__func__, -2, MAX_RULES, 0);
132 TEST_LPM_ASSERT(lpm == NULL);
138 * Create lpm table then delete lpm table 100 times
139 * Use a slightly different rules size each time
144 struct rte_lpm *lpm = NULL;
147 /* rte_lpm_free: Free NULL */
148 for (i = 0; i < 100; i++) {
149 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES - i, 0);
150 TEST_LPM_ASSERT(lpm != NULL);
155 /* Can not test free so return success */
160 * Call rte_lpm_free for NULL pointer user input. Note: free has no return and
161 * therefore it is impossible to check for failure but this test is added to
162 * increase function coverage metrics and to validate that freeing null does
168 struct rte_lpm *lpm = NULL;
170 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
171 TEST_LPM_ASSERT(lpm != NULL);
179 * Check that rte_lpm_add fails gracefully for incorrect user input arguments
184 struct rte_lpm *lpm = NULL;
185 uint32_t ip = IPv4(0, 0, 0, 0);
186 uint8_t depth = 24, next_hop = 100;
189 /* rte_lpm_add: lpm == NULL */
190 status = rte_lpm_add(NULL, ip, depth, next_hop);
191 TEST_LPM_ASSERT(status < 0);
193 /*Create vaild lpm to use in rest of test. */
194 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
195 TEST_LPM_ASSERT(lpm != NULL);
197 /* rte_lpm_add: depth < 1 */
198 status = rte_lpm_add(lpm, ip, 0, next_hop);
199 TEST_LPM_ASSERT(status < 0);
201 /* rte_lpm_add: depth > MAX_DEPTH */
202 status = rte_lpm_add(lpm, ip, (MAX_DEPTH + 1), next_hop);
203 TEST_LPM_ASSERT(status < 0);
211 * Check that rte_lpm_delete fails gracefully for incorrect user input
217 struct rte_lpm *lpm = NULL;
218 uint32_t ip = IPv4(0, 0, 0, 0);
222 /* rte_lpm_delete: lpm == NULL */
223 status = rte_lpm_delete(NULL, ip, depth);
224 TEST_LPM_ASSERT(status < 0);
226 /*Create vaild lpm to use in rest of test. */
227 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
228 TEST_LPM_ASSERT(lpm != NULL);
230 /* rte_lpm_delete: depth < 1 */
231 status = rte_lpm_delete(lpm, ip, 0);
232 TEST_LPM_ASSERT(status < 0);
234 /* rte_lpm_delete: depth > MAX_DEPTH */
235 status = rte_lpm_delete(lpm, ip, (MAX_DEPTH + 1));
236 TEST_LPM_ASSERT(status < 0);
244 * Check that rte_lpm_lookup fails gracefully for incorrect user input
250 #if defined(RTE_LIBRTE_LPM_DEBUG)
251 struct rte_lpm *lpm = NULL;
252 uint32_t ip = IPv4(0, 0, 0, 0);
253 uint8_t next_hop_return = 0;
256 /* rte_lpm_lookup: lpm == NULL */
257 status = rte_lpm_lookup(NULL, ip, &next_hop_return);
258 TEST_LPM_ASSERT(status < 0);
260 /*Create vaild lpm to use in rest of test. */
261 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
262 TEST_LPM_ASSERT(lpm != NULL);
264 /* rte_lpm_lookup: depth < 1 */
265 status = rte_lpm_lookup(lpm, ip, NULL);
266 TEST_LPM_ASSERT(status < 0);
276 * Call add, lookup and delete for a single rule with depth <= 24
281 struct rte_lpm *lpm = NULL;
282 uint32_t ip = IPv4(0, 0, 0, 0);
283 uint8_t depth = 24, next_hop_add = 100, next_hop_return = 0;
286 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
287 TEST_LPM_ASSERT(lpm != NULL);
289 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
290 TEST_LPM_ASSERT(status == 0);
292 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
293 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
295 status = rte_lpm_delete(lpm, ip, depth);
296 TEST_LPM_ASSERT(status == 0);
298 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
299 TEST_LPM_ASSERT(status == -ENOENT);
307 * Call add, lookup and delete for a single rule with depth > 24
315 struct rte_lpm *lpm = NULL;
316 uint32_t ip = IPv4(0, 0, 0, 0);
317 uint8_t depth = 32, next_hop_add = 100, next_hop_return = 0;
320 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
321 TEST_LPM_ASSERT(lpm != NULL);
323 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
324 TEST_LPM_ASSERT(status == 0);
326 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
327 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
329 ipx4 = _mm_set_epi32(ip, ip + 0x100, ip - 0x100, ip);
330 rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
331 TEST_LPM_ASSERT(hop[0] == next_hop_add);
332 TEST_LPM_ASSERT(hop[1] == UINT16_MAX);
333 TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
334 TEST_LPM_ASSERT(hop[3] == next_hop_add);
336 status = rte_lpm_delete(lpm, ip, depth);
337 TEST_LPM_ASSERT(status == 0);
339 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
340 TEST_LPM_ASSERT(status == -ENOENT);
/*
 * NOTE(review): this chunk is garbled — each line carries a leftover source
 * line number, and gaps in that numbering (e.g. 398 -> 400 -> 403 -> 407)
 * show dropped lines: the function signature, the `__m128i ipx4`/`hop[4]`
 * declarations, and the `if (depth != 1) ... else ...` branches around the
 * delete-loop asserts. Restore from upstream DPDK app/test/test_lpm.c
 * before building. Comments below annotate the visible logic only.
 */
348 * Use rte_lpm_add to add rules which effect only the second half of the lpm
349 * table. Use all possible depths ranging from 1..32. Set the next hop = to the
350 * depth. Check lookup hit for on every add and check for lookup miss on the
351 * first half of the lpm table after each add. Finally delete all rules going
352 * backwards (i.e. from depth = 32 ..1) and carry out a lookup after each
353 * delete. The lookup should return the next_hop_add value related to the
354 * previous depth value (i.e. depth -1).
/* ip1 is the last address of the lower half; ip2 the first of the upper. */
361 struct rte_lpm *lpm = NULL;
362 uint32_t ip1 = IPv4(127, 255, 255, 255), ip2 = IPv4(128, 0, 0, 0);
363 uint8_t depth, next_hop_add, next_hop_return;
366 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
367 TEST_LPM_ASSERT(lpm != NULL);
369 /* Loop with rte_lpm_add. */
370 for (depth = 1; depth <= 32; depth++) {
371 /* Let the next_hop_add value = depth. Just for change. */
372 next_hop_add = depth;
374 status = rte_lpm_add(lpm, ip2, depth, next_hop_add);
375 TEST_LPM_ASSERT(status == 0);
377 /* Check IP in first half of tbl24 which should be empty. */
378 status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
379 TEST_LPM_ASSERT(status == -ENOENT);
381 status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
382 TEST_LPM_ASSERT((status == 0) &&
383 (next_hop_return == next_hop_add));
/* Vector lookup: ip1 lanes must miss, ip2 lanes must hit. */
385 ipx4 = _mm_set_epi32(ip2, ip1, ip2, ip1);
386 rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
387 TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
388 TEST_LPM_ASSERT(hop[1] == next_hop_add);
389 TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
390 TEST_LPM_ASSERT(hop[3] == next_hop_add);
393 /* Loop with rte_lpm_delete. */
394 for (depth = 32; depth >= 1; depth--) {
/* After deleting depth d, the d-1 rule (next hop d-1) should match. */
395 next_hop_add = (uint8_t) (depth - 1);
397 status = rte_lpm_delete(lpm, ip2, depth);
398 TEST_LPM_ASSERT(status == 0);
400 status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
/* NOTE(review): the branch selecting between these two asserts
 * (depth != 1 vs depth == 1, where the table becomes empty) is missing. */
403 TEST_LPM_ASSERT((status == 0) &&
404 (next_hop_return == next_hop_add));
407 TEST_LPM_ASSERT(status == -ENOENT);
410 status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
411 TEST_LPM_ASSERT(status == -ENOENT);
413 ipx4 = _mm_set_epi32(ip1, ip1, ip2, ip2);
414 rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
/* NOTE(review): the depth branch around the hop[0]/hop[1] asserts is
 * likewise missing (hit while a shorter rule remains, miss when empty). */
416 TEST_LPM_ASSERT(hop[0] == next_hop_add);
417 TEST_LPM_ASSERT(hop[1] == next_hop_add);
419 TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
420 TEST_LPM_ASSERT(hop[1] == UINT16_MAX);
422 TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
423 TEST_LPM_ASSERT(hop[3] == UINT16_MAX);
/*
 * NOTE(review): garbled chunk — leftover line numbers and numbering gaps
 * (e.g. 448 -> 452, 470 -> 474) show dropped lines, including the function
 * signature and the `depth = ...;` / `next_hop_add = ...;` assignments that
 * precede each sub-case. Those constants cannot be recovered from this view;
 * restore from upstream DPDK app/test/test_lpm.c. Each sub-case follows the
 * same shape: add -> lookup hit -> delete -> lookup miss -> delete_all.
 */
432 * - Add & lookup to hit invalid TBL24 entry
433 * - Add & lookup to hit valid TBL24 entry not extended
434 * - Add & lookup to hit valid extended TBL24 entry with invalid TBL8 entry
435 * - Add & lookup to hit valid extended TBL24 entry with valid TBL8 entry
441 struct rte_lpm *lpm = NULL;
442 uint32_t ip, ip_1, ip_2;
443 uint8_t depth, depth_1, depth_2, next_hop_add, next_hop_add_1,
444 next_hop_add_2, next_hop_return;
447 /* Add & lookup to hit invalid TBL24 entry */
448 ip = IPv4(128, 0, 0, 0);
/* NOTE(review): depth/next_hop_add assignments missing here (lines 449-451). */
452 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
453 TEST_LPM_ASSERT(lpm != NULL);
455 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
456 TEST_LPM_ASSERT(status == 0);
458 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
459 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
461 status = rte_lpm_delete(lpm, ip, depth);
462 TEST_LPM_ASSERT(status == 0);
464 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
465 TEST_LPM_ASSERT(status == -ENOENT);
467 rte_lpm_delete_all(lpm);
469 /* Add & lookup to hit valid TBL24 entry not extended */
470 ip = IPv4(128, 0, 0, 0);
/* NOTE(review): depth/next_hop_add assignments missing (lines 471-473). */
474 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
475 TEST_LPM_ASSERT(status == 0);
477 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
478 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
/* NOTE(review): second-rule setup missing (lines 479-482). */
483 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
484 TEST_LPM_ASSERT(status == 0);
486 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
487 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
491 status = rte_lpm_delete(lpm, ip, depth);
492 TEST_LPM_ASSERT(status == 0);
496 status = rte_lpm_delete(lpm, ip, depth);
497 TEST_LPM_ASSERT(status == 0);
499 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
500 TEST_LPM_ASSERT(status == -ENOENT);
502 rte_lpm_delete_all(lpm);
504 /* Add & lookup to hit valid extended TBL24 entry with invalid TBL8
506 ip = IPv4(128, 0, 0, 0);
/* NOTE(review): depth/next_hop_add assignments missing (lines 507-509). */
510 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
511 TEST_LPM_ASSERT(status == 0);
513 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
514 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
516 ip = IPv4(128, 0, 0, 5);
/* NOTE(review): depth/next_hop_add assignments missing (lines 517-519). */
520 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
521 TEST_LPM_ASSERT(status == 0);
523 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
524 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
526 status = rte_lpm_delete(lpm, ip, depth);
527 TEST_LPM_ASSERT(status == 0);
529 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
530 TEST_LPM_ASSERT(status == -ENOENT);
532 ip = IPv4(128, 0, 0, 0);
536 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
537 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
539 status = rte_lpm_delete(lpm, ip, depth);
540 TEST_LPM_ASSERT(status == 0);
542 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
543 TEST_LPM_ASSERT(status == -ENOENT);
545 rte_lpm_delete_all(lpm);
547 /* Add & lookup to hit valid extended TBL24 entry with valid TBL8
549 ip_1 = IPv4(128, 0, 0, 0);
/* NOTE(review): depth_1 assignment missing (line 550). */
551 next_hop_add_1 = 101;
553 ip_2 = IPv4(128, 0, 0, 5);
/* NOTE(review): depth_2 assignment missing (line 554). */
555 next_hop_add_2 = 102;
559 status = rte_lpm_add(lpm, ip_1, depth_1, next_hop_add_1);
560 TEST_LPM_ASSERT(status == 0);
562 status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
563 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
565 status = rte_lpm_add(lpm, ip_2, depth_2, next_hop_add_2);
566 TEST_LPM_ASSERT(status == 0);
568 status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
569 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2));
571 status = rte_lpm_delete(lpm, ip_2, depth_2);
572 TEST_LPM_ASSERT(status == 0);
/* After deleting the more specific rule, the covering rule must match. */
574 status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
575 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
577 status = rte_lpm_delete(lpm, ip_1, depth_1);
578 TEST_LPM_ASSERT(status == 0);
580 status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
581 TEST_LPM_ASSERT(status == -ENOENT);
/*
 * NOTE(review): garbled chunk — leftover line numbers and numbering gaps
 * show dropped lines: the function signature, the `uint32_t ip;`/`status`
 * declarations, and all `depth = ...;` / `next_hop_add = ...;` assignments
 * between sub-cases (e.g. lines 616-618, 634-636, 698-700). Those constants
 * are not recoverable from this view; restore from upstream DPDK
 * app/test/test_lpm.c. Each sub-case is add -> lookup -> delete -> lookup.
 */
590 * - Add rule that covers a TBL24 range previously invalid & lookup (& delete &
592 * - Add rule that extends a TBL24 invalid entry & lookup (& delete & lookup)
593 * - Add rule that extends a TBL24 valid entry & lookup for both rules (&
595 * - Add rule that updates the next hop in TBL24 & lookup (& delete & lookup)
596 * - Add rule that updates the next hop in TBL8 & lookup (& delete & lookup)
597 * - Delete a rule that is not present in the TBL24 & lookup
598 * - Delete a rule that is not present in the TBL8 & lookup
605 struct rte_lpm *lpm = NULL;
607 uint8_t depth, next_hop_add, next_hop_return;
610 /* Add rule that covers a TBL24 range previously invalid & lookup
611 * (& delete & lookup) */
612 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
613 TEST_LPM_ASSERT(lpm != NULL);
615 ip = IPv4(128, 0, 0, 0);
619 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
620 TEST_LPM_ASSERT(status == 0);
622 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
623 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
625 status = rte_lpm_delete(lpm, ip, depth);
626 TEST_LPM_ASSERT(status == 0);
628 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
629 TEST_LPM_ASSERT(status == -ENOENT);
631 rte_lpm_delete_all(lpm);
/* Sub-case: rule that extends a TBL24 invalid entry. */
633 ip = IPv4(128, 0, 0, 0);
637 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
638 TEST_LPM_ASSERT(status == 0);
640 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
641 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
643 status = rte_lpm_delete(lpm, ip, depth);
644 TEST_LPM_ASSERT(status == 0);
646 rte_lpm_delete_all(lpm);
648 /* Add rule that extends a TBL24 valid entry & lookup for both rules
649 * (& delete & lookup) */
651 ip = IPv4(128, 0, 0, 0);
655 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
656 TEST_LPM_ASSERT(status == 0);
658 ip = IPv4(128, 0, 0, 10);
662 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
663 TEST_LPM_ASSERT(status == 0);
665 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
666 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
668 ip = IPv4(128, 0, 0, 0);
671 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
672 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
674 ip = IPv4(128, 0, 0, 0);
677 status = rte_lpm_delete(lpm, ip, depth);
678 TEST_LPM_ASSERT(status == 0);
680 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
681 TEST_LPM_ASSERT(status == -ENOENT);
683 ip = IPv4(128, 0, 0, 10);
686 status = rte_lpm_delete(lpm, ip, depth);
687 TEST_LPM_ASSERT(status == 0);
689 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
690 TEST_LPM_ASSERT(status == -ENOENT);
692 rte_lpm_delete_all(lpm);
694 /* Add rule that updates the next hop in TBL24 & lookup
695 * (& delete & lookup) */
697 ip = IPv4(128, 0, 0, 0);
701 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
702 TEST_LPM_ASSERT(status == 0);
704 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
705 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
/* NOTE(review): updated next_hop_add assignment missing (lines 706-708). */
709 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
710 TEST_LPM_ASSERT(status == 0);
712 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
713 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
715 status = rte_lpm_delete(lpm, ip, depth);
716 TEST_LPM_ASSERT(status == 0);
718 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
719 TEST_LPM_ASSERT(status == -ENOENT);
721 rte_lpm_delete_all(lpm);
723 /* Add rule that updates the next hop in TBL8 & lookup
724 * (& delete & lookup) */
726 ip = IPv4(128, 0, 0, 0);
730 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
731 TEST_LPM_ASSERT(status == 0);
733 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
734 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
/* NOTE(review): updated next_hop_add assignment missing (lines 735-737). */
738 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
739 TEST_LPM_ASSERT(status == 0);
741 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
742 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
744 status = rte_lpm_delete(lpm, ip, depth);
745 TEST_LPM_ASSERT(status == 0);
747 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
748 TEST_LPM_ASSERT(status == -ENOENT);
750 rte_lpm_delete_all(lpm);
752 /* Delete a rule that is not present in the TBL24 & lookup */
754 ip = IPv4(128, 0, 0, 0);
757 status = rte_lpm_delete(lpm, ip, depth);
758 TEST_LPM_ASSERT(status < 0);
760 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
761 TEST_LPM_ASSERT(status == -ENOENT);
763 rte_lpm_delete_all(lpm);
765 /* Delete a rule that is not present in the TBL8 & lookup */
767 ip = IPv4(128, 0, 0, 0);
770 status = rte_lpm_delete(lpm, ip, depth);
771 TEST_LPM_ASSERT(status < 0);
773 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
774 TEST_LPM_ASSERT(status == -ENOENT);
782 * Add two rules, lookup to hit the more specific one, lookup to hit the less
783 * specific one delete the less specific rule and lookup previous values again;
784 * add a more specific rule than the existing rule, lookup again
791 struct rte_lpm *lpm = NULL;
793 uint8_t depth, next_hop_add, next_hop_return;
796 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
797 TEST_LPM_ASSERT(lpm != NULL);
799 ip = IPv4(128, 0, 0, 0);
803 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
804 TEST_LPM_ASSERT(status == 0);
806 ip = IPv4(128, 0, 0, 10);
810 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
811 TEST_LPM_ASSERT(status == 0);
813 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
814 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
816 ip = IPv4(128, 0, 0, 0);
819 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
820 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
822 ip = IPv4(128, 0, 0, 0);
825 status = rte_lpm_delete(lpm, ip, depth);
826 TEST_LPM_ASSERT(status == 0);
828 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
829 TEST_LPM_ASSERT(status == -ENOENT);
831 ip = IPv4(128, 0, 0, 10);
834 status = rte_lpm_delete(lpm, ip, depth);
835 TEST_LPM_ASSERT(status == 0);
837 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
838 TEST_LPM_ASSERT(status == -ENOENT);
846 * Add an extended rule (i.e. depth greater than 24, lookup (hit), delete,
847 * lookup (miss) in a for loop of 1000 times. This will check tbl8 extension
857 struct rte_lpm *lpm = NULL;
859 uint8_t depth, next_hop_add, next_hop_return;
862 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
863 TEST_LPM_ASSERT(lpm != NULL);
865 ip = IPv4(128, 0, 0, 0);
869 for (i = 0; i < 1000; i++) {
870 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
871 TEST_LPM_ASSERT(status == 0);
873 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
874 TEST_LPM_ASSERT((status == 0) &&
875 (next_hop_return == next_hop_add));
877 ipx4 = _mm_set_epi32(ip, ip + 1, ip, ip - 1);
878 rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
879 TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
880 TEST_LPM_ASSERT(hop[1] == next_hop_add);
881 TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
882 TEST_LPM_ASSERT(hop[3] == next_hop_add);
884 status = rte_lpm_delete(lpm, ip, depth);
885 TEST_LPM_ASSERT(status == 0);
887 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
888 TEST_LPM_ASSERT(status == -ENOENT);
897 * Add a rule to tbl24, lookup (hit), then add a rule that will extend this
898 * tbl24 entry, lookup (hit). delete the rule that caused the tbl24 extension,
899 * lookup (miss) and repeat for loop of 1000 times. This will check tbl8
900 * extension and contraction.
907 struct rte_lpm *lpm = NULL;
909 uint8_t depth, next_hop_add_1, next_hop_add_2, next_hop_return;
912 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
913 TEST_LPM_ASSERT(lpm != NULL);
915 ip = IPv4(128, 0, 0, 0);
917 next_hop_add_1 = 100;
919 status = rte_lpm_add(lpm, ip, depth, next_hop_add_1);
920 TEST_LPM_ASSERT(status == 0);
922 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
923 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
926 next_hop_add_2 = 101;
928 for (i = 0; i < 1000; i++) {
929 status = rte_lpm_add(lpm, ip, depth, next_hop_add_2);
930 TEST_LPM_ASSERT(status == 0);
932 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
933 TEST_LPM_ASSERT((status == 0) &&
934 (next_hop_return == next_hop_add_2));
936 status = rte_lpm_delete(lpm, ip, depth);
937 TEST_LPM_ASSERT(status == 0);
939 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
940 TEST_LPM_ASSERT((status == 0) &&
941 (next_hop_return == next_hop_add_1));
946 status = rte_lpm_delete(lpm, ip, depth);
947 TEST_LPM_ASSERT(status == 0);
949 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
950 TEST_LPM_ASSERT(status == -ENOENT);
958 * Fore TBL8 extension exhaustion. Add 256 rules that require a tbl8 extension.
959 * No more tbl8 extensions will be allowed. Now add one more rule that required
960 * a tbl8 extension and get fail.
966 /* We only use depth = 32 in the loop below so we must make sure
967 * that we have enough storage for all rules at that depth*/
969 struct rte_lpm *lpm = NULL;
971 uint8_t depth, next_hop_add, next_hop_return;
974 /* Add enough space for 256 rules for every depth */
975 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 256 * 32, 0);
976 TEST_LPM_ASSERT(lpm != NULL);
980 ip = IPv4(0, 0, 0, 0);
982 /* Add 256 rules that require a tbl8 extension */
983 for (; ip <= IPv4(0, 0, 255, 0); ip += 256) {
984 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
985 TEST_LPM_ASSERT(status == 0);
987 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
988 TEST_LPM_ASSERT((status == 0) &&
989 (next_hop_return == next_hop_add));
992 /* All tbl8 extensions have been used above. Try to add one more and
994 ip = IPv4(1, 0, 0, 0);
997 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
998 TEST_LPM_ASSERT(status < 0);
1006 * Sequence of operations for find existing lpm table
1009 * - find existing table: hit
1010 * - find non-existing table: miss
1016 struct rte_lpm *lpm = NULL, *result = NULL;
1019 lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, 256 * 32, 0);
1020 TEST_LPM_ASSERT(lpm != NULL);
1022 /* Try to find existing lpm */
1023 result = rte_lpm_find_existing("lpm_find_existing");
1024 TEST_LPM_ASSERT(result == lpm);
1026 /* Try to find non-existing lpm */
1027 result = rte_lpm_find_existing("lpm_find_non_existing");
1028 TEST_LPM_ASSERT(result == NULL);
1031 rte_lpm_delete_all(lpm);
1038 * test failure condition of overloading the tbl8 so no more will fit
1039 * Check we get an error return value in that case
1045 struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY,
1048 /* ip loops through all possibilities for top 24 bits of address */
1049 for (ip = 0; ip < 0xFFFFFF; ip++){
1050 /* add an entry within a different tbl8 each time, since
1051 * depth >24 and the top 24 bits are different */
1052 if (rte_lpm_add(lpm, (ip << 8) + 0xF0, 30, 0) < 0)
1056 if (ip != RTE_LPM_TBL8_NUM_GROUPS) {
1057 printf("Error, unexpected failure with filling tbl8 groups\n");
1058 printf("Failed after %u additions, expected after %u\n",
1059 (unsigned)ip, (unsigned)RTE_LPM_TBL8_NUM_GROUPS);
/*
 * NOTE(review): garbled chunk — leftover line numbers and numbering gaps
 * show dropped lines: the function signature, the `d_ip_10_24`/`d_ip_20_25`
 * initializers (lines 1081-1082), the bodies of the error `if`s (return
 * statements around lines 1094-1095, 1121-1123, 1126-1128), and the final
 * free/return. Restore from upstream DPDK app/test/test_lpm.c.
 */
1067 * Test for overwriting of tbl8:
1068 * - add rule /32 and lookup
1069 * - add new rule /24 and lookup
1070 * - add third rule /25 and lookup
1071 * - lookup /32 and /24 rule to ensure the table has not been overwritten.
1076 struct rte_lpm *lpm = NULL;
1077 const uint32_t ip_10_32 = IPv4(10, 10, 10, 2);
1078 const uint32_t ip_10_24 = IPv4(10, 10, 10, 0);
1079 const uint32_t ip_20_25 = IPv4(10, 10, 20, 2);
1080 const uint8_t d_ip_10_32 = 32,
/* NOTE(review): d_ip_10_24 / d_ip_20_25 initializer lines missing here. */
1083 const uint8_t next_hop_ip_10_32 = 100,
1084 next_hop_ip_10_24 = 105,
1085 next_hop_ip_20_25 = 111;
1086 uint8_t next_hop_return = 0;
1089 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
1090 TEST_LPM_ASSERT(lpm != NULL);
1092 if ((status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32,
1093 next_hop_ip_10_32)) < 0)
/* NOTE(review): error-return body of the if above is missing. */
1096 status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
1097 uint8_t test_hop_10_32 = next_hop_return;
1098 TEST_LPM_ASSERT(status == 0);
1099 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
1101 if ((status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24,
1102 next_hop_ip_10_24)) < 0)
1105 status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
1106 uint8_t test_hop_10_24 = next_hop_return;
1107 TEST_LPM_ASSERT(status == 0);
1108 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
1110 if ((status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25,
1111 next_hop_ip_20_25)) < 0)
1114 status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
1115 uint8_t test_hop_20_25 = next_hop_return;
1116 TEST_LPM_ASSERT(status == 0);
1117 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);
/* Distinct next hops prove the later adds did not overwrite earlier tbl8
 * entries. */
1119 if (test_hop_10_32 == test_hop_10_24) {
1120 printf("Next hop return equal\n");
1124 if (test_hop_10_24 == test_hop_20_25){
1125 printf("Next hop return equal\n");
1129 status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
1130 TEST_LPM_ASSERT(status == 0);
1131 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
1133 status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
1134 TEST_LPM_ASSERT(status == 0);
1135 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
1143 * Lookup performance test
1146 #define ITERATIONS (1 << 10)
1147 #define BATCH_SIZE (1 << 12)
1148 #define BULK_SIZE 32
/*
 * NOTE(review): garbled chunk — the `static void` return-type line, the
 * `unsigned i, j;` declaration, the `depth_counter++;` increment (line 1166)
 * and the closing braces were dropped in extraction. The printf format
 * strings may also have had whitespace collapsed; verify column alignment
 * against upstream DPDK app/test/test_lpm.c before relying on the output.
 * Prints, for each prefix depth 1..32, how many of the n routes in `table`
 * use that depth and the percentage of the total.
 */
1151 print_route_distribution(const struct route_rule *table, uint32_t n)
1155 printf("Route distribution per prefix width: \n");
1156 printf("DEPTH QUANTITY (PERCENT)\n");
1157 printf("--------------------------- \n");
/* Count how many routes fall at each depth. */
1160 for(i = 1; i <= 32; i++) {
1161 unsigned depth_counter = 0;
1162 double percent_hits;
1164 for (j = 0; j < n; j++)
1165 if (table[j].depth == (uint8_t) i)
/* NOTE(review): `depth_counter++;` line missing here. */
1168 percent_hits = ((double)depth_counter)/((double)n) * 100;
1169 printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
/*
 * NOTE(review): garbled chunk — this is the body of perf_test() with its
 * signature, several declarations (`uint32_t ip, i, j; int32_t status;
 * int64_t count;`), loop-closing braces, `total_time = 0; count = 0;`
 * resets before each measurement phase, and the final `return PASS;`
 * dropped in extraction. The timing logic is order-sensitive; restore from
 * upstream DPDK app/test/test_lpm.c rather than reconstructing by hand.
 * Measures: average add cycles, tbl24 occupancy/cache-line usage, single
 * lookup, bulk lookup, lookupx4, and average delete cycles.
 */
1177 struct rte_lpm *lpm = NULL;
1178 uint64_t begin, total_time, lpm_used_entries = 0;
1180 uint8_t next_hop_add = 0xAA, next_hop_return = 0;
1182 uint64_t cache_line_counter = 0;
1185 rte_srand(rte_rdtsc());
1187 printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);
1189 print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);
1191 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 1000000, 0);
1192 TEST_LPM_ASSERT(lpm != NULL);
/* Phase 1: time bulk addition of the large route table. */
1195 begin = rte_rdtsc();
1197 for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
1198 if (rte_lpm_add(lpm, large_route_table[i].ip,
1199 large_route_table[i].depth, next_hop_add) == 0)
/* NOTE(review): `status++;` success counter line missing here. */
1203 total_time = rte_rdtsc() - begin;
1205 printf("Unique added entries = %d\n", status);
1206 /* Obtain add statistics. */
1207 for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
1208 if (lpm->tbl24[i].valid)
/* NOTE(review): `lpm_used_entries++;` line missing here. */
/* Count a new 64-byte cache line every 16 valid tbl24 entries. */
1212 if ((uint64_t)count < lpm_used_entries) {
1213 cache_line_counter++;
1214 count = lpm_used_entries;
1219 printf("Used table 24 entries = %u (%g%%)\n",
1220 (unsigned) lpm_used_entries,
1221 (lpm_used_entries * 100.0) / RTE_LPM_TBL24_NUM_ENTRIES);
1222 printf("64 byte Cache entries used = %u (%u bytes)\n",
1223 (unsigned) cache_line_counter, (unsigned) cache_line_counter * 64);
1225 printf("Average LPM Add: %g cycles\n", (double)total_time / NUM_ROUTE_ENTRIES);
1227 /* Measure single Lookup */
/* NOTE(review): `total_time = 0; count = 0;` reset lines missing here. */
1231 for (i = 0; i < ITERATIONS; i ++) {
1232 static uint32_t ip_batch[BATCH_SIZE];
1234 for (j = 0; j < BATCH_SIZE; j ++)
1235 ip_batch[j] = rte_rand();
1237 /* Lookup per batch */
1238 begin = rte_rdtsc();
1240 for (j = 0; j < BATCH_SIZE; j ++) {
1241 if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
/* NOTE(review): `count++;` miss counter line missing here. */
1245 total_time += rte_rdtsc() - begin;
1248 printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
1249 (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
1250 (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
1252 /* Measure bulk Lookup */
/* NOTE(review): `total_time = 0; count = 0;` reset lines missing here. */
1255 for (i = 0; i < ITERATIONS; i ++) {
1256 static uint32_t ip_batch[BATCH_SIZE];
1257 uint16_t next_hops[BULK_SIZE];
1259 /* Create array of random IP addresses */
1260 for (j = 0; j < BATCH_SIZE; j ++)
1261 ip_batch[j] = rte_rand();
1263 /* Lookup per batch */
1264 begin = rte_rdtsc();
1265 for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
1267 rte_lpm_lookup_bulk(lpm, &ip_batch[j], next_hops, BULK_SIZE);
1268 for (k = 0; k < BULK_SIZE; k++)
1269 if (unlikely(!(next_hops[k] & RTE_LPM_LOOKUP_SUCCESS)))
/* NOTE(review): `count++;` miss counter line missing here. */
1273 total_time += rte_rdtsc() - begin;
1275 printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
1276 (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
1277 (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
1279 /* Measure LookupX4 */
/* NOTE(review): `total_time = 0; count = 0;` reset lines missing here. */
1282 for (i = 0; i < ITERATIONS; i++) {
1283 static uint32_t ip_batch[BATCH_SIZE];
1284 uint16_t next_hops[4];
1286 /* Create array of random IP addresses */
1287 for (j = 0; j < BATCH_SIZE; j++)
1288 ip_batch[j] = rte_rand();
1290 /* Lookup per batch */
1291 begin = rte_rdtsc();
1292 for (j = 0; j < BATCH_SIZE; j += RTE_DIM(next_hops)) {
/* NOTE(review): two alternative loads of ipx4 appear consecutively below;
 * in the original only one is active (the other sat under an #ifdef or was
 * a preceding declaration) — verify against upstream. */
1296 ipx4 = _mm_loadu_si128((__m128i *)(ip_batch + j));
1297 ipx4 = *(__m128i *)(ip_batch + j);
1298 rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT16_MAX);
1299 for (k = 0; k < RTE_DIM(next_hops); k++)
1300 if (unlikely(next_hops[k] == UINT16_MAX))
/* NOTE(review): `count++;` miss counter line missing here. */
1304 total_time += rte_rdtsc() - begin;
1306 printf("LPM LookupX4: %.1f cycles (fails = %.1f%%)\n",
1307 (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
1308 (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
/* Phase: time deletion of all routes. */
1312 begin = rte_rdtsc();
1314 for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
1315 /* rte_lpm_delete(lpm, ip, depth) */
1316 status += rte_lpm_delete(lpm, large_route_table[i].ip,
1317 large_route_table[i].depth);
1320 total_time += rte_rdtsc() - begin;
1322 printf("Average LPM Delete: %g cycles\n",
1323 (double)total_time / NUM_ROUTE_ENTRIES);
1325 rte_lpm_delete_all(lpm);
1332 * Do all unit and performance tests.
1339 int status, global_status = 0;
1341 for (i = 0; i < NUM_LPM_TESTS; i++) {
1342 status = tests[i]();
1344 printf("ERROR: LPM Test %s: FAIL\n", RTE_STR(tests[i]));
1345 global_status = status;
1349 return global_status;
1352 #else /* RTE_LIBRTE_LPM */
1357 printf("The LPM library is not included in this build\n");
1361 #endif /* RTE_LIBRTE_LPM */