/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_random.h>
#include <rte_branch_prediction.h>
#include <rte_ip.h>

#include "test.h"
#include "rte_lpm.h"
#include "test_lpm_routes.h"
#define TEST_LPM_ASSERT(cond) do {                                           \
	if (!(cond)) {                                                        \
		printf("Error at line %d:\n", __LINE__);                      \
		return -1;                                                    \
	}                                                                     \
} while (0)
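/*
 * Note (explanatory, not from the original source): TEST_LPM_ASSERT returns
 * -1 from the *enclosing* function, so it may only be used inside the
 * int32_t test bodies below, e.g.:
 *
 *	TEST_LPM_ASSERT(lpm != NULL);
 *
 * prints the failing line number and aborts the current test on failure.
 */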
typedef int32_t (*rte_lpm_test)(void);

static int32_t test0(void);
static int32_t test1(void);
static int32_t test2(void);
static int32_t test3(void);
static int32_t test4(void);
static int32_t test5(void);
static int32_t test6(void);
static int32_t test7(void);
static int32_t test8(void);
static int32_t test9(void);
static int32_t test10(void);
static int32_t test11(void);
static int32_t test12(void);
static int32_t test13(void);
static int32_t test14(void);
static int32_t test15(void);
static int32_t test16(void);
static int32_t test17(void);
static int32_t perf_test(void);
rte_lpm_test tests[] = {
	/* Test Cases */
	test0,
	test1,
	test2,
	test3,
	test4,
	test5,
	test6,
	test7,
	test8,
	test9,
	test10,
	test11,
	test12,
	test13,
	test14,
	test15,
	test16,
	test17,
	perf_test,
};

#define NUM_LPM_TESTS (sizeof(tests)/sizeof(tests[0]))

#define MAX_DEPTH 32
#define MAX_RULES 256
#define PASS 0
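/*
 * Background for the tests below (a summary, not normative): rte_lpm keeps a
 * tbl24 with 2^24 entries indexed by the top 24 bits of an IPv4 address, so
 * rules with depth <= 24 live entirely in tbl24. Rules with depth > 24 make
 * the covering tbl24 entry point at a tbl8 group of 256 entries indexed by
 * the last byte of the address.
 */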
/*
 * Check that rte_lpm_create fails gracefully for incorrect user input
 * arguments
 */
int32_t
test0(void)
{
	struct rte_lpm *lpm = NULL;

	/* rte_lpm_create: lpm name == NULL */
	lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm == NULL);

	/* rte_lpm_create: max_rules = 0 */
	/* Note: __func__ inserts the function name, in this case "test0". */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 0, 0);
	TEST_LPM_ASSERT(lpm == NULL);

	/* socket_id < -1 is invalid */
	lpm = rte_lpm_create(__func__, -2, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm == NULL);

	return PASS;
}
/*
 * Create lpm table then delete lpm table 100 times
 * Use a slightly different rules size each time
 */
int32_t
test1(void)
{
	struct rte_lpm *lpm = NULL;
	int32_t i;

	/* rte_lpm_free: Free NULL */
	for (i = 0; i < 100; i++) {
		lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES - i, 0);
		TEST_LPM_ASSERT(lpm != NULL);

		rte_lpm_free(lpm);
	}

	/* Can not test free so return success */
	return PASS;
}
/*
 * Call rte_lpm_free for NULL pointer user input. Note: free has no return and
 * therefore it is impossible to check for failure, but this test is added to
 * increase function coverage metrics and to validate that freeing NULL does
 * not crash.
 */
int32_t
test2(void)
{
	struct rte_lpm *lpm = NULL;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	rte_lpm_free(lpm);
	rte_lpm_free(NULL);

	return PASS;
}
/*
 * Check that rte_lpm_add fails gracefully for incorrect user input arguments
 */
int32_t
test3(void)
{
	struct rte_lpm *lpm = NULL;
	uint32_t ip = IPv4(0, 0, 0, 0), next_hop = 100;
	uint8_t depth = 24;
	int32_t status = 0;

	/* rte_lpm_add: lpm == NULL */
	status = rte_lpm_add(NULL, ip, depth, next_hop);
	TEST_LPM_ASSERT(status < 0);

	/* Create valid lpm to use in rest of test. */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	/* rte_lpm_add: depth < 1 */
	status = rte_lpm_add(lpm, ip, 0, next_hop);
	TEST_LPM_ASSERT(status < 0);

	/* rte_lpm_add: depth > MAX_DEPTH */
	status = rte_lpm_add(lpm, ip, (MAX_DEPTH + 1), next_hop);
	TEST_LPM_ASSERT(status < 0);

	rte_lpm_free(lpm);

	return PASS;
}
/*
 * Check that rte_lpm_delete fails gracefully for incorrect user input
 * arguments
 */
int32_t
test4(void)
{
	struct rte_lpm *lpm = NULL;
	uint32_t ip = IPv4(0, 0, 0, 0);
	uint8_t depth = 24;
	int32_t status = 0;

	/* rte_lpm_delete: lpm == NULL */
	status = rte_lpm_delete(NULL, ip, depth);
	TEST_LPM_ASSERT(status < 0);

	/* Create valid lpm to use in rest of test. */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	/* rte_lpm_delete: depth < 1 */
	status = rte_lpm_delete(lpm, ip, 0);
	TEST_LPM_ASSERT(status < 0);

	/* rte_lpm_delete: depth > MAX_DEPTH */
	status = rte_lpm_delete(lpm, ip, (MAX_DEPTH + 1));
	TEST_LPM_ASSERT(status < 0);

	rte_lpm_free(lpm);

	return PASS;
}
/*
 * Check that rte_lpm_lookup fails gracefully for incorrect user input
 * arguments. The argument checks are compiled in only when
 * RTE_LIBRTE_LPM_DEBUG is defined.
 */
int32_t
test5(void)
{
#if defined(RTE_LIBRTE_LPM_DEBUG)
	struct rte_lpm *lpm = NULL;
	uint32_t ip = IPv4(0, 0, 0, 0), next_hop_return = 0;
	int32_t status = 0;

	/* rte_lpm_lookup: lpm == NULL */
	status = rte_lpm_lookup(NULL, ip, &next_hop_return);
	TEST_LPM_ASSERT(status < 0);

	/* Create valid lpm to use in rest of test. */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	/* rte_lpm_lookup: next_hop = NULL */
	status = rte_lpm_lookup(lpm, ip, NULL);
	TEST_LPM_ASSERT(status < 0);

	rte_lpm_free(lpm);
#endif
	return PASS;
}
/*
 * Call add, lookup and delete for a single rule with depth <= 24
 */
int32_t
test6(void)
{
	struct rte_lpm *lpm = NULL;
	uint32_t ip = IPv4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
	uint8_t depth = 24;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}
/*
 * Call add, lookup and delete for a single rule with depth > 24
 */
int32_t
test7(void)
{
	__m128i ipx4;
	uint32_t hop[4];
	struct rte_lpm *lpm = NULL;
	uint32_t ip = IPv4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
	uint8_t depth = 32;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
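	/*
	 * Reminder (explanatory note, not from the original source):
	 * _mm_set_epi32(e3, e2, e1, e0) places its *last* argument in lane 0,
	 * and rte_lpm_lookupx4() writes hop[i] from lane i, so hop[0] below
	 * corresponds to the last _mm_set_epi32 argument.
	 */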
	ipx4 = _mm_set_epi32(ip, ip + 0x100, ip - 0x100, ip);
	rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
	TEST_LPM_ASSERT(hop[0] == next_hop_add);
	TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
	TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
	TEST_LPM_ASSERT(hop[3] == next_hop_add);

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}
/*
 * Use rte_lpm_add to add rules which affect only the second half of the lpm
 * table. Use all possible depths ranging from 1..32. Set the next hop = to
 * the depth. Check lookup hit on every add and check for lookup miss on the
 * first half of the lpm table after each add. Finally delete all rules going
 * backwards (i.e. from depth = 32 ..1) and carry out a lookup after each
 * delete. The lookup should return the next_hop_add value related to the
 * previous depth value (i.e. depth - 1).
 */
int32_t
test8(void)
{
	__m128i ipx4;
	uint32_t hop[4];
	struct rte_lpm *lpm = NULL;
	uint32_t ip1 = IPv4(127, 255, 255, 255), ip2 = IPv4(128, 0, 0, 0);
	uint32_t next_hop_add, next_hop_return;
	uint8_t depth;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	/* Loop with rte_lpm_add. */
	for (depth = 1; depth <= 32; depth++) {
		/* Let the next_hop_add value = depth. Just for change. */
		next_hop_add = depth;

		status = rte_lpm_add(lpm, ip2, depth, next_hop_add);
		TEST_LPM_ASSERT(status == 0);

		/* Check IP in first half of tbl24 which should be empty. */
		status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
		TEST_LPM_ASSERT(status == -ENOENT);

		status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
			(next_hop_return == next_hop_add));

		ipx4 = _mm_set_epi32(ip2, ip1, ip2, ip1);
		rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
		TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
		TEST_LPM_ASSERT(hop[1] == next_hop_add);
		TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
		TEST_LPM_ASSERT(hop[3] == next_hop_add);
	}

	/* Loop with rte_lpm_delete. */
	for (depth = 32; depth >= 1; depth--) {
		next_hop_add = (uint8_t) (depth - 1);

		status = rte_lpm_delete(lpm, ip2, depth);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip2, &next_hop_return);

		if (depth != 1) {
			TEST_LPM_ASSERT((status == 0) &&
				(next_hop_return == next_hop_add));
		} else {
			TEST_LPM_ASSERT(status == -ENOENT);
		}

		status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
		TEST_LPM_ASSERT(status == -ENOENT);

		ipx4 = _mm_set_epi32(ip1, ip1, ip2, ip2);
		rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
		if (depth != 1) {
			TEST_LPM_ASSERT(hop[0] == next_hop_add);
			TEST_LPM_ASSERT(hop[1] == next_hop_add);
		} else {
			TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
			TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
		}
		TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
		TEST_LPM_ASSERT(hop[3] == UINT32_MAX);
	}

	rte_lpm_free(lpm);

	return PASS;
}
/*
 * - Add & lookup to hit invalid TBL24 entry
 * - Add & lookup to hit valid TBL24 entry not extended
 * - Add & lookup to hit valid extended TBL24 entry with invalid TBL8 entry
 * - Add & lookup to hit valid extended TBL24 entry with valid TBL8 entry
 */
int32_t
test9(void)
{
	struct rte_lpm *lpm = NULL;
	uint32_t ip, ip_1, ip_2;
	uint8_t depth, depth_1, depth_2;
	uint32_t next_hop_add, next_hop_add_1, next_hop_add_2, next_hop_return;
	int32_t status = 0;

	/* Add & lookup to hit invalid TBL24 entry */
	ip = IPv4(128, 0, 0, 0);
	depth = 24;
	next_hop_add = 100;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add & lookup to hit valid TBL24 entry not extended */
	ip = IPv4(128, 0, 0, 0);
	depth = 23;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	depth = 24;
	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	depth = 24;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	depth = 23;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add & lookup to hit valid extended TBL24 entry with invalid TBL8
	 * entry */
	ip = IPv4(128, 0, 0, 0);
	depth = 32;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = IPv4(128, 0, 0, 5);
	depth = 32;
	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	ip = IPv4(128, 0, 0, 0);
	depth = 32;
	next_hop_add = 100;

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add & lookup to hit valid extended TBL24 entry with valid TBL8
	 * entry */
	ip_1 = IPv4(128, 0, 0, 0);
	depth_1 = 25;
	next_hop_add_1 = 101;

	ip_2 = IPv4(128, 0, 0, 5);
	depth_2 = 32;
	next_hop_add_2 = 102;

	next_hop_return = 0;

	status = rte_lpm_add(lpm, ip_1, depth_1, next_hop_add_1);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));

	status = rte_lpm_add(lpm, ip_2, depth_2, next_hop_add_2);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2));

	status = rte_lpm_delete(lpm, ip_2, depth_2);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));

	status = rte_lpm_delete(lpm, ip_1, depth_1);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}
/*
 * - Add rule that covers a TBL24 range previously invalid & lookup (& delete
 *   & lookup)
 * - Add rule that extends a TBL24 invalid entry & lookup (& delete & lookup)
 * - Add rule that extends a TBL24 valid entry & lookup for both rules (&
 *   delete & lookup)
 * - Add rule that updates the next hop in TBL24 & lookup (& delete & lookup)
 * - Add rule that updates the next hop in TBL8 & lookup (& delete & lookup)
 * - Delete a rule that is not present in the TBL24 & lookup
 * - Delete a rule that is not present in the TBL8 & lookup
 */
int32_t
test10(void)
{
	struct rte_lpm *lpm = NULL;
	uint32_t ip, next_hop_add, next_hop_return;
	uint8_t depth;
	int32_t status = 0;

	/* Add rule that covers a TBL24 range previously invalid & lookup
	 * (& delete & lookup) */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	ip = IPv4(128, 0, 0, 0);
	depth = 16;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add rule that extends a TBL24 invalid entry & lookup
	 * (& delete & lookup) */
	ip = IPv4(128, 0, 0, 0);
	depth = 32;
	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	rte_lpm_delete_all(lpm);

	/* Add rule that extends a TBL24 valid entry & lookup for both rules
	 * (& delete & lookup) */
	ip = IPv4(128, 0, 0, 0);
	depth = 24;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	ip = IPv4(128, 0, 0, 10);
	depth = 32;
	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = IPv4(128, 0, 0, 0);
	next_hop_add = 100;

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = IPv4(128, 0, 0, 0);
	depth = 24;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	ip = IPv4(128, 0, 0, 10);
	depth = 32;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add rule that updates the next hop in TBL24 & lookup
	 * (& delete & lookup) */
	ip = IPv4(128, 0, 0, 0);
	depth = 24;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add rule that updates the next hop in TBL8 & lookup
	 * (& delete & lookup) */
	ip = IPv4(128, 0, 0, 0);
	depth = 32;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Delete a rule that is not present in the TBL24 & lookup */
	ip = IPv4(128, 0, 0, 0);
	depth = 24;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status < 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Delete a rule that is not present in the TBL8 & lookup */
	ip = IPv4(128, 0, 0, 0);
	depth = 32;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status < 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}
/*
 * Add two rules, lookup to hit the more specific one, lookup to hit the less
 * specific one, delete the less specific rule and lookup previous values
 * again; add a more specific rule than the existing rule, lookup again
 */
int32_t
test11(void)
{
	struct rte_lpm *lpm = NULL;
	uint32_t ip, next_hop_add, next_hop_return;
	uint8_t depth;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	ip = IPv4(128, 0, 0, 0);
	depth = 24;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	ip = IPv4(128, 0, 0, 10);
	depth = 32;
	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = IPv4(128, 0, 0, 0);
	next_hop_add = 100;

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = IPv4(128, 0, 0, 0);
	depth = 24;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	ip = IPv4(128, 0, 0, 10);
	depth = 32;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}
/*
 * Add an extended rule (i.e. depth greater than 24), lookup (hit), delete,
 * lookup (miss) in a for loop of 1000 times. This will check tbl8 extension
 * and contraction.
 */
int32_t
test12(void)
{
	__m128i ipx4;
	uint32_t hop[4];
	struct rte_lpm *lpm = NULL;
	uint32_t ip, i, next_hop_add, next_hop_return;
	uint8_t depth;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	ip = IPv4(128, 0, 0, 0);
	depth = 32;
	next_hop_add = 100;

	for (i = 0; i < 1000; i++) {
		status = rte_lpm_add(lpm, ip, depth, next_hop_add);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
				(next_hop_return == next_hop_add));

		ipx4 = _mm_set_epi32(ip, ip + 1, ip, ip - 1);
		rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
		TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
		TEST_LPM_ASSERT(hop[1] == next_hop_add);
		TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
		TEST_LPM_ASSERT(hop[3] == next_hop_add);

		status = rte_lpm_delete(lpm, ip, depth);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT(status == -ENOENT);
	}

	rte_lpm_free(lpm);

	return PASS;
}
/*
 * Add a rule to tbl24, lookup (hit), then add a rule that will extend this
 * tbl24 entry, lookup (hit). Delete the rule that caused the tbl24 extension,
 * lookup (miss) and repeat in a for loop of 1000 times. This will check tbl8
 * extension and contraction.
 */
int32_t
test13(void)
{
	struct rte_lpm *lpm = NULL;
	uint32_t ip, i, next_hop_add_1, next_hop_add_2, next_hop_return;
	uint8_t depth;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	ip = IPv4(128, 0, 0, 0);
	depth = 24;
	next_hop_add_1 = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add_1);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));

	depth = 32;
	next_hop_add_2 = 101;

	for (i = 0; i < 1000; i++) {
		status = rte_lpm_add(lpm, ip, depth, next_hop_add_2);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
				(next_hop_return == next_hop_add_2));

		status = rte_lpm_delete(lpm, ip, depth);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
				(next_hop_return == next_hop_add_1));
	}

	depth = 24;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}
/*
 * Force TBL8 extension exhaustion. Add 256 rules that require a tbl8
 * extension. No more tbl8 extensions will be allowed. Now add one more rule
 * that requires a tbl8 extension and check that it fails.
 */
int32_t
test14(void)
{
	/* We only use depth = 32 in the loop below so we must make sure
	 * that we have enough storage for all rules at that depth. */
	struct rte_lpm *lpm = NULL;
	uint32_t ip, next_hop_add, next_hop_return;
	uint8_t depth;
	int32_t status = 0;

	/* Add enough space for 256 rules for every depth */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 256 * 32, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	depth = 32;
	next_hop_add = 100;
	ip = IPv4(0, 0, 0, 0);

	/* Add 256 rules that require a tbl8 extension */
	for (; ip <= IPv4(0, 0, 255, 0); ip += 256) {
		status = rte_lpm_add(lpm, ip, depth, next_hop_add);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
				(next_hop_return == next_hop_add));
	}

	/* All tbl8 extensions have been used above. Try to add one more and
	 * the add should fail. */
	ip = IPv4(1, 0, 0, 0);
	depth = 32;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status < 0);

	rte_lpm_free(lpm);

	return PASS;
}
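/*
 * Arithmetic behind test14 (explanatory note, not from the original source):
 * the loop steps ip by 256 from 0.0.0.0 to 0.0.255.0, i.e. 256 distinct /24
 * ranges, and each /32 rule under a new /24 consumes one tbl8 group. With the
 * default of 256 tbl8 groups (RTE_LPM_TBL8_NUM_GROUPS at the time of
 * writing), the 257th group request must fail.
 */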
/*
 * Sequence of operations for find existing lpm table
 *
 *  - create table
 *  - find existing table: hit
 *  - find non-existing table: miss
 */
int32_t
test15(void)
{
	struct rte_lpm *lpm = NULL, *result = NULL;

	/* Create lpm */
	lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, 256 * 32, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	/* Try to find existing lpm */
	result = rte_lpm_find_existing("lpm_find_existing");
	TEST_LPM_ASSERT(result == lpm);

	/* Try to find non-existing lpm */
	result = rte_lpm_find_existing("lpm_find_non_existing");
	TEST_LPM_ASSERT(result == NULL);

	/* Cleanup. */
	rte_lpm_delete_all(lpm);
	rte_lpm_free(lpm);

	return PASS;
}
/*
 * Test failure condition of overloading the tbl8 so no more will fit.
 * Check that we get an error return value in that case.
 */
int32_t
test16(void)
{
	uint32_t ip;
	struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY,
			256 * 32, 0);

	/* ip loops through all possibilities for top 24 bits of address */
	for (ip = 0; ip < 0xFFFFFF; ip++) {
		/* add an entry within a different tbl8 each time, since
		 * depth >24 and the top 24 bits are different */
		if (rte_lpm_add(lpm, (ip << 8) + 0xF0, 30, 0) < 0)
			break;
	}

	if (ip != RTE_LPM_TBL8_NUM_GROUPS) {
		printf("Error, unexpected failure with filling tbl8 groups\n");
		printf("Failed after %u additions, expected after %u\n",
				(unsigned)ip, (unsigned)RTE_LPM_TBL8_NUM_GROUPS);
		rte_lpm_free(lpm);
		return -1;
	}

	rte_lpm_free(lpm);
	return 0;
}
/*
 * Test for overwriting of tbl8:
 *  - add rule /32 and lookup
 *  - add new rule /24 and lookup
 *  - add third rule /25 and lookup
 *  - lookup /32 and /24 rule to ensure the table has not been overwritten.
 */
int32_t
test17(void)
{
	struct rte_lpm *lpm = NULL;
	const uint32_t ip_10_32 = IPv4(10, 10, 10, 2);
	const uint32_t ip_10_24 = IPv4(10, 10, 10, 0);
	const uint32_t ip_20_25 = IPv4(10, 10, 20, 2);
	const uint8_t d_ip_10_32 = 32,
			d_ip_10_24 = 24,
			d_ip_20_25 = 25;
	const uint32_t next_hop_ip_10_32 = 100,
			next_hop_ip_10_24 = 105,
			next_hop_ip_20_25 = 111;
	uint32_t next_hop_return = 0;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	if ((status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32,
			next_hop_ip_10_32)) < 0)
		return -1;

	status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
	uint32_t test_hop_10_32 = next_hop_return;
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);

	if ((status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24,
			next_hop_ip_10_24)) < 0)
		return -1;

	status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
	uint32_t test_hop_10_24 = next_hop_return;
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);

	if ((status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25,
			next_hop_ip_20_25)) < 0)
		return -1;

	status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
	uint32_t test_hop_20_25 = next_hop_return;
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);

	if (test_hop_10_32 == test_hop_10_24) {
		printf("Next hop return equal\n");
		return -1;
	}

	if (test_hop_10_24 == test_hop_20_25) {
		printf("Next hop return equal\n");
		return -1;
	}

	status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);

	status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);

	rte_lpm_free(lpm);

	return PASS;
}
/*
 * Lookup performance test
 */

#define ITERATIONS (1 << 10)
#define BATCH_SIZE (1 << 12)
#define BULK_SIZE 32
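/*
 * Measurement note (explanatory, not from the original source): each timed
 * section below performs ITERATIONS * BATCH_SIZE = 2^22 lookups; rte_rdtsc()
 * deltas are summed over the batches and divided by that count, so the
 * figures printed are average cycles per looked-up address.
 */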
static void
print_route_distribution(const struct route_rule *table, uint32_t n)
{
	unsigned i, j;

	printf("Route distribution per prefix width:\n");
	printf("DEPTH    QUANTITY (PERCENT)\n");
	printf("---------------------------\n");

	/* Count depths. */
	for (i = 1; i <= 32; i++) {
		unsigned depth_counter = 0;
		double percent_hits;

		for (j = 0; j < n; j++)
			if (table[j].depth == (uint8_t) i)
				depth_counter++;

		percent_hits = ((double)depth_counter)/((double)n) * 100;
		printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
	}
	printf("\n");
}
int32_t
perf_test(void)
{
	struct rte_lpm *lpm = NULL;
	uint64_t begin, total_time, lpm_used_entries = 0;
	unsigned i, j;
	uint32_t next_hop_add = 0xAA, next_hop_return = 0;
	int status = 0;
	uint64_t cache_line_counter = 0;
	int64_t count = 0;

	rte_srand(rte_rdtsc());

	printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);

	print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 1000000, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	/* Measure add. */
	begin = rte_rdtsc();

	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
		if (rte_lpm_add(lpm, large_route_table[i].ip,
				large_route_table[i].depth, next_hop_add) == 0)
			status++;
	}
	/* End timer. */
	total_time = rte_rdtsc() - begin;

	printf("Unique added entries = %d\n", status);

	/* Obtain add statistics: count valid tbl24 entries and how many
	 * 64-byte cache lines they touch. */
	for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
		if (lpm->tbl24[i].valid)
			lpm_used_entries++;

		if (i % 32 == 0) {
			if ((uint64_t)count < lpm_used_entries) {
				cache_line_counter++;
				count = lpm_used_entries;
			}
		}
	}

	printf("Used table 24 entries = %u (%g%%)\n",
			(unsigned) lpm_used_entries,
			(lpm_used_entries * 100.0) / RTE_LPM_TBL24_NUM_ENTRIES);
	printf("64 byte Cache entries used = %u (%u bytes)\n",
			(unsigned) cache_line_counter,
			(unsigned) cache_line_counter * 64);

	printf("Average LPM Add: %g cycles\n",
			(double)total_time / NUM_ROUTE_ENTRIES);
	/* Measure single Lookup */
	total_time = 0;
	count = 0;

	for (i = 0; i < ITERATIONS; i++) {
		static uint32_t ip_batch[BATCH_SIZE];

		for (j = 0; j < BATCH_SIZE; j++)
			ip_batch[j] = rte_rand();

		/* Lookup per batch */
		begin = rte_rdtsc();

		for (j = 0; j < BATCH_SIZE; j++) {
			if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
				count++;
		}

		total_time += rte_rdtsc() - begin;
	}
	printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
	/* Measure bulk Lookup */
	total_time = 0;
	count = 0;
	for (i = 0; i < ITERATIONS; i++) {
		static uint32_t ip_batch[BATCH_SIZE];
		uint32_t next_hops[BULK_SIZE];

		/* Create array of random IP addresses */
		for (j = 0; j < BATCH_SIZE; j++)
			ip_batch[j] = rte_rand();

		/* Lookup per batch */
		begin = rte_rdtsc();
		for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
			unsigned k;

			rte_lpm_lookup_bulk(lpm, &ip_batch[j], next_hops, BULK_SIZE);
			for (k = 0; k < BULK_SIZE; k++)
				if (unlikely(!(next_hops[k] & RTE_LPM_LOOKUP_SUCCESS)))
					count++;
		}

		total_time += rte_rdtsc() - begin;
	}
	printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
	/* Measure LookupX4 */
	total_time = 0;
	count = 0;
	for (i = 0; i < ITERATIONS; i++) {
		static uint32_t ip_batch[BATCH_SIZE];
		uint32_t next_hops[4];

		/* Create array of random IP addresses */
		for (j = 0; j < BATCH_SIZE; j++)
			ip_batch[j] = rte_rand();

		/* Lookup per batch */
		begin = rte_rdtsc();
		for (j = 0; j < BATCH_SIZE; j += RTE_DIM(next_hops)) {
			unsigned k;
			__m128i ipx4;

			/* Use an unaligned load; ip_batch + j is not
			 * guaranteed to be 16-byte aligned. */
			ipx4 = _mm_loadu_si128((__m128i *)(ip_batch + j));
			rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT32_MAX);
			for (k = 0; k < RTE_DIM(next_hops); k++)
				if (unlikely(next_hops[k] == UINT32_MAX))
					count++;
		}

		total_time += rte_rdtsc() - begin;
	}
	printf("LPM LookupX4: %.1f cycles (fails = %.1f%%)\n",
			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
	/* Measure Delete */
	status = 0;
	begin = rte_rdtsc();

	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
		/* rte_lpm_delete(lpm, ip, depth) */
		status += rte_lpm_delete(lpm, large_route_table[i].ip,
				large_route_table[i].depth);
	}

	/* Fresh measurement; total_time still holds the LookupX4 time. */
	total_time = rte_rdtsc() - begin;

	printf("Average LPM Delete: %g cycles\n",
			(double)total_time / NUM_ROUTE_ENTRIES);

	rte_lpm_delete_all(lpm);
	rte_lpm_free(lpm);

	return PASS;
}
/*
 * Do all unit and performance tests.
 */
static int
test_lpm(void)
{
	unsigned i;
	int status, global_status = 0;

	for (i = 0; i < NUM_LPM_TESTS; i++) {
		status = tests[i]();
		if (status < 0) {
			printf("ERROR: LPM Test %u: FAIL\n", i);
			global_status = status;
		}
	}

	return global_status;
}

static struct test_command lpm_cmd = {
	.command = "lpm_autotest",
	.callback = test_lpm,
};
REGISTER_TEST_COMMAND(lpm_cmd);
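/*
 * Usage note (explanatory, not from the original source): once registered,
 * the suite can be run from the DPDK test application prompt, e.g.:
 *
 *	RTE>> lpm_autotest
 */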