/*-
 *   BSD LICENSE
 *
 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_random.h>
#include <rte_branch_prediction.h>
#include <rte_ip.h>
#include <rte_lpm.h>

#include "test.h"
#include "test_lpm_routes.h"
#define TEST_LPM_ASSERT(cond) do {                                            \
	if (!(cond)) {                                                        \
		printf("Error at line %d:\n", __LINE__);                      \
		return -1;                                                    \
	}                                                                     \
} while (0)
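/*
 * Usage sketch (illustrative): every check below funnels through this macro,
 * so a failed condition reports its source line and aborts the enclosing
 * test with -1, e.g.:
 *
 *	status = rte_lpm_add(lpm, ip, depth, next_hop);
 *	TEST_LPM_ASSERT(status == 0);
 */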
typedef int32_t (*rte_lpm_test)(void);

static int32_t test0(void);
static int32_t test1(void);
static int32_t test2(void);
static int32_t test3(void);
static int32_t test4(void);
static int32_t test5(void);
static int32_t test6(void);
static int32_t test7(void);
static int32_t test8(void);
static int32_t test9(void);
static int32_t test10(void);
static int32_t test11(void);
static int32_t test12(void);
static int32_t test13(void);
static int32_t test14(void);
static int32_t test15(void);
static int32_t test16(void);
static int32_t test17(void);
static int32_t perf_test(void);
rte_lpm_test tests[] = {
	/* Unit tests (order matches the declarations above). */
	test0, test1, test2, test3, test4, test5, test6, test7, test8,
	test9, test10, test11, test12, test13, test14, test15, test16,
	test17,
	/* Performance test */
	perf_test,
};

#define NUM_LPM_TESTS (sizeof(tests)/sizeof(tests[0]))
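/* The sizeof idiom above keeps NUM_LPM_TESTS in sync with tests[], so new
 * tests only need to be appended to the array. */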
#define MAX_DEPTH 32
#define MAX_RULES 256
/*
 * Check that rte_lpm_create fails gracefully for incorrect user input
 */

	struct rte_lpm *lpm = NULL;

	/* rte_lpm_create: lpm name == NULL */
	lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm == NULL);

	/* rte_lpm_create: max_rules = 0 */
	/* Note: __func__ inserts the function name, in this case "test0". */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 0, 0);
	TEST_LPM_ASSERT(lpm == NULL);

	/* socket_id < -1 is invalid */
	lpm = rte_lpm_create(__func__, -2, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm == NULL);
/*
 * Create and free an LPM table 100 times, using a slightly different number
 * of rules each time.
 */

	int32_t i;
	struct rte_lpm *lpm = NULL;

	/* Create and free in a loop; rte_lpm_create must succeed each time. */
	for (i = 0; i < 100; i++) {
		lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES - i, 0);
		TEST_LPM_ASSERT(lpm != NULL);

		rte_lpm_free(lpm);
	}

	/* rte_lpm_free() has no return value, so there is nothing to check;
	 * return success. */
/*
 * Call rte_lpm_free for NULL pointer user input. Note: free has no return
 * value, so it is impossible to check for failure, but this test is added to
 * increase function coverage metrics and to validate that freeing NULL does
 * not crash.
 */

	struct rte_lpm *lpm = NULL;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
	TEST_LPM_ASSERT(lpm != NULL);

	rte_lpm_free(lpm);
	rte_lpm_free(NULL);
/*
 * Check that rte_lpm_add fails gracefully for incorrect user input arguments
 */

	int32_t status;
	struct rte_lpm *lpm = NULL;
	uint32_t ip = IPv4(0, 0, 0, 0);
	uint8_t depth = 24, next_hop = 100;

	/* rte_lpm_add: lpm == NULL */
	status = rte_lpm_add(NULL, ip, depth, next_hop);
	TEST_LPM_ASSERT(status < 0);

	/* Create valid lpm to use in rest of test. */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	/* rte_lpm_add: depth < 1 */
	status = rte_lpm_add(lpm, ip, 0, next_hop);
	TEST_LPM_ASSERT(status < 0);

	/* rte_lpm_add: depth > MAX_DEPTH */
	status = rte_lpm_add(lpm, ip, (MAX_DEPTH + 1), next_hop);
	TEST_LPM_ASSERT(status < 0);
/*
 * Check that rte_lpm_delete fails gracefully for incorrect user input
 */

	int32_t status;
	struct rte_lpm *lpm = NULL;
	uint32_t ip = IPv4(0, 0, 0, 0);
	uint8_t depth = 24;

	/* rte_lpm_delete: lpm == NULL */
	status = rte_lpm_delete(NULL, ip, depth);
	TEST_LPM_ASSERT(status < 0);

	/* Create valid lpm to use in rest of test. */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	/* rte_lpm_delete: depth < 1 */
	status = rte_lpm_delete(lpm, ip, 0);
	TEST_LPM_ASSERT(status < 0);

	/* rte_lpm_delete: depth > MAX_DEPTH */
	status = rte_lpm_delete(lpm, ip, (MAX_DEPTH + 1));
	TEST_LPM_ASSERT(status < 0);
/*
 * Check that rte_lpm_lookup fails gracefully for incorrect user input
 */

#if defined(RTE_LIBRTE_LPM_DEBUG)
	int32_t status;
	struct rte_lpm *lpm = NULL;
	uint32_t ip = IPv4(0, 0, 0, 0);
	uint8_t next_hop_return = 0;

	/* rte_lpm_lookup: lpm == NULL */
	status = rte_lpm_lookup(NULL, ip, &next_hop_return);
	TEST_LPM_ASSERT(status < 0);

	/* Create valid lpm to use in rest of test. */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	/* rte_lpm_lookup: next_hop = NULL */
	status = rte_lpm_lookup(lpm, ip, NULL);
	TEST_LPM_ASSERT(status < 0);
#endif
/*
 * Call add, lookup and delete for a single rule with depth <= 24
 */

	int32_t status;
	struct rte_lpm *lpm = NULL;
	uint32_t ip = IPv4(0, 0, 0, 0);
	uint8_t depth = 24, next_hop_add = 100, next_hop_return = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);
/*
 * Call add, lookup and delete for a single rule with depth > 24
 */

	__m128i ipx4;
	uint16_t hop[4];
	int32_t status;
	struct rte_lpm *lpm = NULL;
	uint32_t ip = IPv4(0, 0, 0, 0);
	uint8_t depth = 32, next_hop_add = 100, next_hop_return = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
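	/*
	 * Note: _mm_set_epi32() fills the vector from the highest 32-bit lane
	 * down to the lowest, so hop[0] corresponds to the last argument and
	 * hop[3] to the first.
	 */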
	ipx4 = _mm_set_epi32(ip, ip + 0x100, ip - 0x100, ip);
	rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
	TEST_LPM_ASSERT(hop[0] == next_hop_add);
	TEST_LPM_ASSERT(hop[1] == UINT16_MAX);
	TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
	TEST_LPM_ASSERT(hop[3] == next_hop_add);

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);
/*
 * Use rte_lpm_add to add rules which affect only the second half of the lpm
 * table. Use all possible depths ranging from 1..32. Set the next hop equal
 * to the depth. Check for a lookup hit on each add, and check for a lookup
 * miss on the first half of the lpm table after each add. Finally, delete
 * all rules going backwards (i.e. from depth = 32 to 1) and carry out a
 * lookup after each delete. The lookup should return the next_hop_add value
 * related to the previous depth value (i.e. depth - 1).
 */
	__m128i ipx4;
	uint16_t hop[4];
	int32_t status;
	struct rte_lpm *lpm = NULL;
	uint32_t ip1 = IPv4(127, 255, 255, 255), ip2 = IPv4(128, 0, 0, 0);
	uint8_t depth, next_hop_add, next_hop_return;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	/* Loop with rte_lpm_add. */
	for (depth = 1; depth <= 32; depth++) {
		/* Set next_hop_add equal to depth, just for variety. */
		next_hop_add = depth;

		status = rte_lpm_add(lpm, ip2, depth, next_hop_add);
		TEST_LPM_ASSERT(status == 0);

		/* Check IP in first half of tbl24 which should be empty. */
		status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
		TEST_LPM_ASSERT(status == -ENOENT);

		status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
			(next_hop_return == next_hop_add));

		ipx4 = _mm_set_epi32(ip2, ip1, ip2, ip1);
		rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
		TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
		TEST_LPM_ASSERT(hop[1] == next_hop_add);
		TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
		TEST_LPM_ASSERT(hop[3] == next_hop_add);
	}
	/* Loop with rte_lpm_delete. */
	for (depth = 32; depth >= 1; depth--) {
		next_hop_add = (uint8_t)(depth - 1);

		status = rte_lpm_delete(lpm, ip2, depth);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip2, &next_hop_return);

		if (depth != 1) {
			TEST_LPM_ASSERT((status == 0) &&
				(next_hop_return == next_hop_add));
		} else {
			TEST_LPM_ASSERT(status == -ENOENT);
		}

		status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
		TEST_LPM_ASSERT(status == -ENOENT);

		ipx4 = _mm_set_epi32(ip1, ip1, ip2, ip2);
		rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
		if (depth != 1) {
			TEST_LPM_ASSERT(hop[0] == next_hop_add);
			TEST_LPM_ASSERT(hop[1] == next_hop_add);
		} else {
			TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
			TEST_LPM_ASSERT(hop[1] == UINT16_MAX);
		}
		TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
		TEST_LPM_ASSERT(hop[3] == UINT16_MAX);
	}
/*
 * - Add & lookup to hit invalid TBL24 entry
 * - Add & lookup to hit valid TBL24 entry not extended
 * - Add & lookup to hit valid extended TBL24 entry with invalid TBL8 entry
 * - Add & lookup to hit valid extended TBL24 entry with valid TBL8 entry
 */
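/*
 * Background on the layout exercised above (summarized from the DPDK LPM
 * design): tbl24 holds one entry per possible 24-bit prefix, so rules with
 * depth <= 24 live directly in it. A rule with depth > 24 "extends" the
 * covering tbl24 entry by redirecting it to a 256-entry tbl8 group indexed
 * by the last byte of the address.
 */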
	int32_t status;
	struct rte_lpm *lpm = NULL;
	uint32_t ip, ip_1, ip_2;
	uint8_t depth, depth_1, depth_2, next_hop_add, next_hop_add_1,
		next_hop_add_2, next_hop_return;

	/* Add & lookup to hit invalid TBL24 entry */
	ip = IPv4(128, 0, 0, 0);

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);
	/* Add & lookup to hit valid TBL24 entry not extended */
	ip = IPv4(128, 0, 0, 0);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);
	/* Add & lookup to hit valid extended TBL24 entry with invalid TBL8
	 * entry. */
	ip = IPv4(128, 0, 0, 0);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = IPv4(128, 0, 0, 5);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	ip = IPv4(128, 0, 0, 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);
	/* Add & lookup to hit valid extended TBL24 entry with valid TBL8
	 * entry. */
	ip_1 = IPv4(128, 0, 0, 0);
	next_hop_add_1 = 101;

	ip_2 = IPv4(128, 0, 0, 5);
	next_hop_add_2 = 102;

	status = rte_lpm_add(lpm, ip_1, depth_1, next_hop_add_1);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));

	status = rte_lpm_add(lpm, ip_2, depth_2, next_hop_add_2);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2));

	status = rte_lpm_delete(lpm, ip_2, depth_2);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));

	status = rte_lpm_delete(lpm, ip_1, depth_1);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);
/*
 * - Add rule that covers a TBL24 range previously invalid & lookup (& delete
 *   & lookup)
 * - Add rule that extends a TBL24 invalid entry & lookup (& delete & lookup)
 * - Add rule that extends a TBL24 valid entry & lookup for both rules
 *   (& delete & lookup)
 * - Add rule that updates the next hop in TBL24 & lookup (& delete & lookup)
 * - Add rule that updates the next hop in TBL8 & lookup (& delete & lookup)
 * - Delete a rule that is not present in the TBL24 & lookup
 * - Delete a rule that is not present in the TBL8 & lookup
 */
	int32_t status;
	struct rte_lpm *lpm = NULL;
	uint32_t ip;
	uint8_t depth, next_hop_add, next_hop_return;

	/* Add rule that covers a TBL24 range previously invalid & lookup
	 * (& delete & lookup) */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
	TEST_LPM_ASSERT(lpm != NULL);

	ip = IPv4(128, 0, 0, 0);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);
	/* Add rule that extends a TBL24 invalid entry & lookup
	 * (& delete & lookup) */
	ip = IPv4(128, 0, 0, 0);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	rte_lpm_delete_all(lpm);
	/* Add rule that extends a TBL24 valid entry & lookup for both rules
	 * (& delete & lookup) */
	ip = IPv4(128, 0, 0, 0);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	ip = IPv4(128, 0, 0, 10);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = IPv4(128, 0, 0, 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = IPv4(128, 0, 0, 0);

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	ip = IPv4(128, 0, 0, 10);

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);
	/* Add rule that updates the next hop in TBL24 & lookup
	 * (& delete & lookup) */
	ip = IPv4(128, 0, 0, 0);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);
	/* Add rule that updates the next hop in TBL8 & lookup
	 * (& delete & lookup) */
	ip = IPv4(128, 0, 0, 0);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);
	/* Delete a rule that is not present in the TBL24 & lookup */
	ip = IPv4(128, 0, 0, 0);

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status < 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Delete a rule that is not present in the TBL8 & lookup */
	ip = IPv4(128, 0, 0, 0);

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status < 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);
/*
 * Add two rules, lookup to hit the more specific one, lookup to hit the less
 * specific one, delete the less specific rule and lookup previous values
 * again; add a more specific rule than the existing rule, lookup again
 */
	int32_t status;
	struct rte_lpm *lpm = NULL;
	uint32_t ip;
	uint8_t depth, next_hop_add, next_hop_return;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	ip = IPv4(128, 0, 0, 0);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	ip = IPv4(128, 0, 0, 10);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = IPv4(128, 0, 0, 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = IPv4(128, 0, 0, 0);

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	ip = IPv4(128, 0, 0, 10);

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);
/*
 * Add an extended rule (i.e. depth greater than 24), lookup (hit), delete,
 * lookup (miss), in a for loop of 1000 times. This will check tbl8 extension
 * and contraction.
 */
	__m128i ipx4;
	uint16_t hop[4];
	int32_t status;
	struct rte_lpm *lpm = NULL;
	uint32_t ip, i;
	uint8_t depth, next_hop_add, next_hop_return;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	ip = IPv4(128, 0, 0, 0);

	for (i = 0; i < 1000; i++) {
		status = rte_lpm_add(lpm, ip, depth, next_hop_add);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
			(next_hop_return == next_hop_add));

		ipx4 = _mm_set_epi32(ip, ip + 1, ip, ip - 1);
		rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
		TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
		TEST_LPM_ASSERT(hop[1] == next_hop_add);
		TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
		TEST_LPM_ASSERT(hop[3] == next_hop_add);

		status = rte_lpm_delete(lpm, ip, depth);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT(status == -ENOENT);
	}
/*
 * Add a rule to tbl24, lookup (hit), then add a rule that will extend this
 * tbl24 entry, lookup (hit). Delete the rule that caused the tbl24 extension,
 * lookup (miss), and repeat in a for loop of 1000 times. This will check tbl8
 * extension and contraction.
 */
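/*
 * (Background: deleting the last depth > 24 rule under a tbl24 entry lets
 * the library recycle that entry's 256-slot tbl8 group; this recycling is
 * the "contraction" exercised here.)
 */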
	int32_t status;
	struct rte_lpm *lpm = NULL;
	uint32_t ip, i;
	uint8_t depth, next_hop_add_1, next_hop_add_2, next_hop_return;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	ip = IPv4(128, 0, 0, 0);
	next_hop_add_1 = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add_1);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));

	next_hop_add_2 = 101;

	for (i = 0; i < 1000; i++) {
		status = rte_lpm_add(lpm, ip, depth, next_hop_add_2);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
			(next_hop_return == next_hop_add_2));

		status = rte_lpm_delete(lpm, ip, depth);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
			(next_hop_return == next_hop_add_1));
	}

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);
/*
 * Force tbl8 extension exhaustion. Add 256 rules that require a tbl8
 * extension. No more tbl8 extensions will be allowed. Then add one more rule
 * that requires a tbl8 extension and expect failure.
 */
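/*
 * (The 256 figure corresponds to RTE_LPM_TBL8_NUM_GROUPS, which is 256 in
 * the DPDK version this test targets.)
 */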
	/* We only use depth = 32 in the loop below so we must make sure
	 * that we have enough storage for all rules at that depth. */
	int32_t status;
	struct rte_lpm *lpm = NULL;
	uint32_t ip;
	uint8_t depth, next_hop_add, next_hop_return;

	/* Add enough space for 256 rules for every depth */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 256 * 32, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	depth = 32;
	ip = IPv4(0, 0, 0, 0);

	/* Add 256 rules that require a tbl8 extension */
	for (; ip <= IPv4(0, 0, 255, 0); ip += 256) {
		status = rte_lpm_add(lpm, ip, depth, next_hop_add);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
			(next_hop_return == next_hop_add));
	}

	/* All tbl8 extensions have been used above. Try to add one more and
	 * it should fail. */
	ip = IPv4(1, 0, 0, 0);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status < 0);
/*
 * Sequence of operations for find existing lpm table
 *
 *  - create table
 *  - find existing table: hit
 *  - find non-existing table: miss
 */
	struct rte_lpm *lpm = NULL, *result = NULL;

	/* Create lpm */
	lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, 256 * 32, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	/* Try to find existing lpm */
	result = rte_lpm_find_existing("lpm_find_existing");
	TEST_LPM_ASSERT(result == lpm);

	/* Try to find non-existing lpm */
	result = rte_lpm_find_existing("lpm_find_non_existing");
	TEST_LPM_ASSERT(result == NULL);

	/* Cleanup. */
	rte_lpm_delete_all(lpm);
	rte_lpm_free(lpm);
/*
 * Test the failure condition of overloading the tbl8 so that no more will
 * fit. Check that we get an error return value in that case.
 */
	uint32_t ip;
	struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY,
			256 * 32, 0);

	/* ip loops through all possibilities for top 24 bits of address */
	for (ip = 0; ip < 0xFFFFFF; ip++) {
		/* add an entry within a different tbl8 each time, since
		 * depth >24 and the top 24 bits are different */
		if (rte_lpm_add(lpm, (ip << 8) + 0xF0, 30, 0) < 0)
			break;
	}

	if (ip != RTE_LPM_TBL8_NUM_GROUPS) {
		printf("Error, unexpected failure with filling tbl8 groups\n");
		printf("Failed after %u additions, expected after %u\n",
				(unsigned)ip, (unsigned)RTE_LPM_TBL8_NUM_GROUPS);
		return -1;
	}
/*
 * Test for overwriting of tbl8:
 *  - add rule /32 and lookup
 *  - add new rule /24 and lookup
 *  - add third rule /25 and lookup
 *  - lookup /32 and /24 rule to ensure the table has not been overwritten.
 */
	int32_t status = 0;
	struct rte_lpm *lpm = NULL;
	const uint32_t ip_10_32 = IPv4(10, 10, 10, 2);
	const uint32_t ip_10_24 = IPv4(10, 10, 10, 0);
	const uint32_t ip_20_25 = IPv4(10, 10, 20, 2);
	const uint8_t d_ip_10_32 = 32,
			d_ip_10_24 = 24,
			d_ip_20_25 = 25;
	const uint8_t next_hop_ip_10_32 = 100,
			next_hop_ip_10_24 = 105,
			next_hop_ip_20_25 = 111;
	uint8_t next_hop_return = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	if ((status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32,
			next_hop_ip_10_32)) < 0)
		return -1;

	status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
	uint8_t test_hop_10_32 = next_hop_return;
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);

	if ((status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24,
			next_hop_ip_10_24)) < 0)
		return -1;

	status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
	uint8_t test_hop_10_24 = next_hop_return;
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);

	if ((status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25,
			next_hop_ip_20_25)) < 0)
		return -1;

	status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
	uint8_t test_hop_20_25 = next_hop_return;
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);

	if (test_hop_10_32 == test_hop_10_24) {
		printf("Next hop return equal\n");
		return -1;
	}

	if (test_hop_10_24 == test_hop_20_25) {
		printf("Next hop return equal\n");
		return -1;
	}

	status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);

	status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
/*
 * Lookup performance test
 */

#define ITERATIONS (1 << 10)
#define BATCH_SIZE (1 << 12)
#define BULK_SIZE 32
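/*
 * Each timed phase below performs ITERATIONS * BATCH_SIZE = 2^22 (about
 * 4.2 million) lookups in total; BULK_SIZE is the number of addresses
 * passed to rte_lpm_lookup_bulk() per call.
 */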
static void
print_route_distribution(const struct route_rule *table, uint32_t n)
{
	unsigned i, j;

	printf("Route distribution per prefix width:\n");
	printf("DEPTH    QUANTITY (PERCENT)\n");
	printf("---------------------------\n");

	/* Count depths. */
	for (i = 1; i <= 32; i++) {
		unsigned depth_counter = 0;
		double percent_hits;

		for (j = 0; j < n; j++)
			if (table[j].depth == (uint8_t) i)
				depth_counter++;

		percent_hits = ((double)depth_counter)/((double)n) * 100;
		printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
	}
	printf("\n");
}
	struct rte_lpm *lpm = NULL;
	uint64_t begin, total_time, lpm_used_entries = 0;
	unsigned i, j;
	uint8_t next_hop_add = 0xAA, next_hop_return = 0;
	int status = 0;
	int64_t count = 0;
	uint64_t cache_line_counter = 0;

	rte_srand(rte_rdtsc());

	printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);

	print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 1000000, 0);
	TEST_LPM_ASSERT(lpm != NULL);

	/* Measure add. */
	begin = rte_rdtsc();

	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
		if (rte_lpm_add(lpm, large_route_table[i].ip,
				large_route_table[i].depth, next_hop_add) == 0)
			status++;
	}

	total_time = rte_rdtsc() - begin;

	printf("Unique added entries = %d\n", status);
	/* Obtain add statistics. */
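	/*
	 * Each tbl24 entry occupies 2 bytes here, so 32 consecutive entries
	 * share one 64-byte cache line; sampling the used-entry count once
	 * every 32 entries counts the cache lines actually touched.
	 */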
	for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
		if (lpm->tbl24[i].valid)
			lpm_used_entries++;

		if (i % 32 == 0) {
			if ((uint64_t)count < lpm_used_entries) {
				cache_line_counter++;
				count = lpm_used_entries;
			}
		}
	}

	printf("Used table 24 entries = %u (%g%%)\n",
			(unsigned) lpm_used_entries,
			(lpm_used_entries * 100.0) / RTE_LPM_TBL24_NUM_ENTRIES);
	printf("64 byte Cache entries used = %u (%u bytes)\n",
			(unsigned) cache_line_counter,
			(unsigned) cache_line_counter * 64);

	printf("Average LPM Add: %g cycles\n",
			(double)total_time / NUM_ROUTE_ENTRIES);
	/* Measure single Lookup */
	total_time = 0;
	count = 0;

	for (i = 0; i < ITERATIONS; i++) {
		static uint32_t ip_batch[BATCH_SIZE];

		/* Create array of random IP addresses */
		for (j = 0; j < BATCH_SIZE; j++)
			ip_batch[j] = rte_rand();

		/* Lookup per batch */
		begin = rte_rdtsc();

		for (j = 0; j < BATCH_SIZE; j++) {
			if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
				count++;
		}

		total_time += rte_rdtsc() - begin;
	}
	printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
	/* Measure bulk Lookup */
	total_time = 0;
	count = 0;
	for (i = 0; i < ITERATIONS; i++) {
		static uint32_t ip_batch[BATCH_SIZE];
		uint16_t next_hops[BULK_SIZE];

		/* Create array of random IP addresses */
		for (j = 0; j < BATCH_SIZE; j++)
			ip_batch[j] = rte_rand();

		/* Lookup per batch */
		begin = rte_rdtsc();
		for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
			unsigned k;

			rte_lpm_lookup_bulk(lpm, &ip_batch[j], next_hops, BULK_SIZE);
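			/*
			 * rte_lpm_lookup_bulk() sets the RTE_LPM_LOOKUP_SUCCESS
			 * flag in each 16-bit result on a hit, so a cleared
			 * flag marks a failed lookup.
			 */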
			for (k = 0; k < BULK_SIZE; k++)
				if (unlikely(!(next_hops[k] & RTE_LPM_LOOKUP_SUCCESS)))
					count++;
		}

		total_time += rte_rdtsc() - begin;
	}
	printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
	/* Measure LookupX4 */
	total_time = 0;
	count = 0;
	for (i = 0; i < ITERATIONS; i++) {
		static uint32_t ip_batch[BATCH_SIZE];
		uint16_t next_hops[4];

		/* Create array of random IP addresses */
		for (j = 0; j < BATCH_SIZE; j++)
			ip_batch[j] = rte_rand();

		/* Lookup per batch */
		begin = rte_rdtsc();
		for (j = 0; j < BATCH_SIZE; j += RTE_DIM(next_hops)) {
			unsigned k;
			__m128i ipx4;

			/* Use an unaligned load: ip_batch + j is not
			 * guaranteed to be 16-byte aligned. */
			ipx4 = _mm_loadu_si128((__m128i *)(ip_batch + j));
			rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT16_MAX);
			for (k = 0; k < RTE_DIM(next_hops); k++)
				if (unlikely(next_hops[k] == UINT16_MAX))
					count++;
		}

		total_time += rte_rdtsc() - begin;
	}
	printf("LPM LookupX4: %.1f cycles (fails = %.1f%%)\n",
			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
	/* Measure Delete */
	status = 0;
	total_time = 0;
	begin = rte_rdtsc();

	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
		/* rte_lpm_delete(lpm, ip, depth) */
		status += rte_lpm_delete(lpm, large_route_table[i].ip,
				large_route_table[i].depth);
	}

	total_time += rte_rdtsc() - begin;

	printf("Average LPM Delete: %g cycles\n",
			(double)total_time / NUM_ROUTE_ENTRIES);

	rte_lpm_delete_all(lpm);
	rte_lpm_free(lpm);
/*
 * Do all unit and performance tests.
 */

	unsigned i;
	int status, global_status = 0;

	for (i = 0; i < NUM_LPM_TESTS; i++) {
		status = tests[i]();
		if (status < 0) {
			printf("ERROR: LPM Test %s: FAIL\n", RTE_STR(tests[i]));
			global_status = status;
		}
	}

	return global_status;
static struct test_command lpm_cmd = {
	.command = "lpm_autotest",
	.callback = test_lpm,
};
REGISTER_TEST_COMMAND(lpm_cmd);