4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 #include <sys/queue.h>
40 #include <rte_common.h>
41 #include <rte_cycles.h>
42 #include <rte_memory.h>
43 #include <rte_random.h>
44 #include <rte_branch_prediction.h>
51 #include "test_lpm_routes.h"
53 #define TEST_LPM_ASSERT(cond) do { \
55 printf("Error at line %d: \n", __LINE__); \
60 typedef int32_t (*rte_lpm_test)(void);
62 static int32_t test0(void);
63 static int32_t test1(void);
64 static int32_t test2(void);
65 static int32_t test3(void);
66 static int32_t test4(void);
67 static int32_t test5(void);
68 static int32_t test6(void);
69 static int32_t test7(void);
70 static int32_t test8(void);
71 static int32_t test9(void);
72 static int32_t test10(void);
73 static int32_t test11(void);
74 static int32_t test12(void);
75 static int32_t test13(void);
76 static int32_t test14(void);
77 static int32_t test15(void);
78 static int32_t test16(void);
79 static int32_t test17(void);
80 static int32_t perf_test(void);
82 rte_lpm_test tests[] = {
105 #define NUM_LPM_TESTS (sizeof(tests)/sizeof(tests[0]))
107 #define MAX_RULES 256
108 #define NUMBER_TBL8S 256
112 * Check that rte_lpm_create fails gracefully for incorrect user input
118 struct rte_lpm *lpm = NULL;
119 struct rte_lpm_config config;
121 config.max_rules = MAX_RULES;
122 config.number_tbl8s = NUMBER_TBL8S;
125 /* rte_lpm_create: lpm name == NULL */
126 lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, &config);
127 TEST_LPM_ASSERT(lpm == NULL);
129 /* rte_lpm_create: max_rules = 0 */
130 /* Note: __func__ inserts the function name, in this case "test0". */
131 config.max_rules = 0;
132 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
133 TEST_LPM_ASSERT(lpm == NULL);
135 /* socket_id < -1 is invalid */
136 config.max_rules = MAX_RULES;
137 lpm = rte_lpm_create(__func__, -2, &config);
138 TEST_LPM_ASSERT(lpm == NULL);
144 * Create lpm table then delete lpm table 100 times
145 * Use a slightly different rules size each time
150 struct rte_lpm *lpm = NULL;
151 struct rte_lpm_config config;
153 config.number_tbl8s = NUMBER_TBL8S;
157 /* rte_lpm_free: Free NULL */
158 for (i = 0; i < 100; i++) {
159 config.max_rules = MAX_RULES - i;
160 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
161 TEST_LPM_ASSERT(lpm != NULL);
166 /* Can not test free so return success */
171 * Call rte_lpm_free for NULL pointer user input. Note: free has no return and
172 * therefore it is impossible to check for failure but this test is added to
173 * increase function coverage metrics and to validate that freeing null does
179 struct rte_lpm *lpm = NULL;
180 struct rte_lpm_config config;
182 config.max_rules = MAX_RULES;
183 config.number_tbl8s = NUMBER_TBL8S;
186 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
187 TEST_LPM_ASSERT(lpm != NULL);
195 * Check that rte_lpm_add fails gracefully for incorrect user input arguments
200 struct rte_lpm *lpm = NULL;
201 struct rte_lpm_config config;
203 config.max_rules = MAX_RULES;
204 config.number_tbl8s = NUMBER_TBL8S;
206 uint32_t ip = IPv4(0, 0, 0, 0), next_hop = 100;
210 /* rte_lpm_add: lpm == NULL */
211 status = rte_lpm_add(NULL, ip, depth, next_hop);
212 TEST_LPM_ASSERT(status < 0);
214 /* Create valid lpm to use in rest of test. */
215 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
216 TEST_LPM_ASSERT(lpm != NULL);
218 /* rte_lpm_add: depth < 1 */
219 status = rte_lpm_add(lpm, ip, 0, next_hop);
220 TEST_LPM_ASSERT(status < 0);
222 /* rte_lpm_add: depth > MAX_DEPTH */
223 status = rte_lpm_add(lpm, ip, (MAX_DEPTH + 1), next_hop);
224 TEST_LPM_ASSERT(status < 0);
232 * Check that rte_lpm_delete fails gracefully for incorrect user input
238 struct rte_lpm *lpm = NULL;
239 struct rte_lpm_config config;
241 config.max_rules = MAX_RULES;
242 config.number_tbl8s = NUMBER_TBL8S;
244 uint32_t ip = IPv4(0, 0, 0, 0);
248 /* rte_lpm_delete: lpm == NULL */
249 status = rte_lpm_delete(NULL, ip, depth);
250 TEST_LPM_ASSERT(status < 0);
252 /* Create valid lpm to use in rest of test. */
253 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
254 TEST_LPM_ASSERT(lpm != NULL);
256 /* rte_lpm_delete: depth < 1 */
257 status = rte_lpm_delete(lpm, ip, 0);
258 TEST_LPM_ASSERT(status < 0);
260 /* rte_lpm_delete: depth > MAX_DEPTH */
261 status = rte_lpm_delete(lpm, ip, (MAX_DEPTH + 1));
262 TEST_LPM_ASSERT(status < 0);
270 * Check that rte_lpm_lookup fails gracefully for incorrect user input
276 #if defined(RTE_LIBRTE_LPM_DEBUG)
277 struct rte_lpm *lpm = NULL;
278 struct rte_lpm_config config;
280 config.max_rules = MAX_RULES;
281 config.number_tbl8s = NUMBER_TBL8S;
283 uint32_t ip = IPv4(0, 0, 0, 0), next_hop_return = 0;
286 /* rte_lpm_lookup: lpm == NULL */
287 status = rte_lpm_lookup(NULL, ip, &next_hop_return);
288 TEST_LPM_ASSERT(status < 0);
290 /* Create valid lpm to use in rest of test. */
291 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
292 TEST_LPM_ASSERT(lpm != NULL);
294 /* rte_lpm_lookup: depth < 1 */
295 status = rte_lpm_lookup(lpm, ip, NULL);
296 TEST_LPM_ASSERT(status < 0);
306 * Call add, lookup and delete for a single rule with depth <= 24
311 struct rte_lpm *lpm = NULL;
312 struct rte_lpm_config config;
314 config.max_rules = MAX_RULES;
315 config.number_tbl8s = NUMBER_TBL8S;
317 uint32_t ip = IPv4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
321 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
322 TEST_LPM_ASSERT(lpm != NULL);
324 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
325 TEST_LPM_ASSERT(status == 0);
327 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
328 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
330 status = rte_lpm_delete(lpm, ip, depth);
331 TEST_LPM_ASSERT(status == 0);
333 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
334 TEST_LPM_ASSERT(status == -ENOENT);
342 * Call add, lookup and delete for a single rule with depth > 24
350 struct rte_lpm *lpm = NULL;
351 struct rte_lpm_config config;
353 config.max_rules = MAX_RULES;
354 config.number_tbl8s = NUMBER_TBL8S;
356 uint32_t ip = IPv4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
360 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
361 TEST_LPM_ASSERT(lpm != NULL);
363 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
364 TEST_LPM_ASSERT(status == 0);
366 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
367 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
369 ipx4 = _mm_set_epi32(ip, ip + 0x100, ip - 0x100, ip);
370 rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
371 TEST_LPM_ASSERT(hop[0] == next_hop_add);
372 TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
373 TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
374 TEST_LPM_ASSERT(hop[3] == next_hop_add);
376 status = rte_lpm_delete(lpm, ip, depth);
377 TEST_LPM_ASSERT(status == 0);
379 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
380 TEST_LPM_ASSERT(status == -ENOENT);
388 * Use rte_lpm_add to add rules which affect only the second half of the lpm
389 * table. Use all possible depths ranging from 1..32. Set the next hop equal to the
390 * depth. Check for a lookup hit on every add and check for a lookup miss on the
391 * first half of the lpm table after each add. Finally delete all rules going
392 * backwards (i.e. from depth = 32 ..1) and carry out a lookup after each
393 * delete. The lookup should return the next_hop_add value related to the
394 * previous depth value (i.e. depth -1).
401 struct rte_lpm *lpm = NULL;
402 struct rte_lpm_config config;
404 config.max_rules = MAX_RULES;
405 config.number_tbl8s = NUMBER_TBL8S;
407 uint32_t ip1 = IPv4(127, 255, 255, 255), ip2 = IPv4(128, 0, 0, 0);
408 uint32_t next_hop_add, next_hop_return;
412 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
413 TEST_LPM_ASSERT(lpm != NULL);
415 /* Loop with rte_lpm_add. */
416 for (depth = 1; depth <= 32; depth++) {
417 /* Let the next_hop_add value = depth. Just for change. */
418 next_hop_add = depth;
420 status = rte_lpm_add(lpm, ip2, depth, next_hop_add);
421 TEST_LPM_ASSERT(status == 0);
423 /* Check IP in first half of tbl24 which should be empty. */
424 status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
425 TEST_LPM_ASSERT(status == -ENOENT);
427 status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
428 TEST_LPM_ASSERT((status == 0) &&
429 (next_hop_return == next_hop_add));
431 ipx4 = _mm_set_epi32(ip2, ip1, ip2, ip1);
432 rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
433 TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
434 TEST_LPM_ASSERT(hop[1] == next_hop_add);
435 TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
436 TEST_LPM_ASSERT(hop[3] == next_hop_add);
439 /* Loop with rte_lpm_delete. */
440 for (depth = 32; depth >= 1; depth--) {
441 next_hop_add = (uint8_t) (depth - 1);
443 status = rte_lpm_delete(lpm, ip2, depth);
444 TEST_LPM_ASSERT(status == 0);
446 status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
449 TEST_LPM_ASSERT((status == 0) &&
450 (next_hop_return == next_hop_add));
452 TEST_LPM_ASSERT(status == -ENOENT);
455 status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
456 TEST_LPM_ASSERT(status == -ENOENT);
458 ipx4 = _mm_set_epi32(ip1, ip1, ip2, ip2);
459 rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
461 TEST_LPM_ASSERT(hop[0] == next_hop_add);
462 TEST_LPM_ASSERT(hop[1] == next_hop_add);
464 TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
465 TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
467 TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
468 TEST_LPM_ASSERT(hop[3] == UINT32_MAX);
477 * - Add & lookup to hit invalid TBL24 entry
478 * - Add & lookup to hit valid TBL24 entry not extended
479 * - Add & lookup to hit valid extended TBL24 entry with invalid TBL8 entry
480 * - Add & lookup to hit valid extended TBL24 entry with valid TBL8 entry
486 struct rte_lpm *lpm = NULL;
487 struct rte_lpm_config config;
489 config.max_rules = MAX_RULES;
490 config.number_tbl8s = NUMBER_TBL8S;
492 uint32_t ip, ip_1, ip_2;
493 uint8_t depth, depth_1, depth_2;
494 uint32_t next_hop_add, next_hop_add_1, next_hop_add_2, next_hop_return;
497 /* Add & lookup to hit invalid TBL24 entry */
498 ip = IPv4(128, 0, 0, 0);
502 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
503 TEST_LPM_ASSERT(lpm != NULL);
505 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
506 TEST_LPM_ASSERT(status == 0);
508 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
509 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
511 status = rte_lpm_delete(lpm, ip, depth);
512 TEST_LPM_ASSERT(status == 0);
514 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
515 TEST_LPM_ASSERT(status == -ENOENT);
517 rte_lpm_delete_all(lpm);
519 /* Add & lookup to hit valid TBL24 entry not extended */
520 ip = IPv4(128, 0, 0, 0);
524 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
525 TEST_LPM_ASSERT(status == 0);
527 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
528 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
533 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
534 TEST_LPM_ASSERT(status == 0);
536 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
537 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
541 status = rte_lpm_delete(lpm, ip, depth);
542 TEST_LPM_ASSERT(status == 0);
546 status = rte_lpm_delete(lpm, ip, depth);
547 TEST_LPM_ASSERT(status == 0);
549 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
550 TEST_LPM_ASSERT(status == -ENOENT);
552 rte_lpm_delete_all(lpm);
554 /* Add & lookup to hit valid extended TBL24 entry with invalid TBL8
556 ip = IPv4(128, 0, 0, 0);
560 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
561 TEST_LPM_ASSERT(status == 0);
563 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
564 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
566 ip = IPv4(128, 0, 0, 5);
570 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
571 TEST_LPM_ASSERT(status == 0);
573 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
574 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
576 status = rte_lpm_delete(lpm, ip, depth);
577 TEST_LPM_ASSERT(status == 0);
579 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
580 TEST_LPM_ASSERT(status == -ENOENT);
582 ip = IPv4(128, 0, 0, 0);
586 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
587 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
589 status = rte_lpm_delete(lpm, ip, depth);
590 TEST_LPM_ASSERT(status == 0);
592 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
593 TEST_LPM_ASSERT(status == -ENOENT);
595 rte_lpm_delete_all(lpm);
597 /* Add & lookup to hit valid extended TBL24 entry with valid TBL8
599 ip_1 = IPv4(128, 0, 0, 0);
601 next_hop_add_1 = 101;
603 ip_2 = IPv4(128, 0, 0, 5);
605 next_hop_add_2 = 102;
609 status = rte_lpm_add(lpm, ip_1, depth_1, next_hop_add_1);
610 TEST_LPM_ASSERT(status == 0);
612 status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
613 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
615 status = rte_lpm_add(lpm, ip_2, depth_2, next_hop_add_2);
616 TEST_LPM_ASSERT(status == 0);
618 status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
619 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2));
621 status = rte_lpm_delete(lpm, ip_2, depth_2);
622 TEST_LPM_ASSERT(status == 0);
624 status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
625 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
627 status = rte_lpm_delete(lpm, ip_1, depth_1);
628 TEST_LPM_ASSERT(status == 0);
630 status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
631 TEST_LPM_ASSERT(status == -ENOENT);
640 * - Add rule that covers a TBL24 range previously invalid & lookup (& delete &
642 * - Add rule that extends a TBL24 invalid entry & lookup (& delete & lookup)
643 * - Add rule that extends a TBL24 valid entry & lookup for both rules (&
645 * - Add rule that updates the next hop in TBL24 & lookup (& delete & lookup)
646 * - Add rule that updates the next hop in TBL8 & lookup (& delete & lookup)
647 * - Delete a rule that is not present in the TBL24 & lookup
648 * - Delete a rule that is not present in the TBL8 & lookup
655 struct rte_lpm *lpm = NULL;
656 struct rte_lpm_config config;
658 config.max_rules = MAX_RULES;
659 config.number_tbl8s = NUMBER_TBL8S;
661 uint32_t ip, next_hop_add, next_hop_return;
665 /* Add rule that covers a TBL24 range previously invalid & lookup
666 * (& delete & lookup) */
667 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
668 TEST_LPM_ASSERT(lpm != NULL);
670 ip = IPv4(128, 0, 0, 0);
674 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
675 TEST_LPM_ASSERT(status == 0);
677 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
678 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
680 status = rte_lpm_delete(lpm, ip, depth);
681 TEST_LPM_ASSERT(status == 0);
683 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
684 TEST_LPM_ASSERT(status == -ENOENT);
686 rte_lpm_delete_all(lpm);
688 ip = IPv4(128, 0, 0, 0);
692 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
693 TEST_LPM_ASSERT(status == 0);
695 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
696 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
698 status = rte_lpm_delete(lpm, ip, depth);
699 TEST_LPM_ASSERT(status == 0);
701 rte_lpm_delete_all(lpm);
703 /* Add rule that extends a TBL24 valid entry & lookup for both rules
704 * (& delete & lookup) */
706 ip = IPv4(128, 0, 0, 0);
710 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
711 TEST_LPM_ASSERT(status == 0);
713 ip = IPv4(128, 0, 0, 10);
717 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
718 TEST_LPM_ASSERT(status == 0);
720 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
721 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
723 ip = IPv4(128, 0, 0, 0);
726 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
727 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
729 ip = IPv4(128, 0, 0, 0);
732 status = rte_lpm_delete(lpm, ip, depth);
733 TEST_LPM_ASSERT(status == 0);
735 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
736 TEST_LPM_ASSERT(status == -ENOENT);
738 ip = IPv4(128, 0, 0, 10);
741 status = rte_lpm_delete(lpm, ip, depth);
742 TEST_LPM_ASSERT(status == 0);
744 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
745 TEST_LPM_ASSERT(status == -ENOENT);
747 rte_lpm_delete_all(lpm);
749 /* Add rule that updates the next hop in TBL24 & lookup
750 * (& delete & lookup) */
752 ip = IPv4(128, 0, 0, 0);
756 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
757 TEST_LPM_ASSERT(status == 0);
759 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
760 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
764 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
765 TEST_LPM_ASSERT(status == 0);
767 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
768 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
770 status = rte_lpm_delete(lpm, ip, depth);
771 TEST_LPM_ASSERT(status == 0);
773 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
774 TEST_LPM_ASSERT(status == -ENOENT);
776 rte_lpm_delete_all(lpm);
778 /* Add rule that updates the next hop in TBL8 & lookup
779 * (& delete & lookup) */
781 ip = IPv4(128, 0, 0, 0);
785 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
786 TEST_LPM_ASSERT(status == 0);
788 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
789 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
793 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
794 TEST_LPM_ASSERT(status == 0);
796 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
797 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
799 status = rte_lpm_delete(lpm, ip, depth);
800 TEST_LPM_ASSERT(status == 0);
802 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
803 TEST_LPM_ASSERT(status == -ENOENT);
805 rte_lpm_delete_all(lpm);
807 /* Delete a rule that is not present in the TBL24 & lookup */
809 ip = IPv4(128, 0, 0, 0);
812 status = rte_lpm_delete(lpm, ip, depth);
813 TEST_LPM_ASSERT(status < 0);
815 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
816 TEST_LPM_ASSERT(status == -ENOENT);
818 rte_lpm_delete_all(lpm);
820 /* Delete a rule that is not present in the TBL8 & lookup */
822 ip = IPv4(128, 0, 0, 0);
825 status = rte_lpm_delete(lpm, ip, depth);
826 TEST_LPM_ASSERT(status < 0);
828 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
829 TEST_LPM_ASSERT(status == -ENOENT);
837 * Add two rules, lookup to hit the more specific one, lookup to hit the less
838 * specific one, delete the less specific rule and look up the previous values again;
839 * add a more specific rule than the existing rule, lookup again
846 struct rte_lpm *lpm = NULL;
847 struct rte_lpm_config config;
849 config.max_rules = MAX_RULES;
850 config.number_tbl8s = NUMBER_TBL8S;
852 uint32_t ip, next_hop_add, next_hop_return;
856 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
857 TEST_LPM_ASSERT(lpm != NULL);
859 ip = IPv4(128, 0, 0, 0);
863 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
864 TEST_LPM_ASSERT(status == 0);
866 ip = IPv4(128, 0, 0, 10);
870 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
871 TEST_LPM_ASSERT(status == 0);
873 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
874 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
876 ip = IPv4(128, 0, 0, 0);
879 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
880 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
882 ip = IPv4(128, 0, 0, 0);
885 status = rte_lpm_delete(lpm, ip, depth);
886 TEST_LPM_ASSERT(status == 0);
888 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
889 TEST_LPM_ASSERT(status == -ENOENT);
891 ip = IPv4(128, 0, 0, 10);
894 status = rte_lpm_delete(lpm, ip, depth);
895 TEST_LPM_ASSERT(status == 0);
897 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
898 TEST_LPM_ASSERT(status == -ENOENT);
906 * Add an extended rule (i.e. depth greater than 24), lookup (hit), delete,
907 * lookup (miss) in a for loop of 1000 times. This will check tbl8 extension
917 struct rte_lpm *lpm = NULL;
918 struct rte_lpm_config config;
920 config.max_rules = MAX_RULES;
921 config.number_tbl8s = NUMBER_TBL8S;
923 uint32_t ip, i, next_hop_add, next_hop_return;
927 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
928 TEST_LPM_ASSERT(lpm != NULL);
930 ip = IPv4(128, 0, 0, 0);
934 for (i = 0; i < 1000; i++) {
935 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
936 TEST_LPM_ASSERT(status == 0);
938 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
939 TEST_LPM_ASSERT((status == 0) &&
940 (next_hop_return == next_hop_add));
942 ipx4 = _mm_set_epi32(ip, ip + 1, ip, ip - 1);
943 rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
944 TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
945 TEST_LPM_ASSERT(hop[1] == next_hop_add);
946 TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
947 TEST_LPM_ASSERT(hop[3] == next_hop_add);
949 status = rte_lpm_delete(lpm, ip, depth);
950 TEST_LPM_ASSERT(status == 0);
952 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
953 TEST_LPM_ASSERT(status == -ENOENT);
962 * Add a rule to tbl24, lookup (hit), then add a rule that will extend this
963 * tbl24 entry, lookup (hit). delete the rule that caused the tbl24 extension,
964 * lookup (miss) and repeat for loop of 1000 times. This will check tbl8
965 * extension and contraction.
972 struct rte_lpm *lpm = NULL;
973 struct rte_lpm_config config;
975 config.max_rules = MAX_RULES;
976 config.number_tbl8s = NUMBER_TBL8S;
978 uint32_t ip, i, next_hop_add_1, next_hop_add_2, next_hop_return;
982 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
983 TEST_LPM_ASSERT(lpm != NULL);
985 ip = IPv4(128, 0, 0, 0);
987 next_hop_add_1 = 100;
989 status = rte_lpm_add(lpm, ip, depth, next_hop_add_1);
990 TEST_LPM_ASSERT(status == 0);
992 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
993 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
996 next_hop_add_2 = 101;
998 for (i = 0; i < 1000; i++) {
999 status = rte_lpm_add(lpm, ip, depth, next_hop_add_2);
1000 TEST_LPM_ASSERT(status == 0);
1002 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
1003 TEST_LPM_ASSERT((status == 0) &&
1004 (next_hop_return == next_hop_add_2));
1006 status = rte_lpm_delete(lpm, ip, depth);
1007 TEST_LPM_ASSERT(status == 0);
1009 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
1010 TEST_LPM_ASSERT((status == 0) &&
1011 (next_hop_return == next_hop_add_1));
1016 status = rte_lpm_delete(lpm, ip, depth);
1017 TEST_LPM_ASSERT(status == 0);
1019 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
1020 TEST_LPM_ASSERT(status == -ENOENT);
1028 * Force TBL8 extension exhaustion. Add 256 rules that require a tbl8 extension.
1029 * No more tbl8 extensions will be allowed. Now add one more rule that requires
1030 * a tbl8 extension and verify that it fails.
1036 /* We only use depth = 32 in the loop below so we must make sure
1037 * that we have enough storage for all rules at that depth. */
1039 struct rte_lpm *lpm = NULL;
1040 struct rte_lpm_config config;
1042 config.max_rules = 256 * 32;
1043 config.number_tbl8s = NUMBER_TBL8S;
1045 uint32_t ip, next_hop_add, next_hop_return;
1049 /* Add enough space for 256 rules for every depth */
1050 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
1051 TEST_LPM_ASSERT(lpm != NULL);
1055 ip = IPv4(0, 0, 0, 0);
1057 /* Add 256 rules that require a tbl8 extension */
1058 for (; ip <= IPv4(0, 0, 255, 0); ip += 256) {
1059 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
1060 TEST_LPM_ASSERT(status == 0);
1062 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
1063 TEST_LPM_ASSERT((status == 0) &&
1064 (next_hop_return == next_hop_add));
1067 /* All tbl8 extensions have been used above. Try to add one more and
1069 ip = IPv4(1, 0, 0, 0);
1072 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
1073 TEST_LPM_ASSERT(status < 0);
1081 * Sequence of operations for find existing lpm table
1084 * - find existing table: hit
1085 * - find non-existing table: miss
1091 struct rte_lpm *lpm = NULL, *result = NULL;
1092 struct rte_lpm_config config;
1094 config.max_rules = 256 * 32;
1095 config.number_tbl8s = NUMBER_TBL8S;
1099 lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, &config);
1100 TEST_LPM_ASSERT(lpm != NULL);
1102 /* Try to find existing lpm */
1103 result = rte_lpm_find_existing("lpm_find_existing");
1104 TEST_LPM_ASSERT(result == lpm);
1106 /* Try to find non-existing lpm */
1107 result = rte_lpm_find_existing("lpm_find_non_existing");
1108 TEST_LPM_ASSERT(result == NULL);
1111 rte_lpm_delete_all(lpm);
1118 * test failure condition of overloading the tbl8 so no more will fit
1119 * Check we get an error return value in that case
1125 struct rte_lpm_config config;
1127 config.max_rules = 256 * 32;
1128 config.number_tbl8s = NUMBER_TBL8S;
1130 struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
1132 /* ip loops through all possibilities for top 24 bits of address */
1133 for (ip = 0; ip < 0xFFFFFF; ip++) {
1134 /* add an entry within a different tbl8 each time, since
1135 * depth >24 and the top 24 bits are different */
1136 if (rte_lpm_add(lpm, (ip << 8) + 0xF0, 30, 0) < 0)
1140 if (ip != NUMBER_TBL8S) {
1141 printf("Error, unexpected failure with filling tbl8 groups\n");
1142 printf("Failed after %u additions, expected after %u\n",
1143 (unsigned)ip, (unsigned)NUMBER_TBL8S);
1151 * Test for overwriting of tbl8:
1152 * - add rule /32 and lookup
1153 * - add new rule /24 and lookup
1154 * - add third rule /25 and lookup
1155 * - lookup /32 and /24 rule to ensure the table has not been overwritten.
1160 struct rte_lpm *lpm = NULL;
1161 struct rte_lpm_config config;
1163 config.max_rules = MAX_RULES;
1164 config.number_tbl8s = NUMBER_TBL8S;
1166 const uint32_t ip_10_32 = IPv4(10, 10, 10, 2);
1167 const uint32_t ip_10_24 = IPv4(10, 10, 10, 0);
1168 const uint32_t ip_20_25 = IPv4(10, 10, 20, 2);
1169 const uint8_t d_ip_10_32 = 32,
1172 const uint32_t next_hop_ip_10_32 = 100,
1173 next_hop_ip_10_24 = 105,
1174 next_hop_ip_20_25 = 111;
1175 uint32_t next_hop_return = 0;
1178 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
1179 TEST_LPM_ASSERT(lpm != NULL);
1181 if ((status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32,
1182 next_hop_ip_10_32)) < 0)
1185 status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
1186 uint32_t test_hop_10_32 = next_hop_return;
1187 TEST_LPM_ASSERT(status == 0);
1188 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
1190 if ((status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24,
1191 next_hop_ip_10_24)) < 0)
1194 status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
1195 uint32_t test_hop_10_24 = next_hop_return;
1196 TEST_LPM_ASSERT(status == 0);
1197 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
1199 if ((status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25,
1200 next_hop_ip_20_25)) < 0)
1203 status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
1204 uint32_t test_hop_20_25 = next_hop_return;
1205 TEST_LPM_ASSERT(status == 0);
1206 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);
1208 if (test_hop_10_32 == test_hop_10_24) {
1209 printf("Next hop return equal\n");
1213 if (test_hop_10_24 == test_hop_20_25) {
1214 printf("Next hop return equal\n");
1218 status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
1219 TEST_LPM_ASSERT(status == 0);
1220 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
1222 status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
1223 TEST_LPM_ASSERT(status == 0);
1224 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
1232 * Lookup performance test
1235 #define ITERATIONS (1 << 10)
1236 #define BATCH_SIZE (1 << 12)
1237 #define BULK_SIZE 32
1240 print_route_distribution(const struct route_rule *table, uint32_t n)
1244 printf("Route distribution per prefix width: \n");
1245 printf("DEPTH QUANTITY (PERCENT)\n");
1246 printf("--------------------------- \n");
1249 for (i = 1; i <= 32; i++) {
1250 unsigned depth_counter = 0;
1251 double percent_hits;
1253 for (j = 0; j < n; j++)
1254 if (table[j].depth == (uint8_t) i)
1257 percent_hits = ((double)depth_counter)/((double)n) * 100;
1258 printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
1266 struct rte_lpm *lpm = NULL;
1267 struct rte_lpm_config config;
1269 config.max_rules = 1000000;
1270 config.number_tbl8s = NUMBER_TBL8S;
1272 uint64_t begin, total_time, lpm_used_entries = 0;
1274 uint32_t next_hop_add = 0xAA, next_hop_return = 0;
1276 uint64_t cache_line_counter = 0;
1279 rte_srand(rte_rdtsc());
1281 printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);
1283 print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);
1285 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
1286 TEST_LPM_ASSERT(lpm != NULL);
1289 begin = rte_rdtsc();
1291 for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
1292 if (rte_lpm_add(lpm, large_route_table[i].ip,
1293 large_route_table[i].depth, next_hop_add) == 0)
1297 total_time = rte_rdtsc() - begin;
1299 printf("Unique added entries = %d\n", status);
1300 /* Obtain add statistics. */
1301 for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
1302 if (lpm->tbl24[i].valid)
1306 if ((uint64_t)count < lpm_used_entries) {
1307 cache_line_counter++;
1308 count = lpm_used_entries;
1313 printf("Used table 24 entries = %u (%g%%)\n",
1314 (unsigned) lpm_used_entries,
1315 (lpm_used_entries * 100.0) / RTE_LPM_TBL24_NUM_ENTRIES);
1316 printf("64 byte Cache entries used = %u (%u bytes)\n",
1317 (unsigned) cache_line_counter, (unsigned) cache_line_counter * 64);
1319 printf("Average LPM Add: %g cycles\n",
1320 (double)total_time / NUM_ROUTE_ENTRIES);
1322 /* Measure single Lookup */
1326 for (i = 0; i < ITERATIONS; i++) {
1327 static uint32_t ip_batch[BATCH_SIZE];
1329 for (j = 0; j < BATCH_SIZE; j++)
1330 ip_batch[j] = rte_rand();
1332 /* Lookup per batch */
1333 begin = rte_rdtsc();
1335 for (j = 0; j < BATCH_SIZE; j++) {
1336 if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
1340 total_time += rte_rdtsc() - begin;
1343 printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
1344 (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
1345 (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
1347 /* Measure bulk Lookup */
1350 for (i = 0; i < ITERATIONS; i++) {
1351 static uint32_t ip_batch[BATCH_SIZE];
1352 uint32_t next_hops[BULK_SIZE];
1354 /* Create array of random IP addresses */
1355 for (j = 0; j < BATCH_SIZE; j++)
1356 ip_batch[j] = rte_rand();
1358 /* Lookup per batch */
1359 begin = rte_rdtsc();
1360 for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
1362 rte_lpm_lookup_bulk(lpm, &ip_batch[j], next_hops, BULK_SIZE);
1363 for (k = 0; k < BULK_SIZE; k++)
1364 if (unlikely(!(next_hops[k] & RTE_LPM_LOOKUP_SUCCESS)))
1368 total_time += rte_rdtsc() - begin;
1370 printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
1371 (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
1372 (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
1374 /* Measure LookupX4 */
1377 for (i = 0; i < ITERATIONS; i++) {
1378 static uint32_t ip_batch[BATCH_SIZE];
1379 uint32_t next_hops[4];
1381 /* Create array of random IP addresses */
1382 for (j = 0; j < BATCH_SIZE; j++)
1383 ip_batch[j] = rte_rand();
1385 /* Lookup per batch */
1386 begin = rte_rdtsc();
1387 for (j = 0; j < BATCH_SIZE; j += RTE_DIM(next_hops)) {
1391 ipx4 = _mm_loadu_si128((__m128i *)(ip_batch + j));
1392 ipx4 = *(__m128i *)(ip_batch + j);
1393 rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT32_MAX);
1394 for (k = 0; k < RTE_DIM(next_hops); k++)
1395 if (unlikely(next_hops[k] == UINT32_MAX))
1399 total_time += rte_rdtsc() - begin;
1401 printf("LPM LookupX4: %.1f cycles (fails = %.1f%%)\n",
1402 (double)total_time / ((double)ITERATIONS * BATCH_SIZE),
1403 (count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
1407 begin = rte_rdtsc();
1409 for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
1410 /* rte_lpm_delete(lpm, ip, depth) */
1411 status += rte_lpm_delete(lpm, large_route_table[i].ip,
1412 large_route_table[i].depth);
1415 total_time += rte_rdtsc() - begin;
1417 printf("Average LPM Delete: %g cycles\n",
1418 (double)total_time / NUM_ROUTE_ENTRIES);
1420 rte_lpm_delete_all(lpm);
1427 * Do all unit and performance tests.
1434 int status, global_status = 0;
1436 for (i = 0; i < NUM_LPM_TESTS; i++) {
1437 status = tests[i]();
1439 printf("ERROR: LPM Test %s: FAIL\n", RTE_STR(tests[i]));
1440 global_status = status;
1444 return global_status;
1447 static struct test_command lpm_cmd = {
1448 .command = "lpm_autotest",
1449 .callback = test_lpm,
1451 REGISTER_TEST_COMMAND(lpm_cmd);