1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
13 #include "test_xmmt_ops.h"
15 #define TEST_LPM_ASSERT(cond) do { \
17 printf("Error at line %d: \n", __LINE__); \
/* Signature shared by every LPM test case below: no arguments, returns
 * PASS/FAIL status as an int32_t. The tests[] table (elided here) holds
 * one entry per forward-declared test. */
22 typedef int32_t (*rte_lpm_test)(void);
/* Forward declarations for all test cases, in execution order. */
24 static int32_t test0(void);
25 static int32_t test1(void);
26 static int32_t test2(void);
27 static int32_t test3(void);
28 static int32_t test4(void);
29 static int32_t test5(void);
30 static int32_t test6(void);
31 static int32_t test7(void);
32 static int32_t test8(void);
33 static int32_t test9(void);
34 static int32_t test10(void);
35 static int32_t test11(void);
36 static int32_t test12(void);
37 static int32_t test13(void);
38 static int32_t test14(void);
39 static int32_t test15(void);
40 static int32_t test16(void);
41 static int32_t test17(void);
42 static int32_t test18(void);
44 rte_lpm_test tests[] = {
69 #define NUMBER_TBL8S 256
/*
 * test0: rte_lpm_create() must fail gracefully (return NULL) for bad
 * user input: NULL table name, max_rules == 0, and socket_id < -1.
 * NOTE(review): this fragment is truncated by extraction — the function
 * signature and several lines (gaps in the embedded numbering) are not
 * visible here; code left byte-identical.
 */
73 * Check that rte_lpm_create fails gracefully for incorrect user input
79 struct rte_lpm *lpm = NULL;
80 struct rte_lpm_config config;
82 config.max_rules = MAX_RULES;
83 config.number_tbl8s = NUMBER_TBL8S;
86 /* rte_lpm_create: lpm name == NULL */
87 lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, &config);
88 TEST_LPM_ASSERT(lpm == NULL);
90 /* rte_lpm_create: max_rules = 0 */
91 /* Note: __func__ inserts the function name, in this case "test0". */
/* NOTE(review): a "config.max_rules = 0;" line (original line 92) appears
 * elided here — without it the call below would not test max_rules == 0;
 * confirm against the upstream file. */
93 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
94 TEST_LPM_ASSERT(lpm == NULL);
96 /* socket_id < -1 is invalid */
97 config.max_rules = MAX_RULES;
98 lpm = rte_lpm_create(__func__, -2, &config);
99 TEST_LPM_ASSERT(lpm == NULL);
/*
 * test1: create and free an LPM table 100 times, shrinking max_rules by
 * one each iteration to vary the allocation size. rte_lpm_free() has no
 * return value, so only creation is asserted.
 * NOTE(review): fragment truncated by extraction (signature, loop close,
 * free call and return elided); code left byte-identical.
 */
105 * Create lpm table then delete lpm table 100 times
106 * Use a slightly different rules size each time
111 struct rte_lpm *lpm = NULL;
112 struct rte_lpm_config config;
114 config.number_tbl8s = NUMBER_TBL8S;
118 /* rte_lpm_free: Free NULL */
119 for (i = 0; i < 100; i++) {
120 config.max_rules = MAX_RULES - i;
121 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
122 TEST_LPM_ASSERT(lpm != NULL);
127 /* Cannot test free so return success */
/*
 * test2: call rte_lpm_free() on a NULL pointer. free() returns void so
 * failure cannot be observed; the test exists for coverage and to prove
 * freeing NULL does not crash.
 * NOTE(review): fragment truncated by extraction; code left byte-identical.
 */
132 * Call rte_lpm_free for NULL pointer user input. Note: free has no return and
133 * therefore it is impossible to check for failure but this test is added to
134 * increase function coverage metrics and to validate that freeing null does
140 struct rte_lpm *lpm = NULL;
141 struct rte_lpm_config config;
143 config.max_rules = MAX_RULES;
144 config.number_tbl8s = NUMBER_TBL8S;
147 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
148 TEST_LPM_ASSERT(lpm != NULL);
/*
 * test3: rte_lpm_add() must reject bad arguments: NULL lpm handle,
 * depth < 1, and depth > MAX_DEPTH. Each bad call must return < 0.
 * NOTE(review): fragment truncated by extraction; code left byte-identical.
 */
156 * Check that rte_lpm_add fails gracefully for incorrect user input arguments
161 struct rte_lpm *lpm = NULL;
162 struct rte_lpm_config config;
164 config.max_rules = MAX_RULES;
165 config.number_tbl8s = NUMBER_TBL8S;
167 uint32_t ip = RTE_IPV4(0, 0, 0, 0), next_hop = 100;
171 /* rte_lpm_add: lpm == NULL */
172 status = rte_lpm_add(NULL, ip, depth, next_hop);
173 TEST_LPM_ASSERT(status < 0);
175 /* Create valid lpm to use in rest of test. */
176 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
177 TEST_LPM_ASSERT(lpm != NULL);
179 /* rte_lpm_add: depth < 1 */
180 status = rte_lpm_add(lpm, ip, 0, next_hop);
181 TEST_LPM_ASSERT(status < 0);
183 /* rte_lpm_add: depth > MAX_DEPTH */
184 status = rte_lpm_add(lpm, ip, (MAX_DEPTH + 1), next_hop);
185 TEST_LPM_ASSERT(status < 0);
/*
 * test4: rte_lpm_delete() must reject bad arguments: NULL lpm handle,
 * depth < 1, and depth > MAX_DEPTH. Mirrors test3 for the delete path.
 * NOTE(review): fragment truncated by extraction; code left byte-identical.
 */
193 * Check that rte_lpm_delete fails gracefully for incorrect user input
199 struct rte_lpm *lpm = NULL;
200 struct rte_lpm_config config;
202 config.max_rules = MAX_RULES;
203 config.number_tbl8s = NUMBER_TBL8S;
205 uint32_t ip = RTE_IPV4(0, 0, 0, 0);
209 /* rte_lpm_delete: lpm == NULL */
210 status = rte_lpm_delete(NULL, ip, depth);
211 TEST_LPM_ASSERT(status < 0);
213 /* Create valid lpm to use in rest of test. */
214 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
215 TEST_LPM_ASSERT(lpm != NULL);
217 /* rte_lpm_delete: depth < 1 */
218 status = rte_lpm_delete(lpm, ip, 0);
219 TEST_LPM_ASSERT(status < 0);
221 /* rte_lpm_delete: depth > MAX_DEPTH */
222 status = rte_lpm_delete(lpm, ip, (MAX_DEPTH + 1));
223 TEST_LPM_ASSERT(status < 0);
/*
 * test5: rte_lpm_lookup() must reject bad arguments (NULL lpm handle,
 * NULL next-hop output pointer). Whole body is compiled only when
 * RTE_LIBRTE_LPM_DEBUG is defined, since the argument checks it relies
 * on exist only in the debug build.
 * NOTE(review): fragment truncated by extraction; code left byte-identical.
 */
231 * Check that rte_lpm_lookup fails gracefully for incorrect user input
237 #if defined(RTE_LIBRTE_LPM_DEBUG)
238 struct rte_lpm *lpm = NULL;
239 struct rte_lpm_config config;
241 config.max_rules = MAX_RULES;
242 config.number_tbl8s = NUMBER_TBL8S;
244 uint32_t ip = RTE_IPV4(0, 0, 0, 0), next_hop_return = 0;
247 /* rte_lpm_lookup: lpm == NULL */
248 status = rte_lpm_lookup(NULL, ip, &next_hop_return);
249 TEST_LPM_ASSERT(status < 0);
251 /* Create valid lpm to use in rest of test. */
252 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
253 TEST_LPM_ASSERT(lpm != NULL);
255 /* rte_lpm_lookup: next_hop = NULL */
256 status = rte_lpm_lookup(lpm, ip, NULL);
257 TEST_LPM_ASSERT(status < 0);
/*
 * test6: single-rule round trip at depth <= 24 (tbl24-only path):
 * add -> lookup hit -> delete -> lookup miss (-ENOENT).
 * NOTE(review): fragment truncated by extraction; code left byte-identical.
 */
267 * Call add, lookup and delete for a single rule with depth <= 24
272 struct rte_lpm *lpm = NULL;
273 struct rte_lpm_config config;
275 config.max_rules = MAX_RULES;
276 config.number_tbl8s = NUMBER_TBL8S;
278 uint32_t ip = RTE_IPV4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
282 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
283 TEST_LPM_ASSERT(lpm != NULL);
285 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
286 TEST_LPM_ASSERT(status == 0);
288 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
289 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
291 status = rte_lpm_delete(lpm, ip, depth);
292 TEST_LPM_ASSERT(status == 0);
294 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
295 TEST_LPM_ASSERT(status == -ENOENT);
/*
 * test7: single-rule round trip at depth > 24 (tbl8-extension path),
 * plus a vector lookup: rte_lpm_lookupx4 on {ip, ip+0x100, ip-0x100, ip}
 * must hit on the exact-ip lanes and return UINT32_MAX (the supplied
 * default) on the others.
 * NOTE(review): fragment truncated by extraction; code left byte-identical.
 */
303 * Call add, lookup and delete for a single rule with depth > 24
311 struct rte_lpm *lpm = NULL;
312 struct rte_lpm_config config;
314 config.max_rules = MAX_RULES;
315 config.number_tbl8s = NUMBER_TBL8S;
317 uint32_t ip = RTE_IPV4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
321 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
322 TEST_LPM_ASSERT(lpm != NULL);
324 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
325 TEST_LPM_ASSERT(status == 0);
327 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
328 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
330 ipx4 = vect_set_epi32(ip, ip + 0x100, ip - 0x100, ip);
331 rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
332 TEST_LPM_ASSERT(hop[0] == next_hop_add);
333 TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
334 TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
335 TEST_LPM_ASSERT(hop[3] == next_hop_add);
337 status = rte_lpm_delete(lpm, ip, depth);
338 TEST_LPM_ASSERT(status == 0);
340 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
341 TEST_LPM_ASSERT(status == -ENOENT);
/*
 * test8: exercise all depths 1..32 on the upper half of the address
 * space (128.0.0.0), asserting hits there and misses on the lower half
 * (127.255.255.255) after every add; then delete from depth 32 down to
 * 1, checking each delete exposes the next-shallower rule.
 * NOTE(review): fragment truncated by extraction; code left byte-identical.
 */
349 * Use rte_lpm_add to add rules which effect only the second half of the lpm
350 * table. Use all possible depths ranging from 1..32. Set the next hop = to the
351 * depth. Check lookup hit on every add and check for lookup miss on the
352 * first half of the lpm table after each add. Finally delete all rules going
353 * backwards (i.e. from depth = 32 ..1) and carry out a lookup after each
354 * delete. The lookup should return the next_hop_add value related to the
355 * previous depth value (i.e. depth -1).
362 struct rte_lpm *lpm = NULL;
363 struct rte_lpm_config config;
365 config.max_rules = MAX_RULES;
366 config.number_tbl8s = NUMBER_TBL8S;
368 uint32_t ip1 = RTE_IPV4(127, 255, 255, 255), ip2 = RTE_IPV4(128, 0, 0, 0);
369 uint32_t next_hop_add, next_hop_return;
373 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
374 TEST_LPM_ASSERT(lpm != NULL);
376 /* Loop with rte_lpm_add. */
377 for (depth = 1; depth <= 32; depth++) {
378 /* Let the next_hop_add value = depth. Just for change. */
379 next_hop_add = depth;
381 status = rte_lpm_add(lpm, ip2, depth, next_hop_add);
382 TEST_LPM_ASSERT(status == 0);
384 /* Check IP in first half of tbl24 which should be empty. */
385 status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
386 TEST_LPM_ASSERT(status == -ENOENT);
388 status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
389 TEST_LPM_ASSERT((status == 0) &&
390 (next_hop_return == next_hop_add));
392 ipx4 = vect_set_epi32(ip2, ip1, ip2, ip1);
393 rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
394 TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
395 TEST_LPM_ASSERT(hop[1] == next_hop_add);
396 TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
397 TEST_LPM_ASSERT(hop[3] == next_hop_add);
400 /* Loop with rte_lpm_delete. */
401 for (depth = 32; depth >= 1; depth--) {
402 next_hop_add = (uint8_t) (depth - 1);
404 status = rte_lpm_delete(lpm, ip2, depth);
405 TEST_LPM_ASSERT(status == 0);
/* NOTE(review): the asserts below originally sat inside an
 * if (depth != 1) / else split (hit with shallower rule vs. -ENOENT
 * after the last delete); the branch keywords are elided here —
 * confirm against the upstream file. */
407 status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
410 TEST_LPM_ASSERT((status == 0) &&
411 (next_hop_return == next_hop_add));
413 TEST_LPM_ASSERT(status == -ENOENT);
416 status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
417 TEST_LPM_ASSERT(status == -ENOENT);
419 ipx4 = vect_set_epi32(ip1, ip1, ip2, ip2);
420 rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
422 TEST_LPM_ASSERT(hop[0] == next_hop_add);
423 TEST_LPM_ASSERT(hop[1] == next_hop_add);
425 TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
426 TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
428 TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
429 TEST_LPM_ASSERT(hop[3] == UINT32_MAX);
/*
 * test9: four tbl24/tbl8 scenarios, each ending with delete + miss
 * check and rte_lpm_delete_all() between scenarios:
 *   1. hit an invalid (empty) tbl24 entry,
 *   2. hit a valid tbl24 entry that was never extended,
 *   3. hit a valid extended tbl24 entry whose tbl8 entry is invalid,
 *   4. hit a valid extended tbl24 entry with a valid tbl8 entry
 *      (two overlapping rules: /24-ish covering rule and /32-ish
 *      specific rule; deleting the specific one must fall back to the
 *      covering one).
 * NOTE(review): fragment truncated by extraction — depth/next_hop_add
 * assignment lines are elided at several points; code left byte-identical.
 */
438 * - Add & lookup to hit invalid TBL24 entry
439 * - Add & lookup to hit valid TBL24 entry not extended
440 * - Add & lookup to hit valid extended TBL24 entry with invalid TBL8 entry
441 * - Add & lookup to hit valid extended TBL24 entry with valid TBL8 entry
447 struct rte_lpm *lpm = NULL;
448 struct rte_lpm_config config;
450 config.max_rules = MAX_RULES;
451 config.number_tbl8s = NUMBER_TBL8S;
453 uint32_t ip, ip_1, ip_2;
454 uint8_t depth, depth_1, depth_2;
455 uint32_t next_hop_add, next_hop_add_1, next_hop_add_2, next_hop_return;
458 /* Add & lookup to hit invalid TBL24 entry */
459 ip = RTE_IPV4(128, 0, 0, 0);
463 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
464 TEST_LPM_ASSERT(lpm != NULL);
466 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
467 TEST_LPM_ASSERT(status == 0);
469 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
470 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
472 status = rte_lpm_delete(lpm, ip, depth);
473 TEST_LPM_ASSERT(status == 0);
475 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
476 TEST_LPM_ASSERT(status == -ENOENT);
478 rte_lpm_delete_all(lpm);
480 /* Add & lookup to hit valid TBL24 entry not extended */
481 ip = RTE_IPV4(128, 0, 0, 0);
485 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
486 TEST_LPM_ASSERT(status == 0);
488 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
489 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
494 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
495 TEST_LPM_ASSERT(status == 0);
497 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
498 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
502 status = rte_lpm_delete(lpm, ip, depth);
503 TEST_LPM_ASSERT(status == 0);
507 status = rte_lpm_delete(lpm, ip, depth);
508 TEST_LPM_ASSERT(status == 0);
510 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
511 TEST_LPM_ASSERT(status == -ENOENT);
513 rte_lpm_delete_all(lpm);
515 /* Add & lookup to hit valid extended TBL24 entry with invalid TBL8
517 ip = RTE_IPV4(128, 0, 0, 0);
521 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
522 TEST_LPM_ASSERT(status == 0);
524 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
525 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
527 ip = RTE_IPV4(128, 0, 0, 5);
531 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
532 TEST_LPM_ASSERT(status == 0);
534 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
535 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
537 status = rte_lpm_delete(lpm, ip, depth);
538 TEST_LPM_ASSERT(status == 0);
540 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
541 TEST_LPM_ASSERT(status == -ENOENT);
543 ip = RTE_IPV4(128, 0, 0, 0);
547 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
548 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
550 status = rte_lpm_delete(lpm, ip, depth);
551 TEST_LPM_ASSERT(status == 0);
553 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
554 TEST_LPM_ASSERT(status == -ENOENT);
556 rte_lpm_delete_all(lpm);
558 /* Add & lookup to hit valid extended TBL24 entry with valid TBL8
560 ip_1 = RTE_IPV4(128, 0, 0, 0);
562 next_hop_add_1 = 101;
564 ip_2 = RTE_IPV4(128, 0, 0, 5);
566 next_hop_add_2 = 102;
570 status = rte_lpm_add(lpm, ip_1, depth_1, next_hop_add_1);
571 TEST_LPM_ASSERT(status == 0);
573 status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
574 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
576 status = rte_lpm_add(lpm, ip_2, depth_2, next_hop_add_2);
577 TEST_LPM_ASSERT(status == 0);
579 status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
580 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2));
582 status = rte_lpm_delete(lpm, ip_2, depth_2);
583 TEST_LPM_ASSERT(status == 0);
/* After deleting the specific rule, ip_2 must fall back to rule 1. */
585 status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
586 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
588 status = rte_lpm_delete(lpm, ip_1, depth_1);
589 TEST_LPM_ASSERT(status == 0);
591 status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
592 TEST_LPM_ASSERT(status == -ENOENT);
/*
 * test10: add/delete interactions with tbl24 and tbl8 state:
 *  - cover a previously-invalid tbl24 range, delete, re-check miss;
 *  - extend an invalid tbl24 entry;
 *  - extend a valid tbl24 entry and look up both rules;
 *  - overwrite the next hop of an existing tbl24 rule, then tbl8 rule;
 *  - delete rules that were never added (must fail with < 0).
 * NOTE(review): fragment truncated by extraction — depth/next_hop_add
 * assignments between steps are elided; code left byte-identical.
 */
601 * - Add rule that covers a TBL24 range previously invalid & lookup (& delete &
603 * - Add rule that extends a TBL24 invalid entry & lookup (& delete & lookup)
604 * - Add rule that extends a TBL24 valid entry & lookup for both rules (&
606 * - Add rule that updates the next hop in TBL24 & lookup (& delete & lookup)
607 * - Add rule that updates the next hop in TBL8 & lookup (& delete & lookup)
608 * - Delete a rule that is not present in the TBL24 & lookup
609 * - Delete a rule that is not present in the TBL8 & lookup
616 struct rte_lpm *lpm = NULL;
617 struct rte_lpm_config config;
619 config.max_rules = MAX_RULES;
620 config.number_tbl8s = NUMBER_TBL8S;
622 uint32_t ip, next_hop_add, next_hop_return;
626 /* Add rule that covers a TBL24 range previously invalid & lookup
627 * (& delete & lookup) */
628 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
629 TEST_LPM_ASSERT(lpm != NULL);
631 ip = RTE_IPV4(128, 0, 0, 0);
635 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
636 TEST_LPM_ASSERT(status == 0);
638 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
639 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
641 status = rte_lpm_delete(lpm, ip, depth);
642 TEST_LPM_ASSERT(status == 0);
644 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
645 TEST_LPM_ASSERT(status == -ENOENT);
647 rte_lpm_delete_all(lpm);
/* Rule that extends an invalid tbl24 entry. */
649 ip = RTE_IPV4(128, 0, 0, 0);
653 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
654 TEST_LPM_ASSERT(status == 0);
656 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
657 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
659 status = rte_lpm_delete(lpm, ip, depth);
660 TEST_LPM_ASSERT(status == 0);
662 rte_lpm_delete_all(lpm);
664 /* Add rule that extends a TBL24 valid entry & lookup for both rules
665 * (& delete & lookup) */
667 ip = RTE_IPV4(128, 0, 0, 0);
671 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
672 TEST_LPM_ASSERT(status == 0);
674 ip = RTE_IPV4(128, 0, 0, 10);
678 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
679 TEST_LPM_ASSERT(status == 0);
681 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
682 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
684 ip = RTE_IPV4(128, 0, 0, 0);
687 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
688 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
690 ip = RTE_IPV4(128, 0, 0, 0);
693 status = rte_lpm_delete(lpm, ip, depth);
694 TEST_LPM_ASSERT(status == 0);
696 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
697 TEST_LPM_ASSERT(status == -ENOENT);
699 ip = RTE_IPV4(128, 0, 0, 10);
702 status = rte_lpm_delete(lpm, ip, depth);
703 TEST_LPM_ASSERT(status == 0);
705 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
706 TEST_LPM_ASSERT(status == -ENOENT);
708 rte_lpm_delete_all(lpm);
710 /* Add rule that updates the next hop in TBL24 & lookup
711 * (& delete & lookup) */
713 ip = RTE_IPV4(128, 0, 0, 0);
717 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
718 TEST_LPM_ASSERT(status == 0);
720 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
721 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
/* Re-add same prefix with a new next hop (elided assignment above). */
725 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
726 TEST_LPM_ASSERT(status == 0);
728 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
729 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
731 status = rte_lpm_delete(lpm, ip, depth);
732 TEST_LPM_ASSERT(status == 0);
734 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
735 TEST_LPM_ASSERT(status == -ENOENT);
737 rte_lpm_delete_all(lpm);
739 /* Add rule that updates the next hop in TBL8 & lookup
740 * (& delete & lookup) */
742 ip = RTE_IPV4(128, 0, 0, 0);
746 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
747 TEST_LPM_ASSERT(status == 0);
749 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
750 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
754 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
755 TEST_LPM_ASSERT(status == 0);
757 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
758 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
760 status = rte_lpm_delete(lpm, ip, depth);
761 TEST_LPM_ASSERT(status == 0);
763 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
764 TEST_LPM_ASSERT(status == -ENOENT);
766 rte_lpm_delete_all(lpm);
768 /* Delete a rule that is not present in the TBL24 & lookup */
770 ip = RTE_IPV4(128, 0, 0, 0);
773 status = rte_lpm_delete(lpm, ip, depth);
774 TEST_LPM_ASSERT(status < 0);
776 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
777 TEST_LPM_ASSERT(status == -ENOENT);
779 rte_lpm_delete_all(lpm);
781 /* Delete a rule that is not present in the TBL8 & lookup */
783 ip = RTE_IPV4(128, 0, 0, 0);
786 status = rte_lpm_delete(lpm, ip, depth);
787 TEST_LPM_ASSERT(status < 0);
789 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
790 TEST_LPM_ASSERT(status == -ENOENT);
/*
 * test11: overlapping rules — a covering rule at 128.0.0.0 and a more
 * specific one at 128.0.0.10. Look up through both, delete each in
 * turn, and verify lookups go to -ENOENT once both are gone.
 * NOTE(review): fragment truncated by extraction (depth/next_hop_add
 * assignments elided); code left byte-identical.
 */
798 * Add two rules, lookup to hit the more specific one, lookup to hit the less
799 * specific one delete the less specific rule and lookup previous values again;
800 * add a more specific rule than the existing rule, lookup again
807 struct rte_lpm *lpm = NULL;
808 struct rte_lpm_config config;
810 config.max_rules = MAX_RULES;
811 config.number_tbl8s = NUMBER_TBL8S;
813 uint32_t ip, next_hop_add, next_hop_return;
817 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
818 TEST_LPM_ASSERT(lpm != NULL);
820 ip = RTE_IPV4(128, 0, 0, 0);
824 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
825 TEST_LPM_ASSERT(status == 0);
827 ip = RTE_IPV4(128, 0, 0, 10);
831 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
832 TEST_LPM_ASSERT(status == 0);
834 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
835 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
837 ip = RTE_IPV4(128, 0, 0, 0);
840 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
841 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
843 ip = RTE_IPV4(128, 0, 0, 0);
846 status = rte_lpm_delete(lpm, ip, depth);
847 TEST_LPM_ASSERT(status == 0);
849 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
850 TEST_LPM_ASSERT(status == -ENOENT);
852 ip = RTE_IPV4(128, 0, 0, 10);
855 status = rte_lpm_delete(lpm, ip, depth);
856 TEST_LPM_ASSERT(status == 0);
858 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
859 TEST_LPM_ASSERT(status == -ENOENT);
/*
 * test12: stress tbl8 extension/contraction — 1000 iterations of
 * add (depth > 24) -> scalar lookup hit -> vector lookupx4 (hits on
 * lanes 1/3, UINT32_MAX default on lanes 0/2) -> delete -> lookup miss.
 * NOTE(review): fragment truncated by extraction; code left byte-identical.
 */
867 * Add an extended rule (i.e. depth greater than 24, lookup (hit), delete,
868 * lookup (miss) in a for loop of 1000 times. This will check tbl8 extension
878 struct rte_lpm *lpm = NULL;
879 struct rte_lpm_config config;
881 config.max_rules = MAX_RULES;
882 config.number_tbl8s = NUMBER_TBL8S;
884 uint32_t ip, i, next_hop_add, next_hop_return;
888 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
889 TEST_LPM_ASSERT(lpm != NULL);
891 ip = RTE_IPV4(128, 0, 0, 0);
895 for (i = 0; i < 1000; i++) {
896 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
897 TEST_LPM_ASSERT(status == 0);
899 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
900 TEST_LPM_ASSERT((status == 0) &&
901 (next_hop_return == next_hop_add));
903 ipx4 = vect_set_epi32(ip, ip + 1, ip, ip - 1);
904 rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
905 TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
906 TEST_LPM_ASSERT(hop[1] == next_hop_add);
907 TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
908 TEST_LPM_ASSERT(hop[3] == next_hop_add);
910 status = rte_lpm_delete(lpm, ip, depth);
911 TEST_LPM_ASSERT(status == 0);
913 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
914 TEST_LPM_ASSERT(status == -ENOENT);
/*
 * test13: tbl8 extension + contraction with a persistent covering rule.
 * Add a tbl24 rule once, then 1000x: add a deeper rule (forces tbl8
 * extension), verify it wins the lookup, delete it, and verify lookup
 * falls back to the covering rule (contraction). Finally remove the
 * covering rule and verify -ENOENT.
 * NOTE(review): fragment truncated by extraction; code left byte-identical.
 */
923 * Add a rule to tbl24, lookup (hit), then add a rule that will extend this
924 * tbl24 entry, lookup (hit). delete the rule that caused the tbl24 extension,
925 * lookup (miss) and repeat for loop of 1000 times. This will check tbl8
926 * extension and contraction.
933 struct rte_lpm *lpm = NULL;
934 struct rte_lpm_config config;
936 config.max_rules = MAX_RULES;
937 config.number_tbl8s = NUMBER_TBL8S;
939 uint32_t ip, i, next_hop_add_1, next_hop_add_2, next_hop_return;
943 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
944 TEST_LPM_ASSERT(lpm != NULL);
946 ip = RTE_IPV4(128, 0, 0, 0);
948 next_hop_add_1 = 100;
950 status = rte_lpm_add(lpm, ip, depth, next_hop_add_1);
951 TEST_LPM_ASSERT(status == 0);
953 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
954 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
957 next_hop_add_2 = 101;
959 for (i = 0; i < 1000; i++) {
960 status = rte_lpm_add(lpm, ip, depth, next_hop_add_2);
961 TEST_LPM_ASSERT(status == 0);
963 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
964 TEST_LPM_ASSERT((status == 0) &&
965 (next_hop_return == next_hop_add_2));
967 status = rte_lpm_delete(lpm, ip, depth);
968 TEST_LPM_ASSERT(status == 0);
/* After the deep rule is deleted, the covering rule must answer. */
970 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
971 TEST_LPM_ASSERT((status == 0) &&
972 (next_hop_return == next_hop_add_1));
977 status = rte_lpm_delete(lpm, ip, depth);
978 TEST_LPM_ASSERT(status == 0);
980 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
981 TEST_LPM_ASSERT(status == -ENOENT);
/*
 * test14: exhaust the tbl8 groups. Add 256 /32 rules (one per distinct
 * /24 prefix, each forcing a tbl8 extension), then verify one more
 * extension-requiring add fails with a negative status.
 * NOTE(review): fragment truncated by extraction; code left byte-identical.
 */
989 * Force TBL8 extension exhaustion. Add 256 rules that require a tbl8 extension.
990 * No more tbl8 extensions will be allowed. Now add one more rule that required
991 * a tbl8 extension and get fail.
997 /* We only use depth = 32 in the loop below so we must make sure
998 * that we have enough storage for all rules at that depth*/
1000 struct rte_lpm *lpm = NULL;
1001 struct rte_lpm_config config;
1003 config.max_rules = 256 * 32;
1004 config.number_tbl8s = NUMBER_TBL8S;
1006 uint32_t ip, next_hop_add, next_hop_return;
1010 /* Add enough space for 256 rules for every depth */
1011 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
1012 TEST_LPM_ASSERT(lpm != NULL);
1016 ip = RTE_IPV4(0, 0, 0, 0);
1018 /* Add 256 rules that require a tbl8 extension */
1019 for (; ip <= RTE_IPV4(0, 0, 255, 0); ip += 256) {
1020 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
1021 TEST_LPM_ASSERT(status == 0);
1023 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
1024 TEST_LPM_ASSERT((status == 0) &&
1025 (next_hop_return == next_hop_add));
1028 /* All tbl8 extensions have been used above. Try to add one more and
1030 ip = RTE_IPV4(1, 0, 0, 0);
1033 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
1034 TEST_LPM_ASSERT(status < 0);
/*
 * test15: rte_lpm_find_existing() — create a named table, finding it by
 * name must return the same handle; finding an unknown name must
 * return NULL.
 * NOTE(review): fragment truncated by extraction; code left byte-identical.
 */
1042 * Sequence of operations for find existing lpm table
1045 * - find existing table: hit
1046 * - find non-existing table: miss
1052 struct rte_lpm *lpm = NULL, *result = NULL;
1053 struct rte_lpm_config config;
1055 config.max_rules = 256 * 32;
1056 config.number_tbl8s = NUMBER_TBL8S;
1060 lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, &config);
1061 TEST_LPM_ASSERT(lpm != NULL);
1063 /* Try to find existing lpm */
1064 result = rte_lpm_find_existing("lpm_find_existing");
1065 TEST_LPM_ASSERT(result == lpm);
1067 /* Try to find non-existing lpm */
1068 result = rte_lpm_find_existing("lpm_find_non_existing");
1069 TEST_LPM_ASSERT(result == NULL);
1072 rte_lpm_delete_all(lpm);
/*
 * test16: overload tbl8 groups. Each added /30 rule lands in a distinct
 * /24 prefix, consuming one tbl8 group per add; the loop must fail
 * exactly after NUMBER_TBL8S successful additions.
 * NOTE(review): fragment truncated by extraction (the break statement
 * after the failing add, and the final return, are elided); code left
 * byte-identical.
 */
1079 * test failure condition of overloading the tbl8 so no more will fit
1080 * Check we get an error return value in that case
1086 struct rte_lpm_config config;
1088 config.max_rules = 256 * 32;
1089 config.number_tbl8s = NUMBER_TBL8S;
1091 struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
1093 /* ip loops through all possibilities for top 24 bits of address */
1094 for (ip = 0; ip < 0xFFFFFF; ip++) {
1095 /* add an entry within a different tbl8 each time, since
1096 * depth >24 and the top 24 bits are different */
1097 if (rte_lpm_add(lpm, (ip << 8) + 0xF0, 30, 0) < 0)
1101 if (ip != NUMBER_TBL8S) {
1102 printf("Error, unexpected failure with filling tbl8 groups\n");
1103 printf("Failed after %u additions, expected after %u\n",
1104 (unsigned)ip, (unsigned)NUMBER_TBL8S);
/*
 * test17: tbl8 must not be clobbered by later adds. Insert /32, /24 and
 * /25 rules in turn, verify each lookup, check the three resolved next
 * hops are pairwise distinct, then re-look-up the /32 and /24 rules to
 * prove earlier entries survived the later insertions.
 * NOTE(review): fragment truncated by extraction (d_ip_10_24/d_ip_20_25
 * initializers and the "return -1" error paths are elided); code left
 * byte-identical.
 */
1112 * Test for overwriting of tbl8:
1113 * - add rule /32 and lookup
1114 * - add new rule /24 and lookup
1115 * - add third rule /25 and lookup
1116 * - lookup /32 and /24 rule to ensure the table has not been overwritten.
1121 struct rte_lpm *lpm = NULL;
1122 struct rte_lpm_config config;
1124 config.max_rules = MAX_RULES;
1125 config.number_tbl8s = NUMBER_TBL8S;
1127 const uint32_t ip_10_32 = RTE_IPV4(10, 10, 10, 2);
1128 const uint32_t ip_10_24 = RTE_IPV4(10, 10, 10, 0);
1129 const uint32_t ip_20_25 = RTE_IPV4(10, 10, 20, 2);
1130 const uint8_t d_ip_10_32 = 32,
1133 const uint32_t next_hop_ip_10_32 = 100,
1134 next_hop_ip_10_24 = 105,
1135 next_hop_ip_20_25 = 111;
1136 uint32_t next_hop_return = 0;
1139 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
1140 TEST_LPM_ASSERT(lpm != NULL);
1142 if ((status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32,
1143 next_hop_ip_10_32)) < 0)
1146 status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
1147 uint32_t test_hop_10_32 = next_hop_return;
1148 TEST_LPM_ASSERT(status == 0);
1149 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
1151 if ((status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24,
1152 next_hop_ip_10_24)) < 0)
1155 status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
1156 uint32_t test_hop_10_24 = next_hop_return;
1157 TEST_LPM_ASSERT(status == 0);
1158 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
1160 if ((status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25,
1161 next_hop_ip_20_25)) < 0)
1164 status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
1165 uint32_t test_hop_20_25 = next_hop_return;
1166 TEST_LPM_ASSERT(status == 0);
1167 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);
1169 if (test_hop_10_32 == test_hop_10_24) {
1170 printf("Next hop return equal\n");
1174 if (test_hop_10_24 == test_hop_20_25) {
1175 printf("Next hop return equal\n");
/* Final re-check: earlier rules still resolve correctly. */
1179 status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
1180 TEST_LPM_ASSERT(status == 0);
1181 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
1183 status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
1184 TEST_LPM_ASSERT(status == 0);
1185 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
/*
 * test18: tbl8 group recycling. Uses internal table state directly
 * (lpm->tbl24[..].valid_group / group_idx) to verify that deleting the
 * only deep rule frees the tbl8 group and that re-adding a deep rule
 * reuses the same group index.
 * NOTE(review): fragment truncated by extraction (depth/next_hop
 * assignments and step markers between the calls are elided); code left
 * byte-identical.
 */
1193 * Test for recycle of tbl8
1194 * - step 1: add a rule with depth=28 (> 24)
1195 * - step 2: add a rule with same 24-bit prefix and depth=23 (< 24)
1196 * - step 3: delete the first rule
1197 * - step 4: check tbl8 is freed
1198 * - step 5: add a rule same as the first one (depth=28)
1199 * - step 6: check same tbl8 is allocated
1200 * - step 7: add a rule with same 24-bit prefix and depth=24
1201 * - step 8: delete the rule (depth=28) added in step 5
1202 * - step 9: check tbl8 is freed
1203 * - step 10: add a rule with same 24-bit prefix and depth = 28
1204 * - step 11: check same tbl8 is allocated again
1209 #define group_idx next_hop
1210 struct rte_lpm *lpm = NULL;
1211 struct rte_lpm_config config;
1212 uint32_t ip, next_hop;
1214 uint32_t tbl8_group_index;
1216 config.max_rules = MAX_RULES;
1217 config.number_tbl8s = NUMBER_TBL8S;
1220 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
1221 TEST_LPM_ASSERT(lpm != NULL);
1223 ip = RTE_IPV4(192, 168, 100, 100);
1226 rte_lpm_add(lpm, ip, depth, next_hop);
1228 TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
1229 tbl8_group_index = lpm->tbl24[ip>>8].group_idx;
1233 rte_lpm_add(lpm, ip, depth, next_hop);
1234 TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
1237 rte_lpm_delete(lpm, ip, depth);
1239 TEST_LPM_ASSERT(!lpm->tbl24[ip>>8].valid_group);
1242 rte_lpm_add(lpm, ip, depth, next_hop);
1244 TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
1245 TEST_LPM_ASSERT(tbl8_group_index == lpm->tbl24[ip>>8].group_idx);
1249 rte_lpm_add(lpm, ip, depth, next_hop);
1250 TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
1253 rte_lpm_delete(lpm, ip, depth);
1255 TEST_LPM_ASSERT(!lpm->tbl24[ip>>8].valid_group);
1258 rte_lpm_add(lpm, ip, depth, next_hop);
1260 TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
1261 TEST_LPM_ASSERT(tbl8_group_index == lpm->tbl24[ip>>8].group_idx);
/*
 * test_lpm: driver — run every entry of tests[] in order; remember the
 * last failing status so one failure fails the whole autotest.
 * NOTE(review): fragment truncated by extraction (the status check
 * around the printf is elided); code left byte-identical.
 */
1269 * Do all unit tests.
1276 int status, global_status = 0;
1278 for (i = 0; i < RTE_DIM(tests); i++) {
1279 status = tests[i]();
1281 printf("ERROR: LPM Test %u: FAIL\n", i);
1282 global_status = status;
1286 return global_status;
/* Registers this driver as the "lpm_autotest" command in the DPDK test app. */
1289 REGISTER_TEST_COMMAND(lpm_autotest, test_lpm);