1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
7 #ifdef RTE_EXEC_ENV_WINDOWS
11 printf("lpm not supported on Windows, skipping test\n");
23 #include <rte_malloc.h>
25 #include "test_xmmt_ops.h"
/* NOTE(review): this file is a numbered, sampled listing — lines are
 * missing between the visible ones (the macro body's closing
 * "} while(0)", the tests[] initializer entries, etc. are absent).
 * Code below is kept byte-identical; only comments were added. */

/* Assertion helper for all tests: on failure, report the source line.
 * (The remainder of the do/while body is not visible in this listing.) */
27 #define TEST_LPM_ASSERT(cond) do { \
29 printf("Error at line %d: \n", __LINE__); \

/* Common signature for every LPM test case: returns 0/PASS on success. */
34 typedef int32_t (*rte_lpm_test)(void);

/* Forward declarations for the individual test cases, collected into
 * the tests[] dispatch table below. */
36 static int32_t test0(void);
37 static int32_t test1(void);
38 static int32_t test2(void);
39 static int32_t test3(void);
40 static int32_t test4(void);
41 static int32_t test5(void);
42 static int32_t test6(void);
43 static int32_t test7(void);
44 static int32_t test8(void);
45 static int32_t test9(void);
46 static int32_t test10(void);
47 static int32_t test11(void);
48 static int32_t test12(void);
49 static int32_t test13(void);
50 static int32_t test14(void);
51 static int32_t test15(void);
52 static int32_t test16(void);
53 static int32_t test17(void);
54 static int32_t test18(void);
55 static int32_t test19(void);
56 static int32_t test20(void);
57 static int32_t test21(void);

/* Table of all test functions (initializer entries missing from listing). */
59 rte_lpm_test tests[] = {

/* Default number of tbl8 groups used by most tests in this file. */
87 #define NUMBER_TBL8S 256
/* NOTE(review): sampled listing fragment of test0() — the function
 * header, braces and some body lines are missing; code kept
 * byte-identical, comments only. */
91 * Check that rte_lpm_create fails gracefully for incorrect user input
97 struct rte_lpm *lpm = NULL;
98 struct rte_lpm_config config;
100 config.max_rules = MAX_RULES;
101 config.number_tbl8s = NUMBER_TBL8S;
104 /* rte_lpm_create: lpm name == NULL */
105 lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, &config);
106 TEST_LPM_ASSERT(lpm == NULL);
108 /* rte_lpm_create: max_rules = 0 */
109 /* Note: __func__ inserts the function name, in this case "test0". */
110 config.max_rules = 0;
111 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
112 TEST_LPM_ASSERT(lpm == NULL);
114 /* socket_id < -1 is invalid */
115 config.max_rules = MAX_RULES;
116 lpm = rte_lpm_create(__func__, -2, &config);
117 TEST_LPM_ASSERT(lpm == NULL);
/* NOTE(review): sampled listing fragment of test1() — function header,
 * braces and the matching rte_lpm_free() call are missing; code kept
 * byte-identical, comments only. */
123 * Create lpm table then delete lpm table 100 times
124 * Use a slightly different rules size each time
129 struct rte_lpm *lpm = NULL;
130 struct rte_lpm_config config;
132 config.number_tbl8s = NUMBER_TBL8S;
136 /* rte_lpm_free: Free NULL */
/* Create 100 tables with decreasing max_rules; presumably each is freed
 * in the (missing) loop body — TODO confirm against the full source. */
137 for (i = 0; i < 100; i++) {
138 config.max_rules = MAX_RULES - i;
139 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
140 TEST_LPM_ASSERT(lpm != NULL);
145 /* Can not test free so return success */
/* NOTE(review): sampled listing fragment of test2() — function header,
 * braces and the rte_lpm_free(NULL) call itself are missing; code kept
 * byte-identical, comments only. */
150 * Call rte_lpm_free for NULL pointer user input. Note: free has no return and
151 * therefore it is impossible to check for failure but this test is added to
152 * increase function coverage metrics and to validate that freeing null does
158 struct rte_lpm *lpm = NULL;
159 struct rte_lpm_config config;
161 config.max_rules = MAX_RULES;
162 config.number_tbl8s = NUMBER_TBL8S;
165 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
166 TEST_LPM_ASSERT(lpm != NULL);
/* NOTE(review): sampled listing fragment of test3() — function header,
 * braces and the depth declaration are missing; code kept
 * byte-identical, comments only. */
174 * Check that rte_lpm_add fails gracefully for incorrect user input arguments
179 struct rte_lpm *lpm = NULL;
180 struct rte_lpm_config config;
182 config.max_rules = MAX_RULES;
183 config.number_tbl8s = NUMBER_TBL8S;
185 uint32_t ip = RTE_IPV4(0, 0, 0, 0), next_hop = 100;
189 /* rte_lpm_add: lpm == NULL */
190 status = rte_lpm_add(NULL, ip, depth, next_hop);
191 TEST_LPM_ASSERT(status < 0);
193 /*Create valid lpm to use in rest of test. */
194 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
195 TEST_LPM_ASSERT(lpm != NULL);
197 /* rte_lpm_add: depth < 1 */
198 status = rte_lpm_add(lpm, ip, 0, next_hop);
199 TEST_LPM_ASSERT(status < 0);
201 /* rte_lpm_add: depth > MAX_DEPTH */
202 status = rte_lpm_add(lpm, ip, (MAX_DEPTH + 1), next_hop);
203 TEST_LPM_ASSERT(status < 0);
/* NOTE(review): sampled listing fragment of test4() — function header,
 * braces and the depth declaration are missing; code kept
 * byte-identical, comments only. */
211 * Check that rte_lpm_delete fails gracefully for incorrect user input
217 struct rte_lpm *lpm = NULL;
218 struct rte_lpm_config config;
220 config.max_rules = MAX_RULES;
221 config.number_tbl8s = NUMBER_TBL8S;
223 uint32_t ip = RTE_IPV4(0, 0, 0, 0);
227 /* rte_lpm_delete: lpm == NULL */
228 status = rte_lpm_delete(NULL, ip, depth);
229 TEST_LPM_ASSERT(status < 0);
231 /*Create valid lpm to use in rest of test. */
232 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
233 TEST_LPM_ASSERT(lpm != NULL);
235 /* rte_lpm_delete: depth < 1 */
236 status = rte_lpm_delete(lpm, ip, 0);
237 TEST_LPM_ASSERT(status < 0);
239 /* rte_lpm_delete: depth > MAX_DEPTH */
240 status = rte_lpm_delete(lpm, ip, (MAX_DEPTH + 1));
241 TEST_LPM_ASSERT(status < 0);
/* NOTE(review): sampled listing fragment of test5() — function header,
 * braces and the matching #endif are missing; code kept byte-identical,
 * comments only. Whole body is compiled only with RTE_LIBRTE_LPM_DEBUG,
 * since rte_lpm_lookup() only validates arguments in debug builds. */
249 * Check that rte_lpm_lookup fails gracefully for incorrect user input
255 #if defined(RTE_LIBRTE_LPM_DEBUG)
256 struct rte_lpm *lpm = NULL;
257 struct rte_lpm_config config;
259 config.max_rules = MAX_RULES;
260 config.number_tbl8s = NUMBER_TBL8S;
262 uint32_t ip = RTE_IPV4(0, 0, 0, 0), next_hop_return = 0;
265 /* rte_lpm_lookup: lpm == NULL */
266 status = rte_lpm_lookup(NULL, ip, &next_hop_return);
267 TEST_LPM_ASSERT(status < 0);
269 /*Create valid lpm to use in rest of test. */
270 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
271 TEST_LPM_ASSERT(lpm != NULL);
273 /* rte_lpm_lookup: depth < 1 */
274 status = rte_lpm_lookup(lpm, ip, NULL);
275 TEST_LPM_ASSERT(status < 0);
/* NOTE(review): sampled listing fragment of test6() — function header,
 * braces and the depth/status declarations are missing; code kept
 * byte-identical, comments only. */
285 * Call add, lookup and delete for a single rule with depth <= 24
290 struct rte_lpm *lpm = NULL;
291 struct rte_lpm_config config;
293 config.max_rules = MAX_RULES;
294 config.number_tbl8s = NUMBER_TBL8S;
296 uint32_t ip = RTE_IPV4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
300 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
301 TEST_LPM_ASSERT(lpm != NULL);
303 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
304 TEST_LPM_ASSERT(status == 0);
306 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
307 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
309 status = rte_lpm_delete(lpm, ip, depth);
310 TEST_LPM_ASSERT(status == 0);
/* After delete, the lookup must miss with -ENOENT. */
312 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
313 TEST_LPM_ASSERT(status == -ENOENT);
/* NOTE(review): sampled listing fragment of test7() — function header,
 * braces and the ipx4/hop/depth declarations are missing; code kept
 * byte-identical, comments only. */
321 * Call add, lookup and delete for a single rule with depth > 24
329 struct rte_lpm *lpm = NULL;
330 struct rte_lpm_config config;
332 config.max_rules = MAX_RULES;
333 config.number_tbl8s = NUMBER_TBL8S;
335 uint32_t ip = RTE_IPV4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
339 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
340 TEST_LPM_ASSERT(lpm != NULL);
342 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
343 TEST_LPM_ASSERT(status == 0);
345 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
346 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
/* Vector lookup of 4 addresses: lanes 0/3 hold the added ip and must
 * hit; lanes 1/2 (ip +/- 0x100) must miss and yield UINT32_MAX. */
348 ipx4 = vect_set_epi32(ip, ip + 0x100, ip - 0x100, ip);
349 rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
350 TEST_LPM_ASSERT(hop[0] == next_hop_add);
351 TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
352 TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
353 TEST_LPM_ASSERT(hop[3] == next_hop_add);
355 status = rte_lpm_delete(lpm, ip, depth);
356 TEST_LPM_ASSERT(status == 0);
358 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
359 TEST_LPM_ASSERT(status == -ENOENT);
/* NOTE(review): sampled listing fragment of test8() — function header,
 * braces and the branch structure around the depth==1 special case are
 * missing (lines 426-445 of the original are sampled, so the
 * seemingly-contradictory asserts below belong to different branches).
 * Code kept byte-identical, comments only. */
367 * Use rte_lpm_add to add rules which effect only the second half of the lpm
368 * table. Use all possible depths ranging from 1..32. Set the next hop = to the
369 * depth. Check for lookup hit on every add and check for lookup miss on the
370 * first half of the lpm table after each add. Finally delete all rules going
371 * backwards (i.e. from depth = 32 ..1) and carry out a lookup after each
372 * delete. The lookup should return the next_hop_add value related to the
373 * previous depth value (i.e. depth -1).
380 struct rte_lpm *lpm = NULL;
381 struct rte_lpm_config config;
383 config.max_rules = MAX_RULES;
384 config.number_tbl8s = NUMBER_TBL8S;
386 uint32_t ip1 = RTE_IPV4(127, 255, 255, 255), ip2 = RTE_IPV4(128, 0, 0, 0);
387 uint32_t next_hop_add, next_hop_return;
391 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
392 TEST_LPM_ASSERT(lpm != NULL);
394 /* Loop with rte_lpm_add. */
395 for (depth = 1; depth <= 32; depth++) {
396 /* Let the next_hop_add value = depth. Just for change. */
397 next_hop_add = depth;
399 status = rte_lpm_add(lpm, ip2, depth, next_hop_add);
400 TEST_LPM_ASSERT(status == 0);
402 /* Check IP in first half of tbl24 which should be empty. */
403 status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
404 TEST_LPM_ASSERT(status == -ENOENT);
406 status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
407 TEST_LPM_ASSERT((status == 0) &&
408 (next_hop_return == next_hop_add));
/* Vector lookup: ip1 lanes miss, ip2 lanes hit with current next hop. */
410 ipx4 = vect_set_epi32(ip2, ip1, ip2, ip1);
411 rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
412 TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
413 TEST_LPM_ASSERT(hop[1] == next_hop_add);
414 TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
415 TEST_LPM_ASSERT(hop[3] == next_hop_add);
418 /* Loop with rte_lpm_delete. */
419 for (depth = 32; depth >= 1; depth--) {
420 next_hop_add = (uint8_t) (depth - 1);
422 status = rte_lpm_delete(lpm, ip2, depth);
423 TEST_LPM_ASSERT(status == 0);
425 status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
/* Hit branch: a shorter-prefix rule (depth-1) still covers ip2. */
428 TEST_LPM_ASSERT((status == 0) &&
429 (next_hop_return == next_hop_add));
/* Miss branch (last rule deleted): lookup must return -ENOENT. */
431 TEST_LPM_ASSERT(status == -ENOENT);
434 status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
435 TEST_LPM_ASSERT(status == -ENOENT);
437 ipx4 = vect_set_epi32(ip1, ip1, ip2, ip2);
438 rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
440 TEST_LPM_ASSERT(hop[0] == next_hop_add);
441 TEST_LPM_ASSERT(hop[1] == next_hop_add);
443 TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
444 TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
446 TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
447 TEST_LPM_ASSERT(hop[3] == UINT32_MAX);
/* NOTE(review): sampled listing fragment of test9() — function header,
 * braces and several depth/next_hop assignments between the visible
 * lines are missing; code kept byte-identical, comments only. */
456 * - Add & lookup to hit invalid TBL24 entry
457 * - Add & lookup to hit valid TBL24 entry not extended
458 * - Add & lookup to hit valid extended TBL24 entry with invalid TBL8 entry
459 * - Add & lookup to hit valid extended TBL24 entry with valid TBL8 entry
465 struct rte_lpm *lpm = NULL;
466 struct rte_lpm_config config;
468 config.max_rules = MAX_RULES;
469 config.number_tbl8s = NUMBER_TBL8S;
471 uint32_t ip, ip_1, ip_2;
472 uint8_t depth, depth_1, depth_2;
473 uint32_t next_hop_add, next_hop_add_1, next_hop_add_2, next_hop_return;
476 /* Add & lookup to hit invalid TBL24 entry */
477 ip = RTE_IPV4(128, 0, 0, 0);
481 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
482 TEST_LPM_ASSERT(lpm != NULL);
484 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
485 TEST_LPM_ASSERT(status == 0);
487 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
488 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
490 status = rte_lpm_delete(lpm, ip, depth);
491 TEST_LPM_ASSERT(status == 0);
493 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
494 TEST_LPM_ASSERT(status == -ENOENT);
496 rte_lpm_delete_all(lpm);
498 /* Add & lookup to hit valid TBL24 entry not extended */
499 ip = RTE_IPV4(128, 0, 0, 0);
503 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
504 TEST_LPM_ASSERT(status == 0);
506 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
507 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
512 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
513 TEST_LPM_ASSERT(status == 0);
515 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
516 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
520 status = rte_lpm_delete(lpm, ip, depth);
521 TEST_LPM_ASSERT(status == 0);
525 status = rte_lpm_delete(lpm, ip, depth);
526 TEST_LPM_ASSERT(status == 0);
528 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
529 TEST_LPM_ASSERT(status == -ENOENT);
531 rte_lpm_delete_all(lpm);
533 /* Add & lookup to hit valid extended TBL24 entry with invalid TBL8
535 ip = RTE_IPV4(128, 0, 0, 0);
539 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
540 TEST_LPM_ASSERT(status == 0);
542 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
543 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
545 ip = RTE_IPV4(128, 0, 0, 5);
549 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
550 TEST_LPM_ASSERT(status == 0);
552 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
553 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
555 status = rte_lpm_delete(lpm, ip, depth);
556 TEST_LPM_ASSERT(status == 0);
558 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
559 TEST_LPM_ASSERT(status == -ENOENT);
561 ip = RTE_IPV4(128, 0, 0, 0);
565 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
566 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
568 status = rte_lpm_delete(lpm, ip, depth);
569 TEST_LPM_ASSERT(status == 0);
571 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
572 TEST_LPM_ASSERT(status == -ENOENT);
574 rte_lpm_delete_all(lpm);
576 /* Add & lookup to hit valid extended TBL24 entry with valid TBL8
578 ip_1 = RTE_IPV4(128, 0, 0, 0);
580 next_hop_add_1 = 101;
582 ip_2 = RTE_IPV4(128, 0, 0, 5);
584 next_hop_add_2 = 102;
588 status = rte_lpm_add(lpm, ip_1, depth_1, next_hop_add_1);
589 TEST_LPM_ASSERT(status == 0);
591 status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
592 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
594 status = rte_lpm_add(lpm, ip_2, depth_2, next_hop_add_2);
595 TEST_LPM_ASSERT(status == 0);
597 status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
598 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2));
600 status = rte_lpm_delete(lpm, ip_2, depth_2);
601 TEST_LPM_ASSERT(status == 0);
/* After deleting the more specific rule, ip_2 falls back to rule 1. */
603 status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
604 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
606 status = rte_lpm_delete(lpm, ip_1, depth_1);
607 TEST_LPM_ASSERT(status == 0);
609 status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
610 TEST_LPM_ASSERT(status == -ENOENT);
/* NOTE(review): sampled listing fragment of test10() — function header,
 * braces and the depth/next_hop_add assignments between the visible
 * lines are missing; code kept byte-identical, comments only. */
619 * - Add rule that covers a TBL24 range previously invalid & lookup (& delete &
621 * - Add rule that extends a TBL24 invalid entry & lookup (& delete & lookup)
622 * - Add rule that extends a TBL24 valid entry & lookup for both rules (&
624 * - Add rule that updates the next hop in TBL24 & lookup (& delete & lookup)
625 * - Add rule that updates the next hop in TBL8 & lookup (& delete & lookup)
626 * - Delete a rule that is not present in the TBL24 & lookup
627 * - Delete a rule that is not present in the TBL8 & lookup
634 struct rte_lpm *lpm = NULL;
635 struct rte_lpm_config config;
637 config.max_rules = MAX_RULES;
638 config.number_tbl8s = NUMBER_TBL8S;
640 uint32_t ip, next_hop_add, next_hop_return;
644 /* Add rule that covers a TBL24 range previously invalid & lookup
645 * (& delete & lookup) */
646 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
647 TEST_LPM_ASSERT(lpm != NULL);
649 ip = RTE_IPV4(128, 0, 0, 0);
653 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
654 TEST_LPM_ASSERT(status == 0);
656 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
657 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
659 status = rte_lpm_delete(lpm, ip, depth);
660 TEST_LPM_ASSERT(status == 0);
662 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
663 TEST_LPM_ASSERT(status == -ENOENT);
665 rte_lpm_delete_all(lpm);
667 ip = RTE_IPV4(128, 0, 0, 0);
671 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
672 TEST_LPM_ASSERT(status == 0);
674 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
675 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
677 status = rte_lpm_delete(lpm, ip, depth);
678 TEST_LPM_ASSERT(status == 0);
680 rte_lpm_delete_all(lpm);
682 /* Add rule that extends a TBL24 valid entry & lookup for both rules
683 * (& delete & lookup) */
685 ip = RTE_IPV4(128, 0, 0, 0);
689 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
690 TEST_LPM_ASSERT(status == 0);
692 ip = RTE_IPV4(128, 0, 0, 10);
696 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
697 TEST_LPM_ASSERT(status == 0);
699 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
700 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
702 ip = RTE_IPV4(128, 0, 0, 0);
705 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
706 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
708 ip = RTE_IPV4(128, 0, 0, 0);
711 status = rte_lpm_delete(lpm, ip, depth);
712 TEST_LPM_ASSERT(status == 0);
714 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
715 TEST_LPM_ASSERT(status == -ENOENT);
717 ip = RTE_IPV4(128, 0, 0, 10);
720 status = rte_lpm_delete(lpm, ip, depth);
721 TEST_LPM_ASSERT(status == 0);
723 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
724 TEST_LPM_ASSERT(status == -ENOENT);
726 rte_lpm_delete_all(lpm);
728 /* Add rule that updates the next hop in TBL24 & lookup
729 * (& delete & lookup) */
731 ip = RTE_IPV4(128, 0, 0, 0);
735 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
736 TEST_LPM_ASSERT(status == 0);
738 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
739 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
743 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
744 TEST_LPM_ASSERT(status == 0);
746 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
747 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
749 status = rte_lpm_delete(lpm, ip, depth);
750 TEST_LPM_ASSERT(status == 0);
752 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
753 TEST_LPM_ASSERT(status == -ENOENT);
755 rte_lpm_delete_all(lpm);
757 /* Add rule that updates the next hop in TBL8 & lookup
758 * (& delete & lookup) */
760 ip = RTE_IPV4(128, 0, 0, 0);
764 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
765 TEST_LPM_ASSERT(status == 0);
767 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
768 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
772 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
773 TEST_LPM_ASSERT(status == 0);
775 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
776 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
778 status = rte_lpm_delete(lpm, ip, depth);
779 TEST_LPM_ASSERT(status == 0);
781 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
782 TEST_LPM_ASSERT(status == -ENOENT);
784 rte_lpm_delete_all(lpm);
786 /* Delete a rule that is not present in the TBL24 & lookup */
788 ip = RTE_IPV4(128, 0, 0, 0);
/* Deleting a rule that was never added must fail (< 0). */
791 status = rte_lpm_delete(lpm, ip, depth);
792 TEST_LPM_ASSERT(status < 0);
794 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
795 TEST_LPM_ASSERT(status == -ENOENT);
797 rte_lpm_delete_all(lpm);
799 /* Delete a rule that is not present in the TBL8 & lookup */
801 ip = RTE_IPV4(128, 0, 0, 0);
804 status = rte_lpm_delete(lpm, ip, depth);
805 TEST_LPM_ASSERT(status < 0);
807 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
808 TEST_LPM_ASSERT(status == -ENOENT);
/* NOTE(review): sampled listing fragment of test11() — function header,
 * braces and the depth/next_hop_add assignments are missing; code kept
 * byte-identical, comments only. */
816 * Add two rules, lookup to hit the more specific one, lookup to hit the less
817 * specific one delete the less specific rule and lookup previous values again;
818 * add a more specific rule than the existing rule, lookup again
825 struct rte_lpm *lpm = NULL;
826 struct rte_lpm_config config;
828 config.max_rules = MAX_RULES;
829 config.number_tbl8s = NUMBER_TBL8S;
831 uint32_t ip, next_hop_add, next_hop_return;
835 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
836 TEST_LPM_ASSERT(lpm != NULL);
838 ip = RTE_IPV4(128, 0, 0, 0);
842 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
843 TEST_LPM_ASSERT(status == 0);
845 ip = RTE_IPV4(128, 0, 0, 10);
849 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
850 TEST_LPM_ASSERT(status == 0);
852 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
853 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
855 ip = RTE_IPV4(128, 0, 0, 0);
858 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
859 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
861 ip = RTE_IPV4(128, 0, 0, 0);
864 status = rte_lpm_delete(lpm, ip, depth);
865 TEST_LPM_ASSERT(status == 0);
867 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
868 TEST_LPM_ASSERT(status == -ENOENT);
870 ip = RTE_IPV4(128, 0, 0, 10);
873 status = rte_lpm_delete(lpm, ip, depth);
874 TEST_LPM_ASSERT(status == 0);
876 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
877 TEST_LPM_ASSERT(status == -ENOENT);
/* NOTE(review): sampled listing fragment of test12() — function header,
 * braces and the ipx4/hop/depth declarations are missing; code kept
 * byte-identical, comments only. */
885 * Add an extended rule (i.e. depth greater than 24, lookup (hit), delete,
886 * lookup (miss) in a for loop of 1000 times. This will check tbl8 extension
896 struct rte_lpm *lpm = NULL;
897 struct rte_lpm_config config;
899 config.max_rules = MAX_RULES;
900 config.number_tbl8s = NUMBER_TBL8S;
902 uint32_t ip, i, next_hop_add, next_hop_return;
906 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
907 TEST_LPM_ASSERT(lpm != NULL);
909 ip = RTE_IPV4(128, 0, 0, 0);
/* Repeated add/lookup/delete exercises tbl8 alloc/free cycling. */
913 for (i = 0; i < 1000; i++) {
914 status = rte_lpm_add(lpm, ip, depth, next_hop_add);
915 TEST_LPM_ASSERT(status == 0);
917 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
918 TEST_LPM_ASSERT((status == 0) &&
919 (next_hop_return == next_hop_add));
/* Lanes 1/3 hold ip (hit); lanes 0/2 hold ip+1 / ip-1 (miss). */
921 ipx4 = vect_set_epi32(ip, ip + 1, ip, ip - 1);
922 rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
923 TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
924 TEST_LPM_ASSERT(hop[1] == next_hop_add);
925 TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
926 TEST_LPM_ASSERT(hop[3] == next_hop_add);
928 status = rte_lpm_delete(lpm, ip, depth);
929 TEST_LPM_ASSERT(status == 0);
931 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
932 TEST_LPM_ASSERT(status == -ENOENT);
/* NOTE(review): sampled listing fragment of test13() — function header,
 * braces and the depth assignments are missing; code kept
 * byte-identical, comments only. */
941 * Add a rule to tbl24, lookup (hit), then add a rule that will extend this
942 * tbl24 entry, lookup (hit). delete the rule that caused the tbl24 extension,
943 * lookup (miss) and repeat for loop of 1000 times. This will check tbl8
944 * extension and contraction.
951 struct rte_lpm *lpm = NULL;
952 struct rte_lpm_config config;
954 config.max_rules = MAX_RULES;
955 config.number_tbl8s = NUMBER_TBL8S;
957 uint32_t ip, i, next_hop_add_1, next_hop_add_2, next_hop_return;
961 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
962 TEST_LPM_ASSERT(lpm != NULL);
964 ip = RTE_IPV4(128, 0, 0, 0);
966 next_hop_add_1 = 100;
968 status = rte_lpm_add(lpm, ip, depth, next_hop_add_1);
969 TEST_LPM_ASSERT(status == 0);
971 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
972 TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
975 next_hop_add_2 = 101;
977 for (i = 0; i < 1000; i++) {
978 status = rte_lpm_add(lpm, ip, depth, next_hop_add_2);
979 TEST_LPM_ASSERT(status == 0);
981 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
982 TEST_LPM_ASSERT((status == 0) &&
983 (next_hop_return == next_hop_add_2));
985 status = rte_lpm_delete(lpm, ip, depth);
986 TEST_LPM_ASSERT(status == 0);
/* After the extension rule is removed, lookup falls back to rule 1. */
988 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
989 TEST_LPM_ASSERT((status == 0) &&
990 (next_hop_return == next_hop_add_1));
995 status = rte_lpm_delete(lpm, ip, depth);
996 TEST_LPM_ASSERT(status == 0);
998 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
999 TEST_LPM_ASSERT(status == -ENOENT);
/* NOTE(review): sampled listing fragment of test14() — function header,
 * braces, the ipx4/hop declarations and the depth assignment are
 * missing; code kept byte-identical, comments only. */
1007 * For TBL8 extension exhaustion. Add 512 rules that require a tbl8 extension.
1008 * No more tbl8 extensions will be allowed. Now add one more rule that required
1009 * a tbl8 extension and get fail.
1015 /* We only use depth = 32 in the loop below so we must make sure
1016 * that we have enough storage for all rules at that depth*/
1018 struct rte_lpm *lpm = NULL;
1019 struct rte_lpm_config config;
1021 config.max_rules = 256 * 32;
1022 config.number_tbl8s = 512;
1024 uint32_t ip, next_hop_base, next_hop_return;
1030 /* Add enough space for 256 rules for every depth */
1031 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
1032 TEST_LPM_ASSERT(lpm != NULL);
1035 next_hop_base = 100;
1036 ip = RTE_IPV4(0, 0, 0, 0);
1038 /* Add 256 rules that require a tbl8 extension */
/* Each /24-aligned ip lands in a distinct tbl8 group. */
1039 for (; ip <= RTE_IPV4(0, 1, 255, 0); ip += 256) {
1040 status = rte_lpm_add(lpm, ip, depth, next_hop_base + ip);
1041 TEST_LPM_ASSERT(status == 0);
1043 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
1044 TEST_LPM_ASSERT((status == 0) &&
1045 (next_hop_return == next_hop_base + ip));
1047 ipx4 = vect_set_epi32(ip + 3, ip + 2, ip + 1, ip);
1048 rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
1049 TEST_LPM_ASSERT(hop[0] == next_hop_base + ip);
1050 TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
1051 TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
1052 TEST_LPM_ASSERT(hop[3] == UINT32_MAX);
1055 /* All tbl8 extensions have been used above. Try to add one more and
1057 ip = RTE_IPV4(1, 0, 0, 0);
1060 status = rte_lpm_add(lpm, ip, depth, next_hop_base + ip);
1061 TEST_LPM_ASSERT(status < 0);
/* NOTE(review): sampled listing fragment of test15() — function header,
 * braces and the final rte_lpm_free() are missing; code kept
 * byte-identical, comments only. */
1069 * Sequence of operations for find existing lpm table
1072 * - find existing table: hit
1073 * - find non-existing table: miss
1079 struct rte_lpm *lpm = NULL, *result = NULL;
1080 struct rte_lpm_config config;
1082 config.max_rules = 256 * 32;
1083 config.number_tbl8s = NUMBER_TBL8S;
1087 lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, &config);
1088 TEST_LPM_ASSERT(lpm != NULL);
1090 /* Try to find existing lpm */
1091 result = rte_lpm_find_existing("lpm_find_existing");
1092 TEST_LPM_ASSERT(result == lpm);
1094 /* Try to find non-existing lpm */
1095 result = rte_lpm_find_existing("lpm_find_non_existing");
1096 TEST_LPM_ASSERT(result == NULL);
1099 rte_lpm_delete_all(lpm);
/* NOTE(review): sampled listing fragment of test16() — function header,
 * braces, the ip declaration and the loop's break statement are
 * missing; code kept byte-identical, comments only. */
1106 * test failure condition of overloading the tbl8 so no more will fit
1107 * Check we get an error return value in that case
1113 struct rte_lpm_config config;
1115 config.max_rules = 256 * 32;
1116 config.number_tbl8s = NUMBER_TBL8S;
1118 struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
1120 /* ip loops through all possibilities for top 24 bits of address */
1121 for (ip = 0; ip < 0xFFFFFF; ip++) {
1122 /* add an entry within a different tbl8 each time, since
1123 * depth >24 and the top 24 bits are different */
1124 if (rte_lpm_add(lpm, (ip << 8) + 0xF0, 30, 0) < 0)
/* The loop must fail exactly when all NUMBER_TBL8S groups are used. */
1128 if (ip != NUMBER_TBL8S) {
1129 printf("Error, unexpected failure with filling tbl8 groups\n");
1130 printf("Failed after %u additions, expected after %u\n",
1131 (unsigned)ip, (unsigned)NUMBER_TBL8S);
/* NOTE(review): sampled listing fragment of test17() — function header,
 * braces, the d_ip_10_24/d_ip_20_25 initializers and the error-return
 * paths after the printf calls are missing; code kept byte-identical,
 * comments only. */
1139 * Test for overwriting of tbl8:
1140 * - add rule /32 and lookup
1141 * - add new rule /24 and lookup
1142 * - add third rule /25 and lookup
1143 * - lookup /32 and /24 rule to ensure the table has not been overwritten.
1148 struct rte_lpm *lpm = NULL;
1149 struct rte_lpm_config config;
1151 config.max_rules = MAX_RULES;
1152 config.number_tbl8s = NUMBER_TBL8S;
1154 const uint32_t ip_10_32 = RTE_IPV4(10, 10, 10, 2);
1155 const uint32_t ip_10_24 = RTE_IPV4(10, 10, 10, 0);
1156 const uint32_t ip_20_25 = RTE_IPV4(10, 10, 20, 2);
1157 const uint8_t d_ip_10_32 = 32,
1160 const uint32_t next_hop_ip_10_32 = 100,
1161 next_hop_ip_10_24 = 105,
1162 next_hop_ip_20_25 = 111;
1163 uint32_t next_hop_return = 0;
1166 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
1167 TEST_LPM_ASSERT(lpm != NULL);
1169 if ((status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32,
1170 next_hop_ip_10_32)) < 0)
1173 status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
1174 uint32_t test_hop_10_32 = next_hop_return;
1175 TEST_LPM_ASSERT(status == 0);
1176 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
1178 if ((status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24,
1179 next_hop_ip_10_24)) < 0)
1182 status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
1183 uint32_t test_hop_10_24 = next_hop_return;
1184 TEST_LPM_ASSERT(status == 0);
1185 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
1187 if ((status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25,
1188 next_hop_ip_20_25)) < 0)
1191 status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
1192 uint32_t test_hop_20_25 = next_hop_return;
1193 TEST_LPM_ASSERT(status == 0);
1194 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);
/* The three rules must resolve to three distinct next hops. */
1196 if (test_hop_10_32 == test_hop_10_24) {
1197 printf("Next hop return equal\n");
1201 if (test_hop_10_24 == test_hop_20_25) {
1202 printf("Next hop return equal\n");
/* Re-check the earlier rules: adding /25 must not clobber /32 or /24. */
1206 status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
1207 TEST_LPM_ASSERT(status == 0);
1208 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
1210 status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
1211 TEST_LPM_ASSERT(status == 0);
1212 TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
/* NOTE(review): sampled listing fragment of test18() — function header,
 * braces, the depth/next_hop assignments and the step-number comments
 * between the visible lines are missing; code kept byte-identical,
 * comments only. Inspects lpm internals (tbl24) directly. */
1220 * Test for recycle of tbl8
1221 * - step 1: add a rule with depth=28 (> 24)
1222 * - step 2: add a rule with same 24-bit prefix and depth=23 (< 24)
1223 * - step 3: delete the first rule
1224 * - step 4: check tbl8 is freed
1225 * - step 5: add a rule same as the first one (depth=28)
1226 * - step 6: check same tbl8 is allocated
1227 * - step 7: add a rule with same 24-bit prefix and depth=24
1228 * - step 8: delete the rule (depth=28) added in step 5
1229 * - step 9: check tbl8 is freed
1230 * - step 10: add a rule with same 24-bit prefix and depth = 28
1231 * - step 11: check same tbl8 is allocated again
/* Alias: in a valid_group tbl24 entry, next_hop stores the tbl8 index. */
1236 #define group_idx next_hop
1237 struct rte_lpm *lpm = NULL;
1238 struct rte_lpm_config config;
1239 uint32_t ip, next_hop;
1241 uint32_t tbl8_group_index;
1243 config.max_rules = MAX_RULES;
1244 config.number_tbl8s = NUMBER_TBL8S;
1247 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
1248 TEST_LPM_ASSERT(lpm != NULL);
1250 ip = RTE_IPV4(192, 168, 100, 100);
1253 rte_lpm_add(lpm, ip, depth, next_hop);
1255 TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
1256 tbl8_group_index = lpm->tbl24[ip>>8].group_idx;
1260 rte_lpm_add(lpm, ip, depth, next_hop);
1261 TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
1264 rte_lpm_delete(lpm, ip, depth);
1266 TEST_LPM_ASSERT(!lpm->tbl24[ip>>8].valid_group);
1269 rte_lpm_add(lpm, ip, depth, next_hop);
1271 TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
/* The freed tbl8 group must be recycled (same index reused). */
1272 TEST_LPM_ASSERT(tbl8_group_index == lpm->tbl24[ip>>8].group_idx);
1276 rte_lpm_add(lpm, ip, depth, next_hop);
1277 TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
1280 rte_lpm_delete(lpm, ip, depth);
1282 TEST_LPM_ASSERT(!lpm->tbl24[ip>>8].valid_group);
1285 rte_lpm_add(lpm, ip, depth, next_hop);
1287 TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
1288 TEST_LPM_ASSERT(tbl8_group_index == lpm->tbl24[ip>>8].group_idx);
/* NOTE(review): sampled listing fragment of test19() — function header,
 * braces, the invalid-mode assignment before line 1331 and the cleanup
 * (rte_free/rte_lpm_free) are missing; code kept byte-identical,
 * comments only. */
1296 * rte_lpm_rcu_qsbr_add positive and negative tests.
1297 * - Add RCU QSBR variable to LPM
1298 * - Add another RCU QSBR variable to LPM
1304 struct rte_lpm *lpm = NULL;
1305 struct rte_lpm_config config;
1307 struct rte_rcu_qsbr *qsv;
1308 struct rte_rcu_qsbr *qsv2;
1310 struct rte_lpm_rcu_config rcu_cfg = {0};
1312 config.max_rules = MAX_RULES;
1313 config.number_tbl8s = NUMBER_TBL8S;
1316 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
1317 TEST_LPM_ASSERT(lpm != NULL);
1319 /* Create RCU QSBR variable */
1320 sz = rte_rcu_qsbr_get_memsize(RTE_MAX_LCORE);
1321 qsv = (struct rte_rcu_qsbr *)rte_zmalloc_socket(NULL, sz,
1322 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1323 TEST_LPM_ASSERT(qsv != NULL);
1325 status = rte_rcu_qsbr_init(qsv, RTE_MAX_LCORE);
1326 TEST_LPM_ASSERT(status == 0);
1329 /* Invalid QSBR mode */
1331 status = rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg);
1332 TEST_LPM_ASSERT(status != 0);
1334 rcu_cfg.mode = RTE_LPM_QSBR_MODE_DQ;
1335 /* Attach RCU QSBR to LPM table */
1336 status = rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg);
1337 TEST_LPM_ASSERT(status == 0);
1339 /* Create and attach another RCU QSBR to LPM table */
1340 qsv2 = (struct rte_rcu_qsbr *)rte_zmalloc_socket(NULL, sz,
1341 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1342 TEST_LPM_ASSERT(qsv2 != NULL);
/* A second attach must be rejected: only one QSBR per LPM table. */
1345 rcu_cfg.mode = RTE_LPM_QSBR_MODE_SYNC;
1346 status = rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg);
1347 TEST_LPM_ASSERT(status != 0);
/* NOTE(review): sampled listing fragment of test20() — function header,
 * braces, the depth/next_hop assignments and the cleanup calls are
 * missing; code kept byte-identical, comments only. */
1357 * rte_lpm_rcu_qsbr_add DQ mode functional test.
1358 * Reader and writer are in the same thread in this test.
1359 * - Create LPM which supports 1 tbl8 group at max
1360 * - Add RCU QSBR variable to LPM
1361 * - Add a rule with depth=28 (> 24)
1362 * - Register a reader thread (not a real thread)
1363 * - Reader lookup existing rule
1364 * - Writer delete the rule
1365 * - Reader lookup the rule
1366 * - Writer re-add the rule (no available tbl8 group)
1367 * - Reader report quiescent state and unregister
1368 * - Writer re-add the rule
1369 * - Reader lookup the rule
1374 struct rte_lpm *lpm = NULL;
1375 struct rte_lpm_config config;
1377 struct rte_rcu_qsbr *qsv;
1379 uint32_t ip, next_hop, next_hop_return;
1381 struct rte_lpm_rcu_config rcu_cfg = {0};
1383 config.max_rules = MAX_RULES;
/* Exactly one tbl8 group, so a deferred-free tbl8 blocks re-adding. */
1384 config.number_tbl8s = 1;
1387 lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
1388 TEST_LPM_ASSERT(lpm != NULL);
1390 /* Create RCU QSBR variable */
1391 sz = rte_rcu_qsbr_get_memsize(1);
1392 qsv = (struct rte_rcu_qsbr *)rte_zmalloc_socket(NULL, sz,
1393 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
1394 TEST_LPM_ASSERT(qsv != NULL);
1396 status = rte_rcu_qsbr_init(qsv, 1);
1397 TEST_LPM_ASSERT(status == 0);
1400 rcu_cfg.mode = RTE_LPM_QSBR_MODE_DQ;
1401 /* Attach RCU QSBR to LPM table */
1402 status = rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg);
1403 TEST_LPM_ASSERT(status == 0);
1405 ip = RTE_IPV4(192, 0, 2, 100);
1408 status = rte_lpm_add(lpm, ip, depth, next_hop);
1409 TEST_LPM_ASSERT(status == 0);
1410 TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
1412 /* Register pseudo reader */
1413 status = rte_rcu_qsbr_thread_register(qsv, 0);
1414 TEST_LPM_ASSERT(status == 0);
1415 rte_rcu_qsbr_thread_online(qsv, 0);
1417 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
1418 TEST_LPM_ASSERT(status == 0);
1419 TEST_LPM_ASSERT(next_hop_return == next_hop);
1422 status = rte_lpm_delete(lpm, ip, depth);
1423 TEST_LPM_ASSERT(status == 0);
1424 TEST_LPM_ASSERT(!lpm->tbl24[ip>>8].valid);
1426 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
1427 TEST_LPM_ASSERT(status != 0);
/* Reader has not quiesced, so the only tbl8 is still on the defer
 * queue — re-adding must fail. */
1429 status = rte_lpm_add(lpm, ip, depth, next_hop);
1430 TEST_LPM_ASSERT(status != 0);
1432 /* Reader quiescent */
1433 rte_rcu_qsbr_quiescent(qsv, 0);
/* tbl8 reclaimed after the quiescent state report: add now succeeds. */
1435 status = rte_lpm_add(lpm, ip, depth, next_hop);
1436 TEST_LPM_ASSERT(status == 0);
1438 rte_rcu_qsbr_thread_offline(qsv, 0);
1439 status = rte_rcu_qsbr_thread_unregister(qsv, 0);
1440 TEST_LPM_ASSERT(status == 0);
1442 status = rte_lpm_lookup(lpm, ip, &next_hop_return);
1443 TEST_LPM_ASSERT(status == 0);
1444 TEST_LPM_ASSERT(next_hop_return == next_hop);
/*
 * Shared state between the writer (main lcore) and the reader lcore
 * in the RCU sync-mode test below.
 */
static struct rte_lpm *g_lpm;
static struct rte_rcu_qsbr *g_v;
static uint32_t g_ip = RTE_IPV4(192, 0, 2, 100);
/* Polled by the reader loop to decide when to exit.
 * NOTE(review): set by the writer side of the sync test; the assignment
 * is not visible in this chunk — confirm against the full file.
 */
static volatile uint8_t writer_done;
/* Report quiescent state interval every 1024 lookups. Larger critical
 * sections in reader will result in writer polling multiple times.
 */
#define QSBR_REPORTING_INTERVAL 1024
/* Number of delete/add cycles the writer performs. */
#define WRITER_ITERATIONS 512
/*
 * Reader thread using rte_lpm data structure with RCU.
 * Runs on a worker lcore: performs lookups on the shared LPM table and
 * periodically reports quiescence until the writer signals completion.
 */
test_lpm_rcu_qsbr_reader(void *arg)
	uint32_t next_hop_return = 0;

	/* Register this thread to report quiescent state */
	rte_rcu_qsbr_thread_register(g_v, 0);
	rte_rcu_qsbr_thread_online(g_v, 0);

	/* One batch of lookups forms the read-side critical section;
	 * lookup results are intentionally ignored — this thread only
	 * exercises concurrent access.
	 */
	for (i = 0; i < QSBR_REPORTING_INTERVAL; i++)
		rte_lpm_lookup(g_lpm, g_ip, &next_hop_return);

	/* Update quiescent state */
	rte_rcu_qsbr_quiescent(g_v, 0);
	} while (!writer_done);

	/* Writer finished: leave the QSBR reported-thread set. */
	rte_rcu_qsbr_thread_offline(g_v, 0);
	rte_rcu_qsbr_thread_unregister(g_v, 0);
/*
 * rte_lpm_rcu_qsbr_add sync mode functional test.
 * 1 Reader and 1 writer. They cannot be in the same thread in this test.
 *  - Create LPM which supports 1 tbl8 group at max
 *  - Add RCU QSBR variable with sync mode to LPM
 *  - Register a reader thread. Reader keeps looking up a specific rule.
 *  - Writer keeps adding and deleting a specific rule with depth=28 (> 24)
 */
	struct rte_lpm_config config;
	uint32_t i, next_hop;
	/* Zero-initialize so any config fields not set below default to 0. */
	struct rte_lpm_rcu_config rcu_cfg = {0};

	/* Needs one lcore for the writer and one for the reader. */
	if (rte_lcore_count() < 2) {
		printf("Not enough cores for %s, expecting at least 2\n",
		return TEST_SKIPPED;

	config.max_rules = MAX_RULES;
	/* One tbl8 group, so every delete/add cycle frees and reuses it. */
	config.number_tbl8s = 1;

	g_lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(g_lpm != NULL);

	/* Create RCU QSBR variable */
	/* Sized for a single reader thread (thread id 0). */
	sz = rte_rcu_qsbr_get_memsize(1);
	g_v = (struct rte_rcu_qsbr *)rte_zmalloc_socket(NULL, sz,
			RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	TEST_LPM_ASSERT(g_v != NULL);

	status = rte_rcu_qsbr_init(g_v, 1);
	TEST_LPM_ASSERT(status == 0);

	/* SYNC mode: the writer blocks in delete until readers report
	 * quiescence, instead of deferring reclamation to a queue.
	 */
	rcu_cfg.mode = RTE_LPM_QSBR_MODE_SYNC;
	/* Attach RCU QSBR to LPM table */
	status = rte_lpm_rcu_qsbr_add(g_lpm, &rcu_cfg);
	TEST_LPM_ASSERT(status == 0);

	/* Launch reader thread */
	/* rte_get_next_lcore(-1, 1, 0): first worker lcore after main. */
	rte_eal_remote_launch(test_lpm_rcu_qsbr_reader, NULL,
			rte_get_next_lcore(-1, 1, 0));

	/* Seed the rule the reader looks up; depth > 24 uses the tbl8. */
	status = rte_lpm_add(g_lpm, g_ip, depth, next_hop);
		printf("%s: Failed to add rule\n", __func__);

	/* Writer loop: each delete synchronously waits for the reader to
	 * report quiescence (SYNC mode), then the add reuses the freed
	 * tbl8 group, all while the reader keeps looking up concurrently.
	 */
	for (i = 0; i < WRITER_ITERATIONS; i++) {
		status = rte_lpm_delete(g_lpm, g_ip, depth);
			printf("%s: Failed to delete rule at iteration %d\n",

		status = rte_lpm_add(g_lpm, g_ip, depth, next_hop);
			printf("%s: Failed to add rule at iteration %d\n",

	/* Wait until reader exited. */
	rte_eal_mp_wait_lcore();

	rte_lpm_free(g_lpm);

	return (status == 0) ? PASS : -1;
/*
 * Do all unit tests.
 * Runs every entry in the tests[] table; returns 0 if all pass,
 * otherwise the status of the most recent failing test.
 */
	int status, global_status = 0;

	for (i = 0; i < RTE_DIM(tests); i++) {
		status = tests[i]();
		/* Report the failure but keep running the remaining tests. */
		printf("ERROR: LPM Test %u: FAIL\n", i);
		global_status = status;

	return global_status;

#endif /* !RTE_EXEC_ENV_WINDOWS */

/* Expose this suite as the "lpm_autotest" command of the DPDK test app. */
REGISTER_TEST_COMMAND(lpm_autotest, test_lpm);