/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>

#include "rte_lpm.h"
TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm_tailq = {
    .name = "RTE_LPM",
};
EAL_REGISTER_TAILQ(rte_lpm_tailq)

#define MAX_DEPTH_TBL24 24

enum valid_flag {
    INVALID = 0,
    VALID
};
/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
#define VERIFY_DEPTH(depth) do {                                \
    if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))            \
        rte_panic("LPM: Invalid depth (%u) at line %d",         \
                (unsigned)(depth), __LINE__);                   \
} while (0)
#else
#define VERIFY_DEPTH(depth)
#endif
/*
 * Converts a given depth value to its corresponding mask value.
 *
 * depth  (IN)  : range = 1 - 32
 * mask   (OUT) : 32bit mask
 */
static uint32_t __attribute__((pure))
depth_to_mask(uint8_t depth)
{
    VERIFY_DEPTH(depth);

    /* To calculate a mask start with a 1 on the left hand side and right
     * shift while populating the left hand side with 1's
     */
    return (int)0x80000000 >> (depth - 1);
}
/*
 * Converts given depth value to its corresponding range value.
 */
static inline uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)
{
    VERIFY_DEPTH(depth);

    /*
     * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
     */
    if (depth <= MAX_DEPTH_TBL24)
        return 1 << (MAX_DEPTH_TBL24 - depth);

    /* Else if depth is greater than 24 */
    return 1 << (RTE_LPM_MAX_DEPTH - depth);
}
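/*
 * Worked examples for the two helpers above (illustrative only, derived
 * from the arithmetic; not part of the build):
 *
 *     depth_to_mask(8)   == 0xFF000000   -- a /8 keeps only the top byte
 *     depth_to_mask(32)  == 0xFFFFFFFF   -- a /32 matches a single address
 *     depth_to_range(24) == 1            -- a /24 covers one tbl24 entry
 *     depth_to_range(16) == 256          -- a /16 spans 256 tbl24 entries
 *     depth_to_range(32) == 1            -- a /32 covers one tbl8 entry
 */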
/*
 * Find an existing lpm table and return a pointer to it.
 */
struct rte_lpm_v20 *
rte_lpm_find_existing_v20(const char *name)
{
    struct rte_lpm_v20 *l = NULL;
    struct rte_tailq_entry *te;
    struct rte_lpm_list *lpm_list;

    lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

    rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
    TAILQ_FOREACH(te, lpm_list, next) {
        l = te->data;
        if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
            break;
    }
    rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

    if (te == NULL) {
        rte_errno = ENOENT;
        return NULL;
    }
    return l;
}
VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0);
struct rte_lpm *
rte_lpm_find_existing_v1604(const char *name)
{
    struct rte_lpm *l = NULL;
    struct rte_tailq_entry *te;
    struct rte_lpm_list *lpm_list;

    lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

    rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
    TAILQ_FOREACH(te, lpm_list, next) {
        l = te->data;
        if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
            break;
    }
    rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

    if (te == NULL) {
        rte_errno = ENOENT;
        return NULL;
    }
    return l;
}
BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04);
MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name),
        rte_lpm_find_existing_v1604);
/*
 * Allocates memory for LPM object
 */
struct rte_lpm_v20 *
rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
        __rte_unused int flags)
{
    char mem_name[RTE_LPM_NAMESIZE];
    struct rte_lpm_v20 *lpm = NULL;
    struct rte_tailq_entry *te;
    uint32_t mem_size;
    struct rte_lpm_list *lpm_list;

    lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

    RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2);

    /* Check user arguments. */
    if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
        rte_errno = EINVAL;
        return NULL;
    }

    snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

    /* Determine the amount of memory to allocate. */
    mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);

    rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

    /* Guarantee there's no existing LPM with this name. */
    TAILQ_FOREACH(te, lpm_list, next) {
        lpm = te->data;
        if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
            break;
    }
    lpm = NULL;
    if (te != NULL) {
        rte_errno = EEXIST;
        goto exit;
    }

    /* Allocate tailq entry. */
    te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
    if (te == NULL) {
        RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
        rte_errno = ENOMEM;
        goto exit;
    }

    /* Allocate memory to store the LPM data structures. */
    lpm = rte_zmalloc_socket(mem_name, mem_size,
            RTE_CACHE_LINE_SIZE, socket_id);
    if (lpm == NULL) {
        RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
        rte_free(te);
        rte_errno = ENOMEM;
        goto exit;
    }

    /* Save user arguments. */
    lpm->max_rules = max_rules;
    snprintf(lpm->name, sizeof(lpm->name), "%s", name);

    te->data = (void *)lpm;

    TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
    rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

    return lpm;
}
VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);
struct rte_lpm *
rte_lpm_create_v1604(const char *name, int socket_id,
        const struct rte_lpm_config *config)
{
    char mem_name[RTE_LPM_NAMESIZE];
    struct rte_lpm *lpm = NULL;
    struct rte_tailq_entry *te;
    uint32_t mem_size, rules_size, tbl8s_size;
    struct rte_lpm_list *lpm_list;

    lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

    RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);

    /* Check user arguments. */
    if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
            || config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
        rte_errno = EINVAL;
        return NULL;
    }

    snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

    /* Determine the amount of memory to allocate. */
    mem_size = sizeof(*lpm);
    rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
    tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
            RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);

    rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

    /* Guarantee there's no existing LPM with this name. */
    TAILQ_FOREACH(te, lpm_list, next) {
        lpm = te->data;
        if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
            break;
    }
    lpm = NULL;
    if (te != NULL) {
        rte_errno = EEXIST;
        goto exit;
    }

    /* Allocate tailq entry. */
    te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
    if (te == NULL) {
        RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
        rte_errno = ENOMEM;
        goto exit;
    }

    /* Allocate memory to store the LPM data structures. */
    lpm = rte_zmalloc_socket(mem_name, mem_size,
            RTE_CACHE_LINE_SIZE, socket_id);
    if (lpm == NULL) {
        RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
        rte_free(te);
        rte_errno = ENOMEM;
        goto exit;
    }

    lpm->rules_tbl = rte_zmalloc_socket(NULL,
            (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
    if (lpm->rules_tbl == NULL) {
        RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
        rte_free(lpm);
        lpm = NULL;
        rte_free(te);
        rte_errno = ENOMEM;
        goto exit;
    }

    lpm->tbl8 = rte_zmalloc_socket(NULL,
            (size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);
    if (lpm->tbl8 == NULL) {
        RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
        rte_free(lpm->rules_tbl);
        rte_free(lpm);
        lpm = NULL;
        rte_free(te);
        rte_errno = ENOMEM;
        goto exit;
    }

    /* Save user arguments. */
    lpm->max_rules = config->max_rules;
    lpm->number_tbl8s = config->number_tbl8s;
    snprintf(lpm->name, sizeof(lpm->name), "%s", name);

    te->data = (void *)lpm;

    TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
    rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

    return lpm;
}
BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04);
MAP_STATIC_SYMBOL(
    struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
            const struct rte_lpm_config *config), rte_lpm_create_v1604);
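/*
 * Usage sketch for table creation (illustrative; the name "lpm0" and the
 * sizing values are arbitrary, and error handling is minimal):
 *
 *     struct rte_lpm_config cfg = {
 *         .max_rules = 1024,
 *         .number_tbl8s = 256,
 *         .flags = 0,
 *     };
 *     struct rte_lpm *lpm = rte_lpm_create("lpm0", SOCKET_ID_ANY, &cfg);
 *     if (lpm == NULL)
 *         rte_exit(EXIT_FAILURE, "LPM creation failed: %d\n", rte_errno);
 */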
/*
 * Deallocates memory for given LPM table.
 */
void
rte_lpm_free_v20(struct rte_lpm_v20 *lpm)
{
    struct rte_lpm_list *lpm_list;
    struct rte_tailq_entry *te;

    /* Check user arguments. */
    if (lpm == NULL)
        return;

    lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

    rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

    /* find our tailq entry */
    TAILQ_FOREACH(te, lpm_list, next) {
        if (te->data == (void *)lpm)
            break;
    }
    if (te != NULL)
        TAILQ_REMOVE(lpm_list, te, next);

    rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

    rte_free(lpm);
    rte_free(te);
}
VERSION_SYMBOL(rte_lpm_free, _v20, 2.0);
void
rte_lpm_free_v1604(struct rte_lpm *lpm)
{
    struct rte_lpm_list *lpm_list;
    struct rte_tailq_entry *te;

    /* Check user arguments. */
    if (lpm == NULL)
        return;

    lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

    rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

    /* find our tailq entry */
    TAILQ_FOREACH(te, lpm_list, next) {
        if (te->data == (void *)lpm)
            break;
    }
    if (te != NULL)
        TAILQ_REMOVE(lpm_list, te, next);

    rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

    rte_free(lpm->tbl8);
    rte_free(lpm->rules_tbl);
    rte_free(lpm);
    rte_free(te);
}
BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04);
MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm),
        rte_lpm_free_v1604);
/*
 * Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules that
 * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
 * prefixes with a depth of 1, etc.). In the following code (depth - 1) is used
 * as the index for depth 1 because, even though valid depths range from
 * 1 - 32, they are stored in the rule table at indexes 0 - 31.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline int32_t
rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
    uint8_t next_hop)
{
    uint32_t rule_gindex, rule_index, last_rule;
    int i;

    VERIFY_DEPTH(depth);

    /* Scan through rule group to see if rule already exists. */
    if (lpm->rule_info[depth - 1].used_rules > 0) {

        /* rule_gindex stands for rule group index. */
        rule_gindex = lpm->rule_info[depth - 1].first_rule;
        /* Initialise rule_index to point to start of rule group. */
        rule_index = rule_gindex;
        /* Last rule = Last used rule in this rule group. */
        last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

        for (; rule_index < last_rule; rule_index++) {

            /* If rule already exists update its next_hop and return. */
            if (lpm->rules_tbl[rule_index].ip == ip_masked) {
                lpm->rules_tbl[rule_index].next_hop = next_hop;
                return rule_index;
            }
        }

        if (rule_index == lpm->max_rules)
            return -ENOSPC;
    } else {
        /* Calculate the position in which the rule will be stored. */
        rule_index = 0;

        for (i = depth - 1; i > 0; i--) {
            if (lpm->rule_info[i - 1].used_rules > 0) {
                rule_index = lpm->rule_info[i - 1].first_rule
                        + lpm->rule_info[i - 1].used_rules;
                break;
            }
        }
        if (rule_index == lpm->max_rules)
            return -ENOSPC;

        lpm->rule_info[depth - 1].first_rule = rule_index;
    }

    /* Make room for the new rule in the array. */
    for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
        if (lpm->rule_info[i - 1].first_rule
                + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
            return -ENOSPC;

        if (lpm->rule_info[i - 1].used_rules > 0) {
            lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
                + lpm->rule_info[i - 1].used_rules]
                    = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
            lpm->rule_info[i - 1].first_rule++;
        }
    }

    /* Add the new rule. */
    lpm->rules_tbl[rule_index].ip = ip_masked;
    lpm->rules_tbl[rule_index].next_hop = next_hop;

    /* Increment the used rules counter for this rule group. */
    lpm->rule_info[depth - 1].used_rules++;

    return rule_index;
}
static inline int32_t
rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
    uint32_t next_hop)
{
    uint32_t rule_gindex, rule_index, last_rule;
    int i;

    VERIFY_DEPTH(depth);

    /* Scan through rule group to see if rule already exists. */
    if (lpm->rule_info[depth - 1].used_rules > 0) {

        /* rule_gindex stands for rule group index. */
        rule_gindex = lpm->rule_info[depth - 1].first_rule;
        /* Initialise rule_index to point to start of rule group. */
        rule_index = rule_gindex;
        /* Last rule = Last used rule in this rule group. */
        last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

        for (; rule_index < last_rule; rule_index++) {

            /* If rule already exists update its next_hop and return. */
            if (lpm->rules_tbl[rule_index].ip == ip_masked) {
                lpm->rules_tbl[rule_index].next_hop = next_hop;
                return rule_index;
            }
        }

        if (rule_index == lpm->max_rules)
            return -ENOSPC;
    } else {
        /* Calculate the position in which the rule will be stored. */
        rule_index = 0;

        for (i = depth - 1; i > 0; i--) {
            if (lpm->rule_info[i - 1].used_rules > 0) {
                rule_index = lpm->rule_info[i - 1].first_rule
                        + lpm->rule_info[i - 1].used_rules;
                break;
            }
        }
        if (rule_index == lpm->max_rules)
            return -ENOSPC;

        lpm->rule_info[depth - 1].first_rule = rule_index;
    }

    /* Make room for the new rule in the array. */
    for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
        if (lpm->rule_info[i - 1].first_rule
                + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
            return -ENOSPC;

        if (lpm->rule_info[i - 1].used_rules > 0) {
            lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
                + lpm->rule_info[i - 1].used_rules]
                    = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
            lpm->rule_info[i - 1].first_rule++;
        }
    }

    /* Add the new rule. */
    lpm->rules_tbl[rule_index].ip = ip_masked;
    lpm->rules_tbl[rule_index].next_hop = next_hop;

    /* Increment the used rules counter for this rule group. */
    lpm->rule_info[depth - 1].used_rules++;

    return rule_index;
}
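/*
 * Sketch of the rule-table invariant maintained by rule_add_*() and
 * rule_delete_*() (a hypothetical state with rules at depths 8, 16 and 24;
 * the indexes are arbitrary):
 *
 *     rules_tbl: [ /8 /8 | /16 /16 /16 | /24 ]
 *                 ^first_rule[7]        ^first_rule[23]
 *                         ^first_rule[15]
 *
 * Each depth group occupies a contiguous slice. Inserting into a group
 * shifts one boundary rule of every deeper group right; deleting shifts
 * one back left, so the slices stay packed.
 */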
/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline void
rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth)
{
    int i;

    VERIFY_DEPTH(depth);

    lpm->rules_tbl[rule_index] =
            lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
            + lpm->rule_info[depth - 1].used_rules - 1];

    for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
        if (lpm->rule_info[i].used_rules > 0) {
            lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
                lpm->rules_tbl[lpm->rule_info[i].first_rule
                    + lpm->rule_info[i].used_rules - 1];
            lpm->rule_info[i].first_rule--;
        }
    }

    lpm->rule_info[depth - 1].used_rules--;
}
static inline void
rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
{
    int i;

    VERIFY_DEPTH(depth);

    lpm->rules_tbl[rule_index] =
            lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
            + lpm->rule_info[depth - 1].used_rules - 1];

    for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
        if (lpm->rule_info[i].used_rules > 0) {
            lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
                lpm->rules_tbl[lpm->rule_info[i].first_rule
                    + lpm->rule_info[i].used_rules - 1];
            lpm->rule_info[i].first_rule--;
        }
    }

    lpm->rule_info[depth - 1].used_rules--;
}
/*
 * Finds a rule in rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline int32_t
rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)
{
    uint32_t rule_gindex, last_rule, rule_index;

    VERIFY_DEPTH(depth);

    rule_gindex = lpm->rule_info[depth - 1].first_rule;
    last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

    /* Scan used rules at given depth to find rule. */
    for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
        /* If rule is found return the rule index. */
        if (lpm->rules_tbl[rule_index].ip == ip_masked)
            return rule_index;
    }

    /* If rule is not found return -EINVAL. */
    return -EINVAL;
}
static inline int32_t
rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
{
    uint32_t rule_gindex, last_rule, rule_index;

    VERIFY_DEPTH(depth);

    rule_gindex = lpm->rule_info[depth - 1].first_rule;
    last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

    /* Scan used rules at given depth to find rule. */
    for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
        /* If rule is found return the rule index. */
        if (lpm->rules_tbl[rule_index].ip == ip_masked)
            return rule_index;
    }

    /* If rule is not found return -EINVAL. */
    return -EINVAL;
}
/*
 * Find, clean and allocate a tbl8.
 */
static inline int32_t
tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
{
    uint32_t group_idx; /* tbl8 group index. */
    struct rte_lpm_tbl_entry_v20 *tbl8_entry;

    /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
    for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
            group_idx++) {
        tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
        /* If a free tbl8 group is found clean it and set as VALID. */
        if (!tbl8_entry->valid_group) {
            memset(&tbl8_entry[0], 0,
                    RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
                    sizeof(tbl8_entry[0]));

            tbl8_entry->valid_group = VALID;

            /* Return group index for allocated tbl8 group. */
            return group_idx;
        }
    }

    /* If there are no tbl8 groups free then return error. */
    return -ENOSPC;
}
static inline int32_t
tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
{
    uint32_t group_idx; /* tbl8 group index. */
    struct rte_lpm_tbl_entry *tbl8_entry;

    /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
    for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
        tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
        /* If a free tbl8 group is found clean it and set as VALID. */
        if (!tbl8_entry->valid_group) {
            memset(&tbl8_entry[0], 0,
                    RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
                    sizeof(tbl8_entry[0]));

            tbl8_entry->valid_group = VALID;

            /* Return group index for allocated tbl8 group. */
            return group_idx;
        }
    }

    /* If there are no tbl8 groups free then return error. */
    return -ENOSPC;
}
static inline void
tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
{
    /* Set tbl8 group invalid. */
    tbl8[tbl8_group_start].valid_group = INVALID;
}

static inline void
tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
{
    /* Set tbl8 group invalid. */
    tbl8[tbl8_group_start].valid_group = INVALID;
}
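/*
 * Index arithmetic used throughout the add/delete paths below (a summary
 * of what the code already does, not additional API): for an IPv4 address
 * ip in host byte order,
 *
 *     tbl24_index = ip >> 8;                                -- top 24 bits
 *     tbl8_index  = group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES
 *                   + (ip & 0xFF);                          -- bottom 8 bits
 *
 * A tbl24 entry either stores a next hop directly (depth <= 24) or, when
 * extended (valid_group == 1), points at a 256-entry tbl8 group that
 * resolves the final byte.
 */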
static inline int32_t
add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
        uint8_t next_hop)
{
    uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

    /* Calculate the index into Table24. */
    tbl24_index = ip >> 8;
    tbl24_range = depth_to_range(depth);

    for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
        /*
         * For invalid OR valid and non-extended tbl 24 entries set
         * the entry.
         */
        if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
                lpm->tbl24[i].depth <= depth)) {

            struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
                .valid = VALID,
                .valid_group = 0,
                .depth = depth,
            };
            new_tbl24_entry.next_hop = next_hop;

            /* Setting tbl24 entry in one go to avoid race
             * conditions
             */
            lpm->tbl24[i] = new_tbl24_entry;
            continue;
        }

        if (lpm->tbl24[i].valid_group == 1) {
            /* If tbl24 entry is valid and extended calculate the
             * index into tbl8.
             */
            tbl8_index = lpm->tbl24[i].group_idx *
                    RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
            tbl8_group_end = tbl8_index +
                    RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

            for (j = tbl8_index; j < tbl8_group_end; j++) {
                if (!lpm->tbl8[j].valid ||
                        lpm->tbl8[j].depth <= depth) {
                    struct rte_lpm_tbl_entry_v20
                        new_tbl8_entry = {
                        .valid = VALID,
                        .valid_group = VALID,
                        .depth = depth,
                    };
                    new_tbl8_entry.next_hop = next_hop;

                    /*
                     * Setting tbl8 entry in one go to avoid
                     * race conditions
                     */
                    lpm->tbl8[j] = new_tbl8_entry;
                }
            }
        }
    }

    return 0;
}
static inline int32_t
add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
        uint32_t next_hop)
{
#define group_idx next_hop
    uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

    /* Calculate the index into Table24. */
    tbl24_index = ip >> 8;
    tbl24_range = depth_to_range(depth);

    for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
        /*
         * For invalid OR valid and non-extended tbl 24 entries set
         * the entry.
         */
        if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
                lpm->tbl24[i].depth <= depth)) {

            struct rte_lpm_tbl_entry new_tbl24_entry = {
                .next_hop = next_hop,
                .valid = VALID,
                .valid_group = 0,
                .depth = depth,
            };

            /* Setting tbl24 entry in one go to avoid race
             * conditions
             */
            lpm->tbl24[i] = new_tbl24_entry;
            continue;
        }

        if (lpm->tbl24[i].valid_group == 1) {
            /* If tbl24 entry is valid and extended calculate the
             * index into tbl8.
             */
            tbl8_index = lpm->tbl24[i].group_idx *
                    RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
            tbl8_group_end = tbl8_index +
                    RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

            for (j = tbl8_index; j < tbl8_group_end; j++) {
                if (!lpm->tbl8[j].valid ||
                        lpm->tbl8[j].depth <= depth) {
                    struct rte_lpm_tbl_entry
                        new_tbl8_entry = {
                        .valid = VALID,
                        .valid_group = VALID,
                        .depth = depth,
                        .next_hop = next_hop,
                    };

                    /*
                     * Setting tbl8 entry in one go to avoid
                     * race conditions
                     */
                    lpm->tbl8[j] = new_tbl8_entry;
                }
            }
        }
    }
#undef group_idx
    return 0;
}
static inline int32_t
add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
        uint8_t next_hop)
{
    uint32_t tbl24_index;
    int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
            tbl8_range, i;

    tbl24_index = (ip_masked >> 8);
    tbl8_range = depth_to_range(depth);

    if (!lpm->tbl24[tbl24_index].valid) {
        /* Search for a free tbl8 group. */
        tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);

        /* Check tbl8 allocation was successful. */
        if (tbl8_group_index < 0) {
            return tbl8_group_index;
        }

        /* Find index into tbl8 and range. */
        tbl8_index = (tbl8_group_index *
                RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
                (ip_masked & 0xFF);

        /* Set tbl8 entry. */
        for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
            lpm->tbl8[i].depth = depth;
            lpm->tbl8[i].next_hop = next_hop;
            lpm->tbl8[i].valid = VALID;
        }

        /*
         * Update tbl24 entry to point to new tbl8 entry. Note: The
         * ext_flag and tbl8_index need to be updated simultaneously,
         * so assign whole structure in one go
         */
        struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
            .group_idx = (uint8_t)tbl8_group_index,
            .valid = VALID,
            .valid_group = 1,
            .depth = 0,
        };

        lpm->tbl24[tbl24_index] = new_tbl24_entry;

    } /* If valid entry but not extended calculate the index into Table8. */
    else if (lpm->tbl24[tbl24_index].valid_group == 0) {
        /* Search for free tbl8 group. */
        tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);

        if (tbl8_group_index < 0) {
            return tbl8_group_index;
        }

        tbl8_group_start = tbl8_group_index *
                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
        tbl8_group_end = tbl8_group_start +
                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

        /* Populate new tbl8 with tbl24 value. */
        for (i = tbl8_group_start; i < tbl8_group_end; i++) {
            lpm->tbl8[i].valid = VALID;
            lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
            lpm->tbl8[i].next_hop =
                    lpm->tbl24[tbl24_index].next_hop;
        }

        tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

        /* Insert new rule into the tbl8 entry. */
        for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
            lpm->tbl8[i].valid = VALID;
            lpm->tbl8[i].depth = depth;
            lpm->tbl8[i].next_hop = next_hop;
        }

        /*
         * Update tbl24 entry to point to new tbl8 entry. Note: The
         * ext_flag and tbl8_index need to be updated simultaneously,
         * so assign whole structure in one go.
         */
        struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
            .group_idx = (uint8_t)tbl8_group_index,
            .valid = VALID,
            .valid_group = 1,
            .depth = 0,
        };

        lpm->tbl24[tbl24_index] = new_tbl24_entry;

    } else { /*
              * If it is valid, extended entry calculate the index into tbl8.
              */
        tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
        tbl8_group_start = tbl8_group_index *
                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
        tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

        for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

            if (!lpm->tbl8[i].valid ||
                    lpm->tbl8[i].depth <= depth) {
                struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
                    .valid = VALID,
                    .depth = depth,
                    .valid_group = lpm->tbl8[i].valid_group,
                };
                new_tbl8_entry.next_hop = next_hop;
                /*
                 * Setting tbl8 entry in one go to avoid race
                 * condition
                 */
                lpm->tbl8[i] = new_tbl8_entry;
            }
        }
    }

    return 0;
}
static inline int32_t
add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
        uint32_t next_hop)
{
#define group_idx next_hop
    uint32_t tbl24_index;
    int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
            tbl8_range, i;

    tbl24_index = (ip_masked >> 8);
    tbl8_range = depth_to_range(depth);

    if (!lpm->tbl24[tbl24_index].valid) {
        /* Search for a free tbl8 group. */
        tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);

        /* Check tbl8 allocation was successful. */
        if (tbl8_group_index < 0) {
            return tbl8_group_index;
        }

        /* Find index into tbl8 and range. */
        tbl8_index = (tbl8_group_index *
                RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
                (ip_masked & 0xFF);

        /* Set tbl8 entry. */
        for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
            lpm->tbl8[i].depth = depth;
            lpm->tbl8[i].next_hop = next_hop;
            lpm->tbl8[i].valid = VALID;
        }

        /*
         * Update tbl24 entry to point to new tbl8 entry. Note: The
         * ext_flag and tbl8_index need to be updated simultaneously,
         * so assign whole structure in one go
         */
        struct rte_lpm_tbl_entry new_tbl24_entry = {
            .group_idx = tbl8_group_index,
            .valid = VALID,
            .valid_group = 1,
            .depth = 0,
        };

        lpm->tbl24[tbl24_index] = new_tbl24_entry;

    } /* If valid entry but not extended calculate the index into Table8. */
    else if (lpm->tbl24[tbl24_index].valid_group == 0) {
        /* Search for free tbl8 group. */
        tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);

        if (tbl8_group_index < 0) {
            return tbl8_group_index;
        }

        tbl8_group_start = tbl8_group_index *
                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
        tbl8_group_end = tbl8_group_start +
                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

        /* Populate new tbl8 with tbl24 value. */
        for (i = tbl8_group_start; i < tbl8_group_end; i++) {
            lpm->tbl8[i].valid = VALID;
            lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
            lpm->tbl8[i].next_hop =
                    lpm->tbl24[tbl24_index].next_hop;
        }

        tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

        /* Insert new rule into the tbl8 entry. */
        for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
            lpm->tbl8[i].valid = VALID;
            lpm->tbl8[i].depth = depth;
            lpm->tbl8[i].next_hop = next_hop;
        }

        /*
         * Update tbl24 entry to point to new tbl8 entry. Note: The
         * ext_flag and tbl8_index need to be updated simultaneously,
         * so assign whole structure in one go.
         */
        struct rte_lpm_tbl_entry new_tbl24_entry = {
            .group_idx = tbl8_group_index,
            .valid = VALID,
            .valid_group = 1,
            .depth = 0,
        };

        lpm->tbl24[tbl24_index] = new_tbl24_entry;

    } else { /*
              * If it is valid, extended entry calculate the index into tbl8.
              */
        tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
        tbl8_group_start = tbl8_group_index *
                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
        tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

        for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

            if (!lpm->tbl8[i].valid ||
                    lpm->tbl8[i].depth <= depth) {
                struct rte_lpm_tbl_entry new_tbl8_entry = {
                    .valid = VALID,
                    .depth = depth,
                    .next_hop = next_hop,
                    .valid_group = lpm->tbl8[i].valid_group,
                };

                /*
                 * Setting tbl8 entry in one go to avoid race
                 * condition
                 */
                lpm->tbl8[i] = new_tbl8_entry;
            }
        }
    }
#undef group_idx
    return 0;
}
/*
 * Add a route
 */
int
rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
        uint8_t next_hop)
{
    int32_t rule_index, status = 0;
    uint32_t ip_masked;

    /* Check user arguments. */
    if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
        return -EINVAL;

    ip_masked = ip & depth_to_mask(depth);

    /* Add the rule to the rule table. */
    rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop);

    /* If there is no space available for the new rule return error. */
    if (rule_index < 0) {
        return rule_index;
    }

    if (depth <= MAX_DEPTH_TBL24) {
        status = add_depth_small_v20(lpm, ip_masked, depth, next_hop);
    } else { /* If depth > MAX_DEPTH_TBL24 */
        status = add_depth_big_v20(lpm, ip_masked, depth, next_hop);

        /*
         * If add fails due to exhaustion of tbl8 extensions delete
         * rule that was added to rule table.
         */
        if (status < 0) {
            rule_delete_v20(lpm, rule_index, depth);
            return status;
        }
    }

    return 0;
}
VERSION_SYMBOL(rte_lpm_add, _v20, 2.0);
int
rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
        uint32_t next_hop)
{
    int32_t rule_index, status = 0;
    uint32_t ip_masked;

    /* Check user arguments. */
    if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
        return -EINVAL;

    ip_masked = ip & depth_to_mask(depth);

    /* Add the rule to the rule table. */
    rule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop);

    /* If there is no space available for the new rule return error. */
    if (rule_index < 0) {
        return rule_index;
    }

    if (depth <= MAX_DEPTH_TBL24) {
        status = add_depth_small_v1604(lpm, ip_masked, depth, next_hop);
    } else { /* If depth > MAX_DEPTH_TBL24 */
        status = add_depth_big_v1604(lpm, ip_masked, depth, next_hop);

        /*
         * If add fails due to exhaustion of tbl8 extensions delete
         * rule that was added to rule table.
         */
        if (status < 0) {
            rule_delete_v1604(lpm, rule_index, depth);
            return status;
        }
    }

    return 0;
}
BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip,
        uint8_t depth, uint32_t next_hop), rte_lpm_add_v1604);
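/*
 * End-to-end sketch (illustrative; the address constants are arbitrary,
 * addresses are host byte order, and rte_lpm_lookup() is the inline helper
 * declared in rte_lpm.h):
 *
 *     uint32_t nh;
 *
 *     rte_lpm_add(lpm, 0x0a000000, 8, 5);     -- 10.0.0.0/8  -> next hop 5
 *     rte_lpm_add(lpm, 0x0a010000, 16, 7);    -- 10.1.0.0/16 -> next hop 7
 *     if (rte_lpm_lookup(lpm, 0x0a010203, &nh) == 0)
 *         assert(nh == 7);                    -- longest prefix wins
 */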
/*
 * Look for a rule in the high-level rules table
 */
int
rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
        uint8_t *next_hop)
{
    uint32_t ip_masked;
    int32_t rule_index;

    /* Check user arguments. */
    if ((lpm == NULL) ||
            (next_hop == NULL) ||
            (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
        return -EINVAL;

    /* Look for the rule using rule_find. */
    ip_masked = ip & depth_to_mask(depth);
    rule_index = rule_find_v20(lpm, ip_masked, depth);

    if (rule_index >= 0) {
        *next_hop = lpm->rules_tbl[rule_index].next_hop;
        return 1;
    }

    /* If rule is not found return 0. */
    return 0;
}
VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0);
int
rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
        uint32_t *next_hop)
{
    uint32_t ip_masked;
    int32_t rule_index;

    /* Check user arguments. */
    if ((lpm == NULL) ||
            (next_hop == NULL) ||
            (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
        return -EINVAL;

    /* Look for the rule using rule_find. */
    ip_masked = ip & depth_to_mask(depth);
    rule_index = rule_find_v1604(lpm, ip_masked, depth);

    if (rule_index >= 0) {
        *next_hop = lpm->rules_tbl[rule_index].next_hop;
        return 1;
    }

    /* If rule is not found return 0. */
    return 0;
}
BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,
        uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604);
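/*
 * Exact-match query sketch (continuing the add/lookup example above;
 * constants arbitrary). Unlike lookup, this checks the rule table for an
 * exact (prefix, depth) pair rather than walking tbl24/tbl8:
 *
 *     uint32_t nh;
 *     if (rte_lpm_is_rule_present(lpm, 0x0a000000, 8, &nh) == 1)
 *         assert(nh == 5);    -- the stored rule for 10.0.0.0/8
 */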
static inline int32_t
find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
        uint8_t *sub_rule_depth)
{
    int32_t rule_index;
    uint32_t ip_masked;
    uint8_t prev_depth;

    for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
        ip_masked = ip & depth_to_mask(prev_depth);
        rule_index = rule_find_v20(lpm, ip_masked, prev_depth);
        if (rule_index >= 0) {
            *sub_rule_depth = prev_depth;
            return rule_index;
        }
    }

    return -1;
}

static inline int32_t
find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
        uint8_t *sub_rule_depth)
{
    int32_t rule_index;
    uint32_t ip_masked;
    uint8_t prev_depth;

    for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
        ip_masked = ip & depth_to_mask(prev_depth);
        rule_index = rule_find_v1604(lpm, ip_masked, prev_depth);
        if (rule_index >= 0) {
            *sub_rule_depth = prev_depth;
            return rule_index;
        }
    }

    return -1;
}
static inline int32_t
delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
    uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
    uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

    /* Calculate the range and index into Table24. */
    tbl24_range = depth_to_range(depth);
    tbl24_index = (ip_masked >> 8);

    /*
     * Firstly check the sub_rule_index. A -1 indicates no replacement rule
     * and a positive number indicates a sub_rule_index.
     */
    if (sub_rule_index < 0) {
        /*
         * If no replacement rule exists then invalidate entries
         * associated with this rule.
         */
        for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

            if (lpm->tbl24[i].valid_group == 0 &&
                    lpm->tbl24[i].depth <= depth) {
                lpm->tbl24[i].valid = INVALID;
            } else if (lpm->tbl24[i].valid_group == 1) {
                /*
                 * If TBL24 entry is extended, then there has
                 * to be a rule with depth >= 25 in the
                 * associated TBL8 group.
                 */
                tbl8_group_index = lpm->tbl24[i].group_idx;
                tbl8_index = tbl8_group_index *
                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

                for (j = tbl8_index; j < (tbl8_index +
                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

                    if (lpm->tbl8[j].depth <= depth)
                        lpm->tbl8[j].valid = INVALID;
                }
            }
        }
    } else {
        /*
         * If a replacement rule exists then modify entries
         * associated with this rule.
         */
        struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
            .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
            .valid = VALID,
            .valid_group = 0,
            .depth = sub_rule_depth,
        };

        struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
            .valid = VALID,
            .valid_group = VALID,
            .depth = sub_rule_depth,
        };
        new_tbl8_entry.next_hop =
                lpm->rules_tbl[sub_rule_index].next_hop;

        for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

            if (lpm->tbl24[i].valid_group == 0 &&
                    lpm->tbl24[i].depth <= depth) {
                lpm->tbl24[i] = new_tbl24_entry;
            } else if (lpm->tbl24[i].valid_group == 1) {
                /*
                 * If TBL24 entry is extended, then there has
                 * to be a rule with depth >= 25 in the
                 * associated TBL8 group.
                 */
                tbl8_group_index = lpm->tbl24[i].group_idx;
                tbl8_index = tbl8_group_index *
                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

                for (j = tbl8_index; j < (tbl8_index +
                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

                    if (lpm->tbl8[j].depth <= depth)
                        lpm->tbl8[j] = new_tbl8_entry;
                }
            }
        }
    }

    return 0;
}
static inline int32_t
delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
    uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
#define group_idx next_hop
    uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

    /* Calculate the range and index into Table24. */
    tbl24_range = depth_to_range(depth);
    tbl24_index = (ip_masked >> 8);

    /*
     * Firstly check the sub_rule_index. A -1 indicates no replacement rule
     * and a positive number indicates a sub_rule_index.
     */
    if (sub_rule_index < 0) {
        /*
         * If no replacement rule exists then invalidate entries
         * associated with this rule.
         */
        for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

            if (lpm->tbl24[i].valid_group == 0 &&
                    lpm->tbl24[i].depth <= depth) {
                lpm->tbl24[i].valid = INVALID;
            } else if (lpm->tbl24[i].valid_group == 1) {
                /*
                 * If TBL24 entry is extended, then there has
                 * to be a rule with depth >= 25 in the
                 * associated TBL8 group.
                 */
                tbl8_group_index = lpm->tbl24[i].group_idx;
                tbl8_index = tbl8_group_index *
                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

                for (j = tbl8_index; j < (tbl8_index +
                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

                    if (lpm->tbl8[j].depth <= depth)
                        lpm->tbl8[j].valid = INVALID;
                }
            }
        }
    } else {
        /*
         * If a replacement rule exists then modify entries
         * associated with this rule.
         */
        struct rte_lpm_tbl_entry new_tbl24_entry = {
            .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
            .valid = VALID,
            .valid_group = 0,
            .depth = sub_rule_depth,
        };

        struct rte_lpm_tbl_entry new_tbl8_entry = {
            .valid = VALID,
            .valid_group = VALID,
            .depth = sub_rule_depth,
            .next_hop = lpm->rules_tbl
                    [sub_rule_index].next_hop,
        };

        for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

            if (lpm->tbl24[i].valid_group == 0 &&
                    lpm->tbl24[i].depth <= depth) {
                lpm->tbl24[i] = new_tbl24_entry;
            } else if (lpm->tbl24[i].valid_group == 1) {
                /*
                 * If TBL24 entry is extended, then there has
                 * to be a rule with depth >= 25 in the
                 * associated TBL8 group.
                 */
                tbl8_group_index = lpm->tbl24[i].group_idx;
                tbl8_index = tbl8_group_index *
                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

                for (j = tbl8_index; j < (tbl8_index +
                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

                    if (lpm->tbl8[j].depth <= depth)
                        lpm->tbl8[j] = new_tbl8_entry;
                }
            }
        }
    }
#undef group_idx
    return 0;
}
/*
 * Checks if table 8 group can be recycled.
 *
 * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
 * Return of -EINVAL means tbl8 is empty and thus can be recycled.
 * Return of value > -1 means tbl8 is in use but has all the same values and
 * thus can be recycled.
 */
static inline int32_t
tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
        uint32_t tbl8_group_start)
{
    uint32_t tbl8_group_end, i;
    tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

    /*
     * Check the first entry of the given tbl8. If it is invalid we know
     * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
     * (As they would affect all entries in a tbl8) and thus this table
     * can not be recycled.
     */
    if (tbl8[tbl8_group_start].valid) {
        /*
         * If first entry is valid check if the depth is less than 24
         * and if so check the rest of the entries to verify that they
         * are all of this depth.
         */
        if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
            for (i = (tbl8_group_start + 1); i < tbl8_group_end;
                    i++) {

                if (tbl8[i].depth !=
                        tbl8[tbl8_group_start].depth) {

                    return -EEXIST;
                }
            }
            /* If all entries are the same return the tbl8 index */
            return tbl8_group_start;
        }

        return -EEXIST;
    }
    /*
     * If the first entry is invalid check if the rest of the entries in
     * the tbl8 are invalid.
     */
    for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
        if (tbl8[i].valid)
            return -EEXIST;
    }
    /* If no valid entries are found then return -EINVAL. */
    return -EINVAL;
}
static inline int32_t
tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,
        uint32_t tbl8_group_start)
{
    uint32_t tbl8_group_end, i;
    tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

    /*
     * Check the first entry of the given tbl8. If it is invalid we know
     * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
     * (As they would affect all entries in a tbl8) and thus this table
     * can not be recycled.
     */
    if (tbl8[tbl8_group_start].valid) {
        /*
         * If first entry is valid check if the depth is less than 24
         * and if so check the rest of the entries to verify that they
         * are all of this depth.
         */
        if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
            for (i = (tbl8_group_start + 1); i < tbl8_group_end;
                    i++) {

                if (tbl8[i].depth !=
                        tbl8[tbl8_group_start].depth) {

                    return -EEXIST;
                }
            }
            /* If all entries are the same return the tbl8 index */
            return tbl8_group_start;
        }

        return -EEXIST;
    }
    /*
     * If the first entry is invalid check if the rest of the entries in
     * the tbl8 are invalid.
     */
    for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
        if (tbl8[i].valid)
            return -EEXIST;
    }
    /* If no valid entries are found then return -EINVAL. */
    return -EINVAL;
}
static inline int32_t
delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
    uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
    uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
            tbl8_range, i;
    int32_t tbl8_recycle_index;

    /*
     * Calculate the index into tbl24 and range. Note: All depths larger
     * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
     */
    tbl24_index = ip_masked >> 8;

    /* Calculate the index into tbl8 and range. */
    tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
    tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
    tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
    tbl8_range = depth_to_range(depth);

    if (sub_rule_index < 0) {
        /*
         * Loop through the range of entries on tbl8 for which the
         * rule_to_delete must be removed or modified.
         */
        for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
            if (lpm->tbl8[i].depth <= depth)
                lpm->tbl8[i].valid = INVALID;
        }
    } else {
        /* Set new tbl8 entry. */
        struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
            .valid = VALID,
            .depth = sub_rule_depth,
            .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
        };

        new_tbl8_entry.next_hop =
                lpm->rules_tbl[sub_rule_index].next_hop;
        /*
         * Loop through the range of entries on tbl8 for which the
         * rule_to_delete must be modified.
         */
        for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
            if (lpm->tbl8[i].depth <= depth)
                lpm->tbl8[i] = new_tbl8_entry;
        }
    }

    /*
     * Check if there are any valid entries in this tbl8 group. If all
     * tbl8 entries are invalid we can free the tbl8 and invalidate the
     * associated tbl24 entry.
     */
    tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);

    if (tbl8_recycle_index == -EINVAL) {
        /* Set tbl24 before freeing tbl8 to avoid race condition. */
        lpm->tbl24[tbl24_index].valid = 0;
        tbl8_free_v20(lpm->tbl8, tbl8_group_start);
    } else if (tbl8_recycle_index > -1) {
        /* Update tbl24 entry. */
        struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
            .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
            .valid = VALID,
            .valid_group = 0,
            .depth = lpm->tbl8[tbl8_recycle_index].depth,
        };

        /* Set tbl24 before freeing tbl8 to avoid race condition. */
        lpm->tbl24[tbl24_index] = new_tbl24_entry;
        tbl8_free_v20(lpm->tbl8, tbl8_group_start);
    }

    return 0;
}
static inline int32_t
delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
    uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
#define group_idx next_hop
    uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
            tbl8_range, i;
    int32_t tbl8_recycle_index;

    /*
     * Calculate the index into tbl24 and range. Note: All depths larger
     * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
     */
    tbl24_index = ip_masked >> 8;

    /* Calculate the index into tbl8 and range. */
    tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
    tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
    tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
    tbl8_range = depth_to_range(depth);

    if (sub_rule_index < 0) {
        /*
         * Loop through the range of entries on tbl8 for which the
         * rule_to_delete must be removed or modified.
         */
        for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
            if (lpm->tbl8[i].depth <= depth)
                lpm->tbl8[i].valid = INVALID;
        }
    } else {
        /* Set new tbl8 entry. */
        struct rte_lpm_tbl_entry new_tbl8_entry = {
            .valid = VALID,
            .depth = sub_rule_depth,
            .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
            .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
        };

        /*
         * Loop through the range of entries on tbl8 for which the
         * rule_to_delete must be modified.
         */
        for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
            if (lpm->tbl8[i].depth <= depth)
                lpm->tbl8[i] = new_tbl8_entry;
        }
    }

    /*
     * Check if there are any valid entries in this tbl8 group. If all
     * tbl8 entries are invalid we can free the tbl8 and invalidate the
     * associated tbl24 entry.
     */
    tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);

    if (tbl8_recycle_index == -EINVAL) {
        /* Set tbl24 before freeing tbl8 to avoid race condition. */
        lpm->tbl24[tbl24_index].valid = 0;
        tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
    } else if (tbl8_recycle_index > -1) {
        /* Update tbl24 entry. */
        struct rte_lpm_tbl_entry new_tbl24_entry = {
            .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
            .valid = VALID,
            .valid_group = 0,
            .depth = lpm->tbl8[tbl8_recycle_index].depth,
        };

        /* Set tbl24 before freeing tbl8 to avoid race condition. */
        lpm->tbl24[tbl24_index] = new_tbl24_entry;
        tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
    }
#undef group_idx
    return 0;
}
/*
 * Deletes a rule
 */
int
rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)
{
    int32_t rule_to_delete_index, sub_rule_index;
    uint32_t ip_masked;
    uint8_t sub_rule_depth;
    /*
     * Check input arguments. Note: IP must be a positive integer of 32
     * bits in length therefore it need not be checked.
     */
    if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
        return -EINVAL;
    }

    ip_masked = ip & depth_to_mask(depth);

    /*
     * Find the index of the input rule, that needs to be deleted, in the
     * rule table.
     */
    rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth);

    /*
     * Check if rule_to_delete_index was found. If no rule was found the
     * function rule_find returns -EINVAL.
     */
    if (rule_to_delete_index < 0)
        return -EINVAL;

    /* Delete the rule from the rule table. */
    rule_delete_v20(lpm, rule_to_delete_index, depth);

    /*
     * Find rule to replace the rule_to_delete. If there is no rule to
     * replace the rule_to_delete we return -1 and invalidate the table
     * entries associated with this rule.
     */
    sub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth);

    /*
     * If the input depth value is less than 25 use function
     * delete_depth_small otherwise use delete_depth_big.
     */
    if (depth <= MAX_DEPTH_TBL24) {
        return delete_depth_small_v20(lpm, ip_masked, depth,
                sub_rule_index, sub_rule_depth);
    } else { /* If depth > MAX_DEPTH_TBL24 */
        return delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index,
                sub_rule_depth);
    }
}
VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0);
int
rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
{
    int32_t rule_to_delete_index, sub_rule_index;
    uint32_t ip_masked;
    uint8_t sub_rule_depth;
    /*
     * Check input arguments. Note: IP must be a positive integer of 32
     * bits in length therefore it need not be checked.
     */
    if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
        return -EINVAL;
    }

    ip_masked = ip & depth_to_mask(depth);

    /*
     * Find the index of the input rule, that needs to be deleted, in the
     * rule table.
     */
    rule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth);

    /*
     * Check if rule_to_delete_index was found. If no rule was found the
     * function rule_find returns -EINVAL.
     */
    if (rule_to_delete_index < 0)
        return -EINVAL;

    /* Delete the rule from the rule table. */
    rule_delete_v1604(lpm, rule_to_delete_index, depth);

    /*
     * Find rule to replace the rule_to_delete. If there is no rule to
     * replace the rule_to_delete we return -1 and invalidate the table
     * entries associated with this rule.
     */
    sub_rule_index = find_previous_rule_v1604(lpm, ip, depth, &sub_rule_depth);

    /*
     * If the input depth value is less than 25 use function
     * delete_depth_small otherwise use delete_depth_big.
     */
    if (depth <= MAX_DEPTH_TBL24) {
        return delete_depth_small_v1604(lpm, ip_masked, depth,
                sub_rule_index, sub_rule_depth);
    } else { /* If depth > MAX_DEPTH_TBL24 */
        return delete_depth_big_v1604(lpm, ip_masked, depth, sub_rule_index,
                sub_rule_depth);
    }
}
BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip,
        uint8_t depth), rte_lpm_delete_v1604);
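/*
 * Deletion sketch (continuing the earlier add/lookup example; constants
 * arbitrary):
 *
 *     rte_lpm_delete(lpm, 0x0a010000, 16);
 *     -- 10.1.2.3 now falls back to the covering 10.0.0.0/8 rule, so a
 *     -- subsequent rte_lpm_lookup() stores 5 in *next_hop; internally
 *     -- the tables are repaired via find_previous_rule_v1604() above.
 */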
/*
 * Delete all rules from the LPM table.
 */
void
rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm)
{
    /* Zero rule information. */
    memset(lpm->rule_info, 0, sizeof(lpm->rule_info));

    /* Zero tbl24. */
    memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

    /* Zero tbl8. */
    memset(lpm->tbl8, 0, sizeof(lpm->tbl8));

    /* Delete all rules from the rules table. */
    memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
}
VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0);
void
rte_lpm_delete_all_v1604(struct rte_lpm *lpm)
{
    /* Zero rule information. */
    memset(lpm->rule_info, 0, sizeof(lpm->rule_info));

    /* Zero tbl24. */
    memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

    /* Zero tbl8. */
    memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
            * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);

    /* Delete all rules from the rules table. */
    memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
}
BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04);
MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm),
        rte_lpm_delete_all_v1604);