/*
 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>

#include "rte_lpm.h"

TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm_tailq = {
	.name = "RTE_LPM",
};
EAL_REGISTER_TAILQ(rte_lpm_tailq)

#define MAX_DEPTH_TBL24 24

enum valid_flag {
	INVALID = 0,
	VALID
};

/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
#define VERIFY_DEPTH(depth) do {                                \
	if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
		rte_panic("LPM: Invalid depth (%u) at line %d", \
				(unsigned)(depth), __LINE__);   \
} while (0)
#else
#define VERIFY_DEPTH(depth)
#endif

/*
 * Converts a given depth value to its corresponding mask value.
 *
 * depth  (IN)  : range = 1 - 32
 * mask   (OUT) : 32bit mask
 */
static uint32_t __attribute__((pure))
depth_to_mask(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/* To calculate a mask start with a 1 in the most-significant bit and
	 * arithmetic-right-shift it so the high-order bits fill with 1's.
	 */
	return (int)0x80000000 >> (depth - 1);
}

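/*
 * Worked example (added for illustration; not in the upstream sources):
 *   depth = 8  -> (int)0x80000000 >> 7  == 0xFF000000 (a /8 netmask)
 *   depth = 24 -> (int)0x80000000 >> 23 == 0xFFFFFF00 (a /24 netmask)
 *   depth = 32 -> (int)0x80000000 >> 31 == 0xFFFFFFFF (a host route)
 * The cast to a signed int is what makes the right shift arithmetic, so
 * the vacated high-order bits are filled with 1's.
 */
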
/*
 * Converts given depth value to its corresponding range value.
 */
static inline uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/*
	 * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
	 */
	if (depth <= MAX_DEPTH_TBL24)
		return 1 << (MAX_DEPTH_TBL24 - depth);

	/* Else if depth is greater than 24 */
	return 1 << (RTE_LPM_MAX_DEPTH - depth);
}

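/*
 * Worked example (added for illustration; not in the upstream sources):
 * a /16 rule covers 1 << (24 - 16) = 256 consecutive tbl24 entries,
 * a /24 rule covers exactly 1 << (24 - 24) = 1 tbl24 entry, and
 * a /28 rule covers 1 << (32 - 28) = 16 consecutive entries within a
 * single 256-entry tbl8 group.
 */
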
/*
 * Find an existing lpm table and return a pointer to it.
 */
struct rte_lpm_v20 *
rte_lpm_find_existing_v20(const char *name)
{
	struct rte_lpm_v20 *l = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, lpm_list, next) {
		l = (struct rte_lpm_v20 *) te->data;
		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return l;
}
VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0);

struct rte_lpm *
rte_lpm_find_existing_v1604(const char *name)
{
	struct rte_lpm *l = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, lpm_list, next) {
		l = (struct rte_lpm *) te->data;
		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return l;
}
BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04);
MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name),
		rte_lpm_find_existing_v1604);

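/*
 * Usage sketch (added for illustration; the table name is a made-up
 * example): any process that knows the name can look the table up, e.g.
 * a secondary process attaching to a table created by the primary.
 *
 *	struct rte_lpm *lpm = rte_lpm_find_existing("example_lpm");
 *
 *	if (lpm == NULL) {
 *		// not found; rte_errno has been set to ENOENT
 *	}
 */
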
/*
 * Allocates memory for LPM object
 */
struct rte_lpm_v20 *
rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
		__rte_unused int flags)
{
	char mem_name[RTE_LPM_NAMESIZE];
	struct rte_lpm_v20 *lpm = NULL;
	struct rte_tailq_entry *te;
	uint32_t mem_size;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* Guarantee there's no existing table with this name. */
	TAILQ_FOREACH(te, lpm_list, next) {
		lpm = (struct rte_lpm_v20 *) te->data;
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}

	lpm = NULL;
	if (te != NULL)
		goto exit;

	/* Allocate tailq entry. */
	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
		goto exit;
	}

	/* Allocate memory to store the LPM data structures. */
	lpm = (struct rte_lpm_v20 *)rte_zmalloc_socket(mem_name, mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		rte_free(te);
		goto exit;
	}

	/* Save user arguments. */
	lpm->max_rules = max_rules;
	snprintf(lpm->name, sizeof(lpm->name), "%s", name);

	te->data = (void *) lpm;

	TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return lpm;
}
VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);

struct rte_lpm *
rte_lpm_create_v1604(const char *name, int socket_id, int max_rules,
		__rte_unused int flags)
{
	char mem_name[RTE_LPM_NAMESIZE];
	struct rte_lpm *lpm = NULL;
	struct rte_tailq_entry *te;
	uint32_t mem_size;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* Guarantee there's no existing table with this name. */
	TAILQ_FOREACH(te, lpm_list, next) {
		lpm = (struct rte_lpm *) te->data;
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}

	lpm = NULL;
	if (te != NULL)
		goto exit;

	/* Allocate tailq entry. */
	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
		goto exit;
	}

	/* Allocate memory to store the LPM data structures. */
	lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		rte_free(te);
		goto exit;
	}

	/* Save user arguments. */
	lpm->max_rules = max_rules;
	snprintf(lpm->name, sizeof(lpm->name), "%s", name);

	te->data = (void *) lpm;

	TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return lpm;
}
BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04);
MAP_STATIC_SYMBOL(
	struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
			int max_rules, int flags), rte_lpm_create_v1604);

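/*
 * Usage sketch (added for illustration; the name, rule count and flags
 * are arbitrary examples): create a table that can hold 1024 rules on
 * any NUMA socket. The flags argument is currently unused.
 *
 *	struct rte_lpm *lpm;
 *
 *	lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY, 1024, 0);
 *	if (lpm == NULL)
 *		rte_exit(EXIT_FAILURE, "Cannot create LPM table\n");
 */
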
/*
 * Deallocates memory for given LPM table.
 */
void
rte_lpm_free_v20(struct rte_lpm_v20 *lpm)
{
	struct rte_lpm_list *lpm_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */
	if (lpm == NULL)
		return;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* Find our tailq entry. */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *) lpm)
			break;
	}
	if (te == NULL) {
		rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
		return;
	}

	TAILQ_REMOVE(lpm_list, te, next);

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	rte_free(lpm);
	rte_free(te);
}
VERSION_SYMBOL(rte_lpm_free, _v20, 2.0);

void
rte_lpm_free_v1604(struct rte_lpm *lpm)
{
	struct rte_lpm_list *lpm_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */
	if (lpm == NULL)
		return;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* Find our tailq entry. */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *) lpm)
			break;
	}
	if (te == NULL) {
		rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
		return;
	}

	TAILQ_REMOVE(lpm_list, te, next);

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	rte_free(lpm);
	rte_free(te);
}
BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04);
MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm),
		rte_lpm_free_v1604);

/*
 * Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules that
 * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
 * prefixes with a depth of 1 etc.). In the following code (depth - 1) is used
 * as the group index because, although the depth range is 1 - 32, groups are
 * indexed from 0 - 31.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline int32_t
rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
	uint8_t next_hop)
{
	uint32_t rule_gindex, rule_index, last_rule;
	int i;

	VERIFY_DEPTH(depth);

	/* Scan through rule group to see if rule already exists. */
	if (lpm->rule_info[depth - 1].used_rules > 0) {

		/* rule_gindex stands for rule group index. */
		rule_gindex = lpm->rule_info[depth - 1].first_rule;
		/* Initialise rule_index to point to start of rule group. */
		rule_index = rule_gindex;
		/* Last rule = Last used rule in this rule group. */
		last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

		for (; rule_index < last_rule; rule_index++) {

			/* If rule already exists update its next_hop and return. */
			if (lpm->rules_tbl[rule_index].ip == ip_masked) {
				lpm->rules_tbl[rule_index].next_hop = next_hop;

				return rule_index;
			}
		}

		if (rule_index == lpm->max_rules)
			return -ENOSPC;
	} else {
		/* Calculate the position in which the rule will be stored. */
		rule_index = 0;

		for (i = depth - 1; i > 0; i--) {
			if (lpm->rule_info[i - 1].used_rules > 0) {
				rule_index = lpm->rule_info[i - 1].first_rule
						+ lpm->rule_info[i - 1].used_rules;
				break;
			}
		}
		if (rule_index == lpm->max_rules)
			return -ENOSPC;

		lpm->rule_info[depth - 1].first_rule = rule_index;
	}

	/* Make room for the new rule in the array. */
	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
		if (lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules == lpm->max_rules)
			return -ENOSPC;

		if (lpm->rule_info[i - 1].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules]
					= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
			lpm->rule_info[i - 1].first_rule++;
		}
	}

	/* Add the new rule. */
	lpm->rules_tbl[rule_index].ip = ip_masked;
	lpm->rules_tbl[rule_index].next_hop = next_hop;

	/* Increment the used rules counter for this rule group. */
	lpm->rule_info[depth - 1].used_rules++;

	return rule_index;
}

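/*
 * Illustrative layout (added for clarity; not in the upstream sources):
 * with one /8 rule, two /16 rules and one /24 rule, rules_tbl is packed
 * by depth group:
 *
 *	index: 0    1    2    3
 *	depth: /8   /16  /16  /24
 *
 *	rule_info[7]  = { .first_rule = 0, .used_rules = 1 }
 *	rule_info[15] = { .first_rule = 1, .used_rules = 2 }
 *	rule_info[23] = { .first_rule = 3, .used_rules = 1 }
 *
 * The "make room" loop above opens a slot for a new rule by rotating the
 * first entry of every deeper group to that group's end and bumping its
 * first_rule index.
 */
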
static inline int32_t
rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
	uint32_t next_hop)
{
	uint32_t rule_gindex, rule_index, last_rule;
	int i;

	VERIFY_DEPTH(depth);

	/* Scan through rule group to see if rule already exists. */
	if (lpm->rule_info[depth - 1].used_rules > 0) {

		/* rule_gindex stands for rule group index. */
		rule_gindex = lpm->rule_info[depth - 1].first_rule;
		/* Initialise rule_index to point to start of rule group. */
		rule_index = rule_gindex;
		/* Last rule = Last used rule in this rule group. */
		last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

		for (; rule_index < last_rule; rule_index++) {

			/* If rule already exists update its next_hop and return. */
			if (lpm->rules_tbl[rule_index].ip == ip_masked) {
				lpm->rules_tbl[rule_index].next_hop = next_hop;

				return rule_index;
			}
		}

		if (rule_index == lpm->max_rules)
			return -ENOSPC;
	} else {
		/* Calculate the position in which the rule will be stored. */
		rule_index = 0;

		for (i = depth - 1; i > 0; i--) {
			if (lpm->rule_info[i - 1].used_rules > 0) {
				rule_index = lpm->rule_info[i - 1].first_rule
						+ lpm->rule_info[i - 1].used_rules;
				break;
			}
		}
		if (rule_index == lpm->max_rules)
			return -ENOSPC;

		lpm->rule_info[depth - 1].first_rule = rule_index;
	}

	/* Make room for the new rule in the array. */
	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
		if (lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules == lpm->max_rules)
			return -ENOSPC;

		if (lpm->rule_info[i - 1].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules]
					= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
			lpm->rule_info[i - 1].first_rule++;
		}
	}

	/* Add the new rule. */
	lpm->rules_tbl[rule_index].ip = ip_masked;
	lpm->rules_tbl[rule_index].next_hop = next_hop;

	/* Increment the used rules counter for this rule group. */
	lpm->rule_info[depth - 1].used_rules++;

	return rule_index;
}

/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline void
rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth)
{
	int i;

	VERIFY_DEPTH(depth);

	lpm->rules_tbl[rule_index] =
			lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
				+ lpm->rule_info[depth - 1].used_rules - 1];

	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
		if (lpm->rule_info[i].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
					lpm->rules_tbl[lpm->rule_info[i].first_rule
						+ lpm->rule_info[i].used_rules - 1];
			lpm->rule_info[i].first_rule--;
		}
	}

	lpm->rule_info[depth - 1].used_rules--;
}

static inline void
rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
{
	int i;

	VERIFY_DEPTH(depth);

	lpm->rules_tbl[rule_index] =
			lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
				+ lpm->rule_info[depth - 1].used_rules - 1];

	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
		if (lpm->rule_info[i].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
					lpm->rules_tbl[lpm->rule_info[i].first_rule
						+ lpm->rule_info[i].used_rules - 1];
			lpm->rule_info[i].first_rule--;
		}
	}

	lpm->rule_info[depth - 1].used_rules--;
}

/*
 * Finds a rule in rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline int32_t
rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)
{
	uint32_t rule_gindex, last_rule, rule_index;

	VERIFY_DEPTH(depth);

	rule_gindex = lpm->rule_info[depth - 1].first_rule;
	last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

	/* Scan used rules at given depth to find rule. */
	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
		/* If rule is found return the rule index. */
		if (lpm->rules_tbl[rule_index].ip == ip_masked)
			return rule_index;
	}

	/* If rule is not found return -EINVAL. */
	return -EINVAL;
}

static inline int32_t
rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
{
	uint32_t rule_gindex, last_rule, rule_index;

	VERIFY_DEPTH(depth);

	rule_gindex = lpm->rule_info[depth - 1].first_rule;
	last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

	/* Scan used rules at given depth to find rule. */
	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
		/* If rule is found return the rule index. */
		if (lpm->rules_tbl[rule_index].ip == ip_masked)
			return rule_index;
	}

	/* If rule is not found return -EINVAL. */
	return -EINVAL;
}

/*
 * Find, clean and allocate a tbl8.
 */
static inline int32_t
tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
{
	uint32_t group_idx; /* tbl8 group index. */
	struct rte_lpm_tbl_entry_v20 *tbl8_entry;

	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
	for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
			group_idx++) {
		tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
		/* If a free tbl8 group is found clean it and set as VALID. */
		if (!tbl8_entry->valid_group) {
			memset(&tbl8_entry[0], 0,
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
					sizeof(tbl8_entry[0]));

			tbl8_entry->valid_group = VALID;

			/* Return group index for allocated tbl8 group. */
			return group_idx;
		}
	}

	/* If there are no tbl8 groups free then return an error. */
	return -ENOSPC;
}

static inline int32_t
tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8)
{
	uint32_t group_idx; /* tbl8 group index. */
	struct rte_lpm_tbl_entry *tbl8_entry;

	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
	for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
			group_idx++) {
		tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
		/* If a free tbl8 group is found clean it and set as VALID. */
		if (!tbl8_entry->valid_group) {
			memset(&tbl8_entry[0], 0,
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
					sizeof(tbl8_entry[0]));

			tbl8_entry->valid_group = VALID;

			/* Return group index for allocated tbl8 group. */
			return group_idx;
		}
	}

	/* If there are no tbl8 groups free then return an error. */
	return -ENOSPC;
}

static inline void
tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
{
	/* Set tbl8 group invalid. */
	tbl8[tbl8_group_start].valid_group = INVALID;
}

static inline void
tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
{
	/* Set tbl8 group invalid. */
	tbl8[tbl8_group_start].valid_group = INVALID;
}

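/*
 * Note (added for clarity; not in the upstream sources): freeing a group
 * only clears valid_group on the group's first entry. That is sufficient
 * because tbl8_alloc_*() tests just the first entry of each group when
 * scanning for a free one, and memsets the whole group before handing it
 * out again.
 */
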
static inline int32_t
add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t next_hop)
{
	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

	/* Calculate the index into Table24. */
	tbl24_index = ip >> 8;
	tbl24_range = depth_to_range(depth);

	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/*
		 * For an invalid entry, or a valid non-extended entry whose
		 * depth is not greater than the new depth, set the entry.
		 */
		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
				lpm->tbl24[i].depth <= depth)) {

			struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
				{ .next_hop = next_hop, },
				.valid = VALID,
				.valid_group = 0,
				.depth = depth,
			};

			/* Setting tbl24 entry in one go to avoid race
			 * conditions.
			 */
			lpm->tbl24[i] = new_tbl24_entry;

			continue;
		}

		if (lpm->tbl24[i].valid_group == 1) {
			/* If tbl24 entry is valid and extended calculate the
			 * index into tbl8.
			 */
			tbl8_index = lpm->tbl24[i].group_idx *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

			for (j = tbl8_index; j < tbl8_group_end; j++) {
				if (!lpm->tbl8[j].valid ||
						lpm->tbl8[j].depth <= depth) {
					struct rte_lpm_tbl_entry_v20
						new_tbl8_entry = {
						.valid = VALID,
						.valid_group = VALID,
						.depth = depth,
						.next_hop = next_hop,
					};

					/*
					 * Setting tbl8 entry in one go to
					 * avoid race conditions.
					 */
					lpm->tbl8[j] = new_tbl8_entry;

					continue;
				}
			}
		}
	}

	return 0;
}

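/*
 * Worked example (added for illustration; not in the upstream sources):
 * adding 10.0.0.0/8 with next hop 3 gives tbl24_index = 10.0.0.0 >> 8 =
 * 0x0A0000 and tbl24_range = 1 << (24 - 8) = 65536, so entries 0x0A0000
 * through 0x0AFFFF are rewritten. Entries already owned by a deeper rule
 * (depth > 8) are skipped by the depth comparison, and entries that have
 * been extended to a tbl8 get the new depth-8 value pushed into every
 * tbl8 slot that is not owned by a deeper rule.
 */
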
static inline int32_t
add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
#define group_idx next_hop
	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

	/* Calculate the index into Table24. */
	tbl24_index = ip >> 8;
	tbl24_range = depth_to_range(depth);

	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/*
		 * For an invalid entry, or a valid non-extended entry whose
		 * depth is not greater than the new depth, set the entry.
		 */
		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
				lpm->tbl24[i].depth <= depth)) {

			struct rte_lpm_tbl_entry new_tbl24_entry = {
				.next_hop = next_hop,
				.valid = VALID,
				.valid_group = 0,
				.depth = depth,
			};

			/* Setting tbl24 entry in one go to avoid race
			 * conditions.
			 */
			lpm->tbl24[i] = new_tbl24_entry;

			continue;
		}

		if (lpm->tbl24[i].valid_group == 1) {
			/* If tbl24 entry is valid and extended calculate the
			 * index into tbl8.
			 */
			tbl8_index = lpm->tbl24[i].group_idx *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

			for (j = tbl8_index; j < tbl8_group_end; j++) {
				if (!lpm->tbl8[j].valid ||
						lpm->tbl8[j].depth <= depth) {
					struct rte_lpm_tbl_entry
						new_tbl8_entry = {
						.valid = VALID,
						.valid_group = VALID,
						.depth = depth,
						.next_hop = next_hop,
					};

					/*
					 * Setting tbl8 entry in one go to
					 * avoid race conditions.
					 */
					lpm->tbl8[j] = new_tbl8_entry;

					continue;
				}
			}
		}
	}
#undef group_idx
	return 0;
}

static inline int32_t
add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
		uint8_t next_hop)
{
	uint32_t tbl24_index;
	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
		tbl8_range, i;

	tbl24_index = (ip_masked >> 8);
	tbl8_range = depth_to_range(depth);

	if (!lpm->tbl24[tbl24_index].valid) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);

		/* Check tbl8 allocation was successful. */
		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		/* Find index into tbl8 and range. */
		tbl8_index = (tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
				(ip_masked & 0xFF);

		/* Set tbl8 entry. */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			lpm->tbl8[i].depth = depth;
			lpm->tbl8[i].next_hop = next_hop;
			lpm->tbl8[i].valid = VALID;
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */

		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
			{ .group_idx = (uint8_t)tbl8_group_index, },
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} /* If valid entry but not extended calculate the index into Table8. */
	else if (lpm->tbl24[tbl24_index].valid_group == 0) {
		/* Search for free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);

		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_group_start +
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		/* Populate new tbl8 with tbl24 value. */
		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
			lpm->tbl8[i].valid = VALID;
			lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
			lpm->tbl8[i].next_hop =
					lpm->tbl24[tbl24_index].next_hop;
		}

		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		/* Insert new rule into the tbl8 entry. */
		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				lpm->tbl8[i].valid = VALID;
				lpm->tbl8[i].depth = depth;
				lpm->tbl8[i].next_hop = next_hop;

				continue;
			}
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */

		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
			{ .group_idx = (uint8_t)tbl8_group_index, },
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} else {
		/*
		 * If the entry is valid and extended, calculate the index
		 * into tbl8.
		 */
		tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
					.valid = VALID,
					.depth = depth,
					.next_hop = next_hop,
					.valid_group = lpm->tbl8[i].valid_group,
				};

				/*
				 * Setting tbl8 entry in one go to avoid race
				 * conditions.
				 */
				lpm->tbl8[i] = new_tbl8_entry;

				continue;
			}
		}
	}

	return 0;
}

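/*
 * Worked example (added for illustration; not in the upstream sources):
 * adding 192.168.1.128/25 masks the address to 0xC0A80180, so
 * tbl24_index = 0xC0A801 and tbl8_range = 1 << (32 - 25) = 128. If that
 * tbl24 entry was not yet extended, a fresh tbl8 group is allocated
 * (seeded with the old tbl24 value so shorter-prefix lookups still
 * resolve), entries 128..255 of the group take the new next hop, and the
 * tbl24 entry is flipped to an extended entry pointing at the group.
 */
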
static inline int32_t
add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
		uint32_t next_hop)
{
#define group_idx next_hop
	uint32_t tbl24_index;
	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
		tbl8_range, i;

	tbl24_index = (ip_masked >> 8);
	tbl8_range = depth_to_range(depth);

	if (!lpm->tbl24[tbl24_index].valid) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8);

		/* Check tbl8 allocation was successful. */
		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		/* Find index into tbl8 and range. */
		tbl8_index = (tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
				(ip_masked & 0xFF);

		/* Set tbl8 entry. */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			lpm->tbl8[i].depth = depth;
			lpm->tbl8[i].next_hop = next_hop;
			lpm->tbl8[i].valid = VALID;
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.group_idx = (uint8_t)tbl8_group_index,
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} /* If valid entry but not extended calculate the index into Table8. */
	else if (lpm->tbl24[tbl24_index].valid_group == 0) {
		/* Search for free tbl8 group. */
		tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8);

		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_group_start +
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		/* Populate new tbl8 with tbl24 value. */
		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
			lpm->tbl8[i].valid = VALID;
			lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
			lpm->tbl8[i].next_hop =
					lpm->tbl24[tbl24_index].next_hop;
		}

		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		/* Insert new rule into the tbl8 entry. */
		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				lpm->tbl8[i].valid = VALID;
				lpm->tbl8[i].depth = depth;
				lpm->tbl8[i].next_hop = next_hop;

				continue;
			}
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.group_idx = (uint8_t)tbl8_group_index,
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} else {
		/*
		 * If the entry is valid and extended, calculate the index
		 * into tbl8.
		 */
		tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				struct rte_lpm_tbl_entry new_tbl8_entry = {
					.valid = VALID,
					.depth = depth,
					.next_hop = next_hop,
					.valid_group = lpm->tbl8[i].valid_group,
				};

				/*
				 * Setting tbl8 entry in one go to avoid race
				 * conditions.
				 */
				lpm->tbl8[i] = new_tbl8_entry;

				continue;
			}
		}
	}
#undef group_idx
	return 0;
}

/*
 * Add a rule to the table.
 */
int
rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t next_hop)
{
	int32_t rule_index, status = 0;
	uint32_t ip_masked;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	ip_masked = ip & depth_to_mask(depth);

	/* Add the rule to the rule table. */
	rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop);

	/* If there is no space available for the new rule return an error. */
	if (rule_index < 0) {
		return rule_index;
	}

	if (depth <= MAX_DEPTH_TBL24) {
		status = add_depth_small_v20(lpm, ip_masked, depth, next_hop);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		status = add_depth_big_v20(lpm, ip_masked, depth, next_hop);

		/*
		 * If add fails due to exhaustion of tbl8 extensions delete
		 * rule that was added to rule table.
		 */
		if (status < 0) {
			rule_delete_v20(lpm, rule_index, depth);

			return status;
		}
	}

	return 0;
}
VERSION_SYMBOL(rte_lpm_add, _v20, 2.0);

int
rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
	int32_t rule_index, status = 0;
	uint32_t ip_masked;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	ip_masked = ip & depth_to_mask(depth);

	/* Add the rule to the rule table. */
	rule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop);

	/* If there is no space available for the new rule return an error. */
	if (rule_index < 0) {
		return rule_index;
	}

	if (depth <= MAX_DEPTH_TBL24) {
		status = add_depth_small_v1604(lpm, ip_masked, depth, next_hop);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		status = add_depth_big_v1604(lpm, ip_masked, depth, next_hop);

		/*
		 * If add fails due to exhaustion of tbl8 extensions delete
		 * rule that was added to rule table.
		 */
		if (status < 0) {
			rule_delete_v1604(lpm, rule_index, depth);

			return status;
		}
	}

	return 0;
}
BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip,
		uint8_t depth, uint32_t next_hop), rte_lpm_add_v1604);

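/*
 * Usage sketch (added for illustration; addresses and next-hop ids are
 * arbitrary, IPv4() is the host-byte-order helper from rte_ip.h, and the
 * lookup signature is assumed from rte_lpm.h at this revision):
 *
 *	uint32_t next_hop;
 *
 *	rte_lpm_add(lpm, IPv4(10, 1, 0, 0), 16, 7);
 *	if (rte_lpm_lookup(lpm, IPv4(10, 1, 2, 3), &next_hop) == 0) {
 *		// longest match is 10.1.0.0/16, so next_hop == 7
 *	}
 */
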
/*
 * Look for a rule in the high-level rules table.
 */
int
rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t *next_hop)
{
	uint32_t ip_masked;
	int32_t rule_index;

	/* Check user arguments. */
	if ((lpm == NULL) ||
		(next_hop == NULL) ||
		(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	/* Look for the rule using rule_find. */
	ip_masked = ip & depth_to_mask(depth);
	rule_index = rule_find_v20(lpm, ip_masked, depth);

	if (rule_index >= 0) {
		*next_hop = lpm->rules_tbl[rule_index].next_hop;
		return 1;
	}

	/* If rule is not found return 0. */
	return 0;
}
VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0);

int
rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t *next_hop)
{
	uint32_t ip_masked;
	int32_t rule_index;

	/* Check user arguments. */
	if ((lpm == NULL) ||
		(next_hop == NULL) ||
		(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	/* Look for the rule using rule_find. */
	ip_masked = ip & depth_to_mask(depth);
	rule_index = rule_find_v1604(lpm, ip_masked, depth);

	if (rule_index >= 0) {
		*next_hop = lpm->rules_tbl[rule_index].next_hop;
		return 1;
	}

	/* If rule is not found return 0. */
	return 0;
}
BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,
		uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604);

static inline int32_t
find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
		uint8_t *sub_rule_depth)
{
	int32_t rule_index;
	uint32_t ip_masked;
	uint8_t prev_depth;

	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
		ip_masked = ip & depth_to_mask(prev_depth);

		rule_index = rule_find_v20(lpm, ip_masked, prev_depth);

		if (rule_index >= 0) {
			*sub_rule_depth = prev_depth;
			return rule_index;
		}
	}

	return -1;
}

static inline int32_t
find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint8_t *sub_rule_depth)
{
	int32_t rule_index;
	uint32_t ip_masked;
	uint8_t prev_depth;

	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
		ip_masked = ip & depth_to_mask(prev_depth);

		rule_index = rule_find_v1604(lpm, ip_masked, prev_depth);

		if (rule_index >= 0) {
			*sub_rule_depth = prev_depth;
			return rule_index;
		}
	}

	return -1;
}

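/*
 * Example (added for illustration; not in the upstream sources): when
 * 10.1.1.0/24 is being deleted and 10.1.0.0/16 also exists, the loop
 * probes /23, /22, ... until rule_find() hits the /16, so the deleted
 * entries can be rewritten with the /16 rule's next hop instead of being
 * invalidated.
 */
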
static inline int32_t
delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

	/* Calculate the range and index into Table24. */
	tbl24_range = depth_to_range(depth);
	tbl24_index = (ip_masked >> 8);

	/*
	 * Firstly check the sub_rule_index. A -1 indicates no replacement rule
	 * and a positive number indicates a sub_rule_index.
	 */
	if (sub_rule_index < 0) {
		/*
		 * If no replacement rule exists then invalidate entries
		 * associated with this rule.
		 */
		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i].valid = INVALID;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j].valid = INVALID;
				}
			}
		}
	} else {
		/*
		 * If a replacement rule exists then modify entries
		 * associated with this rule.
		 */

		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
			{.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
			.valid = VALID,
			.valid_group = 0,
			.depth = sub_rule_depth,
		};

		struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
			.valid = VALID,
			.valid_group = VALID,
			.depth = sub_rule_depth,
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
		};

		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i] = new_tbl24_entry;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j] = new_tbl8_entry;
				}
			}
		}
	}

	return 0;
}

static inline int32_t
delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

	/* Calculate the range and index into Table24. */
	tbl24_range = depth_to_range(depth);
	tbl24_index = (ip_masked >> 8);

	/*
	 * Firstly check the sub_rule_index. A -1 indicates no replacement rule
	 * and a positive number indicates a sub_rule_index.
	 */
	if (sub_rule_index < 0) {
		/*
		 * If no replacement rule exists then invalidate entries
		 * associated with this rule.
		 */
		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i].valid = INVALID;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j].valid = INVALID;
				}
			}
		}
	} else {
		/*
		 * If a replacement rule exists then modify entries
		 * associated with this rule.
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = sub_rule_depth,
		};

		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.valid_group = VALID,
			.depth = sub_rule_depth,
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
		};

		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i] = new_tbl24_entry;
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j] = new_tbl8_entry;
				}
			}
		}
	}
#undef group_idx
	return 0;
}

/*
 * Checks if table 8 group can be recycled.
 *
 * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
 * Return of -EINVAL means tbl8 is empty and thus can be recycled.
 * Return of value > -1 means tbl8 is in use but has all the same values and
 * thus can be recycled.
 */
static inline int32_t
tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
		uint32_t tbl8_group_start)
{
	uint32_t tbl8_group_end, i;
	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
	 * (as they would affect all entries in a tbl8) and thus this table
	 * can not be recycled.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If first entry is valid check if the depth is less than 24
		 * and if so check the rest of the entries to verify that they
		 * are all of this depth.
		 */
		if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
					i++) {

				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth) {

					return -EEXIST;
				}
			}
			/* If all entries are the same return the tbl8 index */
			return tbl8_group_start;
		}

		return -EEXIST;
	}
	/*
	 * If the first entry is invalid check if the rest of the entries in
	 * the tbl8 are invalid.
	 */
	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
		if (tbl8[i].valid)
			return -EEXIST;
	}
	/* If no valid entries are found then return -EINVAL. */
	return -EINVAL;
}

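/*
 * Example outcomes (added for illustration; not in the upstream sources):
 * - every entry invalid                          -> -EINVAL, the group can
 *   be freed and the tbl24 entry invalidated
 * - all entries valid with an equal depth < 24   -> the group start index,
 *   and the group collapses back into a single tbl24 entry
 * - first entry deeper than 23, or mixed depths  -> -EEXIST, group stays
 */
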
static inline int32_t
tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,
		uint32_t tbl8_group_start)
{
	uint32_t tbl8_group_end, i;
	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
	 * (as they would affect all entries in a tbl8) and thus this table
	 * can not be recycled.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If first entry is valid check if the depth is less than 24
		 * and if so check the rest of the entries to verify that they
		 * are all of this depth.
		 */
		if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
					i++) {

				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth) {

					return -EEXIST;
				}
			}
			/* If all entries are the same return the tbl8 index */
			return tbl8_group_start;
		}

		return -EEXIST;
	}
	/*
	 * If the first entry is invalid check if the rest of the entries in
	 * the tbl8 are invalid.
	 */
	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
		if (tbl8[i].valid)
			return -EEXIST;
	}
	/* If no valid entries are found then return -EINVAL. */
	return -EINVAL;
}

static inline int32_t
delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
			tbl8_range, i;
	int32_t tbl8_recycle_index;

	/*
	 * Calculate the index into tbl24 and range. Note: All depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);

	if (sub_rule_index < 0) {
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be removed or modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i].valid = INVALID;
		}
	} else {
		/* Set new tbl8 entry. */
		struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
			.valid = VALID,
			.depth = sub_rule_depth,
			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
		};

		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i] = new_tbl8_entry;
		}
	}

	/*
	 * Check if there are any valid entries in this tbl8 group. If all
	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
	 * associated tbl24 entry.
	 */

	tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index].valid = 0;
		tbl8_free_v20(lpm->tbl8, tbl8_group_start);
	} else if (tbl8_recycle_index > -1) {
		/* Update tbl24 entry. */
		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
			{ .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
			.valid = VALID,
			.valid_group = 0,
			.depth = lpm->tbl8[tbl8_recycle_index].depth,
		};

		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index] = new_tbl24_entry;
		tbl8_free_v20(lpm->tbl8, tbl8_group_start);
	}

	return 0;
}

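/*
 * Worked example (added for illustration; not in the upstream sources):
 * suppose a group holds a single 10.0.0.8/32 rule over a 10.0.0.0/16
 * covering rule. Deleting the /32 leaves all 256 entries valid at depth
 * 16, so tbl8_recycle_check returns the group start, the tbl24 entry is
 * rewritten as a plain (non-extended) depth-16 entry, and only then is
 * the group freed, so concurrent readers never see a dangling group
 * reference.
 */
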
static inline int32_t
delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
			tbl8_range, i;
	int32_t tbl8_recycle_index;

	/*
	 * Calculate the index into tbl24 and range. Note: All depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);

	if (sub_rule_index < 0) {
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be removed or modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i].valid = INVALID;
		}
	} else {
		/* Set new tbl8 entry. */
		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.depth = sub_rule_depth,
			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
		};

		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i] = new_tbl8_entry;
		}
	}

	/*
	 * Check if there are any valid entries in this tbl8 group. If all
	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
	 * associated tbl24 entry.
	 */

	tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index].valid = 0;
		tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
	} else if (tbl8_recycle_index > -1) {
		/* Update tbl24 entry. */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = lpm->tbl8[tbl8_recycle_index].depth,
		};

		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index] = new_tbl24_entry;
		tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
	}
#undef group_idx
	return 0;
}

/*
 * Deletes a rule from the table.
 */
int
rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)
{
	int32_t rule_to_delete_index, sub_rule_index;
	uint32_t ip_masked;
	uint8_t sub_rule_depth;
	/*
	 * Check input arguments. Note: IP must be a positive integer of 32
	 * bits in length, therefore it need not be checked.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
		return -EINVAL;
	}

	ip_masked = ip & depth_to_mask(depth);

	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 * rule table.
	 */
	rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no rule was found the
	 * function rule_find returns -EINVAL.
	 */
	if (rule_to_delete_index < 0)
		return -EINVAL;

	/* Delete the rule from the rule table. */
	rule_delete_v20(lpm, rule_to_delete_index, depth);

	/*
	 * Find rule to replace the rule_to_delete. If there is no rule to
	 * replace the rule_to_delete we return -1 and invalidate the table
	 * entries associated with this rule.
	 */
	sub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth);

	/*
	 * If the input depth value is less than 25 use function
	 * delete_depth_small otherwise use delete_depth_big.
	 */
	if (depth <= MAX_DEPTH_TBL24) {
		return delete_depth_small_v20(lpm, ip_masked, depth,
				sub_rule_index, sub_rule_depth);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		return delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index,
				sub_rule_depth);
	}
}
VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0);

int
rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
{
	int32_t rule_to_delete_index, sub_rule_index;
	uint32_t ip_masked;
	uint8_t sub_rule_depth;
	/*
	 * Check input arguments. Note: IP must be a positive integer of 32
	 * bits in length, therefore it need not be checked.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
		return -EINVAL;
	}

	ip_masked = ip & depth_to_mask(depth);

	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 * rule table.
	 */
	rule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no rule was found the
	 * function rule_find returns -EINVAL.
	 */
	if (rule_to_delete_index < 0)
		return -EINVAL;

	/* Delete the rule from the rule table. */
	rule_delete_v1604(lpm, rule_to_delete_index, depth);

	/*
	 * Find rule to replace the rule_to_delete. If there is no rule to
	 * replace the rule_to_delete we return -1 and invalidate the table
	 * entries associated with this rule.
	 */
	sub_rule_index = find_previous_rule_v1604(lpm, ip, depth, &sub_rule_depth);

	/*
	 * If the input depth value is less than 25 use function
	 * delete_depth_small otherwise use delete_depth_big.
	 */
	if (depth <= MAX_DEPTH_TBL24) {
		return delete_depth_small_v1604(lpm, ip_masked, depth,
				sub_rule_index, sub_rule_depth);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		return delete_depth_big_v1604(lpm, ip_masked, depth, sub_rule_index,
				sub_rule_depth);
	}
}
BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04);
MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip,
		uint8_t depth), rte_lpm_delete_v1604);

/*
 * Delete all rules from the LPM table.
 */
void
rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm)
{
	/* Zero rule information. */
	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));

	/* Zero tbl24. */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

	/* Zero tbl8. */
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8));

	/* Delete all rules from the rules table. */
	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
}
VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0);

void
rte_lpm_delete_all_v1604(struct rte_lpm *lpm)
{
	/* Zero rule information. */
	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));

	/* Zero tbl24. */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

	/* Zero tbl8. */
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8));

	/* Delete all rules from the rules table. */
	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
}
BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04);
MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm),
		rte_lpm_delete_all_v1604);