/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h>		/* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>

#include "rte_lpm.h"
TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);

#define MAX_DEPTH_TBL24 24
/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
#define VERIFY_DEPTH(depth) do {                                \
	if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
		rte_panic("LPM: Invalid depth (%u) at line %d", \
				(unsigned)(depth), __LINE__);   \
} while (0)
#else
#define VERIFY_DEPTH(depth)
#endif
/*
 * Converts a given depth value to its corresponding mask value.
 *
 * depth  (IN)	: range = 1 - 32
 * mask   (OUT)	: 32bit mask
 */
static uint32_t __attribute__((pure))
depth_to_mask(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/* To calculate a mask start with a 1 on the left hand side and right
	 * shift while populating the left hand side with 1's */
	return (int)0x80000000 >> (depth - 1);
}
/*
 * Converts given depth value to its corresponding range value.
 */
static inline uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/* Calculate tbl24 range. (Note: 2^depth = 1 << depth) */
	if (depth <= MAX_DEPTH_TBL24)
		return 1 << (MAX_DEPTH_TBL24 - depth);

	/* Else, the depth is greater than 24 and maps into a tbl8. */
	return 1 << (RTE_LPM_MAX_DEPTH - depth);
}
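/*
 * Added illustration (not part of the original sources): a few worked
 * values for the helpers above, compiled only if the hypothetical
 * LPM_EXAMPLE_CHECKS macro is defined. The arithmetic right shift of the
 * signed constant keeps filling 1s from the left, so depth_to_mask(8) is
 * 0xFF000000 and depth_to_mask(24) is 0xFFFFFF00, while depth_to_range(16)
 * covers 1 << 8 == 256 tbl24 entries and depth_to_range(25) covers
 * 1 << 7 == 128 tbl8 entries.
 */
#ifdef LPM_EXAMPLE_CHECKS
static void
example_check_depth_helpers(void)
{
	if (depth_to_mask(8) != 0xFF000000 ||
			depth_to_mask(24) != 0xFFFFFF00 ||
			depth_to_range(16) != 256 ||
			depth_to_range(25) != 128)
		RTE_LOG(ERR, LPM, "depth helper example check failed\n");
}
#endif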
/*
 * Find an existing lpm table and return a pointer to it.
 */
struct rte_lpm *
rte_lpm_find_existing(const char *name)
{
	struct rte_lpm *l = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm_list *lpm_list;

	/* check that we have an initialised tail queue */
	if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM,
			rte_lpm_list)) == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;
	}

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, lpm_list, next) {
		l = (struct rte_lpm *) te->data;
		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}
	return l;
}
/*
 * Allocates memory for LPM object
 */
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id, int max_rules,
		__rte_unused int flags)
{
	char mem_name[RTE_LPM_NAMESIZE];
	struct rte_lpm *lpm = NULL;
	struct rte_tailq_entry *te;
	uint32_t mem_size;
	struct rte_lpm_list *lpm_list;

	/* check that we have an initialised tail queue */
	if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM,
			rte_lpm_list)) == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;
	}

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl24_entry) != 2);
	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl8_entry) != 2);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* Guarantee there is no existing table with this name. */
	TAILQ_FOREACH(te, lpm_list, next) {
		lpm = (struct rte_lpm *) te->data;
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	if (te != NULL)
		goto exit;

	/* allocate tailq entry */
	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
		lpm = NULL;
		goto exit;
	}

	/* Allocate memory to store the LPM data structures. */
	lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		rte_free(te);
		goto exit;
	}

	/* Save user arguments. */
	lpm->max_rules = max_rules;
	snprintf(lpm->name, sizeof(lpm->name), "%s", name);

	te->data = (void *) lpm;
	TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
	return lpm;
}
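/*
 * Usage sketch (added illustration, not part of the original file): create
 * an LPM table, retrieve it again by name and release it. Guarded by a
 * hypothetical LPM_EXAMPLE_CHECKS macro; the table name and max_rules
 * value are made up.
 */
#ifdef LPM_EXAMPLE_CHECKS
static void
example_lpm_lifecycle(void)
{
	struct rte_lpm *lpm;

	lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY, 1024, 0);
	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "cannot create example LPM table\n");
		return;
	}

	/* Any other part of the application can look the table up by name. */
	if (rte_lpm_find_existing("example_lpm") != lpm)
		RTE_LOG(ERR, LPM, "lookup by name failed\n");

	rte_lpm_free(lpm);
}
#endif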
/*
 * Deallocates memory for given LPM table.
 */
void
rte_lpm_free(struct rte_lpm *lpm)
{
	struct rte_lpm_list *lpm_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */
	if (lpm == NULL)
		return;

	/* check that we have an initialised tail queue */
	if ((lpm_list =
	     RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM, rte_lpm_list)) == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return;
	}

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
	/* find our tailq entry */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *) lpm)
			break;
	}
	if (te == NULL) {
		rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
		return;
	}
	TAILQ_REMOVE(lpm_list, te, next);
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	rte_free(lpm);
	rte_free(te);
}
/*
 * Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules that
 * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
 * prefixes with a depth of 1 etc.). In the following code (depth - 1) is used
 * to refer to depth 1 because even though the depth range is 1 - 32, depths
 * are stored in the rule table from 0 - 31.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline int32_t
rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
	uint8_t next_hop)
{
	uint32_t rule_gindex, rule_index, last_rule;
	int i;

	VERIFY_DEPTH(depth);

	/* Scan through rule group to see if rule already exists. */
	if (lpm->rule_info[depth - 1].used_rules > 0) {
		/* rule_gindex stands for rule group index. */
		rule_gindex = lpm->rule_info[depth - 1].first_rule;
		/* Initialise rule_index to point to start of rule group. */
		rule_index = rule_gindex;
		/* Last rule = last used rule in this rule group. */
		last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

		for (; rule_index < last_rule; rule_index++) {
			/* If rule already exists update its next_hop and return. */
			if (lpm->rules_tbl[rule_index].ip == ip_masked) {
				lpm->rules_tbl[rule_index].next_hop = next_hop;
				return rule_index;
			}
		}

		if (rule_index == lpm->max_rules)
			return -ENOSPC;
	} else {
		/* Calculate the position in which the rule will be stored. */
		rule_index = 0;

		for (i = depth - 1; i > 0; i--) {
			if (lpm->rule_info[i - 1].used_rules > 0) {
				rule_index = lpm->rule_info[i - 1].first_rule +
						lpm->rule_info[i - 1].used_rules;
				break;
			}
		}
		if (rule_index == lpm->max_rules)
			return -ENOSPC;

		lpm->rule_info[depth - 1].first_rule = rule_index;
	}

	/* Make room for the new rule in the array. */
	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
		if (lpm->rule_info[i - 1].first_rule +
				lpm->rule_info[i - 1].used_rules == lpm->max_rules)
			return -ENOSPC;

		if (lpm->rule_info[i - 1].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule +
				lpm->rule_info[i - 1].used_rules] =
					lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
			lpm->rule_info[i - 1].first_rule++;
		}
	}

	/* Add the new rule. */
	lpm->rules_tbl[rule_index].ip = ip_masked;
	lpm->rules_tbl[rule_index].next_hop = next_hop;

	/* Increment the used rules counter for this rule group. */
	lpm->rule_info[depth - 1].used_rules++;

	return rule_index;
}
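/*
 * Added illustration (not part of the original DPDK sources): a debug
 * helper, guarded by a hypothetical LPM_EXAMPLE_CHECKS macro, that walks
 * the per-depth groups described above rule_add() and logs how the
 * contiguous rules_tbl array is partitioned. E.g. with two /8 rules and
 * three /24 rules one would expect first_rule 0/used_rules 2 for depth 8
 * and first_rule 2/used_rules 3 for depth 24.
 */
#ifdef LPM_EXAMPLE_CHECKS
static void
example_dump_rule_groups(const struct rte_lpm *lpm)
{
	uint8_t depth;

	for (depth = 1; depth <= RTE_LPM_MAX_DEPTH; depth++) {
		if (lpm->rule_info[depth - 1].used_rules == 0)
			continue;
		RTE_LOG(DEBUG, LPM,
			"depth %u: first_rule %u, used_rules %u\n",
			(unsigned)depth,
			(unsigned)lpm->rule_info[depth - 1].first_rule,
			(unsigned)lpm->rule_info[depth - 1].used_rules);
	}
}
#endif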
/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline void
rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
{
	int i;

	lpm->rules_tbl[rule_index] =
			lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule +
				lpm->rule_info[depth - 1].used_rules - 1];

	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
		if (lpm->rule_info[i].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
					lpm->rules_tbl[lpm->rule_info[i].first_rule +
						lpm->rule_info[i].used_rules - 1];
			lpm->rule_info[i].first_rule--;
		}
	}
	lpm->rule_info[depth - 1].used_rules--;
}
/*
 * Finds a rule in rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline int32_t
rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
{
	uint32_t rule_gindex, last_rule, rule_index;

	VERIFY_DEPTH(depth);

	rule_gindex = lpm->rule_info[depth - 1].first_rule;
	last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

	/* Scan used rules at given depth to find rule. */
	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
		/* If rule is found return the rule index. */
		if (lpm->rules_tbl[rule_index].ip == ip_masked)
			return rule_index;
	}

	/* If rule is not found return -E_RTE_NO_TAILQ. */
	return -E_RTE_NO_TAILQ;
}
/*
 * Find, clean and allocate a tbl8.
 */
static inline int32_t
tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)
{
	uint32_t tbl8_gindex; /* tbl8 group index. */
	struct rte_lpm_tbl8_entry *tbl8_entry;

	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
	for (tbl8_gindex = 0; tbl8_gindex < RTE_LPM_TBL8_NUM_GROUPS;
			tbl8_gindex++) {
		tbl8_entry = &tbl8[tbl8_gindex *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
		/* If a free tbl8 group is found clean it and set as VALID. */
		if (!tbl8_entry->valid_group) {
			memset(&tbl8_entry[0], 0,
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
					sizeof(tbl8_entry[0]));

			tbl8_entry->valid_group = VALID;

			/* Return group index for allocated tbl8 group. */
			return tbl8_gindex;
		}
	}

	/* If there are no tbl8 groups free then return error. */
	return -ENOSPC;
}

static inline void
tbl8_free(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
{
	/* Set tbl8 group invalid. */
	tbl8[tbl8_group_start].valid_group = INVALID;
}
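/*
 * Added illustration (not in the original sources): tbl8 group g owns the
 * RTE_LPM_TBL8_GROUP_NUM_ENTRIES consecutive entries starting at
 * g * RTE_LPM_TBL8_GROUP_NUM_ENTRIES, and the low byte of the masked IP
 * selects the entry inside that group. Guarded by a hypothetical macro.
 */
#ifdef LPM_EXAMPLE_CHECKS
static uint32_t
example_tbl8_entry_index(uint32_t tbl8_group_index, uint32_t ip_masked)
{
	/* e.g. group 3 with last octet 42 -> 3 * 256 + 42 == 810. */
	return (tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
			(ip_masked & 0xFF);
}
#endif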
static inline int32_t
add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint8_t next_hop)
{
	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

	/* Calculate the index into Table24. */
	tbl24_index = ip >> 8;
	tbl24_range = depth_to_range(depth);

	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/* For invalid OR valid and non-extended tbl24 entries set
		 * the entry. */
		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].ext_entry == 0 &&
				lpm->tbl24[i].depth <= depth)) {
			struct rte_lpm_tbl24_entry new_tbl24_entry = {
				{ .next_hop = next_hop, },
				.valid = VALID,
				.ext_entry = 0,
				.depth = depth,
			};
			/* Setting tbl24 entry in one go to avoid race
			 * conditions. */
			lpm->tbl24[i] = new_tbl24_entry;
			continue;
		}

		/* If tbl24 entry is valid and extended calculate the index
		 * into tbl8. */
		tbl8_index = lpm->tbl24[i].tbl8_gindex *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_index + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		for (j = tbl8_index; j < tbl8_group_end; j++) {
			if (!lpm->tbl8[j].valid ||
					lpm->tbl8[j].depth <= depth) {
				struct rte_lpm_tbl8_entry new_tbl8_entry = {
					.valid = VALID,
					.valid_group = VALID,
					.depth = depth,
					.next_hop = next_hop,
				};
				/* Setting tbl8 entry in one go to avoid race
				 * conditions. */
				lpm->tbl8[j] = new_tbl8_entry;
			}
		}
	}

	return 0;
}
static inline int32_t
add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
		uint8_t next_hop)
{
	uint32_t tbl24_index;
	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
			tbl8_range, i;

	tbl24_index = (ip_masked >> 8);
	tbl8_range = depth_to_range(depth);

	if (!lpm->tbl24[tbl24_index].valid) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc(lpm->tbl8);

		/* Check tbl8 allocation was successful. */
		if (tbl8_group_index < 0)
			return tbl8_group_index;

		/* Find index into tbl8 and range. */
		tbl8_index = (tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
				(ip_masked & 0xFF);

		/* Set tbl8 entry. */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			lpm->tbl8[i].depth = depth;
			lpm->tbl8[i].next_hop = next_hop;
			lpm->tbl8[i].valid = VALID;
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */
		struct rte_lpm_tbl24_entry new_tbl24_entry = {
			{ .tbl8_gindex = (uint8_t)tbl8_group_index, },
			.valid = VALID,
			.ext_entry = 1,
			.depth = 0,
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} /* If valid entry but not extended calculate the index into tbl8. */
	else if (lpm->tbl24[tbl24_index].ext_entry == 0) {
		/* Search for free tbl8 group. */
		tbl8_group_index = tbl8_alloc(lpm->tbl8);

		if (tbl8_group_index < 0)
			return tbl8_group_index;

		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_group_start +
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		/* Populate new tbl8 with tbl24 value. */
		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
			lpm->tbl8[i].valid = VALID;
			lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
			lpm->tbl8[i].next_hop =
					lpm->tbl24[tbl24_index].next_hop;
		}

		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		/* Insert new rule into the tbl8 entry. */
		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				lpm->tbl8[i].valid = VALID;
				lpm->tbl8[i].depth = depth;
				lpm->tbl8[i].next_hop = next_hop;
			}
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */
		struct rte_lpm_tbl24_entry new_tbl24_entry = {
			{ .tbl8_gindex = (uint8_t)tbl8_group_index, },
			.valid = VALID,
			.ext_entry = 1,
			.depth = 0,
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;

	} else {
		/* If it is a valid, extended entry calculate the index into tbl8. */
		tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				struct rte_lpm_tbl8_entry new_tbl8_entry = {
					.valid = VALID,
					.depth = depth,
					.next_hop = next_hop,
					.valid_group = lpm->tbl8[i].valid_group,
				};
				/* Setting tbl8 entry in one go to avoid race
				 * conditions. */
				lpm->tbl8[i] = new_tbl8_entry;
			}
		}
	}

	return 0;
}
int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint8_t next_hop)
{
	int32_t rule_index, status = 0;
	uint32_t ip_masked;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	ip_masked = ip & depth_to_mask(depth);

	/* Add the rule to the rule table. */
	rule_index = rule_add(lpm, ip_masked, depth, next_hop);

	/* If there is no space available for the new rule return an error. */
	if (rule_index < 0)
		return rule_index;

	if (depth <= MAX_DEPTH_TBL24) {
		status = add_depth_small(lpm, ip_masked, depth, next_hop);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		status = add_depth_big(lpm, ip_masked, depth, next_hop);

		/*
		 * If add fails due to exhaustion of tbl8 extensions delete
		 * the rule that was added to the rule table.
		 */
		if (status < 0) {
			rule_delete(lpm, rule_index, depth);
			return status;
		}
	}

	return 0;
}
/*
 * Look for a rule in the high-level rules table.
 */
int
rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint8_t *next_hop)
{
	uint32_t ip_masked;
	int32_t rule_index;

	/* Check user arguments. */
	if ((lpm == NULL) || (next_hop == NULL) ||
			(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	/* Look for the rule using rule_find. */
	ip_masked = ip & depth_to_mask(depth);
	rule_index = rule_find(lpm, ip_masked, depth);

	if (rule_index >= 0) {
		*next_hop = lpm->rules_tbl[rule_index].next_hop;
		return 1;
	}

	/* If rule is not found return 0. */
	return 0;
}
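/*
 * Usage sketch (added illustration, not part of the original file): check
 * whether an exact prefix is already installed before adding it. The
 * prefix, depth and next hop values are hypothetical; the example is
 * guarded so it is never built by default.
 */
#ifdef LPM_EXAMPLE_CHECKS
static void
example_check_or_add_rule(struct rte_lpm *lpm)
{
	uint8_t next_hop;

	/* 10.0.0.0/8 -> next hop 1. */
	if (rte_lpm_is_rule_present(lpm, 0x0A000000, 8, &next_hop) == 1)
		RTE_LOG(DEBUG, LPM, "rule present, next hop %u\n",
			(unsigned)next_hop);
	else if (rte_lpm_add(lpm, 0x0A000000, 8, 1) < 0)
		RTE_LOG(ERR, LPM, "failed to add 10.0.0.0/8\n");
}
#endif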
static inline int32_t
find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint8_t *sub_rule_depth)
{
	int32_t rule_index;
	uint32_t ip_masked;
	uint8_t prev_depth;

	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
		ip_masked = ip & depth_to_mask(prev_depth);
		rule_index = rule_find(lpm, ip_masked, prev_depth);
		if (rule_index >= 0) {
			*sub_rule_depth = prev_depth;
			return rule_index;
		}
	}

	return -1;
}

static inline int32_t
delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

	/* Calculate the range and index into Table24. */
	tbl24_range = depth_to_range(depth);
	tbl24_index = (ip_masked >> 8);

	/*
	 * Firstly check the sub_rule_index. A -1 indicates no replacement rule
	 * and a positive number indicates a sub_rule_index.
	 */
	if (sub_rule_index < 0) {
		/*
		 * If no replacement rule exists then invalidate entries
		 * associated with this rule.
		 */
		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
			if (lpm->tbl24[i].ext_entry == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i].valid = INVALID;
			} else {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */
				tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j].valid = INVALID;
				}
			}
		}
	} else {
		/*
		 * If a replacement rule exists then modify entries
		 * associated with this rule.
		 */
		struct rte_lpm_tbl24_entry new_tbl24_entry = {
			{ .next_hop = lpm->rules_tbl[sub_rule_index].next_hop, },
			.valid = VALID,
			.ext_entry = 0,
			.depth = sub_rule_depth,
		};

		struct rte_lpm_tbl8_entry new_tbl8_entry = {
			.valid = VALID,
			.depth = sub_rule_depth,
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
		};

		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
			if (lpm->tbl24[i].ext_entry == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i] = new_tbl24_entry;
			} else {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */
				tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j] = new_tbl8_entry;
				}
			}
		}
	}

	return 0;
}
/*
 * Checks if a tbl8 group can be recycled.
 *
 * Return of -EEXIST means the tbl8 is in use and thus can not be recycled.
 * Return of -EINVAL means the tbl8 is empty and thus can be recycled.
 * Return of a value > -1 means the tbl8 is in use but holds all the same
 * values and thus can be recycled.
 */
static inline int32_t
tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
{
	uint32_t tbl8_group_end, i;
	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
	 * (as they would affect all entries in a tbl8) and thus this table
	 * can not be recycled.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If first entry is valid check if the depth is less than 24
		 * and if so check the rest of the entries to verify that they
		 * are all of this depth.
		 */
		if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
					i++) {
				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth)
					return -EEXIST;
			}
			/* If all entries are the same return the tbl8 index. */
			return tbl8_group_start;
		}

		return -EEXIST;
	}

	/*
	 * If the first entry is invalid check if the rest of the entries in
	 * the tbl8 are invalid.
	 */
	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
		if (tbl8[i].valid)
			return -EEXIST;
	}
	/* If no valid entries are found then return -EINVAL. */
	return -EINVAL;
}
static inline int32_t
delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
			tbl8_range, i;
	int32_t tbl8_recycle_index;

	/*
	 * Calculate the index into tbl24 and range. Note: All depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);

	if (sub_rule_index < 0) {
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be removed or modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i].valid = INVALID;
		}
	} else {
		/* Set new tbl8 entry. */
		struct rte_lpm_tbl8_entry new_tbl8_entry = {
			.valid = VALID,
			.depth = sub_rule_depth,
			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
		};

		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i] = new_tbl8_entry;
		}
	}

	/*
	 * Check if there are any valid entries in this tbl8 group. If all
	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
	 * associated tbl24 entry.
	 */
	tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index].valid = 0;
		tbl8_free(lpm->tbl8, tbl8_group_start);
	} else if (tbl8_recycle_index > -1) {
		/* Update tbl24 entry. */
		struct rte_lpm_tbl24_entry new_tbl24_entry = {
			{ .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
			.valid = VALID,
			.ext_entry = 0,
			.depth = lpm->tbl8[tbl8_recycle_index].depth,
		};

		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index] = new_tbl24_entry;
		tbl8_free(lpm->tbl8, tbl8_group_start);
	}

	return 0;
}
int
rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
{
	int32_t rule_to_delete_index, sub_rule_index;
	uint32_t ip_masked;
	uint8_t sub_rule_depth;

	/*
	 * Check input arguments. Note: IP must be a positive integer of 32
	 * bits in length therefore it need not be checked.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	ip_masked = ip & depth_to_mask(depth);

	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 * rule table.
	 */
	rule_to_delete_index = rule_find(lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no rule was found the
	 * function rule_find returns -E_RTE_NO_TAILQ.
	 */
	if (rule_to_delete_index < 0)
		return -E_RTE_NO_TAILQ;

	/* Delete the rule from the rule table. */
	rule_delete(lpm, rule_to_delete_index, depth);

	/*
	 * Find a rule to replace the rule_to_delete. If there is no rule to
	 * replace the rule_to_delete we return -1 and invalidate the table
	 * entries associated with this rule.
	 */
	sub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth);

	/*
	 * If the input depth value is less than 25 use function
	 * delete_depth_small otherwise use delete_depth_big.
	 */
	if (depth <= MAX_DEPTH_TBL24) {
		return delete_depth_small(lpm, ip_masked, depth,
				sub_rule_index, sub_rule_depth);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		return delete_depth_big(lpm, ip_masked, depth, sub_rule_index,
				sub_rule_depth);
	}
}
/*
 * Delete all rules from the LPM table.
 */
void
rte_lpm_delete_all(struct rte_lpm *lpm)
{
	/* Zero rule information. */
	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
	/* Zero tbl24. */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
	/* Zero tbl8. */
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8));
	/* Delete all rules from the rules table. */
	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
}
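/*
 * End-to-end usage sketch (added illustration, not part of the original
 * file): install two overlapping prefixes, look an address up (the longest
 * matching prefix wins) and delete the more specific one again.
 * rte_lpm_lookup() is the inline lookup routine declared in rte_lpm.h; the
 * addresses and next hops below are hypothetical, and the whole example is
 * guarded so it is never built by default.
 */
#ifdef LPM_EXAMPLE_CHECKS
static void
example_lpm_usage(struct rte_lpm *lpm)
{
	uint8_t next_hop = 0;

	/* 192.168.0.0/16 -> 1 and 192.168.1.0/24 -> 2. */
	rte_lpm_add(lpm, 0xC0A80000, 16, 1);
	rte_lpm_add(lpm, 0xC0A80100, 24, 2);

	/* 192.168.1.7 matches both prefixes; the /24 is deeper, so 2 wins. */
	if (rte_lpm_lookup(lpm, 0xC0A80107, &next_hop) == 0)
		RTE_LOG(DEBUG, LPM, "next hop %u\n", (unsigned)next_hop);

	/* Remove the /24; the same lookup now falls back to the /16. */
	rte_lpm_delete(lpm, 0xC0A80100, 24);
}
#endif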