/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h>        /* for definition of CACHE_LINE_SIZE */
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>

#include "rte_lpm.h"
TAILQ_HEAD(rte_lpm_list, rte_lpm);

#define MAX_DEPTH_TBL24 24

enum valid_flag {
	INVALID = 0,
	VALID
};
/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
#define VERIFY_DEPTH(depth) do {                                          \
	if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))                  \
		rte_panic("LPM: Invalid depth (%u) at line %d",           \
				depth, __LINE__);                         \
} while (0)
#else
#define VERIFY_DEPTH(depth)
#endif
/*
 * Function Name: depth_to_mask
 * Usage	: Converts a given depth value to its corresponding mask value.
 *
 * depth	(IN)	: range = 1 - 32
 * mask		(OUT)	: 32bit mask
 */
static uint32_t __attribute__((pure))
depth_to_mask(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/* To calculate a mask start with a 1 on the left hand side and right
	 * shift while populating the left hand side with 1's */
	return (int)0x80000000 >> (depth - 1);
}
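/*
 * Worked example (added for illustration, not in the original source): the
 * arithmetic right shift of the signed value smears the sign bit, so the
 * top `depth` bits of the result are set:
 *
 *	depth_to_mask(1)  == 0x80000000
 *	depth_to_mask(8)  == 0xFF000000
 *	depth_to_mask(24) == 0xFFFFFF00
 *	depth_to_mask(32) == 0xFFFFFFFF
 */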
/*
 * Function Name: depth_to_range
 * Usage	: Converts given depth value to its corresponding range value.
 *
 * depth	(IN)	: range = 1 - 32
 * range	(OUT)	: number of table entries covered by the depth
 */
static inline uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/*
	 * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
	 */
	if (depth <= MAX_DEPTH_TBL24)
		return 1 << (MAX_DEPTH_TBL24 - depth);

	/* Else if depth is greater than 24 */
	return (1 << (RTE_LPM_MAX_DEPTH - depth));
}
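/*
 * Worked example (added for illustration, not in the original source): a
 * prefix of depth d covers 2^(24 - d) tbl24 entries when d <= 24, and
 * 2^(32 - d) tbl8 entries otherwise:
 *
 *	depth_to_range(16) == 256	(tbl24 entries for a /16)
 *	depth_to_range(24) == 1		(a /24 maps to exactly one entry)
 *	depth_to_range(28) == 16	(tbl8 entries for a /28)
 *	depth_to_range(32) == 1
 */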
/*
 * Find an existing lpm table and return a pointer to it.
 */
struct rte_lpm *
rte_lpm_find_existing(const char *name)
{
	struct rte_lpm *l;
	struct rte_lpm_list *lpm_list;

	/* check that we have an initialised tail queue */
	if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM,
			rte_lpm_list)) == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;
	}

	TAILQ_FOREACH(l, lpm_list, next) {
		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}

	if (l == NULL)
		rte_errno = ENOENT;

	return l;
}
/*
 * Function Name	: rte_lpm_create
 * Usage		: Allocates memory for LPM object
 */
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id, int max_rules,
		int mem_location)
{
	char mem_name[RTE_LPM_NAMESIZE];
	struct rte_lpm *lpm = NULL;
	uint32_t mem_size;
	struct rte_lpm_list *lpm_list;

	/* check that we have an initialised tail queue */
	if ((lpm_list =
	     RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM, rte_lpm_list)) == NULL) {
		rte_errno = E_RTE_NO_TAILQ;
		return NULL;
	}

	/* tbl24 and tbl8 entries must fit in two bytes for the table layout
	 * (and the single-struct atomic assignments) to work. */
	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl24_entry) != 2);
	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl8_entry) != 2);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1) || (max_rules == 0) ||
			(mem_location != RTE_LPM_HEAP &&
			 mem_location != RTE_LPM_MEMZONE)) {
		rte_errno = EINVAL;
		return NULL;
	}

	rte_snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/*
	 * Pad out max_rules so that each depth is given the same number of
	 * rules.
	 */
	if (max_rules % RTE_LPM_MAX_DEPTH) {
		max_rules += RTE_LPM_MAX_DEPTH -
				(max_rules % RTE_LPM_MAX_DEPTH);
	}

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);

	/* guarantee there's no existing */
	TAILQ_FOREACH(lpm, lpm_list, next) {
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	if (lpm != NULL)
		return NULL;

	/* Allocate memory to store the LPM data structures. */
	if (mem_location == RTE_LPM_MEMZONE) {
		const struct rte_memzone *mz;
		uint32_t mz_flags = 0;

		mz = rte_memzone_reserve(mem_name, mem_size, socket_id,
				mz_flags);
		if (mz == NULL) {
			RTE_LOG(ERR, LPM, "LPM memzone creation failed\n");
			return NULL;
		}

		memset(mz->addr, 0, mem_size);
		lpm = (struct rte_lpm *) mz->addr;
	}
	else {
		lpm = (struct rte_lpm *)rte_zmalloc(mem_name, mem_size,
				CACHE_LINE_SIZE);
		if (lpm == NULL) {
			RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
			return NULL;
		}
	}

	/* Save user arguments. */
	lpm->max_rules_per_depth = max_rules / RTE_LPM_MAX_DEPTH;
	rte_snprintf(lpm->name, sizeof(lpm->name), "%s", name);
	lpm->mem_location = mem_location;

	TAILQ_INSERT_TAIL(lpm_list, lpm, next);

	return lpm;
}
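/*
 * Usage sketch (added for illustration, not in the original source; the
 * table name and rule count are arbitrary):
 *
 *	struct rte_lpm *lpm;
 *
 *	lpm = rte_lpm_create("test_lpm", SOCKET_ID_ANY, 256, RTE_LPM_HEAP);
 *	if (lpm == NULL)
 *		rte_panic("Cannot create LPM table\n");
 */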
/*
 * Function Name	: rte_lpm_free
 * Usage		: Deallocates memory for given LPM table.
 */
void
rte_lpm_free(struct rte_lpm *lpm)
{
	/* Check user arguments. */
	if (lpm == NULL)
		return;

	/* Note: It is currently not possible to free a memzone. */
	if (lpm->mem_location == RTE_LPM_HEAP) {
		RTE_EAL_TAILQ_REMOVE(RTE_TAILQ_LPM, rte_lpm_list, lpm);
		rte_free(lpm);
	}
}
/*
 * Function Name: rule_add
 * Usage	: Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules that
 * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
 * prefixes with a depth of 1, etc.). In the following code (depth - 1) is used
 * as the group index because, even though the depth range is 1 - 32, the
 * groups are stored in the rule table at indexes 0 - 31.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline int32_t
rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
		uint8_t next_hop)
{
	uint32_t rule_gindex, rule_index, last_rule;

	VERIFY_DEPTH(depth);

	/* rule_gindex stands for rule group index. */
	rule_gindex = ((depth - 1) * lpm->max_rules_per_depth);
	/* Initialise rule_index to point to start of rule group. */
	rule_index = rule_gindex;
	/* Last rule = Last used rule in this rule group. */
	last_rule = rule_gindex + lpm->used_rules_at_depth[depth - 1];

	/* Scan through rule group to see if rule already exists. */
	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {

		/* If rule already exists update its next_hop and return. */
		if (lpm->rules_tbl[rule_index].ip == ip_masked) {
			lpm->rules_tbl[rule_index].next_hop = next_hop;

			return rule_index;
		}
	}

	/*
	 * If rule does not exist check if there is space to add a new rule to
	 * this rule group. If there is no space return error.
	 */
	if (lpm->used_rules_at_depth[depth - 1] == lpm->max_rules_per_depth)
		return -ENOSPC;

	/* If there is space for the new rule add it. */
	lpm->rules_tbl[rule_index].ip = ip_masked;
	lpm->rules_tbl[rule_index].next_hop = next_hop;

	/* Increment the used rules counter for this rule group. */
	lpm->used_rules_at_depth[depth - 1]++;

	return rule_index;
}
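/*
 * Worked example (added for illustration, not in the original source): with
 * max_rules_per_depth == 8, the rule group for depth 24 starts at rules_tbl
 * index (24 - 1) * 8 == 184, and a newly added /24 rule lands at index
 * 184 + used_rules_at_depth[23].
 */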
/*
 * Function Name: rule_delete
 * Usage	: Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline void
rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
{
	uint32_t rule_gindex, last_rule_index;

	VERIFY_DEPTH(depth);

	rule_gindex = ((depth - 1) * lpm->max_rules_per_depth);
	last_rule_index = rule_gindex +
			(lpm->used_rules_at_depth[depth - 1]) - 1;
	/*
	 * Overwrite redundant rule with last rule in group and decrement rule
	 * counter.
	 */
	lpm->rules_tbl[rule_index] = lpm->rules_tbl[last_rule_index];
	lpm->used_rules_at_depth[depth - 1]--;
}
/*
 * Function Name: rule_find
 * Usage	: Finds a rule in rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline int32_t
rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
{
	uint32_t rule_gindex, last_rule, rule_index;

	VERIFY_DEPTH(depth);

	rule_gindex = ((depth - 1) * lpm->max_rules_per_depth);
	last_rule = rule_gindex + lpm->used_rules_at_depth[depth - 1];

	/* Scan used rules at given depth to find rule. */
	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
		/* If rule is found return the rule index. */
		if (lpm->rules_tbl[rule_index].ip == ip_masked)
			return rule_index;
	}

	/* If rule is not found return -E_RTE_NO_TAILQ. */
	return -E_RTE_NO_TAILQ;
}
/*
 * Function Name: tbl8_alloc
 * Usage	: Find, clean and allocate a tbl8 group.
 */
static inline int32_t
tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)
{
	uint32_t tbl8_gindex; /* tbl8 group index. */
	struct rte_lpm_tbl8_entry *tbl8_entry;

	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
	for (tbl8_gindex = 0; tbl8_gindex < RTE_LPM_TBL8_NUM_GROUPS;
			tbl8_gindex++) {
		tbl8_entry = &tbl8[tbl8_gindex *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
		/* If a free tbl8 group is found clean it and set as VALID. */
		if (!tbl8_entry->valid_group) {
			memset(&tbl8_entry[0], 0,
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
					sizeof(tbl8_entry[0]));

			tbl8_entry->valid_group = VALID;

			/* Return group index for allocated tbl8 group. */
			return tbl8_gindex;
		}
	}

	/* If there are no tbl8 groups free then return error. */
	return -ENOSPC;
}

static inline void
tbl8_free(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
{
	/* Set tbl8 group invalid. */
	tbl8[tbl8_group_start].valid_group = INVALID;
}
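/*
 * Layout note (added for illustration, not in the original source): tbl8
 * groups are contiguous blocks of RTE_LPM_TBL8_GROUP_NUM_ENTRIES entries,
 * so group g spans indexes [g * RTE_LPM_TBL8_GROUP_NUM_ENTRIES,
 * (g + 1) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES). Only the valid_group flag of
 * the group's first entry is consulted to mark the whole group as
 * allocated or free.
 */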
static inline int32_t
add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint8_t next_hop)
{
	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

	/* Calculate the index into Table24. */
	tbl24_index = ip >> 8;
	tbl24_range = depth_to_range(depth);

	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/*
		 * For invalid OR valid and non-extended tbl 24 entries set
		 * entry.
		 */
		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].ext_entry == 0 &&
				lpm->tbl24[i].depth <= depth)) {

			struct rte_lpm_tbl24_entry new_tbl24_entry = {
				.valid = VALID,
				.ext_entry = 0,
				.depth = depth,
				{ .next_hop = next_hop, }
			};

			/* Setting tbl24 entry in one go to avoid race
			 * conditions. */
			lpm->tbl24[i] = new_tbl24_entry;

			continue;
		}

		/* If tbl24 entry is valid and extended calculate the index
		 * into tbl8. */
		tbl8_index = lpm->tbl24[i].tbl8_gindex *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_index + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		for (j = tbl8_index; j < tbl8_group_end; j++) {
			if (!lpm->tbl8[j].valid ||
					lpm->tbl8[j].depth <= depth) {
				struct rte_lpm_tbl8_entry new_tbl8_entry = {
					.valid = VALID,
					.valid_group = VALID,
					.depth = depth,
					.next_hop = next_hop,
				};

				/*
				 * Setting tbl8 entry in one go to avoid race
				 * conditions.
				 */
				lpm->tbl8[j] = new_tbl8_entry;
			}
		}
	}

	return 0;
}
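/*
 * Worked example (added for illustration, not in the original source):
 * adding 192.168.0.0/16 (ip = 0xC0A80000, depth = 16) gives
 * tbl24_index = 0xC0A80000 >> 8 = 0xC0A800 and tbl24_range = 256, so the
 * 256 tbl24 entries covering 192.168.0.0 - 192.168.255.255 are written,
 * except where a deeper (more specific) route already owns an entry.
 */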
static inline int32_t
add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
		uint8_t next_hop)
{
	uint32_t tbl24_index;
	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
			tbl8_range, i;

	tbl24_index = (ip_masked >> 8);
	tbl8_range = depth_to_range(depth);

	if (!lpm->tbl24[tbl24_index].valid) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc(lpm->tbl8);

		/* Check tbl8 allocation was successful. */
		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		/* Find index into tbl8 and range. */
		tbl8_index = (tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
				(ip_masked & 0xFF);

		/* Set tbl8 entry. */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			lpm->tbl8[i].depth = depth;
			lpm->tbl8[i].next_hop = next_hop;
			lpm->tbl8[i].valid = VALID;
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */
		struct rte_lpm_tbl24_entry new_tbl24_entry = {
			.valid = VALID,
			.ext_entry = 1,
			.depth = 0,
			{ .tbl8_gindex = (uint8_t)tbl8_group_index, }
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;
	}
	/* If valid entry but not extended calculate the index into Table8. */
	else if (lpm->tbl24[tbl24_index].ext_entry == 0) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc(lpm->tbl8);

		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_group_start +
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		/* Populate new tbl8 with tbl24 value. */
		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
			lpm->tbl8[i].valid = VALID;
			lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
			lpm->tbl8[i].next_hop =
					lpm->tbl24[tbl24_index].next_hop;
		}

		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		/* Insert new rule into the tbl8 entry. */
		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				lpm->tbl8[i].valid = VALID;
				lpm->tbl8[i].depth = depth;
				lpm->tbl8[i].next_hop = next_hop;
			}
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */
		struct rte_lpm_tbl24_entry new_tbl24_entry = {
			.valid = VALID,
			.ext_entry = 1,
			.depth = 0,
			{ .tbl8_gindex = (uint8_t)tbl8_group_index, }
		};

		lpm->tbl24[tbl24_index] = new_tbl24_entry;
	}
	else {
		/*
		 * If it is a valid, extended entry calculate the index into
		 * tbl8.
		 */
		tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				struct rte_lpm_tbl8_entry new_tbl8_entry = {
					.valid = VALID,
					.depth = depth,
					.next_hop = next_hop,
					.valid_group =
						lpm->tbl8[i].valid_group,
				};

				/*
				 * Setting tbl8 entry in one go to avoid race
				 * condition.
				 */
				lpm->tbl8[i] = new_tbl8_entry;
			}
		}
	}

	return 0;
}
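/*
 * Worked example (added for illustration, not in the original source):
 * adding 10.0.0.0/28 (ip_masked = 0x0A000000, depth = 28) resolves to
 * tbl24_index = 0x0A0000. If that entry is not yet extended, a tbl8 group
 * is allocated and the 2^(32 - 28) = 16 tbl8 entries starting at offset
 * (ip_masked & 0xFF) = 0 within the group receive the new next hop.
 */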
/*
 * Function Name	: rte_lpm_add
 * Usage		: Add a route.
 *
 * Returns 0 on success, negative value otherwise.
 */
int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint8_t next_hop)
{
	int32_t rule_index, status = 0;
	uint32_t ip_masked = (ip & depth_to_mask(depth));

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	/* Add the rule to the rule table. */
	rule_index = rule_add(lpm, ip_masked, depth, next_hop);

	/* If there is no space available for new rule return error. */
	if (rule_index < 0) {
		return rule_index;
	}

	if (depth <= MAX_DEPTH_TBL24) {
		status = add_depth_small(lpm, ip_masked, depth, next_hop);
	}
	else { /* If depth > MAX_DEPTH_TBL24 */
		status = add_depth_big(lpm, ip_masked, depth, next_hop);

		/*
		 * If add fails due to exhaustion of tbl8 extensions delete
		 * rule that was added to rule table.
		 */
		if (status < 0) {
			rule_delete(lpm, rule_index, depth);

			return status;
		}
	}

	return 0;
}
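/*
 * Usage sketch (added for illustration, not in the original source; the
 * addresses, next hops and the IPv4() helper macro are arbitrary):
 *
 *	#define IPv4(a, b, c, d) ((uint32_t)(((a) << 24) | ((b) << 16) | \
 *			((c) << 8) | (d)))
 *
 *	if (rte_lpm_add(lpm, IPv4(192, 168, 0, 0), 16, 1) < 0)
 *		rte_panic("Failed to add /16 route\n");
 *	if (rte_lpm_add(lpm, IPv4(192, 168, 1, 0), 28, 2) < 0)
 *		rte_panic("Failed to add /28 route\n");
 */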
/*
 * Find the rule with the longest prefix that is shallower than the given
 * depth (used to find a replacement rule when deleting).
 */
static inline int32_t
find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
{
	int32_t rule_index;
	uint32_t ip_masked;
	uint8_t prev_depth;

	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
		ip_masked = ip & depth_to_mask(prev_depth);

		rule_index = rule_find(lpm, ip_masked, prev_depth);

		if (rule_index >= 0)
			return rule_index;
	}

	return -1;
}
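/*
 * Worked example (added for illustration, not in the original source):
 * deleting 192.168.1.0/28 while 192.168.0.0/16 is still in the table makes
 * find_previous_rule() probe depths 27, 26, ..., 17 without a match and
 * stop at depth 16, whose rule then backfills the vacated table entries.
 */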
static inline int32_t
delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index)
{
	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
	uint8_t new_depth;

	/* Calculate the range and index into Table24. */
	tbl24_range = depth_to_range(depth);
	tbl24_index = (ip_masked >> 8);

	/*
	 * Firstly check the sub_rule_index. A -1 indicates no replacement rule
	 * and a positive number indicates a sub_rule_index.
	 */
	if (sub_rule_index < 0) {
		/*
		 * If no replacement rule exists then invalidate entries
		 * associated with this rule.
		 */
		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
			if (lpm->tbl24[i].ext_entry == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i].valid = INVALID;
			}
			else {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */
				tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j].valid = INVALID;
				}
			}
		}
	}
	else {
		/*
		 * If a replacement rule exists then modify entries
		 * associated with this rule.
		 */

		/* Calculate depth of sub_rule: rule groups are indexed by
		 * (depth - 1). */
		new_depth = (uint8_t)(sub_rule_index /
				lpm->max_rules_per_depth) + 1;

		struct rte_lpm_tbl24_entry new_tbl24_entry = {
			.valid = VALID,
			.ext_entry = 0,
			.depth = new_depth,
			{.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,}
		};

		struct rte_lpm_tbl8_entry new_tbl8_entry = {
			.valid = VALID,
			.valid_group = VALID,
			.depth = new_depth,
			.next_hop = lpm->rules_tbl
					[sub_rule_index].next_hop,
		};

		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].ext_entry == 0 &&
					lpm->tbl24[i].depth <= depth) {
				lpm->tbl24[i] = new_tbl24_entry;
			}
			else {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */
				tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j] = new_tbl8_entry;
				}
			}
		}
	}

	return 0;
}
/*
 * Function Name: tbl8_recycle_check
 * Usage	: Checks if a tbl8 group can be recycled.
 *
 * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
 * Return of -EINVAL means tbl8 is empty and thus can be recycled.
 * Return of value > -1 means tbl8 is in use but has all the same values and
 * thus can be recycled.
 */
static inline int32_t
tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
{
	uint32_t tbl8_group_end, i;
	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
	 * (As they would affect all entries in a tbl8) and thus this table
	 * can not be recycled.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If first entry is valid check if the depth is less than 24
		 * and if so check the rest of the entries to verify that they
		 * are all of this depth.
		 */
		if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
					i++) {

				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth)
					return -EEXIST;
			}
			/* If all entries are the same return the tbl8 index */
			return tbl8_group_start;
		}

		return -EEXIST;
	}

	/*
	 * If the first entry is invalid check if the rest of the entries in
	 * the tbl8 are invalid.
	 */
	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
		if (tbl8[i].valid)
			return -EEXIST;
	}
	/* If no valid entries are found then return -EINVAL. */
	return -EINVAL;
}
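/*
 * Worked example (added for illustration, not in the original source):
 * after deleting the last /28 in a group whose remaining entries were all
 * backfilled from a /16, every entry shares depth 16, so the check returns
 * tbl8_group_start and the caller folds the group back into a single
 * tbl24 entry.
 */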
static inline int32_t
delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
		uint8_t depth, int32_t sub_rule_index)
{
	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
			tbl8_range, i;
	uint8_t new_depth;
	int32_t tbl8_recycle_index;

	/*
	 * Calculate the index into tbl24 and range. Note: All depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);

	if (sub_rule_index < 0) {
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be removed or modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i].valid = INVALID;
		}
	}
	else {
		/* Calculate depth of sub_rule: rule groups are indexed by
		 * (depth - 1). */
		new_depth = (uint8_t)(sub_rule_index /
				lpm->max_rules_per_depth) + 1;

		/* Set new tbl8 entry. */
		struct rte_lpm_tbl8_entry new_tbl8_entry = {
			.valid = VALID,
			.depth = new_depth,
			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
		};

		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (lpm->tbl8[i].depth <= depth)
				lpm->tbl8[i] = new_tbl8_entry;
		}
	}

	/*
	 * Check if there are any valid entries in this tbl8 group. If all
	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
	 * associated tbl24 entry.
	 */
	tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index].valid = 0;
		tbl8_free(lpm->tbl8, tbl8_group_start);
	}
	else if (tbl8_recycle_index > -1) {
		/* Update tbl24 entry. */
		struct rte_lpm_tbl24_entry new_tbl24_entry = {
			.valid = VALID,
			.ext_entry = 0,
			.depth = lpm->tbl8[tbl8_recycle_index].depth,
			{ .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, }
		};

		/* Set tbl24 before freeing tbl8 to avoid race condition. */
		lpm->tbl24[tbl24_index] = new_tbl24_entry;
		tbl8_free(lpm->tbl8, tbl8_group_start);
	}

	return 0;
}
/*
 * Function Name	: rte_lpm_delete
 * Usage		: Deletes a rule.
 *
 * Returns 0 on success, negative value otherwise.
 */
int
rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
{
	int32_t rule_to_delete_index, sub_rule_index;
	uint32_t ip_masked;

	/*
	 * Check input arguments. Note: IP must be a positive integer of 32
	 * bits in length therefore it need not be checked.
	 */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
		return -EINVAL;
	}

	ip_masked = ip & depth_to_mask(depth);

	/*
	 * Find the index of the input rule, that needs to be deleted, in the
	 * rule table.
	 */
	rule_to_delete_index = rule_find(lpm, ip_masked, depth);

	/*
	 * Check if rule_to_delete_index was found. If no rule was found the
	 * function rule_find returns -E_RTE_NO_TAILQ.
	 */
	if (rule_to_delete_index < 0)
		return -E_RTE_NO_TAILQ;

	/* Delete the rule from the rule table. */
	rule_delete(lpm, rule_to_delete_index, depth);

	/*
	 * Find rule to replace the rule_to_delete. If there is no rule to
	 * replace the rule_to_delete we return -1 and invalidate the table
	 * entries associated with this rule.
	 */
	sub_rule_index = find_previous_rule(lpm, ip, depth);

	/*
	 * If the input depth value is less than 25 use function
	 * delete_depth_small otherwise use delete_depth_big.
	 */
	if (depth <= MAX_DEPTH_TBL24) {
		return delete_depth_small(lpm, ip_masked, depth,
				sub_rule_index);
	}
	else { /* If depth > MAX_DEPTH_TBL24 */
		return delete_depth_big(lpm, ip_masked, depth, sub_rule_index);
	}
}
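/*
 * Usage sketch (added for illustration, not in the original source; it
 * mirrors the add example above and reuses its illustrative IPv4() macro):
 *
 *	if (rte_lpm_delete(lpm, IPv4(192, 168, 1, 0), 28) < 0)
 *		rte_panic("Failed to delete /28 route\n");
 */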
/*
 * Function Name: rte_lpm_delete_all
 * Usage	: Deletes all rules from the LPM table.
 */
void
rte_lpm_delete_all(struct rte_lpm *lpm)
{
	/* Zero used rules counters. */
	memset(lpm->used_rules_at_depth, 0, sizeof(lpm->used_rules_at_depth));

	/* Zero tbl24. */
	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

	/* Zero tbl8. */
	memset(lpm->tbl8, 0, sizeof(lpm->tbl8));

	/* Delete all rules from the rules table. */
	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) *
			(lpm->max_rules_per_depth * RTE_LPM_MAX_DEPTH));
}