/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h> /* for definition of CACHE_LINE_SIZE */
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>

#include "rte_lpm.h"
TAILQ_HEAD(rte_lpm_list, rte_lpm);

/* Global list of LPM objects (used for debug/dump). */
static struct rte_lpm_list *lpm_list;
/* Macro to check that the LPM tail queue exists, reserving it if necessary. */
#define CHECK_LPM_LIST_CREATED() do { \
    if (lpm_list == NULL) \
        if ((lpm_list = RTE_TAILQ_RESERVE("RTE_LPM", \
                rte_lpm_list)) == NULL) { \
            rte_errno = E_RTE_NO_TAILQ; \
            return NULL; \
        } \
} while (0)
#define MAX_DEPTH_TBL24 24

enum valid_flag {
    INVALID = 0,
    VALID
};
/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
#define VERIFY_DEPTH(depth) do { \
    if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH)) \
        rte_panic("LPM: Invalid depth (%u) at line %d", \
                (unsigned)(depth), __LINE__); \
} while (0)
#else
#define VERIFY_DEPTH(depth)
#endif
/*
 * Function Name: depth_to_mask
 * Usage	: Converts a given depth value to its corresponding mask value.
 *
 * depth (IN)	: range = 1 - 32
 * mask (OUT)	: 32bit mask
 */
static uint32_t __attribute__((pure))
depth_to_mask(uint8_t depth)
{
    VERIFY_DEPTH(depth);

    /* To calculate a mask start with a 1 on the left hand side and right
     * shift while populating the left hand side with 1's (arithmetic
     * shift of a negative int). */
    return (int)0x80000000 >> (depth - 1);
}
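/*
 * Worked example (illustrative, not part of the original file):
 * depth_to_mask(1) yields 0x80000000, depth_to_mask(24) yields
 * 0xFFFFFF00 and depth_to_mask(32) yields 0xFFFFFFFF. This relies on
 * the sign-extending right shift of a negative int, which is
 * implementation-defined in C but holds for the supported compilers.
 */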
/*
 * Function Name: depth_to_range
 * Usage	: Converts given depth value to its corresponding range value.
 *
 * depth (IN)	: range = 1 - 32
 * range (OUT)	: number of table entries covered by the depth
 */
static inline uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)
{
    VERIFY_DEPTH(depth);

    /*
     * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
     */
    if (depth <= MAX_DEPTH_TBL24)
        return 1 << (MAX_DEPTH_TBL24 - depth);

    /* Else if depth is greater than 24 calculate the tbl8 range. */
    return (1 << (RTE_LPM_MAX_DEPTH - depth));
}
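/*
 * Worked example (illustrative): a /16 rule covers 1 << (24 - 16) = 256
 * consecutive tbl24 entries, a /24 rule covers exactly one tbl24 entry,
 * and a /32 rule covers 1 << (32 - 32) = 1 entry within a tbl8 group.
 */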
/*
 * Function Name: rte_lpm_find_existing
 * Usage	: Finds an existing lpm table and returns a pointer to it.
 */
struct rte_lpm *
rte_lpm_find_existing(const char *name)
{
    struct rte_lpm *l;

    /* Check that we have an initialised tail queue. */
    CHECK_LPM_LIST_CREATED();

    TAILQ_FOREACH(l, lpm_list, next) {
        if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
            break;
    }

    if (l == NULL)
        rte_errno = ENOENT;

    return l;
}
/*
 * Function Name	: rte_lpm_create
 * Usage		: Allocates memory for an LPM object.
 */
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id, int max_rules,
        int mem_location)
{
    char mem_name[RTE_LPM_NAMESIZE];
    struct rte_lpm *lpm = NULL;
    uint32_t mem_size;

    /* Check that we have access to create things in shared memory. */
    if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
        rte_errno = E_RTE_SECONDARY;
        return NULL;
    }

    /* Check that we have an initialised tail queue. */
    CHECK_LPM_LIST_CREATED();

    RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl24_entry) != 2);
    RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl8_entry) != 2);

    /* Check user arguments. */
    if ((name == NULL) || (socket_id < -1) || (max_rules == 0) ||
            (mem_location != RTE_LPM_HEAP &&
            mem_location != RTE_LPM_MEMZONE)) {
        rte_errno = EINVAL;
        return NULL;
    }

    rte_snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

    /*
     * Pad out max_rules so that each depth is given the same number of
     * rules.
     */
    if (max_rules % RTE_LPM_MAX_DEPTH) {
        max_rules += RTE_LPM_MAX_DEPTH -
                (max_rules % RTE_LPM_MAX_DEPTH);
    }

    /* Determine the amount of memory to allocate. */
    mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);

    /* Allocate memory to store the LPM data structures. */
    if (mem_location == RTE_LPM_MEMZONE) {
        const struct rte_memzone *mz;
        uint32_t mz_flags = 0;

        mz = rte_memzone_reserve(mem_name, mem_size, socket_id,
                mz_flags);
        if (mz == NULL) {
            RTE_LOG(ERR, LPM, "LPM memzone creation failed\n");
            return NULL;
        }

        memset(mz->addr, 0, mem_size);
        lpm = (struct rte_lpm *) mz->addr;
    }
    else {
        lpm = (struct rte_lpm *)rte_zmalloc(mem_name, mem_size,
                CACHE_LINE_SIZE);
        if (lpm == NULL) {
            RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
            return NULL;
        }
    }

    /* Save user arguments. */
    lpm->max_rules_per_depth = max_rules / RTE_LPM_MAX_DEPTH;
    rte_snprintf(lpm->name, sizeof(lpm->name), "%s", name);
    lpm->mem_location = mem_location;

    TAILQ_INSERT_TAIL(lpm_list, lpm, next);

    return lpm;
}
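/*
 * Usage sketch (illustrative, not part of the original file): creating a
 * heap-backed table sized for 256 rules on any socket (-1). Error
 * handling is reduced to a panic for brevity:
 *
 *	struct rte_lpm *lpm;
 *
 *	lpm = rte_lpm_create("example_lpm", -1, 256, RTE_LPM_HEAP);
 *	if (lpm == NULL)
 *		rte_panic("Failed to create LPM table\n");
 */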
/*
 * Function Name	: rte_lpm_free
 * Usage		: Deallocates memory for the given LPM table.
 */
void
rte_lpm_free(struct rte_lpm *lpm)
{
    /* Check user arguments. */
    if (lpm == NULL)
        return;

    /* Note: It is currently not possible to free a memzone. */
    if (lpm->mem_location == RTE_LPM_HEAP) {
        TAILQ_REMOVE(lpm_list, lpm, next);
        rte_free(lpm);
    }
}
/*
 * Function Name: rule_add
 * Usage	: Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules that
 * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
 * prefixes with a depth of 1 etc.). In the following code (depth - 1) is used
 * as the group index because, although the depth range is 1 - 32, group
 * indices in the rule table run from 0 - 31.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
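/*
 * Worked example (illustrative): with max_rules padded to 256 by
 * rte_lpm_create(), max_rules_per_depth is 256 / 32 = 8, so the group
 * for depth 24 occupies rule indices (24 - 1) * 8 = 184 through 191.
 */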
static inline int32_t
rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
        uint8_t next_hop)
{
    uint32_t rule_gindex, rule_index, last_rule;

    VERIFY_DEPTH(depth);

    /* rule_gindex stands for rule group index. */
    rule_gindex = ((depth - 1) * lpm->max_rules_per_depth);
    /* Initialise rule_index to point to start of rule group. */
    rule_index = rule_gindex;
    /* Last rule = last used rule in this rule group. */
    last_rule = rule_gindex + lpm->used_rules_at_depth[depth - 1];

    /* Scan through rule group to see if rule already exists. */
    for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {

        /* If rule already exists update its next_hop and return. */
        if (lpm->rules_tbl[rule_index].ip == ip_masked) {
            lpm->rules_tbl[rule_index].next_hop = next_hop;

            return rule_index;
        }
    }

    /*
     * If rule does not exist check if there is space to add a new rule to
     * this rule group. If there is no space return error.
     */
    if (lpm->used_rules_at_depth[depth - 1] == lpm->max_rules_per_depth) {
        return -ENOSPC;
    }

    /* If there is space for the new rule add it. */
    lpm->rules_tbl[rule_index].ip = ip_masked;
    lpm->rules_tbl[rule_index].next_hop = next_hop;

    /* Increment the used rules counter for this rule group. */
    lpm->used_rules_at_depth[depth - 1]++;

    return rule_index;
}
/*
 * Function Name: rule_delete
 * Usage	: Deletes a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline void
rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
{
    uint32_t rule_gindex, last_rule_index;

    VERIFY_DEPTH(depth);

    rule_gindex = ((depth - 1) * lpm->max_rules_per_depth);
    last_rule_index = rule_gindex +
            (lpm->used_rules_at_depth[depth - 1]) - 1;
    /*
     * Overwrite redundant rule with last rule in group and decrement rule
     * counter.
     */
    lpm->rules_tbl[rule_index] = lpm->rules_tbl[last_rule_index];
    lpm->used_rules_at_depth[depth - 1]--;
}
/*
 * Function Name: rule_find
 * Usage	: Finds a rule in the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static inline int32_t
rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
{
    uint32_t rule_gindex, last_rule, rule_index;

    VERIFY_DEPTH(depth);

    rule_gindex = ((depth - 1) * lpm->max_rules_per_depth);
    last_rule = rule_gindex + lpm->used_rules_at_depth[depth - 1];

    /* Scan used rules at given depth to find rule. */
    for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
        /* If rule is found return the rule index. */
        if (lpm->rules_tbl[rule_index].ip == ip_masked)
            return rule_index;
    }

    /* If rule is not found return -E_RTE_NO_TAILQ. */
    return -E_RTE_NO_TAILQ;
}
/*
 * Function Name: tbl8_alloc
 * Usage	: Finds, cleans and allocates a tbl8 group.
 */
static inline int32_t
tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)
{
    uint32_t tbl8_gindex; /* tbl8 group index. */
    struct rte_lpm_tbl8_entry *tbl8_entry;

    /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
    for (tbl8_gindex = 0; tbl8_gindex < RTE_LPM_TBL8_NUM_GROUPS;
            tbl8_gindex++) {
        tbl8_entry = &tbl8[tbl8_gindex *
                RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
        /* If a free tbl8 group is found clean it and set it as VALID. */
        if (!tbl8_entry->valid_group) {
            memset(&tbl8_entry[0], 0,
                    RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
                    sizeof(tbl8_entry[0]));

            tbl8_entry->valid_group = VALID;

            /* Return group index for allocated tbl8 group. */
            return tbl8_gindex;
        }
    }

    /* If there are no tbl8 groups free then return error. */
    return -ENOSPC;
}
static inline void
tbl8_free(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
{
    /* Set tbl8 group invalid. */
    tbl8[tbl8_group_start].valid_group = INVALID;
}
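/*
 * Layout note (illustrative): group g occupies the tbl8 entries
 * [g * RTE_LPM_TBL8_GROUP_NUM_ENTRIES, (g + 1) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES),
 * and ownership is recorded only in the valid_group flag of the group's
 * first entry. Freeing is therefore a single flag write, while allocation
 * scans at most RTE_LPM_TBL8_NUM_GROUPS first entries.
 */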
static inline int32_t
add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
        uint8_t next_hop)
{
    uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

    /* Calculate the range and index into Table24. */
    tbl24_index = ip >> 8;
    tbl24_range = depth_to_range(depth);

    for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
        /*
         * For invalid OR valid and non-extended tbl24 entries set
         * entry.
         */
        if (!lpm->tbl24[i].valid || (lpm->tbl24[i].ext_entry == 0 &&
                lpm->tbl24[i].depth <= depth)) {

            struct rte_lpm_tbl24_entry new_tbl24_entry = {
                .valid = VALID,
                .ext_entry = 0,
                .depth = depth,
                { .next_hop = next_hop, }
            };

            /* Setting tbl24 entry in one go to avoid race
             * conditions. */
            lpm->tbl24[i] = new_tbl24_entry;
            continue;
        }

        /* If tbl24 entry is valid and extended calculate the index
         * into tbl8. Note: entry i is used, not tbl24_index, since
         * the loop may span several tbl24 entries. */
        tbl8_index = lpm->tbl24[i].tbl8_gindex *
                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
        tbl8_group_end = tbl8_index + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

        for (j = tbl8_index; j < tbl8_group_end; j++) {
            if (!lpm->tbl8[j].valid ||
                    lpm->tbl8[j].depth <= depth) {
                struct rte_lpm_tbl8_entry new_tbl8_entry = {
                    .valid = VALID,
                    .valid_group = VALID,
                    .depth = depth,
                    .next_hop = next_hop,
                };

                /* Setting tbl8 entry in one go to avoid
                 * race conditions. */
                lpm->tbl8[j] = new_tbl8_entry;
            }
        }
    }

    return 0;
}
static inline int32_t
add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
        uint8_t next_hop)
{
    uint32_t tbl24_index;
    int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
            tbl8_range, i;

    tbl24_index = (ip_masked >> 8);
    tbl8_range = depth_to_range(depth);

    if (!lpm->tbl24[tbl24_index].valid) {
        /* Search for a free tbl8 group. */
        tbl8_group_index = tbl8_alloc(lpm->tbl8);

        /* Check tbl8 allocation was successful. */
        if (tbl8_group_index < 0) {
            return tbl8_group_index;
        }

        /* Find index into tbl8 and range. */
        tbl8_index = (tbl8_group_index *
                RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
                (ip_masked & 0xFF);

        /* Set tbl8 entry. */
        for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
            lpm->tbl8[i].depth = depth;
            lpm->tbl8[i].next_hop = next_hop;
            lpm->tbl8[i].valid = VALID;
        }

        /*
         * Update tbl24 entry to point to new tbl8 entry. Note: The
         * ext_flag and tbl8_index need to be updated simultaneously,
         * so assign whole structure in one go.
         */
        struct rte_lpm_tbl24_entry new_tbl24_entry = {
            .valid = VALID,
            .ext_entry = 1,
            .depth = 0,
            { .tbl8_gindex = (uint8_t)tbl8_group_index, }
        };

        lpm->tbl24[tbl24_index] = new_tbl24_entry;
    }
    /* If valid entry but not extended calculate the index into tbl8. */
    else if (lpm->tbl24[tbl24_index].ext_entry == 0) {
        /* Search for a free tbl8 group. */
        tbl8_group_index = tbl8_alloc(lpm->tbl8);

        if (tbl8_group_index < 0) {
            return tbl8_group_index;
        }

        tbl8_group_start = tbl8_group_index *
                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
        tbl8_group_end = tbl8_group_start +
                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

        /* Populate new tbl8 with tbl24 value. */
        for (i = tbl8_group_start; i < tbl8_group_end; i++) {
            lpm->tbl8[i].valid = VALID;
            lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
            lpm->tbl8[i].next_hop =
                    lpm->tbl24[tbl24_index].next_hop;
        }

        tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

        /* Insert new rule into the tbl8 entry. */
        for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
            if (!lpm->tbl8[i].valid ||
                    lpm->tbl8[i].depth <= depth) {
                lpm->tbl8[i].valid = VALID;
                lpm->tbl8[i].depth = depth;
                lpm->tbl8[i].next_hop = next_hop;
            }
        }

        /*
         * Update tbl24 entry to point to new tbl8 entry. Note: The
         * ext_flag and tbl8_index need to be updated simultaneously,
         * so assign whole structure in one go.
         */
        struct rte_lpm_tbl24_entry new_tbl24_entry = {
            .valid = VALID,
            .ext_entry = 1,
            .depth = 0,
            { .tbl8_gindex = (uint8_t)tbl8_group_index, }
        };

        lpm->tbl24[tbl24_index] = new_tbl24_entry;
    }
    else {
        /*
         * If it is a valid, extended entry calculate the index into
         * tbl8.
         */
        tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
        tbl8_group_start = tbl8_group_index *
                RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
        tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

        for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

            if (!lpm->tbl8[i].valid ||
                    lpm->tbl8[i].depth <= depth) {
                struct rte_lpm_tbl8_entry new_tbl8_entry = {
                    .valid = VALID,
                    .valid_group = lpm->tbl8[i].valid_group,
                    .depth = depth,
                    .next_hop = next_hop,
                };

                /*
                 * Setting tbl8 entry in one go to avoid race
                 * conditions.
                 */
                lpm->tbl8[i] = new_tbl8_entry;
            }
        }
    }

    return 0;
}
/*
 * Function Name	: rte_lpm_add
 * Usage		: Adds a route to the table.
 */
int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
        uint8_t next_hop)
{
    int32_t rule_index, status = 0;
    uint32_t ip_masked = (ip & depth_to_mask(depth));

    /* Check user arguments. */
    if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
        return -EINVAL;

    /* Add the rule to the rule table. */
    rule_index = rule_add(lpm, ip_masked, depth, next_hop);

    /* If there is no space available for the new rule return error. */
    if (rule_index < 0) {
        return rule_index;
    }

    if (depth <= MAX_DEPTH_TBL24) {
        status = add_depth_small(lpm, ip_masked, depth, next_hop);
    }
    else { /* If depth > MAX_DEPTH_TBL24 */
        status = add_depth_big(lpm, ip_masked, depth, next_hop);

        /*
         * If add fails due to exhaustion of tbl8 extensions delete
         * rule that was added to rule table.
         */
        if (status < 0) {
            rule_delete(lpm, rule_index, depth);

            return status;
        }
    }

    return 0;
}
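/*
 * Usage sketch (illustrative, not part of the original file): adding a
 * route for 10.0.0.0/24 with next hop 5, then resolving an address in
 * that prefix with the rte_lpm_lookup() inline from rte_lpm.h. The
 * host-order address literals are arbitrary example values:
 *
 *	uint8_t next_hop;
 *
 *	if (rte_lpm_add(lpm, 0x0A000000, 24, 5) < 0)
 *		rte_panic("Failed to add LPM rule\n");
 *	if (rte_lpm_lookup(lpm, 0x0A00002A, &next_hop) == 0)
 *		printf("next hop: %u\n", next_hop);
 */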
/*
 * Function Name: find_previous_rule
 * Usage	: Finds the longest rule shallower than the given ip/depth.
 */
static inline int32_t
find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
{
    int32_t rule_index;
    uint32_t ip_masked;
    uint8_t prev_depth;

    for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
        ip_masked = ip & depth_to_mask(prev_depth);

        rule_index = rule_find(lpm, ip_masked, prev_depth);

        if (rule_index >= 0)
            return rule_index;
    }

    return -1;
}
static inline int32_t
delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
        uint8_t depth, int32_t sub_rule_index)
{
    uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
    uint8_t new_depth;

    /* Calculate the range and index into Table24. */
    tbl24_range = depth_to_range(depth);
    tbl24_index = (ip_masked >> 8);

    /*
     * Firstly check the sub_rule_index. A -1 indicates no replacement rule
     * and a positive number indicates a sub_rule_index.
     */
    if (sub_rule_index < 0) {
        /*
         * If no replacement rule exists then invalidate entries
         * associated with this rule.
         */
        for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
            if (lpm->tbl24[i].ext_entry == 0 &&
                    lpm->tbl24[i].depth <= depth) {
                lpm->tbl24[i].valid = INVALID;
            }
            else {
                /*
                 * If TBL24 entry is extended, then there has
                 * to be a rule with depth >= 25 in the
                 * associated TBL8 group.
                 */
                tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
                tbl8_index = tbl8_group_index *
                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

                for (j = tbl8_index; j < (tbl8_index +
                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

                    if (lpm->tbl8[j].depth <= depth)
                        lpm->tbl8[j].valid = INVALID;
                }
            }
        }
    }
    else {
        /*
         * If a replacement rule exists then modify entries
         * associated with this rule.
         */

        /*
         * Calculate depth of sub_rule. Rule group g holds rules of
         * depth g + 1 (see rule_add()), hence the + 1.
         */
        new_depth = (uint8_t)(sub_rule_index /
                lpm->max_rules_per_depth) + 1;

        struct rte_lpm_tbl24_entry new_tbl24_entry = {
            .valid = VALID,
            .ext_entry = 0,
            .depth = new_depth,
            { .next_hop = lpm->rules_tbl[sub_rule_index].next_hop, }
        };

        struct rte_lpm_tbl8_entry new_tbl8_entry = {
            .valid = VALID,
            .valid_group = VALID,
            .depth = new_depth,
            .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
        };

        for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

            if (lpm->tbl24[i].ext_entry == 0 &&
                    lpm->tbl24[i].depth <= depth) {
                lpm->tbl24[i] = new_tbl24_entry;
            }
            else {
                /*
                 * If TBL24 entry is extended, then there has
                 * to be a rule with depth >= 25 in the
                 * associated TBL8 group.
                 */
                tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
                tbl8_index = tbl8_group_index *
                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

                for (j = tbl8_index; j < (tbl8_index +
                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

                    if (lpm->tbl8[j].depth <= depth)
                        lpm->tbl8[j] = new_tbl8_entry;
                }
            }
        }
    }

    return 0;
}
/*
 * Function Name: tbl8_recycle_check
 * Usage	: Checks if a tbl8 group can be recycled.
 *
 * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
 * Return of -EINVAL means tbl8 is empty and thus can be recycled.
 * Return of value > -1 means tbl8 is in use but has all the same values and
 * thus can be recycled.
 */
static inline int32_t
tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
{
    uint32_t tbl8_group_end, i;
    tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

    /*
     * Check the first entry of the given tbl8. If it is invalid we know
     * this tbl8 does not contain any rule with a depth of 24 or less
     * (as such rules would affect all entries in a tbl8) and thus this
     * table can not be recycled into a single tbl24 entry.
     */
    if (tbl8[tbl8_group_start].valid) {
        /*
         * If first entry is valid check if the depth is less than 24
         * and if so check the rest of the entries to verify that they
         * are all of this depth.
         */
        if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
            for (i = (tbl8_group_start + 1); i < tbl8_group_end;
                    i++) {
                if (tbl8[i].depth !=
                        tbl8[tbl8_group_start].depth) {
                    return -EEXIST;
                }
            }
            /* If all entries are the same return the tbl8 index. */
            return tbl8_group_start;
        }

        return -EEXIST;
    }

    /*
     * If the first entry is invalid check if the rest of the entries in
     * the tbl8 are invalid.
     */
    for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
        if (tbl8[i].valid)
            return -EEXIST;
    }

    /* If no valid entries are found then return -EINVAL. */
    return -EINVAL;
}
static inline int32_t
delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
        uint8_t depth, int32_t sub_rule_index)
{
    uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
            tbl8_range, i;
    uint8_t new_depth;
    int32_t tbl8_recycle_index;

    /*
     * Calculate the index into tbl24 and range. Note: All depths larger
     * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
     */
    tbl24_index = ip_masked >> 8;

    /* Calculate the index into tbl8 and range. */
    tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
    tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
    tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
    tbl8_range = depth_to_range(depth);

    if (sub_rule_index < 0) {
        /*
         * Loop through the range of entries on tbl8 for which the
         * rule_to_delete must be removed or modified.
         */
        for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
            if (lpm->tbl8[i].depth <= depth)
                lpm->tbl8[i].valid = INVALID;
        }
    }
    else {
        /*
         * Calculate depth of sub_rule. Rule group g holds rules of
         * depth g + 1 (see rule_add()), hence the + 1.
         */
        new_depth = (uint8_t)(sub_rule_index /
                lpm->max_rules_per_depth) + 1;

        /* Set new tbl8 entry. */
        struct rte_lpm_tbl8_entry new_tbl8_entry = {
            .valid = VALID,
            .valid_group = VALID,
            .depth = new_depth,
            .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
        };

        /*
         * Loop through the range of entries on tbl8 for which the
         * rule_to_delete must be modified.
         */
        for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
            if (lpm->tbl8[i].depth <= depth)
                lpm->tbl8[i] = new_tbl8_entry;
        }
    }

    /*
     * Check if there are any valid entries left in this tbl8 group. If all
     * tbl8 entries are invalid we can free the tbl8 and invalidate the
     * associated tbl24 entry.
     */
    tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start);

    if (tbl8_recycle_index == -EINVAL) {
        /* Set tbl24 before freeing tbl8 to avoid race condition. */
        lpm->tbl24[tbl24_index].valid = 0;
        tbl8_free(lpm->tbl8, tbl8_group_start);
    }
    else if (tbl8_recycle_index > -1) {
        /* Update tbl24 entry. */
        struct rte_lpm_tbl24_entry new_tbl24_entry = {
            .valid = VALID,
            .ext_entry = 0,
            .depth = lpm->tbl8[tbl8_recycle_index].depth,
            { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, }
        };

        /* Set tbl24 before freeing tbl8 to avoid race condition. */
        lpm->tbl24[tbl24_index] = new_tbl24_entry;
        tbl8_free(lpm->tbl8, tbl8_group_start);
    }

    return 0;
}
/*
 * Function Name: rte_lpm_delete
 * Usage	: Deletes a rule from the table.
 */
int
rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
{
    int32_t rule_to_delete_index, sub_rule_index;
    uint32_t ip_masked;

    /*
     * Check input arguments. Note: IP must be a positive integer of 32
     * bits in length therefore it need not be checked.
     */
    if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
        return -EINVAL;
    }

    ip_masked = ip & depth_to_mask(depth);

    /*
     * Find the index of the input rule, that needs to be deleted, in the
     * rule table.
     */
    rule_to_delete_index = rule_find(lpm, ip_masked, depth);

    /*
     * Check if rule_to_delete_index was found. If no rule was found the
     * function rule_find returns -E_RTE_NO_TAILQ.
     */
    if (rule_to_delete_index < 0)
        return -E_RTE_NO_TAILQ;

    /* Delete the rule from the rule table. */
    rule_delete(lpm, rule_to_delete_index, depth);

    /*
     * Find rule to replace the rule_to_delete. If there is no rule to
     * replace the rule_to_delete we return -1 and invalidate the table
     * entries associated with this rule.
     */
    sub_rule_index = find_previous_rule(lpm, ip, depth);

    /*
     * If the input depth value is less than 25 use function
     * delete_depth_small otherwise use delete_depth_big.
     */
    if (depth <= MAX_DEPTH_TBL24) {
        return delete_depth_small(lpm, ip_masked, depth,
                sub_rule_index);
    }
    else { /* If depth > MAX_DEPTH_TBL24 */
        return delete_depth_big(lpm, ip_masked, depth, sub_rule_index);
    }
}
/*
 * Function Name: rte_lpm_delete_all
 * Usage	: Deletes all rules from the LPM table.
 */
void
rte_lpm_delete_all(struct rte_lpm *lpm)
{
    /* Zero used rules counters. */
    memset(lpm->used_rules_at_depth, 0, sizeof(lpm->used_rules_at_depth));

    /* Zero tbl24. */
    memset(lpm->tbl24, 0, sizeof(lpm->tbl24));

    /* Zero tbl8. */
    memset(lpm->tbl8, 0, sizeof(lpm->tbl8));

    /* Delete all rules from the rules table. */
    memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) *
            (lpm->max_rules_per_depth * RTE_LPM_MAX_DEPTH));
}
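/*
 * Teardown sketch (illustrative, not part of the original file): flush
 * all rules and release a heap-backed table. Memzone-backed tables
 * cannot currently be freed (see rte_lpm_free() above):
 *
 *	rte_lpm_delete_all(lpm);
 *	rte_lpm_free(lpm);
 */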