diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index 4269b3c8d8..e915c24eed 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -1,36 +1,34 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
  *   are met:
  *
- *     * Redistributions of source code must retain the above copyright
+ *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
  *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
  *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * version: DPDK.L.1.2.3-3
  */

 #include <string.h>
@@ -49,25 +47,17 @@
 #include <rte_memzone.h>
 #include <rte_tailq.h>
 #include <rte_eal.h>
+#include <rte_eal_memconfig.h>
 #include <rte_per_lcore.h>
 #include <rte_string_fns.h>
 #include <rte_errno.h>
+#include <rte_rwlock.h>
+#include <rte_spinlock.h>

 #include "rte_lpm.h"

 TAILQ_HEAD(rte_lpm_list, rte_lpm);
-
-/* global list of ring (used for debug/dump) */
-static struct rte_lpm_list *lpm_list;
-
-#define CHECK_LPM_LIST_CREATED() do { \
-	if (lpm_list == NULL) \
-		if ((lpm_list = RTE_TAILQ_RESERVE("RTE_LPM", rte_lpm_list)) == NULL){ \
-			rte_errno = E_RTE_NO_TAILQ; \
-			return NULL; \
-	} \
-} while (0)
-
+ 
 #define MAX_DEPTH_TBL24 24

 enum valid_flag {
@@ -78,17 +68,17 @@ enum valid_flag {
 /* Macro to enable/disable run-time checks. */
 #if defined(RTE_LIBRTE_LPM_DEBUG)
 #include <rte_debug.h>
-#define VERIFY_DEPTH(depth) do {	\
-	if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))	\
-		rte_panic("LPM: Invalid depth (%u) at line %d", depth, __LINE__); \
+#define VERIFY_DEPTH(depth) do {	\
+	if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))	\
+		rte_panic("LPM: Invalid depth (%u) at line %d", \
+			(unsigned)(depth), __LINE__);	\
 } while (0)
 #else
 #define VERIFY_DEPTH(depth)
 #endif

 /*
- * Function Name: depth_to_mask
- * Usage	: Converts a given depth value to its corresponding mask value.
+ * Converts a given depth value to its corresponding mask value.
  *
  * depth  (IN)	: range = 1 - 32
  * mask   (OUT)	: 32bit mask
@@ -105,11 +95,7 @@ depth_to_mask(uint8_t depth)
 }

 /*
- * Function Name: depth_to_range
- * Usage	: Converts given depth value to its corresponding range value.
- *
- * (IN)  depth
- * (OUT) mask
+ * Converts given depth value to its corresponding range value.
  */
 static inline uint32_t __attribute__((pure))
 depth_to_range(uint8_t depth)
@@ -133,14 +119,20 @@ struct rte_lpm *
 rte_lpm_find_existing(const char *name)
 {
 	struct rte_lpm *l;
+	struct rte_lpm_list *lpm_list;

 	/* check that we have an initialised tail queue */
-	CHECK_LPM_LIST_CREATED();
+	if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM, rte_lpm_list)) == NULL) {
+		rte_errno = E_RTE_NO_TAILQ;
+		return NULL;
+	}

+	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
 	TAILQ_FOREACH(l, lpm_list, next) {
 		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
 			break;
 	}
+	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

 	if (l == NULL)
 		rte_errno = ENOENT;
@@ -149,91 +141,70 @@ rte_lpm_find_existing(const char *name)
 }

 /*
- * Function Name : rte_lpm_create
- * Usage	: Allocates memory for LPM object
- *
- * rte_lpm (RETURN)
+ * Allocates memory for LPM object
  */
 struct rte_lpm *
 rte_lpm_create(const char *name, int socket_id, int max_rules,
-		int mem_location)
+		__rte_unused int flags)
 {
 	char mem_name[RTE_LPM_NAMESIZE];
 	struct rte_lpm *lpm = NULL;
 	uint32_t mem_size;
-
-	/* check that we have access to create things in shared memory. */
-	if (rte_eal_process_type() == RTE_PROC_SECONDARY){
-		rte_errno = E_RTE_SECONDARY;
-		return NULL;
-	}
+	struct rte_lpm_list *lpm_list;

 	/* check that we have an initialised tail queue */
-	CHECK_LPM_LIST_CREATED();
+	if ((lpm_list =
+	     RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM, rte_lpm_list)) == NULL) {
+		rte_errno = E_RTE_NO_TAILQ;
+		return NULL;
+	}

 	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl24_entry) != 2);
 	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl8_entry) != 2);

 	/* Check user arguments. */
-	if ((name == NULL) || (socket_id < -1) || (max_rules == 0) ||
-			(mem_location != RTE_LPM_HEAP &&
-					mem_location != RTE_LPM_MEMZONE)){
+	if ((name == NULL) || (socket_id < -1) || (max_rules == 0)){
 		rte_errno = EINVAL;
 		return NULL;
 	}

 	rte_snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

-	/*
-	 * Pad out max_rules so that each depth is given the same number of
-	 * rules.
-	 */
-	if (max_rules % RTE_LPM_MAX_DEPTH) {
-		max_rules += RTE_LPM_MAX_DEPTH -
-				(max_rules % RTE_LPM_MAX_DEPTH);
-	}
-
 	/* Determine the amount of memory to allocate. */
 	mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);

-	/* Allocate memory to store the LPM data structures. */
-	if (mem_location == RTE_LPM_MEMZONE) {
-		const struct rte_memzone *mz;
-		uint32_t mz_flags = 0;
-
-		mz = rte_memzone_reserve(mem_name, mem_size, socket_id,
-				mz_flags);
-		if (mz == NULL) {
-			RTE_LOG(ERR, LPM, "LPM memzone creation failed\n");
-			return NULL;
-		}
-
-		memset(mz->addr, 0, mem_size);
-		lpm = (struct rte_lpm *) mz->addr;
+	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+	/* guarantee there's no existing */
+	TAILQ_FOREACH(lpm, lpm_list, next) {
+		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
+			break;
 	}
-	else {
-		lpm = (struct rte_lpm *)rte_zmalloc(mem_name, mem_size,
-				CACHE_LINE_SIZE);
-		if (lpm == NULL) {
-			RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
-			return NULL;
-		}
+	if (lpm != NULL)
+		goto exit;
+
+	/* Allocate memory to store the LPM data structures. */
+	lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
+			CACHE_LINE_SIZE, socket_id);
+	if (lpm == NULL) {
+		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
+		goto exit;
 	}

 	/* Save user arguments. */
-	lpm->max_rules_per_depth = max_rules / RTE_LPM_MAX_DEPTH;
+	lpm->max_rules = max_rules;
 	rte_snprintf(lpm->name, sizeof(lpm->name), "%s", name);
-	lpm->mem_location = mem_location;

 	TAILQ_INSERT_TAIL(lpm_list, lpm, next);

+exit:
+	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
 	return lpm;
 }

 /*
- * Function Name : free
- * Usage: Deallocates memory for given LPM table.
+ * Deallocates memory for given LPM table.
  */
 void
 rte_lpm_free(struct rte_lpm *lpm)
@@ -242,16 +213,12 @@ rte_lpm_free(struct rte_lpm *lpm)
 	if (lpm == NULL)
 		return;

-	/* Note: Its is currently not possible to free a memzone. */
-	if (lpm->mem_location == RTE_LPM_HEAP){
-		TAILQ_REMOVE(lpm_list, lpm, next);
-		rte_free(lpm);
-	}
+	RTE_EAL_TAILQ_REMOVE(RTE_TAILQ_LPM, rte_lpm_list, lpm);
+	rte_free(lpm);
 }

 /*
- * Function Name: rule_add
- * Usage	: Adds a rule to the rule table.
+ * Adds a rule to the rule table.
  *
  * NOTE: The rule table is split into 32 groups. Each group contains rules that
  * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
@@ -265,71 +232,94 @@ rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 	uint8_t next_hop)
 {
 	uint32_t rule_gindex, rule_index, last_rule;
+	int i;

 	VERIFY_DEPTH(depth);

-	/* rule_gindex stands for rule group index. */
-	rule_gindex = ((depth - 1) * lpm->max_rules_per_depth);
-	/* Initialise rule_index to point to start of rule group. */
-	rule_index = rule_gindex;
-	/* Last rule = Last used rule in this rule group. */
-	last_rule = rule_gindex + lpm->used_rules_at_depth[depth - 1];
-	/* Scan through rule group to see if rule already exists. */
-	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
-
-		/* If rule already exists update its next_hop and return. */
-		if (lpm->rules_tbl[rule_index].ip == ip_masked) {
-			lpm->rules_tbl[rule_index].next_hop = next_hop;
-
-			return rule_index;
+	if (lpm->rule_info[depth - 1].used_rules > 0) {
+
+		/* rule_gindex stands for rule group index. */
+		rule_gindex = lpm->rule_info[depth - 1].first_rule;
+
+		/* Initialise rule_index to point to start of rule group. */
+		rule_index = rule_gindex;
+
+		/* Last rule = Last used rule in this rule group. */
+		last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
+
+		for (; rule_index < last_rule; rule_index++) {
+
+			/* If rule already exists update its next_hop and return. */
+			if (lpm->rules_tbl[rule_index].ip == ip_masked) {
+				lpm->rules_tbl[rule_index].next_hop = next_hop;
+
+				return rule_index;
+			}
 		}
+	} else {
+		/* Calculate the position in which the rule will be stored. */
+		rule_index = 0;
+
+		for (i = depth - 1; i > 0; i--) {
+			if (lpm->rule_info[i - 1].used_rules > 0) {
+				rule_index = lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules;
+				break;
+			}
+		}
+		if (rule_index == lpm->max_rules)
+			return -ENOSPC;
+
+		lpm->rule_info[depth - 1].first_rule = rule_index;
 	}

-	/*
-	 * If rule does not exist check if there is space to add a new rule to
-	 * this rule group. If there is no space return error. */
-	if (lpm->used_rules_at_depth[depth - 1] == lpm->max_rules_per_depth) {
-		return -ENOSPC;
+	/* Make room for the new rule in the array. */
+	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
+		if (lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
+			return -ENOSPC;
+
+		if (lpm->rule_info[i - 1].used_rules > 0) {
+			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules]
+					= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
+			lpm->rule_info[i - 1].first_rule++;
+		}
 	}

-	/* If there is space for the new rule add it. */
+	/* Add the new rule. */
 	lpm->rules_tbl[rule_index].ip = ip_masked;
 	lpm->rules_tbl[rule_index].next_hop = next_hop;

 	/* Increment the used rules counter for this rule group. */
-	lpm->used_rules_at_depth[depth - 1]++;
+	lpm->rule_info[depth - 1].used_rules++;

 	return rule_index;
 }

 /*
- * Function Name: rule_delete
- * Usage	: Delete a rule from the rule table.
+ * Delete a rule from the rule table.
  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
  */
 static inline void
 rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
 {
-	uint32_t rule_gindex, last_rule_index;
+	int i;

 	VERIFY_DEPTH(depth);

-	rule_gindex = ((depth - 1) * lpm->max_rules_per_depth);
-	last_rule_index = rule_gindex +
-			(lpm->used_rules_at_depth[depth - 1]) - 1;
-	/*
-	 * Overwrite redundant rule with last rule in group and decrement rule
-	 * counter.
-	 */
-	lpm->rules_tbl[rule_index] = lpm->rules_tbl[last_rule_index];
-	lpm->used_rules_at_depth[depth - 1]--;
-}
+	lpm->rules_tbl[rule_index] = lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
+			+ lpm->rule_info[depth - 1].used_rules - 1];
+
+	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
+		if (lpm->rule_info[i].used_rules > 0) {
+			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
+					lpm->rules_tbl[lpm->rule_info[i].first_rule + lpm->rule_info[i].used_rules - 1];
+			lpm->rule_info[i].first_rule--;
+		}
+	}
+
+	lpm->rule_info[depth - 1].used_rules--;
+}

 /*
- * Function Name: rule_find
- * Usage	: Finds a rule in rule table.
+ * Finds a rule in rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
  */
 static inline int32_t
@@ -339,8 +329,8 @@ rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)

 	VERIFY_DEPTH(depth);

-	rule_gindex = ((depth - 1) * lpm->max_rules_per_depth);
-	last_rule = rule_gindex + lpm->used_rules_at_depth[depth - 1];
+	rule_gindex = lpm->rule_info[depth - 1].first_rule;
+	last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

 	/* Scan used rules at given depth to find rule. */
 	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
@@ -354,8 +344,7 @@ rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
 }

 /*
- * Function Name: tbl8_alloc
- * Usage	: Find, clean and allocate a tbl8.
+ * Find, clean and allocate a tbl8.
  */
 static inline int32_t
 tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)
@@ -411,10 +400,10 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 				lpm->tbl24[i].depth <= depth)) {

 			struct rte_lpm_tbl24_entry new_tbl24_entry = {
+				{ .next_hop = next_hop, },
 				.valid = VALID,
 				.ext_entry = 0,
 				.depth = depth,
-				{ .next_hop = next_hop, }
 			};

 			/* Setting tbl24 entry in one go to avoid race
@@ -426,7 +415,7 @@

 		/* If tbl24 entry is valid and extended calculate the index
 		 * into tbl8. */
-		tbl8_index = lpm->tbl24[tbl24_index].tbl8_gindex *
+		tbl8_index = lpm->tbl24[i].tbl8_gindex *
 				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		tbl8_group_end = tbl8_index +
 				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
@@ -493,10 +482,10 @@

 		 */
 		struct rte_lpm_tbl24_entry new_tbl24_entry = {
+			{ .tbl8_gindex = (uint8_t)tbl8_group_index, },
 			.valid = VALID,
 			.ext_entry = 1,
 			.depth = 0,
-			{ .tbl8_gindex = (uint8_t)tbl8_group_index, }
 		};

 		lpm->tbl24[tbl24_index] = new_tbl24_entry;
@@ -544,10 +533,10 @@

 		 */
 		struct rte_lpm_tbl24_entry new_tbl24_entry = {
+			{ .tbl8_gindex = (uint8_t)tbl8_group_index, },
 			.valid = VALID,
 			.ext_entry = 1,
 			.depth = 0,
-			{ .tbl8_gindex = (uint8_t)tbl8_group_index, }
 		};

 		lpm->tbl24[tbl24_index] = new_tbl24_entry;
@@ -569,6 +558,7 @@
 			.valid = VALID,
 			.depth = depth,
 			.next_hop = next_hop,
+			.valid_group = lpm->tbl8[i].valid_group,
 		};

 		/*
@@ -586,25 +576,21 @@
 }

 /*
- * Function Name	: rte_lpm_add
- * Usage	: Add a route
- *
- *(IN) lpm_handle,
- *(IN) ip
- *(IN) depth
- *(IN) next_hop
+ * Add a route
  */
 int
 rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 		uint8_t next_hop)
 {
 	int32_t rule_index, status = 0;
-	uint32_t ip_masked = (ip & depth_to_mask(depth));
+	uint32_t ip_masked;

 	/* Check user arguments. */
 	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
 		return -EINVAL;

+	ip_masked = ip & depth_to_mask(depth);
+
 	/* Add the rule to the rule table.
 */
 	rule_index = rule_add(lpm, ip_masked, depth, next_hop);
@@ -634,7 +620,7 @@ rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 }

 static inline int32_t
-find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
+find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t *sub_rule_depth)
 {
 	int32_t rule_index;
 	uint32_t ip_masked;
@@ -645,8 +631,10 @@ find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)

 		rule_index = rule_find(lpm, ip_masked, prev_depth);

-		if (rule_index >= 0)
+		if (rule_index >= 0) {
+			*sub_rule_depth = prev_depth;
 			return rule_index;
+		}
 	}

 	return -1;
@@ -654,10 +642,9 @@

 static inline int32_t
 delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
-	uint8_t depth, int32_t sub_rule_index)
+	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
 {
 	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
-	uint8_t new_depth;

 	/* Calculate the range and index into Table24. */
 	tbl24_range = depth_to_range(depth);
@@ -673,6 +660,7 @@
 	 * associated with this rule.
 	 */
 	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
+
 		if (lpm->tbl24[i].ext_entry == 0 &&
 				lpm->tbl24[i].depth <= depth ) {
 			lpm->tbl24[i].valid = INVALID;
@@ -683,6 +671,7 @@
 			 * to be a rule with depth >= 25 in the
 			 * associated TBL8 group.
 			 */
+
 			tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
 			tbl8_index = tbl8_group_index *
 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
@@ -702,20 +691,16 @@
 	 * associated with this rule.
 	 */

-	/* Calculate depth of sub_rule. */
-	new_depth = (uint8_t) (sub_rule_index /
-			lpm->max_rules_per_depth);
-
 	struct rte_lpm_tbl24_entry new_tbl24_entry = {
+		{.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
 		.valid = VALID,
 		.ext_entry = 0,
-		.depth = new_depth,
-		{.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,}
+		.depth = sub_rule_depth,
 	};

 	struct rte_lpm_tbl8_entry new_tbl8_entry = {
 		.valid = VALID,
-		.depth = new_depth,
+		.depth = sub_rule_depth,
 		.next_hop = lpm->rules_tbl
 		[sub_rule_index].next_hop,
 	};
@@ -736,7 +721,7 @@
 			tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
 			tbl8_index = tbl8_group_index *
 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-
+
 			for (j = tbl8_index; j < (tbl8_index +
 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
@@ -751,8 +736,7 @@
 }

 /*
- * Function Name: tbl8_recycle_check
- * Usage	: Checks if table 8 group can be recycled.
+ * Checks if table 8 group can be recycled.
  *
 * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
 * Return of -EINVAL means tbl8 is empty and thus can be recycled
@@ -807,11 +791,10 @@ tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)

 static inline int32_t
 delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
-	uint8_t depth, int32_t sub_rule_index)
+	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
 {
 	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start,
 			tbl8_index, tbl8_range, i;
-	uint8_t new_depth;
 	int32_t tbl8_recycle_index;

 	/*
@@ -837,13 +820,11 @@
 		}
 	}
 	else {
-		new_depth = (uint8_t)(sub_rule_index /
-			lpm->max_rules_per_depth);
-
 		/* Set new tbl8 entry.
 */
 		struct rte_lpm_tbl8_entry new_tbl8_entry = {
 			.valid = VALID,
-			.depth = new_depth,
+			.depth = sub_rule_depth,
+			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
 			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
 		};

@@ -873,10 +854,10 @@
 	else if (tbl8_recycle_index > -1) {
 		/* Update tbl24 entry. */
 		struct rte_lpm_tbl24_entry new_tbl24_entry = {
+			{ .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
 			.valid = VALID,
 			.ext_entry = 0,
 			.depth = lpm->tbl8[tbl8_recycle_index].depth,
-			{ .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, }
 		};

 		/* Set tbl24 before freeing tbl8 to avoid race condition. */
@@ -888,18 +869,14 @@
 }

 /*
- * Function Name: rte_lpm_delete
- * Usage	: Deletes a rule
- *
- *(IN) lpm_handle,
- *(IN) ip
- *(IN) depth
+ * Deletes a rule
  */
 int
 rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
 {
 	int32_t rule_to_delete_index, sub_rule_index;
 	uint32_t ip_masked;
+	uint8_t sub_rule_depth;
 	/*
 	 * Check input arguments. Note: IP must be a positive integer of 32
 	 * bits in length therefore it need not be checked.
@@ -931,7 +908,8 @@
 	 * replace the rule_to_delete we return -1 and invalidate the table
 	 * entries associated with this rule.
 	 */
-	sub_rule_index = find_previous_rule(lpm, ip, depth);
+	sub_rule_depth = 0;
+	sub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth);

 	/*
 	 * If the input depth value is less than 25 use function
@@ -939,24 +917,21 @@
 	 */
 	if (depth <= MAX_DEPTH_TBL24) {
 		return delete_depth_small(lpm, ip_masked, depth,
-				sub_rule_index);
+				sub_rule_index, sub_rule_depth);
 	}
 	else { /* If depth > MAX_DEPTH_TBL24 */
-		return delete_depth_big(lpm, ip_masked, depth, sub_rule_index);
+		return delete_depth_big(lpm, ip_masked, depth, sub_rule_index, sub_rule_depth);
 	}
 }

 /*
- * Function Name: rte_lpm_delete_all
- * Usage	: Delete all rules from the LPM table.
- *
- *(IN) lpm_handle
+ * Delete all rules from the LPM table.
  */
 void
 rte_lpm_delete_all(struct rte_lpm *lpm)
 {
-	/* Zero used rules counter. */
-	memset(lpm->used_rules_at_depth, 0, sizeof(lpm->used_rules_at_depth));
+	/* Zero rule information. */
+	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));

 	/* Zero tbl24. */
 	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
@@ -965,7 +940,6 @@

 	memset(lpm->tbl8, 0, sizeof(lpm->tbl8));

 	/* Delete all rules from the rules table. */
-	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) *
-			(lpm->max_rules_per_depth * RTE_LPM_MAX_DEPTH));
+	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
 }
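
Usage sketch (not part of the commit): the hunks above change rte_lpm_create() to take an unused flags word in place of the old mem_location argument, allocate the table with rte_zmalloc_socket() under the EAL tailq lock, and keep all rules in a single max_rules-sized array ordered by depth. The following minimal caller, to be run after rte_eal_init(), illustrates the reworked API under those assumptions; rte_lpm_lookup() and SOCKET_ID_ANY come from headers this diff does not touch, and the table name, rule count, and next-hop values are illustrative only.

#include <stdio.h>
#include <rte_lpm.h>

static int
lpm_usage_sketch(void)
{
	struct rte_lpm *lpm;
	uint32_t net = (10 << 24) | (1 << 16);	/* 10.1.0.0, host byte order */
	uint8_t next_hop = 0;

	/* Up to 256 rules in total; the final (flags) argument is now unused. */
	lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY, 256, 0);
	if (lpm == NULL)
		return -1;

	/* Install 10.1.0.0/24 -> next hop 7; rule_add() masks and stores it. */
	if (rte_lpm_add(lpm, net, 24, 7) < 0) {
		rte_lpm_free(lpm);
		return -1;
	}

	/* Longest-prefix match for 10.1.0.5; returns 0 on a hit. */
	if (rte_lpm_lookup(lpm, net | 5, &next_hop) == 0)
		printf("10.1.0.5 -> next hop %u\n", next_hop);

	rte_lpm_delete(lpm, net, 24);	/* tables fall back to any shallower rule */
	rte_lpm_free(lpm);
	return 0;
}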