X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_lpm%2Frte_lpm.c;h=978ac6013ecf39aeb29aee368d7c1f83af8f9fab;hb=725956e6d11aaa3c6a2346ae3a1027428600cfbf;hp=e846e6692ad2dc3ca36a648eefcc76dcee5eb400;hpb=4cdf471a06e322fc6abf13c18c2028d48bc63ac1;p=dpdk.git diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c index e846e6692a..978ac6013e 100644 --- a/lib/librte_lpm/rte_lpm.c +++ b/lib/librte_lpm/rte_lpm.c @@ -1,35 +1,34 @@ /*- * BSD LICENSE - * - * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions * are met: - * - * * Redistributions of source code must retain the above copyright + * + * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * */ #include @@ -43,20 +42,26 @@ #include #include #include -#include /* for definition of CACHE_LINE_SIZE */ +#include /* for definition of RTE_CACHE_LINE_SIZE */ #include #include -#include #include #include #include #include #include +#include +#include #include "rte_lpm.h" -TAILQ_HEAD(rte_lpm_list, rte_lpm); - +TAILQ_HEAD(rte_lpm_list, rte_tailq_entry); + +static struct rte_tailq_elem rte_lpm_tailq = { + .name = "RTE_LPM", +}; +EAL_REGISTER_TAILQ(rte_lpm_tailq) + #define MAX_DEPTH_TBL24 24 enum valid_flag { @@ -108,115 +113,304 @@ depth_to_range(uint8_t depth) return 1 << (MAX_DEPTH_TBL24 - depth); /* Else if depth is greater than 24 */ - return (1 << (RTE_LPM_MAX_DEPTH - depth)); + return 1 << (RTE_LPM_MAX_DEPTH - depth); } /* * Find an existing lpm table and return a pointer to it. */ -struct rte_lpm * -rte_lpm_find_existing(const char *name) +struct rte_lpm_v20 * +rte_lpm_find_existing_v20(const char *name) { - struct rte_lpm *l; + struct rte_lpm_v20 *l = NULL; + struct rte_tailq_entry *te; struct rte_lpm_list *lpm_list; - /* check that we have an initialised tail queue */ - if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM, rte_lpm_list)) == NULL) { - rte_errno = E_RTE_NO_TAILQ; + lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list); + + rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK); + TAILQ_FOREACH(te, lpm_list, next) { + l = (struct rte_lpm_v20 *) te->data; + if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0) + break; + } + rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK); + + if (te == NULL) { + rte_errno = ENOENT; return NULL; } - TAILQ_FOREACH(l, lpm_list, next) { + return l; +} +VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0); + +struct rte_lpm * +rte_lpm_find_existing_v1604(const char *name) +{ + struct rte_lpm *l = NULL; + struct rte_tailq_entry *te; + struct rte_lpm_list *lpm_list; + + lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list); + + rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK); + TAILQ_FOREACH(te, lpm_list, next) { + l = (struct rte_lpm *) te->data; if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0) break; } + rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK); - if (l == NULL) + if (te == NULL) { rte_errno = ENOENT; + return NULL; + } return l; } +BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04); +MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name), + rte_lpm_find_existing_v1604); /* * Allocates memory for LPM object */ -struct rte_lpm * -rte_lpm_create(const char *name, int socket_id, int max_rules, +struct rte_lpm_v20 * +rte_lpm_create_v20(const char *name, int socket_id, int max_rules, __rte_unused int flags) { char mem_name[RTE_LPM_NAMESIZE]; - struct rte_lpm *lpm = NULL; + struct rte_lpm_v20 *lpm = NULL; + struct rte_tailq_entry *te; uint32_t mem_size; struct rte_lpm_list *lpm_list; - /* check that we have an initialised tail queue */ - if ((lpm_list = - RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM, rte_lpm_list)) == NULL) { - rte_errno = E_RTE_NO_TAILQ; - return NULL; - } + lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list); - RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl24_entry) != 2); - RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl8_entry) != 2); + RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2); /* Check user arguments. 
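The depth_to_range() hunk above keeps the classic DIR-24-8 split: prefixes up to /24 are resolved directly in tbl24, longer ones spill into a tbl8 group indexed by the last octet. The companion depth_to_mask() is used throughout the rest of this diff but its body is not part of the patch; the snippet below is a minimal standalone sketch of both helpers under that assumption, with the constants written out rather than taken from rte_lpm.h.

#include <stdint.h>
#include <assert.h>

#define MAX_DEPTH_TBL24   24
#define RTE_LPM_MAX_DEPTH 32

/* Prefix length -> 32-bit netmask (depth assumed to be 1..32). */
static uint32_t
depth_to_mask(uint8_t depth)
{
	/* Shifting all-ones left by (32 - depth) keeps the top 'depth' bits. */
	return (uint32_t)(~0u) << (RTE_LPM_MAX_DEPTH - depth);
}

/* Prefix length -> number of table entries the prefix spans. */
static uint32_t
depth_to_range(uint8_t depth)
{
	if (depth <= MAX_DEPTH_TBL24)
		/* tbl24 entries covered: one per /24 inside the prefix. */
		return 1 << (MAX_DEPTH_TBL24 - depth);

	/* tbl8 entries covered: one per address in the last octet. */
	return 1 << (RTE_LPM_MAX_DEPTH - depth);
}

int main(void)
{
	assert(depth_to_mask(24) == 0xFFFFFF00u);
	assert(depth_to_range(22) == 4);	/* a /22 spans four /24 slots */
	assert(depth_to_range(30) == 4);	/* a /30 spans four tbl8 slots */
	return 0;
}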
*/ - if ((name == NULL) || (socket_id < -1) || (max_rules == 0)){ + if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) { rte_errno = EINVAL; return NULL; } - rte_snprintf(mem_name, sizeof(mem_name), "LPM_%s", name); - - /* - * Pad out max_rules so that each depth is given the same number of - * rules. - */ - if (max_rules % RTE_LPM_MAX_DEPTH) { - max_rules += RTE_LPM_MAX_DEPTH - - (max_rules % RTE_LPM_MAX_DEPTH); - } + snprintf(mem_name, sizeof(mem_name), "LPM_%s", name); /* Determine the amount of memory to allocate. */ mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules); + rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); + /* guarantee there's no existing */ - TAILQ_FOREACH(lpm, lpm_list, next) { + TAILQ_FOREACH(te, lpm_list, next) { + lpm = (struct rte_lpm_v20 *) te->data; if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0) break; } - if (lpm != NULL) - return NULL; + lpm = NULL; + if (te != NULL) { + rte_errno = EEXIST; + goto exit; + } + + /* allocate tailq entry */ + te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0); + if (te == NULL) { + RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n"); + goto exit; + } /* Allocate memory to store the LPM data structures. */ - lpm = (struct rte_lpm *)rte_zmalloc(mem_name, mem_size, - CACHE_LINE_SIZE); + lpm = (struct rte_lpm_v20 *)rte_zmalloc_socket(mem_name, mem_size, + RTE_CACHE_LINE_SIZE, socket_id); if (lpm == NULL) { RTE_LOG(ERR, LPM, "LPM memory allocation failed\n"); + rte_free(te); + goto exit; + } + + /* Save user arguments. */ + lpm->max_rules = max_rules; + snprintf(lpm->name, sizeof(lpm->name), "%s", name); + + te->data = (void *) lpm; + + TAILQ_INSERT_TAIL(lpm_list, te, next); + +exit: + rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); + + return lpm; +} +VERSION_SYMBOL(rte_lpm_create, _v20, 2.0); + +struct rte_lpm * +rte_lpm_create_v1604(const char *name, int socket_id, + const struct rte_lpm_config *config) +{ + char mem_name[RTE_LPM_NAMESIZE]; + struct rte_lpm *lpm = NULL; + struct rte_tailq_entry *te; + uint32_t mem_size, rules_size, tbl8s_size; + struct rte_lpm_list *lpm_list; + + lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list); + + RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4); + + /* Check user arguments. */ + if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0) + || config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) { + rte_errno = EINVAL; return NULL; } + snprintf(mem_name, sizeof(mem_name), "LPM_%s", name); + + /* Determine the amount of memory to allocate. */ + mem_size = sizeof(*lpm); + rules_size = sizeof(struct rte_lpm_rule) * config->max_rules; + tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) * + RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s); + + rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); + + /* guarantee there's no existing */ + TAILQ_FOREACH(te, lpm_list, next) { + lpm = (struct rte_lpm *) te->data; + if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0) + break; + } + lpm = NULL; + if (te != NULL) { + rte_errno = EEXIST; + goto exit; + } + + /* allocate tailq entry */ + te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0); + if (te == NULL) { + RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n"); + goto exit; + } + + /* Allocate memory to store the LPM data structures. 
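rte_lpm_create_v1604() above replaces the old single allocation with three: the rte_lpm structure itself, a rules table of max_rules entries, and a tbl8 array whose size is now a run-time parameter (number_tbl8s groups). The sketch below mirrors that sizing and frees everything already allocated if a later allocation fails; the 256-entry group size and the field layouts are assumptions, not the real DPDK definitions.

#include <stdint.h>
#include <stdlib.h>

#define TBL8_GROUP_NUM_ENTRIES 256	/* assumed entries per tbl8 group */

/* Stand-ins for the real DPDK types; the 4-byte table entry matches the
 * new RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4) check. */
struct lpm_rule      { uint32_t ip; uint32_t next_hop; };
struct lpm_tbl_entry { uint32_t word; };

struct lpm_config { uint32_t max_rules; uint32_t number_tbl8s; };

struct lpm {
	uint32_t max_rules;
	uint32_t number_tbl8s;
	struct lpm_rule *rules_tbl;	/* separate allocation */
	struct lpm_tbl_entry *tbl8;	/* separate, run-time sized */
};

/* Three-part sizing, as in rte_lpm_create_v1604(). */
static struct lpm *
lpm_alloc(const struct lpm_config *cfg)
{
	size_t rules_size = sizeof(struct lpm_rule) * cfg->max_rules;
	size_t tbl8s_size = sizeof(struct lpm_tbl_entry) *
			TBL8_GROUP_NUM_ENTRIES * cfg->number_tbl8s;
	struct lpm *lpm = calloc(1, sizeof(*lpm));

	if (lpm == NULL)
		return NULL;
	lpm->rules_tbl = calloc(1, rules_size);
	lpm->tbl8 = calloc(1, tbl8s_size);
	if (lpm->rules_tbl == NULL || lpm->tbl8 == NULL) {
		/* Unwind everything allocated so far. */
		free(lpm->tbl8);
		free(lpm->rules_tbl);
		free(lpm);
		return NULL;
	}
	lpm->max_rules = cfg->max_rules;
	lpm->number_tbl8s = cfg->number_tbl8s;
	return lpm;
}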
*/ + lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size, + RTE_CACHE_LINE_SIZE, socket_id); + if (lpm == NULL) { + RTE_LOG(ERR, LPM, "LPM memory allocation failed\n"); + rte_free(te); + goto exit; + } + + lpm->rules_tbl = (struct rte_lpm_rule *)rte_zmalloc_socket(NULL, + (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id); + + if (lpm->rules_tbl == NULL) { + RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n"); + rte_free(lpm); + lpm = NULL; + rte_free(te); + goto exit; + } + + lpm->tbl8 = (struct rte_lpm_tbl_entry *)rte_zmalloc_socket(NULL, + (size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id); + + if (lpm->tbl8 == NULL) { + RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n"); + rte_free(lpm->rules_tbl); + rte_free(lpm); + lpm = NULL; + rte_free(te); + goto exit; + } + /* Save user arguments. */ - lpm->max_rules_per_depth = max_rules / RTE_LPM_MAX_DEPTH; - rte_snprintf(lpm->name, sizeof(lpm->name), "%s", name); + lpm->max_rules = config->max_rules; + lpm->number_tbl8s = config->number_tbl8s; + snprintf(lpm->name, sizeof(lpm->name), "%s", name); + + te->data = (void *) lpm; - TAILQ_INSERT_TAIL(lpm_list, lpm, next); + TAILQ_INSERT_TAIL(lpm_list, te, next); + +exit: + rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); return lpm; } +BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04); +MAP_STATIC_SYMBOL( + struct rte_lpm *rte_lpm_create(const char *name, int socket_id, + const struct rte_lpm_config *config), rte_lpm_create_v1604); /* * Deallocates memory for given LPM table. */ void -rte_lpm_free(struct rte_lpm *lpm) +rte_lpm_free_v20(struct rte_lpm_v20 *lpm) +{ + struct rte_lpm_list *lpm_list; + struct rte_tailq_entry *te; + + /* Check user arguments. */ + if (lpm == NULL) + return; + + lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list); + + rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); + + /* find our tailq entry */ + TAILQ_FOREACH(te, lpm_list, next) { + if (te->data == (void *) lpm) + break; + } + if (te != NULL) + TAILQ_REMOVE(lpm_list, te, next); + + rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); + + rte_free(lpm); + rte_free(te); +} +VERSION_SYMBOL(rte_lpm_free, _v20, 2.0); + +void +rte_lpm_free_v1604(struct rte_lpm *lpm) { + struct rte_lpm_list *lpm_list; + struct rte_tailq_entry *te; + /* Check user arguments. */ if (lpm == NULL) return; - RTE_EAL_TAILQ_REMOVE(RTE_TAILQ_LPM, rte_lpm_list, lpm); + lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list); + + rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); + + /* find our tailq entry */ + TAILQ_FOREACH(te, lpm_list, next) { + if (te->data == (void *) lpm) + break; + } + if (te != NULL) + TAILQ_REMOVE(lpm_list, te, next); + + rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); + + rte_free(lpm->tbl8); + rte_free(lpm->rules_tbl); rte_free(lpm); + rte_free(te); } +BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04); +MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm), + rte_lpm_free_v1604); /* * Adds a rule to the rule table. @@ -229,44 +423,145 @@ rte_lpm_free(struct rte_lpm *lpm) * NOTE: Valid range for depth parameter is 1 .. 32 inclusive. */ static inline int32_t -rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, +rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth, uint8_t next_hop) { uint32_t rule_gindex, rule_index, last_rule; + int i; VERIFY_DEPTH(depth); - /* rule_gindex stands for rule group index. */ - rule_gindex = ((depth - 1) * lpm->max_rules_per_depth); - /* Initialise rule_index to point to start of rule group. 
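The create/free hunks above also move per-LPM bookkeeping onto the generic rte_tailq_entry list registered with EAL_REGISTER_TAILQ and guarded by RTE_EAL_TAILQ_RWLOCK: the list node only carries a void pointer to the LPM, so lookup walks the list under the read lock and free removes the node under the write lock before releasing the object. Below is a sketch of the same pattern using only <sys/queue.h> and a pthread rwlock as stand-ins for the EAL primitives; the names and the named_obj type are illustrative, not the DPDK API.

#include <string.h>
#include <stdlib.h>
#include <pthread.h>
#include <sys/queue.h>

/* Generic list node that only carries a pointer, so one tailq can track
 * any object type (the rte_tailq_entry idea). */
struct tailq_entry {
	TAILQ_ENTRY(tailq_entry) next;
	void *data;
};
TAILQ_HEAD(obj_list, tailq_entry);

static struct obj_list obj_head = TAILQ_HEAD_INITIALIZER(obj_head);
static pthread_rwlock_t obj_lock = PTHREAD_RWLOCK_INITIALIZER;	/* stand-in
						 * for RTE_EAL_TAILQ_RWLOCK */

struct named_obj { char name[32]; };

/* Lookup under the read lock, as rte_lpm_find_existing_v1604() does. */
static struct named_obj *
obj_find(const char *name)
{
	struct tailq_entry *te;
	struct named_obj *o = NULL;

	pthread_rwlock_rdlock(&obj_lock);
	TAILQ_FOREACH(te, &obj_head, next) {
		o = te->data;
		if (strncmp(name, o->name, sizeof(o->name)) == 0)
			break;
	}
	pthread_rwlock_unlock(&obj_lock);
	return (te == NULL) ? NULL : o;
}

/* Unregister and free under the write lock, mirroring rte_lpm_free_v1604(). */
static void
obj_free(struct named_obj *o)
{
	struct tailq_entry *te;

	pthread_rwlock_wrlock(&obj_lock);
	TAILQ_FOREACH(te, &obj_head, next)
		if (te->data == o)
			break;
	if (te != NULL)
		TAILQ_REMOVE(&obj_head, te, next);
	pthread_rwlock_unlock(&obj_lock);

	free(o);
	free(te);
}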
*/ - rule_index = rule_gindex; - /* Last rule = Last used rule in this rule group. */ - last_rule = rule_gindex + lpm->used_rules_at_depth[depth - 1]; - /* Scan through rule group to see if rule already exists. */ - for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) { + if (lpm->rule_info[depth - 1].used_rules > 0) { - /* If rule already exists update its next_hop and return. */ - if (lpm->rules_tbl[rule_index].ip == ip_masked) { - lpm->rules_tbl[rule_index].next_hop = next_hop; + /* rule_gindex stands for rule group index. */ + rule_gindex = lpm->rule_info[depth - 1].first_rule; + /* Initialise rule_index to point to start of rule group. */ + rule_index = rule_gindex; + /* Last rule = Last used rule in this rule group. */ + last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules; - return rule_index; + for (; rule_index < last_rule; rule_index++) { + + /* If rule already exists update its next_hop and return. */ + if (lpm->rules_tbl[rule_index].ip == ip_masked) { + lpm->rules_tbl[rule_index].next_hop = next_hop; + + return rule_index; + } + } + + if (rule_index == lpm->max_rules) + return -ENOSPC; + } else { + /* Calculate the position in which the rule will be stored. */ + rule_index = 0; + + for (i = depth - 1; i > 0; i--) { + if (lpm->rule_info[i - 1].used_rules > 0) { + rule_index = lpm->rule_info[i - 1].first_rule + + lpm->rule_info[i - 1].used_rules; + break; + } } + if (rule_index == lpm->max_rules) + return -ENOSPC; + + lpm->rule_info[depth - 1].first_rule = rule_index; } - /* - * If rule does not exist check if there is space to add a new rule to - * this rule group. If there is no space return error. */ - if (lpm->used_rules_at_depth[depth - 1] == lpm->max_rules_per_depth) { - return -ENOSPC; + /* Make room for the new rule in the array. */ + for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) { + if (lpm->rule_info[i - 1].first_rule + + lpm->rule_info[i - 1].used_rules == lpm->max_rules) + return -ENOSPC; + + if (lpm->rule_info[i - 1].used_rules > 0) { + lpm->rules_tbl[lpm->rule_info[i - 1].first_rule + + lpm->rule_info[i - 1].used_rules] + = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule]; + lpm->rule_info[i - 1].first_rule++; + } + } + + /* Add the new rule. */ + lpm->rules_tbl[rule_index].ip = ip_masked; + lpm->rules_tbl[rule_index].next_hop = next_hop; + + /* Increment the used rules counter for this rule group. */ + lpm->rule_info[depth - 1].used_rules++; + + return rule_index; +} + +static inline int32_t +rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, + uint32_t next_hop) +{ + uint32_t rule_gindex, rule_index, last_rule; + int i; + + VERIFY_DEPTH(depth); + + /* Scan through rule group to see if rule already exists. */ + if (lpm->rule_info[depth - 1].used_rules > 0) { + + /* rule_gindex stands for rule group index. */ + rule_gindex = lpm->rule_info[depth - 1].first_rule; + /* Initialise rule_index to point to start of rule group. */ + rule_index = rule_gindex; + /* Last rule = Last used rule in this rule group. */ + last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules; + + for (; rule_index < last_rule; rule_index++) { + + /* If rule already exists update its next_hop and return. */ + if (lpm->rules_tbl[rule_index].ip == ip_masked) { + lpm->rules_tbl[rule_index].next_hop = next_hop; + + return rule_index; + } + } + + if (rule_index == lpm->max_rules) + return -ENOSPC; + } else { + /* Calculate the position in which the rule will be stored. 
*/ + rule_index = 0; + + for (i = depth - 1; i > 0; i--) { + if (lpm->rule_info[i - 1].used_rules > 0) { + rule_index = lpm->rule_info[i - 1].first_rule + + lpm->rule_info[i - 1].used_rules; + break; + } + } + if (rule_index == lpm->max_rules) + return -ENOSPC; + + lpm->rule_info[depth - 1].first_rule = rule_index; } - /* If there is space for the new rule add it. */ + /* Make room for the new rule in the array. */ + for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) { + if (lpm->rule_info[i - 1].first_rule + + lpm->rule_info[i - 1].used_rules == lpm->max_rules) + return -ENOSPC; + + if (lpm->rule_info[i - 1].used_rules > 0) { + lpm->rules_tbl[lpm->rule_info[i - 1].first_rule + + lpm->rule_info[i - 1].used_rules] + = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule]; + lpm->rule_info[i - 1].first_rule++; + } + } + + /* Add the new rule. */ lpm->rules_tbl[rule_index].ip = ip_masked; lpm->rules_tbl[rule_index].next_hop = next_hop; /* Increment the used rules counter for this rule group. */ - lpm->used_rules_at_depth[depth - 1]++; + lpm->rule_info[depth - 1].used_rules++; return rule_index; } @@ -276,63 +571,136 @@ rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, * NOTE: Valid range for depth parameter is 1 .. 32 inclusive. */ static inline void -rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth) +rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth) { - uint32_t rule_gindex, last_rule_index; + int i; VERIFY_DEPTH(depth); - rule_gindex = ((depth - 1) * lpm->max_rules_per_depth); - last_rule_index = rule_gindex + - (lpm->used_rules_at_depth[depth - 1]) - 1; - /* - * Overwrite redundant rule with last rule in group and decrement rule - * counter. - */ - lpm->rules_tbl[rule_index] = lpm->rules_tbl[last_rule_index]; - lpm->used_rules_at_depth[depth - 1]--; + lpm->rules_tbl[rule_index] = + lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule + + lpm->rule_info[depth - 1].used_rules - 1]; + + for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) { + if (lpm->rule_info[i].used_rules > 0) { + lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] = + lpm->rules_tbl[lpm->rule_info[i].first_rule + + lpm->rule_info[i].used_rules - 1]; + lpm->rule_info[i].first_rule--; + } + } + + lpm->rule_info[depth - 1].used_rules--; } +static inline void +rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth) +{ + int i; + + VERIFY_DEPTH(depth); + + lpm->rules_tbl[rule_index] = + lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule + + lpm->rule_info[depth - 1].used_rules - 1]; + + for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) { + if (lpm->rule_info[i].used_rules > 0) { + lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] = + lpm->rules_tbl[lpm->rule_info[i].first_rule + + lpm->rule_info[i].used_rules - 1]; + lpm->rule_info[i].first_rule--; + } + } + + lpm->rule_info[depth - 1].used_rules--; +} /* * Finds a rule in rule table. * NOTE: Valid range for depth parameter is 1 .. 32 inclusive. */ static inline int32_t -rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth) +rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth) +{ + uint32_t rule_gindex, last_rule, rule_index; + + VERIFY_DEPTH(depth); + + rule_gindex = lpm->rule_info[depth - 1].first_rule; + last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules; + + /* Scan used rules at given depth to find rule. */ + for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) { + /* If rule is found return the rule index. 
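The rule_add_*/rule_delete_* hunks above drop the old fixed per-depth quota (max_rules_per_depth) in favour of one packed rules array described by rule_info[depth - 1] = {first_rule, used_rules}. Inserting at depth d appends to d's group and opens a gap by moving, for every deeper group, its first rule to just past its last one and advancing first_rule; deletion reverses the trick. The sketch below reproduces the insertion path on a simplified rule type, with the duplicate-prefix check left out; MAX_RULES and the struct layouts are illustrative.

#include <stdint.h>
#include <stdio.h>

#define MAX_DEPTH 32
#define MAX_RULES 16		/* deliberately small for the demo */

struct rule      { uint32_t ip; uint32_t next_hop; };
struct rule_info { uint32_t used_rules; uint32_t first_rule; };

static struct rule rules_tbl[MAX_RULES];
static struct rule_info rule_info[MAX_DEPTH];	/* index = depth - 1 */

static int32_t
rule_add(uint32_t ip_masked, uint8_t depth, uint32_t next_hop)
{
	uint32_t rule_index;
	int i;

	if (rule_info[depth - 1].used_rules > 0) {
		/* Group already exists: the new rule goes right after it. */
		rule_index = rule_info[depth - 1].first_rule +
				rule_info[depth - 1].used_rules;
	} else {
		/* Empty group: it starts after the nearest shallower group. */
		rule_index = 0;
		for (i = depth - 1; i > 0; i--) {
			if (rule_info[i - 1].used_rules > 0) {
				rule_index = rule_info[i - 1].first_rule +
						rule_info[i - 1].used_rules;
				break;
			}
		}
		rule_info[depth - 1].first_rule = rule_index;
	}
	if (rule_index == MAX_RULES)
		return -1;				/* table full */

	/* Open a gap: move the first rule of each deeper group to its end. */
	for (i = MAX_DEPTH; i > depth; i--) {
		if (rule_info[i - 1].first_rule +
				rule_info[i - 1].used_rules == MAX_RULES)
			return -1;			/* no room to shift */
		if (rule_info[i - 1].used_rules > 0) {
			rules_tbl[rule_info[i - 1].first_rule +
					rule_info[i - 1].used_rules] =
				rules_tbl[rule_info[i - 1].first_rule];
			rule_info[i - 1].first_rule++;
		}
	}

	rules_tbl[rule_index].ip = ip_masked;
	rules_tbl[rule_index].next_hop = next_hop;
	rule_info[depth - 1].used_rules++;
	return (int32_t)rule_index;
}

int main(void)
{
	rule_add(0x0A000000, 8, 1);	/* 10.0.0.0/8  -> index 0 */
	rule_add(0x0A010000, 16, 2);	/* 10.1.0.0/16 -> index 1 */
	rule_add(0x0A000000, 9, 3);	/* 10.0.0.0/9 pushes the /16 group */
	printf("/16 group now starts at %u\n",
			(unsigned)rule_info[15].first_rule);
	return 0;
}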
*/ + if (lpm->rules_tbl[rule_index].ip == ip_masked) + return rule_index; + } + + /* If rule is not found return -EINVAL. */ + return -EINVAL; +} + +static inline int32_t +rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth) { uint32_t rule_gindex, last_rule, rule_index; VERIFY_DEPTH(depth); - rule_gindex = ((depth - 1) * lpm->max_rules_per_depth); - last_rule = rule_gindex + lpm->used_rules_at_depth[depth - 1]; + rule_gindex = lpm->rule_info[depth - 1].first_rule; + last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules; /* Scan used rules at given depth to find rule. */ for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) { /* If rule is found return the rule index. */ if (lpm->rules_tbl[rule_index].ip == ip_masked) - return (rule_index); + return rule_index; } - /* If rule is not found return -E_RTE_NO_TAILQ. */ - return -E_RTE_NO_TAILQ; + /* If rule is not found return -EINVAL. */ + return -EINVAL; } /* * Find, clean and allocate a tbl8. */ static inline int32_t -tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8) +tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8) +{ + uint32_t group_idx; /* tbl8 group index. */ + struct rte_lpm_tbl_entry_v20 *tbl8_entry; + + /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */ + for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS; + group_idx++) { + tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES]; + /* If a free tbl8 group is found clean it and set as VALID. */ + if (!tbl8_entry->valid_group) { + memset(&tbl8_entry[0], 0, + RTE_LPM_TBL8_GROUP_NUM_ENTRIES * + sizeof(tbl8_entry[0])); + + tbl8_entry->valid_group = VALID; + + /* Return group index for allocated tbl8 group. */ + return group_idx; + } + } + + /* If there are no tbl8 groups free then return error. */ + return -ENOSPC; +} + +static inline int32_t +tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s) { - uint32_t tbl8_gindex; /* tbl8 group index. */ - struct rte_lpm_tbl8_entry *tbl8_entry; + uint32_t group_idx; /* tbl8 group index. */ + struct rte_lpm_tbl_entry *tbl8_entry; /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */ - for (tbl8_gindex = 0; tbl8_gindex < RTE_LPM_TBL8_NUM_GROUPS; - tbl8_gindex++) { - tbl8_entry = &tbl8[tbl8_gindex * - RTE_LPM_TBL8_GROUP_NUM_ENTRIES]; + for (group_idx = 0; group_idx < number_tbl8s; group_idx++) { + tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES]; /* If a free tbl8 group is found clean it and set as VALID. */ if (!tbl8_entry->valid_group) { memset(&tbl8_entry[0], 0, @@ -342,7 +710,7 @@ tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8) tbl8_entry->valid_group = VALID; /* Return group index for allocated tbl8 group. 
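tbl8_alloc_v1604() above differs from the v20 version only in scanning a run-time number_tbl8s instead of the compile-time RTE_LPM_TBL8_NUM_GROUPS: it looks for the first group whose leading entry has valid_group cleared, zeroes the whole group and claims it. A compact sketch of that scan, with a simplified entry type and an assumed 256-entry group size:

#include <stdint.h>
#include <string.h>

#define GROUP_NUM_ENTRIES 256	/* assumed entries per tbl8 group */

struct tbl_entry {
	uint32_t next_hop;
	uint8_t  valid;
	uint8_t  valid_group;
	uint8_t  depth;
};

/* Find the first free group, wipe it, and mark it taken. */
static int32_t
tbl8_alloc(struct tbl_entry *tbl8, uint32_t number_tbl8s)
{
	uint32_t group_idx;
	struct tbl_entry *entry;

	for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
		entry = &tbl8[group_idx * GROUP_NUM_ENTRIES];
		if (!entry->valid_group) {
			memset(entry, 0,
			       GROUP_NUM_ENTRIES * sizeof(*entry));
			entry->valid_group = 1;
			return (int32_t)group_idx;
		}
	}
	return -1;	/* no free group (the real code returns -ENOSPC) */
}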
*/ - return tbl8_gindex; + return group_idx; } } @@ -351,14 +719,21 @@ tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8) } static inline void -tbl8_free(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start) +tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start) +{ + /* Set tbl8 group invalid*/ + tbl8[tbl8_group_start].valid_group = INVALID; +} + +static inline void +tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start) { /* Set tbl8 group invalid*/ tbl8[tbl8_group_start].valid_group = INVALID; } static inline int32_t -add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, +add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop) { uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j; @@ -372,55 +747,130 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, * For invalid OR valid and non-extended tbl 24 entries set * entry. */ - if (!lpm->tbl24[i].valid || (lpm->tbl24[i].ext_entry == 0 && + if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 && lpm->tbl24[i].depth <= depth)) { - struct rte_lpm_tbl24_entry new_tbl24_entry = { + struct rte_lpm_tbl_entry_v20 new_tbl24_entry = { .valid = VALID, - .ext_entry = 0, + .valid_group = 0, .depth = depth, - { .next_hop = next_hop, } }; + new_tbl24_entry.next_hop = next_hop; /* Setting tbl24 entry in one go to avoid race - * conditions */ + * conditions + */ lpm->tbl24[i] = new_tbl24_entry; continue; } - /* If tbl24 entry is valid and extended calculate the index - * into tbl8. */ - tbl8_index = lpm->tbl24[tbl24_index].tbl8_gindex * - RTE_LPM_TBL8_GROUP_NUM_ENTRIES; - tbl8_group_end = tbl8_index + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + if (lpm->tbl24[i].valid_group == 1) { + /* If tbl24 entry is valid and extended calculate the + * index into tbl8. + */ + tbl8_index = lpm->tbl24[i].group_idx * + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + tbl8_group_end = tbl8_index + + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + + for (j = tbl8_index; j < tbl8_group_end; j++) { + if (!lpm->tbl8[j].valid || + lpm->tbl8[j].depth <= depth) { + struct rte_lpm_tbl_entry_v20 + new_tbl8_entry = { + .valid = VALID, + .valid_group = VALID, + .depth = depth, + }; + new_tbl8_entry.next_hop = next_hop; + + /* + * Setting tbl8 entry in one go to avoid + * race conditions + */ + lpm->tbl8[j] = new_tbl8_entry; + + continue; + } + } + } + } - for (j = tbl8_index; j < tbl8_group_end; j++) { - if (!lpm->tbl8[j].valid || - lpm->tbl8[j].depth <= depth) { - struct rte_lpm_tbl8_entry new_tbl8_entry = { - .valid = VALID, - .valid_group = VALID, - .depth = depth, - .next_hop = next_hop, - }; + return 0; +} - /* - * Setting tbl8 entry in one go to avoid race - * conditions - */ - lpm->tbl8[j] = new_tbl8_entry; +static inline int32_t +add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, + uint32_t next_hop) +{ +#define group_idx next_hop + uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j; - continue; + /* Calculate the index into Table24. */ + tbl24_index = ip >> 8; + tbl24_range = depth_to_range(depth); + + for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) { + /* + * For invalid OR valid and non-extended tbl 24 entries set + * entry. 
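For a depth of at most 24, add_depth_small_*() touches tbl24 only: the prefix covers depth_to_range(depth) consecutive slots starting at ip >> 8, and every slot that is invalid, or non-extended with an equal or shallower depth, is overwritten with a whole-entry assignment (the real entries are small packed bit-fields, 2 bytes in the v20 layout and 4 bytes in v1604, so that assignment is effectively a single store and readers never see a half-updated slot). A simplified sketch of that loop; the entry layout is illustrative and the extended-entry/tbl8 branch is omitted.

#include <stdint.h>

#define MAX_DEPTH_TBL24 24

/* Simplified tbl24 entry; the real one is a packed bit-field. */
struct tbl24_entry {
	uint32_t next_hop;
	uint8_t  valid;
	uint8_t  ext;		/* 1 = entry points into a tbl8 group */
	uint8_t  depth;
};

/* Caller passes the 2^24-entry tbl24 array; depth is assumed 1..24. */
static void
add_depth_small(struct tbl24_entry *tbl24, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
	uint32_t index = ip >> 8;				/* first /24 slot */
	uint32_t range = 1u << (MAX_DEPTH_TBL24 - depth);	/* slots covered */
	uint32_t i;

	for (i = index; i < index + range; i++) {
		if (!tbl24[i].valid ||
		    (tbl24[i].ext == 0 && tbl24[i].depth <= depth)) {
			struct tbl24_entry e = {
				.next_hop = next_hop,
				.valid = 1,
				.ext = 0,
				.depth = depth,
			};
			/* One whole-entry write, as in the diff. */
			tbl24[i] = e;
		}
		/* Extended slots would instead get their tbl8 group
		 * rewritten; that branch is left out here. */
	}
}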
+ */ + if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 && + lpm->tbl24[i].depth <= depth)) { + + struct rte_lpm_tbl_entry new_tbl24_entry = { + .next_hop = next_hop, + .valid = VALID, + .valid_group = 0, + .depth = depth, + }; + + /* Setting tbl24 entry in one go to avoid race + * conditions + */ + lpm->tbl24[i] = new_tbl24_entry; + + continue; + } + + if (lpm->tbl24[i].valid_group == 1) { + /* If tbl24 entry is valid and extended calculate the + * index into tbl8. + */ + tbl8_index = lpm->tbl24[i].group_idx * + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + tbl8_group_end = tbl8_index + + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + + for (j = tbl8_index; j < tbl8_group_end; j++) { + if (!lpm->tbl8[j].valid || + lpm->tbl8[j].depth <= depth) { + struct rte_lpm_tbl_entry + new_tbl8_entry = { + .valid = VALID, + .valid_group = VALID, + .depth = depth, + .next_hop = next_hop, + }; + + /* + * Setting tbl8 entry in one go to avoid + * race conditions + */ + lpm->tbl8[j] = new_tbl8_entry; + + continue; + } } } } - +#undef group_idx return 0; } static inline int32_t -add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, +add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth, uint8_t next_hop) { uint32_t tbl24_index; @@ -432,7 +882,7 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, if (!lpm->tbl24[tbl24_index].valid) { /* Search for a free tbl8 group. */ - tbl8_group_index = tbl8_alloc(lpm->tbl8); + tbl8_group_index = tbl8_alloc_v20(lpm->tbl8); /* Check tbl8 allocation was successful. */ if (tbl8_group_index < 0) { @@ -457,19 +907,19 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, * so assign whole structure in one go */ - struct rte_lpm_tbl24_entry new_tbl24_entry = { + struct rte_lpm_tbl_entry_v20 new_tbl24_entry = { + { .group_idx = (uint8_t)tbl8_group_index, }, .valid = VALID, - .ext_entry = 1, + .valid_group = 1, .depth = 0, - { .tbl8_gindex = (uint8_t)tbl8_group_index, } }; lpm->tbl24[tbl24_index] = new_tbl24_entry; - }/* If valid entry but not extended calculate the index into Table8. */ - else if (lpm->tbl24[tbl24_index].ext_entry == 0) { + } /* If valid entry but not extended calculate the index into Table8. */ + else if (lpm->tbl24[tbl24_index].valid_group == 0) { /* Search for free tbl8 group. */ - tbl8_group_index = tbl8_alloc(lpm->tbl8); + tbl8_group_index = tbl8_alloc_v20(lpm->tbl8); if (tbl8_group_index < 0) { return tbl8_group_index; @@ -492,15 +942,136 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, /* Insert new rule into the tbl8 entry. */ for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) { - if (!lpm->tbl8[i].valid || - lpm->tbl8[i].depth <= depth) { - lpm->tbl8[i].valid = VALID; - lpm->tbl8[i].depth = depth; - lpm->tbl8[i].next_hop = next_hop; + lpm->tbl8[i].valid = VALID; + lpm->tbl8[i].depth = depth; + lpm->tbl8[i].next_hop = next_hop; + } - continue; + /* + * Update tbl24 entry to point to new tbl8 entry. Note: The + * ext_flag and tbl8_index need to be updated simultaneously, + * so assign whole structure in one go. + */ + + struct rte_lpm_tbl_entry_v20 new_tbl24_entry = { + { .group_idx = (uint8_t)tbl8_group_index, }, + .valid = VALID, + .valid_group = 1, + .depth = 0, + }; + + lpm->tbl24[tbl24_index] = new_tbl24_entry; + + } else { /* + * If it is valid, extended entry calculate the index into tbl8. 
+ */ + tbl8_group_index = lpm->tbl24[tbl24_index].group_idx; + tbl8_group_start = tbl8_group_index * + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + tbl8_index = tbl8_group_start + (ip_masked & 0xFF); + + for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) { + + if (!lpm->tbl8[i].valid || + lpm->tbl8[i].depth <= depth) { + struct rte_lpm_tbl_entry_v20 new_tbl8_entry = { + .valid = VALID, + .depth = depth, + .valid_group = lpm->tbl8[i].valid_group, + }; + new_tbl8_entry.next_hop = next_hop; + /* + * Setting tbl8 entry in one go to avoid race + * condition + */ + lpm->tbl8[i] = new_tbl8_entry; + + continue; } } + } + + return 0; +} + +static inline int32_t +add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, + uint32_t next_hop) +{ +#define group_idx next_hop + uint32_t tbl24_index; + int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index, + tbl8_range, i; + + tbl24_index = (ip_masked >> 8); + tbl8_range = depth_to_range(depth); + + if (!lpm->tbl24[tbl24_index].valid) { + /* Search for a free tbl8 group. */ + tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s); + + /* Check tbl8 allocation was successful. */ + if (tbl8_group_index < 0) { + return tbl8_group_index; + } + + /* Find index into tbl8 and range. */ + tbl8_index = (tbl8_group_index * + RTE_LPM_TBL8_GROUP_NUM_ENTRIES) + + (ip_masked & 0xFF); + + /* Set tbl8 entry. */ + for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) { + lpm->tbl8[i].depth = depth; + lpm->tbl8[i].next_hop = next_hop; + lpm->tbl8[i].valid = VALID; + } + + /* + * Update tbl24 entry to point to new tbl8 entry. Note: The + * ext_flag and tbl8_index need to be updated simultaneously, + * so assign whole structure in one go + */ + + struct rte_lpm_tbl_entry new_tbl24_entry = { + .group_idx = tbl8_group_index, + .valid = VALID, + .valid_group = 1, + .depth = 0, + }; + + lpm->tbl24[tbl24_index] = new_tbl24_entry; + + } /* If valid entry but not extended calculate the index into Table8. */ + else if (lpm->tbl24[tbl24_index].valid_group == 0) { + /* Search for free tbl8 group. */ + tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s); + + if (tbl8_group_index < 0) { + return tbl8_group_index; + } + + tbl8_group_start = tbl8_group_index * + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + tbl8_group_end = tbl8_group_start + + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + + /* Populate new tbl8 with tbl24 value. */ + for (i = tbl8_group_start; i < tbl8_group_end; i++) { + lpm->tbl8[i].valid = VALID; + lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth; + lpm->tbl8[i].next_hop = + lpm->tbl24[tbl24_index].next_hop; + } + + tbl8_index = tbl8_group_start + (ip_masked & 0xFF); + + /* Insert new rule into the tbl8 entry. */ + for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) { + lpm->tbl8[i].valid = VALID; + lpm->tbl8[i].depth = depth; + lpm->tbl8[i].next_hop = next_hop; + } /* * Update tbl24 entry to point to new tbl8 entry. Note: The @@ -508,20 +1079,19 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, * so assign whole structure in one go. */ - struct rte_lpm_tbl24_entry new_tbl24_entry = { + struct rte_lpm_tbl_entry new_tbl24_entry = { + .group_idx = tbl8_group_index, .valid = VALID, - .ext_entry = 1, + .valid_group = 1, .depth = 0, - { .tbl8_gindex = (uint8_t)tbl8_group_index, } }; lpm->tbl24[tbl24_index] = new_tbl24_entry; - } - else { /* + } else { /* * If it is valid, extended entry calculate the index into tbl8. 
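In the add_depth_big_*() hunks above, when the target tbl24 slot is not yet extended, the code first takes a free tbl8 group, seeds it (with the existing tbl24 route if there was one), writes the more specific entries, and only then switches the tbl24 slot to "extended" with a single whole-entry assignment, so a concurrent lookup never follows a pointer into a half-built group. The sketch below merges the two non-extended branches into one function; the entry layouts, the parameter list and the pre-allocated free_group argument are simplifications.

#include <stdint.h>

#define GROUP_NUM_ENTRIES 256	/* assumed entries per tbl8 group */

/* Simplified entries; the real ones are packed bit-fields. */
struct tbl24_entry { uint32_t next_hop_or_group; uint8_t valid, ext, depth; };
struct tbl8_entry  { uint32_t next_hop; uint8_t valid, valid_group, depth; };

/* depth is assumed to be 25..32; free_group comes from a tbl8 allocator. */
static int
add_depth_big(struct tbl24_entry *tbl24, struct tbl8_entry *tbl8,
		int32_t free_group, uint32_t ip_masked, uint8_t depth,
		uint32_t next_hop)
{
	uint32_t tbl24_index = ip_masked >> 8;
	uint32_t start, first, range, i;

	if (free_group < 0)
		return -1;			/* tbl8 groups exhausted */

	start = (uint32_t)free_group * GROUP_NUM_ENTRIES;
	first = start + (ip_masked & 0xFF);
	range = 1u << (32 - depth);

	/* 1. Seed the whole group with the route already held by tbl24,
	 *    if any, so less specific traffic keeps resolving. */
	if (tbl24[tbl24_index].valid) {
		for (i = start; i < start + GROUP_NUM_ENTRIES; i++) {
			tbl8[i].valid = 1;
			tbl8[i].depth = tbl24[tbl24_index].depth;
			tbl8[i].next_hop = tbl24[tbl24_index].next_hop_or_group;
		}
	}

	/* 2. Write the new, more specific route into its slice. */
	for (i = first; i < first + range; i++) {
		tbl8[i].valid = 1;
		tbl8[i].depth = depth;
		tbl8[i].next_hop = next_hop;
	}

	/* 3. Publish: flip the tbl24 slot to "extended" in one go, only
	 *    after the group is complete. */
	struct tbl24_entry e = {
		.next_hop_or_group = (uint32_t)free_group,
		.valid = 1, .ext = 1, .depth = 0,
	};
	tbl24[tbl24_index] = e;
	return 0;
}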
*/ - tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex; + tbl8_group_index = lpm->tbl24[tbl24_index].group_idx; tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; tbl8_index = tbl8_group_start + (ip_masked & 0xFF); @@ -530,10 +1100,11 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, if (!lpm->tbl8[i].valid || lpm->tbl8[i].depth <= depth) { - struct rte_lpm_tbl8_entry new_tbl8_entry = { + struct rte_lpm_tbl_entry new_tbl8_entry = { .valid = VALID, .depth = depth, .next_hop = next_hop, + .valid_group = lpm->tbl8[i].valid_group, }; /* @@ -546,7 +1117,7 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, } } } - +#undef group_idx return 0; } @@ -554,7 +1125,7 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, * Add a route */ int -rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, +rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop) { int32_t rule_index, status = 0; @@ -567,7 +1138,7 @@ rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, ip_masked = ip & depth_to_mask(depth); /* Add the rule to the rule table. */ - rule_index = rule_add(lpm, ip_masked, depth, next_hop); + rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop); /* If the is no space available for new rule return error. */ if (rule_index < 0) { @@ -575,17 +1146,57 @@ rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, } if (depth <= MAX_DEPTH_TBL24) { - status = add_depth_small(lpm, ip_masked, depth, next_hop); + status = add_depth_small_v20(lpm, ip_masked, depth, next_hop); + } else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */ + status = add_depth_big_v20(lpm, ip_masked, depth, next_hop); + + /* + * If add fails due to exhaustion of tbl8 extensions delete + * rule that was added to rule table. + */ + if (status < 0) { + rule_delete_v20(lpm, rule_index, depth); + + return status; + } } - else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */ - status = add_depth_big(lpm, ip_masked, depth, next_hop); + + return 0; +} +VERSION_SYMBOL(rte_lpm_add, _v20, 2.0); + +int +rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, + uint32_t next_hop) +{ + int32_t rule_index, status = 0; + uint32_t ip_masked; + + /* Check user arguments. */ + if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) + return -EINVAL; + + ip_masked = ip & depth_to_mask(depth); + + /* Add the rule to the rule table. */ + rule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop); + + /* If the is no space available for new rule return error. */ + if (rule_index < 0) { + return rule_index; + } + + if (depth <= MAX_DEPTH_TBL24) { + status = add_depth_small_v1604(lpm, ip_masked, depth, next_hop); + } else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */ + status = add_depth_big_v1604(lpm, ip_masked, depth, next_hop); /* * If add fails due to exhaustion of tbl8 extensions delete * rule that was added to rule table. 
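rte_lpm_add_v1604(), like the v20 path above it, keeps the rule table and the lookup tables consistent by ordering its steps: the route is first recorded in the rule table, then tbl24/tbl8 are updated, and if that second step fails (typically -ENOSPC when no tbl8 group is free) the freshly added rule is deleted again. A bare-bones sketch of that control flow with stubbed helpers; the helper names here are placeholders, not the library API.

#include <stdint.h>
#include <errno.h>

/* Placeholder helpers; only the ordering below is the point. */
static int32_t rule_add(uint32_t ip, uint8_t d, uint32_t nh)
{ (void)ip; (void)d; (void)nh; return 0; }
static void rule_delete(int32_t idx, uint8_t d)
{ (void)idx; (void)d; }
static int32_t update_tables(uint32_t ip, uint8_t d, uint32_t nh)
{ (void)ip; (void)d; (void)nh; return -ENOSPC; }

static int
lpm_add(uint32_t ip, uint8_t depth, uint32_t next_hop)
{
	int32_t rule_index, status;

	/* 1. Record the route in the flat rule table. */
	rule_index = rule_add(ip, depth, next_hop);
	if (rule_index < 0)
		return rule_index;

	/* 2. Update the lookup structures (tbl24/tbl8). */
	status = update_tables(ip, depth, next_hop);

	/* 3. Roll back step 1 on failure, so the rule table never lists
	 *    a route the lookup tables cannot serve. */
	if (status < 0) {
		rule_delete(rule_index, depth);
		return status;
	}
	return 0;
}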
*/ if (status < 0) { - rule_delete(lpm, rule_index, depth); + rule_delete_v1604(lpm, rule_index, depth); return status; } @@ -593,9 +1204,72 @@ rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, return 0; } +BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04); +MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, + uint8_t depth, uint32_t next_hop), rte_lpm_add_v1604); + +/* + * Look for a rule in the high-level rules table + */ +int +rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, +uint8_t *next_hop) +{ + uint32_t ip_masked; + int32_t rule_index; + + /* Check user arguments. */ + if ((lpm == NULL) || + (next_hop == NULL) || + (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) + return -EINVAL; + + /* Look for the rule using rule_find. */ + ip_masked = ip & depth_to_mask(depth); + rule_index = rule_find_v20(lpm, ip_masked, depth); + + if (rule_index >= 0) { + *next_hop = lpm->rules_tbl[rule_index].next_hop; + return 1; + } + + /* If rule is not found return 0. */ + return 0; +} +VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0); + +int +rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, +uint32_t *next_hop) +{ + uint32_t ip_masked; + int32_t rule_index; + + /* Check user arguments. */ + if ((lpm == NULL) || + (next_hop == NULL) || + (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) + return -EINVAL; + + /* Look for the rule using rule_find. */ + ip_masked = ip & depth_to_mask(depth); + rule_index = rule_find_v1604(lpm, ip_masked, depth); + + if (rule_index >= 0) { + *next_hop = lpm->rules_tbl[rule_index].next_hop; + return 1; + } + + /* If rule is not found return 0. */ + return 0; +} +BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04); +MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, + uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604); static inline int32_t -find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth) +find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, + uint8_t *sub_rule_depth) { int32_t rule_index; uint32_t ip_masked; @@ -604,21 +1278,44 @@ find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth) for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) { ip_masked = ip & depth_to_mask(prev_depth); - rule_index = rule_find(lpm, ip_masked, prev_depth); + rule_index = rule_find_v20(lpm, ip_masked, prev_depth); - if (rule_index >= 0) + if (rule_index >= 0) { + *sub_rule_depth = prev_depth; return rule_index; + } } return -1; } static inline int32_t -delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked, - uint8_t depth, int32_t sub_rule_index) +find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, + uint8_t *sub_rule_depth) +{ + int32_t rule_index; + uint32_t ip_masked; + uint8_t prev_depth; + + for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) { + ip_masked = ip & depth_to_mask(prev_depth); + + rule_index = rule_find_v1604(lpm, ip_masked, prev_depth); + + if (rule_index >= 0) { + *sub_rule_depth = prev_depth; + return rule_index; + } + } + + return -1; +} + +static inline int32_t +delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, + uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth) { uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j; - uint8_t new_depth; /* Calculate the range and index into Table24. 
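A small but important change visible above: find_previous_rule_*() now reports the depth of the covering rule through a sub_rule_depth out-parameter instead of the old trick of deriving it from the rule index (which only worked while every depth had a fixed quota of rules). The delete paths below use that depth directly when rewriting entries. A sketch of the walk over shallower prefixes; rule_find() is a stub standing in for the real per-depth lookup.

#include <stdint.h>

/* Stub: the real rule_find() searches the per-depth rule group. */
static int32_t
rule_find(uint32_t ip_masked, uint8_t depth)
{
	(void)ip_masked; (void)depth;
	return -1;
}

static uint32_t
depth_to_mask(uint8_t depth)
{
	return (uint32_t)(~0u) << (32 - depth);
}

/* Try every shallower prefix length and report the first (i.e. longest)
 * one that still covers 'ip', returning its rule index and depth. */
static int32_t
find_previous_rule(uint32_t ip, uint8_t depth, uint8_t *sub_rule_depth)
{
	uint8_t prev_depth;

	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
		int32_t rule_index = rule_find(ip & depth_to_mask(prev_depth),
				prev_depth);
		if (rule_index >= 0) {
			*sub_rule_depth = prev_depth;
			return rule_index;
		}
	}
	return -1;	/* no covering rule exists */
}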
*/ tbl24_range = depth_to_range(depth); @@ -634,19 +1331,18 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked, * associated with this rule. */ for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) { - - if (lpm->tbl24[i].ext_entry == 0 && - lpm->tbl24[i].depth <= depth ) { + + if (lpm->tbl24[i].valid_group == 0 && + lpm->tbl24[i].depth <= depth) { lpm->tbl24[i].valid = INVALID; - } - else { + } else if (lpm->tbl24[i].valid_group == 1) { /* * If TBL24 entry is extended, then there has * to be a rule with depth >= 25 in the * associated TBL8 group. */ - tbl8_group_index = lpm->tbl24[i].tbl8_gindex; + tbl8_group_index = lpm->tbl24[i].group_idx; tbl8_index = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; @@ -658,48 +1354,137 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked, } } } - } - else { + } else { /* * If a replacement rule exists then modify entries * associated with this rule. */ - /* Calculate depth of sub_rule. */ - new_depth = (uint8_t) (sub_rule_index / - lpm->max_rules_per_depth); + struct rte_lpm_tbl_entry_v20 new_tbl24_entry = { + {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,}, + .valid = VALID, + .valid_group = 0, + .depth = sub_rule_depth, + }; + + struct rte_lpm_tbl_entry_v20 new_tbl8_entry = { + .valid = VALID, + .valid_group = VALID, + .depth = sub_rule_depth, + }; + new_tbl8_entry.next_hop = + lpm->rules_tbl[sub_rule_index].next_hop; + + for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) { + + if (lpm->tbl24[i].valid_group == 0 && + lpm->tbl24[i].depth <= depth) { + lpm->tbl24[i] = new_tbl24_entry; + } else if (lpm->tbl24[i].valid_group == 1) { + /* + * If TBL24 entry is extended, then there has + * to be a rule with depth >= 25 in the + * associated TBL8 group. + */ + + tbl8_group_index = lpm->tbl24[i].group_idx; + tbl8_index = tbl8_group_index * + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + + for (j = tbl8_index; j < (tbl8_index + + RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) { + + if (lpm->tbl8[j].depth <= depth) + lpm->tbl8[j] = new_tbl8_entry; + } + } + } + } + + return 0; +} + +static inline int32_t +delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked, + uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth) +{ +#define group_idx next_hop + uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j; + + /* Calculate the range and index into Table24. */ + tbl24_range = depth_to_range(depth); + tbl24_index = (ip_masked >> 8); + + /* + * Firstly check the sub_rule_index. A -1 indicates no replacement rule + * and a positive number indicates a sub_rule_index. + */ + if (sub_rule_index < 0) { + /* + * If no replacement rule exists then invalidate entries + * associated with this rule. + */ + for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) { + + if (lpm->tbl24[i].valid_group == 0 && + lpm->tbl24[i].depth <= depth) { + lpm->tbl24[i].valid = INVALID; + } else if (lpm->tbl24[i].valid_group == 1) { + /* + * If TBL24 entry is extended, then there has + * to be a rule with depth >= 25 in the + * associated TBL8 group. + */ + + tbl8_group_index = lpm->tbl24[i].group_idx; + tbl8_index = tbl8_group_index * + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + + for (j = tbl8_index; j < (tbl8_index + + RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) { - struct rte_lpm_tbl24_entry new_tbl24_entry = { + if (lpm->tbl8[j].depth <= depth) + lpm->tbl8[j].valid = INVALID; + } + } + } + } else { + /* + * If a replacement rule exists then modify entries + * associated with this rule. 
+ */ + + struct rte_lpm_tbl_entry new_tbl24_entry = { + .next_hop = lpm->rules_tbl[sub_rule_index].next_hop, .valid = VALID, - .ext_entry = 0, - .depth = new_depth, - {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,} + .valid_group = 0, + .depth = sub_rule_depth, }; - struct rte_lpm_tbl8_entry new_tbl8_entry = { + struct rte_lpm_tbl_entry new_tbl8_entry = { .valid = VALID, - .depth = new_depth, + .valid_group = VALID, + .depth = sub_rule_depth, .next_hop = lpm->rules_tbl [sub_rule_index].next_hop, }; for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) { - if (lpm->tbl24[i].ext_entry == 0 && - lpm->tbl24[i].depth <= depth ) { + if (lpm->tbl24[i].valid_group == 0 && + lpm->tbl24[i].depth <= depth) { lpm->tbl24[i] = new_tbl24_entry; - } - else { + } else if (lpm->tbl24[i].valid_group == 1) { /* * If TBL24 entry is extended, then there has * to be a rule with depth >= 25 in the * associated TBL8 group. */ - tbl8_group_index = lpm->tbl24[i].tbl8_gindex; + tbl8_group_index = lpm->tbl24[i].group_idx; tbl8_index = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; - + for (j = tbl8_index; j < (tbl8_index + RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) { @@ -709,7 +1494,7 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked, } } } - +#undef group_idx return 0; } @@ -722,7 +1507,8 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked, * thus can be recycled */ static inline int32_t -tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start) +tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8, + uint32_t tbl8_group_start) { uint32_t tbl8_group_end, i; tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; @@ -739,7 +1525,7 @@ tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start) * and if so check the rest of the entries to verify that they * are all of this depth. */ - if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) { + if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) { for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) { @@ -768,12 +1554,58 @@ tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start) } static inline int32_t -delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, - uint8_t depth, int32_t sub_rule_index) +tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8, + uint32_t tbl8_group_start) +{ + uint32_t tbl8_group_end, i; + tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + + /* + * Check the first entry of the given tbl8. If it is invalid we know + * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH + * (As they would affect all entries in a tbl8) and thus this table + * can not be recycled. + */ + if (tbl8[tbl8_group_start].valid) { + /* + * If first entry is valid check if the depth is less than 24 + * and if so check the rest of the entries to verify that they + * are all of this depth. + */ + if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) { + for (i = (tbl8_group_start + 1); i < tbl8_group_end; + i++) { + + if (tbl8[i].depth != + tbl8[tbl8_group_start].depth) { + + return -EEXIST; + } + } + /* If all entries are the same return the tb8 index */ + return tbl8_group_start; + } + + return -EEXIST; + } + /* + * If the first entry is invalid check if the rest of the entries in + * the tbl8 are invalid. + */ + for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) { + if (tbl8[i].valid) + return -EEXIST; + } + /* If no valid entries are found then return -EINVAL. 
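tbl8_recycle_check_*() above classifies a tbl8 group after a deletion: if every entry is invalid, the group can be freed and the tbl24 slot cleared (-EINVAL); if every entry is valid with one common depth of at most 24, the group collapses back into a single tbl24 entry (the group start index is returned); anything else means the group is still needed (-EEXIST). A condensed sketch of the same decision on a simplified entry type:

#include <stdint.h>
#include <errno.h>

#define GROUP_NUM_ENTRIES 256
#define MAX_DEPTH_TBL24   24

struct tbl8_entry { uint8_t valid; uint8_t depth; uint32_t next_hop; };

/* >= 0     -> all entries share one depth <= 24; fold into tbl24
 * -EINVAL  -> group completely empty; free it and invalidate tbl24
 * -EEXIST  -> still holds >24-bit routes (or a mix); keep it        */
static int32_t
tbl8_recycle_check(const struct tbl8_entry *tbl8, uint32_t group_start)
{
	uint32_t i, end = group_start + GROUP_NUM_ENTRIES;

	if (tbl8[group_start].valid) {
		if (tbl8[group_start].depth <= MAX_DEPTH_TBL24) {
			for (i = group_start + 1; i < end; i++)
				if (tbl8[i].depth != tbl8[group_start].depth)
					return -EEXIST;
			return (int32_t)group_start;
		}
		return -EEXIST;
	}

	for (i = group_start + 1; i < end; i++)
		if (tbl8[i].valid)
			return -EEXIST;
	return -EINVAL;
}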
*/ + return -EINVAL; +} + +static inline int32_t +delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, + uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth) { uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index, tbl8_range, i; - uint8_t new_depth; int32_t tbl8_recycle_index; /* @@ -783,7 +1615,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, tbl24_index = ip_masked >> 8; /* Calculate the index into tbl8 and range. */ - tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex; + tbl8_group_index = lpm->tbl24[tbl24_index].group_idx; tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; tbl8_index = tbl8_group_start + (ip_masked & 0xFF); tbl8_range = depth_to_range(depth); @@ -797,15 +1629,91 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, if (lpm->tbl8[i].depth <= depth) lpm->tbl8[i].valid = INVALID; } + } else { + /* Set new tbl8 entry. */ + struct rte_lpm_tbl_entry_v20 new_tbl8_entry = { + .valid = VALID, + .depth = sub_rule_depth, + .valid_group = lpm->tbl8[tbl8_group_start].valid_group, + }; + + new_tbl8_entry.next_hop = + lpm->rules_tbl[sub_rule_index].next_hop; + /* + * Loop through the range of entries on tbl8 for which the + * rule_to_delete must be modified. + */ + for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) { + if (lpm->tbl8[i].depth <= depth) + lpm->tbl8[i] = new_tbl8_entry; + } } - else { - new_depth = (uint8_t)(sub_rule_index / - lpm->max_rules_per_depth); + /* + * Check if there are any valid entries in this tbl8 group. If all + * tbl8 entries are invalid we can free the tbl8 and invalidate the + * associated tbl24 entry. + */ + + tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start); + + if (tbl8_recycle_index == -EINVAL) { + /* Set tbl24 before freeing tbl8 to avoid race condition. */ + lpm->tbl24[tbl24_index].valid = 0; + tbl8_free_v20(lpm->tbl8, tbl8_group_start); + } else if (tbl8_recycle_index > -1) { + /* Update tbl24 entry. */ + struct rte_lpm_tbl_entry_v20 new_tbl24_entry = { + { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, }, + .valid = VALID, + .valid_group = 0, + .depth = lpm->tbl8[tbl8_recycle_index].depth, + }; + + /* Set tbl24 before freeing tbl8 to avoid race condition. */ + lpm->tbl24[tbl24_index] = new_tbl24_entry; + tbl8_free_v20(lpm->tbl8, tbl8_group_start); + } + + return 0; +} + +static inline int32_t +delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, + uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth) +{ +#define group_idx next_hop + uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index, + tbl8_range, i; + int32_t tbl8_recycle_index; + + /* + * Calculate the index into tbl24 and range. Note: All depths larger + * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry. + */ + tbl24_index = ip_masked >> 8; + + /* Calculate the index into tbl8 and range. */ + tbl8_group_index = lpm->tbl24[tbl24_index].group_idx; + tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + tbl8_index = tbl8_group_start + (ip_masked & 0xFF); + tbl8_range = depth_to_range(depth); + + if (sub_rule_index < 0) { + /* + * Loop through the range of entries on tbl8 for which the + * rule_to_delete must be removed or modified. + */ + for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) { + if (lpm->tbl8[i].depth <= depth) + lpm->tbl8[i].valid = INVALID; + } + } else { /* Set new tbl8 entry. 
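The tail of delete_depth_big_*(), visible in the v20 body above and repeated for v1604 below, acts on that classification and is careful about ordering: the tbl24 slot is rewritten (or invalidated) before the tbl8 group is handed back, so a reader can never be steered into a group that has already been freed. The sketch below takes the recycle-check result as a parameter; entry layouts are simplified and tbl8_free() mirrors the one-line helper from the diff.

#include <stdint.h>
#include <errno.h>

struct tbl24_entry { uint32_t next_hop_or_group; uint8_t valid, ext, depth; };
struct tbl8_entry  { uint32_t next_hop; uint8_t valid, valid_group, depth; };

/* As in the diff: clearing the first entry's valid_group flag releases
 * the whole group back to the allocator. */
static void
tbl8_free(struct tbl8_entry *tbl8, uint32_t group_start)
{
	tbl8[group_start].valid_group = 0;
}

/* 'recycle' is the result of tbl8_recycle_check() for this group. */
static void
finish_delete(struct tbl24_entry *tbl24, struct tbl8_entry *tbl8,
		uint32_t tbl24_index, uint32_t group_start, int32_t recycle)
{
	if (recycle == -EINVAL) {
		/* Group is empty: detach it from tbl24 first ... */
		tbl24[tbl24_index].valid = 0;
		/* ... and only then release it. */
		tbl8_free(tbl8, group_start);
	} else if (recycle > -1) {
		/* Group holds a single /<=24 route: rebuild the tbl24
		 * entry from it, publish it in one go, then release. */
		struct tbl24_entry e = {
			.next_hop_or_group = tbl8[recycle].next_hop,
			.valid = 1, .ext = 0, .depth = tbl8[recycle].depth,
		};
		tbl24[tbl24_index] = e;
		tbl8_free(tbl8, group_start);
	}
	/* recycle == -EEXIST: the group is still in use; leave it alone. */
}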
*/ - struct rte_lpm_tbl8_entry new_tbl8_entry = { + struct rte_lpm_tbl_entry new_tbl8_entry = { .valid = VALID, - .depth = new_depth, + .depth = sub_rule_depth, + .valid_group = lpm->tbl8[tbl8_group_start].valid_group, .next_hop = lpm->rules_tbl[sub_rule_index].next_hop, }; @@ -825,27 +1733,26 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, * associated tbl24 entry. */ - tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start); + tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start); - if (tbl8_recycle_index == -EINVAL){ + if (tbl8_recycle_index == -EINVAL) { /* Set tbl24 before freeing tbl8 to avoid race condition. */ lpm->tbl24[tbl24_index].valid = 0; - tbl8_free(lpm->tbl8, tbl8_group_start); - } - else if (tbl8_recycle_index > -1) { + tbl8_free_v1604(lpm->tbl8, tbl8_group_start); + } else if (tbl8_recycle_index > -1) { /* Update tbl24 entry. */ - struct rte_lpm_tbl24_entry new_tbl24_entry = { + struct rte_lpm_tbl_entry new_tbl24_entry = { + .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, .valid = VALID, - .ext_entry = 0, + .valid_group = 0, .depth = lpm->tbl8[tbl8_recycle_index].depth, - { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, } }; /* Set tbl24 before freeing tbl8 to avoid race condition. */ lpm->tbl24[tbl24_index] = new_tbl24_entry; - tbl8_free(lpm->tbl8, tbl8_group_start); + tbl8_free_v1604(lpm->tbl8, tbl8_group_start); } - +#undef group_idx return 0; } @@ -853,10 +1760,11 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, * Deletes a rule */ int -rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth) +rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth) { int32_t rule_to_delete_index, sub_rule_index; uint32_t ip_masked; + uint8_t sub_rule_depth; /* * Check input arguments. Note: IP must be a positive integer of 32 * bits in length therefore it need not be checked. @@ -871,46 +1779,104 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth) * Find the index of the input rule, that needs to be deleted, in the * rule table. */ - rule_to_delete_index = rule_find(lpm, ip_masked, depth); + rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth); /* * Check if rule_to_delete_index was found. If no rule was found the - * function rule_find returns -E_RTE_NO_TAILQ. + * function rule_find returns -EINVAL. */ if (rule_to_delete_index < 0) - return -E_RTE_NO_TAILQ; + return -EINVAL; /* Delete the rule from the rule table. */ - rule_delete(lpm, rule_to_delete_index, depth); + rule_delete_v20(lpm, rule_to_delete_index, depth); /* * Find rule to replace the rule_to_delete. If there is no rule to * replace the rule_to_delete we return -1 and invalidate the table * entries associated with this rule. */ - sub_rule_index = find_previous_rule(lpm, ip, depth); + sub_rule_depth = 0; + sub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth); /* * If the input depth value is less than 25 use function * delete_depth_small otherwise use delete_depth_big. 
*/ if (depth <= MAX_DEPTH_TBL24) { - return delete_depth_small(lpm, ip_masked, depth, - sub_rule_index); + return delete_depth_small_v20(lpm, ip_masked, depth, + sub_rule_index, sub_rule_depth); + } else { /* If depth > MAX_DEPTH_TBL24 */ + return delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index, + sub_rule_depth); } - else { /* If depth > MAX_DEPTH_TBL24 */ - return delete_depth_big(lpm, ip_masked, depth, sub_rule_index); +} +VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0); + +int +rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth) +{ + int32_t rule_to_delete_index, sub_rule_index; + uint32_t ip_masked; + uint8_t sub_rule_depth; + /* + * Check input arguments. Note: IP must be a positive integer of 32 + * bits in length therefore it need not be checked. + */ + if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) { + return -EINVAL; + } + + ip_masked = ip & depth_to_mask(depth); + + /* + * Find the index of the input rule, that needs to be deleted, in the + * rule table. + */ + rule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth); + + /* + * Check if rule_to_delete_index was found. If no rule was found the + * function rule_find returns -EINVAL. + */ + if (rule_to_delete_index < 0) + return -EINVAL; + + /* Delete the rule from the rule table. */ + rule_delete_v1604(lpm, rule_to_delete_index, depth); + + /* + * Find rule to replace the rule_to_delete. If there is no rule to + * replace the rule_to_delete we return -1 and invalidate the table + * entries associated with this rule. + */ + sub_rule_depth = 0; + sub_rule_index = find_previous_rule_v1604(lpm, ip, depth, &sub_rule_depth); + + /* + * If the input depth value is less than 25 use function + * delete_depth_small otherwise use delete_depth_big. + */ + if (depth <= MAX_DEPTH_TBL24) { + return delete_depth_small_v1604(lpm, ip_masked, depth, + sub_rule_index, sub_rule_depth); + } else { /* If depth > MAX_DEPTH_TBL24 */ + return delete_depth_big_v1604(lpm, ip_masked, depth, sub_rule_index, + sub_rule_depth); } } +BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04); +MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, + uint8_t depth), rte_lpm_delete_v1604); /* * Delete all rules from the LPM table. */ void -rte_lpm_delete_all(struct rte_lpm *lpm) +rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm) { - /* Zero used rules counter. */ - memset(lpm->used_rules_at_depth, 0, sizeof(lpm->used_rules_at_depth)); + /* Zero rule information. */ + memset(lpm->rule_info, 0, sizeof(lpm->rule_info)); /* Zero tbl24. */ memset(lpm->tbl24, 0, sizeof(lpm->tbl24)); @@ -919,7 +1885,26 @@ rte_lpm_delete_all(struct rte_lpm *lpm) memset(lpm->tbl8, 0, sizeof(lpm->tbl8)); /* Delete all rules form the rules table. */ - memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * - (lpm->max_rules_per_depth * RTE_LPM_MAX_DEPTH)); + memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules); } +VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0); +void +rte_lpm_delete_all_v1604(struct rte_lpm *lpm) +{ + /* Zero rule information. */ + memset(lpm->rule_info, 0, sizeof(lpm->rule_info)); + + /* Zero tbl24. */ + memset(lpm->tbl24, 0, sizeof(lpm->tbl24)); + + /* Zero tbl8. */ + memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0]) + * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s); + + /* Delete all rules form the rules table. 
*/
+	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
+}
+BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04);
+MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm),
+		rte_lpm_delete_all_v1604);
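One easy-to-miss consequence of moving tbl8 and rules_tbl out of the rte_lpm structure: rte_lpm_delete_all_v1604() can no longer clear them with sizeof(lpm->tbl8), which would now only measure a pointer, so the byte counts are rebuilt from the run-time configuration. A small sketch of the difference, with an assumed 256-entry group size and illustrative types:

#include <stdint.h>
#include <string.h>

#define GROUP_NUM_ENTRIES 256	/* assumed tbl8 group size */

struct tbl8_entry { uint32_t word; };
struct rule       { uint32_t ip; uint32_t next_hop; };

struct lpm_v20_like {
	struct tbl8_entry tbl8[4 * GROUP_NUM_ENTRIES];	/* embedded array */
};

struct lpm_v1604_like {
	uint32_t number_tbl8s;
	uint32_t max_rules;
	struct tbl8_entry *tbl8;	/* separate allocation */
	struct rule *rules_tbl;		/* separate allocation */
};

static void
clear_tables(struct lpm_v20_like *old, struct lpm_v1604_like *new_lpm)
{
	/* Embedded array: sizeof() really is the whole table. */
	memset(old->tbl8, 0, sizeof(old->tbl8));

	/* Pointer members: sizeof() would only give the pointer size, so
	 * the byte counts must be recomputed from the configuration,
	 * exactly as rte_lpm_delete_all_v1604() does. */
	memset(new_lpm->tbl8, 0, sizeof(new_lpm->tbl8[0]) *
			GROUP_NUM_ENTRIES * new_lpm->number_tbl8s);
	memset(new_lpm->rules_tbl, 0,
			sizeof(new_lpm->rules_tbl[0]) * new_lpm->max_rules);
}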