X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_lpm%2Frte_lpm.c;h=983e04b1598edb8f55634226653b969336d60e70;hb=fdf20fa7bee9df9037116318a87080e1eb7e757e;hp=b1bc6b301b6d34489fe919233334074bc6e33210;hpb=3569fcf1c4be7ac6f0179b40fcfba4c89b685e1b;p=dpdk.git

diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index b1bc6b301b..983e04b159 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -1,35 +1,34 @@
 /*-
  *   BSD LICENSE
- *
- *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
  *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
+ *
+ *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
  *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
 */

 #include
@@ -43,7 +42,7 @@
 #include
 #include
 #include
-#include <rte_memory.h>	/* for definition of CACHE_LINE_SIZE */
+#include <rte_memory.h>	/* for definition of RTE_CACHE_LINE_SIZE */
 #include
 #include
 #include
@@ -52,11 +51,13 @@
 #include
 #include
 #include
+#include
+#include

 #include "rte_lpm.h"

-TAILQ_HEAD(rte_lpm_list, rte_lpm);
-
+TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);
+
 #define MAX_DEPTH_TBL24 24

 enum valid_flag {
@@ -117,22 +118,29 @@ depth_to_range(uint8_t depth)
 struct rte_lpm *
 rte_lpm_find_existing(const char *name)
 {
-	struct rte_lpm *l;
+	struct rte_lpm *l = NULL;
+	struct rte_tailq_entry *te;
 	struct rte_lpm_list *lpm_list;

 	/* check that we have an initialised tail queue */
-	if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM, rte_lpm_list)) == NULL) {
+	if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM,
+			rte_lpm_list)) == NULL) {
 		rte_errno = E_RTE_NO_TAILQ;
 		return NULL;
 	}

-	TAILQ_FOREACH(l, lpm_list, next) {
+	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+	TAILQ_FOREACH(te, lpm_list, next) {
+		l = (struct rte_lpm *) te->data;
 		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
 			break;
 	}
+	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

-	if (l == NULL)
+	if (te == NULL) {
 		rte_errno = ENOENT;
+		return NULL;
+	}

 	return l;
 }
@@ -146,14 +154,15 @@ rte_lpm_create(const char *name, int socket_id, int max_rules,
 {
 	char mem_name[RTE_LPM_NAMESIZE];
 	struct rte_lpm *lpm = NULL;
+	struct rte_tailq_entry *te;
 	uint32_t mem_size;
 	struct rte_lpm_list *lpm_list;

 	/* check that we have an initialised tail queue */
-	if ((lpm_list =
-	     RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM, rte_lpm_list)) == NULL) {
+	if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM,
+			rte_lpm_list)) == NULL) {
 		rte_errno = E_RTE_NO_TAILQ;
-		return NULL;
+		return NULL;
 	}

 	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl24_entry) != 2);
@@ -165,41 +174,48 @@ rte_lpm_create(const char *name, int socket_id, int max_rules,
 		return NULL;
 	}

-	rte_snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
-
-	/*
-	 * Pad out max_rules so that each depth is given the same number of
-	 * rules.
-	 */
-	if (max_rules % RTE_LPM_MAX_DEPTH) {
-		max_rules += RTE_LPM_MAX_DEPTH -
-				(max_rules % RTE_LPM_MAX_DEPTH);
-	}
+	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

 	/* Determine the amount of memory to allocate. */
 	mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);

+	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
 	/* guarantee there's no existing */
-	TAILQ_FOREACH(lpm, lpm_list, next) {
+	TAILQ_FOREACH(te, lpm_list, next) {
+		lpm = (struct rte_lpm *) te->data;
 		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
 			break;
 	}
-	if (lpm != NULL)
-		return NULL;
+	if (te != NULL)
+		goto exit;
+
+	/* allocate tailq entry */
+	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
+	if (te == NULL) {
+		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
+		goto exit;
+	}

 	/* Allocate memory to store the LPM data structures. */
 	lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
-			CACHE_LINE_SIZE, socket_id);
+			RTE_CACHE_LINE_SIZE, socket_id);
 	if (lpm == NULL) {
 		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
-		return NULL;
+		rte_free(te);
+		goto exit;
 	}

 	/* Save user arguments. */
-	lpm->max_rules_per_depth = max_rules / RTE_LPM_MAX_DEPTH;
-	rte_snprintf(lpm->name, sizeof(lpm->name), "%s", name);
+	lpm->max_rules = max_rules;
+	snprintf(lpm->name, sizeof(lpm->name), "%s", name);
+
+	te->data = (void *) lpm;
+
+	TAILQ_INSERT_TAIL(lpm_list, te, next);

-	TAILQ_INSERT_TAIL(lpm_list, lpm, next);
+exit:
+	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

 	return lpm;
 }
@@ -210,12 +226,38 @@ rte_lpm_create(const char *name, int socket_id, int max_rules,
 void
 rte_lpm_free(struct rte_lpm *lpm)
 {
+	struct rte_lpm_list *lpm_list;
+	struct rte_tailq_entry *te;
+
 	/* Check user arguments. */
 	if (lpm == NULL)
 		return;

-	RTE_EAL_TAILQ_REMOVE(RTE_TAILQ_LPM, rte_lpm_list, lpm);
+	/* check that we have an initialised tail queue */
+	if ((lpm_list =
+	     RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM, rte_lpm_list)) == NULL) {
+		rte_errno = E_RTE_NO_TAILQ;
+		return;
+	}
+
+	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+	/* find our tailq entry */
+	TAILQ_FOREACH(te, lpm_list, next) {
+		if (te->data == (void *) lpm)
+			break;
+	}
+	if (te == NULL) {
+		rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+		return;
+	}
+
+	TAILQ_REMOVE(lpm_list, te, next);
+
+	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
 	rte_free(lpm);
+	rte_free(te);
 }

 /*
@@ -233,40 +275,63 @@ rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 	uint8_t next_hop)
 {
 	uint32_t rule_gindex, rule_index, last_rule;
+	int i;

 	VERIFY_DEPTH(depth);

-	/* rule_gindex stands for rule group index. */
-	rule_gindex = ((depth - 1) * lpm->max_rules_per_depth);
-	/* Initialise rule_index to point to start of rule group. */
-	rule_index = rule_gindex;
-	/* Last rule = Last used rule in this rule group. */
-	last_rule = rule_gindex + lpm->used_rules_at_depth[depth - 1];
-
 	/* Scan through rule group to see if rule already exists. */
-	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
+	if (lpm->rule_info[depth - 1].used_rules > 0) {

-		/* If rule already exists update its next_hop and return. */
-		if (lpm->rules_tbl[rule_index].ip == ip_masked) {
-			lpm->rules_tbl[rule_index].next_hop = next_hop;
+		/* rule_gindex stands for rule group index. */
+		rule_gindex = lpm->rule_info[depth - 1].first_rule;
+		/* Initialise rule_index to point to start of rule group. */
+		rule_index = rule_gindex;
+		/* Last rule = Last used rule in this rule group. */
+		last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

-			return rule_index;
+		for (; rule_index < last_rule; rule_index++) {
+
+			/* If rule already exists update its next_hop and return. */
+			if (lpm->rules_tbl[rule_index].ip == ip_masked) {
+				lpm->rules_tbl[rule_index].next_hop = next_hop;
+
+				return rule_index;
+			}
 		}
+	} else {
+		/* Calculate the position in which the rule will be stored. */
+		rule_index = 0;
+
+		for (i = depth - 1; i > 0; i--) {
+			if (lpm->rule_info[i - 1].used_rules > 0) {
+				rule_index = lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules;
+				break;
+			}
+		}
+		if (rule_index == lpm->max_rules)
+			return -ENOSPC;
+
+		lpm->rule_info[depth - 1].first_rule = rule_index;
 	}

-	/*
-	 * If rule does not exist check if there is space to add a new rule to
-	 * this rule group. If there is no space return error. */
-	if (lpm->used_rules_at_depth[depth - 1] == lpm->max_rules_per_depth) {
-		return -ENOSPC;
+	/* Make room for the new rule in the array. */
+	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
+		if (lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
+			return -ENOSPC;
+
+		if (lpm->rule_info[i - 1].used_rules > 0) {
+			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules]
+					= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
+			lpm->rule_info[i - 1].first_rule++;
+		}
 	}

-	/* If there is space for the new rule add it. */
+	/* Add the new rule. */
 	lpm->rules_tbl[rule_index].ip = ip_masked;
 	lpm->rules_tbl[rule_index].next_hop = next_hop;

 	/* Increment the used rules counter for this rule group. */
-	lpm->used_rules_at_depth[depth - 1]++;
+	lpm->rule_info[depth - 1].used_rules++;

 	return rule_index;
 }
@@ -278,21 +343,23 @@ rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 static inline void
 rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
 {
-	uint32_t rule_gindex, last_rule_index;
+	int i;

 	VERIFY_DEPTH(depth);

-	rule_gindex = ((depth - 1) * lpm->max_rules_per_depth);
-	last_rule_index = rule_gindex +
-			(lpm->used_rules_at_depth[depth - 1]) - 1;
-	/*
-	 * Overwrite redundant rule with last rule in group and decrement rule
-	 * counter.
-	 */
-	lpm->rules_tbl[rule_index] = lpm->rules_tbl[last_rule_index];
-	lpm->used_rules_at_depth[depth - 1]--;
-}
+	lpm->rules_tbl[rule_index] = lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule +
+			lpm->rule_info[depth - 1].used_rules - 1];
+	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
+		if (lpm->rule_info[i].used_rules > 0) {
+			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
+					lpm->rules_tbl[lpm->rule_info[i].first_rule + lpm->rule_info[i].used_rules - 1];
+			lpm->rule_info[i].first_rule--;
+		}
+	}
+
+	lpm->rule_info[depth - 1].used_rules--;
+}

 /*
  * Finds a rule in rule table.
@@ -305,8 +372,8 @@ rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)

 	VERIFY_DEPTH(depth);

-	rule_gindex = ((depth - 1) * lpm->max_rules_per_depth);
-	last_rule = rule_gindex + lpm->used_rules_at_depth[depth - 1];
+	rule_gindex = lpm->rule_info[depth - 1].first_rule;
+	last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

 	/* Scan used rules at given depth to find rule. */
 	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
@@ -391,7 +458,7 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,

 			/* If tbl24 entry is valid and extended calculate the index
 			 * into tbl8. */
-			tbl8_index = lpm->tbl24[tbl24_index].tbl8_gindex *
+			tbl8_index = lpm->tbl24[i].tbl8_gindex *
 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 			tbl8_group_end = tbl8_index +
 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
@@ -534,6 +601,7 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 				.valid = VALID,
 				.depth = depth,
 				.next_hop = next_hop,
+				.valid_group = lpm->tbl8[i].valid_group,
 			};

 			/*
@@ -594,8 +662,37 @@ rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 	return 0;
 }

+/*
+ * Look for a rule in the high-level rules table
+ */
+int
+rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+uint8_t *next_hop)
+{
+	uint32_t ip_masked;
+	int32_t rule_index;
+
+	/* Check user arguments. */
+	if ((lpm == NULL) ||
+		(next_hop == NULL) ||
+		(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
+		return -EINVAL;
+
+	/* Look for the rule using rule_find. */
+	ip_masked = ip & depth_to_mask(depth);
+	rule_index = rule_find(lpm, ip_masked, depth);
+
+	if (rule_index >= 0) {
+		*next_hop = lpm->rules_tbl[rule_index].next_hop;
+		return 1;
+	}
+
+	/* If rule is not found return 0. */
+	return 0;
+}
+
 static inline int32_t
-find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
+find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t *sub_rule_depth)
 {
 	int32_t rule_index;
 	uint32_t ip_masked;
@@ -606,8 +703,10 @@ find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)

 		rule_index = rule_find(lpm, ip_masked, prev_depth);

-		if (rule_index >= 0)
+		if (rule_index >= 0) {
+			*sub_rule_depth = prev_depth;
 			return rule_index;
+		}
 	}

 	return -1;
@@ -615,10 +714,9 @@ find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)

 static inline int32_t
 delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
-	uint8_t depth, int32_t sub_rule_index)
+	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
 {
 	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
-	uint8_t new_depth;

 	/* Calculate the range and index into Table24. */
 	tbl24_range = depth_to_range(depth);
@@ -634,7 +732,7 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
 	 * associated with this rule.
 	 */
 	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
-		
+
 		if (lpm->tbl24[i].ext_entry == 0 &&
 				lpm->tbl24[i].depth <= depth ) {
 			lpm->tbl24[i].valid = INVALID;
@@ -665,20 +763,16 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
 		 * associated with this rule.
 		 */

-		/* Calculate depth of sub_rule. */
-		new_depth = (uint8_t) (sub_rule_index /
-				lpm->max_rules_per_depth);
-
 		struct rte_lpm_tbl24_entry new_tbl24_entry = {
 			{.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
 			.valid = VALID,
 			.ext_entry = 0,
-			.depth = new_depth,
+			.depth = sub_rule_depth,
 		};

 		struct rte_lpm_tbl8_entry new_tbl8_entry = {
 			.valid = VALID,
-			.depth = new_depth,
+			.depth = sub_rule_depth,
 			.next_hop = lpm->rules_tbl
 			[sub_rule_index].next_hop,
 		};
@@ -699,7 +793,7 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
 			tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
 			tbl8_index = tbl8_group_index *
 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-			
+
 			for (j = tbl8_index; j < (tbl8_index +
 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
@@ -769,11 +863,10 @@ tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)

 static inline int32_t
 delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
-	uint8_t depth, int32_t sub_rule_index)
+	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
 {
 	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start,
 			tbl8_index, tbl8_range, i;
-	uint8_t new_depth;
 	int32_t tbl8_recycle_index;

 	/*
@@ -799,13 +892,11 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 		}
 	}
 	else {
-		new_depth = (uint8_t)(sub_rule_index /
-				lpm->max_rules_per_depth);
-
 		/* Set new tbl8 entry. */
 		struct rte_lpm_tbl8_entry new_tbl8_entry = {
 			.valid = VALID,
-			.depth = new_depth,
+			.depth = sub_rule_depth,
+			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
 			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
 		};
@@ -857,6 +948,7 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
 {
 	int32_t rule_to_delete_index, sub_rule_index;
 	uint32_t ip_masked;
+	uint8_t sub_rule_depth;
 	/*
 	 * Check input arguments. Note: IP must be a positive integer of 32
 	 * bits in length therefore it need not be checked.
@@ -888,7 +980,8 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
 	 * replace the rule_to_delete we return -1 and invalidate the table
 	 * entries associated with this rule.
 	 */
-	sub_rule_index = find_previous_rule(lpm, ip, depth);
+	sub_rule_depth = 0;
+	sub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth);

 	/*
 	 * If the input depth value is less than 25 use function
@@ -896,10 +989,10 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
 	 */
 	if (depth <= MAX_DEPTH_TBL24) {
 		return delete_depth_small(lpm, ip_masked, depth,
-				sub_rule_index);
+				sub_rule_index, sub_rule_depth);
 	}
 	else { /* If depth > MAX_DEPTH_TBL24 */
-		return delete_depth_big(lpm, ip_masked, depth, sub_rule_index);
+		return delete_depth_big(lpm, ip_masked, depth, sub_rule_index, sub_rule_depth);
 	}
 }
@@ -909,8 +1002,8 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
 void
 rte_lpm_delete_all(struct rte_lpm *lpm)
 {
-	/* Zero used rules counter. */
-	memset(lpm->used_rules_at_depth, 0, sizeof(lpm->used_rules_at_depth));
+	/* Zero rule information. */
+	memset(lpm->rule_info, 0, sizeof(lpm->rule_info));

 	/* Zero tbl24. */
 	memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
@@ -919,7 +1012,6 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
 	memset(lpm->tbl8, 0, sizeof(lpm->tbl8));

 	/* Delete all rules form the rules table. */
-	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) *
-			(lpm->max_rules_per_depth * RTE_LPM_MAX_DEPTH));
+	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
 }
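
Usage sketch (illustrative, not part of the diff): the @@ -594,8 +662,37 @@ hunk above exports a new public function, rte_lpm_is_rule_present(). A minimal caller could look like the following, assuming rte_eal_init() has already set up the runtime and using the uint8_t next-hop API of this file's vintage; the table name, socket id, rule count and route values are invented for the example.

#include <stdio.h>
#include <stdint.h>
#include <rte_lpm.h>

/* Hypothetical route for illustration: 192.168.0.0/16 -> next hop 7.
 * rte_lpm expects IPv4 addresses in host byte order. */
static void
lpm_usage_sketch(void)
{
	struct rte_lpm *lpm;
	uint32_t ip = (192u << 24) | (168u << 16);
	uint8_t next_hop = 0;

	/* Up to 256 rules, allocated on socket 0; the flags argument is not used here. */
	lpm = rte_lpm_create("example_lpm", 0, 256, 0);
	if (lpm == NULL)
		return;

	if (rte_lpm_add(lpm, ip, 16, 7) < 0)
		goto out;

	/* rte_lpm_is_rule_present() (added by this diff) returns 1 and fills
	 * next_hop when an exact rule for (ip, depth) exists, 0 otherwise. */
	if (rte_lpm_is_rule_present(lpm, ip, 16, &next_hop) == 1)
		printf("rule present, next hop %u\n", next_hop);

	rte_lpm_delete(lpm, ip, 16);
out:
	rte_lpm_free(lpm);
}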