X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_lpm%2Frte_lpm.c;h=163ba3c1730b622076c0514bf44179440deca053;hb=8b8264bdb90deacbf6469638272a30d987d18679;hp=9e76988d3e78d40c6bcd6d115f97a37217e5227c;hpb=899d8bc9b3b5956d10fbbb79a6b27fc9838a683c;p=dpdk.git

diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index 9e76988d3e..163ba3c173 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -42,10 +42,9 @@
 #include 
 #include 
 #include 
-#include <rte_memory.h> /* for definition of CACHE_LINE_SIZE */
+#include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -58,6 +57,11 @@
 
 TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);
 
+static struct rte_tailq_elem rte_lpm_tailq = {
+	.name = "RTE_LPM",
+};
+EAL_REGISTER_TAILQ(rte_lpm_tailq)
+
 #define MAX_DEPTH_TBL24 24
 
 enum valid_flag {
@@ -122,12 +126,7 @@ rte_lpm_find_existing(const char *name)
 	struct rte_tailq_entry *te;
 	struct rte_lpm_list *lpm_list;
 
-	/* check that we have an initialised tail queue */
-	if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM,
-			rte_lpm_list)) == NULL) {
-		rte_errno = E_RTE_NO_TAILQ;
-		return NULL;
-	}
+	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
 
 	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
 	TAILQ_FOREACH(te, lpm_list, next) {
@@ -158,12 +157,7 @@ rte_lpm_create(const char *name, int socket_id, int max_rules,
 	uint32_t mem_size;
 	struct rte_lpm_list *lpm_list;
 
-	/* check that we have an initialised tail queue */
-	if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM,
-			rte_lpm_list)) == NULL) {
-		rte_errno = E_RTE_NO_TAILQ;
-		return NULL;
-	}
+	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
 
 	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl24_entry) != 2);
 	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl8_entry) != 2);
@@ -199,7 +193,7 @@ rte_lpm_create(const char *name, int socket_id, int max_rules,
 
 	/* Allocate memory to store the LPM data structures. */
 	lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
-			CACHE_LINE_SIZE, socket_id);
+			RTE_CACHE_LINE_SIZE, socket_id);
 	if (lpm == NULL) {
 		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
 		rte_free(te);
@@ -233,12 +227,7 @@ rte_lpm_free(struct rte_lpm *lpm)
 	if (lpm == NULL)
 		return;
 
-	/* check that we have an initialised tail queue */
-	if ((lpm_list =
-	     RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM, rte_lpm_list)) == NULL) {
-		rte_errno = E_RTE_NO_TAILQ;
-		return;
-	}
+	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
 
 	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
 
@@ -298,6 +287,9 @@ rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 				return rule_index;
 			}
 		}
+
+		if (rule_index == lpm->max_rules)
+			return -ENOSPC;
 	} else {
 		/* Calculate the position in which the rule will be stored. */
 		rule_index = 0;
@@ -379,11 +371,11 @@ rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
 	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
 		/* If rule is found return the rule index. */
 		if (lpm->rules_tbl[rule_index].ip == ip_masked)
-			return (rule_index);
+			return rule_index;
 	}
 
-	/* If rule is not found return -E_RTE_NO_TAILQ. */
-	return -E_RTE_NO_TAILQ;
+	/* If rule is not found return -EINVAL. */
+	return -EINVAL;
 }
 
 /*
@@ -450,35 +442,41 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 			};
 
 			/* Setting tbl24 entry in one go to avoid race
-			 * conditions */
+			 * conditions
+			 */
 			lpm->tbl24[i] = new_tbl24_entry;
 
 			continue;
 		}
 
-		/* If tbl24 entry is valid and extended calculate the index
-		 * into tbl8. */
-		tbl8_index = lpm->tbl24[i].tbl8_gindex *
-				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-		tbl8_group_end = tbl8_index + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-
-		for (j = tbl8_index; j < tbl8_group_end; j++) {
-			if (!lpm->tbl8[j].valid ||
-					lpm->tbl8[j].depth <= depth) {
-				struct rte_lpm_tbl8_entry new_tbl8_entry = {
-					.valid = VALID,
-					.valid_group = VALID,
-					.depth = depth,
-					.next_hop = next_hop,
-				};
-
-				/*
-				 * Setting tbl8 entry in one go to avoid race
-				 * conditions
-				 */
-				lpm->tbl8[j] = new_tbl8_entry;
-
-				continue;
+		if (lpm->tbl24[i].ext_entry == 1) {
+			/* If tbl24 entry is valid and extended calculate the
+			 * index into tbl8.
+			 */
+			tbl8_index = lpm->tbl24[i].tbl8_gindex *
+					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+			tbl8_group_end = tbl8_index +
+					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+			for (j = tbl8_index; j < tbl8_group_end; j++) {
+				if (!lpm->tbl8[j].valid ||
+						lpm->tbl8[j].depth <= depth) {
+					struct rte_lpm_tbl8_entry
+						new_tbl8_entry = {
+						.valid = VALID,
+						.valid_group = VALID,
+						.depth = depth,
+						.next_hop = next_hop,
+					};
+
+					/*
+					 * Setting tbl8 entry in one go to avoid
+					 * race conditions
+					 */
+					lpm->tbl8[j] = new_tbl8_entry;
+
+					continue;
+				}
 			}
 		}
 	}
@@ -967,10 +965,10 @@ rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
 
 	/*
 	 * Check if rule_to_delete_index was found. If no rule was found the
-	 * function rule_find returns -E_RTE_NO_TAILQ.
+	 * function rule_find returns -EINVAL.
 	 */
 	if (rule_to_delete_index < 0)
-		return -E_RTE_NO_TAILQ;
+		return -EINVAL;
 
 	/* Delete the rule from the rule table. */
 	rule_delete(lpm, rule_to_delete_index, depth);
@@ -1014,4 +1012,3 @@ rte_lpm_delete_all(struct rte_lpm *lpm)
 	/* Delete all rules form the rules table. */
 	memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
 }
-
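Note: the core of this patch is the switch from fixed tailq indexes (RTE_TAILQ_LOOKUP_BY_IDX plus the E_RTE_NO_TAILQ error path) to tailqs that register themselves at EAL init. Below is a minimal sketch of how another library would adopt the same pattern; the rte_foo_* names and the exact header locations (rte_tailq.h, rte_eal_memconfig.h) are assumptions for illustration, not part of this patch.

/* Hypothetical "foo" library adopting the self-registering tailq
 * pattern introduced above. All rte_foo_* identifiers are illustrative. */
#include <string.h>
#include <sys/queue.h>
#include <rte_tailq.h>          /* rte_tailq_elem, EAL_REGISTER_TAILQ, RTE_TAILQ_CAST (assumed location) */
#include <rte_eal_memconfig.h>  /* RTE_EAL_TAILQ_RWLOCK (assumed location) */
#include <rte_rwlock.h>

struct rte_foo {
	char name[32];
};

TAILQ_HEAD(rte_foo_list, rte_tailq_entry);

/* The tailq is registered automatically at EAL init, so there is no
 * fixed RTE_TAILQ_* index and no "initialised tail queue" check. */
static struct rte_tailq_elem rte_foo_tailq = {
	.name = "RTE_FOO",
};
EAL_REGISTER_TAILQ(rte_foo_tailq)

struct rte_foo *
rte_foo_find_existing(const char *name)
{
	struct rte_foo *f = NULL;
	struct rte_tailq_entry *te;
	struct rte_foo_list *foo_list;

	/* One cast of the registered head replaces the old
	 * lookup-by-index call and its E_RTE_NO_TAILQ error path. */
	foo_list = RTE_TAILQ_CAST(rte_foo_tailq.head, rte_foo_list);

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, foo_list, next) {
		f = (struct rte_foo *) te->data;
		if (strncmp(name, f->name, sizeof(f->name)) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL)
		return NULL;
	return f;
}

The error-code changes are visible to callers as well: rule_find() misses now propagate from rte_lpm_delete() as -EINVAL rather than -E_RTE_NO_TAILQ, and rule_add() reports a full rule table with -ENOSPC.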