X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=lib%2Flibrte_lpm%2Frte_lpm.c;h=e1f1fad5f30fb302b8491e65fcd36f0ebdefb679;hb=455da5453987;hp=ccaaa2af3d67757b0aa2d1b3c76045f92d70ce48;hpb=f1f7261838b38f57fb1b3e62d1f97164439d017d;p=dpdk.git

diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index ccaaa2af3d..e1f1fad5f3 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -36,7 +36,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 
@@ -44,7 +43,6 @@
 #include
 #include /* for definition of RTE_CACHE_LINE_SIZE */
 #include
-#include
 #include
 #include
 #include
@@ -209,13 +207,17 @@ rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
 		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
 			break;
 	}
-	if (te != NULL)
+	lpm = NULL;
+	if (te != NULL) {
+		rte_errno = EEXIST;
 		goto exit;
+	}
 
 	/* allocate tailq entry */
 	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
 	if (te == NULL) {
 		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
+		rte_errno = ENOMEM;
 		goto exit;
 	}
 
@@ -225,6 +227,7 @@ rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
 	if (lpm == NULL) {
 		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
 		rte_free(te);
+		rte_errno = ENOMEM;
 		goto exit;
 	}
 
@@ -280,13 +283,17 @@ rte_lpm_create_v1604(const char *name, int socket_id,
 		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
 			break;
 	}
-	if (te != NULL)
+	lpm = NULL;
+	if (te != NULL) {
+		rte_errno = EEXIST;
 		goto exit;
+	}
 
 	/* allocate tailq entry */
 	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
 	if (te == NULL) {
 		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
+		rte_errno = ENOMEM;
 		goto exit;
 	}
 
@@ -296,6 +303,7 @@ rte_lpm_create_v1604(const char *name, int socket_id,
 	if (lpm == NULL) {
 		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
 		rte_free(te);
+		rte_errno = ENOMEM;
 		goto exit;
 	}
 
@@ -303,9 +311,11 @@ rte_lpm_create_v1604(const char *name, int socket_id,
 			(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
 
 	if (lpm->rules_tbl == NULL) {
-		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
+		RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
 		rte_free(lpm);
+		lpm = NULL;
 		rte_free(te);
+		rte_errno = ENOMEM;
 		goto exit;
 	}
 
@@ -313,9 +323,12 @@ rte_lpm_create_v1604(const char *name, int socket_id,
 			(size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);
 
 	if (lpm->tbl8 == NULL) {
-		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
+		RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
+		rte_free(lpm->rules_tbl);
 		rte_free(lpm);
+		lpm = NULL;
 		rte_free(te);
+		rte_errno = ENOMEM;
 		goto exit;
 	}
 
@@ -360,12 +373,8 @@ rte_lpm_free_v20(struct rte_lpm_v20 *lpm)
 		if (te->data == (void *) lpm)
 			break;
 	}
-	if (te == NULL) {
-		rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
-		return;
-	}
-
-	TAILQ_REMOVE(lpm_list, te, next);
+	if (te != NULL)
+		TAILQ_REMOVE(lpm_list, te, next);
 
 	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
 
@@ -393,15 +402,13 @@ rte_lpm_free_v1604(struct rte_lpm *lpm)
 		if (te->data == (void *) lpm)
 			break;
 	}
-	if (te == NULL) {
-		rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
-		return;
-	}
-
-	TAILQ_REMOVE(lpm_list, te, next);
+	if (te != NULL)
+		TAILQ_REMOVE(lpm_list, te, next);
 
 	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
 
+	rte_free(lpm->tbl8);
+	rte_free(lpm->rules_tbl);
 	rte_free(lpm);
 	rte_free(te);
 }
@@ -748,11 +755,11 @@ add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
 				lpm->tbl24[i].depth <= depth)) {
 
 			struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
-				{ .next_hop = next_hop, },
 				.valid = VALID,
 				.valid_group = 0,
 				.depth = depth,
 			};
+			new_tbl24_entry.next_hop = next_hop;
 
 			/* Setting tbl24 entry in one go to avoid race
 			 * conditions
@@ -779,8 +786,8 @@ add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
 					.valid = VALID,
 					.valid_group = VALID,
 					.depth = depth,
-					.next_hop = next_hop,
 				};
+				new_tbl8_entry.next_hop = next_hop;
 
 				/*
 				 * Setting tbl8 entry in one go to avoid
@@ -939,14 +946,9 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
 
 		/* Insert new rule into the tbl8 entry. */
 		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
-			if (!lpm->tbl8[i].valid ||
-					lpm->tbl8[i].depth <= depth) {
-				lpm->tbl8[i].valid = VALID;
-				lpm->tbl8[i].depth = depth;
-				lpm->tbl8[i].next_hop = next_hop;
-
-				continue;
-			}
+			lpm->tbl8[i].valid = VALID;
+			lpm->tbl8[i].depth = depth;
+			lpm->tbl8[i].next_hop = next_hop;
 		}
 
 		/*
@@ -979,10 +981,9 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
 			struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
 				.valid = VALID,
 				.depth = depth,
-				.next_hop = next_hop,
 				.valid_group = lpm->tbl8[i].valid_group,
 			};
-
+			new_tbl8_entry.next_hop = next_hop;
 			/*
 			 * Setting tbl8 entry in one go to avoid race
 			 * condition
@@ -1037,7 +1038,7 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 		 */
 
 		struct rte_lpm_tbl_entry new_tbl24_entry = {
-			.group_idx = (uint8_t)tbl8_group_index,
+			.group_idx = tbl8_group_index,
 			.valid = VALID,
 			.valid_group = 1,
 			.depth = 0,
@@ -1071,14 +1072,9 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 
 		/* Insert new rule into the tbl8 entry. */
 		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
-			if (!lpm->tbl8[i].valid ||
-					lpm->tbl8[i].depth <= depth) {
-				lpm->tbl8[i].valid = VALID;
-				lpm->tbl8[i].depth = depth;
-				lpm->tbl8[i].next_hop = next_hop;
-
-				continue;
-			}
+			lpm->tbl8[i].valid = VALID;
+			lpm->tbl8[i].depth = depth;
+			lpm->tbl8[i].next_hop = next_hop;
 		}
 
 		/*
@@ -1088,7 +1084,7 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 		 */
 
 		struct rte_lpm_tbl_entry new_tbl24_entry = {
-			.group_idx = (uint8_t)tbl8_group_index,
+			.group_idx = tbl8_group_index,
 			.valid = VALID,
 			.valid_group = 1,
 			.depth = 0,
@@ -1379,9 +1375,9 @@ delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
 			.valid = VALID,
 			.valid_group = VALID,
 			.depth = sub_rule_depth,
-			.next_hop = lpm->rules_tbl
-				[sub_rule_index].next_hop,
 		};
+		new_tbl8_entry.next_hop =
+			lpm->rules_tbl[sub_rule_index].next_hop;
 
 		for (i = tbl24_index; i < (tbl24_index + tbl24_range);
 				i++) {
@@ -1533,7 +1529,7 @@ tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
	 * and if so check the rest of the entries to verify that they
	 * are all of this depth.
	 */
-	if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
+	if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
		for (i = (tbl8_group_start + 1); i < tbl8_group_end;
				i++) {
@@ -1580,7 +1576,7 @@ tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,
	 * and if so check the rest of the entries to verify that they
	 * are all of this depth.
	 */
-	if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
+	if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
		for (i = (tbl8_group_start + 1); i < tbl8_group_end;
				i++) {
@@ -1643,9 +1639,10 @@ delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
	struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
		.valid = VALID,
		.depth = sub_rule_depth,
		.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
-		.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
	};
+	new_tbl8_entry.next_hop =
+		lpm->rules_tbl[sub_rule_index].next_hop;
	/*
	 * Loop through the range of entries on tbl8 for which the
	 * rule_to_delete must be modified.
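
With the rte_errno assignments added above, a caller can now tell a duplicate table name apart from an allocation failure when rte_lpm_create() returns NULL. A minimal caller-side sketch under that assumption; the table name and config sizing below are illustrative, not part of this patch:

    #include <stdio.h>
    #include <rte_errno.h>
    #include <rte_memory.h>	/* SOCKET_ID_ANY */
    #include <rte_lpm.h>

    static struct rte_lpm *
    lpm_create_checked(void)
    {
    	/* Illustrative sizing only. */
    	struct rte_lpm_config config = {
    		.max_rules = 1024,
    		.number_tbl8s = 256,
    		.flags = 0,
    	};
    	struct rte_lpm *lpm = rte_lpm_create("example_lpm",
    			SOCKET_ID_ANY, &config);

    	if (lpm == NULL) {
    		/* After this patch: EEXIST means an LPM table with this
    		 * name already exists; ENOMEM means the tailq entry, the
    		 * LPM struct, rules_tbl or tbl8 allocation failed.
    		 */
    		printf("rte_lpm_create failed: %s\n",
    				rte_strerror(rte_errno));
    	}
    	return lpm;
    }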
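The initializer changes keep the existing pattern of building the whole entry on the stack and then publishing it to the table with a single store; next_hop is presumably moved out of the braced initializer because it sits in the anonymous union of the v20 entry layout, which some compilers reject in a designated-initializer list. A simplified sketch of the single-store pattern; the entry layout here is patterned after the 32-bit v1604 entry but is illustrative, not the real rte_lpm definition:

    #include <stdint.h>

    /* Illustrative stand-in for the 32-bit table entry. */
    struct tbl_entry {
    	uint32_t next_hop    :24;
    	uint32_t valid       :1;
    	uint32_t valid_group :1;
    	uint32_t depth       :6;
    };

    static void
    set_entry(volatile struct tbl_entry *slot, uint32_t next_hop,
    		uint8_t depth)
    {
    	/* Build the complete entry on the stack first... */
    	struct tbl_entry e = {
    		.valid = 1,
    		.valid_group = 0,
    		.depth = depth,
    	};
    	e.next_hop = next_hop;	/* assigned separately, as in the patch */

    	/* ...then publish it with one 32-bit store, so a concurrent
    	 * lookup sees either the old entry or the new one, never a
    	 * half-written mix of the two. */
    	*slot = e;
    }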
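The tbl8_recycle_check_* change from < to <= matters for rules of depth exactly 24: MAX_DEPTH_TBL24 is 24, and a tbl8 group whose 256 entries all carry depth 24 collapses back into a single tbl24 entry, a case the strict comparison previously refused to recycle, leaving such groups allocated after deeper rules were deleted.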