diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index ccaaa2af3d..d00b13d931 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
  */
 
 #include <string.h>
@@ -36,7 +7,6 @@
 #include <errno.h>
 #include <stdarg.h>
 #include <stdio.h>
-#include <errno.h>
 #include <sys/queue.h>
 #include <rte_log.h>
@@ -44,7 +14,6 @@
 #include <rte_common.h>
 #include <rte_memory.h>		/* for definition of RTE_CACHE_LINE_SIZE */
 #include <rte_malloc.h>
-#include <rte_memzone.h>
 #include <rte_eal.h>
 #include <rte_eal_memconfig.h>
 #include <rte_per_lcore.h>
@@ -130,7 +99,7 @@ rte_lpm_find_existing_v20(const char *name)
 	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
 	TAILQ_FOREACH(te, lpm_list, next) {
-		l = (struct rte_lpm_v20 *) te->data;
+		l = te->data;
 		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
 			break;
 	}
@@ -156,7 +125,7 @@ rte_lpm_find_existing_v1604(const char *name)
 	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
 	TAILQ_FOREACH(te, lpm_list, next) {
-		l = (struct rte_lpm *) te->data;
+		l = te->data;
 		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
 			break;
 	}
@@ -205,26 +174,32 @@ rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
 	/* guarantee there's no existing */
 	TAILQ_FOREACH(te, lpm_list, next) {
-		lpm = (struct rte_lpm_v20 *) te->data;
+		lpm = te->data;
 		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
 			break;
 	}
-	if (te != NULL)
+
+	if (te != NULL) {
+		lpm = NULL;
+		rte_errno = EEXIST;
 		goto exit;
+	}
 	/* allocate tailq entry */
 	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
 	if (te == NULL) {
 		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
+		rte_errno = ENOMEM;
 		goto exit;
 	}
 	/* Allocate memory to store the LPM data structures. */
-	lpm = (struct rte_lpm_v20 *)rte_zmalloc_socket(mem_name, mem_size,
+	lpm = rte_zmalloc_socket(mem_name, mem_size,
 			RTE_CACHE_LINE_SIZE, socket_id);
 	if (lpm == NULL) {
 		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
 		rte_free(te);
+		rte_errno = ENOMEM;
 		goto exit;
 	}
@@ -232,7 +207,7 @@ rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
 	lpm->max_rules = max_rules;
 	snprintf(lpm->name, sizeof(lpm->name), "%s", name);
-	te->data = (void *) lpm;
+	te->data = lpm;
 	TAILQ_INSERT_TAIL(lpm_list, te, next);
@@ -276,46 +251,57 @@ rte_lpm_create_v1604(const char *name, int socket_id,
 	/* guarantee there's no existing */
 	TAILQ_FOREACH(te, lpm_list, next) {
-		lpm = (struct rte_lpm *) te->data;
+		lpm = te->data;
 		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
 			break;
 	}
-	if (te != NULL)
+
+	if (te != NULL) {
+		lpm = NULL;
+		rte_errno = EEXIST;
 		goto exit;
+	}
 	/* allocate tailq entry */
 	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
 	if (te == NULL) {
 		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
+		rte_errno = ENOMEM;
 		goto exit;
 	}
 	/* Allocate memory to store the LPM data structures. */
-	lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
+	lpm = rte_zmalloc_socket(mem_name, mem_size,
 			RTE_CACHE_LINE_SIZE, socket_id);
 	if (lpm == NULL) {
 		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
 		rte_free(te);
+		rte_errno = ENOMEM;
 		goto exit;
 	}
-	lpm->rules_tbl = (struct rte_lpm_rule *)rte_zmalloc_socket(NULL,
+	lpm->rules_tbl = rte_zmalloc_socket(NULL,
 			(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
 	if (lpm->rules_tbl == NULL) {
-		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
+		RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
 		rte_free(lpm);
+		lpm = NULL;
 		rte_free(te);
+		rte_errno = ENOMEM;
 		goto exit;
 	}
-	lpm->tbl8 = (struct rte_lpm_tbl_entry *)rte_zmalloc_socket(NULL,
+	lpm->tbl8 = rte_zmalloc_socket(NULL,
 			(size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);
 	if (lpm->tbl8 == NULL) {
-		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
+		RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
+		rte_free(lpm->rules_tbl);
 		rte_free(lpm);
+		lpm = NULL;
 		rte_free(te);
+		rte_errno = ENOMEM;
 		goto exit;
 	}
@@ -324,7 +310,7 @@ rte_lpm_create_v1604(const char *name, int socket_id,
 	lpm->number_tbl8s = config->number_tbl8s;
 	snprintf(lpm->name, sizeof(lpm->name), "%s", name);
-	te->data = (void *) lpm;
+	te->data = lpm;
 	TAILQ_INSERT_TAIL(lpm_list, te, next);
@@ -360,12 +346,8 @@ rte_lpm_free_v20(struct rte_lpm_v20 *lpm)
 		if (te->data == (void *) lpm)
 			break;
 	}
-	if (te == NULL) {
-		rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
-		return;
-	}
-
-	TAILQ_REMOVE(lpm_list, te, next);
+	if (te != NULL)
+		TAILQ_REMOVE(lpm_list, te, next);
 	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
@@ -393,15 +375,13 @@ rte_lpm_free_v1604(struct rte_lpm *lpm)
 		if (te->data == (void *) lpm)
 			break;
 	}
-	if (te == NULL) {
-		rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
-		return;
-	}
-
-	TAILQ_REMOVE(lpm_list, te, next);
+	if (te != NULL)
+		TAILQ_REMOVE(lpm_list, te, next);
 	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+	rte_free(lpm->tbl8);
+	rte_free(lpm->rules_tbl);
 	rte_free(lpm);
 	rte_free(te);
 }
@@ -748,11 +728,11 @@ add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
 				lpm->tbl24[i].depth <= depth)) {
 			struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
-				{ .next_hop = next_hop, },
 				.valid = VALID,
 				.valid_group = 0,
 				.depth = depth,
 			};
+			new_tbl24_entry.next_hop = next_hop;
 			/* Setting tbl24 entry in one go to avoid race
 			 * conditions
@@ -779,8 +759,8 @@ add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
 			.valid = VALID,
 			.valid_group = VALID,
 			.depth = depth,
-			.next_hop = next_hop,
 		};
+		new_tbl8_entry.next_hop = next_hop;
 		/*
 		 * Setting tbl8 entry in one go to avoid
@@ -905,7 +885,7 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
 		 */
 		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
-			{ .group_idx = (uint8_t)tbl8_group_index, },
+			.group_idx = (uint8_t)tbl8_group_index,
 			.valid = VALID,
 			.valid_group = 1,
 			.depth = 0,
@@ -939,14 +919,9 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
 		/* Insert new rule into the tbl8 entry. */
 		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
-			if (!lpm->tbl8[i].valid ||
-					lpm->tbl8[i].depth <= depth) {
-				lpm->tbl8[i].valid = VALID;
-				lpm->tbl8[i].depth = depth;
-				lpm->tbl8[i].next_hop = next_hop;
-
-				continue;
-			}
+			lpm->tbl8[i].valid = VALID;
+			lpm->tbl8[i].depth = depth;
+			lpm->tbl8[i].next_hop = next_hop;
 		}
 		/*
@@ -956,7 +931,7 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
 		 */
 		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
-			{ .group_idx = (uint8_t)tbl8_group_index, },
+			.group_idx = (uint8_t)tbl8_group_index,
 			.valid = VALID,
 			.valid_group = 1,
 			.depth = 0,
@@ -979,10 +954,9 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
 			struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
 				.valid = VALID,
 				.depth = depth,
-				.next_hop = next_hop,
 				.valid_group = lpm->tbl8[i].valid_group,
 			};
-
+			new_tbl8_entry.next_hop = next_hop;
 			/*
 			 * Setting tbl8 entry in one go to avoid race
 			 * condition
@@ -1037,7 +1011,7 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 		 */
 		struct rte_lpm_tbl_entry new_tbl24_entry = {
-			.group_idx = (uint8_t)tbl8_group_index,
+			.group_idx = tbl8_group_index,
 			.valid = VALID,
 			.valid_group = 1,
 			.depth = 0,
@@ -1071,14 +1045,9 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 		/* Insert new rule into the tbl8 entry. */
 		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
-			if (!lpm->tbl8[i].valid ||
-					lpm->tbl8[i].depth <= depth) {
-				lpm->tbl8[i].valid = VALID;
-				lpm->tbl8[i].depth = depth;
-				lpm->tbl8[i].next_hop = next_hop;
-
-				continue;
-			}
+			lpm->tbl8[i].valid = VALID;
+			lpm->tbl8[i].depth = depth;
+			lpm->tbl8[i].next_hop = next_hop;
 		}
 		/*
@@ -1088,7 +1057,7 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 		 */
 		struct rte_lpm_tbl_entry new_tbl24_entry = {
-			.group_idx = (uint8_t)tbl8_group_index,
+			.group_idx = tbl8_group_index,
 			.valid = VALID,
 			.valid_group = 1,
 			.depth = 0,
@@ -1369,7 +1338,7 @@ delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
 		 */
 		struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
-			{.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
+			.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
 			.valid = VALID,
 			.valid_group = 0,
 			.depth = sub_rule_depth,
@@ -1379,9 +1348,9 @@ delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
 			.valid = VALID,
 			.valid_group = VALID,
 			.depth = sub_rule_depth,
-			.next_hop = lpm->rules_tbl
-					[sub_rule_index].next_hop,
 		};
+		new_tbl8_entry.next_hop =
+				lpm->rules_tbl[sub_rule_index].next_hop;
 		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
@@ -1533,7 +1502,7 @@ tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
 	 * and if so check the rest of the entries to verify that they
 	 * are all of this depth.
*/ - if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) { + if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) { for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) { @@ -1580,7 +1549,7 @@ tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8, * and if so check the rest of the entries to verify that they * are all of this depth. */ - if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) { + if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) { for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) { @@ -1643,9 +1612,10 @@ delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, .valid = VALID, .depth = sub_rule_depth, .valid_group = lpm->tbl8[tbl8_group_start].valid_group, - .next_hop = lpm->rules_tbl[sub_rule_index].next_hop, }; + new_tbl8_entry.next_hop = + lpm->rules_tbl[sub_rule_index].next_hop; /* * Loop through the range of entries on tbl8 for which the * rule_to_delete must be modified. @@ -1671,7 +1641,7 @@ delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, } else if (tbl8_recycle_index > -1) { /* Update tbl24 entry. */ struct rte_lpm_tbl_entry_v20 new_tbl24_entry = { - { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, }, + .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, .valid = VALID, .valid_group = 0, .depth = lpm->tbl8[tbl8_recycle_index].depth,
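
Note on the recurring initializer changes in this patch: every hunk that moves ".next_hop" (or ".group_idx") out of the braced initializer and into a separate assignment touches a member of the unnamed union at the start of the tbl entry structs. This appears intended to sidestep designated initialization of unnamed-union members, which older compilers (GCC 4.x era in particular) handle inconsistently. A minimal standalone sketch of the pattern; "mock_tbl_entry_v20" is a hypothetical cut-down mirror of rte_lpm_tbl_entry_v20, not the real definition:

#include <stdint.h>

/* Hypothetical mock: next_hop and group_idx share an unnamed union,
 * followed by the bit-fields, as in the real DPDK v20 entry. */
struct mock_tbl_entry_v20 {
	union {
		uint8_t next_hop;
		uint8_t group_idx;
	};
	uint8_t valid :1;
	uint8_t valid_group :1;
	uint8_t depth :6;
};

int main(void)
{
	/* Post-patch style: initialize only the named bit-fields in the
	 * braced initializer, then set the unnamed-union member with a
	 * plain assignment, exactly as the patch does with
	 * "new_tbl24_entry.next_hop = next_hop;". */
	struct mock_tbl_entry_v20 e = {
		.valid = 1,
		.valid_group = 0,
		.depth = 24,
	};
	e.next_hop = 5;

	return e.next_hop == 5 ? 0 : 1;
}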
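
The create paths above now set rte_errno before each "goto exit": EEXIST when a table with the same name already exists, ENOMEM on any allocation failure. A NULL return from the create functions is therefore diagnosable by the caller. A sketch of such a caller against the public rte_lpm_create() API; the table name, sizing values, and socket id are illustrative only:

#include <errno.h>
#include <stdio.h>

#include <rte_errno.h>
#include <rte_lpm.h>

static struct rte_lpm *
lpm_setup(const char *name, int socket_id)
{
	struct rte_lpm_config config = {
		.max_rules = 1024,	/* illustrative sizing */
		.number_tbl8s = 256,
		.flags = 0,
	};
	struct rte_lpm *lpm;

	lpm = rte_lpm_create(name, socket_id, &config);
	if (lpm == NULL) {
		/* rte_errno distinguishes the failure modes set above. */
		if (rte_errno == EEXIST)
			printf("LPM table '%s' already exists\n", name);
		else if (rte_errno == ENOMEM)
			printf("allocation failed for LPM table '%s'\n", name);
	}
	return lpm;
}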
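
On the two tbl8_recycle_check hunks: a tbl8 group can be folded back into its parent tbl24 slot once all of its entries were expanded from one rule whose depth fits in tbl24, i.e. depth <= MAX_DEPTH_TBL24 (24). The pre-patch "<" wrongly excluded groups whose surviving rule has depth exactly 24. A simplified sketch of the corrected test, with hypothetical names (the real function additionally detects an all-invalid group so it can be freed outright):

#include <stdbool.h>
#include <stdint.h>

#define MAX_DEPTH_TBL24		24
#define TBL8_GROUP_NUM_ENTRIES	256	/* entries per tbl8 group */

struct mock_entry {
	bool valid;
	uint8_t depth;
};

/* Return 0 when the group collapses into a single tbl24 entry,
 * -1 when it must stay expanded. */
static int
tbl8_group_recyclable(const struct mock_entry *group)
{
	unsigned int i;

	/* First entry must be valid and its depth must fit in tbl24;
	 * "<=" is the post-patch comparison. */
	if (!group[0].valid || group[0].depth > MAX_DEPTH_TBL24)
		return -1;
	/* All entries must have been expanded from the same rule. */
	for (i = 1; i < TBL8_GROUP_NUM_ENTRIES; i++)
		if (group[i].depth != group[0].depth)
			return -1;
	return 0;
}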