X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_lpm%2Frte_lpm.c;h=2687564194c373fe06c3576338693abfd46d3662;hb=9160d6386472b70503e504df765440ecdb604d7d;hp=e1f1fad5f30fb302b8491e65fcd36f0ebdefb679;hpb=4c00cfdc0ea225f2518a35db928ad1ab02b2a724;p=dpdk.git diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c index e1f1fad5f3..2687564194 100644 --- a/lib/librte_lpm/rte_lpm.c +++ b/lib/librte_lpm/rte_lpm.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation */ #include @@ -50,6 +21,7 @@ #include #include #include +#include #include "rte_lpm.h" @@ -99,7 +71,7 @@ depth_to_mask(uint8_t depth) /* * Converts given depth value to its corresponding range value. */ -static inline uint32_t __attribute__((pure)) +static uint32_t __attribute__((pure)) depth_to_range(uint8_t depth) { VERIFY_DEPTH(depth); @@ -117,34 +89,8 @@ depth_to_range(uint8_t depth) /* * Find an existing lpm table and return a pointer to it. 
*/ -struct rte_lpm_v20 * -rte_lpm_find_existing_v20(const char *name) -{ - struct rte_lpm_v20 *l = NULL; - struct rte_tailq_entry *te; - struct rte_lpm_list *lpm_list; - - lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list); - - rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK); - TAILQ_FOREACH(te, lpm_list, next) { - l = (struct rte_lpm_v20 *) te->data; - if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0) - break; - } - rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK); - - if (te == NULL) { - rte_errno = ENOENT; - return NULL; - } - - return l; -} -VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0); - struct rte_lpm * -rte_lpm_find_existing_v1604(const char *name) +rte_lpm_find_existing(const char *name) { struct rte_lpm *l = NULL; struct rte_tailq_entry *te; @@ -152,13 +98,13 @@ rte_lpm_find_existing_v1604(const char *name) lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list); - rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_read_lock(); TAILQ_FOREACH(te, lpm_list, next) { - l = (struct rte_lpm *) te->data; + l = te->data; if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0) break; } - rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_read_unlock(); if (te == NULL) { rte_errno = ENOENT; @@ -167,87 +113,12 @@ rte_lpm_find_existing_v1604(const char *name) return l; } -BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04); -MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name), - rte_lpm_find_existing_v1604); /* * Allocates memory for LPM object */ -struct rte_lpm_v20 * -rte_lpm_create_v20(const char *name, int socket_id, int max_rules, - __rte_unused int flags) -{ - char mem_name[RTE_LPM_NAMESIZE]; - struct rte_lpm_v20 *lpm = NULL; - struct rte_tailq_entry *te; - uint32_t mem_size; - struct rte_lpm_list *lpm_list; - - lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list); - - RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2); - - /* Check user arguments. */ - if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) { - rte_errno = EINVAL; - return NULL; - } - - snprintf(mem_name, sizeof(mem_name), "LPM_%s", name); - - /* Determine the amount of memory to allocate. */ - mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules); - - rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); - - /* guarantee there's no existing */ - TAILQ_FOREACH(te, lpm_list, next) { - lpm = (struct rte_lpm_v20 *) te->data; - if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0) - break; - } - lpm = NULL; - if (te != NULL) { - rte_errno = EEXIST; - goto exit; - } - - /* allocate tailq entry */ - te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0); - if (te == NULL) { - RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n"); - rte_errno = ENOMEM; - goto exit; - } - - /* Allocate memory to store the LPM data structures. */ - lpm = (struct rte_lpm_v20 *)rte_zmalloc_socket(mem_name, mem_size, - RTE_CACHE_LINE_SIZE, socket_id); - if (lpm == NULL) { - RTE_LOG(ERR, LPM, "LPM memory allocation failed\n"); - rte_free(te); - rte_errno = ENOMEM; - goto exit; - } - - /* Save user arguments. 
*/ - lpm->max_rules = max_rules; - snprintf(lpm->name, sizeof(lpm->name), "%s", name); - - te->data = (void *) lpm; - - TAILQ_INSERT_TAIL(lpm_list, te, next); - -exit: - rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); - - return lpm; -} -VERSION_SYMBOL(rte_lpm_create, _v20, 2.0); - struct rte_lpm * -rte_lpm_create_v1604(const char *name, int socket_id, +rte_lpm_create(const char *name, int socket_id, const struct rte_lpm_config *config) { char mem_name[RTE_LPM_NAMESIZE]; @@ -275,16 +146,17 @@ rte_lpm_create_v1604(const char *name, int socket_id, tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s); - rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_write_lock(); /* guarantee there's no existing */ TAILQ_FOREACH(te, lpm_list, next) { - lpm = (struct rte_lpm *) te->data; + lpm = te->data; if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0) break; } - lpm = NULL; + if (te != NULL) { + lpm = NULL; rte_errno = EEXIST; goto exit; } @@ -298,7 +170,7 @@ rte_lpm_create_v1604(const char *name, int socket_id, } /* Allocate memory to store the LPM data structures. */ - lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size, + lpm = rte_zmalloc_socket(mem_name, mem_size, RTE_CACHE_LINE_SIZE, socket_id); if (lpm == NULL) { RTE_LOG(ERR, LPM, "LPM memory allocation failed\n"); @@ -307,7 +179,7 @@ rte_lpm_create_v1604(const char *name, int socket_id, goto exit; } - lpm->rules_tbl = (struct rte_lpm_rule *)rte_zmalloc_socket(NULL, + lpm->rules_tbl = rte_zmalloc_socket(NULL, (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id); if (lpm->rules_tbl == NULL) { @@ -319,7 +191,7 @@ rte_lpm_create_v1604(const char *name, int socket_id, goto exit; } - lpm->tbl8 = (struct rte_lpm_tbl_entry *)rte_zmalloc_socket(NULL, + lpm->tbl8 = rte_zmalloc_socket(NULL, (size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id); if (lpm->tbl8 == NULL) { @@ -335,56 +207,23 @@ rte_lpm_create_v1604(const char *name, int socket_id, /* Save user arguments. */ lpm->max_rules = config->max_rules; lpm->number_tbl8s = config->number_tbl8s; - snprintf(lpm->name, sizeof(lpm->name), "%s", name); + strlcpy(lpm->name, name, sizeof(lpm->name)); - te->data = (void *) lpm; + te->data = lpm; TAILQ_INSERT_TAIL(lpm_list, te, next); exit: - rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_write_unlock(); return lpm; } -BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04); -MAP_STATIC_SYMBOL( - struct rte_lpm *rte_lpm_create(const char *name, int socket_id, - const struct rte_lpm_config *config), rte_lpm_create_v1604); /* * Deallocates memory for given LPM table. */ void -rte_lpm_free_v20(struct rte_lpm_v20 *lpm) -{ - struct rte_lpm_list *lpm_list; - struct rte_tailq_entry *te; - - /* Check user arguments. 
*/ - if (lpm == NULL) - return; - - lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list); - - rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); - - /* find our tailq entry */ - TAILQ_FOREACH(te, lpm_list, next) { - if (te->data == (void *) lpm) - break; - } - if (te != NULL) - TAILQ_REMOVE(lpm_list, te, next); - - rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); - - rte_free(lpm); - rte_free(te); -} -VERSION_SYMBOL(rte_lpm_free, _v20, 2.0); - -void -rte_lpm_free_v1604(struct rte_lpm *lpm) +rte_lpm_free(struct rte_lpm *lpm) { struct rte_lpm_list *lpm_list; struct rte_tailq_entry *te; @@ -395,7 +234,7 @@ rte_lpm_free_v1604(struct rte_lpm *lpm) lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list); - rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_write_lock(); /* find our tailq entry */ TAILQ_FOREACH(te, lpm_list, next) { @@ -405,16 +244,13 @@ rte_lpm_free_v1604(struct rte_lpm *lpm) if (te != NULL) TAILQ_REMOVE(lpm_list, te, next); - rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_write_unlock(); rte_free(lpm->tbl8); rte_free(lpm->rules_tbl); rte_free(lpm); rte_free(te); } -BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04); -MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm), - rte_lpm_free_v1604); /* * Adds a rule to the rule table. @@ -426,80 +262,8 @@ MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm), * are stored in the rule table from 0 - 31. * NOTE: Valid range for depth parameter is 1 .. 32 inclusive. */ -static inline int32_t -rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth, - uint8_t next_hop) -{ - uint32_t rule_gindex, rule_index, last_rule; - int i; - - VERIFY_DEPTH(depth); - - /* Scan through rule group to see if rule already exists. */ - if (lpm->rule_info[depth - 1].used_rules > 0) { - - /* rule_gindex stands for rule group index. */ - rule_gindex = lpm->rule_info[depth - 1].first_rule; - /* Initialise rule_index to point to start of rule group. */ - rule_index = rule_gindex; - /* Last rule = Last used rule in this rule group. */ - last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules; - - for (; rule_index < last_rule; rule_index++) { - - /* If rule already exists update its next_hop and return. */ - if (lpm->rules_tbl[rule_index].ip == ip_masked) { - lpm->rules_tbl[rule_index].next_hop = next_hop; - - return rule_index; - } - } - - if (rule_index == lpm->max_rules) - return -ENOSPC; - } else { - /* Calculate the position in which the rule will be stored. */ - rule_index = 0; - - for (i = depth - 1; i > 0; i--) { - if (lpm->rule_info[i - 1].used_rules > 0) { - rule_index = lpm->rule_info[i - 1].first_rule - + lpm->rule_info[i - 1].used_rules; - break; - } - } - if (rule_index == lpm->max_rules) - return -ENOSPC; - - lpm->rule_info[depth - 1].first_rule = rule_index; - } - - /* Make room for the new rule in the array. */ - for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) { - if (lpm->rule_info[i - 1].first_rule - + lpm->rule_info[i - 1].used_rules == lpm->max_rules) - return -ENOSPC; - - if (lpm->rule_info[i - 1].used_rules > 0) { - lpm->rules_tbl[lpm->rule_info[i - 1].first_rule - + lpm->rule_info[i - 1].used_rules] - = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule]; - lpm->rule_info[i - 1].first_rule++; - } - } - - /* Add the new rule. */ - lpm->rules_tbl[rule_index].ip = ip_masked; - lpm->rules_tbl[rule_index].next_hop = next_hop; - - /* Increment the used rules counter for this rule group. 
*/ - lpm->rule_info[depth - 1].used_rules++; - - return rule_index; -} - -static inline int32_t -rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, +static int32_t +rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, uint32_t next_hop) { uint32_t rule_gindex, rule_index, last_rule; @@ -574,31 +338,8 @@ rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, * Delete a rule from the rule table. * NOTE: Valid range for depth parameter is 1 .. 32 inclusive. */ -static inline void -rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth) -{ - int i; - - VERIFY_DEPTH(depth); - - lpm->rules_tbl[rule_index] = - lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule - + lpm->rule_info[depth - 1].used_rules - 1]; - - for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) { - if (lpm->rule_info[i].used_rules > 0) { - lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] = - lpm->rules_tbl[lpm->rule_info[i].first_rule - + lpm->rule_info[i].used_rules - 1]; - lpm->rule_info[i].first_rule--; - } - } - - lpm->rule_info[depth - 1].used_rules--; -} - -static inline void -rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth) +static void +rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth) { int i; @@ -624,29 +365,8 @@ rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth) * Finds a rule in rule table. * NOTE: Valid range for depth parameter is 1 .. 32 inclusive. */ -static inline int32_t -rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth) -{ - uint32_t rule_gindex, last_rule, rule_index; - - VERIFY_DEPTH(depth); - - rule_gindex = lpm->rule_info[depth - 1].first_rule; - last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules; - - /* Scan used rules at given depth to find rule. */ - for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) { - /* If rule is found return the rule index. */ - if (lpm->rules_tbl[rule_index].ip == ip_masked) - return rule_index; - } - - /* If rule is not found return -EINVAL. */ - return -EINVAL; -} - -static inline int32_t -rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth) +static int32_t +rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth) { uint32_t rule_gindex, last_rule, rule_index; @@ -669,35 +389,8 @@ rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth) /* * Find, clean and allocate a tbl8. */ -static inline int32_t -tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8) -{ - uint32_t group_idx; /* tbl8 group index. */ - struct rte_lpm_tbl_entry_v20 *tbl8_entry; - - /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */ - for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS; - group_idx++) { - tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES]; - /* If a free tbl8 group is found clean it and set as VALID. */ - if (!tbl8_entry->valid_group) { - memset(&tbl8_entry[0], 0, - RTE_LPM_TBL8_GROUP_NUM_ENTRIES * - sizeof(tbl8_entry[0])); - - tbl8_entry->valid_group = VALID; - - /* Return group index for allocated tbl8 group. */ - return group_idx; - } - } - - /* If there are no tbl8 groups free then return error. */ - return -ENOSPC; -} - -static inline int32_t -tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s) +static int32_t +tbl8_alloc(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s) { uint32_t group_idx; /* tbl8 group index. 
*/ struct rte_lpm_tbl_entry *tbl8_entry; @@ -707,11 +400,19 @@ tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s) tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES]; /* If a free tbl8 group is found clean it and set as VALID. */ if (!tbl8_entry->valid_group) { + struct rte_lpm_tbl_entry new_tbl8_entry = { + .next_hop = 0, + .valid = INVALID, + .depth = 0, + .valid_group = VALID, + }; + memset(&tbl8_entry[0], 0, RTE_LPM_TBL8_GROUP_NUM_ENTRIES * sizeof(tbl8_entry[0])); - tbl8_entry->valid_group = VALID; + __atomic_store(tbl8_entry, &new_tbl8_entry, + __ATOMIC_RELAXED); /* Return group index for allocated tbl8 group. */ return group_idx; @@ -722,24 +423,21 @@ tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s) return -ENOSPC; } -static inline void -tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start) +static void +tbl8_free(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start) { /* Set tbl8 group invalid*/ - tbl8[tbl8_group_start].valid_group = INVALID; -} + struct rte_lpm_tbl_entry zero_tbl8_entry = {0}; -static inline void -tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start) -{ - /* Set tbl8 group invalid*/ - tbl8[tbl8_group_start].valid_group = INVALID; + __atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry, + __ATOMIC_RELAXED); } -static inline int32_t -add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, - uint8_t next_hop) +static __rte_noinline int32_t +add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, + uint32_t next_hop) { +#define group_idx next_hop uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j; /* Calculate the index into Table24. */ @@ -754,17 +452,18 @@ add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 && lpm->tbl24[i].depth <= depth)) { - struct rte_lpm_tbl_entry_v20 new_tbl24_entry = { + struct rte_lpm_tbl_entry new_tbl24_entry = { + .next_hop = next_hop, .valid = VALID, .valid_group = 0, .depth = depth, }; - new_tbl24_entry.next_hop = next_hop; /* Setting tbl24 entry in one go to avoid race * conditions */ - lpm->tbl24[i] = new_tbl24_entry; + __atomic_store(&lpm->tbl24[i], &new_tbl24_entry, + __ATOMIC_RELEASE); continue; } @@ -781,112 +480,46 @@ add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, for (j = tbl8_index; j < tbl8_group_end; j++) { if (!lpm->tbl8[j].valid || lpm->tbl8[j].depth <= depth) { - struct rte_lpm_tbl_entry_v20 + struct rte_lpm_tbl_entry new_tbl8_entry = { .valid = VALID, .valid_group = VALID, .depth = depth, + .next_hop = next_hop, }; - new_tbl8_entry.next_hop = next_hop; /* * Setting tbl8 entry in one go to avoid * race conditions */ - lpm->tbl8[j] = new_tbl8_entry; + __atomic_store(&lpm->tbl8[j], + &new_tbl8_entry, + __ATOMIC_RELAXED); continue; } } } } - +#undef group_idx return 0; } -static inline int32_t -add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, +static __rte_noinline int32_t +add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, uint32_t next_hop) { #define group_idx next_hop - uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j; + uint32_t tbl24_index; + int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index, + tbl8_range, i; - /* Calculate the index into Table24. 
*/ - tbl24_index = ip >> 8; - tbl24_range = depth_to_range(depth); - - for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) { - /* - * For invalid OR valid and non-extended tbl 24 entries set - * entry. - */ - if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 && - lpm->tbl24[i].depth <= depth)) { - - struct rte_lpm_tbl_entry new_tbl24_entry = { - .next_hop = next_hop, - .valid = VALID, - .valid_group = 0, - .depth = depth, - }; - - /* Setting tbl24 entry in one go to avoid race - * conditions - */ - lpm->tbl24[i] = new_tbl24_entry; - - continue; - } - - if (lpm->tbl24[i].valid_group == 1) { - /* If tbl24 entry is valid and extended calculate the - * index into tbl8. - */ - tbl8_index = lpm->tbl24[i].group_idx * - RTE_LPM_TBL8_GROUP_NUM_ENTRIES; - tbl8_group_end = tbl8_index + - RTE_LPM_TBL8_GROUP_NUM_ENTRIES; - - for (j = tbl8_index; j < tbl8_group_end; j++) { - if (!lpm->tbl8[j].valid || - lpm->tbl8[j].depth <= depth) { - struct rte_lpm_tbl_entry - new_tbl8_entry = { - .valid = VALID, - .valid_group = VALID, - .depth = depth, - .next_hop = next_hop, - }; - - /* - * Setting tbl8 entry in one go to avoid - * race conditions - */ - lpm->tbl8[j] = new_tbl8_entry; - - continue; - } - } - } - } -#undef group_idx - return 0; -} - -static inline int32_t -add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth, - uint8_t next_hop) -{ - uint32_t tbl24_index; - int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index, - tbl8_range, i; - - tbl24_index = (ip_masked >> 8); - tbl8_range = depth_to_range(depth); + tbl24_index = (ip_masked >> 8); + tbl8_range = depth_to_range(depth); if (!lpm->tbl24[tbl24_index].valid) { /* Search for a free tbl8 group. */ - tbl8_group_index = tbl8_alloc_v20(lpm->tbl8); + tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s); /* Check tbl8 allocation was successful. */ if (tbl8_group_index < 0) { @@ -900,135 +533,14 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth, /* Set tbl8 entry. */ for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) { - lpm->tbl8[i].depth = depth; - lpm->tbl8[i].next_hop = next_hop; - lpm->tbl8[i].valid = VALID; - } - - /* - * Update tbl24 entry to point to new tbl8 entry. Note: The - * ext_flag and tbl8_index need to be updated simultaneously, - * so assign whole structure in one go - */ - - struct rte_lpm_tbl_entry_v20 new_tbl24_entry = { - { .group_idx = (uint8_t)tbl8_group_index, }, - .valid = VALID, - .valid_group = 1, - .depth = 0, - }; - - lpm->tbl24[tbl24_index] = new_tbl24_entry; - - } /* If valid entry but not extended calculate the index into Table8. */ - else if (lpm->tbl24[tbl24_index].valid_group == 0) { - /* Search for free tbl8 group. */ - tbl8_group_index = tbl8_alloc_v20(lpm->tbl8); - - if (tbl8_group_index < 0) { - return tbl8_group_index; - } - - tbl8_group_start = tbl8_group_index * - RTE_LPM_TBL8_GROUP_NUM_ENTRIES; - tbl8_group_end = tbl8_group_start + - RTE_LPM_TBL8_GROUP_NUM_ENTRIES; - - /* Populate new tbl8 with tbl24 value. */ - for (i = tbl8_group_start; i < tbl8_group_end; i++) { - lpm->tbl8[i].valid = VALID; - lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth; - lpm->tbl8[i].next_hop = - lpm->tbl24[tbl24_index].next_hop; - } - - tbl8_index = tbl8_group_start + (ip_masked & 0xFF); - - /* Insert new rule into the tbl8 entry. 
*/ - for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) { - lpm->tbl8[i].valid = VALID; - lpm->tbl8[i].depth = depth; - lpm->tbl8[i].next_hop = next_hop; - } - - /* - * Update tbl24 entry to point to new tbl8 entry. Note: The - * ext_flag and tbl8_index need to be updated simultaneously, - * so assign whole structure in one go. - */ - - struct rte_lpm_tbl_entry_v20 new_tbl24_entry = { - { .group_idx = (uint8_t)tbl8_group_index, }, + struct rte_lpm_tbl_entry new_tbl8_entry = { .valid = VALID, - .valid_group = 1, - .depth = 0, - }; - - lpm->tbl24[tbl24_index] = new_tbl24_entry; - - } else { /* - * If it is valid, extended entry calculate the index into tbl8. - */ - tbl8_group_index = lpm->tbl24[tbl24_index].group_idx; - tbl8_group_start = tbl8_group_index * - RTE_LPM_TBL8_GROUP_NUM_ENTRIES; - tbl8_index = tbl8_group_start + (ip_masked & 0xFF); - - for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) { - - if (!lpm->tbl8[i].valid || - lpm->tbl8[i].depth <= depth) { - struct rte_lpm_tbl_entry_v20 new_tbl8_entry = { - .valid = VALID, - .depth = depth, - .valid_group = lpm->tbl8[i].valid_group, - }; - new_tbl8_entry.next_hop = next_hop; - /* - * Setting tbl8 entry in one go to avoid race - * condition - */ - lpm->tbl8[i] = new_tbl8_entry; - - continue; - } - } - } - - return 0; -} - -static inline int32_t -add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, - uint32_t next_hop) -{ -#define group_idx next_hop - uint32_t tbl24_index; - int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index, - tbl8_range, i; - - tbl24_index = (ip_masked >> 8); - tbl8_range = depth_to_range(depth); - - if (!lpm->tbl24[tbl24_index].valid) { - /* Search for a free tbl8 group. */ - tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s); - - /* Check tbl8 allocation was successful. */ - if (tbl8_group_index < 0) { - return tbl8_group_index; - } - - /* Find index into tbl8 and range. */ - tbl8_index = (tbl8_group_index * - RTE_LPM_TBL8_GROUP_NUM_ENTRIES) + - (ip_masked & 0xFF); - - /* Set tbl8 entry. */ - for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) { - lpm->tbl8[i].depth = depth; - lpm->tbl8[i].next_hop = next_hop; - lpm->tbl8[i].valid = VALID; + .depth = depth, + .valid_group = lpm->tbl8[i].valid_group, + .next_hop = next_hop, + }; + __atomic_store(&lpm->tbl8[i], &new_tbl8_entry, + __ATOMIC_RELAXED); } /* @@ -1044,12 +556,16 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, .depth = 0, }; - lpm->tbl24[tbl24_index] = new_tbl24_entry; + /* The tbl24 entry must be written only after the + * tbl8 entries are written. + */ + __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry, + __ATOMIC_RELEASE); } /* If valid entry but not extended calculate the index into Table8. */ else if (lpm->tbl24[tbl24_index].valid_group == 0) { /* Search for free tbl8 group. */ - tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s); + tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s); if (tbl8_group_index < 0) { return tbl8_group_index; @@ -1062,19 +578,28 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, /* Populate new tbl8 with tbl24 value. 
*/ for (i = tbl8_group_start; i < tbl8_group_end; i++) { - lpm->tbl8[i].valid = VALID; - lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth; - lpm->tbl8[i].next_hop = - lpm->tbl24[tbl24_index].next_hop; + struct rte_lpm_tbl_entry new_tbl8_entry = { + .valid = VALID, + .depth = lpm->tbl24[tbl24_index].depth, + .valid_group = lpm->tbl8[i].valid_group, + .next_hop = lpm->tbl24[tbl24_index].next_hop, + }; + __atomic_store(&lpm->tbl8[i], &new_tbl8_entry, + __ATOMIC_RELAXED); } tbl8_index = tbl8_group_start + (ip_masked & 0xFF); /* Insert new rule into the tbl8 entry. */ for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) { - lpm->tbl8[i].valid = VALID; - lpm->tbl8[i].depth = depth; - lpm->tbl8[i].next_hop = next_hop; + struct rte_lpm_tbl_entry new_tbl8_entry = { + .valid = VALID, + .depth = depth, + .valid_group = lpm->tbl8[i].valid_group, + .next_hop = next_hop, + }; + __atomic_store(&lpm->tbl8[i], &new_tbl8_entry, + __ATOMIC_RELAXED); } /* @@ -1090,7 +615,11 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, .depth = 0, }; - lpm->tbl24[tbl24_index] = new_tbl24_entry; + /* The tbl24 entry must be written only after the + * tbl8 entries are written. + */ + __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry, + __ATOMIC_RELEASE); } else { /* * If it is valid, extended entry calculate the index into tbl8. @@ -1115,7 +644,8 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, * Setting tbl8 entry in one go to avoid race * condition */ - lpm->tbl8[i] = new_tbl8_entry; + __atomic_store(&lpm->tbl8[i], &new_tbl8_entry, + __ATOMIC_RELAXED); continue; } @@ -1129,48 +659,7 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, * Add a route */ int -rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, - uint8_t next_hop) -{ - int32_t rule_index, status = 0; - uint32_t ip_masked; - - /* Check user arguments. */ - if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) - return -EINVAL; - - ip_masked = ip & depth_to_mask(depth); - - /* Add the rule to the rule table. */ - rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop); - - /* If the is no space available for new rule return error. */ - if (rule_index < 0) { - return rule_index; - } - - if (depth <= MAX_DEPTH_TBL24) { - status = add_depth_small_v20(lpm, ip_masked, depth, next_hop); - } else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */ - status = add_depth_big_v20(lpm, ip_masked, depth, next_hop); - - /* - * If add fails due to exhaustion of tbl8 extensions delete - * rule that was added to rule table. - */ - if (status < 0) { - rule_delete_v20(lpm, rule_index, depth); - - return status; - } - } - - return 0; -} -VERSION_SYMBOL(rte_lpm_add, _v20, 2.0); - -int -rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, +rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop) { int32_t rule_index, status = 0; @@ -1183,7 +672,7 @@ rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, ip_masked = ip & depth_to_mask(depth); /* Add the rule to the rule table. */ - rule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop); + rule_index = rule_add(lpm, ip_masked, depth, next_hop); /* If the is no space available for new rule return error. 
*/ if (rule_index < 0) { @@ -1191,16 +680,16 @@ rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, } if (depth <= MAX_DEPTH_TBL24) { - status = add_depth_small_v1604(lpm, ip_masked, depth, next_hop); + status = add_depth_small(lpm, ip_masked, depth, next_hop); } else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */ - status = add_depth_big_v1604(lpm, ip_masked, depth, next_hop); + status = add_depth_big(lpm, ip_masked, depth, next_hop); /* * If add fails due to exhaustion of tbl8 extensions delete * rule that was added to rule table. */ if (status < 0) { - rule_delete_v1604(lpm, rule_index, depth); + rule_delete(lpm, rule_index, depth); return status; } @@ -1208,42 +697,12 @@ rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, return 0; } -BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04); -MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, - uint8_t depth, uint32_t next_hop), rte_lpm_add_v1604); /* * Look for a rule in the high-level rules table */ int -rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, -uint8_t *next_hop) -{ - uint32_t ip_masked; - int32_t rule_index; - - /* Check user arguments. */ - if ((lpm == NULL) || - (next_hop == NULL) || - (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) - return -EINVAL; - - /* Look for the rule using rule_find. */ - ip_masked = ip & depth_to_mask(depth); - rule_index = rule_find_v20(lpm, ip_masked, depth); - - if (rule_index >= 0) { - *next_hop = lpm->rules_tbl[rule_index].next_hop; - return 1; - } - - /* If rule is not found return 0. */ - return 0; -} -VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0); - -int -rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, +rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t *next_hop) { uint32_t ip_masked; @@ -1257,7 +716,7 @@ uint32_t *next_hop) /* Look for the rule using rule_find. */ ip_masked = ip & depth_to_mask(depth); - rule_index = rule_find_v1604(lpm, ip_masked, depth); + rule_index = rule_find(lpm, ip_masked, depth); if (rule_index >= 0) { *next_hop = lpm->rules_tbl[rule_index].next_hop; @@ -1267,12 +726,9 @@ uint32_t *next_hop) /* If rule is not found return 0. 
*/ return 0; } -BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04); -MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, - uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604); -static inline int32_t -find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, +static int32_t +find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t *sub_rule_depth) { int32_t rule_index; @@ -1282,7 +738,7 @@ find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) { ip_masked = ip & depth_to_mask(prev_depth); - rule_index = rule_find_v20(lpm, ip_masked, prev_depth); + rule_index = rule_find(lpm, ip_masked, prev_depth); if (rule_index >= 0) { *sub_rule_depth = prev_depth; @@ -1293,123 +749,8 @@ find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, return -1; } -static inline int32_t -find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, - uint8_t *sub_rule_depth) -{ - int32_t rule_index; - uint32_t ip_masked; - uint8_t prev_depth; - - for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) { - ip_masked = ip & depth_to_mask(prev_depth); - - rule_index = rule_find_v1604(lpm, ip_masked, prev_depth); - - if (rule_index >= 0) { - *sub_rule_depth = prev_depth; - return rule_index; - } - } - - return -1; -} - -static inline int32_t -delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, - uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth) -{ - uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j; - - /* Calculate the range and index into Table24. */ - tbl24_range = depth_to_range(depth); - tbl24_index = (ip_masked >> 8); - - /* - * Firstly check the sub_rule_index. A -1 indicates no replacement rule - * and a positive number indicates a sub_rule_index. - */ - if (sub_rule_index < 0) { - /* - * If no replacement rule exists then invalidate entries - * associated with this rule. - */ - for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) { - - if (lpm->tbl24[i].valid_group == 0 && - lpm->tbl24[i].depth <= depth) { - lpm->tbl24[i].valid = INVALID; - } else if (lpm->tbl24[i].valid_group == 1) { - /* - * If TBL24 entry is extended, then there has - * to be a rule with depth >= 25 in the - * associated TBL8 group. - */ - - tbl8_group_index = lpm->tbl24[i].group_idx; - tbl8_index = tbl8_group_index * - RTE_LPM_TBL8_GROUP_NUM_ENTRIES; - - for (j = tbl8_index; j < (tbl8_index + - RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) { - - if (lpm->tbl8[j].depth <= depth) - lpm->tbl8[j].valid = INVALID; - } - } - } - } else { - /* - * If a replacement rule exists then modify entries - * associated with this rule. - */ - - struct rte_lpm_tbl_entry_v20 new_tbl24_entry = { - {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,}, - .valid = VALID, - .valid_group = 0, - .depth = sub_rule_depth, - }; - - struct rte_lpm_tbl_entry_v20 new_tbl8_entry = { - .valid = VALID, - .valid_group = VALID, - .depth = sub_rule_depth, - }; - new_tbl8_entry.next_hop = - lpm->rules_tbl[sub_rule_index].next_hop; - - for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) { - - if (lpm->tbl24[i].valid_group == 0 && - lpm->tbl24[i].depth <= depth) { - lpm->tbl24[i] = new_tbl24_entry; - } else if (lpm->tbl24[i].valid_group == 1) { - /* - * If TBL24 entry is extended, then there has - * to be a rule with depth >= 25 in the - * associated TBL8 group. 
- */ - - tbl8_group_index = lpm->tbl24[i].group_idx; - tbl8_index = tbl8_group_index * - RTE_LPM_TBL8_GROUP_NUM_ENTRIES; - - for (j = tbl8_index; j < (tbl8_index + - RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) { - - if (lpm->tbl8[j].depth <= depth) - lpm->tbl8[j] = new_tbl8_entry; - } - } - } - } - - return 0; -} - -static inline int32_t -delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked, +static int32_t +delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth) { #define group_idx next_hop @@ -1418,6 +759,7 @@ delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked, /* Calculate the range and index into Table24. */ tbl24_range = depth_to_range(depth); tbl24_index = (ip_masked >> 8); + struct rte_lpm_tbl_entry zero_tbl24_entry = {0}; /* * Firstly check the sub_rule_index. A -1 indicates no replacement rule @@ -1432,7 +774,8 @@ delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked, if (lpm->tbl24[i].valid_group == 0 && lpm->tbl24[i].depth <= depth) { - lpm->tbl24[i].valid = INVALID; + __atomic_store(&lpm->tbl24[i], + &zero_tbl24_entry, __ATOMIC_RELEASE); } else if (lpm->tbl24[i].valid_group == 1) { /* * If TBL24 entry is extended, then there has @@ -1477,7 +820,8 @@ delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked, if (lpm->tbl24[i].valid_group == 0 && lpm->tbl24[i].depth <= depth) { - lpm->tbl24[i] = new_tbl24_entry; + __atomic_store(&lpm->tbl24[i], &new_tbl24_entry, + __ATOMIC_RELEASE); } else if (lpm->tbl24[i].valid_group == 1) { /* * If TBL24 entry is extended, then there has @@ -1493,7 +837,9 @@ delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked, RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) { if (lpm->tbl8[j].depth <= depth) - lpm->tbl8[j] = new_tbl8_entry; + __atomic_store(&lpm->tbl8[j], + &new_tbl8_entry, + __ATOMIC_RELAXED); } } } @@ -1510,55 +856,8 @@ delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked, * Return of value > -1 means tbl8 is in use but has all the same values and * thus can be recycled */ -static inline int32_t -tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8, - uint32_t tbl8_group_start) -{ - uint32_t tbl8_group_end, i; - tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; - - /* - * Check the first entry of the given tbl8. If it is invalid we know - * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH - * (As they would affect all entries in a tbl8) and thus this table - * can not be recycled. - */ - if (tbl8[tbl8_group_start].valid) { - /* - * If first entry is valid check if the depth is less than 24 - * and if so check the rest of the entries to verify that they - * are all of this depth. - */ - if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) { - for (i = (tbl8_group_start + 1); i < tbl8_group_end; - i++) { - - if (tbl8[i].depth != - tbl8[tbl8_group_start].depth) { - - return -EEXIST; - } - } - /* If all entries are the same return the tb8 index */ - return tbl8_group_start; - } - - return -EEXIST; - } - /* - * If the first entry is invalid check if the rest of the entries in - * the tbl8 are invalid. - */ - for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) { - if (tbl8[i].valid) - return -EEXIST; - } - /* If no valid entries are found then return -EINVAL. 
*/ - return -EINVAL; -} - -static inline int32_t -tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8, +static int32_t +tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start) { uint32_t tbl8_group_end, i; @@ -1604,86 +903,8 @@ tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8, return -EINVAL; } -static inline int32_t -delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, - uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth) -{ - uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index, - tbl8_range, i; - int32_t tbl8_recycle_index; - - /* - * Calculate the index into tbl24 and range. Note: All depths larger - * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry. - */ - tbl24_index = ip_masked >> 8; - - /* Calculate the index into tbl8 and range. */ - tbl8_group_index = lpm->tbl24[tbl24_index].group_idx; - tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; - tbl8_index = tbl8_group_start + (ip_masked & 0xFF); - tbl8_range = depth_to_range(depth); - - if (sub_rule_index < 0) { - /* - * Loop through the range of entries on tbl8 for which the - * rule_to_delete must be removed or modified. - */ - for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) { - if (lpm->tbl8[i].depth <= depth) - lpm->tbl8[i].valid = INVALID; - } - } else { - /* Set new tbl8 entry. */ - struct rte_lpm_tbl_entry_v20 new_tbl8_entry = { - .valid = VALID, - .depth = sub_rule_depth, - .valid_group = lpm->tbl8[tbl8_group_start].valid_group, - }; - - new_tbl8_entry.next_hop = - lpm->rules_tbl[sub_rule_index].next_hop; - /* - * Loop through the range of entries on tbl8 for which the - * rule_to_delete must be modified. - */ - for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) { - if (lpm->tbl8[i].depth <= depth) - lpm->tbl8[i] = new_tbl8_entry; - } - } - - /* - * Check if there are any valid entries in this tbl8 group. If all - * tbl8 entries are invalid we can free the tbl8 and invalidate the - * associated tbl24 entry. - */ - - tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start); - - if (tbl8_recycle_index == -EINVAL) { - /* Set tbl24 before freeing tbl8 to avoid race condition. */ - lpm->tbl24[tbl24_index].valid = 0; - tbl8_free_v20(lpm->tbl8, tbl8_group_start); - } else if (tbl8_recycle_index > -1) { - /* Update tbl24 entry. */ - struct rte_lpm_tbl_entry_v20 new_tbl24_entry = { - { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, }, - .valid = VALID, - .valid_group = 0, - .depth = lpm->tbl8[tbl8_recycle_index].depth, - }; - - /* Set tbl24 before freeing tbl8 to avoid race condition. */ - lpm->tbl24[tbl24_index] = new_tbl24_entry; - tbl8_free_v20(lpm->tbl8, tbl8_group_start); - } - - return 0; -} - -static inline int32_t -delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, +static int32_t +delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth) { #define group_idx next_hop @@ -1727,7 +948,8 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, */ for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) { if (lpm->tbl8[i].depth <= depth) - lpm->tbl8[i] = new_tbl8_entry; + __atomic_store(&lpm->tbl8[i], &new_tbl8_entry, + __ATOMIC_RELAXED); } } @@ -1737,12 +959,15 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, * associated tbl24 entry. 
*/ - tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start); + tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start); if (tbl8_recycle_index == -EINVAL) { - /* Set tbl24 before freeing tbl8 to avoid race condition. */ + /* Set tbl24 before freeing tbl8 to avoid race condition. + * Prevent the free of the tbl8 group from hoisting. + */ lpm->tbl24[tbl24_index].valid = 0; - tbl8_free_v1604(lpm->tbl8, tbl8_group_start); + __atomic_thread_fence(__ATOMIC_RELEASE); + tbl8_free(lpm->tbl8, tbl8_group_start); } else if (tbl8_recycle_index > -1) { /* Update tbl24 entry. */ struct rte_lpm_tbl_entry new_tbl24_entry = { @@ -1752,9 +977,13 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, .depth = lpm->tbl8[tbl8_recycle_index].depth, }; - /* Set tbl24 before freeing tbl8 to avoid race condition. */ - lpm->tbl24[tbl24_index] = new_tbl24_entry; - tbl8_free_v1604(lpm->tbl8, tbl8_group_start); + /* Set tbl24 before freeing tbl8 to avoid race condition. + * Prevent the free of the tbl8 group from hoisting. + */ + __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry, + __ATOMIC_RELAXED); + __atomic_thread_fence(__ATOMIC_RELEASE); + tbl8_free(lpm->tbl8, tbl8_group_start); } #undef group_idx return 0; @@ -1764,7 +993,7 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, * Deletes a rule */ int -rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth) +rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth) { int32_t rule_to_delete_index, sub_rule_index; uint32_t ip_masked; @@ -1783,7 +1012,7 @@ rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth) * Find the index of the input rule, that needs to be deleted, in the * rule table. */ - rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth); + rule_to_delete_index = rule_find(lpm, ip_masked, depth); /* * Check if rule_to_delete_index was found. If no rule was found the @@ -1793,7 +1022,7 @@ rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth) return -EINVAL; /* Delete the rule from the rule table. */ - rule_delete_v20(lpm, rule_to_delete_index, depth); + rule_delete(lpm, rule_to_delete_index, depth); /* * Find rule to replace the rule_to_delete. If there is no rule to @@ -1801,100 +1030,26 @@ rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth) * entries associated with this rule. */ sub_rule_depth = 0; - sub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth); + sub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth); /* * If the input depth value is less than 25 use function * delete_depth_small otherwise use delete_depth_big. */ if (depth <= MAX_DEPTH_TBL24) { - return delete_depth_small_v20(lpm, ip_masked, depth, + return delete_depth_small(lpm, ip_masked, depth, sub_rule_index, sub_rule_depth); } else { /* If depth > MAX_DEPTH_TBL24 */ - return delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index, + return delete_depth_big(lpm, ip_masked, depth, sub_rule_index, sub_rule_depth); } } -VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0); - -int -rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth) -{ - int32_t rule_to_delete_index, sub_rule_index; - uint32_t ip_masked; - uint8_t sub_rule_depth; - /* - * Check input arguments. Note: IP must be a positive integer of 32 - * bits in length therefore it need not be checked. 
- */ - if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) { - return -EINVAL; - } - - ip_masked = ip & depth_to_mask(depth); - - /* - * Find the index of the input rule, that needs to be deleted, in the - * rule table. - */ - rule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth); - - /* - * Check if rule_to_delete_index was found. If no rule was found the - * function rule_find returns -EINVAL. - */ - if (rule_to_delete_index < 0) - return -EINVAL; - - /* Delete the rule from the rule table. */ - rule_delete_v1604(lpm, rule_to_delete_index, depth); - - /* - * Find rule to replace the rule_to_delete. If there is no rule to - * replace the rule_to_delete we return -1 and invalidate the table - * entries associated with this rule. - */ - sub_rule_depth = 0; - sub_rule_index = find_previous_rule_v1604(lpm, ip, depth, &sub_rule_depth); - - /* - * If the input depth value is less than 25 use function - * delete_depth_small otherwise use delete_depth_big. - */ - if (depth <= MAX_DEPTH_TBL24) { - return delete_depth_small_v1604(lpm, ip_masked, depth, - sub_rule_index, sub_rule_depth); - } else { /* If depth > MAX_DEPTH_TBL24 */ - return delete_depth_big_v1604(lpm, ip_masked, depth, sub_rule_index, - sub_rule_depth); - } -} -BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04); -MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, - uint8_t depth), rte_lpm_delete_v1604); /* * Delete all rules from the LPM table. */ void -rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm) -{ - /* Zero rule information. */ - memset(lpm->rule_info, 0, sizeof(lpm->rule_info)); - - /* Zero tbl24. */ - memset(lpm->tbl24, 0, sizeof(lpm->tbl24)); - - /* Zero tbl8. */ - memset(lpm->tbl8, 0, sizeof(lpm->tbl8)); - - /* Delete all rules form the rules table. */ - memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules); -} -VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0); - -void -rte_lpm_delete_all_v1604(struct rte_lpm *lpm) +rte_lpm_delete_all(struct rte_lpm *lpm) { /* Zero rule information. */ memset(lpm->rule_info, 0, sizeof(lpm->rule_info)); @@ -1909,6 +1064,3 @@ rte_lpm_delete_all_v1604(struct rte_lpm *lpm) /* Delete all rules form the rules table. */ memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules); } -BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04); -MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm), - rte_lpm_delete_all_v1604);
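
For reference, the table-management code above is built on two small helpers, depth_to_mask() and depth_to_range(); the only change they see in this patch is the dropped "inline" qualifier on depth_to_range(). Below is a minimal standalone sketch of their semantics, written to match the definitions in rte_lpm.c (MAX_DEPTH_TBL24 is 24 there; toy code, not the library source):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_DEPTH_TBL24 24	/* prefixes up to /24 live entirely in tbl24 */

/* /depth prefix -> 32-bit netmask, e.g. depth 24 -> 0xFFFFFF00.
 * The assert stands in for the library's VERIFY_DEPTH() check. */
static uint32_t
depth_to_mask(uint8_t depth)
{
	assert(depth >= 1 && depth <= 32);
	return (uint32_t)0xFFFFFFFF << (32 - depth);
}

/* Number of table entries a /depth prefix spans: tbl24 entries for
 * depths 1..24, tbl8 entries within one group for depths 25..32. */
static uint32_t
depth_to_range(uint8_t depth)
{
	assert(depth >= 1 && depth <= 32);
	if (depth <= MAX_DEPTH_TBL24)
		return 1 << (MAX_DEPTH_TBL24 - depth);
	return 1 << (32 - depth);
}

int
main(void)
{
	/* A /16 covers 2^8 = 256 consecutive tbl24 entries. */
	printf("mask(/16)=0x%08x range(/16)=%u\n",
	       depth_to_mask(16), depth_to_range(16));
	/* A /30 covers 2^2 = 4 entries inside a single tbl8 group. */
	printf("mask(/30)=0x%08x range(/30)=%u\n",
	       depth_to_mask(30), depth_to_range(30));
	return 0;
}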
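
The add and delete paths in the patch all preserve the invariant that the lock-free reader depends on: a tbl24 entry either carries a next hop directly (valid_group == 0) or, for prefixes longer than /24, carries the index of a 256-entry tbl8 group (valid_group == 1), with the group index stored in the next_hop bits via the "#define group_idx next_hop" alias visible in several hunks. A toy reader showing that two-level walk (simplified layout; toy_entry and toy_lookup are illustrative names, not the real rte_lpm_lookup()):

#include <stdint.h>
#include <stdio.h>

#define TBL8_GROUP_NUM_ENTRIES 256

/* Same shape as rte_lpm_tbl_entry: one 32-bit word per entry. */
struct toy_entry {
	uint32_t next_hop    : 24;	/* doubles as tbl8 group index */
	uint32_t valid       : 1;
	uint32_t valid_group : 1;	/* tbl24 "extended" flag */
	uint32_t depth       : 6;
};

static struct toy_entry tbl24[1 << 24];	/* indexed by the top 24 bits */
static struct toy_entry tbl8[16 * TBL8_GROUP_NUM_ENTRIES];

/* Returns 0 and sets *next_hop on hit, -1 on miss. */
static int
toy_lookup(uint32_t ip, uint32_t *next_hop)
{
	const struct toy_entry *e = &tbl24[ip >> 8];

	if (!e->valid)
		return -1;

	if (e->valid_group) {
		/* Extended: next_hop holds the group index and the low
		 * 8 bits of the address select the entry in the group. */
		e = &tbl8[e->next_hop * TBL8_GROUP_NUM_ENTRIES + (ip & 0xFF)];
		if (!e->valid)
			return -1;
	}

	*next_hop = e->next_hop;
	return 0;
}

int
main(void)
{
	uint32_t hop;

	/* Direct route: 10.0.0.0/24 -> hop 7 fills one tbl24 slot. */
	tbl24[10u << 16] = (struct toy_entry){ .next_hop = 7, .valid = 1,
					       .valid_group = 0, .depth = 24 };

	/* Extended route: 10.0.1.64/26 -> hop 9 occupies 64 slots of
	 * tbl8 group 0, which tbl24 entry 10.0.1 points at. */
	for (int i = 64; i < 128; i++)
		tbl8[i] = (struct toy_entry){ .next_hop = 9, .valid = 1,
					      .valid_group = 1, .depth = 26 };
	tbl24[(10u << 16) | 1] = (struct toy_entry){ .next_hop = 0,
						     .valid = 1,
						     .valid_group = 1,
						     .depth = 0 };

	if (toy_lookup((10u << 24) | 5, &hop) == 0)		/* 10.0.0.5 */
		printf("10.0.0.5  -> %u\n", hop);		/* prints 7 */
	if (toy_lookup((10u << 24) | (1u << 8) | 70, &hop) == 0) /* 10.0.1.70 */
		printf("10.0.1.70 -> %u\n", hop);		/* prints 9 */
	return 0;
}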
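
The recurring __atomic_store() edits above all apply one writer-side protocol: populate or update the tbl8 entries with relaxed single-word stores, then make the group reachable (or unreachable) through tbl24 with release ordering, so a concurrent reader can never follow a group index into a half-written group. On the delete path, the added __atomic_thread_fence(__ATOMIC_RELEASE) keeps the recycling of the tbl8 group from hoisting above the tbl24 unlink. A condensed sketch of both directions, using the same GCC __atomic builtins on a simplified entry layout (publish_group and retire_group are illustrative names; the reader side pairs with this through an acquire load or the address dependency on the group index):

#include <stdint.h>
#include <string.h>

#define GROUP_SZ 256

/* A 4-byte entry, same shape as rte_lpm_tbl_entry: one word, so a single
 * __atomic_store makes the whole entry visible at once. */
struct entry {
	uint32_t next_hop    : 24;	/* group index when valid_group is set */
	uint32_t valid       : 1;
	uint32_t valid_group : 1;
	uint32_t depth       : 6;
};

static struct entry tbl24[1 << 16];	/* toy size; the real tbl24 is 1 << 24 */
static struct entry tbl8[64 * GROUP_SZ];

/* Add path: fill the group first (relaxed), then publish it through tbl24
 * with RELEASE so the "pointer" store cannot pass the payload stores. */
static void
publish_group(uint32_t tbl24_index, uint32_t group, uint32_t hop, uint8_t depth)
{
	uint32_t i;

	for (i = group * GROUP_SZ; i < (group + 1) * GROUP_SZ; i++) {
		struct entry e = { .next_hop = hop, .valid = 1,
				   .valid_group = 1, .depth = depth };
		__atomic_store(&tbl8[i], &e, __ATOMIC_RELAXED);
	}

	struct entry ext = { .next_hop = group,	/* tbl8 group index */
			     .valid = 1, .valid_group = 1, .depth = 0 };
	__atomic_store(&tbl24[tbl24_index], &ext, __ATOMIC_RELEASE);
}

/* Delete path: unlink from tbl24 first, then fence before recycling the
 * group, mirroring the fence added before tbl8_free() in the patch. */
static void
retire_group(uint32_t tbl24_index, uint32_t group)
{
	struct entry inv = { 0 };

	__atomic_store(&tbl24[tbl24_index], &inv, __ATOMIC_RELAXED);
	__atomic_thread_fence(__ATOMIC_RELEASE);
	memset(&tbl8[group * GROUP_SZ], 0, GROUP_SZ * sizeof(struct entry));
}

int
main(void)
{
	publish_group(0, 3, 42, 25);
	retire_group(0, 3);
	return 0;
}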
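
With the _v20 and _v1604 symbol versions removed, only the plain names remain: rte_lpm_create(), rte_lpm_add(), rte_lpm_is_rule_present(), rte_lpm_delete(), rte_lpm_delete_all() and rte_lpm_free(). A minimal control-path usage sketch of that API (assumes an initialized EAL with hugepage or --no-huge setup; IP4() is a local convenience macro, not a DPDK API; error handling trimmed):

#include <stdint.h>
#include <stdio.h>

#include <rte_eal.h>
#include <rte_lpm.h>
#include <rte_memory.h>		/* SOCKET_ID_ANY */

/* Local convenience macro for dotted-quad literals. */
#define IP4(a, b, c, d) \
	((uint32_t)(((uint32_t)(a) << 24) | ((b) << 16) | ((c) << 8) | (d)))

int
main(int argc, char **argv)
{
	struct rte_lpm_config config = {
		.max_rules = 1024,
		.number_tbl8s = 256,	/* tbl8 groups available for >/24 routes */
		.flags = 0,
	};
	struct rte_lpm *lpm;
	uint32_t next_hop;

	if (rte_eal_init(argc, argv) < 0)
		return 1;

	lpm = rte_lpm_create("demo", SOCKET_ID_ANY, &config);
	if (lpm == NULL)
		return 1;

	/* 10.0.0.0/8 via next hop 1, 10.1.0.0/16 via next hop 2. */
	rte_lpm_add(lpm, IP4(10, 0, 0, 0), 8, 1);
	rte_lpm_add(lpm, IP4(10, 1, 0, 0), 16, 2);

	/* Longest prefix wins: 10.1.2.3 matches the /16, not the /8. */
	if (rte_lpm_lookup(lpm, IP4(10, 1, 2, 3), &next_hop) == 0)
		printf("10.1.2.3 -> next hop %u\n", next_hop);	/* prints 2 */

	rte_lpm_free(lpm);
	return 0;
}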