X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=inline;f=lib%2Flibrte_lpm%2Frte_lpm.c;h=106916dc82cb25b94758b6f1b3065452d765aae2;hb=ff962da373ae3f28c677210dce0c46963717881f;hp=ec67765cbe50651677a0121bfae9ff7e8c729021;hpb=f05b0fbe7d73b105ec9c848d4ecb6bb965391b8c;p=dpdk.git

diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index ec67765cbe..106916dc82 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Intel Corporation nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
  */
 
 #include 
@@ -36,7 +7,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 
@@ -44,7 +14,6 @@
 #include 
 #include  /* for definition of RTE_CACHE_LINE_SIZE */
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -52,6 +21,8 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 
 #include "rte_lpm.h"
 
@@ -101,7 +72,7 @@ depth_to_mask(uint8_t depth)
 /*
  * Converts given depth value to its corresponding range value.
  */
-static inline uint32_t __attribute__((pure))
+static uint32_t __attribute__((pure))
 depth_to_range(uint8_t depth)
 {
 	VERIFY_DEPTH(depth);
@@ -119,7 +90,7 @@ depth_to_range(uint8_t depth)
 /*
  * Find an existing lpm table and return a pointer to it.
*/ -struct rte_lpm_v20 * +struct rte_lpm_v20 * __vsym rte_lpm_find_existing_v20(const char *name) { struct rte_lpm_v20 *l = NULL; @@ -128,13 +99,13 @@ rte_lpm_find_existing_v20(const char *name) lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list); - rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_read_lock(); TAILQ_FOREACH(te, lpm_list, next) { - l = (struct rte_lpm_v20 *) te->data; + l = te->data; if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0) break; } - rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_read_unlock(); if (te == NULL) { rte_errno = ENOENT; @@ -145,7 +116,7 @@ rte_lpm_find_existing_v20(const char *name) } VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0); -struct rte_lpm * +struct rte_lpm * __vsym rte_lpm_find_existing_v1604(const char *name) { struct rte_lpm *l = NULL; @@ -154,13 +125,13 @@ rte_lpm_find_existing_v1604(const char *name) lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list); - rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_read_lock(); TAILQ_FOREACH(te, lpm_list, next) { - l = (struct rte_lpm *) te->data; + l = te->data; if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0) break; } - rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_read_unlock(); if (te == NULL) { rte_errno = ENOENT; @@ -176,7 +147,7 @@ MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name), /* * Allocates memory for LPM object */ -struct rte_lpm_v20 * +struct rte_lpm_v20 * __vsym rte_lpm_create_v20(const char *name, int socket_id, int max_rules, __rte_unused int flags) { @@ -201,16 +172,17 @@ rte_lpm_create_v20(const char *name, int socket_id, int max_rules, /* Determine the amount of memory to allocate. */ mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules); - rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_write_lock(); /* guarantee there's no existing */ TAILQ_FOREACH(te, lpm_list, next) { - lpm = (struct rte_lpm_v20 *) te->data; + lpm = te->data; if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0) break; } - lpm = NULL; + if (te != NULL) { + lpm = NULL; rte_errno = EEXIST; goto exit; } @@ -219,34 +191,36 @@ rte_lpm_create_v20(const char *name, int socket_id, int max_rules, te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0); if (te == NULL) { RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n"); + rte_errno = ENOMEM; goto exit; } /* Allocate memory to store the LPM data structures. */ - lpm = (struct rte_lpm_v20 *)rte_zmalloc_socket(mem_name, mem_size, + lpm = rte_zmalloc_socket(mem_name, mem_size, RTE_CACHE_LINE_SIZE, socket_id); if (lpm == NULL) { RTE_LOG(ERR, LPM, "LPM memory allocation failed\n"); rte_free(te); + rte_errno = ENOMEM; goto exit; } /* Save user arguments. 
*/ lpm->max_rules = max_rules; - snprintf(lpm->name, sizeof(lpm->name), "%s", name); + strlcpy(lpm->name, name, sizeof(lpm->name)); - te->data = (void *) lpm; + te->data = lpm; TAILQ_INSERT_TAIL(lpm_list, te, next); exit: - rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_write_unlock(); return lpm; } VERSION_SYMBOL(rte_lpm_create, _v20, 2.0); -struct rte_lpm * +struct rte_lpm * __vsym rte_lpm_create_v1604(const char *name, int socket_id, const struct rte_lpm_config *config) { @@ -275,16 +249,17 @@ rte_lpm_create_v1604(const char *name, int socket_id, tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s); - rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_write_lock(); /* guarantee there's no existing */ TAILQ_FOREACH(te, lpm_list, next) { - lpm = (struct rte_lpm *) te->data; + lpm = te->data; if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0) break; } - lpm = NULL; + if (te != NULL) { + lpm = NULL; rte_errno = EEXIST; goto exit; } @@ -293,19 +268,21 @@ rte_lpm_create_v1604(const char *name, int socket_id, te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0); if (te == NULL) { RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n"); + rte_errno = ENOMEM; goto exit; } /* Allocate memory to store the LPM data structures. */ - lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size, + lpm = rte_zmalloc_socket(mem_name, mem_size, RTE_CACHE_LINE_SIZE, socket_id); if (lpm == NULL) { RTE_LOG(ERR, LPM, "LPM memory allocation failed\n"); rte_free(te); + rte_errno = ENOMEM; goto exit; } - lpm->rules_tbl = (struct rte_lpm_rule *)rte_zmalloc_socket(NULL, + lpm->rules_tbl = rte_zmalloc_socket(NULL, (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id); if (lpm->rules_tbl == NULL) { @@ -313,31 +290,34 @@ rte_lpm_create_v1604(const char *name, int socket_id, rte_free(lpm); lpm = NULL; rte_free(te); + rte_errno = ENOMEM; goto exit; } - lpm->tbl8 = (struct rte_lpm_tbl_entry *)rte_zmalloc_socket(NULL, + lpm->tbl8 = rte_zmalloc_socket(NULL, (size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id); if (lpm->tbl8 == NULL) { RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n"); + rte_free(lpm->rules_tbl); rte_free(lpm); lpm = NULL; rte_free(te); + rte_errno = ENOMEM; goto exit; } /* Save user arguments. */ lpm->max_rules = config->max_rules; lpm->number_tbl8s = config->number_tbl8s; - snprintf(lpm->name, sizeof(lpm->name), "%s", name); + strlcpy(lpm->name, name, sizeof(lpm->name)); - te->data = (void *) lpm; + te->data = lpm; TAILQ_INSERT_TAIL(lpm_list, te, next); exit: - rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_write_unlock(); return lpm; } @@ -349,7 +329,7 @@ MAP_STATIC_SYMBOL( /* * Deallocates memory for given LPM table. 
*/ -void +void __vsym rte_lpm_free_v20(struct rte_lpm_v20 *lpm) { struct rte_lpm_list *lpm_list; @@ -361,7 +341,7 @@ rte_lpm_free_v20(struct rte_lpm_v20 *lpm) lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list); - rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_write_lock(); /* find our tailq entry */ TAILQ_FOREACH(te, lpm_list, next) { @@ -371,14 +351,14 @@ rte_lpm_free_v20(struct rte_lpm_v20 *lpm) if (te != NULL) TAILQ_REMOVE(lpm_list, te, next); - rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_write_unlock(); rte_free(lpm); rte_free(te); } VERSION_SYMBOL(rte_lpm_free, _v20, 2.0); -void +void __vsym rte_lpm_free_v1604(struct rte_lpm *lpm) { struct rte_lpm_list *lpm_list; @@ -390,7 +370,7 @@ rte_lpm_free_v1604(struct rte_lpm *lpm) lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list); - rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_write_lock(); /* find our tailq entry */ TAILQ_FOREACH(te, lpm_list, next) { @@ -400,8 +380,9 @@ rte_lpm_free_v1604(struct rte_lpm *lpm) if (te != NULL) TAILQ_REMOVE(lpm_list, te, next); - rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_write_unlock(); + rte_free(lpm->tbl8); rte_free(lpm->rules_tbl); rte_free(lpm); rte_free(te); @@ -420,7 +401,7 @@ MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm), * are stored in the rule table from 0 - 31. * NOTE: Valid range for depth parameter is 1 .. 32 inclusive. */ -static inline int32_t +static int32_t rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth, uint8_t next_hop) { @@ -492,7 +473,7 @@ rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth, return rule_index; } -static inline int32_t +static int32_t rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, uint32_t next_hop) { @@ -568,7 +549,7 @@ rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, * Delete a rule from the rule table. * NOTE: Valid range for depth parameter is 1 .. 32 inclusive. */ -static inline void +static void rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth) { int i; @@ -591,7 +572,7 @@ rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth) lpm->rule_info[depth - 1].used_rules--; } -static inline void +static void rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth) { int i; @@ -618,7 +599,7 @@ rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth) * Finds a rule in rule table. * NOTE: Valid range for depth parameter is 1 .. 32 inclusive. */ -static inline int32_t +static int32_t rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth) { uint32_t rule_gindex, last_rule, rule_index; @@ -639,7 +620,7 @@ rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth) return -EINVAL; } -static inline int32_t +static int32_t rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth) { uint32_t rule_gindex, last_rule, rule_index; @@ -663,7 +644,7 @@ rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth) /* * Find, clean and allocate a tbl8. */ -static inline int32_t +static int32_t tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8) { uint32_t group_idx; /* tbl8 group index. */ @@ -675,11 +656,19 @@ tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8) tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES]; /* If a free tbl8 group is found clean it and set as VALID. 
*/ if (!tbl8_entry->valid_group) { + struct rte_lpm_tbl_entry_v20 new_tbl8_entry = { + .valid = INVALID, + .depth = 0, + .valid_group = VALID, + }; + new_tbl8_entry.next_hop = 0; + memset(&tbl8_entry[0], 0, RTE_LPM_TBL8_GROUP_NUM_ENTRIES * sizeof(tbl8_entry[0])); - tbl8_entry->valid_group = VALID; + __atomic_store(tbl8_entry, &new_tbl8_entry, + __ATOMIC_RELAXED); /* Return group index for allocated tbl8 group. */ return group_idx; @@ -690,7 +679,7 @@ tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8) return -ENOSPC; } -static inline int32_t +static int32_t tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s) { uint32_t group_idx; /* tbl8 group index. */ @@ -701,11 +690,19 @@ tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s) tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES]; /* If a free tbl8 group is found clean it and set as VALID. */ if (!tbl8_entry->valid_group) { + struct rte_lpm_tbl_entry new_tbl8_entry = { + .next_hop = 0, + .valid = INVALID, + .depth = 0, + .valid_group = VALID, + }; + memset(&tbl8_entry[0], 0, RTE_LPM_TBL8_GROUP_NUM_ENTRIES * sizeof(tbl8_entry[0])); - tbl8_entry->valid_group = VALID; + __atomic_store(tbl8_entry, &new_tbl8_entry, + __ATOMIC_RELAXED); /* Return group index for allocated tbl8 group. */ return group_idx; @@ -716,21 +713,32 @@ tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s) return -ENOSPC; } -static inline void +static void tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start) { /* Set tbl8 group invalid*/ - tbl8[tbl8_group_start].valid_group = INVALID; + struct rte_lpm_tbl_entry_v20 zero_tbl8_entry = { + .valid = INVALID, + .depth = 0, + .valid_group = INVALID, + }; + zero_tbl8_entry.next_hop = 0; + + __atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry, + __ATOMIC_RELAXED); } -static inline void +static void tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start) { /* Set tbl8 group invalid*/ - tbl8[tbl8_group_start].valid_group = INVALID; + struct rte_lpm_tbl_entry zero_tbl8_entry = {0}; + + __atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry, + __ATOMIC_RELAXED); } -static inline int32_t +static __rte_noinline int32_t add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop) { @@ -758,7 +766,8 @@ add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, /* Setting tbl24 entry in one go to avoid race * conditions */ - lpm->tbl24[i] = new_tbl24_entry; + __atomic_store(&lpm->tbl24[i], &new_tbl24_entry, + __ATOMIC_RELEASE); continue; } @@ -787,7 +796,9 @@ add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, * Setting tbl8 entry in one go to avoid * race conditions */ - lpm->tbl8[j] = new_tbl8_entry; + __atomic_store(&lpm->tbl8[j], + &new_tbl8_entry, + __ATOMIC_RELAXED); continue; } @@ -798,7 +809,7 @@ add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, return 0; } -static inline int32_t +static __rte_noinline int32_t add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop) { @@ -827,7 +838,8 @@ add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, /* Setting tbl24 entry in one go to avoid race * conditions */ - lpm->tbl24[i] = new_tbl24_entry; + __atomic_store(&lpm->tbl24[i], &new_tbl24_entry, + __ATOMIC_RELEASE); continue; } @@ -856,7 +868,9 @@ add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, * Setting tbl8 entry in one go to avoid * race conditions */ - lpm->tbl8[j] 
= new_tbl8_entry; + __atomic_store(&lpm->tbl8[j], + &new_tbl8_entry, + __ATOMIC_RELAXED); continue; } @@ -867,7 +881,7 @@ add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, return 0; } -static inline int32_t +static __rte_noinline int32_t add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth, uint8_t next_hop) { @@ -894,9 +908,14 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth, /* Set tbl8 entry. */ for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) { - lpm->tbl8[i].depth = depth; - lpm->tbl8[i].next_hop = next_hop; - lpm->tbl8[i].valid = VALID; + struct rte_lpm_tbl_entry_v20 new_tbl8_entry = { + .valid = VALID, + .depth = depth, + .valid_group = lpm->tbl8[i].valid_group, + }; + new_tbl8_entry.next_hop = next_hop; + __atomic_store(&lpm->tbl8[i], &new_tbl8_entry, + __ATOMIC_RELAXED); } /* @@ -906,13 +925,14 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth, */ struct rte_lpm_tbl_entry_v20 new_tbl24_entry = { - { .group_idx = (uint8_t)tbl8_group_index, }, + .group_idx = (uint8_t)tbl8_group_index, .valid = VALID, .valid_group = 1, .depth = 0, }; - lpm->tbl24[tbl24_index] = new_tbl24_entry; + __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry, + __ATOMIC_RELEASE); } /* If valid entry but not extended calculate the index into Table8. */ else if (lpm->tbl24[tbl24_index].valid_group == 0) { @@ -930,19 +950,29 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth, /* Populate new tbl8 with tbl24 value. */ for (i = tbl8_group_start; i < tbl8_group_end; i++) { - lpm->tbl8[i].valid = VALID; - lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth; - lpm->tbl8[i].next_hop = - lpm->tbl24[tbl24_index].next_hop; + struct rte_lpm_tbl_entry_v20 new_tbl8_entry = { + .valid = VALID, + .depth = lpm->tbl24[tbl24_index].depth, + .valid_group = lpm->tbl8[i].valid_group, + }; + new_tbl8_entry.next_hop = + lpm->tbl24[tbl24_index].next_hop; + __atomic_store(&lpm->tbl8[i], &new_tbl8_entry, + __ATOMIC_RELAXED); } tbl8_index = tbl8_group_start + (ip_masked & 0xFF); /* Insert new rule into the tbl8 entry. */ for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) { - lpm->tbl8[i].valid = VALID; - lpm->tbl8[i].depth = depth; - lpm->tbl8[i].next_hop = next_hop; + struct rte_lpm_tbl_entry_v20 new_tbl8_entry = { + .valid = VALID, + .depth = depth, + .valid_group = lpm->tbl8[i].valid_group, + }; + new_tbl8_entry.next_hop = next_hop; + __atomic_store(&lpm->tbl8[i], &new_tbl8_entry, + __ATOMIC_RELAXED); } /* @@ -952,13 +982,14 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth, */ struct rte_lpm_tbl_entry_v20 new_tbl24_entry = { - { .group_idx = (uint8_t)tbl8_group_index, }, + .group_idx = (uint8_t)tbl8_group_index, .valid = VALID, .valid_group = 1, .depth = 0, }; - lpm->tbl24[tbl24_index] = new_tbl24_entry; + __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry, + __ATOMIC_RELEASE); } else { /* * If it is valid, extended entry calculate the index into tbl8. 
@@ -982,7 +1013,8 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth, * Setting tbl8 entry in one go to avoid race * condition */ - lpm->tbl8[i] = new_tbl8_entry; + __atomic_store(&lpm->tbl8[i], &new_tbl8_entry, + __ATOMIC_RELAXED); continue; } @@ -992,7 +1024,7 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth, return 0; } -static inline int32_t +static __rte_noinline int32_t add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, uint32_t next_hop) { @@ -1020,9 +1052,14 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, /* Set tbl8 entry. */ for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) { - lpm->tbl8[i].depth = depth; - lpm->tbl8[i].next_hop = next_hop; - lpm->tbl8[i].valid = VALID; + struct rte_lpm_tbl_entry new_tbl8_entry = { + .valid = VALID, + .depth = depth, + .valid_group = lpm->tbl8[i].valid_group, + .next_hop = next_hop, + }; + __atomic_store(&lpm->tbl8[i], &new_tbl8_entry, + __ATOMIC_RELAXED); } /* @@ -1032,13 +1069,17 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, */ struct rte_lpm_tbl_entry new_tbl24_entry = { - .group_idx = (uint8_t)tbl8_group_index, + .group_idx = tbl8_group_index, .valid = VALID, .valid_group = 1, .depth = 0, }; - lpm->tbl24[tbl24_index] = new_tbl24_entry; + /* The tbl24 entry must be written only after the + * tbl8 entries are written. + */ + __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry, + __ATOMIC_RELEASE); } /* If valid entry but not extended calculate the index into Table8. */ else if (lpm->tbl24[tbl24_index].valid_group == 0) { @@ -1056,19 +1097,28 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, /* Populate new tbl8 with tbl24 value. */ for (i = tbl8_group_start; i < tbl8_group_end; i++) { - lpm->tbl8[i].valid = VALID; - lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth; - lpm->tbl8[i].next_hop = - lpm->tbl24[tbl24_index].next_hop; + struct rte_lpm_tbl_entry new_tbl8_entry = { + .valid = VALID, + .depth = lpm->tbl24[tbl24_index].depth, + .valid_group = lpm->tbl8[i].valid_group, + .next_hop = lpm->tbl24[tbl24_index].next_hop, + }; + __atomic_store(&lpm->tbl8[i], &new_tbl8_entry, + __ATOMIC_RELAXED); } tbl8_index = tbl8_group_start + (ip_masked & 0xFF); /* Insert new rule into the tbl8 entry. */ for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) { - lpm->tbl8[i].valid = VALID; - lpm->tbl8[i].depth = depth; - lpm->tbl8[i].next_hop = next_hop; + struct rte_lpm_tbl_entry new_tbl8_entry = { + .valid = VALID, + .depth = depth, + .valid_group = lpm->tbl8[i].valid_group, + .next_hop = next_hop, + }; + __atomic_store(&lpm->tbl8[i], &new_tbl8_entry, + __ATOMIC_RELAXED); } /* @@ -1078,13 +1128,17 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, */ struct rte_lpm_tbl_entry new_tbl24_entry = { - .group_idx = (uint8_t)tbl8_group_index, + .group_idx = tbl8_group_index, .valid = VALID, .valid_group = 1, .depth = 0, }; - lpm->tbl24[tbl24_index] = new_tbl24_entry; + /* The tbl24 entry must be written only after the + * tbl8 entries are written. + */ + __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry, + __ATOMIC_RELEASE); } else { /* * If it is valid, extended entry calculate the index into tbl8. 
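The hunks above and below replace plain assignments to tbl24/tbl8 with __atomic_store() calls so that a tbl24 entry is only published after the tbl8 group it points to has been fully written: relaxed stores for the tbl8 fill, then a release store for the tbl24 entry. A minimal, self-contained sketch of that publish pattern follows; the struct layout, table parameters and function names are placeholders for illustration, not the real rte_lpm definitions.

#include <stdint.h>

#define GROUP_SIZE 256

struct entry {			/* stand-in for a 4-byte LPM table entry */
	uint32_t next_hop : 24;
	uint32_t valid    : 1;
	uint32_t ext      : 1;	/* entry points at a tbl8 group */
	uint32_t depth    : 6;
};

/* Writer: fill the whole tbl8 group first, then publish it through tbl24. */
static void
publish_group(struct entry *tbl24, struct entry *tbl8,
	      uint32_t tbl24_index, uint32_t group_idx, struct entry fill)
{
	struct entry *grp = &tbl8[group_idx * GROUP_SIZE];
	uint32_t i;

	for (i = 0; i < GROUP_SIZE; i++)
		__atomic_store(&grp[i], &fill, __ATOMIC_RELAXED);

	struct entry ext = { .next_hop = group_idx, .valid = 1, .ext = 1 };

	/* Release: none of the tbl8 stores above may be reordered past this. */
	__atomic_store(&tbl24[tbl24_index], &ext, __ATOMIC_RELEASE);
}

/* Reader: in this sketch an acquire load of tbl24 pairs with the release
 * store above, so the tbl8 fill is visible before the entry pointing at it. */
static uint32_t
lookup(struct entry *tbl24, struct entry *tbl8, uint32_t ip)
{
	struct entry e, e8;

	__atomic_load(&tbl24[ip >> 8], &e, __ATOMIC_ACQUIRE);
	if (!e.valid)
		return UINT32_MAX;
	if (!e.ext)
		return e.next_hop;

	__atomic_load(&tbl8[(uint32_t)e.next_hop * GROUP_SIZE + (ip & 0xFF)],
		      &e8, __ATOMIC_RELAXED);
	return e8.next_hop;
}
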
@@ -1109,7 +1163,8 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, * Setting tbl8 entry in one go to avoid race * condition */ - lpm->tbl8[i] = new_tbl8_entry; + __atomic_store(&lpm->tbl8[i], &new_tbl8_entry, + __ATOMIC_RELAXED); continue; } @@ -1122,7 +1177,7 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, /* * Add a route */ -int +int __vsym rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop) { @@ -1163,7 +1218,7 @@ rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, } VERSION_SYMBOL(rte_lpm_add, _v20, 2.0); -int +int __vsym rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop) { @@ -1209,7 +1264,7 @@ MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, /* * Look for a rule in the high-level rules table */ -int +int __vsym rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, uint8_t *next_hop) { @@ -1236,7 +1291,7 @@ uint8_t *next_hop) } VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0); -int +int __vsym rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t *next_hop) { @@ -1265,7 +1320,7 @@ BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04); MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604); -static inline int32_t +static int32_t find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, uint8_t *sub_rule_depth) { @@ -1287,7 +1342,7 @@ find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, return -1; } -static inline int32_t +static int32_t find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t *sub_rule_depth) { @@ -1309,7 +1364,7 @@ find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, return -1; } -static inline int32_t +static int32_t delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth) { @@ -1332,7 +1387,15 @@ delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, if (lpm->tbl24[i].valid_group == 0 && lpm->tbl24[i].depth <= depth) { - lpm->tbl24[i].valid = INVALID; + struct rte_lpm_tbl_entry_v20 + zero_tbl24_entry = { + .valid = INVALID, + .depth = 0, + .valid_group = 0, + }; + zero_tbl24_entry.next_hop = 0; + __atomic_store(&lpm->tbl24[i], + &zero_tbl24_entry, __ATOMIC_RELEASE); } else if (lpm->tbl24[i].valid_group == 1) { /* * If TBL24 entry is extended, then there has @@ -1359,7 +1422,7 @@ delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, */ struct rte_lpm_tbl_entry_v20 new_tbl24_entry = { - {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,}, + .next_hop = lpm->rules_tbl[sub_rule_index].next_hop, .valid = VALID, .valid_group = 0, .depth = sub_rule_depth, @@ -1377,7 +1440,8 @@ delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, if (lpm->tbl24[i].valid_group == 0 && lpm->tbl24[i].depth <= depth) { - lpm->tbl24[i] = new_tbl24_entry; + __atomic_store(&lpm->tbl24[i], &new_tbl24_entry, + __ATOMIC_RELEASE); } else if (lpm->tbl24[i].valid_group == 1) { /* * If TBL24 entry is extended, then there has @@ -1393,7 +1457,9 @@ delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) { if (lpm->tbl8[j].depth <= depth) - lpm->tbl8[j] = new_tbl8_entry; + __atomic_store(&lpm->tbl8[j], + &new_tbl8_entry, + 
__ATOMIC_RELAXED); } } } @@ -1402,7 +1468,7 @@ delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, return 0; } -static inline int32_t +static int32_t delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth) { @@ -1412,6 +1478,7 @@ delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked, /* Calculate the range and index into Table24. */ tbl24_range = depth_to_range(depth); tbl24_index = (ip_masked >> 8); + struct rte_lpm_tbl_entry zero_tbl24_entry = {0}; /* * Firstly check the sub_rule_index. A -1 indicates no replacement rule @@ -1426,7 +1493,8 @@ delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked, if (lpm->tbl24[i].valid_group == 0 && lpm->tbl24[i].depth <= depth) { - lpm->tbl24[i].valid = INVALID; + __atomic_store(&lpm->tbl24[i], + &zero_tbl24_entry, __ATOMIC_RELEASE); } else if (lpm->tbl24[i].valid_group == 1) { /* * If TBL24 entry is extended, then there has @@ -1471,7 +1539,8 @@ delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked, if (lpm->tbl24[i].valid_group == 0 && lpm->tbl24[i].depth <= depth) { - lpm->tbl24[i] = new_tbl24_entry; + __atomic_store(&lpm->tbl24[i], &new_tbl24_entry, + __ATOMIC_RELEASE); } else if (lpm->tbl24[i].valid_group == 1) { /* * If TBL24 entry is extended, then there has @@ -1487,7 +1556,9 @@ delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked, RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) { if (lpm->tbl8[j].depth <= depth) - lpm->tbl8[j] = new_tbl8_entry; + __atomic_store(&lpm->tbl8[j], + &new_tbl8_entry, + __ATOMIC_RELAXED); } } } @@ -1504,7 +1575,7 @@ delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked, * Return of value > -1 means tbl8 is in use but has all the same values and * thus can be recycled */ -static inline int32_t +static int32_t tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start) { @@ -1551,7 +1622,7 @@ tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8, return -EINVAL; } -static inline int32_t +static int32_t tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start) { @@ -1598,7 +1669,7 @@ tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8, return -EINVAL; } -static inline int32_t +static int32_t delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth) { @@ -1643,7 +1714,8 @@ delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, */ for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) { if (lpm->tbl8[i].depth <= depth) - lpm->tbl8[i] = new_tbl8_entry; + __atomic_store(&lpm->tbl8[i], &new_tbl8_entry, + __ATOMIC_RELAXED); } } @@ -1656,27 +1728,34 @@ delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start); if (tbl8_recycle_index == -EINVAL) { - /* Set tbl24 before freeing tbl8 to avoid race condition. */ + /* Set tbl24 before freeing tbl8 to avoid race condition. + * Prevent the free of the tbl8 group from hoisting. + */ lpm->tbl24[tbl24_index].valid = 0; + __atomic_thread_fence(__ATOMIC_RELEASE); tbl8_free_v20(lpm->tbl8, tbl8_group_start); } else if (tbl8_recycle_index > -1) { /* Update tbl24 entry. 
*/ struct rte_lpm_tbl_entry_v20 new_tbl24_entry = { - { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, }, + .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, .valid = VALID, .valid_group = 0, .depth = lpm->tbl8[tbl8_recycle_index].depth, }; - /* Set tbl24 before freeing tbl8 to avoid race condition. */ - lpm->tbl24[tbl24_index] = new_tbl24_entry; + /* Set tbl24 before freeing tbl8 to avoid race condition. + * Prevent the free of the tbl8 group from hoisting. + */ + __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry, + __ATOMIC_RELAXED); + __atomic_thread_fence(__ATOMIC_RELEASE); tbl8_free_v20(lpm->tbl8, tbl8_group_start); } return 0; } -static inline int32_t +static int32_t delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth) { @@ -1721,7 +1800,8 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, */ for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) { if (lpm->tbl8[i].depth <= depth) - lpm->tbl8[i] = new_tbl8_entry; + __atomic_store(&lpm->tbl8[i], &new_tbl8_entry, + __ATOMIC_RELAXED); } } @@ -1734,8 +1814,11 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start); if (tbl8_recycle_index == -EINVAL) { - /* Set tbl24 before freeing tbl8 to avoid race condition. */ + /* Set tbl24 before freeing tbl8 to avoid race condition. + * Prevent the free of the tbl8 group from hoisting. + */ lpm->tbl24[tbl24_index].valid = 0; + __atomic_thread_fence(__ATOMIC_RELEASE); tbl8_free_v1604(lpm->tbl8, tbl8_group_start); } else if (tbl8_recycle_index > -1) { /* Update tbl24 entry. */ @@ -1746,8 +1829,12 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, .depth = lpm->tbl8[tbl8_recycle_index].depth, }; - /* Set tbl24 before freeing tbl8 to avoid race condition. */ - lpm->tbl24[tbl24_index] = new_tbl24_entry; + /* Set tbl24 before freeing tbl8 to avoid race condition. + * Prevent the free of the tbl8 group from hoisting. + */ + __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry, + __ATOMIC_RELAXED); + __atomic_thread_fence(__ATOMIC_RELEASE); tbl8_free_v1604(lpm->tbl8, tbl8_group_start); } #undef group_idx @@ -1757,7 +1844,7 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, /* * Deletes a rule */ -int +int __vsym rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth) { int32_t rule_to_delete_index, sub_rule_index; @@ -1811,7 +1898,7 @@ rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth) } VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0); -int +int __vsym rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth) { int32_t rule_to_delete_index, sub_rule_index; @@ -1870,7 +1957,7 @@ MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, /* * Delete all rules from the LPM table. */ -void +void __vsym rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm) { /* Zero rule information. */ @@ -1887,7 +1974,7 @@ rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm) } VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0); -void +void __vsym rte_lpm_delete_all_v1604(struct rte_lpm *lpm) { /* Zero rule information. */
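On the delete path shown above, the ordering is the mirror image: the tbl24 entry is cleared or rewritten first, and only then is the tbl8 group handed back for reuse, with __atomic_thread_fence(__ATOMIC_RELEASE) keeping the free from being hoisted above the tbl24 update. A minimal sketch of that teardown order, reusing the placeholder struct entry and invented helper name from the sketch earlier rather than the real rte_lpm helpers:

/* Delete side: detach the tbl8 group from tbl24, then recycle it. */
static void
unpublish_group(struct entry *tbl24, struct entry *tbl8,
		uint32_t tbl24_index, uint32_t group_start)
{
	struct entry invalid = { 0 };

	/* 1. Stop advertising the group through the fast-path table. */
	__atomic_store(&tbl24[tbl24_index], &invalid, __ATOMIC_RELAXED);

	/* 2. Order the detach before the recycle below; without the fence
	 *    the group could be marked free (and grabbed by a concurrent
	 *    add) while tbl24 still points at it.
	 */
	__atomic_thread_fence(__ATOMIC_RELEASE);

	/* 3. Mark the group head free so a later insertion may reuse it. */
	__atomic_store(&tbl8[group_start], &invalid, __ATOMIC_RELAXED);
}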