#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
+#include <rte_tailq.h>
#include "rte_lpm.h"
/*
* Converts given depth value to its corresponding range value.
*/
-static inline uint32_t __attribute__((pure))
+static uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)
{
VERIFY_DEPTH(depth);
lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
- rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_read_lock();
TAILQ_FOREACH(te, lpm_list, next) {
l = te->data;
if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
break;
}
- rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_read_unlock();
if (te == NULL) {
rte_errno = ENOENT;
lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
- rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_read_lock();
TAILQ_FOREACH(te, lpm_list, next) {
l = te->data;
if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
break;
}
- rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_read_unlock();
if (te == NULL) {
rte_errno = ENOENT;
/* Determine the amount of memory to allocate. */
mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_lock();
/* guarantee there's no existing entry with the same name */
TAILQ_FOREACH(te, lpm_list, next) {
/* Save user arguments. */
lpm->max_rules = max_rules;
- snprintf(lpm->name, sizeof(lpm->name), "%s", name);
+ strlcpy(lpm->name, name, sizeof(lpm->name));
te->data = lpm;
TAILQ_INSERT_TAIL(lpm_list, te, next);
exit:
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_unlock();
return lpm;
}
tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_lock();
/* guarantee there's no existing entry with the same name */
TAILQ_FOREACH(te, lpm_list, next) {
/* Save user arguments. */
lpm->max_rules = config->max_rules;
lpm->number_tbl8s = config->number_tbl8s;
- snprintf(lpm->name, sizeof(lpm->name), "%s", name);
+ strlcpy(lpm->name, name, sizeof(lpm->name));
te->data = lpm;
TAILQ_INSERT_TAIL(lpm_list, te, next);
exit:
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_unlock();
return lpm;
}
lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_lock();
/* find our tailq entry */
TAILQ_FOREACH(te, lpm_list, next) {
if (te != NULL)
TAILQ_REMOVE(lpm_list, te, next);
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_unlock();
rte_free(lpm);
rte_free(te);
lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_lock();
/* find our tailq entry */
TAILQ_FOREACH(te, lpm_list, next) {
if (te != NULL)
TAILQ_REMOVE(lpm_list, te, next);
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_unlock();
rte_free(lpm->tbl8);
rte_free(lpm->rules_tbl);
* are stored in the rule table from 0 - 31.
* NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
*/
-static inline int32_t
+static int32_t
rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
uint8_t next_hop)
{
return rule_index;
}
-static inline int32_t
+static int32_t
rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
uint32_t next_hop)
{
* Delete a rule from the rule table.
* NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
*/
-static inline void
+static void
rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth)
{
int i;
lpm->rule_info[depth - 1].used_rules--;
}
-static inline void
+static void
rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
{
int i;
* Finds a rule in rule table.
* NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
*/
-static inline int32_t
+static int32_t
rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)
{
uint32_t rule_gindex, last_rule, rule_index;
return -EINVAL;
}
-static inline int32_t
+static int32_t
rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
{
uint32_t rule_gindex, last_rule, rule_index;
/*
* Find, clean and allocate a tbl8.
*/
-static inline int32_t
+static int32_t
tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
{
uint32_t group_idx; /* tbl8 group index. */
tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
/* If a free tbl8 group is found clean it and set as VALID. */
if (!tbl8_entry->valid_group) {
+ struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
+ .valid = INVALID,
+ .depth = 0,
+ .valid_group = VALID,
+ };
+ new_tbl8_entry.next_hop = 0;
+
memset(&tbl8_entry[0], 0,
RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
sizeof(tbl8_entry[0]));
- tbl8_entry->valid_group = VALID;
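+ /* A relaxed store is sufficient here: readers only reach
+ * this group through a tbl24 entry that is published later
+ * with release ordering.
+ */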
+ __atomic_store(tbl8_entry, &new_tbl8_entry,
+ __ATOMIC_RELAXED);
/* Return group index for allocated tbl8 group. */
return group_idx;
return -ENOSPC;
}
-static inline int32_t
+static int32_t
tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
{
uint32_t group_idx; /* tbl8 group index. */
tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
/* If a free tbl8 group is found clean it and set as VALID. */
if (!tbl8_entry->valid_group) {
+ struct rte_lpm_tbl_entry new_tbl8_entry = {
+ .next_hop = 0,
+ .valid = INVALID,
+ .depth = 0,
+ .valid_group = VALID,
+ };
+
memset(&tbl8_entry[0], 0,
RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
sizeof(tbl8_entry[0]));
- tbl8_entry->valid_group = VALID;
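+ /* A relaxed store is sufficient here: readers only reach
+ * this group through a tbl24 entry that is published later
+ * with release ordering.
+ */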
+ __atomic_store(tbl8_entry, &new_tbl8_entry,
+ __ATOMIC_RELAXED);
/* Return group index for allocated tbl8 group. */
return group_idx;
return -ENOSPC;
}
-static inline void
+static void
tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
{
/* Set tbl8 group invalid */
- tbl8[tbl8_group_start].valid_group = INVALID;
+ struct rte_lpm_tbl_entry_v20 zero_tbl8_entry = {
+ .valid = INVALID,
+ .depth = 0,
+ .valid_group = INVALID,
+ };
+ zero_tbl8_entry.next_hop = 0;
+
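+ /* Relaxed store: the caller orders this free after its
+ * tbl24 update with a release fence.
+ */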
+ __atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
+ __ATOMIC_RELAXED);
}
-static inline void
+static void
tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
{
/* Set tbl8 group invalid */
- tbl8[tbl8_group_start].valid_group = INVALID;
+ struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
+
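+ /* Relaxed store: the caller orders this free after its
+ * tbl24 update with a release fence.
+ */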
+ __atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
+ __ATOMIC_RELAXED);
}
-static inline int32_t
+static __rte_noinline int32_t
add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
uint8_t next_hop)
{
/* Setting tbl24 entry in one go to avoid race
* conditions
*/
- lpm->tbl24[i] = new_tbl24_entry;
+ __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
continue;
}
* Setting tbl8 entry in one go to avoid
* race conditions
*/
- lpm->tbl8[j] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[j],
+ &new_tbl8_entry,
+ __ATOMIC_RELAXED);
continue;
}
return 0;
}
-static inline int32_t
+static __rte_noinline int32_t
add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
uint32_t next_hop)
{
/* Setting tbl24 entry in one go to avoid race
* conditions
*/
- lpm->tbl24[i] = new_tbl24_entry;
+ __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
continue;
}
* Setting tbl8 entry in one go to avoid
* race conditions
*/
- lpm->tbl8[j] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[j],
+ &new_tbl8_entry,
+ __ATOMIC_RELAXED);
continue;
}
return 0;
}
-static inline int32_t
+static __rte_noinline int32_t
add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
uint8_t next_hop)
{
/* Set tbl8 entry. */
for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
- lpm->tbl8[i].depth = depth;
- lpm->tbl8[i].next_hop = next_hop;
- lpm->tbl8[i].valid = VALID;
+ struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
+ .valid = VALID,
+ .depth = depth,
+ .valid_group = lpm->tbl8[i].valid_group,
+ };
+ new_tbl8_entry.next_hop = next_hop;
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
}
/*
.depth = 0,
};
- lpm->tbl24[tbl24_index] = new_tbl24_entry;
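+ /* The tbl24 entry must be written only after the
+ * tbl8 entries are written.
+ */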
+ __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
} /* If valid entry but not extended calculate the index into Table8. */
else if (lpm->tbl24[tbl24_index].valid_group == 0) {
/* Populate new tbl8 with tbl24 value. */
for (i = tbl8_group_start; i < tbl8_group_end; i++) {
- lpm->tbl8[i].valid = VALID;
- lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
- lpm->tbl8[i].next_hop =
- lpm->tbl24[tbl24_index].next_hop;
+ struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
+ .valid = VALID,
+ .depth = lpm->tbl24[tbl24_index].depth,
+ .valid_group = lpm->tbl8[i].valid_group,
+ };
+ new_tbl8_entry.next_hop =
+ lpm->tbl24[tbl24_index].next_hop;
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
}
tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
/* Insert new rule into the tbl8 entry. */
for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
- lpm->tbl8[i].valid = VALID;
- lpm->tbl8[i].depth = depth;
- lpm->tbl8[i].next_hop = next_hop;
+ struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
+ .valid = VALID,
+ .depth = depth,
+ .valid_group = lpm->tbl8[i].valid_group,
+ };
+ new_tbl8_entry.next_hop = next_hop;
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
}
/*
.depth = 0,
};
- lpm->tbl24[tbl24_index] = new_tbl24_entry;
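+ /* The tbl24 entry must be written only after the
+ * tbl8 entries are written.
+ */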
+ __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
} else { /*
* If it is valid, extended entry calculate the index into tbl8.
* Setting tbl8 entry in one go to avoid race
* condition
*/
- lpm->tbl8[i] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
continue;
}
return 0;
}
-static inline int32_t
+static __rte_noinline int32_t
add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
uint32_t next_hop)
{
/* Set tbl8 entry. */
for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
- lpm->tbl8[i].depth = depth;
- lpm->tbl8[i].next_hop = next_hop;
- lpm->tbl8[i].valid = VALID;
+ struct rte_lpm_tbl_entry new_tbl8_entry = {
+ .valid = VALID,
+ .depth = depth,
+ .valid_group = lpm->tbl8[i].valid_group,
+ .next_hop = next_hop,
+ };
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
}
/*
.depth = 0,
};
- lpm->tbl24[tbl24_index] = new_tbl24_entry;
+ /* The tbl24 entry must be written only after the
+ * tbl8 entries are written.
+ */
+ __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
} /* If valid entry but not extended calculate the index into Table8. */
else if (lpm->tbl24[tbl24_index].valid_group == 0) {
/* Populate new tbl8 with tbl24 value. */
for (i = tbl8_group_start; i < tbl8_group_end; i++) {
- lpm->tbl8[i].valid = VALID;
- lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
- lpm->tbl8[i].next_hop =
- lpm->tbl24[tbl24_index].next_hop;
+ struct rte_lpm_tbl_entry new_tbl8_entry = {
+ .valid = VALID,
+ .depth = lpm->tbl24[tbl24_index].depth,
+ .valid_group = lpm->tbl8[i].valid_group,
+ .next_hop = lpm->tbl24[tbl24_index].next_hop,
+ };
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
}
tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
/* Insert new rule into the tbl8 entry. */
for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
- lpm->tbl8[i].valid = VALID;
- lpm->tbl8[i].depth = depth;
- lpm->tbl8[i].next_hop = next_hop;
+ struct rte_lpm_tbl_entry new_tbl8_entry = {
+ .valid = VALID,
+ .depth = depth,
+ .valid_group = lpm->tbl8[i].valid_group,
+ .next_hop = next_hop,
+ };
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
}
/*
.depth = 0,
};
- lpm->tbl24[tbl24_index] = new_tbl24_entry;
+ /* The tbl24 entry must be written only after the
+ * tbl8 entries are written.
+ */
+ __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
} else { /*
* If it is valid, extended entry calculate the index into tbl8.
* Setting tbl8 entry in one go to avoid race
* condition
*/
- lpm->tbl8[i] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
continue;
}
MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,
uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604);
-static inline int32_t
+static int32_t
find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
uint8_t *sub_rule_depth)
{
return -1;
}
-static inline int32_t
+static int32_t
find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
uint8_t *sub_rule_depth)
{
return -1;
}
-static inline int32_t
+static int32_t
delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
if (lpm->tbl24[i].valid_group == 0 &&
lpm->tbl24[i].depth <= depth) {
- lpm->tbl24[i].valid = INVALID;
+ struct rte_lpm_tbl_entry_v20
+ zero_tbl24_entry = {
+ .valid = INVALID,
+ .depth = 0,
+ .valid_group = 0,
+ };
+ zero_tbl24_entry.next_hop = 0;
+ __atomic_store(&lpm->tbl24[i],
+ &zero_tbl24_entry, __ATOMIC_RELEASE);
} else if (lpm->tbl24[i].valid_group == 1) {
/*
* If TBL24 entry is extended, then there has
if (lpm->tbl24[i].valid_group == 0 &&
lpm->tbl24[i].depth <= depth) {
- lpm->tbl24[i] = new_tbl24_entry;
+ __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
} else if (lpm->tbl24[i].valid_group == 1) {
/*
* If TBL24 entry is extended, then there has
RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
if (lpm->tbl8[j].depth <= depth)
- lpm->tbl8[j] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[j],
+ &new_tbl8_entry,
+ __ATOMIC_RELAXED);
}
}
}
return 0;
}
-static inline int32_t
+static int32_t
delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
/* Calculate the range and index into Table24. */
tbl24_range = depth_to_range(depth);
tbl24_index = (ip_masked >> 8);
+ struct rte_lpm_tbl_entry zero_tbl24_entry = {0};
/*
* Firstly check the sub_rule_index. A -1 indicates no replacement rule
if (lpm->tbl24[i].valid_group == 0 &&
lpm->tbl24[i].depth <= depth) {
- lpm->tbl24[i].valid = INVALID;
+ __atomic_store(&lpm->tbl24[i],
+ &zero_tbl24_entry, __ATOMIC_RELEASE);
} else if (lpm->tbl24[i].valid_group == 1) {
/*
* If TBL24 entry is extended, then there has
if (lpm->tbl24[i].valid_group == 0 &&
lpm->tbl24[i].depth <= depth) {
- lpm->tbl24[i] = new_tbl24_entry;
+ __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
} else if (lpm->tbl24[i].valid_group == 1) {
/*
* If TBL24 entry is extended, then there has
RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
if (lpm->tbl8[j].depth <= depth)
- lpm->tbl8[j] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[j],
+ &new_tbl8_entry,
+ __ATOMIC_RELAXED);
}
}
}
* Return of value > -1 means tbl8 is in use but has all the same values and
* thus can be recycled
*/
-static inline int32_t
+static int32_t
tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
uint32_t tbl8_group_start)
{
return -EINVAL;
}
-static inline int32_t
+static int32_t
tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,
uint32_t tbl8_group_start)
{
return -EINVAL;
}
-static inline int32_t
+static int32_t
delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
*/
for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
if (lpm->tbl8[i].depth <= depth)
- lpm->tbl8[i] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
}
}
tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);
if (tbl8_recycle_index == -EINVAL) {
- /* Set tbl24 before freeing tbl8 to avoid race condition. */
+ /* Set tbl24 before freeing tbl8 to avoid race condition.
+ * Prevent the free of the tbl8 group from being hoisted
+ * above this store.
+ */
lpm->tbl24[tbl24_index].valid = 0;
+ __atomic_thread_fence(__ATOMIC_RELEASE);
tbl8_free_v20(lpm->tbl8, tbl8_group_start);
} else if (tbl8_recycle_index > -1) {
/* Update tbl24 entry. */
.depth = lpm->tbl8[tbl8_recycle_index].depth,
};
- /* Set tbl24 before freeing tbl8 to avoid race condition. */
- lpm->tbl24[tbl24_index] = new_tbl24_entry;
+ /* Set tbl24 before freeing tbl8 to avoid race condition.
+ * Prevent the free of the tbl8 group from being hoisted
+ * above this store.
+ */
+ __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+ __ATOMIC_RELAXED);
+ __atomic_thread_fence(__ATOMIC_RELEASE);
tbl8_free_v20(lpm->tbl8, tbl8_group_start);
}
return 0;
}
-static inline int32_t
+static int32_t
delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
*/
for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
if (lpm->tbl8[i].depth <= depth)
- lpm->tbl8[i] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
}
}
tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);
if (tbl8_recycle_index == -EINVAL) {
- /* Set tbl24 before freeing tbl8 to avoid race condition. */
+ /* Set tbl24 before freeing tbl8 to avoid race condition.
+ * Prevent the free of the tbl8 group from being hoisted
+ * above this store.
+ */
lpm->tbl24[tbl24_index].valid = 0;
+ __atomic_thread_fence(__ATOMIC_RELEASE);
tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
} else if (tbl8_recycle_index > -1) {
/* Update tbl24 entry. */
.depth = lpm->tbl8[tbl8_recycle_index].depth,
};
- /* Set tbl24 before freeing tbl8 to avoid race condition. */
- lpm->tbl24[tbl24_index] = new_tbl24_entry;
+ /* Set tbl24 before freeing tbl8 to avoid race condition.
+ * Prevent the free of the tbl8 group from being hoisted
+ * above this store.
+ */
+ __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+ __ATOMIC_RELAXED);
+ __atomic_thread_fence(__ATOMIC_RELEASE);
tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
}
#undef group_idx