/* Setting tbl24 entry in one go to avoid race
* conditions
*/
- lpm->tbl24[i] = new_tbl24_entry;
+ __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
continue;
}
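
Why the single __atomic_store matters: new_tbl24_entry is built field by
field on the stack, and publishing it with one release store guarantees
that a concurrent reader sees either the old entry or the complete new
one, never a half-written mix. A minimal sketch of the pattern, using a
simplified stand-in for struct rte_lpm_tbl_entry (the real struct packs
the same fields into 32 bits):

#include <stdint.h>

/* Simplified stand-in for struct rte_lpm_tbl_entry; illustration only. */
struct tbl_entry {
	uint32_t next_hop    : 24;
	uint32_t valid       : 1;
	uint32_t valid_group : 1;
	uint32_t depth       : 6;
};

/* Racy: two separate field writes; a reader running between them can
 * observe the new next_hop paired with the old depth.
 */
static void set_entry_racy(struct tbl_entry *slot, uint32_t hop, uint8_t d)
{
	slot->next_hop = hop;
	slot->depth = d;
}

/* The patch's approach: build the entry locally, then publish it with a
 * single atomic release store, so readers see old or new, never a mix.
 */
static void set_entry_atomic(struct tbl_entry *slot, uint32_t hop, uint8_t d)
{
	struct tbl_entry new_entry = {
		.next_hop = hop,
		.valid = 1,
		.valid_group = 0,
		.depth = d,
	};
	__atomic_store(slot, &new_entry, __ATOMIC_RELEASE);
}
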
.depth = 0,
};
- lpm->tbl24[tbl24_index] = new_tbl24_entry;
+ /* The tbl24 entry must be written only after the
+ * tbl8 entries are written.
+ */
+ __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
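
The release ordering here does more than keep the entry itself whole:
the tbl8 group this entry points into has just been filled in, and those
writes must become visible before the tbl24 entry that makes the group
reachable. A minimal sketch of the idiom, with hypothetical names rather
than the DPDK API:

#include <stdint.h>

#define GROUP_SIZE 256

static uint32_t tbl8_group[GROUP_SIZE];
static uint32_t tbl24_slot;	/* 0 = invalid, else a published group id */

/* Writer: initialize the group first, then publish the entry that makes
 * it reachable. The release store forbids the group writes from being
 * reordered after the publishing store.
 */
static void add_extended_entry(uint32_t group_id)
{
	for (int i = 0; i < GROUP_SIZE; i++)
		tbl8_group[i] = i + 1;	/* stand-in for real tbl8 setup */

	__atomic_store_n(&tbl24_slot, group_id, __ATOMIC_RELEASE);
}
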
} /* If valid entry but not extended calculate the index into Table8. */
else if (lpm->tbl24[tbl24_index].valid_group == 0) {
.depth = 0,
};
- lpm->tbl24[tbl24_index] = new_tbl24_entry;
+ /* The tbl24 entry must be written only after the
+ * tbl8 entries are written.
+ */
+ __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
} else { /*
* If it is valid, extended entry calculate the index into tbl8.
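
Continuing the sketch above, the reader side would pair the release
store with an acquire load (the actual DPDK lookup may instead rely on
the address dependency from the tbl24 entry to the tbl8 entry):

/* Reader: the acquire load pairs with the writer's release store; once
 * a non-zero (valid) slot value is observed, the group initialization
 * is guaranteed to be visible as well.
 */
static uint32_t lookup_extended(uint32_t offset)
{
	uint32_t group_id = __atomic_load_n(&tbl24_slot, __ATOMIC_ACQUIRE);

	if (group_id == 0)
		return 0;	/* entry not valid, no extended lookup */
	return tbl8_group[offset % GROUP_SIZE];
}
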
/* Calculate the range and index into Table24. */
tbl24_range = depth_to_range(depth);
tbl24_index = (ip_masked >> 8);
+ struct rte_lpm_tbl_entry zero_tbl24_entry = {0};
/*
* Firstly check the sub_rule_index. A -1 indicates no replacement rule
if (lpm->tbl24[i].valid_group == 0 &&
lpm->tbl24[i].depth <= depth) {
- lpm->tbl24[i].valid = INVALID;
+ __atomic_store(&lpm->tbl24[i],
+ &zero_tbl24_entry, __ATOMIC_RELEASE);
} else if (lpm->tbl24[i].valid_group == 1) {
/*
* If TBL24 entry is extended, then there has
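
Note what the replaced line did: lpm->tbl24[i].valid = INVALID; is a
bitfield assignment, which the compiler implements as a non-atomic
read-modify-write of the containing 32-bit word. Storing a fully zeroed
entry instead makes the invalidation a single well-defined atomic write.
A sketch, reusing the stand-in struct from the first example:

/* Racy: a bitfield write is a read-modify-write of the whole word and
 * is not guaranteed to reach memory as a single atomic store.
 */
static void invalidate_racy(struct tbl_entry *slot)
{
	slot->valid = 0;
}

/* The patch's approach: overwrite the whole entry atomically. */
static void invalidate_atomic(struct tbl_entry *slot)
{
	struct tbl_entry zero_entry = {0};

	__atomic_store(slot, &zero_entry, __ATOMIC_RELEASE);
}
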
if (lpm->tbl24[i].valid_group == 0 &&
lpm->tbl24[i].depth <= depth) {
- lpm->tbl24[i] = new_tbl24_entry;
+ __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
} else if (lpm->tbl24[i].valid_group == 1) {
/*
* If TBL24 entry is extended, then there has
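
The depth guard in both delete hunks encodes longest-prefix precedence:
a tbl24 entry is rewritten only if it was installed by the deleted rule
or by a shallower one. For example, deleting 10.1.0.0/16 must not
clobber the tbl24 entry for 10.1.1.0/24, whose stored depth (24) is
greater than the deleted rule's (16).
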
tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);
if (tbl8_recycle_index == -EINVAL) {
- /* Set tbl24 before freeing tbl8 to avoid race condition. */
+ /* Set tbl24 before freeing tbl8 to avoid race condition.
+ * Prevent the free of the tbl8 group from being hoisted
+ * above the tbl24 update.
+ */
lpm->tbl24[tbl24_index].valid = 0;
+ __atomic_thread_fence(__ATOMIC_RELEASE);
tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
} else if (tbl8_recycle_index > -1) {
/* Update tbl24 entry. */
.depth = lpm->tbl8[tbl8_recycle_index].depth,
};
- /* Set tbl24 before freeing tbl8 to avoid race condition. */
+ /* Set tbl24 before freeing tbl8 to avoid race condition.
+ * Prevent the free of the tbl8 group from being hoisted
+ * above the tbl24 update.
+ */
lpm->tbl24[tbl24_index] = new_tbl24_entry;
+ __atomic_thread_fence(__ATOMIC_RELEASE);
tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
}
#undef group_idx
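
The two fence hunks above solve a different ordering problem than the
atomic stores: here the dangerous reordering is between the tbl24 update
and the subsequent tbl8_free_v1604(), which marks the group reusable. If
the free were hoisted above the tbl24 write, a reader still holding the
old extended entry could walk into a recycled group. A sketch of the
idiom with hypothetical names (the group-state store is made a relaxed
atomic so the fence formally orders it):

#include <stdint.h>

/* Writer-side teardown: detach the group from tbl24 first, then mark it
 * free. The release fence keeps the detach store from being reordered
 * after the group-state store, so a group is never reusable while a
 * tbl24 entry can still lead readers into it.
 */
static void detach_and_free(uint32_t *tbl24_slot, uint32_t *group_state)
{
	*tbl24_slot = 0;	/* stand-in for the tbl24 update */
	__atomic_thread_fence(__ATOMIC_RELEASE);
	__atomic_store_n(group_state, 0, __ATOMIC_RELAXED);	/* "free" */
}
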