The compiler could generate non-atomic stores when updating a whole
table entry. This may cause an incorrect next hop to be returned if
the byte holding the valid flag is updated before the byte holding
the next hop.

Besides, field-by-field updates of table entries follow
read-modify-write sequences. These operations are neither atomic nor
efficient, and they can leave entries out of sync.

Change to use an atomic store to update the whole table entry.
Suggested-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
Suggested-by: Gavin Hu <gavin.hu@arm.com>
Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
Acked-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
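
For reference, a minimal standalone sketch of the whole-entry atomic
store pattern applied below (hypothetical struct and function names,
not the actual rte_lpm types):

#include <stdint.h>

/* Hypothetical 4-byte entry layout, for illustration only. */
struct tbl_entry {
	uint8_t next_hop;
	uint8_t depth;
	uint8_t valid;
	uint8_t valid_group;
};

static void
entry_update(struct tbl_entry *slot, uint8_t next_hop, uint8_t depth)
{
	/* Build the new value locally first. */
	struct tbl_entry new_entry = {
		.next_hop = next_hop,
		.depth = depth,
		.valid = 1,
		.valid_group = slot->valid_group,
	};

	/* Publish the whole entry with one relaxed atomic store, so a
	 * reader never observes a half-updated entry, instead of
	 * writing valid/depth/next_hop field by field.
	 */
	__atomic_store(slot, &new_entry, __ATOMIC_RELAXED);
}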
tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
/* If a free tbl8 group is found clean it and set as VALID. */
if (!tbl8_entry->valid_group) {
+ struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
+ .valid = INVALID,
+ .depth = 0,
+ .valid_group = VALID,
+ };
+ new_tbl8_entry.next_hop = 0;
+
memset(&tbl8_entry[0], 0,
RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
sizeof(tbl8_entry[0]));
- tbl8_entry->valid_group = VALID;
+ __atomic_store(tbl8_entry, &new_tbl8_entry,
+ __ATOMIC_RELAXED);
/* Return group index for allocated tbl8 group. */
return group_idx;
tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
/* If a free tbl8 group is found clean it and set as VALID. */
if (!tbl8_entry->valid_group) {
+ struct rte_lpm_tbl_entry new_tbl8_entry = {
+ .next_hop = 0,
+ .valid = INVALID,
+ .depth = 0,
+ .valid_group = VALID,
+ };
+
memset(&tbl8_entry[0], 0,
RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
sizeof(tbl8_entry[0]));
- tbl8_entry->valid_group = VALID;
+ __atomic_store(tbl8_entry, &new_tbl8_entry,
+ __ATOMIC_RELAXED);
/* Return group index for allocated tbl8 group. */
return group_idx;
tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
{
/* Set tbl8 group invalid*/
- tbl8[tbl8_group_start].valid_group = INVALID;
+ struct rte_lpm_tbl_entry_v20 zero_tbl8_entry = {
+ .valid = INVALID,
+ .depth = 0,
+ .valid_group = INVALID,
+ };
+ zero_tbl8_entry.next_hop = 0;
+
+ __atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
+ __ATOMIC_RELAXED);
}
static void
tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
{
/* Set tbl8 group invalid*/
- tbl8[tbl8_group_start].valid_group = INVALID;
+ struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
+
+ __atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
+ __ATOMIC_RELAXED);
}
static __rte_noinline int32_t
* Setting tbl8 entry in one go to avoid
* race conditions
*/
- lpm->tbl8[j] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[j],
+ &new_tbl8_entry,
+ __ATOMIC_RELAXED);
* Setting tbl8 entry in one go to avoid
* race conditions
*/
- lpm->tbl8[j] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[j],
+ &new_tbl8_entry,
+ __ATOMIC_RELAXED);
/* Set tbl8 entry. */
for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
- lpm->tbl8[i].depth = depth;
- lpm->tbl8[i].next_hop = next_hop;
- lpm->tbl8[i].valid = VALID;
+ struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
+ .valid = VALID,
+ .depth = depth,
+ .valid_group = lpm->tbl8[i].valid_group,
+ };
+ new_tbl8_entry.next_hop = next_hop;
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
/* Populate new tbl8 with tbl24 value. */
for (i = tbl8_group_start; i < tbl8_group_end; i++) {
- lpm->tbl8[i].valid = VALID;
- lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
- lpm->tbl8[i].next_hop =
- lpm->tbl24[tbl24_index].next_hop;
+ struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
+ .valid = VALID,
+ .depth = lpm->tbl24[tbl24_index].depth,
+ .valid_group = lpm->tbl8[i].valid_group,
+ };
+ new_tbl8_entry.next_hop =
+ lpm->tbl24[tbl24_index].next_hop;
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
}
tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
/* Insert new rule into the tbl8 entry. */
for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
- lpm->tbl8[i].valid = VALID;
- lpm->tbl8[i].depth = depth;
- lpm->tbl8[i].next_hop = next_hop;
+ struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
+ .valid = VALID,
+ .depth = depth,
+ .valid_group = lpm->tbl8[i].valid_group,
+ };
+ new_tbl8_entry.next_hop = next_hop;
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
* Setting tbl8 entry in one go to avoid race
* condition
*/
- lpm->tbl8[i] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
/* Set tbl8 entry. */
for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
- lpm->tbl8[i].depth = depth;
- lpm->tbl8[i].next_hop = next_hop;
- lpm->tbl8[i].valid = VALID;
+ struct rte_lpm_tbl_entry new_tbl8_entry = {
+ .valid = VALID,
+ .depth = depth,
+ .valid_group = lpm->tbl8[i].valid_group,
+ .next_hop = next_hop,
+ };
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
/* Populate new tbl8 with tbl24 value. */
for (i = tbl8_group_start; i < tbl8_group_end; i++) {
- lpm->tbl8[i].valid = VALID;
- lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
- lpm->tbl8[i].next_hop =
- lpm->tbl24[tbl24_index].next_hop;
+ struct rte_lpm_tbl_entry new_tbl8_entry = {
+ .valid = VALID,
+ .depth = lpm->tbl24[tbl24_index].depth,
+ .valid_group = lpm->tbl8[i].valid_group,
+ .next_hop = lpm->tbl24[tbl24_index].next_hop,
+ };
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
}
tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
/* Insert new rule into the tbl8 entry. */
for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
- lpm->tbl8[i].valid = VALID;
- lpm->tbl8[i].depth = depth;
- lpm->tbl8[i].next_hop = next_hop;
+ struct rte_lpm_tbl_entry new_tbl8_entry = {
+ .valid = VALID,
+ .depth = depth,
+ .valid_group = lpm->tbl8[i].valid_group,
+ .next_hop = next_hop,
+ };
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
* Setting tbl8 entry in one go to avoid race
* condition
*/
- lpm->tbl8[i] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
if (lpm->tbl8[j].depth <= depth)
- lpm->tbl8[j] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[j],
+ &new_tbl8_entry,
+ __ATOMIC_RELAXED);
RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
if (lpm->tbl8[j].depth <= depth)
- lpm->tbl8[j] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[j],
+ &new_tbl8_entry,
+ __ATOMIC_RELAXED);
*/
for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
if (lpm->tbl8[i].depth <= depth)
- lpm->tbl8[i] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
/* Set tbl24 before freeing tbl8 to avoid race condition.
* Prevent the free of the tbl8 group from hoisting.
*/
- lpm->tbl24[tbl24_index] = new_tbl24_entry;
+ __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+ __ATOMIC_RELAXED);
__atomic_thread_fence(__ATOMIC_RELEASE);
tbl8_free_v20(lpm->tbl8, tbl8_group_start);
}
*/
for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
if (lpm->tbl8[i].depth <= depth)
- lpm->tbl8[i] = new_tbl8_entry;
+ __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __ATOMIC_RELAXED);
/* Set tbl24 before freeing tbl8 to avoid race condition.
* Prevent the free of the tbl8 group from hoisting.
*/
- lpm->tbl24[tbl24_index] = new_tbl24_entry;
+ __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+ __ATOMIC_RELAXED);
__atomic_thread_fence(__ATOMIC_RELEASE);
tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
}