/*
* Converts given depth value to its corresponding range value.
*/
-static inline uint32_t __attribute__((pure))
+static uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)
{
VERIFY_DEPTH(depth);
* are stored in the rule table from 0 - 31.
* NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
*/
-static inline int32_t
+static int32_t
rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
uint8_t next_hop)
{
return rule_index;
}
-static inline int32_t
+static int32_t
rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
uint32_t next_hop)
{
* Delete a rule from the rule table.
* NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
*/
-static inline void
+static void
rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth)
{
int i;
lpm->rule_info[depth - 1].used_rules--;
}
-static inline void
+static void
rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
{
int i;
* Finds a rule in rule table.
* NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
*/
-static inline int32_t
+static int32_t
rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)
{
uint32_t rule_gindex, last_rule, rule_index;
return -EINVAL;
}
-static inline int32_t
+static int32_t
rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
{
uint32_t rule_gindex, last_rule, rule_index;
/*
* Find, clean and allocate a tbl8.
*/
-static inline int32_t
+static int32_t
tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
{
uint32_t group_idx; /* tbl8 group index. */
return -ENOSPC;
}
-static inline int32_t
+static int32_t
tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
{
uint32_t group_idx; /* tbl8 group index. */
return -ENOSPC;
}
-static inline void
+static void
tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
{
/* Set tbl8 group invalid*/
tbl8[tbl8_group_start].valid_group = INVALID;
}
-static inline void
+static void
tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
{
/* Set tbl8 group invalid*/
tbl8[tbl8_group_start].valid_group = INVALID;
}
-static inline int32_t
+static __rte_noinline int32_t
add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
uint8_t next_hop)
{
/* Setting tbl24 entry in one go to avoid race
* conditions
*/
- lpm->tbl24[i] = new_tbl24_entry;
+ __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
continue;
}
return 0;
}
-static inline int32_t
+static __rte_noinline int32_t
add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
uint32_t next_hop)
{
/* Setting tbl24 entry in one go to avoid race
* conditions
*/
- lpm->tbl24[i] = new_tbl24_entry;
+ __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
continue;
}
return 0;
}
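
The __atomic_store() calls above replace plain struct assignments so the tbl24 entry is published in a single 32-bit store; a plain assignment, or a per-field bitfield update such as the ".valid = INVALID" removed later in this patch, may be split and let a reader observe a half-updated entry. A minimal illustrative sketch of that difference follows; it is not DPDK code, and "struct entry", "update_fieldwise" and "update_whole" are simplified, hypothetical stand-ins for struct rte_lpm_tbl_entry and the patched update paths.

	#include <stdint.h>

	struct entry {
		uint32_t next_hop : 24;
		uint32_t valid    : 1;
		uint32_t depth    : 7;
	};

	static struct entry slot;

	/* Racy: two separate read-modify-writes on the bitfields; a reader
	 * can observe the new next_hop together with the old depth.
	 */
	static void
	update_fieldwise(uint32_t nh, uint32_t d)
	{
		slot.next_hop = nh;
		slot.depth = d;
	}

	/* Pattern used by the patch: build the entry locally, then publish
	 * it with a single 32-bit atomic store.
	 */
	static void
	update_whole(uint32_t nh, uint32_t d)
	{
		struct entry e = { .next_hop = nh, .valid = 1, .depth = d };

		__atomic_store(&slot, &e, __ATOMIC_RELEASE);
	}
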
-static inline int32_t
+static __rte_noinline int32_t
add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
uint8_t next_hop)
{
.depth = 0,
};
- lpm->tbl24[tbl24_index] = new_tbl24_entry;
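+ /* The tbl24 entry must be written only after the
+ * tbl8 entries are written.
+ */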
+ __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
} /* If valid entry but not extended calculate the index into Table8. */
else if (lpm->tbl24[tbl24_index].valid_group == 0) {
.depth = 0,
};
- lpm->tbl24[tbl24_index] = new_tbl24_entry;
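+ /* The tbl24 entry must be written only after the
+ * tbl8 entries are written.
+ */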
+ __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
} else { /*
* If it is valid, extended entry calculate the index into tbl8.
return 0;
}
-static inline int32_t
+static __rte_noinline int32_t
add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
uint32_t next_hop)
{
.depth = 0,
};
- lpm->tbl24[tbl24_index] = new_tbl24_entry;
+ /* The tbl24 entry must be written only after the
+ * tbl8 entries are written.
+ */
+ __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
} /* If valid entry but not extended calculate the index into Table8. */
else if (lpm->tbl24[tbl24_index].valid_group == 0) {
.depth = 0,
};
- lpm->tbl24[tbl24_index] = new_tbl24_entry;
+ /* The tbl24 entry must be written only after the
+ * tbl8 entries are written.
+ */
+ __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
} else { /*
* If it is valid, extended entry calculate the index into tbl8.
MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,
uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604);
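
The RELEASE stores added in add_depth_big_v20()/add_depth_big_v1604() above are the writer half of a publish pattern: the tbl8 entries must be globally visible no later than the tbl24 entry that points at them. A minimal sketch of how such a store pairs with a reader side follows; it is illustrative only, not the actual rte_lpm_lookup() code, and writer/reader/tbl24_slot/tbl8_payload are hypothetical names.

	#include <stdint.h>

	static uint32_t tbl24_slot;   /* stands in for a tbl24 entry */
	static uint32_t tbl8_payload; /* stands in for a tbl8 entry  */

	static void
	writer(uint32_t new_entry, uint32_t payload)
	{
		tbl8_payload = payload; /* fill the tbl8 group first */
		/* RELEASE: the tbl8 write above becomes visible no later
		 * than the new tbl24 entry.
		 */
		__atomic_store_n(&tbl24_slot, new_entry, __ATOMIC_RELEASE);
	}

	static uint32_t
	reader(void)
	{
		/* ACQUIRE pairs with the RELEASE store: if the new entry is
		 * observed, the tbl8 write is observed as well.
		 */
		uint32_t e = __atomic_load_n(&tbl24_slot, __ATOMIC_ACQUIRE);

		if (e != 0)
			return tbl8_payload;
		return 0;
	}
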
-static inline int32_t
+static int32_t
find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
uint8_t *sub_rule_depth)
{
return -1;
}
-static inline int32_t
+static int32_t
find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
uint8_t *sub_rule_depth)
{
return -1;
}
-static inline int32_t
+static int32_t
delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
if (lpm->tbl24[i].valid_group == 0 &&
lpm->tbl24[i].depth <= depth) {
- lpm->tbl24[i].valid = INVALID;
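+ /* Invalidate the whole entry in a single atomic store so
+ * a reader never observes a partially cleared entry.
+ */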
+ struct rte_lpm_tbl_entry_v20
+ zero_tbl24_entry = {
+ .valid = INVALID,
+ .depth = 0,
+ .valid_group = 0,
+ };
+ zero_tbl24_entry.next_hop = 0;
+ __atomic_store(&lpm->tbl24[i],
+ &zero_tbl24_entry, __ATOMIC_RELEASE);
} else if (lpm->tbl24[i].valid_group == 1) {
/*
* If TBL24 entry is extended, then there has
if (lpm->tbl24[i].valid_group == 0 &&
lpm->tbl24[i].depth <= depth) {
- lpm->tbl24[i] = new_tbl24_entry;
+ __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
} else if (lpm->tbl24[i].valid_group == 1) {
/*
* If TBL24 entry is extended, then there has
return 0;
}
-static inline int32_t
+static int32_t
delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
+ struct rte_lpm_tbl_entry zero_tbl24_entry = {0};
/* Calculate the range and index into Table24. */
tbl24_range = depth_to_range(depth);
tbl24_index = (ip_masked >> 8);
/*
* Firstly check the sub_rule_index. A -1 indicates no replacement rule
if (lpm->tbl24[i].valid_group == 0 &&
lpm->tbl24[i].depth <= depth) {
- lpm->tbl24[i].valid = INVALID;
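+ /* Invalidate the whole entry in a single atomic store so
+ * a reader never observes a partially cleared entry.
+ */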
+ __atomic_store(&lpm->tbl24[i],
+ &zero_tbl24_entry, __ATOMIC_RELEASE);
} else if (lpm->tbl24[i].valid_group == 1) {
/*
* If TBL24 entry is extended, then there has
if (lpm->tbl24[i].valid_group == 0 &&
lpm->tbl24[i].depth <= depth) {
- lpm->tbl24[i] = new_tbl24_entry;
+ __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
+ __ATOMIC_RELEASE);
} else if (lpm->tbl24[i].valid_group == 1) {
/*
* If TBL24 entry is extended, then there has
* Return of value > -1 means tbl8 is in use but has all the same values and
* thus can be recycled
*/
-static inline int32_t
+static int32_t
tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
uint32_t tbl8_group_start)
{
return -EINVAL;
}
-static inline int32_t
+static int32_t
tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,
uint32_t tbl8_group_start)
{
return -EINVAL;
}
-static inline int32_t
+static int32_t
delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);
if (tbl8_recycle_index == -EINVAL) {
- /* Set tbl24 before freeing tbl8 to avoid race condition. */
+ /* Set tbl24 before freeing tbl8 to avoid race condition.
+ * Prevent the free of the tbl8 group from being hoisted
+ * above the tbl24 update.
+ */
lpm->tbl24[tbl24_index].valid = 0;
+ __atomic_thread_fence(__ATOMIC_RELEASE);
tbl8_free_v20(lpm->tbl8, tbl8_group_start);
} else if (tbl8_recycle_index > -1) {
/* Update tbl24 entry. */
.depth = lpm->tbl8[tbl8_recycle_index].depth,
};
- /* Set tbl24 before freeing tbl8 to avoid race condition. */
+ /* Set tbl24 before freeing tbl8 to avoid race condition.
+ * Prevent the free of the tbl8 group from being hoisted
+ * above the tbl24 update.
+ */
lpm->tbl24[tbl24_index] = new_tbl24_entry;
+ __atomic_thread_fence(__ATOMIC_RELEASE);
tbl8_free_v20(lpm->tbl8, tbl8_group_start);
}
return 0;
}
-static inline int32_t
+static int32_t
delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);
if (tbl8_recycle_index == -EINVAL) {
- /* Set tbl24 before freeing tbl8 to avoid race condition. */
+ /* Set tbl24 before freeing tbl8 to avoid race condition.
+ * Prevent the free of the tbl8 group from being hoisted
+ * above the tbl24 update.
+ */
lpm->tbl24[tbl24_index].valid = 0;
+ __atomic_thread_fence(__ATOMIC_RELEASE);
tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
} else if (tbl8_recycle_index > -1) {
/* Update tbl24 entry. */
.depth = lpm->tbl8[tbl8_recycle_index].depth,
};
- /* Set tbl24 before freeing tbl8 to avoid race condition. */
+ /* Set tbl24 before freeing tbl8 to avoid race condition.
+ * Prevent the free of the tbl8 group from being hoisted
+ * above the tbl24 update.
+ */
lpm->tbl24[tbl24_index] = new_tbl24_entry;
+ __atomic_thread_fence(__ATOMIC_RELEASE);
tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
}
#undef group_idx
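
Without the __atomic_thread_fence(__ATOMIC_RELEASE) calls added in delete_depth_big_v20() and delete_depth_big_v1604() above, the stores performed by tbl8_free_*() (marking the group invalid and reusable) could become visible before the tbl24 entry is rewritten, so a reader following the old tbl24 entry could walk into a tbl8 group that is already being recycled. A minimal sketch of the writer-side ordering the fence enforces; the two flags below are hypothetical stand-ins for the tbl24 entry and the tbl8 group header, not DPDK code.

	#include <stdint.h>

	static uint32_t tbl24_entry = 1;      /* points at the tbl8 group   */
	static uint32_t tbl8_group_valid = 1; /* tbl8 group still allocated */

	static void
	unhook_then_free(void)
	{
		/* Detach the tbl24 entry first ... */
		tbl24_entry = 0;
		/* ... keep the store below from being hoisted above it ... */
		__atomic_thread_fence(__ATOMIC_RELEASE);
		/* ... and only then hand the tbl8 group back for reuse. */
		tbl8_group_valid = 0;
	}
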