#include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_malloc.h>
#include <rte_memzone.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);
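+/* List of LPM objects; the tailq is registered with EAL at init time so the
+ * list head is shared by all processes.
+ */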
+static struct rte_tailq_elem rte_lpm_tailq = {
+ .name = "RTE_LPM",
+};
+EAL_REGISTER_TAILQ(rte_lpm_tailq)
+
#define MAX_DEPTH_TBL24 24
enum valid_flag {
return 1 << (MAX_DEPTH_TBL24 - depth);
/* Else if depth is greater than 24 */
- return (1 << (RTE_LPM_MAX_DEPTH - depth));
+ return 1 << (RTE_LPM_MAX_DEPTH - depth);
}
/*
struct rte_tailq_entry *te;
struct rte_lpm_list *lpm_list;
- /* check that we have an initialised tail queue */
- if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM,
- rte_lpm_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return NULL;
- }
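+ /* Fetch the tailq head registered above and cast it to the LPM list type. */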
+ lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
TAILQ_FOREACH(te, lpm_list, next) {
uint32_t mem_size;
struct rte_lpm_list *lpm_list;
- /* check that we have an initialised tail queue */
- if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM,
- rte_lpm_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return NULL;
- }
+ lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
- RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl24_entry) != 2);
- RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl8_entry) != 2);
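+ /* tbl24 and tbl8 now share a single 2-byte entry type; in tbl24 entries the
+ * valid_group bit takes over the role of the old ext_entry flag.
+ */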
+ RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 2);
/* Check user arguments. */
if ((name == NULL) || (socket_id < -1) || (max_rules == 0)){
if (lpm == NULL)
return;
- /* check that we have an initialised tail queue */
- if ((lpm_list =
- RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM, rte_lpm_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return;
- }
+ lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
return rule_index;
}
}
+
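+ /* No free slot remains in the rules table. */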
+ if (rule_index == lpm->max_rules)
+ return -ENOSPC;
} else {
/* Calculate the position in which the rule will be stored. */
rule_index = 0;
for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
/* If rule is found return the rule index. */
if (lpm->rules_tbl[rule_index].ip == ip_masked)
- return (rule_index);
+ return rule_index;
}
- /* If rule is not found return -E_RTE_NO_TAILQ. */
- return -E_RTE_NO_TAILQ;
+ /* If rule is not found return -EINVAL. */
+ return -EINVAL;
}
/*
* Find, clean and allocate a tbl8.
*/
static inline int32_t
-tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)
+tbl8_alloc(struct rte_lpm_tbl_entry *tbl8)
{
- uint32_t tbl8_gindex; /* tbl8 group index. */
- struct rte_lpm_tbl8_entry *tbl8_entry;
+ uint32_t group_idx; /* tbl8 group index. */
+ struct rte_lpm_tbl_entry *tbl8_entry;
/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
- for (tbl8_gindex = 0; tbl8_gindex < RTE_LPM_TBL8_NUM_GROUPS;
- tbl8_gindex++) {
- tbl8_entry = &tbl8[tbl8_gindex *
+ for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
+ group_idx++) {
+ tbl8_entry = &tbl8[group_idx *
RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
/* If a free tbl8 group is found clean it and set as VALID. */
if (!tbl8_entry->valid_group) {
tbl8_entry->valid_group = VALID;
/* Return group index for allocated tbl8 group. */
- return tbl8_gindex;
+ return group_idx;
}
}
}
static inline void
-tbl8_free(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
+tbl8_free(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
{
/* Set tbl8 group invalid*/
tbl8[tbl8_group_start].valid_group = INVALID;
* For invalid OR valid and non-extended tbl 24 entries set
* entry.
*/
- if (!lpm->tbl24[i].valid || (lpm->tbl24[i].ext_entry == 0 &&
+ if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
lpm->tbl24[i].depth <= depth)) {
- struct rte_lpm_tbl24_entry new_tbl24_entry = {
+ struct rte_lpm_tbl_entry new_tbl24_entry = {
{ .next_hop = next_hop, },
.valid = VALID,
- .ext_entry = 0,
+ .valid_group = 0,
.depth = depth,
};
/* Setting tbl24 entry in one go to avoid race
- * conditions */
+ * conditions
+ */
lpm->tbl24[i] = new_tbl24_entry;
continue;
}
- /* If tbl24 entry is valid and extended calculate the index
- * into tbl8. */
- tbl8_index = lpm->tbl24[i].tbl8_gindex *
- RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
- tbl8_group_end = tbl8_index + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-
- for (j = tbl8_index; j < tbl8_group_end; j++) {
- if (!lpm->tbl8[j].valid ||
- lpm->tbl8[j].depth <= depth) {
- struct rte_lpm_tbl8_entry new_tbl8_entry = {
- .valid = VALID,
- .valid_group = VALID,
- .depth = depth,
- .next_hop = next_hop,
- };
-
- /*
- * Setting tbl8 entry in one go to avoid race
- * conditions
- */
- lpm->tbl8[j] = new_tbl8_entry;
-
- continue;
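+ /* Only descend into a tbl8 group when this tbl24 entry actually points to one. */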
+ if (lpm->tbl24[i].valid_group == 1) {
+ /* If tbl24 entry is valid and extended calculate the
+ * index into tbl8.
+ */
+ tbl8_index = lpm->tbl24[i].group_idx *
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+ tbl8_group_end = tbl8_index +
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+ for (j = tbl8_index; j < tbl8_group_end; j++) {
+ if (!lpm->tbl8[j].valid ||
+ lpm->tbl8[j].depth <= depth) {
+ struct rte_lpm_tbl_entry
+ new_tbl8_entry = {
+ .valid = VALID,
+ .valid_group = VALID,
+ .depth = depth,
+ .next_hop = next_hop,
+ };
+
+ /*
+ * Setting tbl8 entry in one go to avoid
+ * race conditions
+ */
+ lpm->tbl8[j] = new_tbl8_entry;
+
+ continue;
+ }
}
}
}
* so assign whole structure in one go
*/
- struct rte_lpm_tbl24_entry new_tbl24_entry = {
- { .tbl8_gindex = (uint8_t)tbl8_group_index, },
+ struct rte_lpm_tbl_entry new_tbl24_entry = {
+ { .group_idx = (uint8_t)tbl8_group_index, },
.valid = VALID,
- .ext_entry = 1,
+ .valid_group = 1,
.depth = 0,
};
lpm->tbl24[tbl24_index] = new_tbl24_entry;
}
/* If valid entry but not extended calculate the index into Table8. */
- else if (lpm->tbl24[tbl24_index].ext_entry == 0) {
+ else if (lpm->tbl24[tbl24_index].valid_group == 0) {
/* Search for free tbl8 group. */
tbl8_group_index = tbl8_alloc(lpm->tbl8);
* so assign whole structure in one go.
*/
- struct rte_lpm_tbl24_entry new_tbl24_entry = {
- { .tbl8_gindex = (uint8_t)tbl8_group_index, },
+ struct rte_lpm_tbl_entry new_tbl24_entry = {
+ { .group_idx = (uint8_t)tbl8_group_index, },
.valid = VALID,
- .ext_entry = 1,
+ .valid_group = 1,
.depth = 0,
};
else { /*
* If it is valid, extended entry calculate the index into tbl8.
*/
- tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
+ tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
tbl8_group_start = tbl8_group_index *
RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
if (!lpm->tbl8[i].valid ||
lpm->tbl8[i].depth <= depth) {
- struct rte_lpm_tbl8_entry new_tbl8_entry = {
+ struct rte_lpm_tbl_entry new_tbl8_entry = {
.valid = VALID,
.depth = depth,
.next_hop = next_hop,
*/
for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
- if (lpm->tbl24[i].ext_entry == 0 &&
+ if (lpm->tbl24[i].valid_group == 0 &&
lpm->tbl24[i].depth <= depth ) {
lpm->tbl24[i].valid = INVALID;
- }
- else {
+ } else if (lpm->tbl24[i].valid_group == 1) {
/*
* If TBL24 entry is extended, then there has
* to be a rule with depth >= 25 in the
* associated TBL8 group.
*/
- tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
+ tbl8_group_index = lpm->tbl24[i].group_idx;
tbl8_index = tbl8_group_index *
RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
* associated with this rule.
*/
- struct rte_lpm_tbl24_entry new_tbl24_entry = {
+ struct rte_lpm_tbl_entry new_tbl24_entry = {
{.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
.valid = VALID,
- .ext_entry = 0,
+ .valid_group = 0,
.depth = sub_rule_depth,
};
- struct rte_lpm_tbl8_entry new_tbl8_entry = {
+ struct rte_lpm_tbl_entry new_tbl8_entry = {
.valid = VALID,
+ .valid_group = VALID,
.depth = sub_rule_depth,
.next_hop = lpm->rules_tbl
[sub_rule_index].next_hop,
for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
- if (lpm->tbl24[i].ext_entry == 0 &&
+ if (lpm->tbl24[i].valid_group == 0 &&
lpm->tbl24[i].depth <= depth ) {
lpm->tbl24[i] = new_tbl24_entry;
- }
- else {
+ } else if (lpm->tbl24[i].valid_group == 1) {
/*
* If TBL24 entry is extended, then there has
* to be a rule with depth >= 25 in the
* associated TBL8 group.
*/
- tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
+ tbl8_group_index = lpm->tbl24[i].group_idx;
tbl8_index = tbl8_group_index *
RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
* thus can be recycled
*/
static inline int32_t
-tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
+tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
{
uint32_t tbl8_group_end, i;
tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
tbl24_index = ip_masked >> 8;
/* Calculate the index into tbl8 and range. */
- tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
+ tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
tbl8_range = depth_to_range(depth);
}
else {
/* Set new tbl8 entry. */
- struct rte_lpm_tbl8_entry new_tbl8_entry = {
+ struct rte_lpm_tbl_entry new_tbl8_entry = {
.valid = VALID,
.depth = sub_rule_depth,
.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
}
else if (tbl8_recycle_index > -1) {
/* Update tbl24 entry. */
- struct rte_lpm_tbl24_entry new_tbl24_entry = {
+ struct rte_lpm_tbl_entry new_tbl24_entry = {
{ .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
.valid = VALID,
- .ext_entry = 0,
+ .valid_group = 0,
.depth = lpm->tbl8[tbl8_recycle_index].depth,
};
/*
* Check if rule_to_delete_index was found. If no rule was found the
- * function rule_find returns -E_RTE_NO_TAILQ.
+ * function rule_find returns -EINVAL.
*/
if (rule_to_delete_index < 0)
- return -E_RTE_NO_TAILQ;
+ return -EINVAL;
/* Delete the rule from the rule table. */
rule_delete(lpm, rule_to_delete_index, depth);
/* Delete all rules from the rules table. */
memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
}
-