VALID
};
+/** @internal Rule structure. */
+struct rte_lpm_rule {
+ uint32_t ip; /**< Rule IP address. */
+ uint32_t next_hop; /**< Rule next hop. */
+};
+
+/** @internal Contains metadata about the rules table. */
+struct rte_lpm_rule_info {
+ uint32_t used_rules; /**< Used rules so far. */
+ uint32_t first_rule; /**< Indexes the first rule of a given depth. */
+};
+
/** @internal LPM structure. */
struct __rte_lpm {
- /* LPM metadata. */
+ /* Exposed LPM data. */
struct rte_lpm lpm;
+ /* LPM metadata. */
+ char name[RTE_LPM_NAMESIZE]; /**< Name of the lpm. */
+ uint32_t max_rules; /**< Max. balanced rules per lpm. */
+ uint32_t number_tbl8s; /**< Number of tbl8s. */
+ /** Rule info table. */
+ struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH];
+ struct rte_lpm_rule *rules_tbl; /**< LPM rules. */
+
/* RCU config. */
struct rte_rcu_qsbr *v; /* RCU QSBR variable. */
enum rte_lpm_qsbr_mode rcu_mode;/* Blocking, defer queue. */
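/*
 * Illustrative sketch, not part of the patch: with the rule data now private
 * to struct __rte_lpm, the library recovers the internal structure from the
 * public struct rte_lpm handle via container_of(), as the functions below do.
 * A minimal stand-alone model of that pattern (the types here are
 * placeholders, not the real DPDK ones):
 */
#include <stddef.h>
#include <stdio.h>

#ifndef container_of	/* same idea as the macro in rte_common.h */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#endif

struct public_handle { int tbl24_stub; };	/* stands in for struct rte_lpm */
struct private_obj {				/* stands in for struct __rte_lpm */
	struct public_handle pub;		/* exposed part, first member */
	int max_rules;				/* hidden metadata */
};

int main(void)
{
	struct private_obj obj = { .max_rules = 256 };
	struct public_handle *h = &obj.pub;	/* what the API hands out */
	struct private_obj *back = container_of(h, struct private_obj, pub);

	printf("recovered max_rules = %d\n", back->max_rules);	/* prints 256 */
	return 0;
}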
struct rte_lpm *
rte_lpm_find_existing(const char *name)
{
- struct rte_lpm *l = NULL;
+ struct __rte_lpm *i_lpm = NULL;
struct rte_tailq_entry *te;
struct rte_lpm_list *lpm_list;
rte_mcfg_tailq_read_lock();
TAILQ_FOREACH(te, lpm_list, next) {
- l = te->data;
- if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
+ i_lpm = te->data;
+ if (strncmp(name, i_lpm->name, RTE_LPM_NAMESIZE) == 0)
break;
}
rte_mcfg_tailq_read_unlock();
	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
}
- return l;
+ return &i_lpm->lpm;
}
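/*
 * Illustrative sketch, not part of the patch: callers still receive only the
 * public struct rte_lpm pointer; the name comparison against the now-hidden
 * i_lpm->name happens internally. The table name "route_table" below is an
 * arbitrary example, and rte_errno is assumed to carry the failure cause as
 * in other DPDK *_find_existing() helpers.
 */
#include <stdio.h>
#include <rte_lpm.h>
#include <rte_errno.h>

static struct rte_lpm *
attach_to_table(void)
{
	struct rte_lpm *lpm = rte_lpm_find_existing("route_table");

	if (lpm == NULL)
		printf("lookup failed: %s\n", rte_strerror(rte_errno));
	return lpm;
}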
/*
const struct rte_lpm_config *config)
{
char mem_name[RTE_LPM_NAMESIZE];
- struct __rte_lpm *internal_lpm;
+ struct __rte_lpm *i_lpm;
struct rte_lpm *lpm = NULL;
struct rte_tailq_entry *te;
uint32_t mem_size, rules_size, tbl8s_size;
/* Guarantee there's no existing entry with the same name. */
TAILQ_FOREACH(te, lpm_list, next) {
- lpm = te->data;
- if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
+ i_lpm = te->data;
+ if (strncmp(name, i_lpm->name, RTE_LPM_NAMESIZE) == 0)
break;
}
if (te != NULL) {
- lpm = NULL;
rte_errno = EEXIST;
goto exit;
}
/* Determine the amount of memory to allocate. */
- mem_size = sizeof(*internal_lpm);
+ mem_size = sizeof(*i_lpm);
rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
tbl8s_size = sizeof(struct rte_lpm_tbl_entry) *
RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s;
}
/* Allocate memory to store the LPM data structures. */
- internal_lpm = rte_zmalloc_socket(mem_name, mem_size,
+ i_lpm = rte_zmalloc_socket(mem_name, mem_size,
RTE_CACHE_LINE_SIZE, socket_id);
- if (internal_lpm == NULL) {
+ if (i_lpm == NULL) {
RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
rte_free(te);
rte_errno = ENOMEM;
goto exit;
}
- lpm = &internal_lpm->lpm;
- lpm->rules_tbl = rte_zmalloc_socket(NULL,
+ i_lpm->rules_tbl = rte_zmalloc_socket(NULL,
(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
- if (lpm->rules_tbl == NULL) {
+ if (i_lpm->rules_tbl == NULL) {
RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
- rte_free(internal_lpm);
- internal_lpm = NULL;
- lpm = NULL;
+ rte_free(i_lpm);
+ i_lpm = NULL;
rte_free(te);
rte_errno = ENOMEM;
goto exit;
}
- lpm->tbl8 = rte_zmalloc_socket(NULL,
+ i_lpm->lpm.tbl8 = rte_zmalloc_socket(NULL,
(size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);
- if (lpm->tbl8 == NULL) {
+ if (i_lpm->lpm.tbl8 == NULL) {
RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
- rte_free(lpm->rules_tbl);
- rte_free(internal_lpm);
- internal_lpm = NULL;
- lpm = NULL;
+ rte_free(i_lpm->rules_tbl);
+ rte_free(i_lpm);
+ i_lpm = NULL;
rte_free(te);
rte_errno = ENOMEM;
goto exit;
}
/* Save user arguments. */
- lpm->max_rules = config->max_rules;
- lpm->number_tbl8s = config->number_tbl8s;
- strlcpy(lpm->name, name, sizeof(lpm->name));
+ i_lpm->max_rules = config->max_rules;
+ i_lpm->number_tbl8s = config->number_tbl8s;
+ strlcpy(i_lpm->name, name, sizeof(i_lpm->name));
- te->data = lpm;
+ te->data = i_lpm;
+ lpm = &i_lpm->lpm;
TAILQ_INSERT_TAIL(lpm_list, te, next);
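/*
 * Illustrative sketch, not part of the patch: the create path above makes one
 * cache-aligned allocation for struct __rte_lpm plus separate rules_tbl and
 * tbl8 arrays, all sized from struct rte_lpm_config. A hedged example of
 * filling that config (the name "example_lpm" and the sizes are arbitrary):
 */
#include <stdio.h>
#include <rte_lpm.h>
#include <rte_errno.h>

static struct rte_lpm *
make_example_table(int socket_id)
{
	struct rte_lpm_config cfg = {
		.max_rules = 1024,	/* stored as i_lpm->max_rules */
		.number_tbl8s = 256,	/* stored as i_lpm->number_tbl8s */
		.flags = 0,
	};
	struct rte_lpm *lpm = rte_lpm_create("example_lpm", socket_id, &cfg);

	if (lpm == NULL)
		printf("rte_lpm_create: %s\n", rte_strerror(rte_errno));
	return lpm;
}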
void
rte_lpm_free(struct rte_lpm *lpm)
{
- struct __rte_lpm *internal_lpm;
struct rte_lpm_list *lpm_list;
struct rte_tailq_entry *te;
+ struct __rte_lpm *i_lpm;
/* Check user arguments. */
if (lpm == NULL)
return;
+ i_lpm = container_of(lpm, struct __rte_lpm, lpm);
lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
/* find our tailq entry */
TAILQ_FOREACH(te, lpm_list, next) {
- if (te->data == (void *) lpm)
+ if (te->data == (void *)i_lpm)
break;
}
if (te != NULL)
rte_mcfg_tailq_write_unlock();
- internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
- if (internal_lpm->dq != NULL)
- rte_rcu_qsbr_dq_delete(internal_lpm->dq);
- rte_free(lpm->tbl8);
- rte_free(lpm->rules_tbl);
- rte_free(internal_lpm);
+ if (i_lpm->dq != NULL)
+ rte_rcu_qsbr_dq_delete(i_lpm->dq);
+ rte_free(i_lpm->lpm.tbl8);
+ rte_free(i_lpm->rules_tbl);
+ rte_free(i_lpm);
rte_free(te);
}
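/*
 * Illustrative sketch, not part of the patch: struct rte_lpm is the first
 * member of struct __rte_lpm, so &i_lpm->lpm and i_lpm have the same address
 * and the tailq comparison above matches either pointer; storing i_lpm in
 * te->data simply keeps the bookkeeping consistent. A stand-alone check of
 * that first-member property (placeholder types, not the DPDK ones):
 */
#include <assert.h>
#include <stddef.h>

struct pub { int x; };
struct priv { struct pub p; int hidden; };

int main(void)
{
	struct priv obj;

	/* First-member embedding: offset 0, identical addresses. */
	assert(offsetof(struct priv, p) == 0);
	assert((void *)&obj == (void *)&obj.p);
	return 0;
}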
static void
__lpm_rcu_qsbr_free_resource(void *p, void *data, unsigned int n)
{
- struct rte_lpm_tbl_entry *tbl8 = ((struct rte_lpm *)p)->tbl8;
+ struct rte_lpm_tbl_entry *tbl8 = ((struct __rte_lpm *)p)->lpm.tbl8;
struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
uint32_t tbl8_group_index = *(uint32_t *)data;
{
struct rte_rcu_qsbr_dq_parameters params = {0};
char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
- struct __rte_lpm *internal_lpm;
+ struct __rte_lpm *i_lpm;
if (lpm == NULL || cfg == NULL) {
rte_errno = EINVAL;
return 1;
}
- internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
- if (internal_lpm->v != NULL) {
+ i_lpm = container_of(lpm, struct __rte_lpm, lpm);
+ if (i_lpm->v != NULL) {
rte_errno = EEXIST;
return 1;
}
} else if (cfg->mode == RTE_LPM_QSBR_MODE_DQ) {
/* Init QSBR defer queue. */
snprintf(rcu_dq_name, sizeof(rcu_dq_name),
- "LPM_RCU_%s", lpm->name);
+ "LPM_RCU_%s", i_lpm->name);
params.name = rcu_dq_name;
params.size = cfg->dq_size;
if (params.size == 0)
- params.size = lpm->number_tbl8s;
+ params.size = i_lpm->number_tbl8s;
params.trigger_reclaim_limit = cfg->reclaim_thd;
params.max_reclaim_size = cfg->reclaim_max;
if (params.max_reclaim_size == 0)
params.max_reclaim_size = RTE_LPM_RCU_DQ_RECLAIM_MAX;
params.esize = sizeof(uint32_t); /* tbl8 group index */
params.free_fn = __lpm_rcu_qsbr_free_resource;
- params.p = lpm;
+ params.p = i_lpm;
params.v = cfg->v;
- internal_lpm->dq = rte_rcu_qsbr_dq_create(&params);
- if (internal_lpm->dq == NULL) {
+ i_lpm->dq = rte_rcu_qsbr_dq_create(&params);
+ if (i_lpm->dq == NULL) {
RTE_LOG(ERR, LPM, "LPM defer queue creation failed\n");
return 1;
}
rte_errno = EINVAL;
return 1;
}
- internal_lpm->rcu_mode = cfg->mode;
- internal_lpm->v = cfg->v;
+ i_lpm->rcu_mode = cfg->mode;
+ i_lpm->v = cfg->v;
return 0;
}
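/*
 * Illustrative sketch, not part of the patch: wiring an RCU QSBR variable to
 * the LPM so that the i_lpm->v, i_lpm->rcu_mode and i_lpm->dq fields set
 * above get populated. Assumes a single reader thread; the allocation name
 * and the zero (use-the-default) queue sizes are arbitrary choices.
 */
#include <rte_lpm.h>
#include <rte_rcu_qsbr.h>
#include <rte_malloc.h>

static int
enable_lpm_rcu(struct rte_lpm *lpm)
{
	size_t sz = rte_rcu_qsbr_get_memsize(1);
	struct rte_rcu_qsbr *v = rte_zmalloc("lpm_qsbr", sz, RTE_CACHE_LINE_SIZE);
	struct rte_lpm_rcu_config cfg = {
		.v = v,
		.mode = RTE_LPM_QSBR_MODE_DQ,	/* reclaim tbl8 groups via defer queue */
		.dq_size = 0,			/* 0: library falls back to number_tbl8s */
		.reclaim_thd = 0,
		.reclaim_max = 0,		/* 0: RTE_LPM_RCU_DQ_RECLAIM_MAX */
	};

	if (v == NULL)
		return -1;
	rte_rcu_qsbr_init(v, 1);
	return rte_lpm_rcu_qsbr_add(lpm, &cfg);
}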
* NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
*/
static int32_t
-rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
+rule_add(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
uint32_t next_hop)
{
uint32_t rule_gindex, rule_index, last_rule;
VERIFY_DEPTH(depth);
/* Scan through rule group to see if rule already exists. */
- if (lpm->rule_info[depth - 1].used_rules > 0) {
+ if (i_lpm->rule_info[depth - 1].used_rules > 0) {
/* rule_gindex stands for rule group index. */
- rule_gindex = lpm->rule_info[depth - 1].first_rule;
+ rule_gindex = i_lpm->rule_info[depth - 1].first_rule;
/* Initialise rule_index to point to start of rule group. */
rule_index = rule_gindex;
/* Last rule = Last used rule in this rule group. */
- last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
+ last_rule = rule_gindex + i_lpm->rule_info[depth - 1].used_rules;
for (; rule_index < last_rule; rule_index++) {
/* If rule already exists update next hop and return. */
- if (lpm->rules_tbl[rule_index].ip == ip_masked) {
+ if (i_lpm->rules_tbl[rule_index].ip == ip_masked) {
- if (lpm->rules_tbl[rule_index].next_hop
+ if (i_lpm->rules_tbl[rule_index].next_hop
== next_hop)
return -EEXIST;
- lpm->rules_tbl[rule_index].next_hop = next_hop;
+ i_lpm->rules_tbl[rule_index].next_hop = next_hop;
return rule_index;
}
}
- if (rule_index == lpm->max_rules)
+ if (rule_index == i_lpm->max_rules)
return -ENOSPC;
} else {
/* Calculate the position in which the rule will be stored. */
rule_index = 0;
for (i = depth - 1; i > 0; i--) {
- if (lpm->rule_info[i - 1].used_rules > 0) {
- rule_index = lpm->rule_info[i - 1].first_rule
- + lpm->rule_info[i - 1].used_rules;
+ if (i_lpm->rule_info[i - 1].used_rules > 0) {
+ rule_index = i_lpm->rule_info[i - 1].first_rule
+ + i_lpm->rule_info[i - 1].used_rules;
break;
}
}
- if (rule_index == lpm->max_rules)
+ if (rule_index == i_lpm->max_rules)
return -ENOSPC;
- lpm->rule_info[depth - 1].first_rule = rule_index;
+ i_lpm->rule_info[depth - 1].first_rule = rule_index;
}
/* Make room for the new rule in the array. */
for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
- if (lpm->rule_info[i - 1].first_rule
- + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
+ if (i_lpm->rule_info[i - 1].first_rule
+ + i_lpm->rule_info[i - 1].used_rules == i_lpm->max_rules)
return -ENOSPC;
- if (lpm->rule_info[i - 1].used_rules > 0) {
- lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
- + lpm->rule_info[i - 1].used_rules]
- = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
- lpm->rule_info[i - 1].first_rule++;
+ if (i_lpm->rule_info[i - 1].used_rules > 0) {
+ i_lpm->rules_tbl[i_lpm->rule_info[i - 1].first_rule
+ + i_lpm->rule_info[i - 1].used_rules]
+ = i_lpm->rules_tbl[i_lpm->rule_info[i - 1].first_rule];
+ i_lpm->rule_info[i - 1].first_rule++;
}
}
/* Add the new rule. */
- lpm->rules_tbl[rule_index].ip = ip_masked;
- lpm->rules_tbl[rule_index].next_hop = next_hop;
+ i_lpm->rules_tbl[rule_index].ip = ip_masked;
+ i_lpm->rules_tbl[rule_index].next_hop = next_hop;
/* Increment the used rules counter for this rule group. */
- lpm->rule_info[depth - 1].used_rules++;
+ i_lpm->rule_info[depth - 1].used_rules++;
return rule_index;
}
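/*
 * Illustrative sketch, not part of the patch: rules_tbl is a flat array in
 * which the rules of each depth form one contiguous group described by
 * rule_info[depth - 1].first_rule / .used_rules, and rule_add() opens a slot
 * by pushing the first rule of every deeper group to that group's end. A
 * stand-alone model of just that bookkeeping (duplicate detection and the
 * ENOSPC checks of the real function are omitted; types are placeholders):
 */
#include <stdint.h>
#include <stdio.h>

#define MODEL_MAX_DEPTH 4
#define MODEL_MAX_RULES 8

struct model_info { uint32_t first_rule, used_rules; };

static struct model_info rule_info[MODEL_MAX_DEPTH];
static uint32_t rules_tbl[MODEL_MAX_RULES];	/* next-hop payload only */

static void model_rule_add(uint8_t depth, uint32_t next_hop)
{
	uint32_t idx = 0, i;

	if (rule_info[depth - 1].used_rules == 0) {
		/* New group starts after the last group of lower depth. */
		for (i = depth - 1; i > 0; i--) {
			if (rule_info[i - 1].used_rules > 0) {
				idx = rule_info[i - 1].first_rule +
					rule_info[i - 1].used_rules;
				break;
			}
		}
		rule_info[depth - 1].first_rule = idx;
	} else {
		idx = rule_info[depth - 1].first_rule +
			rule_info[depth - 1].used_rules;
	}

	/* Make room: move the head of each deeper group to its own tail. */
	for (i = MODEL_MAX_DEPTH; i > depth; i--) {
		if (rule_info[i - 1].used_rules > 0) {
			rules_tbl[rule_info[i - 1].first_rule +
				rule_info[i - 1].used_rules] =
				rules_tbl[rule_info[i - 1].first_rule];
			rule_info[i - 1].first_rule++;
		}
	}

	rules_tbl[idx] = next_hop;
	rule_info[depth - 1].used_rules++;
}

int main(void)
{
	model_rule_add(2, 20);
	model_rule_add(3, 30);
	model_rule_add(2, 21);	/* lands before the depth-3 group */
	printf("depth-2 group: first=%u used=%u\n",
		rule_info[1].first_rule, rule_info[1].used_rules);
	return 0;
}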
* NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
*/
static void
-rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
+rule_delete(struct __rte_lpm *i_lpm, int32_t rule_index, uint8_t depth)
{
int i;
VERIFY_DEPTH(depth);
- lpm->rules_tbl[rule_index] =
- lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
- + lpm->rule_info[depth - 1].used_rules - 1];
+ i_lpm->rules_tbl[rule_index] =
+ i_lpm->rules_tbl[i_lpm->rule_info[depth - 1].first_rule
+ + i_lpm->rule_info[depth - 1].used_rules - 1];
for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
- if (lpm->rule_info[i].used_rules > 0) {
- lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
- lpm->rules_tbl[lpm->rule_info[i].first_rule
- + lpm->rule_info[i].used_rules - 1];
- lpm->rule_info[i].first_rule--;
+ if (i_lpm->rule_info[i].used_rules > 0) {
+ i_lpm->rules_tbl[i_lpm->rule_info[i].first_rule - 1] =
+ i_lpm->rules_tbl[i_lpm->rule_info[i].first_rule
+ + i_lpm->rule_info[i].used_rules - 1];
+ i_lpm->rule_info[i].first_rule--;
}
}
- lpm->rule_info[depth - 1].used_rules--;
+ i_lpm->rule_info[depth - 1].used_rules--;
}
/*
* NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
*/
static int32_t
-rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
+rule_find(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth)
{
uint32_t rule_gindex, last_rule, rule_index;
VERIFY_DEPTH(depth);
- rule_gindex = lpm->rule_info[depth - 1].first_rule;
- last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
+ rule_gindex = i_lpm->rule_info[depth - 1].first_rule;
+ last_rule = rule_gindex + i_lpm->rule_info[depth - 1].used_rules;
/* Scan used rules at given depth to find rule. */
for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
/* If rule is found return the rule index. */
- if (lpm->rules_tbl[rule_index].ip == ip_masked)
+ if (i_lpm->rules_tbl[rule_index].ip == ip_masked)
return rule_index;
}
* Find, clean and allocate a tbl8.
*/
static int32_t
-_tbl8_alloc(struct rte_lpm *lpm)
+_tbl8_alloc(struct __rte_lpm *i_lpm)
{
uint32_t group_idx; /* tbl8 group index. */
struct rte_lpm_tbl_entry *tbl8_entry;
/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
- for (group_idx = 0; group_idx < lpm->number_tbl8s; group_idx++) {
- tbl8_entry = &lpm->tbl8[group_idx *
+ for (group_idx = 0; group_idx < i_lpm->number_tbl8s; group_idx++) {
+ tbl8_entry = &i_lpm->lpm.tbl8[group_idx *
RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
/* If a free tbl8 group is found clean it and set as VALID. */
if (!tbl8_entry->valid_group) {
}
static int32_t
-tbl8_alloc(struct rte_lpm *lpm)
+tbl8_alloc(struct __rte_lpm *i_lpm)
{
int32_t group_idx; /* tbl8 group index. */
- struct __rte_lpm *internal_lpm;
- internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
- group_idx = _tbl8_alloc(lpm);
- if (group_idx == -ENOSPC && internal_lpm->dq != NULL) {
+ group_idx = _tbl8_alloc(i_lpm);
+ if (group_idx == -ENOSPC && i_lpm->dq != NULL) {
/* If there are no tbl8 groups try to reclaim one. */
- if (rte_rcu_qsbr_dq_reclaim(internal_lpm->dq, 1,
+ if (rte_rcu_qsbr_dq_reclaim(i_lpm->dq, 1,
NULL, NULL, NULL) == 0)
- group_idx = _tbl8_alloc(lpm);
+ group_idx = _tbl8_alloc(i_lpm);
}
return group_idx;
}
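/*
 * Illustrative sketch, not part of the patch: tbl8_alloc() above follows an
 * allocate-or-reclaim pattern; only when the linear scan reports -ENOSPC and
 * an RCU defer queue is attached does it reclaim one deferred free and retry
 * once. A stand-alone model of that control flow (the helpers stand in for
 * _tbl8_alloc() and rte_rcu_qsbr_dq_reclaim(); they are not DPDK calls):
 */
#include <errno.h>
#include <stdio.h>

#define MODEL_NUM_GROUPS 4

static int group_in_use[MODEL_NUM_GROUPS] = { 1, 1, 1, 1 };	/* all busy */

static int scan_for_free_group(void)	/* mirrors _tbl8_alloc()'s scan */
{
	int i;

	for (i = 0; i < MODEL_NUM_GROUPS; i++) {
		if (!group_in_use[i]) {
			group_in_use[i] = 1;	/* claim it */
			return i;
		}
	}
	return -ENOSPC;
}

static int reclaim_one_group(void)	/* stands in for the RCU reclaim call */
{
	group_in_use[2] = 0;	/* pretend a deferred free completed */
	return 0;
}

static int alloc_group_with_reclaim(int have_defer_queue)
{
	int idx = scan_for_free_group();

	if (idx == -ENOSPC && have_defer_queue) {
		/* Exhausted: reclaim one deferred free, then retry once. */
		if (reclaim_one_group() == 0)
			idx = scan_for_free_group();
	}
	return idx;
}

int main(void)
{
	printf("got group %d\n", alloc_group_with_reclaim(1));	/* prints 2 */
	return 0;
}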
static int32_t
-tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
+tbl8_free(struct __rte_lpm *i_lpm, uint32_t tbl8_group_start)
{
struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
- struct __rte_lpm *internal_lpm;
int status;
- internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
- if (internal_lpm->v == NULL) {
+ if (i_lpm->v == NULL) {
/* Set tbl8 group invalid */
- __atomic_store(&lpm->tbl8[tbl8_group_start], &zero_tbl8_entry,
+ __atomic_store(&i_lpm->lpm.tbl8[tbl8_group_start], &zero_tbl8_entry,
__ATOMIC_RELAXED);
- } else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_SYNC) {
+ } else if (i_lpm->rcu_mode == RTE_LPM_QSBR_MODE_SYNC) {
/* Wait for quiescent state change. */
- rte_rcu_qsbr_synchronize(internal_lpm->v,
+ rte_rcu_qsbr_synchronize(i_lpm->v,
RTE_QSBR_THRID_INVALID);
/* Set tbl8 group invalid */
- __atomic_store(&lpm->tbl8[tbl8_group_start], &zero_tbl8_entry,
+ __atomic_store(&i_lpm->lpm.tbl8[tbl8_group_start], &zero_tbl8_entry,
__ATOMIC_RELAXED);
- } else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
+ } else if (i_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
/* Push into QSBR defer queue. */
- status = rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
+ status = rte_rcu_qsbr_dq_enqueue(i_lpm->dq,
(void *)&tbl8_group_start);
if (status == 1) {
RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n");
}
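/*
 * Illustrative sketch, not part of the patch: the SYNC and DQ branches of
 * tbl8_free() above only make progress if reader threads report quiescent
 * states on the QSBR variable handed to rte_lpm_rcu_qsbr_add(). A hedged
 * outline of such a reader loop; the lookup body and the endless loop are
 * schematic, and thread_id management is left to the application.
 */
#include <rte_lpm.h>
#include <rte_rcu_qsbr.h>
#include <rte_ip.h>

static void
do_lookups(struct rte_lpm *lpm)
{
	uint32_t next_hop;

	/* Dereferences tbl24/tbl8 entries, hence the RCU protection. */
	(void)rte_lpm_lookup(lpm, RTE_IPV4(10, 0, 0, 1), &next_hop);
}

static void
reader_loop(struct rte_rcu_qsbr *v, struct rte_lpm *lpm, unsigned int thread_id)
{
	rte_rcu_qsbr_thread_register(v, thread_id);
	rte_rcu_qsbr_thread_online(v, thread_id);

	for (;;) {
		do_lookups(lpm);
		/* Outside any lookup: report a quiescent state so deferred
		 * tbl8 group frees can be reclaimed. */
		rte_rcu_qsbr_quiescent(v, thread_id);
	}
}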
static __rte_noinline int32_t
-add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+add_depth_small(struct __rte_lpm *i_lpm, uint32_t ip, uint8_t depth,
uint32_t next_hop)
{
#define group_idx next_hop
* For invalid OR valid and non-extended tbl 24 entries set
* entry.
*/
- if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
- lpm->tbl24[i].depth <= depth)) {
+ if (!i_lpm->lpm.tbl24[i].valid || (i_lpm->lpm.tbl24[i].valid_group == 0 &&
+ i_lpm->lpm.tbl24[i].depth <= depth)) {
struct rte_lpm_tbl_entry new_tbl24_entry = {
.next_hop = next_hop,
/* Setting tbl24 entry in one go to avoid race
* conditions
*/
- __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
+ __atomic_store(&i_lpm->lpm.tbl24[i], &new_tbl24_entry,
__ATOMIC_RELEASE);
continue;
}
- if (lpm->tbl24[i].valid_group == 1) {
+ if (i_lpm->lpm.tbl24[i].valid_group == 1) {
/* If tbl24 entry is valid and extended calculate the
* index into tbl8.
*/
- tbl8_index = lpm->tbl24[i].group_idx *
+ tbl8_index = i_lpm->lpm.tbl24[i].group_idx *
RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
tbl8_group_end = tbl8_index +
RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
for (j = tbl8_index; j < tbl8_group_end; j++) {
- if (!lpm->tbl8[j].valid ||
- lpm->tbl8[j].depth <= depth) {
+ if (!i_lpm->lpm.tbl8[j].valid ||
+ i_lpm->lpm.tbl8[j].depth <= depth) {
struct rte_lpm_tbl_entry
new_tbl8_entry = {
.valid = VALID,
* Setting tbl8 entry in one go to avoid
* race conditions
*/
- __atomic_store(&lpm->tbl8[j],
+ __atomic_store(&i_lpm->lpm.tbl8[j],
&new_tbl8_entry,
__ATOMIC_RELAXED);
}
static __rte_noinline int32_t
-add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
+add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
uint32_t next_hop)
{
#define group_idx next_hop
tbl24_index = (ip_masked >> 8);
tbl8_range = depth_to_range(depth);
- if (!lpm->tbl24[tbl24_index].valid) {
+ if (!i_lpm->lpm.tbl24[tbl24_index].valid) {
/* Search for a free tbl8 group. */
- tbl8_group_index = tbl8_alloc(lpm);
+ tbl8_group_index = tbl8_alloc(i_lpm);
/* Check tbl8 allocation was successful. */
if (tbl8_group_index < 0) {
struct rte_lpm_tbl_entry new_tbl8_entry = {
.valid = VALID,
.depth = depth,
- .valid_group = lpm->tbl8[i].valid_group,
+ .valid_group = i_lpm->lpm.tbl8[i].valid_group,
.next_hop = next_hop,
};
- __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
__ATOMIC_RELAXED);
}
/* The tbl24 entry must be written only after the
* tbl8 entries are written.
*/
- __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+ __atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
__ATOMIC_RELEASE);
} /* If valid entry but not extended calculate the index into Table8. */
- else if (lpm->tbl24[tbl24_index].valid_group == 0) {
+ else if (i_lpm->lpm.tbl24[tbl24_index].valid_group == 0) {
/* Search for free tbl8 group. */
- tbl8_group_index = tbl8_alloc(lpm);
+ tbl8_group_index = tbl8_alloc(i_lpm);
if (tbl8_group_index < 0) {
return tbl8_group_index;
for (i = tbl8_group_start; i < tbl8_group_end; i++) {
struct rte_lpm_tbl_entry new_tbl8_entry = {
.valid = VALID,
- .depth = lpm->tbl24[tbl24_index].depth,
- .valid_group = lpm->tbl8[i].valid_group,
- .next_hop = lpm->tbl24[tbl24_index].next_hop,
+ .depth = i_lpm->lpm.tbl24[tbl24_index].depth,
+ .valid_group = i_lpm->lpm.tbl8[i].valid_group,
+ .next_hop = i_lpm->lpm.tbl24[tbl24_index].next_hop,
};
- __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
__ATOMIC_RELAXED);
}
struct rte_lpm_tbl_entry new_tbl8_entry = {
.valid = VALID,
.depth = depth,
- .valid_group = lpm->tbl8[i].valid_group,
+ .valid_group = i_lpm->lpm.tbl8[i].valid_group,
.next_hop = next_hop,
};
- __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
__ATOMIC_RELAXED);
}
/* The tbl24 entry must be written only after the
* tbl8 entries are written.
*/
- __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+ __atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
__ATOMIC_RELEASE);
} else { /*
* If it is valid, extended entry calculate the index into tbl8.
*/
- tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
+ tbl8_group_index = i_lpm->lpm.tbl24[tbl24_index].group_idx;
tbl8_group_start = tbl8_group_index *
RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
- if (!lpm->tbl8[i].valid ||
- lpm->tbl8[i].depth <= depth) {
+ if (!i_lpm->lpm.tbl8[i].valid ||
+ i_lpm->lpm.tbl8[i].depth <= depth) {
struct rte_lpm_tbl_entry new_tbl8_entry = {
.valid = VALID,
.depth = depth,
.next_hop = next_hop,
- .valid_group = lpm->tbl8[i].valid_group,
+ .valid_group = i_lpm->lpm.tbl8[i].valid_group,
};
/*
* Setting tbl8 entry in one go to avoid race
* condition
*/
- __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
__ATOMIC_RELAXED);
continue;
uint32_t next_hop)
{
int32_t rule_index, status = 0;
+ struct __rte_lpm *i_lpm;
uint32_t ip_masked;
/* Check user arguments. */
if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
return -EINVAL;
+ i_lpm = container_of(lpm, struct __rte_lpm, lpm);
ip_masked = ip & depth_to_mask(depth);
/* Add the rule to the rule table. */
- rule_index = rule_add(lpm, ip_masked, depth, next_hop);
+ rule_index = rule_add(i_lpm, ip_masked, depth, next_hop);
/* Skip table entries update if the rule is the same as
* the rule in the rules table.
}
if (depth <= MAX_DEPTH_TBL24) {
- status = add_depth_small(lpm, ip_masked, depth, next_hop);
+ status = add_depth_small(i_lpm, ip_masked, depth, next_hop);
} else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */
- status = add_depth_big(lpm, ip_masked, depth, next_hop);
+ status = add_depth_big(i_lpm, ip_masked, depth, next_hop);
/*
* If add fails due to exhaustion of tbl8 extensions delete
* rule that was added to rule table.
*/
if (status < 0) {
- rule_delete(lpm, rule_index, depth);
+ rule_delete(i_lpm, rule_index, depth);
return status;
}
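/*
 * Illustrative sketch, not part of the patch: nothing changes for callers of
 * rte_lpm_add(); the hop from the public handle to i_lpm happens inside the
 * library via container_of(). Example insert and lookup (the prefix and the
 * next-hop value 7 are arbitrary):
 */
#include <stdio.h>
#include <rte_lpm.h>
#include <rte_ip.h>

static void
add_and_check(struct rte_lpm *lpm)
{
	uint32_t next_hop = 0;

	/* 192.168.0.0/24 -> next hop 7 */
	if (rte_lpm_add(lpm, RTE_IPV4(192, 168, 0, 0), 24, 7) < 0)
		printf("add failed\n");

	if (rte_lpm_lookup(lpm, RTE_IPV4(192, 168, 0, 42), &next_hop) == 0)
		printf("next hop %u\n", next_hop);	/* prints 7 */
}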
rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
uint32_t *next_hop)
{
+ struct __rte_lpm *i_lpm;
uint32_t ip_masked;
int32_t rule_index;
return -EINVAL;
/* Look for the rule using rule_find. */
+ i_lpm = container_of(lpm, struct __rte_lpm, lpm);
ip_masked = ip & depth_to_mask(depth);
- rule_index = rule_find(lpm, ip_masked, depth);
+ rule_index = rule_find(i_lpm, ip_masked, depth);
if (rule_index >= 0) {
- *next_hop = lpm->rules_tbl[rule_index].next_hop;
+ *next_hop = i_lpm->rules_tbl[rule_index].next_hop;
return 1;
}
}
static int32_t
-find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+find_previous_rule(struct __rte_lpm *i_lpm, uint32_t ip, uint8_t depth,
uint8_t *sub_rule_depth)
{
int32_t rule_index;
for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
ip_masked = ip & depth_to_mask(prev_depth);
- rule_index = rule_find(lpm, ip_masked, prev_depth);
+ rule_index = rule_find(i_lpm, ip_masked, prev_depth);
if (rule_index >= 0) {
*sub_rule_depth = prev_depth;
}
static int32_t
-delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
+delete_depth_small(struct __rte_lpm *i_lpm, uint32_t ip_masked,
uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
#define group_idx next_hop
*/
for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
- if (lpm->tbl24[i].valid_group == 0 &&
- lpm->tbl24[i].depth <= depth) {
- __atomic_store(&lpm->tbl24[i],
+ if (i_lpm->lpm.tbl24[i].valid_group == 0 &&
+ i_lpm->lpm.tbl24[i].depth <= depth) {
+ __atomic_store(&i_lpm->lpm.tbl24[i],
&zero_tbl24_entry, __ATOMIC_RELEASE);
- } else if (lpm->tbl24[i].valid_group == 1) {
+ } else if (i_lpm->lpm.tbl24[i].valid_group == 1) {
/*
* If TBL24 entry is extended, then there has
* to be a rule with depth >= 25 in the
* associated TBL8 group.
*/
- tbl8_group_index = lpm->tbl24[i].group_idx;
+ tbl8_group_index = i_lpm->lpm.tbl24[i].group_idx;
tbl8_index = tbl8_group_index *
RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
for (j = tbl8_index; j < (tbl8_index +
RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
- if (lpm->tbl8[j].depth <= depth)
- lpm->tbl8[j].valid = INVALID;
+ if (i_lpm->lpm.tbl8[j].depth <= depth)
+ i_lpm->lpm.tbl8[j].valid = INVALID;
}
}
}
*/
struct rte_lpm_tbl_entry new_tbl24_entry = {
- .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
+ .next_hop = i_lpm->rules_tbl[sub_rule_index].next_hop,
.valid = VALID,
.valid_group = 0,
.depth = sub_rule_depth,
.valid = VALID,
.valid_group = VALID,
.depth = sub_rule_depth,
- .next_hop = lpm->rules_tbl
+ .next_hop = i_lpm->rules_tbl
[sub_rule_index].next_hop,
};
for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
- if (lpm->tbl24[i].valid_group == 0 &&
- lpm->tbl24[i].depth <= depth) {
- __atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
+ if (i_lpm->lpm.tbl24[i].valid_group == 0 &&
+ i_lpm->lpm.tbl24[i].depth <= depth) {
+ __atomic_store(&i_lpm->lpm.tbl24[i], &new_tbl24_entry,
__ATOMIC_RELEASE);
- } else if (lpm->tbl24[i].valid_group == 1) {
+ } else if (i_lpm->lpm.tbl24[i].valid_group == 1) {
/*
* If TBL24 entry is extended, then there has
* to be a rule with depth >= 25 in the
* associated TBL8 group.
*/
- tbl8_group_index = lpm->tbl24[i].group_idx;
+ tbl8_group_index = i_lpm->lpm.tbl24[i].group_idx;
tbl8_index = tbl8_group_index *
RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
for (j = tbl8_index; j < (tbl8_index +
RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
- if (lpm->tbl8[j].depth <= depth)
- __atomic_store(&lpm->tbl8[j],
+ if (i_lpm->lpm.tbl8[j].depth <= depth)
+ __atomic_store(&i_lpm->lpm.tbl8[j],
&new_tbl8_entry,
__ATOMIC_RELAXED);
}
}
static int32_t
-delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
+delete_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked,
uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
#define group_idx next_hop
tbl24_index = ip_masked >> 8;
/* Calculate the index into tbl8 and range. */
- tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
+ tbl8_group_index = i_lpm->lpm.tbl24[tbl24_index].group_idx;
tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
tbl8_range = depth_to_range(depth);
* rule_to_delete must be removed or modified.
*/
for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
- if (lpm->tbl8[i].depth <= depth)
- lpm->tbl8[i].valid = INVALID;
+ if (i_lpm->lpm.tbl8[i].depth <= depth)
+ i_lpm->lpm.tbl8[i].valid = INVALID;
}
} else {
/* Set new tbl8 entry. */
struct rte_lpm_tbl_entry new_tbl8_entry = {
.valid = VALID,
.depth = sub_rule_depth,
- .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
- .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
+ .valid_group = i_lpm->lpm.tbl8[tbl8_group_start].valid_group,
+ .next_hop = i_lpm->rules_tbl[sub_rule_index].next_hop,
};
/*
* rule_to_delete must be modified.
*/
for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
- if (lpm->tbl8[i].depth <= depth)
- __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+ if (i_lpm->lpm.tbl8[i].depth <= depth)
+ __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
__ATOMIC_RELAXED);
}
}
* associated tbl24 entry.
*/
- tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start);
+ tbl8_recycle_index = tbl8_recycle_check(i_lpm->lpm.tbl8, tbl8_group_start);
if (tbl8_recycle_index == -EINVAL) {
/* Set tbl24 before freeing tbl8 to avoid race condition.
* Prevent the free of the tbl8 group from hoisting.
*/
- lpm->tbl24[tbl24_index].valid = 0;
+ i_lpm->lpm.tbl24[tbl24_index].valid = 0;
__atomic_thread_fence(__ATOMIC_RELEASE);
- status = tbl8_free(lpm, tbl8_group_start);
+ status = tbl8_free(i_lpm, tbl8_group_start);
} else if (tbl8_recycle_index > -1) {
/* Update tbl24 entry. */
struct rte_lpm_tbl_entry new_tbl24_entry = {
- .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
+ .next_hop = i_lpm->lpm.tbl8[tbl8_recycle_index].next_hop,
.valid = VALID,
.valid_group = 0,
- .depth = lpm->tbl8[tbl8_recycle_index].depth,
+ .depth = i_lpm->lpm.tbl8[tbl8_recycle_index].depth,
};
/* Set tbl24 before freeing tbl8 to avoid race condition.
* Prevent the free of the tbl8 group from hoisting.
*/
- __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+ __atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
__ATOMIC_RELAXED);
__atomic_thread_fence(__ATOMIC_RELEASE);
- status = tbl8_free(lpm, tbl8_group_start);
+ status = tbl8_free(i_lpm, tbl8_group_start);
}
#undef group_idx
return status;
rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
{
int32_t rule_to_delete_index, sub_rule_index;
+ struct __rte_lpm *i_lpm;
uint32_t ip_masked;
uint8_t sub_rule_depth;
/*
return -EINVAL;
}
+ i_lpm = container_of(lpm, struct __rte_lpm, lpm);
ip_masked = ip & depth_to_mask(depth);
/*
* Find the index of the input rule, that needs to be deleted, in the
* rule table.
*/
- rule_to_delete_index = rule_find(lpm, ip_masked, depth);
+ rule_to_delete_index = rule_find(i_lpm, ip_masked, depth);
/*
* Check if rule_to_delete_index was found. If no rule was found the
return -EINVAL;
/* Delete the rule from the rule table. */
- rule_delete(lpm, rule_to_delete_index, depth);
+ rule_delete(i_lpm, rule_to_delete_index, depth);
/*
* Find rule to replace the rule_to_delete. If there is no rule to
* entries associated with this rule.
*/
sub_rule_depth = 0;
- sub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth);
+ sub_rule_index = find_previous_rule(i_lpm, ip, depth, &sub_rule_depth);
/*
* If the input depth value is less than 25 use function
* delete_depth_small otherwise use delete_depth_big.
*/
if (depth <= MAX_DEPTH_TBL24) {
- return delete_depth_small(lpm, ip_masked, depth,
+ return delete_depth_small(i_lpm, ip_masked, depth,
sub_rule_index, sub_rule_depth);
} else { /* If depth > MAX_DEPTH_TBL24 */
- return delete_depth_big(lpm, ip_masked, depth, sub_rule_index,
+ return delete_depth_big(i_lpm, ip_masked, depth, sub_rule_index,
sub_rule_depth);
}
}
void
rte_lpm_delete_all(struct rte_lpm *lpm)
{
+ struct __rte_lpm *i_lpm;
+
+ i_lpm = container_of(lpm, struct __rte_lpm, lpm);
/* Zero rule information. */
- memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
+ memset(i_lpm->rule_info, 0, sizeof(i_lpm->rule_info));
/* Zero tbl24. */
- memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
+ memset(i_lpm->lpm.tbl24, 0, sizeof(i_lpm->lpm.tbl24));
/* Zero tbl8. */
- memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
- * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
+ memset(i_lpm->lpm.tbl8, 0, sizeof(i_lpm->lpm.tbl8[0])
+ * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * i_lpm->number_tbl8s);
/* Delete all rules from the rules table. */
- memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
+ memset(i_lpm->rules_tbl, 0, sizeof(i_lpm->rules_tbl[0]) * i_lpm->max_rules);
}
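/*
 * Illustrative sketch, not part of the patch: a full lifecycle against the
 * reworked structures; the rule bookkeeping that used to sit in struct
 * rte_lpm now lives in the hidden struct __rte_lpm, so this application-side
 * code is unchanged. Assumes rte_eal_init() has already run; names, sizes
 * and addresses are arbitrary.
 */
#include <rte_lpm.h>
#include <rte_ip.h>

static int
lpm_lifecycle(int socket_id)
{
	struct rte_lpm_config cfg = {
		.max_rules = 64,
		.number_tbl8s = 16,
		.flags = 0,
	};
	struct rte_lpm *lpm = rte_lpm_create("lifecycle_lpm", socket_id, &cfg);
	uint32_t next_hop;
	int ret = -1;

	if (lpm == NULL)
		return -1;

	if (rte_lpm_add(lpm, RTE_IPV4(10, 1, 0, 0), 16, 3) == 0 &&
	    rte_lpm_is_rule_present(lpm, RTE_IPV4(10, 1, 0, 0), 16, &next_hop) == 1 &&
	    rte_lpm_delete(lpm, RTE_IPV4(10, 1, 0, 0), 16) == 0)
		ret = 0;

	rte_lpm_delete_all(lpm);	/* clears rule_info, tbl24, tbl8 and rules_tbl */
	rte_lpm_free(lpm);
	return ret;
}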