-TAILQ_HEAD(rte_lpm_list, rte_lpm);
-
+TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);
+
+static struct rte_tailq_elem rte_lpm_tailq = {
+ .name = "RTE_LPM",
+};
+EAL_REGISTER_TAILQ(rte_lpm_tailq)
+
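The per-library tailq is now declared locally and registered with EAL instead of being looked up through the fixed RTE_TAILQ_LPM index. For illustration only, here is a minimal sketch of the same pattern for a hypothetical library; none of the rte_foo names below are part of this patch, and only the macros the hunk above already uses (EAL_REGISTER_TAILQ, RTE_TAILQ_CAST) are assumed:

    /* Hypothetical library "foo" adopting the same registration scheme. */
    #include <string.h>
    #include <rte_tailq.h>

    struct rte_foo { char name[32]; };

    TAILQ_HEAD(rte_foo_list, rte_tailq_entry);

    static struct rte_tailq_elem rte_foo_tailq = {
        .name = "RTE_FOO",
    };
    EAL_REGISTER_TAILQ(rte_foo_tailq)

    /* Lookups fetch the registered head with RTE_TAILQ_CAST and walk the
     * generic rte_tailq_entry list, casting each entry's data pointer back
     * to the library object. The tailq read lock is omitted here; real
     * code takes it, as the LPM hunks below do. */
    static struct rte_foo *
    foo_find(const char *name)
    {
        struct rte_foo_list *foo_list =
            RTE_TAILQ_CAST(rte_foo_tailq.head, rte_foo_list);
        struct rte_tailq_entry *te;

        TAILQ_FOREACH(te, foo_list, next) {
            struct rte_foo *f = te->data;
            if (strcmp(f->name, name) == 0)
                return f;
        }
        return NULL;
    }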
- /* check that we have an initialised tail queue */
- if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM, rte_lpm_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return NULL;
- }
+ lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
- TAILQ_FOREACH(l, lpm_list, next) {
+ TAILQ_FOREACH(te, lpm_list, next) {
+ l = (struct rte_lpm *) te->data;
if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
break;
}
rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
- /* check that we have an initialised tail queue */
- if ((lpm_list =
- RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM, rte_lpm_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return NULL;
- }
+ lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl24_entry) != 2);
RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl8_entry) != 2);
- rte_snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
-
- /*
- * Pad out max_rules so that each depth is given the same number of
- * rules.
- */
- if (max_rules % RTE_LPM_MAX_DEPTH) {
- max_rules += RTE_LPM_MAX_DEPTH -
- (max_rules % RTE_LPM_MAX_DEPTH);
- }
+ snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
/* Determine the amount of memory to allocate. */
mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);
- TAILQ_FOREACH(lpm, lpm_list, next) {
+ TAILQ_FOREACH(te, lpm_list, next) {
+ lpm = (struct rte_lpm *) te->data;
 if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
 break;
 }
+ if (te != NULL)
+ goto exit;
+
+ /* allocate tailq entry */
+ te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
+ if (te == NULL) {
+ RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
/* Allocate memory to store the LPM data structures. */
lpm = (struct rte_lpm *)rte_zmalloc_socket(mem_name, mem_size,
- lpm->max_rules_per_depth = max_rules / RTE_LPM_MAX_DEPTH;
- rte_snprintf(lpm->name, sizeof(lpm->name), "%s", name);
+ lpm->max_rules = max_rules;
+ snprintf(lpm->name, sizeof(lpm->name), "%s", name);
- RTE_EAL_TAILQ_REMOVE(RTE_TAILQ_LPM, rte_lpm_list, lpm);
+ lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /* find our tailq entry */
+ TAILQ_FOREACH(te, lpm_list, next) {
+ if (te->data == (void *) lpm)
+ break;
+ }
+ if (te == NULL) {
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ return;
+ }
+
+ TAILQ_REMOVE(lpm_list, te, next);
+
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
- /* rule_gindex stands for rule group index. */
- rule_gindex = ((depth - 1) * lpm->max_rules_per_depth);
- /* Initialise rule_index to point to start of rule group. */
- rule_index = rule_gindex;
- /* Last rule = Last used rule in this rule group. */
- last_rule = rule_gindex + lpm->used_rules_at_depth[depth - 1];
-
- /* If rule already exists update its next_hop and return. */
- if (lpm->rules_tbl[rule_index].ip == ip_masked) {
- lpm->rules_tbl[rule_index].next_hop = next_hop;
+ /* rule_gindex stands for rule group index. */
+ rule_gindex = lpm->rule_info[depth - 1].first_rule;
+ /* Initialise rule_index to point to start of rule group. */
+ rule_index = rule_gindex;
+ /* Last rule = Last used rule in this rule group. */
+ last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
+ for (; rule_index < last_rule; rule_index++) {
+
+ /* If rule already exists update its next_hop and return. */
+ if (lpm->rules_tbl[rule_index].ip == ip_masked) {
+ lpm->rules_tbl[rule_index].next_hop = next_hop;
+
+ return rule_index;
+ }
+ }
+
+ if (rule_index == lpm->max_rules)
+ return -ENOSPC;
+ } else {
+ /* Calculate the position in which the rule will be stored. */
+ rule_index = 0;
+
+ for (i = depth - 1; i > 0; i--) {
+ if (lpm->rule_info[i - 1].used_rules > 0) {
+ rule_index = lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules;
+ break;
+ }
- /*
- * If rule does not exist check if there is space to add a new rule to
- * this rule group. If there is no space return error. */
- if (lpm->used_rules_at_depth[depth - 1] == lpm->max_rules_per_depth) {
- return -ENOSPC;
+ /* Make room for the new rule in the array. */
+ for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
+ if (lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
+ return -ENOSPC;
+
+ if (lpm->rule_info[i - 1].used_rules > 0) {
+ lpm->rules_tbl[lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules]
+ = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
+ lpm->rule_info[i - 1].first_rule++;
+ }
lpm->rules_tbl[rule_index].ip = ip_masked;
lpm->rules_tbl[rule_index].next_hop = next_hop;
/* Increment the used rules counter for this rule group. */
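With max_rules_per_depth and used_rules_at_depth gone, rules_tbl becomes a single array of max_rules entries kept sorted by depth, and rule_info[depth - 1] records where each depth's contiguous group starts and how many rules it holds. A rough sketch of the bookkeeping and of the insertion shift above; the first_rule/used_rules field names are taken from this patch, but the struct declaration itself lives in rte_lpm.h and is assumed here:

    /* Assumed shape of the per-depth bookkeeping in rte_lpm.h. */
    struct rte_lpm_rule_info {
        uint32_t used_rules; /* rules currently stored at this depth */
        uint32_t first_rule; /* index in rules_tbl of this group's first rule */
    };

    /*
     * Example layout with max_rules = 8 and rules at depths 1, 2 and 3:
     *
     *   rules_tbl index : 0  1  2  3  4  5  6  7
     *   depth           : 1  1  2  2  2  3  -  -
     *
     *   rule_info[0] = { .first_rule = 0, .used_rules = 2 }   depth 1
     *   rule_info[1] = { .first_rule = 2, .used_rules = 3 }   depth 2
     *   rule_info[2] = { .first_rule = 5, .used_rules = 1 }   depth 3
     *
     * Adding a new depth-1 rule opens slot 2 by rotating each deeper group
     * one position to the right, deepest group first: rules_tbl[5] is copied
     * to rules_tbl[6] (depth 3 now starts at 6), rules_tbl[2] is copied to
     * rules_tbl[5] (depth 2 now starts at 3), and the new rule is written at
     * index 2 before used_rules of depth 1 is incremented.
     */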
- rule_gindex = ((depth - 1) * lpm->max_rules_per_depth);
- last_rule_index = rule_gindex +
- (lpm->used_rules_at_depth[depth - 1]) - 1;
- /*
- * Overwrite redundant rule with last rule in group and decrement rule
- * counter.
- */
- lpm->rules_tbl[rule_index] = lpm->rules_tbl[last_rule_index];
- lpm->used_rules_at_depth[depth - 1]--;
-}
+ lpm->rules_tbl[rule_index] = lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
+ + lpm->rule_info[depth - 1].used_rules - 1];
+ for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
+ if (lpm->rule_info[i].used_rules > 0) {
+ lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
+ lpm->rules_tbl[lpm->rule_info[i].first_rule + lpm->rule_info[i].used_rules - 1];
+ lpm->rule_info[i].first_rule--;
+ }
+ }
+
+ lpm->rule_info[depth - 1].used_rules--;
+}
- rule_gindex = ((depth - 1) * lpm->max_rules_per_depth);
- last_rule = rule_gindex + lpm->used_rules_at_depth[depth - 1];
+ rule_gindex = lpm->rule_info[depth - 1].first_rule;
+ last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
/* Scan used rules at given depth to find rule. */
for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
+/*
+ * Look for a rule in the high-level rules table
+ */
+int
+rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+uint8_t *next_hop)
+{
+ uint32_t ip_masked;
+ int32_t rule_index;
+
+ /* Check user arguments. */
+ if ((lpm == NULL) ||
+ (next_hop == NULL) ||
+ (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
+ return -EINVAL;
+
+ /* Look for the rule using rule_find. */
+ ip_masked = ip & depth_to_mask(depth);
+ rule_index = rule_find(lpm, ip_masked, depth);
+
+ if (rule_index >= 0) {
+ *next_hop = lpm->rules_tbl[rule_index].next_hop;
+ return 1;
+ }
+
+ /* If rule is not found return 0. */
+ return 0;
+}
+
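For reference, a minimal usage sketch of the new rte_lpm_is_rule_present() call; the lpm handle and the 10.0.0.0/24 route are illustrative and not part of this patch:

    uint8_t next_hop = 0;
    uint32_t ip = (uint32_t)10 << 24; /* 10.0.0.0 in host byte order */
    int ret;

    /* Returns 1 and fills next_hop if the exact rule exists,
     * 0 if it does not, and a negative value on bad arguments. */
    ret = rte_lpm_is_rule_present(lpm, ip, 24, &next_hop);
    if (ret == 1)
        printf("10.0.0.0/24 present, next hop %u\n", next_hop);
    else if (ret == 0)
        printf("10.0.0.0/24 not present\n");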
-find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
+find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t *sub_rule_depth)
{
uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
if (lpm->tbl24[i].ext_entry == 0 &&
lpm->tbl24[i].depth <= depth ) {
lpm->tbl24[i].valid = INVALID;
struct rte_lpm_tbl24_entry new_tbl24_entry = {
{.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
.valid = VALID,
.ext_entry = 0,
.next_hop = lpm->rules_tbl
[sub_rule_index].next_hop,
};
tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
tbl8_index = tbl8_group_index *
RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
{
uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
tbl8_range, i;
/*
* Check input arguments. Note: IP must be a positive integer of 32
* bits in length therefore it need not be checked.
/* Delete the rule from the rule table. */
rule_delete(lpm, rule_to_delete_index, depth);
* replace the rule_to_delete we return -1 and invalidate the table
* entries associated with this rule.
*/
- sub_rule_index = find_previous_rule(lpm, ip, depth);
+ sub_rule_depth = 0;
+ sub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth);
*/
if (depth <= MAX_DEPTH_TBL24) {
return delete_depth_small(lpm, ip_masked, depth,
- return delete_depth_big(lpm, ip_masked, depth, sub_rule_index);
+ return delete_depth_big(lpm, ip_masked, depth, sub_rule_index, sub_rule_depth);
- /* Zero used rules counter. */
- memset(lpm->used_rules_at_depth, 0, sizeof(lpm->used_rules_at_depth));
+ /* Zero rule information. */
+ memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
memset(lpm->tbl8, 0, sizeof(lpm->tbl8));
/* Delete all rules from the rules table. */
- memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) *
- (lpm->max_rules_per_depth * RTE_LPM_MAX_DEPTH));
+ memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);