-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
-#include <errno.h>
#include <sys/queue.h>
#include <rte_log.h>
#include <rte_common.h>
#include <rte_memory.h>
#include <rte_malloc.h>
-#include <rte_memzone.h>
#include <rte_memcpy.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
VALID
};
-TAILQ_HEAD(rte_lpm6_list, rte_lpm6);
+TAILQ_HEAD(rte_lpm6_list, rte_tailq_entry);
+
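+/* Tailq bookkeeping: EAL_REGISTER_TAILQ below registers this element
+ * so that rte_lpm6_tailq.head is initialised during rte_eal_init()
+ * and is shared between primary and secondary processes. */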
+static struct rte_tailq_elem rte_lpm6_tailq = {
+ .name = "RTE_LPM6",
+};
+EAL_REGISTER_TAILQ(rte_lpm6_tailq)
/** Tbl entry structure. It is the same for both tbl24 and tbl8 */
struct rte_lpm6_tbl_entry {
uint32_t next_hop: 21; /**< Next hop / next table to be checked. */
uint32_t depth :8; /**< Rule depth. */
-
+
/* Flags. */
uint32_t valid :1; /**< Validation flag. */
uint32_t valid_group :1; /**< Group validation flag. */
/** Rules tbl entry structure. */
struct rte_lpm6_rule {
uint8_t ip[RTE_LPM6_IPV6_ADDR_SIZE]; /**< Rule IP address. */
- uint8_t next_hop; /**< Rule next hop. */
+ uint32_t next_hop; /**< Rule next hop. */
uint8_t depth; /**< Rule depth. */
};
/** LPM6 structure. */
struct rte_lpm6 {
- TAILQ_ENTRY(rte_lpm6) next; /**< Next in list. */
-
/* LPM metadata. */
char name[RTE_LPM6_NAMESIZE]; /**< Name of the lpm. */
uint32_t max_rules; /**< Max number of rules. */
{
int16_t part_depth, mask;
int i;
-
+
part_depth = depth;
for (i = 0; i < RTE_LPM6_IPV6_ADDR_SIZE; i++) {
{
char mem_name[RTE_LPM6_NAMESIZE];
struct rte_lpm6 *lpm = NULL;
+ struct rte_tailq_entry *te;
uint64_t mem_size, rules_size;
struct rte_lpm6_list *lpm_list;
- /* Check that we have an initialised tail queue */
- if ((lpm_list =
- RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM6, rte_lpm6_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return NULL;
- }
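+ /* The list head lives in the shared memory config; RTE_TAILQ_CAST
+ * turns the generic head into this library's typed list. */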
+ lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list);
RTE_BUILD_BUG_ON(sizeof(struct rte_lpm6_tbl_entry) != sizeof(uint32_t));
return NULL;
}
- rte_snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
+ snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
/* Determine the amount of memory to allocate. */
mem_size = sizeof(*lpm) + (sizeof(lpm->tbl8[0]) *
rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
/* Guarantee there's no existing entry */
- TAILQ_FOREACH(lpm, lpm_list, next) {
+ TAILQ_FOREACH(te, lpm_list, next) {
+ lpm = (struct rte_lpm6 *) te->data;
if (strncmp(name, lpm->name, RTE_LPM6_NAMESIZE) == 0)
break;
}
- if (lpm != NULL)
+ lpm = NULL;
+ if (te != NULL) {
+ rte_errno = EEXIST;
goto exit;
+ }
+
+ /* allocate tailq entry */
+ te = rte_zmalloc("LPM6_TAILQ_ENTRY", sizeof(*te), 0);
+ if (te == NULL) {
+ RTE_LOG(ERR, LPM, "Failed to allocate tailq entry!\n");
+ rte_errno = ENOMEM;
+ goto exit;
+ }
/* Allocate memory to store the LPM data structures. */
- lpm = (struct rte_lpm6 *)rte_zmalloc_socket(mem_name, (size_t)mem_size,
- CACHE_LINE_SIZE, socket_id);
-
+ lpm = rte_zmalloc_socket(mem_name, (size_t)mem_size,
+ RTE_CACHE_LINE_SIZE, socket_id);
+
if (lpm == NULL) {
RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
+ rte_free(te);
+ rte_errno = ENOMEM;
goto exit;
}
- lpm->rules_tbl = (struct rte_lpm6_rule *)rte_zmalloc_socket(NULL,
- (size_t)rules_size, CACHE_LINE_SIZE, socket_id);
-
+ lpm->rules_tbl = rte_zmalloc_socket(NULL,
+ (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
+
if (lpm->rules_tbl == NULL) {
- RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
+ RTE_LOG(ERR, LPM, "LPM rules_tbl allocation failed\n");
rte_free(lpm);
+ lpm = NULL;
+ rte_free(te);
+ rte_errno = ENOMEM;
goto exit;
}
/* Save user arguments. */
lpm->max_rules = config->max_rules;
lpm->number_tbl8s = config->number_tbl8s;
- rte_snprintf(lpm->name, sizeof(lpm->name), "%s", name);
+ snprintf(lpm->name, sizeof(lpm->name), "%s", name);
+
- TAILQ_INSERT_TAIL(lpm_list, lpm, next);
+ te->data = (void *) lpm;
+ TAILQ_INSERT_TAIL(lpm_list, te, next);
-exit:
+exit:
rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
return lpm;
struct rte_lpm6 *
rte_lpm6_find_existing(const char *name)
{
- struct rte_lpm6 *l;
+ struct rte_lpm6 *l = NULL;
+ struct rte_tailq_entry *te;
struct rte_lpm6_list *lpm_list;
- /* Check that we have an initialised tail queue */
- if ((lpm_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_LPM6,
- rte_lpm6_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return NULL;
- }
+ lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list);
rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
- TAILQ_FOREACH(l, lpm_list, next) {
+ TAILQ_FOREACH(te, lpm_list, next) {
+ l = (struct rte_lpm6 *) te->data;
if (strncmp(name, l->name, RTE_LPM6_NAMESIZE) == 0)
break;
}
rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
- if (l == NULL)
+ if (te == NULL) {
rte_errno = ENOENT;
+ return NULL;
+ }
return l;
}
void
rte_lpm6_free(struct rte_lpm6 *lpm)
{
+ struct rte_lpm6_list *lpm_list;
+ struct rte_tailq_entry *te;
+
/* Check user arguments. */
if (lpm == NULL)
return;
- RTE_EAL_TAILQ_REMOVE(RTE_TAILQ_LPM6, rte_lpm6_list, lpm);
+ lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list);
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /* find our tailq entry */
+ TAILQ_FOREACH(te, lpm_list, next) {
+ if (te->data == (void *) lpm)
+ break;
+ }
+
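+ /* te may be NULL if the lpm was never inserted in the list; unlink
+ * it only when found (rte_free(NULL) below is a no-op). */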
+ if (te != NULL)
+ TAILQ_REMOVE(lpm_list, te, next);
+
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
rte_free(lpm->rules_tbl);
rte_free(lpm);
+ rte_free(te);
}
/*
* the nexthop if so. Otherwise it adds a new rule if enough space is available.
*/
static inline int32_t
-rule_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t next_hop, uint8_t depth)
+rule_add(struct rte_lpm6 *lpm, uint8_t *ip, uint32_t next_hop, uint8_t depth)
{
uint32_t rule_index;
-
+
/* Scan through rule list to see if rule already exists. */
for (rule_index = 0; rule_index < lpm->used_rules; rule_index++) {
/* Increment the used rules counter for this rule group. */
lpm->used_rules++;
-
+
return rule_index;
}
*/
static void
expand_rule(struct rte_lpm6 *lpm, uint32_t tbl8_gindex, uint8_t depth,
- uint8_t next_hop)
+ uint32_t next_hop)
{
uint32_t tbl8_group_end, tbl8_gindex_next, j;
static inline int
add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl,
struct rte_lpm6_tbl_entry **tbl_next, uint8_t *ip, uint8_t bytes,
- uint8_t first_byte, uint8_t depth, uint8_t next_hop)
+ uint8_t first_byte, uint8_t depth, uint32_t next_hop)
{
uint32_t tbl_index, tbl_range, tbl8_group_start, tbl8_group_end, i;
int32_t tbl8_gindex;
int8_t bitshift;
uint8_t bits_covered;
-
+
/*
* Calculate index to the table based on the number and position
* of the bytes being inspected in this step.
tbl_index = 0;
for (i = first_byte; i < (uint32_t)(first_byte + bytes); i++) {
bitshift = (int8_t)((bytes - i)*BYTE_SIZE);
-
+
if (bitshift < 0) bitshift = 0;
tbl_index = tbl_index | ip[i-1] << bitshift;
}
/* Number of bits covered in this step */
bits_covered = (uint8_t)((bytes+first_byte-1)*BYTE_SIZE);
-
+
/*
- * If depth if smaller than this number (ie this is the last step)
+ * If depth is smaller than this number (i.e. this is the last step)
* expand the rule across the relevant positions in the table.
*/
if (depth <= bits_covered) {
tbl[i] = new_tbl_entry;
} else if (tbl[i].ext_entry == 1) {
-
+
/*
* If tbl entry is valid and extended calculate the index
* into next tbl8 and expand the rule across the data structure.
*/
- tbl8_gindex = tbl[i].lpm6_tbl8_gindex *
+ tbl8_gindex = tbl[i].lpm6_tbl8_gindex *
RTE_LPM6_TBL8_GROUP_NUM_ENTRIES;
expand_rule(lpm, tbl8_gindex, depth, next_hop);
}
- }
-
+ }
+
return 0;
}
/*
tbl8_gindex = (lpm->next_tbl8)++;
else
return -ENOSPC;
-
+
struct rte_lpm6_tbl_entry new_tbl_entry = {
.lpm6_tbl8_gindex = tbl8_gindex,
.depth = 0,
tbl[tbl_index] = new_tbl_entry;
}
-
+
*tbl_next = &(lpm->tbl8[tbl[tbl_index].lpm6_tbl8_gindex *
RTE_LPM6_TBL8_GROUP_NUM_ENTRIES]);
}
-
+
return 1;
}
* Add a route
*/
int
-rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
+rte_lpm6_add_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
uint8_t next_hop)
+{
+ return rte_lpm6_add_v1705(lpm, ip, depth, next_hop);
+}
+VERSION_SYMBOL(rte_lpm6_add, _v20, 2.0);
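+
+/*
+ * Symbol versioning: binaries linked against the 2.0 ABI keep
+ * resolving rte_lpm6_add to the uint8_t next-hop wrapper above,
+ * while newly built code binds to the _v1705 implementation below,
+ * which returns the full 21-bit next hop in a uint32_t.
+ */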
+
+int
+rte_lpm6_add_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
+ uint32_t next_hop)
{
struct rte_lpm6_tbl_entry *tbl;
- struct rte_lpm6_tbl_entry *tbl_next;
+ struct rte_lpm6_tbl_entry *tbl_next = NULL;
int32_t rule_index;
int status;
uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE];
int i;
-
+
/* Check user arguments. */
if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
return -EINVAL;
-
+
/* Copy the IP and mask it to avoid modifying user's input data. */
memcpy(masked_ip, ip, RTE_LPM6_IPV6_ADDR_SIZE);
mask_ip(masked_ip, depth);
depth, next_hop);
if (status < 0) {
rte_lpm6_delete(lpm, masked_ip, depth);
-
+
return status;
}
- /*
+ /*
* Inspect one by one the rest of the bytes until
* the process is completed.
*/
depth, next_hop);
if (status < 0) {
rte_lpm6_delete(lpm, masked_ip, depth);
-
+
return status;
}
}
-
+
return status;
}
+BIND_DEFAULT_SYMBOL(rte_lpm6_add, _v1705, 17.05);
+MAP_STATIC_SYMBOL(int rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip,
+ uint8_t depth, uint32_t next_hop),
+ rte_lpm6_add_v1705);
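+
+/* MAP_STATIC_SYMBOL points statically linked applications at the
+ * _v1705 implementation as well, mirroring the dynamic binding above. */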
/*
* Takes a pointer to a table entry and inspect one level.
static inline int
lookup_step(const struct rte_lpm6 *lpm, const struct rte_lpm6_tbl_entry *tbl,
const struct rte_lpm6_tbl_entry **tbl_next, uint8_t *ip,
- uint8_t first_byte, uint8_t *next_hop)
+ uint8_t first_byte, uint32_t *next_hop)
{
uint32_t tbl8_index, tbl_entry;
-
+
/* Take the integer value from the pointer. */
tbl_entry = *(const uint32_t *)tbl;
-
+
/* If it is valid and extended we calculate the new pointer to return. */
if ((tbl_entry & RTE_LPM6_VALID_EXT_ENTRY_BITMASK) ==
RTE_LPM6_VALID_EXT_ENTRY_BITMASK) {
return 1;
} else {
/* If not extended then we can have a match. */
- *next_hop = (uint8_t)tbl_entry;
+ *next_hop = ((uint32_t)tbl_entry & RTE_LPM6_TBL8_BITMASK);
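+ /* RTE_LPM6_TBL8_BITMASK keeps the low 21 bits of the entry, i.e.
+ * the next_hop field; the lookup succeeds only if the entry's
+ * valid bit (RTE_LPM6_LOOKUP_SUCCESS) is also set. */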
return (tbl_entry & RTE_LPM6_LOOKUP_SUCCESS) ? 0 : -ENOENT;
}
}
* Looks up an IP
*/
int
-rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop)
+rte_lpm6_lookup_v20(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop)
+{
+ uint32_t next_hop32 = 0;
+ int32_t status;
+
+ /* DEBUG: Check user input arguments. */
+ if (next_hop == NULL)
+ return -EINVAL;
+
+ status = rte_lpm6_lookup_v1705(lpm, ip, &next_hop32);
+ if (status == 0)
+ *next_hop = (uint8_t)next_hop32;
+
+ return status;
+}
+VERSION_SYMBOL(rte_lpm6_lookup, _v20, 2.0);
+
+int
+rte_lpm6_lookup_v1705(const struct rte_lpm6 *lpm, uint8_t *ip,
+ uint32_t *next_hop)
{
const struct rte_lpm6_tbl_entry *tbl;
- const struct rte_lpm6_tbl_entry *tbl_next;
+ const struct rte_lpm6_tbl_entry *tbl_next = NULL;
int status;
uint8_t first_byte;
uint32_t tbl24_index;
-
+
/* DEBUG: Check user input arguments. */
if ((lpm == NULL) || (ip == NULL) || (next_hop == NULL)) {
return -EINVAL;
}
-
+
first_byte = LOOKUP_FIRST_BYTE;
tbl24_index = (ip[0] << BYTES2_SIZE) | (ip[1] << BYTE_SIZE) | ip[2];
/* Calculate pointer to the first entry to be inspected */
tbl = &lpm->tbl24[tbl24_index];
-
+
do {
- /* Continue inspecting following levels until success or failure */
+ /* Continue inspecting following levels until success or failure */
status = lookup_step(lpm, tbl, &tbl_next, ip, first_byte++, next_hop);
tbl = tbl_next;
} while (status == 1);
-
+
return status;
}
+BIND_DEFAULT_SYMBOL(rte_lpm6_lookup, _v1705, 17.05);
+MAP_STATIC_SYMBOL(int rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip,
+ uint32_t *next_hop), rte_lpm6_lookup_v1705);
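+
+/*
+ * Usage sketch (caller-side, illustrative only; handle_next_hop()
+ * is a hypothetical consumer): with the 17.05 binding the caller
+ * receives the full 21-bit next hop:
+ *
+ *	uint32_t nh;
+ *	if (rte_lpm6_lookup(lpm, ip, &nh) == 0)
+ *		handle_next_hop(nh);
+ */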
/*
* Looks up a group of IP addresses
*/
int
-rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
+rte_lpm6_lookup_bulk_func_v20(const struct rte_lpm6 *lpm,
uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
int16_t * next_hops, unsigned n)
{
unsigned i;
const struct rte_lpm6_tbl_entry *tbl;
- const struct rte_lpm6_tbl_entry *tbl_next;
- uint32_t tbl24_index;
- uint8_t first_byte, next_hop;
+ const struct rte_lpm6_tbl_entry *tbl_next = NULL;
+ uint32_t tbl24_index, next_hop;
+ uint8_t first_byte;
int status;
/* DEBUG: Check user input arguments. */
/* Calculate pointer to the first entry to be inspected */
tbl = &lpm->tbl24[tbl24_index];
-
+
do {
- /* Continue inspecting following levels until success or failure */
+ /* Continue inspecting following levels until success or failure */
status = lookup_step(lpm, tbl, &tbl_next, ips[i], first_byte++,
&next_hop);
tbl = tbl_next;
} while (status == 1);
-
+
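+ /* Report failed lookups as -1; successful next hops are truncated
+ * into the legacy int16_t array. */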
+ if (status < 0)
+ next_hops[i] = -1;
+ else
+ next_hops[i] = (int16_t)next_hop;
+ }
+
+ return 0;
+}
+VERSION_SYMBOL(rte_lpm6_lookup_bulk_func, _v20, 2.0);
+
+int
+rte_lpm6_lookup_bulk_func_v1705(const struct rte_lpm6 *lpm,
+ uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
+ int32_t *next_hops, unsigned int n)
+{
+ unsigned int i;
+ const struct rte_lpm6_tbl_entry *tbl;
+ const struct rte_lpm6_tbl_entry *tbl_next = NULL;
+ uint32_t tbl24_index, next_hop;
+ uint8_t first_byte;
+ int status;
+
+ /* DEBUG: Check user input arguments. */
+ if ((lpm == NULL) || (ips == NULL) || (next_hops == NULL))
+ return -EINVAL;
+
+ for (i = 0; i < n; i++) {
+ first_byte = LOOKUP_FIRST_BYTE;
+ tbl24_index = (ips[i][0] << BYTES2_SIZE) |
+ (ips[i][1] << BYTE_SIZE) | ips[i][2];
+
+ /* Calculate pointer to the first entry to be inspected */
+ tbl = &lpm->tbl24[tbl24_index];
+
+ do {
+ /* Continue inspecting following levels
+ * until success or failure
+ */
+ status = lookup_step(lpm, tbl, &tbl_next, ips[i],
+ first_byte++, &next_hop);
+ tbl = tbl_next;
+ } while (status == 1);
+
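+ /* Report failed lookups as -1; the 21-bit next hop fits in
+ * int32_t without truncation. */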
if (status < 0)
next_hops[i] = -1;
else
- next_hops[i] = next_hop;
+ next_hops[i] = (int32_t)next_hop;
}
-
+
return 0;
}
+BIND_DEFAULT_SYMBOL(rte_lpm6_lookup_bulk_func, _v1705, 17.05);
+MAP_STATIC_SYMBOL(int rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
+ uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
+ int32_t *next_hops, unsigned int n),
+ rte_lpm6_lookup_bulk_func_v1705);
/*
* Finds a rule in rule table.
rule_find(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth)
{
uint32_t rule_index;
-
+
/* Scan used rules at given depth to find rule. */
for (rule_index = 0; rule_index < lpm->used_rules; rule_index++) {
/* If rule is found return the rule index. */
if ((memcmp (lpm->rules_tbl[rule_index].ip, ip,
RTE_LPM6_IPV6_ADDR_SIZE) == 0) &&
lpm->rules_tbl[rule_index].depth == depth) {
-
+
return rule_index;
}
}
return -ENOENT;
}
+/*
+ * Look for a rule in the high-level rules table
+ */
+int
+rte_lpm6_is_rule_present_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
+ uint8_t *next_hop)
+{
+ uint32_t next_hop32 = 0;
+ int32_t status;
+
+ /* DEBUG: Check user input arguments. */
+ if (next_hop == NULL)
+ return -EINVAL;
+
+ status = rte_lpm6_is_rule_present_v1705(lpm, ip, depth, &next_hop32);
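+ /* _v1705 returns 1 when the rule exists; only then is next_hop32
+ * valid to narrow into the legacy uint8_t out-parameter. */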
+ if (status > 0)
+ *next_hop = (uint8_t)next_hop32;
+
+ return status;
+}
+VERSION_SYMBOL(rte_lpm6_is_rule_present, _v20, 2.0);
+
+int
+rte_lpm6_is_rule_present_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
+ uint32_t *next_hop)
+{
+ uint8_t ip_masked[RTE_LPM6_IPV6_ADDR_SIZE];
+ int32_t rule_index;
+
+ /* Check user arguments. */
+ if ((lpm == NULL) || next_hop == NULL || ip == NULL ||
+ (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
+ return -EINVAL;
+
+ /* Copy the IP and mask it to avoid modifying user's input data. */
+ memcpy(ip_masked, ip, RTE_LPM6_IPV6_ADDR_SIZE);
+ mask_ip(ip_masked, depth);
+
+ /* Look for the rule using rule_find. */
+ rule_index = rule_find(lpm, ip_masked, depth);
+
+ if (rule_index >= 0) {
+ *next_hop = lpm->rules_tbl[rule_index].next_hop;
+ return 1;
+ }
+
+ /* If rule is not found return 0. */
+ return 0;
+}
+BIND_DEFAULT_SYMBOL(rte_lpm6_is_rule_present, _v1705, 17.05);
+MAP_STATIC_SYMBOL(int rte_lpm6_is_rule_present(struct rte_lpm6 *lpm,
+ uint8_t *ip, uint8_t depth, uint32_t *next_hop),
+ rte_lpm6_is_rule_present_v1705);
+
/*
* Delete a rule from the rule table.
* NOTE: Valid range for depth parameter is 1 .. 128 inclusive.
int32_t rule_to_delete_index;
uint8_t ip_masked[RTE_LPM6_IPV6_ADDR_SIZE];
unsigned i;
-
+
/*
* Check input arguments.
*/
/* Delete the rule from the rule table. */
rule_delete(lpm, rule_to_delete_index);
-
- /*
+
+ /*
* Set all the table entries to 0 (i.e. delete every rule
* from the data structure).
*/
memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
* RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
-
- /*
+
+ /*
* Add every rule again (except for the one that was removed from
* the rules table).
*/
rte_lpm6_add(lpm, lpm->rules_tbl[i].ip, lpm->rules_tbl[i].depth,
lpm->rules_tbl[i].next_hop);
}
-
+
return 0;
}
int32_t rule_to_delete_index;
uint8_t ip_masked[RTE_LPM6_IPV6_ADDR_SIZE];
unsigned i;
-
+
/*
* Check input arguments.
*/
if ((lpm == NULL) || (ips == NULL) || (depths == NULL)) {
return -EINVAL;
}
-
+
for (i = 0; i < n; i++) {
/* Copy the IP and mask it to avoid modifying user's input data. */
memcpy(ip_masked, ips[i], RTE_LPM6_IPV6_ADDR_SIZE);
/* Delete the rule from the rule table. */
rule_delete(lpm, rule_to_delete_index);
}
-
- /*
+
+ /*
* Set all the table entries to 0 (i.e. delete every rule
* from the data structure).
*/
memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
* RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
-
- /*
+
+ /*
* Add every rule again (except for the ones that were removed from
* the rules table).
*/
rte_lpm6_add(lpm, lpm->rules_tbl[i].ip, lpm->rules_tbl[i].depth,
lpm->rules_tbl[i].next_hop);
}
-
+
return 0;
}
{
/* Zero used rules counter. */
lpm->used_rules = 0;
-
+
/* Zero next tbl8 index. */
lpm->next_tbl8 = 0;