} \
} while(0)
-typedef int32_t (* rte_lpm_test)(void);
+typedef int32_t (*rte_lpm_test)(void);
static int32_t test0(void);
static int32_t test1(void);
test3(void)
{
struct rte_lpm *lpm = NULL;
- uint32_t ip = IPv4(0, 0, 0, 0);
- uint8_t depth = 24, next_hop = 100;
+ uint32_t ip = IPv4(0, 0, 0, 0), next_hop = 100;
+ uint8_t depth = 24;
int32_t status = 0;
/* rte_lpm_add: lpm == NULL */
{
#if defined(RTE_LIBRTE_LPM_DEBUG)
struct rte_lpm *lpm = NULL;
- uint32_t ip = IPv4(0, 0, 0, 0);
- uint8_t next_hop_return = 0;
+ uint32_t ip = IPv4(0, 0, 0, 0), next_hop_return = 0;
int32_t status = 0;
/* rte_lpm_lookup: lpm == NULL */
test6(void)
{
struct rte_lpm *lpm = NULL;
- uint32_t ip = IPv4(0, 0, 0, 0);
- uint8_t depth = 24, next_hop_add = 100, next_hop_return = 0;
+ uint32_t ip = IPv4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
+ uint8_t depth = 24;
int32_t status = 0;
lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
test7(void)
{
__m128i ipx4;
- uint16_t hop[4];
+ uint32_t hop[4];
struct rte_lpm *lpm = NULL;
- uint32_t ip = IPv4(0, 0, 0, 0);
- uint8_t depth = 32, next_hop_add = 100, next_hop_return = 0;
+ uint32_t ip = IPv4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
+ uint8_t depth = 32;
int32_t status = 0;
lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
ipx4 = _mm_set_epi32(ip, ip + 0x100, ip - 0x100, ip);
- rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
+ rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
TEST_LPM_ASSERT(hop[0] == next_hop_add);
- TEST_LPM_ASSERT(hop[1] == UINT16_MAX);
- TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
+ TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
+ TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
TEST_LPM_ASSERT(hop[3] == next_hop_add);
status = rte_lpm_delete(lpm, ip, depth);
test8(void)
{
__m128i ipx4;
- uint16_t hop[4];
+ uint32_t hop[4];
struct rte_lpm *lpm = NULL;
uint32_t ip1 = IPv4(127, 255, 255, 255), ip2 = IPv4(128, 0, 0, 0);
- uint8_t depth, next_hop_add, next_hop_return;
+ uint32_t next_hop_add, next_hop_return;
+ uint8_t depth;
int32_t status = 0;
lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
(next_hop_return == next_hop_add));
ipx4 = _mm_set_epi32(ip2, ip1, ip2, ip1);
- rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
- TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
+ rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
+ TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
TEST_LPM_ASSERT(hop[1] == next_hop_add);
- TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
+ TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
TEST_LPM_ASSERT(hop[3] == next_hop_add);
}
if (depth != 1) {
TEST_LPM_ASSERT((status == 0) &&
(next_hop_return == next_hop_add));
- }
- else {
+ } else {
TEST_LPM_ASSERT(status == -ENOENT);
}
TEST_LPM_ASSERT(status == -ENOENT);
ipx4 = _mm_set_epi32(ip1, ip1, ip2, ip2);
- rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
+ rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
if (depth != 1) {
TEST_LPM_ASSERT(hop[0] == next_hop_add);
TEST_LPM_ASSERT(hop[1] == next_hop_add);
} else {
- TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
- TEST_LPM_ASSERT(hop[1] == UINT16_MAX);
+ TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
+ TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
}
- TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
- TEST_LPM_ASSERT(hop[3] == UINT16_MAX);
+ TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
+ TEST_LPM_ASSERT(hop[3] == UINT32_MAX);
}
rte_lpm_free(lpm);
{
struct rte_lpm *lpm = NULL;
uint32_t ip, ip_1, ip_2;
- uint8_t depth, depth_1, depth_2, next_hop_add, next_hop_add_1,
- next_hop_add_2, next_hop_return;
+ uint8_t depth, depth_1, depth_2;
+ uint32_t next_hop_add, next_hop_add_1, next_hop_add_2, next_hop_return;
int32_t status = 0;
/* Add & lookup to hit invalid TBL24 entry */
{
struct rte_lpm *lpm = NULL;
- uint32_t ip;
- uint8_t depth, next_hop_add, next_hop_return;
+ uint32_t ip, next_hop_add, next_hop_return;
+ uint8_t depth;
int32_t status = 0;
/* Add rule that covers a TBL24 range previously invalid & lookup
{
struct rte_lpm *lpm = NULL;
- uint32_t ip;
- uint8_t depth, next_hop_add, next_hop_return;
+ uint32_t ip, next_hop_add, next_hop_return;
+ uint8_t depth;
int32_t status = 0;
lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
test12(void)
{
__m128i ipx4;
- uint16_t hop[4];
+ uint32_t hop[4];
struct rte_lpm *lpm = NULL;
- uint32_t ip, i;
- uint8_t depth, next_hop_add, next_hop_return;
+ uint32_t ip, i, next_hop_add, next_hop_return;
+ uint8_t depth;
int32_t status = 0;
lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
(next_hop_return == next_hop_add));
ipx4 = _mm_set_epi32(ip, ip + 1, ip, ip - 1);
- rte_lpm_lookupx4(lpm, ipx4, hop, UINT16_MAX);
- TEST_LPM_ASSERT(hop[0] == UINT16_MAX);
+ rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
+ TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
TEST_LPM_ASSERT(hop[1] == next_hop_add);
- TEST_LPM_ASSERT(hop[2] == UINT16_MAX);
+ TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
TEST_LPM_ASSERT(hop[3] == next_hop_add);
status = rte_lpm_delete(lpm, ip, depth);
test13(void)
{
struct rte_lpm *lpm = NULL;
- uint32_t ip, i;
- uint8_t depth, next_hop_add_1, next_hop_add_2, next_hop_return;
+ uint32_t ip, i, next_hop_add_1, next_hop_add_2, next_hop_return;
+ uint8_t depth;
int32_t status = 0;
lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
 * that we have enough storage for all rules at that depth */
struct rte_lpm *lpm = NULL;
- uint32_t ip;
- uint8_t depth, next_hop_add, next_hop_return;
+ uint32_t ip, next_hop_add, next_hop_return;
+ uint8_t depth;
int32_t status = 0;
/* Add enough space for 256 rules for every depth */
256 * 32, 0);
/* ip loops through all possibilities for top 24 bits of address */
- for (ip = 0; ip < 0xFFFFFF; ip++){
+ for (ip = 0; ip < 0xFFFFFF; ip++) {
/* add an entry within a different tbl8 each time, since
* depth >24 and the top 24 bits are different */
if (rte_lpm_add(lpm, (ip << 8) + 0xF0, 30, 0) < 0)
const uint8_t d_ip_10_32 = 32,
d_ip_10_24 = 24,
d_ip_20_25 = 25;
- const uint8_t next_hop_ip_10_32 = 100,
+ const uint32_t next_hop_ip_10_32 = 100,
next_hop_ip_10_24 = 105,
next_hop_ip_20_25 = 111;
- uint8_t next_hop_return = 0;
+ uint32_t next_hop_return = 0;
int32_t status = 0;
lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
return -1;
status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
- uint8_t test_hop_10_32 = next_hop_return;
+ uint32_t test_hop_10_32 = next_hop_return;
TEST_LPM_ASSERT(status == 0);
TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
return -1;
status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
- uint8_t test_hop_10_24 = next_hop_return;
+ uint32_t test_hop_10_24 = next_hop_return;
TEST_LPM_ASSERT(status == 0);
TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
return -1;
status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
- uint8_t test_hop_20_25 = next_hop_return;
+ uint32_t test_hop_20_25 = next_hop_return;
TEST_LPM_ASSERT(status == 0);
TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);
return -1;
}
- if (test_hop_10_24 == test_hop_20_25){
+ if (test_hop_10_24 == test_hop_20_25) {
printf("Next hop return equal\n");
return -1;
}
printf("--------------------------- \n");
/* Count depths. */
- for(i = 1; i <= 32; i++) {
+ for (i = 1; i <= 32; i++) {
unsigned depth_counter = 0;
double percent_hits;
struct rte_lpm *lpm = NULL;
uint64_t begin, total_time, lpm_used_entries = 0;
unsigned i, j;
- uint8_t next_hop_add = 0xAA, next_hop_return = 0;
+ uint32_t next_hop_add = 0xAA, next_hop_return = 0;
int status = 0;
uint64_t cache_line_counter = 0;
int64_t count = 0;
if (lpm->tbl24[i].valid)
lpm_used_entries++;
- if (i % 32 == 0){
+ if (i % 32 == 0) {
if ((uint64_t)count < lpm_used_entries) {
cache_line_counter++;
count = lpm_used_entries;
printf("64 byte Cache entries used = %u (%u bytes)\n",
(unsigned) cache_line_counter, (unsigned) cache_line_counter * 64);
- printf("Average LPM Add: %g cycles\n", (double)total_time / NUM_ROUTE_ENTRIES);
+ printf("Average LPM Add: %g cycles\n",
+ (double)total_time / NUM_ROUTE_ENTRIES);
/* Measure single Lookup */
total_time = 0;
count = 0;
- for (i = 0; i < ITERATIONS; i ++) {
+ for (i = 0; i < ITERATIONS; i++) {
static uint32_t ip_batch[BATCH_SIZE];
- for (j = 0; j < BATCH_SIZE; j ++)
+ for (j = 0; j < BATCH_SIZE; j++)
ip_batch[j] = rte_rand();
/* Lookup per batch */
begin = rte_rdtsc();
- for (j = 0; j < BATCH_SIZE; j ++) {
+ for (j = 0; j < BATCH_SIZE; j++) {
if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
count++;
}
/* Measure bulk Lookup */
total_time = 0;
count = 0;
- for (i = 0; i < ITERATIONS; i ++) {
+ for (i = 0; i < ITERATIONS; i++) {
static uint32_t ip_batch[BATCH_SIZE];
- uint16_t next_hops[BULK_SIZE];
+ uint32_t next_hops[BULK_SIZE];
/* Create array of random IP addresses */
- for (j = 0; j < BATCH_SIZE; j ++)
+ for (j = 0; j < BATCH_SIZE; j++)
ip_batch[j] = rte_rand();
/* Lookup per batch */
count = 0;
for (i = 0; i < ITERATIONS; i++) {
static uint32_t ip_batch[BATCH_SIZE];
- uint16_t next_hops[4];
+ uint32_t next_hops[4];
/* Create array of random IP addresses */
for (j = 0; j < BATCH_SIZE; j++)
ipx4 = _mm_loadu_si128((__m128i *)(ip_batch + j));
ipx4 = *(__m128i *)(ip_batch + j);
- rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT16_MAX);
+ rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT32_MAX);
for (k = 0; k < RTE_DIM(next_hops); k++)
- if (unlikely(next_hops[k] == UINT16_MAX))
+ if (unlikely(next_hops[k] == UINT32_MAX))
count++;
}
/*
* Find an existing lpm table and return a pointer to it.
*/
+struct rte_lpm_v20 *
+rte_lpm_find_existing_v20(const char *name)
+{
+ struct rte_lpm_v20 *l = NULL;
+ struct rte_tailq_entry *te;
+ struct rte_lpm_list *lpm_list;
+
+ lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
+
+ rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+ TAILQ_FOREACH(te, lpm_list, next) {
+ l = (struct rte_lpm_v20 *) te->data;
+ if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
+ break;
+ }
+ rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ if (te == NULL) {
+ rte_errno = ENOENT;
+ return NULL;
+ }
+
+ return l;
+}
+VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0);
+
struct rte_lpm *
-rte_lpm_find_existing(const char *name)
+rte_lpm_find_existing_v1604(const char *name)
{
struct rte_lpm *l = NULL;
struct rte_tailq_entry *te;
return l;
}
+BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04);
+MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name),
+ rte_lpm_find_existing_v1604);
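/*
 * A minimal sketch of how the rte_compat.h versioning macros used in this
 * file combine for one symbol, shown on a hypothetical my_func() (the
 * versioned symbols must also be listed in the library's version map):
 *
 *     int my_func_v20(int x);                       old 2.0 body
 *     int my_func_v1604(int x);                     new 16.04 body
 *
 *     VERSION_SYMBOL(my_func, _v20, 2.0);           old binaries keep _v20
 *     BIND_DEFAULT_SYMBOL(my_func, _v1604, 16.04);  new links bind _v1604
 *     MAP_STATIC_SYMBOL(int my_func(int x),
 *             my_func_v1604);                       static builds use the
 *                                                   new body directly
 */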
/*
* Allocates memory for LPM object
*/
+struct rte_lpm_v20 *
+rte_lpm_create_v20(const char *name, int socket_id, int max_rules,
+ __rte_unused int flags)
+{
+ char mem_name[RTE_LPM_NAMESIZE];
+ struct rte_lpm_v20 *lpm = NULL;
+ struct rte_tailq_entry *te;
+ uint32_t mem_size;
+ struct rte_lpm_list *lpm_list;
+
+ lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
+
+ RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2);
+
+ /* Check user arguments. */
+ if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
+
+ /* Determine the amount of memory to allocate. */
+ mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /* guarantee there's no existing */
+ TAILQ_FOREACH(te, lpm_list, next) {
+ lpm = (struct rte_lpm_v20 *) te->data;
+ if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
+ break;
+ }
+ if (te != NULL)
+ goto exit;
+
+ /* allocate tailq entry */
+ te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
+ if (te == NULL) {
+ RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
+ goto exit;
+ }
+
+ /* Allocate memory to store the LPM data structures. */
+ lpm = (struct rte_lpm_v20 *)rte_zmalloc_socket(mem_name, mem_size,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (lpm == NULL) {
+ RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
+ rte_free(te);
+ goto exit;
+ }
+
+ /* Save user arguments. */
+ lpm->max_rules = max_rules;
+ snprintf(lpm->name, sizeof(lpm->name), "%s", name);
+
+ te->data = (void *) lpm;
+
+ TAILQ_INSERT_TAIL(lpm_list, te, next);
+
+exit:
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ return lpm;
+}
+VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);
+
struct rte_lpm *
-rte_lpm_create(const char *name, int socket_id, int max_rules,
+rte_lpm_create_v1604(const char *name, int socket_id, int max_rules,
__rte_unused int flags)
{
char mem_name[RTE_LPM_NAMESIZE];
lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
- RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 2);
+ RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);
/* Check user arguments. */
- if ((name == NULL) || (socket_id < -1) || (max_rules == 0)){
+ if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
rte_errno = EINVAL;
return NULL;
}
return lpm;
}
+BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04);
+MAP_STATIC_SYMBOL(
+ struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
+ int max_rules, int flags), rte_lpm_create_v1604);
/*
* Deallocates memory for given LPM table.
*/
void
-rte_lpm_free(struct rte_lpm *lpm)
+rte_lpm_free_v20(struct rte_lpm_v20 *lpm)
+{
+ struct rte_lpm_list *lpm_list;
+ struct rte_tailq_entry *te;
+
+ /* Check user arguments. */
+ if (lpm == NULL)
+ return;
+
+ lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /* find our tailq entry */
+ TAILQ_FOREACH(te, lpm_list, next) {
+ if (te->data == (void *) lpm)
+ break;
+ }
+ if (te == NULL) {
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ return;
+ }
+
+ TAILQ_REMOVE(lpm_list, te, next);
+
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ rte_free(lpm);
+ rte_free(te);
+}
+VERSION_SYMBOL(rte_lpm_free, _v20, 2.0);
+
+void
+rte_lpm_free_v1604(struct rte_lpm *lpm)
{
struct rte_lpm_list *lpm_list;
struct rte_tailq_entry *te;
rte_free(lpm);
rte_free(te);
}
+BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04);
+MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm),
+ rte_lpm_free_v1604);
/*
* Adds a rule to the rule table.
* NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
*/
static inline int32_t
-rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
+rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
uint8_t next_hop)
{
uint32_t rule_gindex, rule_index, last_rule;
for (i = depth - 1; i > 0; i--) {
if (lpm->rule_info[i - 1].used_rules > 0) {
- rule_index = lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules;
+ rule_index = lpm->rule_info[i - 1].first_rule
+ + lpm->rule_info[i - 1].used_rules;
+ break;
+ }
+ }
+ if (rule_index == lpm->max_rules)
+ return -ENOSPC;
+
+ lpm->rule_info[depth - 1].first_rule = rule_index;
+ }
+
+ /* Make room for the new rule in the array. */
+ for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
+ if (lpm->rule_info[i - 1].first_rule
+ + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
+ return -ENOSPC;
+
+ if (lpm->rule_info[i - 1].used_rules > 0) {
+ lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
+ + lpm->rule_info[i - 1].used_rules]
+ = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
+ lpm->rule_info[i - 1].first_rule++;
+ }
+ }
+
+ /* Add the new rule. */
+ lpm->rules_tbl[rule_index].ip = ip_masked;
+ lpm->rules_tbl[rule_index].next_hop = next_hop;
+
+ /* Increment the used rules counter for this rule group. */
+ lpm->rule_info[depth - 1].used_rules++;
+
+ return rule_index;
+}
+
+static inline int32_t
+rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
+ uint32_t next_hop)
+{
+ uint32_t rule_gindex, rule_index, last_rule;
+ int i;
+
+ VERIFY_DEPTH(depth);
+
+ /* Scan through rule group to see if rule already exists. */
+ if (lpm->rule_info[depth - 1].used_rules > 0) {
+
+ /* rule_gindex stands for rule group index. */
+ rule_gindex = lpm->rule_info[depth - 1].first_rule;
+ /* Initialise rule_index to point to start of rule group. */
+ rule_index = rule_gindex;
+ /* Last rule = Last used rule in this rule group. */
+ last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
+
+ for (; rule_index < last_rule; rule_index++) {
+
+ /* If rule already exists update its next_hop and return. */
+ if (lpm->rules_tbl[rule_index].ip == ip_masked) {
+ lpm->rules_tbl[rule_index].next_hop = next_hop;
+
+ return rule_index;
+ }
+ }
+
+ if (rule_index == lpm->max_rules)
+ return -ENOSPC;
+ } else {
+ /* Calculate the position in which the rule will be stored. */
+ rule_index = 0;
+
+ for (i = depth - 1; i > 0; i--) {
+ if (lpm->rule_info[i - 1].used_rules > 0) {
+ rule_index = lpm->rule_info[i - 1].first_rule
+ + lpm->rule_info[i - 1].used_rules;
break;
}
}
/* Make room for the new rule in the array. */
for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
- if (lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
+ if (lpm->rule_info[i - 1].first_rule
+ + lpm->rule_info[i - 1].used_rules == lpm->max_rules)
return -ENOSPC;
if (lpm->rule_info[i - 1].used_rules > 0) {
- lpm->rules_tbl[lpm->rule_info[i - 1].first_rule + lpm->rule_info[i - 1].used_rules]
+ lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
+ + lpm->rule_info[i - 1].used_rules]
= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
lpm->rule_info[i - 1].first_rule++;
}
* NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
*/
static inline void
-rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
+rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth)
+{
+ int i;
+
+ VERIFY_DEPTH(depth);
+
+ lpm->rules_tbl[rule_index] =
+ lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
+ + lpm->rule_info[depth - 1].used_rules - 1];
+
+ for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
+ if (lpm->rule_info[i].used_rules > 0) {
+ lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
+ lpm->rules_tbl[lpm->rule_info[i].first_rule
+ + lpm->rule_info[i].used_rules - 1];
+ lpm->rule_info[i].first_rule--;
+ }
+ }
+
+ lpm->rule_info[depth - 1].used_rules--;
+}
+
+static inline void
+rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
{
int i;
VERIFY_DEPTH(depth);
- lpm->rules_tbl[rule_index] = lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
+ lpm->rules_tbl[rule_index] =
+ lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
+ lpm->rule_info[depth - 1].used_rules - 1];
for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
if (lpm->rule_info[i].used_rules > 0) {
lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
- lpm->rules_tbl[lpm->rule_info[i].first_rule + lpm->rule_info[i].used_rules - 1];
+ lpm->rules_tbl[lpm->rule_info[i].first_rule
+ + lpm->rule_info[i].used_rules - 1];
lpm->rule_info[i].first_rule--;
}
}
* NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
*/
static inline int32_t
-rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
+rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth)
+{
+ uint32_t rule_gindex, last_rule, rule_index;
+
+ VERIFY_DEPTH(depth);
+
+ rule_gindex = lpm->rule_info[depth - 1].first_rule;
+ last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;
+
+ /* Scan used rules at given depth to find rule. */
+ for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
+ /* If rule is found return the rule index. */
+ if (lpm->rules_tbl[rule_index].ip == ip_masked)
+ return rule_index;
+ }
+
+ /* If rule is not found return -EINVAL. */
+ return -EINVAL;
+}
+
+static inline int32_t
+rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
{
uint32_t rule_gindex, last_rule, rule_index;
* Find, clean and allocate a tbl8.
*/
static inline int32_t
-tbl8_alloc(struct rte_lpm_tbl_entry *tbl8)
+tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
+{
+ uint32_t group_idx; /* tbl8 group index. */
+ struct rte_lpm_tbl_entry_v20 *tbl8_entry;
+
+ /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
+ for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
+ group_idx++) {
+ tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
+ /* If a free tbl8 group is found clean it and set as VALID. */
+ if (!tbl8_entry->valid_group) {
+ memset(&tbl8_entry[0], 0,
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
+ sizeof(tbl8_entry[0]));
+
+ tbl8_entry->valid_group = VALID;
+
+ /* Return group index for allocated tbl8 group. */
+ return group_idx;
+ }
+ }
+
+ /* If there are no tbl8 groups free then return error. */
+ return -ENOSPC;
+}
+
+static inline int32_t
+tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8)
{
uint32_t group_idx; /* tbl8 group index. */
struct rte_lpm_tbl_entry *tbl8_entry;
/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
group_idx++) {
- tbl8_entry = &tbl8[group_idx *
- RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
+ tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
/* If a free tbl8 group is found clean it and set as VALID. */
if (!tbl8_entry->valid_group) {
memset(&tbl8_entry[0], 0,
}
static inline void
-tbl8_free(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
+tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
+{
+	/* Set tbl8 group invalid */
+ tbl8[tbl8_group_start].valid_group = INVALID;
+}
+
+static inline void
+tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
{
	/* Set tbl8 group invalid */
tbl8[tbl8_group_start].valid_group = INVALID;
}
static inline int32_t
-add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
uint8_t next_hop)
{
uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
lpm->tbl24[i].depth <= depth)) {
- struct rte_lpm_tbl_entry new_tbl24_entry = {
+ struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
{ .next_hop = next_hop, },
.valid = VALID,
.valid_group = 0,
for (j = tbl8_index; j < tbl8_group_end; j++) {
if (!lpm->tbl8[j].valid ||
lpm->tbl8[j].depth <= depth) {
- struct rte_lpm_tbl_entry
+ struct rte_lpm_tbl_entry_v20
new_tbl8_entry = {
.valid = VALID,
.valid_group = VALID,
}
static inline int32_t
-add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
- uint8_t next_hop)
+add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+ uint32_t next_hop)
{
- uint32_t tbl24_index;
- int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
- tbl8_range, i;
+#define group_idx next_hop
+ uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
- tbl24_index = (ip_masked >> 8);
- tbl8_range = depth_to_range(depth);
+ /* Calculate the index into Table24. */
+ tbl24_index = ip >> 8;
+ tbl24_range = depth_to_range(depth);
- if (!lpm->tbl24[tbl24_index].valid) {
- /* Search for a free tbl8 group. */
- tbl8_group_index = tbl8_alloc(lpm->tbl8);
+ for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
+ /*
+ * For invalid OR valid and non-extended tbl 24 entries set
+ * entry.
+ */
+ if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
+ lpm->tbl24[i].depth <= depth)) {
+
+ struct rte_lpm_tbl_entry new_tbl24_entry = {
+ .next_hop = next_hop,
+ .valid = VALID,
+ .valid_group = 0,
+ .depth = depth,
+ };
+
+ /* Setting tbl24 entry in one go to avoid race
+ * conditions
+ */
+ lpm->tbl24[i] = new_tbl24_entry;
+
+ continue;
+ }
+
+ if (lpm->tbl24[i].valid_group == 1) {
+ /* If tbl24 entry is valid and extended calculate the
+ * index into tbl8.
+ */
+ tbl8_index = lpm->tbl24[i].group_idx *
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+ tbl8_group_end = tbl8_index +
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+ for (j = tbl8_index; j < tbl8_group_end; j++) {
+ if (!lpm->tbl8[j].valid ||
+ lpm->tbl8[j].depth <= depth) {
+ struct rte_lpm_tbl_entry
+ new_tbl8_entry = {
+ .valid = VALID,
+ .valid_group = VALID,
+ .depth = depth,
+ .next_hop = next_hop,
+ };
+
+ /*
+ * Setting tbl8 entry in one go to avoid
+ * race conditions
+ */
+ lpm->tbl8[j] = new_tbl8_entry;
+
+ continue;
+ }
+ }
+ }
+ }
+#undef group_idx
+ return 0;
+}
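/*
 * Note on "#define group_idx next_hop" above: in the widened entry both
 * names address the same 24-bit field, which carries a forwarding next hop
 * in a plain entry and a tbl8 group index in an extended one. A rough
 * illustration, not the real entry definition:
 *
 *     union payload_sketch {
 *             uint32_t next_hop  :24;   // plain entry: next hop
 *             uint32_t group_idx :24;   // extended entry: tbl8 group
 *     };
 */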
+
+static inline int32_t
+add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
+ uint8_t next_hop)
+{
+ uint32_t tbl24_index;
+ int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
+ tbl8_range, i;
+
+ tbl24_index = (ip_masked >> 8);
+ tbl8_range = depth_to_range(depth);
+
+ if (!lpm->tbl24[tbl24_index].valid) {
+ /* Search for a free tbl8 group. */
+ tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
/* Check tbl8 allocation was successful. */
if (tbl8_group_index < 0) {
* so assign whole structure in one go
*/
- struct rte_lpm_tbl_entry new_tbl24_entry = {
+ struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
{ .group_idx = (uint8_t)tbl8_group_index, },
.valid = VALID,
.valid_group = 1,
lpm->tbl24[tbl24_index] = new_tbl24_entry;
- }/* If valid entry but not extended calculate the index into Table8. */
+ } /* If valid entry but not extended calculate the index into Table8. */
else if (lpm->tbl24[tbl24_index].valid_group == 0) {
/* Search for free tbl8 group. */
- tbl8_group_index = tbl8_alloc(lpm->tbl8);
+ tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
if (tbl8_group_index < 0) {
return tbl8_group_index;
* so assign whole structure in one go.
*/
- struct rte_lpm_tbl_entry new_tbl24_entry = {
+ struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
{ .group_idx = (uint8_t)tbl8_group_index, },
.valid = VALID,
.valid_group = 1,
lpm->tbl24[tbl24_index] = new_tbl24_entry;
+ } else { /*
+		* If it is a valid, extended entry, calculate the index into tbl8.
+ */
+ tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
+ tbl8_group_start = tbl8_group_index *
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+ tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
+
+ for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+
+ if (!lpm->tbl8[i].valid ||
+ lpm->tbl8[i].depth <= depth) {
+ struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
+ .valid = VALID,
+ .depth = depth,
+ .next_hop = next_hop,
+ .valid_group = lpm->tbl8[i].valid_group,
+ };
+
+ /*
+ * Setting tbl8 entry in one go to avoid race
+ * condition
+ */
+ lpm->tbl8[i] = new_tbl8_entry;
+
+ continue;
+ }
+ }
}
- else { /*
+
+ return 0;
+}
+
+static inline int32_t
+add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
+ uint32_t next_hop)
+{
+#define group_idx next_hop
+ uint32_t tbl24_index;
+ int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
+ tbl8_range, i;
+
+ tbl24_index = (ip_masked >> 8);
+ tbl8_range = depth_to_range(depth);
+
+ if (!lpm->tbl24[tbl24_index].valid) {
+ /* Search for a free tbl8 group. */
+ tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8);
+
+ /* Check tbl8 allocation was successful. */
+ if (tbl8_group_index < 0) {
+ return tbl8_group_index;
+ }
+
+ /* Find index into tbl8 and range. */
+ tbl8_index = (tbl8_group_index *
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
+ (ip_masked & 0xFF);
+
+ /* Set tbl8 entry. */
+ for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+ lpm->tbl8[i].depth = depth;
+ lpm->tbl8[i].next_hop = next_hop;
+ lpm->tbl8[i].valid = VALID;
+ }
+
+ /*
+ * Update tbl24 entry to point to new tbl8 entry. Note: The
+ * ext_flag and tbl8_index need to be updated simultaneously,
+ * so assign whole structure in one go
+ */
+
+ struct rte_lpm_tbl_entry new_tbl24_entry = {
+ .group_idx = (uint8_t)tbl8_group_index,
+ .valid = VALID,
+ .valid_group = 1,
+ .depth = 0,
+ };
+
+ lpm->tbl24[tbl24_index] = new_tbl24_entry;
+
+ } /* If valid entry but not extended calculate the index into Table8. */
+ else if (lpm->tbl24[tbl24_index].valid_group == 0) {
+ /* Search for free tbl8 group. */
+ tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8);
+
+ if (tbl8_group_index < 0) {
+ return tbl8_group_index;
+ }
+
+ tbl8_group_start = tbl8_group_index *
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+ tbl8_group_end = tbl8_group_start +
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+ /* Populate new tbl8 with tbl24 value. */
+ for (i = tbl8_group_start; i < tbl8_group_end; i++) {
+ lpm->tbl8[i].valid = VALID;
+ lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
+ lpm->tbl8[i].next_hop =
+ lpm->tbl24[tbl24_index].next_hop;
+ }
+
+ tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
+
+ /* Insert new rule into the tbl8 entry. */
+ for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
+ if (!lpm->tbl8[i].valid ||
+ lpm->tbl8[i].depth <= depth) {
+ lpm->tbl8[i].valid = VALID;
+ lpm->tbl8[i].depth = depth;
+ lpm->tbl8[i].next_hop = next_hop;
+
+ continue;
+ }
+ }
+
+ /*
+ * Update tbl24 entry to point to new tbl8 entry. Note: The
+ * ext_flag and tbl8_index need to be updated simultaneously,
+ * so assign whole structure in one go.
+ */
+
+ struct rte_lpm_tbl_entry new_tbl24_entry = {
+ .group_idx = (uint8_t)tbl8_group_index,
+ .valid = VALID,
+ .valid_group = 1,
+ .depth = 0,
+ };
+
+ lpm->tbl24[tbl24_index] = new_tbl24_entry;
+
+ } else { /*
		* If it is a valid, extended entry, calculate the index into tbl8.
*/
tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
}
}
}
-
+#undef group_idx
return 0;
}
* Add a route
*/
int
-rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
uint8_t next_hop)
{
int32_t rule_index, status = 0;
ip_masked = ip & depth_to_mask(depth);
/* Add the rule to the rule table. */
- rule_index = rule_add(lpm, ip_masked, depth, next_hop);
+ rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop);
	/* If there is no space available for the new rule, return an error. */
if (rule_index < 0) {
}
if (depth <= MAX_DEPTH_TBL24) {
- status = add_depth_small(lpm, ip_masked, depth, next_hop);
+ status = add_depth_small_v20(lpm, ip_masked, depth, next_hop);
+ } else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */
+ status = add_depth_big_v20(lpm, ip_masked, depth, next_hop);
+
+ /*
+	 * If add fails due to exhaustion of tbl8 extensions, delete
+	 * the rule that was added to the rule table.
+ */
+ if (status < 0) {
+ rule_delete_v20(lpm, rule_index, depth);
+
+ return status;
+ }
+ }
+
+ return 0;
+}
+VERSION_SYMBOL(rte_lpm_add, _v20, 2.0);
+
+int
+rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+ uint32_t next_hop)
+{
+ int32_t rule_index, status = 0;
+ uint32_t ip_masked;
+
+ /* Check user arguments. */
+ if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
+ return -EINVAL;
+
+ ip_masked = ip & depth_to_mask(depth);
+
+ /* Add the rule to the rule table. */
+ rule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop);
+
+	/* If there is no space available for the new rule, return an error. */
+ if (rule_index < 0) {
+ return rule_index;
}
- else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */
- status = add_depth_big(lpm, ip_masked, depth, next_hop);
+
+ if (depth <= MAX_DEPTH_TBL24) {
+ status = add_depth_small_v1604(lpm, ip_masked, depth, next_hop);
+ } else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */
+ status = add_depth_big_v1604(lpm, ip_masked, depth, next_hop);
/*
		 * If add fails due to exhaustion of tbl8 extensions, delete
		 * the rule that was added to the rule table.
*/
if (status < 0) {
- rule_delete(lpm, rule_index, depth);
+ rule_delete_v1604(lpm, rule_index, depth);
return status;
}
return 0;
}
+BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04);
+MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip,
+ uint8_t depth, uint32_t next_hop), rte_lpm_add_v1604);
+
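/*
 * Minimal usage sketch of the widened add/lookup pair. Assumes an
 * initialized EAL, <stdio.h>, and the IPv4() helper from <rte_ip.h>;
 * the table name, size and 24-bit next hop value are illustrative:
 */
static void __rte_unused
lpm_v1604_usage_sketch(void)
{
	struct rte_lpm *sketch;
	uint32_t hop;

	sketch = rte_lpm_create("sketch", SOCKET_ID_ANY, 256, 0);
	if (sketch == NULL)
		return;
	/* next hops may now carry up to 24 bits instead of 8 */
	if (rte_lpm_add(sketch, IPv4(10, 0, 0, 0), 24, 0xABCDEF) == 0 &&
	    rte_lpm_lookup(sketch, IPv4(10, 0, 0, 1), &hop) == 0 &&
	    hop == 0xABCDEF)
		printf("24-bit next hop stored and returned\n");
	rte_lpm_free(sketch);
}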
+/*
+ * Look for a rule in the high-level rules table
+ */
+int
+rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
+uint8_t *next_hop)
+{
+ uint32_t ip_masked;
+ int32_t rule_index;
+
+ /* Check user arguments. */
+ if ((lpm == NULL) ||
+ (next_hop == NULL) ||
+ (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
+ return -EINVAL;
+
+ /* Look for the rule using rule_find. */
+ ip_masked = ip & depth_to_mask(depth);
+ rule_index = rule_find_v20(lpm, ip_masked, depth);
+
+ if (rule_index >= 0) {
+ *next_hop = lpm->rules_tbl[rule_index].next_hop;
+ return 1;
+ }
+
+ /* If rule is not found return 0. */
+ return 0;
+}
+VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0);
+
+int
+rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+uint32_t *next_hop)
+{
+ uint32_t ip_masked;
+ int32_t rule_index;
+
+ /* Check user arguments. */
+ if ((lpm == NULL) ||
+ (next_hop == NULL) ||
+ (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
+ return -EINVAL;
+
+ /* Look for the rule using rule_find. */
+ ip_masked = ip & depth_to_mask(depth);
+ rule_index = rule_find_v1604(lpm, ip_masked, depth);
+
+ if (rule_index >= 0) {
+ *next_hop = lpm->rules_tbl[rule_index].next_hop;
+ return 1;
+ }
+
+ /* If rule is not found return 0. */
+ return 0;
+}
+BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04);
+MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip,
+ uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604);
+
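/*
 * Presence-check usage sketch; the out-parameter is now uint32_t *
 * (table and values illustrative, IPv4() from <rte_ip.h>):
 *
 *     uint32_t nh;
 *     if (rte_lpm_is_rule_present(lpm, IPv4(10, 0, 0, 0), 24, &nh) == 1)
 *             printf("rule present, next hop %u\n", nh);
 */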
+static inline int32_t
+find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
+ uint8_t *sub_rule_depth)
+{
+ int32_t rule_index;
+ uint32_t ip_masked;
+ uint8_t prev_depth;
+
+ for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
+ ip_masked = ip & depth_to_mask(prev_depth);
+
+ rule_index = rule_find_v20(lpm, ip_masked, prev_depth);
+
+ if (rule_index >= 0) {
+ *sub_rule_depth = prev_depth;
+ return rule_index;
+ }
+ }
+
+ return -1;
+}
+
+static inline int32_t
+find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+ uint8_t *sub_rule_depth)
+{
+ int32_t rule_index;
+ uint32_t ip_masked;
+ uint8_t prev_depth;
+
+ for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
+ ip_masked = ip & depth_to_mask(prev_depth);
+
+ rule_index = rule_find_v1604(lpm, ip_masked, prev_depth);
+
+ if (rule_index >= 0) {
+ *sub_rule_depth = prev_depth;
+ return rule_index;
+ }
+ }
+
+ return -1;
+}
+
+static inline int32_t
+delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
+ uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
+{
+ uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
+
+ /* Calculate the range and index into Table24. */
+ tbl24_range = depth_to_range(depth);
+ tbl24_index = (ip_masked >> 8);
+
+ /*
+ * Firstly check the sub_rule_index. A -1 indicates no replacement rule
+ * and a positive number indicates a sub_rule_index.
+ */
+ if (sub_rule_index < 0) {
+ /*
+ * If no replacement rule exists then invalidate entries
+ * associated with this rule.
+ */
+ for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
+
+ if (lpm->tbl24[i].valid_group == 0 &&
+ lpm->tbl24[i].depth <= depth) {
+ lpm->tbl24[i].valid = INVALID;
+ } else if (lpm->tbl24[i].valid_group == 1) {
+ /*
+ * If TBL24 entry is extended, then there has
+ * to be a rule with depth >= 25 in the
+ * associated TBL8 group.
+ */
+
+ tbl8_group_index = lpm->tbl24[i].group_idx;
+ tbl8_index = tbl8_group_index *
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-/*
- * Look for a rule in the high-level rules table
- */
-int
-rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
-uint8_t *next_hop)
-{
- uint32_t ip_masked;
- int32_t rule_index;
+ for (j = tbl8_index; j < (tbl8_index +
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
- /* Check user arguments. */
- if ((lpm == NULL) ||
- (next_hop == NULL) ||
- (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
- return -EINVAL;
+ if (lpm->tbl8[j].depth <= depth)
+ lpm->tbl8[j].valid = INVALID;
+ }
+ }
+ }
+ } else {
+ /*
+ * If a replacement rule exists then modify entries
+ * associated with this rule.
+ */
- /* Look for the rule using rule_find. */
- ip_masked = ip & depth_to_mask(depth);
- rule_index = rule_find(lpm, ip_masked, depth);
+ struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
+ {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
+ .valid = VALID,
+ .valid_group = 0,
+ .depth = sub_rule_depth,
+ };
- if (rule_index >= 0) {
- *next_hop = lpm->rules_tbl[rule_index].next_hop;
- return 1;
- }
+ struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
+ .valid = VALID,
+ .valid_group = VALID,
+ .depth = sub_rule_depth,
+ .next_hop = lpm->rules_tbl
+ [sub_rule_index].next_hop,
+ };
- /* If rule is not found return 0. */
- return 0;
-}
+ for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
-static inline int32_t
-find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t *sub_rule_depth)
-{
- int32_t rule_index;
- uint32_t ip_masked;
- uint8_t prev_depth;
+ if (lpm->tbl24[i].valid_group == 0 &&
+ lpm->tbl24[i].depth <= depth) {
+ lpm->tbl24[i] = new_tbl24_entry;
+ } else if (lpm->tbl24[i].valid_group == 1) {
+ /*
+ * If TBL24 entry is extended, then there has
+ * to be a rule with depth >= 25 in the
+ * associated TBL8 group.
+ */
- for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
- ip_masked = ip & depth_to_mask(prev_depth);
+ tbl8_group_index = lpm->tbl24[i].group_idx;
+ tbl8_index = tbl8_group_index *
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
- rule_index = rule_find(lpm, ip_masked, prev_depth);
+ for (j = tbl8_index; j < (tbl8_index +
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
- if (rule_index >= 0) {
- *sub_rule_depth = prev_depth;
- return rule_index;
+ if (lpm->tbl8[j].depth <= depth)
+ lpm->tbl8[j] = new_tbl8_entry;
+ }
+ }
}
}
- return -1;
+ return 0;
}
static inline int32_t
-delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
+delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
+#define group_idx next_hop
uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
/* Calculate the range and index into Table24. */
for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
if (lpm->tbl24[i].valid_group == 0 &&
- lpm->tbl24[i].depth <= depth ) {
+ lpm->tbl24[i].depth <= depth) {
lpm->tbl24[i].valid = INVALID;
} else if (lpm->tbl24[i].valid_group == 1) {
/*
}
}
}
- }
- else {
+ } else {
/*
* If a replacement rule exists then modify entries
* associated with this rule.
*/
struct rte_lpm_tbl_entry new_tbl24_entry = {
- {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
+ .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
.valid = VALID,
.valid_group = 0,
.depth = sub_rule_depth,
for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
if (lpm->tbl24[i].valid_group == 0 &&
- lpm->tbl24[i].depth <= depth ) {
+ lpm->tbl24[i].depth <= depth) {
lpm->tbl24[i] = new_tbl24_entry;
} else if (lpm->tbl24[i].valid_group == 1) {
/*
}
}
}
-
+#undef group_idx
return 0;
}
* thus can be recycled
*/
static inline int32_t
-tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
+tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8,
+ uint32_t tbl8_group_start)
+{
+ uint32_t tbl8_group_end, i;
+ tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+ /*
+ * Check the first entry of the given tbl8. If it is invalid we know
+ * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
+ * (As they would affect all entries in a tbl8) and thus this table
+ * can not be recycled.
+ */
+ if (tbl8[tbl8_group_start].valid) {
+ /*
+ * If first entry is valid check if the depth is less than 24
+ * and if so check the rest of the entries to verify that they
+ * are all of this depth.
+ */
+ if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
+ for (i = (tbl8_group_start + 1); i < tbl8_group_end;
+ i++) {
+
+ if (tbl8[i].depth !=
+ tbl8[tbl8_group_start].depth) {
+
+ return -EEXIST;
+ }
+ }
+			/* If all entries are the same, return the tbl8 index */
+ return tbl8_group_start;
+ }
+
+ return -EEXIST;
+ }
+ /*
+ * If the first entry is invalid check if the rest of the entries in
+ * the tbl8 are invalid.
+ */
+ for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
+ if (tbl8[i].valid)
+ return -EEXIST;
+ }
+ /* If no valid entries are found then return -EINVAL. */
+ return -EINVAL;
+}
+
+static inline int32_t
+tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8,
+ uint32_t tbl8_group_start)
{
uint32_t tbl8_group_end, i;
tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
}
static inline int32_t
-delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
+delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
if (lpm->tbl8[i].depth <= depth)
lpm->tbl8[i].valid = INVALID;
}
+ } else {
+ /* Set new tbl8 entry. */
+ struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
+ .valid = VALID,
+ .depth = sub_rule_depth,
+ .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
+ .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
+ };
+
+ /*
+ * Loop through the range of entries on tbl8 for which the
+ * rule_to_delete must be modified.
+ */
+ for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+ if (lpm->tbl8[i].depth <= depth)
+ lpm->tbl8[i] = new_tbl8_entry;
+ }
+ }
+
+ /*
+ * Check if there are any valid entries in this tbl8 group. If all
+ * tbl8 entries are invalid we can free the tbl8 and invalidate the
+ * associated tbl24 entry.
+ */
+
+ tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start);
+
+ if (tbl8_recycle_index == -EINVAL) {
+ /* Set tbl24 before freeing tbl8 to avoid race condition. */
+ lpm->tbl24[tbl24_index].valid = 0;
+ tbl8_free_v20(lpm->tbl8, tbl8_group_start);
+ } else if (tbl8_recycle_index > -1) {
+ /* Update tbl24 entry. */
+ struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
+ { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
+ .valid = VALID,
+ .valid_group = 0,
+ .depth = lpm->tbl8[tbl8_recycle_index].depth,
+ };
+
+ /* Set tbl24 before freeing tbl8 to avoid race condition. */
+ lpm->tbl24[tbl24_index] = new_tbl24_entry;
+ tbl8_free_v20(lpm->tbl8, tbl8_group_start);
}
- else {
+
+ return 0;
+}
+
+static inline int32_t
+delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
+ uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
+{
+#define group_idx next_hop
+ uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
+ tbl8_range, i;
+ int32_t tbl8_recycle_index;
+
+ /*
+ * Calculate the index into tbl24 and range. Note: All depths larger
+ * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
+ */
+ tbl24_index = ip_masked >> 8;
+
+ /* Calculate the index into tbl8 and range. */
+ tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
+ tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+ tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
+ tbl8_range = depth_to_range(depth);
+
+ if (sub_rule_index < 0) {
+ /*
+ * Loop through the range of entries on tbl8 for which the
+ * rule_to_delete must be removed or modified.
+ */
+ for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+ if (lpm->tbl8[i].depth <= depth)
+ lpm->tbl8[i].valid = INVALID;
+ }
+ } else {
/* Set new tbl8 entry. */
struct rte_lpm_tbl_entry new_tbl8_entry = {
.valid = VALID,
* associated tbl24 entry.
*/
- tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start);
+ tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start);
- if (tbl8_recycle_index == -EINVAL){
+ if (tbl8_recycle_index == -EINVAL) {
/* Set tbl24 before freeing tbl8 to avoid race condition. */
lpm->tbl24[tbl24_index].valid = 0;
- tbl8_free(lpm->tbl8, tbl8_group_start);
- }
- else if (tbl8_recycle_index > -1) {
+ tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
+ } else if (tbl8_recycle_index > -1) {
/* Update tbl24 entry. */
struct rte_lpm_tbl_entry new_tbl24_entry = {
- { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
+ .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
.valid = VALID,
.valid_group = 0,
.depth = lpm->tbl8[tbl8_recycle_index].depth,
/* Set tbl24 before freeing tbl8 to avoid race condition. */
lpm->tbl24[tbl24_index] = new_tbl24_entry;
- tbl8_free(lpm->tbl8, tbl8_group_start);
+ tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
}
-
+#undef group_idx
return 0;
}
* Deletes a rule
*/
int
-rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
+rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth)
{
int32_t rule_to_delete_index, sub_rule_index;
uint32_t ip_masked;
	 * Find the index of the input rule, i.e. the rule to be deleted, in the
* rule table.
*/
- rule_to_delete_index = rule_find(lpm, ip_masked, depth);
+ rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth);
/*
* Check if rule_to_delete_index was found. If no rule was found the
return -EINVAL;
/* Delete the rule from the rule table. */
- rule_delete(lpm, rule_to_delete_index, depth);
+ rule_delete_v20(lpm, rule_to_delete_index, depth);
/*
* Find rule to replace the rule_to_delete. If there is no rule to
* entries associated with this rule.
*/
sub_rule_depth = 0;
- sub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth);
+ sub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth);
/*
* If the input depth value is less than 25 use function
* delete_depth_small otherwise use delete_depth_big.
*/
if (depth <= MAX_DEPTH_TBL24) {
- return delete_depth_small(lpm, ip_masked, depth,
+ return delete_depth_small_v20(lpm, ip_masked, depth,
sub_rule_index, sub_rule_depth);
+ } else { /* If depth > MAX_DEPTH_TBL24 */
+ return delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index,
+ sub_rule_depth);
+ }
+}
+VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0);
+
+int
+rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
+{
+ int32_t rule_to_delete_index, sub_rule_index;
+ uint32_t ip_masked;
+ uint8_t sub_rule_depth;
+ /*
+ * Check input arguments. Note: IP must be a positive integer of 32
+	 * bits in length, therefore it need not be checked.
+ */
+ if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
+ return -EINVAL;
}
- else { /* If depth > MAX_DEPTH_TBL24 */
- return delete_depth_big(lpm, ip_masked, depth, sub_rule_index, sub_rule_depth);
+
+ ip_masked = ip & depth_to_mask(depth);
+
+ /*
+	 * Find the index of the input rule, i.e. the rule to be deleted, in the
+ * rule table.
+ */
+ rule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth);
+
+ /*
+ * Check if rule_to_delete_index was found. If no rule was found the
+ * function rule_find returns -EINVAL.
+ */
+ if (rule_to_delete_index < 0)
+ return -EINVAL;
+
+ /* Delete the rule from the rule table. */
+ rule_delete_v1604(lpm, rule_to_delete_index, depth);
+
+ /*
+ * Find rule to replace the rule_to_delete. If there is no rule to
+ * replace the rule_to_delete we return -1 and invalidate the table
+ * entries associated with this rule.
+ */
+ sub_rule_depth = 0;
+ sub_rule_index = find_previous_rule_v1604(lpm, ip, depth, &sub_rule_depth);
+
+ /*
+ * If the input depth value is less than 25 use function
+ * delete_depth_small otherwise use delete_depth_big.
+ */
+ if (depth <= MAX_DEPTH_TBL24) {
+ return delete_depth_small_v1604(lpm, ip_masked, depth,
+ sub_rule_index, sub_rule_depth);
+ } else { /* If depth > MAX_DEPTH_TBL24 */
+ return delete_depth_big_v1604(lpm, ip_masked, depth, sub_rule_index,
+ sub_rule_depth);
}
}
+BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04);
+MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip,
+ uint8_t depth), rte_lpm_delete_v1604);
/*
* Delete all rules from the LPM table.
*/
void
-rte_lpm_delete_all(struct rte_lpm *lpm)
+rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm)
+{
+ /* Zero rule information. */
+ memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
+
+ /* Zero tbl24. */
+ memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
+
+ /* Zero tbl8. */
+ memset(lpm->tbl8, 0, sizeof(lpm->tbl8));
+
+	/* Delete all rules from the rules table. */
+ memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
+}
+VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0);
+
+void
+rte_lpm_delete_all_v1604(struct rte_lpm *lpm)
{
/* Zero rule information. */
memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
	/* Delete all rules from the rules table. */
memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
}
+BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04);
+MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm),
+ rte_lpm_delete_all_v1604);
#include <rte_memory.h>
#include <rte_common.h>
#include <rte_vect.h>
+#include <rte_compat.h>
#ifdef __cplusplus
extern "C" {
#endif
/** @internal bitmask with valid and valid_group fields set */
-#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x0300
+#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03000000
/** Bitmask used to indicate successful lookup */
-#define RTE_LPM_LOOKUP_SUCCESS 0x0100
+#define RTE_LPM_LOOKUP_SUCCESS 0x01000000
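/*
 * Both masks assume the widened 32-bit entry laid out below: the 24-bit
 * next hop (or tbl8 group index) in bits 0-23, valid in bit 24,
 * valid_group in bit 25 and depth in bits 26-31. A sanity sketch with an
 * illustrative entry value:
 *
 *     uint32_t e = 0x03000005;  // valid + valid_group, tbl8 group 5
 *     (e & RTE_LPM_VALID_EXT_ENTRY_BITMASK)
 *                 == RTE_LPM_VALID_EXT_ENTRY_BITMASK;  // extended entry
 *     (e & RTE_LPM_LOOKUP_SUCCESS) != 0;               // lookup hit
 *     (e & 0x00FFFFFF) == 5;                           // 24-bit payload
 */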
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/** @internal Tbl24 entry structure. */
-struct rte_lpm_tbl_entry {
+struct rte_lpm_tbl_entry_v20 {
/**
* Stores Next hop (tbl8 or tbl24 when valid_group is not set) or
* a group index pointing to a tbl8 structure (tbl24 only, when
uint8_t depth :6; /**< Rule depth. */
};
-#else
struct rte_lpm_tbl_entry {
+ /**
+ * Stores Next hop (tbl8 or tbl24 when valid_group is not set) or
+ * a group index pointing to a tbl8 structure (tbl24 only, when
+ * valid_group is set)
+ */
+ uint32_t next_hop :24;
+	/* Using a single uint32_t to store 3 values. */
+ uint32_t valid :1; /**< Validation flag. */
+ /**
+ * For tbl24:
+ * - valid_group == 0: entry stores a next hop
+ * - valid_group == 1: entry stores a group_index pointing to a tbl8
+ * For tbl8:
+ * - valid_group indicates whether the current tbl8 is in use or not
+ */
+ uint32_t valid_group :1;
+ uint32_t depth :6; /**< Rule depth. */
+};
+
+#else
+struct rte_lpm_tbl_entry_v20 {
uint8_t depth :6;
uint8_t valid_group :1;
uint8_t valid :1;
};
};
+struct rte_lpm_tbl_entry {
+ uint32_t depth :6;
+ uint32_t valid_group :1;
+ uint32_t valid :1;
+ uint32_t next_hop :24;
+
+};
+
#endif
/** @internal Rule structure. */
-struct rte_lpm_rule {
+struct rte_lpm_rule_v20 {
uint32_t ip; /**< Rule IP address. */
uint8_t next_hop; /**< Rule next hop. */
};
+struct rte_lpm_rule {
+ uint32_t ip; /**< Rule IP address. */
+ uint32_t next_hop; /**< Rule next hop. */
+};
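/*
 * Widening the per-rule next hop is free on common LP64 ABIs: the v20
 * struct already padded its trailing uint8_t out to uint32_t alignment,
 * so both layouts occupy 8 bytes per rule (a sketch, assuming a typical
 * ABI):
 *
 *     sizeof(struct rte_lpm_rule_v20) == sizeof(struct rte_lpm_rule)
 */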
+
/** @internal Contains metadata about the rules table. */
struct rte_lpm_rule_info {
uint32_t used_rules; /**< Used rules so far. */
};
/** @internal LPM structure. */
+struct rte_lpm_v20 {
+ /* LPM metadata. */
+ char name[RTE_LPM_NAMESIZE]; /**< Name of the lpm. */
+ uint32_t max_rules; /**< Max. balanced rules per lpm. */
+ struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */
+
+ /* LPM Tables. */
+ struct rte_lpm_tbl_entry_v20 tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
+ __rte_cache_aligned; /**< LPM tbl24 table. */
+ struct rte_lpm_tbl_entry_v20 tbl8[RTE_LPM_TBL8_NUM_ENTRIES]
+ __rte_cache_aligned; /**< LPM tbl8 table. */
+ struct rte_lpm_rule_v20 rules_tbl[0] \
+ __rte_cache_aligned; /**< LPM rules. */
+};
+
struct rte_lpm {
/* LPM metadata. */
char name[RTE_LPM_NAMESIZE]; /**< Name of the lpm. */
*/
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id, int max_rules, int flags);
+struct rte_lpm_v20 *
+rte_lpm_create_v20(const char *name, int socket_id, int max_rules, int flags);
+struct rte_lpm *
+rte_lpm_create_v1604(const char *name, int socket_id, int max_rules, int flags);
/**
* Find an existing LPM object and return a pointer to it.
*/
struct rte_lpm *
rte_lpm_find_existing(const char *name);
+struct rte_lpm_v20 *
+rte_lpm_find_existing_v20(const char *name);
+struct rte_lpm *
+rte_lpm_find_existing_v1604(const char *name);
/**
* Free an LPM object.
*/
void
rte_lpm_free(struct rte_lpm *lpm);
+void
+rte_lpm_free_v20(struct rte_lpm_v20 *lpm);
+void
+rte_lpm_free_v1604(struct rte_lpm *lpm);
/**
* Add a rule to the LPM table.
* 0 on success, negative value otherwise
*/
int
-rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop);
+rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop);
+int
+rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
+ uint8_t next_hop);
+int
+rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+ uint32_t next_hop);
/**
* Check if a rule is present in the LPM table,
*/
int
rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+uint32_t *next_hop);
+int
+rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
uint8_t *next_hop);
+int
+rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+uint32_t *next_hop);
/**
* Delete a rule from the LPM table.
*/
int
rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
+int
+rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth);
+int
+rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
/**
* Delete all rules from the LPM table.
*/
void
rte_lpm_delete_all(struct rte_lpm *lpm);
+void
+rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm);
+void
+rte_lpm_delete_all_v1604(struct rte_lpm *lpm);
/**
* Lookup an IP into the LPM table.
* -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
*/
static inline int
-rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop)
+rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint32_t *next_hop)
{
unsigned tbl24_index = (ip >> 8);
- uint16_t tbl_entry;
+ uint32_t tbl_entry;
+ const uint32_t *ptbl;
/* DEBUG: Check user input arguments. */
RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);
/* Copy tbl24 entry */
- tbl_entry = *(const uint16_t *)&lpm->tbl24[tbl24_index];
+ ptbl = (const uint32_t *)(&lpm->tbl24[tbl24_index]);
+ tbl_entry = *ptbl;
/* Copy tbl8 entry (only if needed) */
if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
unsigned tbl8_index = (uint8_t)ip +
- ((uint8_t)tbl_entry * RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
+ (((uint32_t)tbl_entry & 0x00FFFFFF) *
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
- tbl_entry = *(const uint16_t *)&lpm->tbl8[tbl8_index];
+ ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
+ tbl_entry = *ptbl;
}
- *next_hop = (uint8_t)tbl_entry;
+ *next_hop = ((uint32_t)tbl_entry & 0x00FFFFFF);
return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
}
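/*
 * Index arithmetic sketch for the tbl8 step above, with illustrative
 * numbers: for ip = 10.0.0.7 and an extended tbl24 entry whose 24-bit
 * payload is tbl8 group 3, the second-stage index is
 *
 *     tbl8_index = (uint8_t)ip                          // 7, the low byte
 *             + 3 * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;     // base of group 3
 */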
rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)
static inline int
-rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t * ips,
- uint16_t * next_hops, const unsigned n)
+rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,
+ uint32_t *next_hops, const unsigned n)
{
unsigned i;
unsigned tbl24_indexes[n];
+ const uint32_t *ptbl;
/* DEBUG: Check user input arguments. */
RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
for (i = 0; i < n; i++) {
/* Simply copy tbl24 entry to output */
- next_hops[i] = *(const uint16_t *)&lpm->tbl24[tbl24_indexes[i]];
+ ptbl = (const uint32_t *)&lpm->tbl24[tbl24_indexes[i]];
+ next_hops[i] = *ptbl;
/* Overwrite output with tbl8 entry if needed */
if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
unsigned tbl8_index = (uint8_t)ips[i] +
- ((uint8_t)next_hops[i] *
+ (((uint32_t)next_hops[i] & 0x00FFFFFF) *
RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
- next_hops[i] = *(const uint16_t *)&lpm->tbl8[tbl8_index];
+ ptbl = (const uint32_t *)&lpm->tbl8[tbl8_index];
+ next_hops[i] = *ptbl;
}
}
return 0;
}
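/*
 * Bulk lookup usage sketch: next_hops[] is now uint32_t and each element
 * is a raw table entry, so hits are tested with RTE_LPM_LOOKUP_SUCCESS
 * and the next hop sits in the low 24 bits. Table contents and addresses
 * are illustrative; IPv4() comes from <rte_ip.h>:
 */
static inline unsigned
lpm_bulk_usage_sketch(const struct rte_lpm *lpm)
{
	uint32_t ips[4] = { IPv4(10, 0, 0, 1), IPv4(10, 0, 0, 2),
			IPv4(10, 0, 0, 3), IPv4(10, 0, 0, 4) };
	uint32_t next_hops[4];
	unsigned j, hits = 0;

	rte_lpm_lookup_bulk(lpm, ips, next_hops, 4);
	for (j = 0; j < 4; j++)
		if (next_hops[j] & RTE_LPM_LOOKUP_SUCCESS)
			hits++; /* next hop: next_hops[j] & 0x00FFFFFF */
	return hits;
}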
/* Mask four results. */
-#define RTE_LPM_MASKX4_RES UINT64_C(0x00ff00ff00ff00ff)
+#define RTE_LPM_MASKX4_RES UINT64_C(0x00ffffff00ffffff)
/**
* Lookup four IP addresses in an LPM table.
* if lookup would fail.
*/
static inline void
-rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip, uint16_t hop[4],
- uint16_t defv)
+rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip, uint32_t hop[4],
+ uint32_t defv)
{
__m128i i24;
rte_xmm_t i8;
- uint16_t tbl[4];
- uint64_t idx, pt;
+ uint32_t tbl[4];
+ uint64_t idx, pt, pt2;
+ const uint32_t *ptbl;
const __m128i mask8 =
_mm_set_epi32(UINT8_MAX, UINT8_MAX, UINT8_MAX, UINT8_MAX);
/*
- * RTE_LPM_VALID_EXT_ENTRY_BITMASK for 4 LPM entries
- * as one 64-bit value (0x0300030003000300).
+ * RTE_LPM_VALID_EXT_ENTRY_BITMASK for 2 LPM entries
+ * as one 64-bit value (0x0300000003000000).
*/
const uint64_t mask_xv =
((uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK |
- (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 16 |
- (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 32 |
- (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 48);
+ (uint64_t)RTE_LPM_VALID_EXT_ENTRY_BITMASK << 32);
/*
- * RTE_LPM_LOOKUP_SUCCESS for 4 LPM entries
- * as one 64-bit value (0x0100010001000100).
+ * RTE_LPM_LOOKUP_SUCCESS for 2 LPM entries
+ * as one 64-bit value (0x0100000001000000).
*/
const uint64_t mask_v =
((uint64_t)RTE_LPM_LOOKUP_SUCCESS |
- (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 16 |
- (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 32 |
- (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 48);
+ (uint64_t)RTE_LPM_LOOKUP_SUCCESS << 32);
/* get 4 indexes for tbl24[]. */
i24 = _mm_srli_epi32(ip, CHAR_BIT);
idx = _mm_cvtsi128_si64(i24);
i24 = _mm_srli_si128(i24, sizeof(uint64_t));
- tbl[0] = *(const uint16_t *)&lpm->tbl24[(uint32_t)idx];
- tbl[1] = *(const uint16_t *)&lpm->tbl24[idx >> 32];
+ ptbl = (const uint32_t *)&lpm->tbl24[(uint32_t)idx];
+ tbl[0] = *ptbl;
+ ptbl = (const uint32_t *)&lpm->tbl24[idx >> 32];
+ tbl[1] = *ptbl;
idx = _mm_cvtsi128_si64(i24);
- tbl[2] = *(const uint16_t *)&lpm->tbl24[(uint32_t)idx];
- tbl[3] = *(const uint16_t *)&lpm->tbl24[idx >> 32];
+ ptbl = (const uint32_t *)&lpm->tbl24[(uint32_t)idx];
+ tbl[2] = *ptbl;
+ ptbl = (const uint32_t *)&lpm->tbl24[idx >> 32];
+ tbl[3] = *ptbl;
/* get 4 indexes for tbl8[]. */
i8.x = _mm_and_si128(ip, mask8);
pt = (uint64_t)tbl[0] |
- (uint64_t)tbl[1] << 16 |
- (uint64_t)tbl[2] << 32 |
- (uint64_t)tbl[3] << 48;
+ (uint64_t)tbl[1] << 32;
+ pt2 = (uint64_t)tbl[2] |
+ (uint64_t)tbl[3] << 32;
/* search successfully finished for all 4 IP addresses. */
- if (likely((pt & mask_xv) == mask_v)) {
- uintptr_t ph = (uintptr_t)hop;
- *(uint64_t *)ph = pt & RTE_LPM_MASKX4_RES;
+ if (likely((pt & mask_xv) == mask_v) &&
+ likely((pt2 & mask_xv) == mask_v)) {
+ *(uint64_t *)hop = pt & RTE_LPM_MASKX4_RES;
+ *(uint64_t *)(hop + 2) = pt2 & RTE_LPM_MASKX4_RES;
return;
}
RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
i8.u32[0] = i8.u32[0] +
(uint8_t)tbl[0] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
- tbl[0] = *(const uint16_t *)&lpm->tbl8[i8.u32[0]];
+ ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[0]];
+ tbl[0] = *ptbl;
}
- if (unlikely((pt >> 16 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+ if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
i8.u32[1] = i8.u32[1] +
(uint8_t)tbl[1] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
- tbl[1] = *(const uint16_t *)&lpm->tbl8[i8.u32[1]];
+ ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[1]];
+ tbl[1] = *ptbl;
}
- if (unlikely((pt >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+ if (unlikely((pt2 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
i8.u32[2] = i8.u32[2] +
(uint8_t)tbl[2] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
- tbl[2] = *(const uint16_t *)&lpm->tbl8[i8.u32[2]];
+ ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[2]];
+ tbl[2] = *ptbl;
}
- if (unlikely((pt >> 48 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+ if (unlikely((pt2 >> 32 & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
i8.u32[3] = i8.u32[3] +
(uint8_t)tbl[3] * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
- tbl[3] = *(const uint16_t *)&lpm->tbl8[i8.u32[3]];
+ ptbl = (const uint32_t *)&lpm->tbl8[i8.u32[3]];
+ tbl[3] = *ptbl;
}
- hop[0] = (tbl[0] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[0] : defv;
- hop[1] = (tbl[1] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[1] : defv;
- hop[2] = (tbl[2] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[2] : defv;
- hop[3] = (tbl[3] & RTE_LPM_LOOKUP_SUCCESS) ? (uint8_t)tbl[3] : defv;
+ hop[0] = (tbl[0] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[0] & 0x00FFFFFF : defv;
+ hop[1] = (tbl[1] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[1] & 0x00FFFFFF : defv;
+ hop[2] = (tbl[2] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[2] & 0x00FFFFFF : defv;
+ hop[3] = (tbl[3] & RTE_LPM_LOOKUP_SUCCESS) ? tbl[3] & 0x00FFFFFF : defv;
}
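/*
 * Vector lookup usage sketch, mirroring the updated tests above: four
 * addresses packed into one __m128i, hop[] now uint32_t, and misses fall
 * back to the caller-supplied default (UINT32_MAX here). hop[0]
 * corresponds to the lowest lane, i.e. the last _mm_set_epi32() argument.
 * Table contents and addresses are illustrative; IPv4() from <rte_ip.h>:
 */
static inline void
lpm_x4_usage_sketch(const struct rte_lpm *lpm)
{
	__m128i ipx4 = _mm_set_epi32(IPv4(10, 0, 0, 4), IPv4(10, 0, 0, 3),
			IPv4(10, 0, 0, 2), IPv4(10, 0, 0, 1));
	uint32_t hop[4];

	rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
	/* hop[0] is the result for 10.0.0.1, hop[3] for 10.0.0.4 */
}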
#ifdef __cplusplus