{
unsigned lcore_self = rte_lcore_id();
struct rte_lpm *lpm;
+ struct rte_lpm_config config;
+
+ config.max_rules = 4;
+ config.number_tbl8s = 256;
+ config.flags = 0;
char lpm_name[MAX_STRING_SIZE];
int i;
/* create the same lpm simultaneously on all threads */
for (i = 0; i < MAX_ITER_TIMES; i++) {
- lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, 4, 0);
+ lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, &config);
if ((NULL == lpm) && (rte_lpm_find_existing("fr_test_once") == NULL))
return -1;
}
/* create multiple LPM tables simultaneously */
for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
- lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, 4, 0);
+ lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, &config);
if (NULL == lpm)
return -1;
#define NUM_LPM_TESTS (sizeof(tests)/sizeof(tests[0]))
#define MAX_DEPTH 32
#define MAX_RULES 256
+#define NUMBER_TBL8S 256
#define PASS 0
/*
test0(void)
{
struct rte_lpm *lpm = NULL;
+ struct rte_lpm_config config;
+
+ config.max_rules = MAX_RULES;
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
/* rte_lpm_create: lpm name == NULL */
- lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, MAX_RULES, 0);
+ lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm == NULL);
/* rte_lpm_create: max_rules = 0 */
/* Note: __func__ inserts the function name, in this case "test0". */
- lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 0, 0);
+ config.max_rules = 0;
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm == NULL);
/* socket_id < -1 is invalid */
- lpm = rte_lpm_create(__func__, -2, MAX_RULES, 0);
+ config.max_rules = MAX_RULES;
+ lpm = rte_lpm_create(__func__, -2, &config);
TEST_LPM_ASSERT(lpm == NULL);
return PASS;
test1(void)
{
struct rte_lpm *lpm = NULL;
+ struct rte_lpm_config config;
+
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
int32_t i;
/* rte_lpm_free: Free NULL */
for (i = 0; i < 100; i++) {
- lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES - i, 0);
+ config.max_rules = MAX_RULES - i;
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
rte_lpm_free(lpm);
test2(void)
{
struct rte_lpm *lpm = NULL;
+ struct rte_lpm_config config;
+
+ config.max_rules = MAX_RULES;
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
- lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
rte_lpm_free(lpm);
test3(void)
{
struct rte_lpm *lpm = NULL;
+ struct rte_lpm_config config;
+
+ config.max_rules = MAX_RULES;
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
uint32_t ip = IPv4(0, 0, 0, 0), next_hop = 100;
uint8_t depth = 24;
int32_t status = 0;
TEST_LPM_ASSERT(status < 0);
/* Create valid lpm to use in rest of test. */
- lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
/* rte_lpm_add: depth < 1 */
test4(void)
{
struct rte_lpm *lpm = NULL;
+ struct rte_lpm_config config;
+
+ config.max_rules = MAX_RULES;
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
uint32_t ip = IPv4(0, 0, 0, 0);
uint8_t depth = 24;
int32_t status = 0;
TEST_LPM_ASSERT(status < 0);
/* Create valid lpm to use in rest of test. */
- lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
/* rte_lpm_delete: depth < 1 */
{
#if defined(RTE_LIBRTE_LPM_DEBUG)
struct rte_lpm *lpm = NULL;
+ struct rte_lpm_config config;
+
+ config.max_rules = MAX_RULES;
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
uint32_t ip = IPv4(0, 0, 0, 0), next_hop_return = 0;
int32_t status = 0;
TEST_LPM_ASSERT(status < 0);
/* Create valid lpm to use in rest of test. */
- lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
/* rte_lpm_lookup: depth < 1 */
test6(void)
{
struct rte_lpm *lpm = NULL;
+ struct rte_lpm_config config;
+
+ config.max_rules = MAX_RULES;
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
uint32_t ip = IPv4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
uint8_t depth = 24;
int32_t status = 0;
- lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
status = rte_lpm_add(lpm, ip, depth, next_hop_add);
__m128i ipx4;
uint32_t hop[4];
struct rte_lpm *lpm = NULL;
+ struct rte_lpm_config config;
+
+ config.max_rules = MAX_RULES;
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
uint32_t ip = IPv4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
uint8_t depth = 32;
int32_t status = 0;
- lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
status = rte_lpm_add(lpm, ip, depth, next_hop_add);
__m128i ipx4;
uint32_t hop[4];
struct rte_lpm *lpm = NULL;
+ struct rte_lpm_config config;
+
+ config.max_rules = MAX_RULES;
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
uint32_t ip1 = IPv4(127, 255, 255, 255), ip2 = IPv4(128, 0, 0, 0);
uint32_t next_hop_add, next_hop_return;
uint8_t depth;
int32_t status = 0;
- lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
/* Loop with rte_lpm_add. */
test9(void)
{
struct rte_lpm *lpm = NULL;
+ struct rte_lpm_config config;
+
+ config.max_rules = MAX_RULES;
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
uint32_t ip, ip_1, ip_2;
uint8_t depth, depth_1, depth_2;
uint32_t next_hop_add, next_hop_add_1, next_hop_add_2, next_hop_return;
depth = 24;
next_hop_add = 100;
- lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
status = rte_lpm_add(lpm, ip, depth, next_hop_add);
{
struct rte_lpm *lpm = NULL;
+ struct rte_lpm_config config;
+
+ config.max_rules = MAX_RULES;
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
uint32_t ip, next_hop_add, next_hop_return;
uint8_t depth;
int32_t status = 0;
/* Add rule that covers a TBL24 range previously invalid & lookup
* (& delete & lookup) */
- lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
ip = IPv4(128, 0, 0, 0);
{
struct rte_lpm *lpm = NULL;
+ struct rte_lpm_config config;
+
+ config.max_rules = MAX_RULES;
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
uint32_t ip, next_hop_add, next_hop_return;
uint8_t depth;
int32_t status = 0;
- lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
ip = IPv4(128, 0, 0, 0);
__m128i ipx4;
uint32_t hop[4];
struct rte_lpm *lpm = NULL;
+ struct rte_lpm_config config;
+
+ config.max_rules = MAX_RULES;
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
uint32_t ip, i, next_hop_add, next_hop_return;
uint8_t depth;
int32_t status = 0;
- lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
ip = IPv4(128, 0, 0, 0);
test13(void)
{
struct rte_lpm *lpm = NULL;
+ struct rte_lpm_config config;
+
+ config.max_rules = MAX_RULES;
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
uint32_t ip, i, next_hop_add_1, next_hop_add_2, next_hop_return;
uint8_t depth;
int32_t status = 0;
- lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
ip = IPv4(128, 0, 0, 0);
* that we have enough storage for all rules at that depth */
struct rte_lpm *lpm = NULL;
+ struct rte_lpm_config config;
+
+ config.max_rules = 256 * 32;
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
uint32_t ip, next_hop_add, next_hop_return;
uint8_t depth;
int32_t status = 0;
/* Add enough space for 256 rules for every depth */
- lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 256 * 32, 0);
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
depth = 32;
test15(void)
{
struct rte_lpm *lpm = NULL, *result = NULL;
+ struct rte_lpm_config config;
+
+ config.max_rules = 256 * 32;
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
/* Create lpm */
- lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, 256 * 32, 0);
+ lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
/* Try to find existing lpm */
test16(void)
{
uint32_t ip;
- struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY,
- 256 * 32, 0);
+ struct rte_lpm_config config;
+
+ config.max_rules = 256 * 32;
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
+ struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
/* ip loops through all possibilities for top 24 bits of address */
for (ip = 0; ip < 0xFFFFFF; ip++) {
break;
}
- if (ip != RTE_LPM_TBL8_NUM_GROUPS) {
+ if (ip != NUMBER_TBL8S) {
printf("Error, unexpected failure with filling tbl8 groups\n");
printf("Failed after %u additions, expected after %u\n",
- (unsigned)ip, (unsigned)RTE_LPM_TBL8_NUM_GROUPS);
+ (unsigned)ip, (unsigned)NUMBER_TBL8S);
}
rte_lpm_free(lpm);
test17(void)
{
struct rte_lpm *lpm = NULL;
+ struct rte_lpm_config config;
+
+ config.max_rules = MAX_RULES;
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
const uint32_t ip_10_32 = IPv4(10, 10, 10, 2);
const uint32_t ip_10_24 = IPv4(10, 10, 10, 0);
const uint32_t ip_20_25 = IPv4(10, 10, 20, 2);
uint32_t next_hop_return = 0;
int32_t status = 0;
- lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 0);
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
if ((status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32,
perf_test(void)
{
struct rte_lpm *lpm = NULL;
+ struct rte_lpm_config config;
+
+ config.max_rules = 1000000;
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
uint64_t begin, total_time, lpm_used_entries = 0;
unsigned i, j;
uint32_t next_hop_add = 0xAA, next_hop_return = 0;
print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);
- lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 1000000, 0);
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
/* Measure add. */
#ifdef RTE_LIBRTE_LPM
rte_errno=0;
- if ((rte_lpm_create("test_lpm", size, rte_socket_id(), 0) != NULL) &&
+ struct rte_lpm_config config;
+
+ config.max_rules = rte_socket_id();
+ config.number_tbl8s = 256;
+ config.flags = 0;
+ if ((rte_lpm_create("test_lpm", size, &config) != NULL) &&
(rte_lpm_find_existing("test_lpm") == NULL)){
printf("Error: unexpected return value from rte_lpm_create()\n");
return -1;
struct rte_table_lpm_params lpm_params = {
.name = "LPM",
.n_rules = 1 << 16,
+ .number_tbl8s = 1 << 8,
+ .flags = 0,
.entry_unique_size = 8,
.offset = APP_METADATA_OFFSET(0),
};
struct rte_table_lpm_params lpm_params = {
.name = "LPM",
.n_rules = 1 << 24,
+ .number_tbl8s = 1 << 8,
+ .flags = 0,
.entry_unique_size = entry_size,
.offset = APP_METADATA_OFFSET(1)
};
* The LPM ``next_hop`` field is extended from 8 bits to 24 bits for IPv4
while keeping ABI compatibility.
+* A new ``rte_lpm_config`` structure is used so that the LPM library allocates
+  exactly the amount of memory needed to hold the application's rules; the new
+  call pattern is sketched below. The previous ABI is kept for compatibility.
+
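+  A minimal, illustrative sketch (the rule and tbl8 counts shown here are
+  arbitrary)::
+
+      struct rte_lpm_config config = {
+              .max_rules = 1024,
+              .number_tbl8s = 256,
+              .flags = 0,
+      };
+      struct rte_lpm *lpm = rte_lpm_create("example", SOCKET_ID_ANY, &config);
+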
ABI Changes
-----------
struct rte_mempool *mp;
struct rte_lpm *lpm;
struct rte_lpm6 *lpm6;
+ struct rte_lpm_config lpm_config;
int socket;
unsigned lcore_id;
RTE_LOG(INFO, IP_FRAG, "Creating LPM table on socket %i\n", socket);
snprintf(buf, sizeof(buf), "IP_FRAG_LPM_%i", socket);
- lpm = rte_lpm_create(buf, socket, LPM_MAX_RULES, 0);
+ lpm_config.max_rules = LPM_MAX_RULES;
+ lpm_config.number_tbl8s = 256;
+ lpm_config.flags = 0;
+
+ lpm = rte_lpm_create(buf, socket, &lpm_config);
if (lpm == NULL) {
RTE_LOG(ERR, IP_FRAG, "Cannot create LPM table\n");
return -1;
char buf[PATH_MAX];
struct rte_lpm *lpm;
struct rte_lpm6 *lpm6;
+ struct rte_lpm_config lpm_config;
int socket;
unsigned lcore_id;
RTE_LOG(INFO, IP_RSMBL, "Creating LPM table on socket %i\n", socket);
snprintf(buf, sizeof(buf), "IP_RSMBL_LPM_%i", socket);
- lpm = rte_lpm_create(buf, socket, LPM_MAX_RULES, 0);
+ lpm_config.max_rules = LPM_MAX_RULES;
+ lpm_config.number_tbl8s = 256;
+ lpm_config.flags = 0;
+
+ lpm = rte_lpm_create(buf, socket, &lpm_config);
if (lpm == NULL) {
RTE_LOG(ERR, IP_RSMBL, "Cannot create LPM table\n");
return -1;
char s[64];
/* create the LPM table */
+ struct rte_lpm_config lpm_ipv4_config;
+
+ lpm_ipv4_config.max_rules = IPV4_L3FWD_LPM_MAX_RULES;
+ lpm_ipv4_config.number_tbl8s = 256;
+ lpm_ipv4_config.flags = 0;
+
snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socketid);
- ipv4_l3fwd_lookup_struct[socketid] = rte_lpm_create(s, socketid,
- IPV4_L3FWD_LPM_MAX_RULES, 0);
+ ipv4_l3fwd_lookup_struct[socketid] =
+ rte_lpm_create(s, socketid, &lpm_ipv4_config);
if (ipv4_l3fwd_lookup_struct[socketid] == NULL)
rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table"
" on socket %d\n", socketid);
int ret;
char s[64];
+ struct rte_lpm_config lpm_ipv4_config;
+
+ lpm_ipv4_config.max_rules = L3FWD_LPM_MAX_RULES;
+ lpm_ipv4_config.number_tbl8s = 256;
+ lpm_ipv4_config.flags = 0;
+
/* create the LPM table */
snprintf(s, sizeof(s), "L3FWD_LPM_%d", socketid);
- l3fwd_lookup_struct[socketid] = rte_lpm_create(s, socketid,
- L3FWD_LPM_MAX_RULES, 0);
+ l3fwd_lookup_struct[socketid] =
+ rte_lpm_create(s, socketid, &lpm_ipv4_config);
if (l3fwd_lookup_struct[socketid] == NULL)
rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table"
" on socket %d\n", socketid);
(sizeof(ipv6_l3fwd_lpm_route_array) / sizeof(ipv6_l3fwd_lpm_route_array[0]))
#define IPV4_L3FWD_LPM_MAX_RULES 1024
+#define IPV4_L3FWD_LPM_NUMBER_TBL8S (1 << 8)
#define IPV6_L3FWD_LPM_MAX_RULES 1024
#define IPV6_L3FWD_LPM_NUMBER_TBL8S (1 << 16)
setup_lpm(const int socketid)
{
struct rte_lpm6_config config;
+ struct rte_lpm_config config_ipv4;
unsigned i;
int ret;
char s[64];
/* create the LPM table */
+ config_ipv4.max_rules = IPV4_L3FWD_LPM_MAX_RULES;
+ config_ipv4.number_tbl8s = IPV4_L3FWD_LPM_NUMBER_TBL8S;
+ config_ipv4.flags = 0;
snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socketid);
- ipv4_l3fwd_lpm_lookup_struct[socketid] = rte_lpm_create(s, socketid,
- IPV4_L3FWD_LPM_MAX_RULES, 0);
+ ipv4_l3fwd_lpm_lookup_struct[socketid] =
+ rte_lpm_create(s, socketid, &config_ipv4);
if (ipv4_l3fwd_lpm_lookup_struct[socketid] == NULL)
rte_exit(EXIT_FAILURE,
"Unable to create the l3fwd LPM table on socket %d\n",
continue;
}
+ struct rte_lpm_config lpm_config;
+
+ lpm_config.max_rules = APP_MAX_LPM_RULES;
+ lpm_config.number_tbl8s = 256;
+ lpm_config.flags = 0;
snprintf(name, sizeof(name), "lpm_table_%u", socket);
printf("Creating the LPM table for socket %u ...\n", socket);
app.lpm_tables[socket] = rte_lpm_create(
name,
socket,
- APP_MAX_LPM_RULES,
- 0);
+ &lpm_config);
if (app.lpm_tables[socket] == NULL) {
rte_panic("Unable to create LPM table on socket %u\n", socket);
}
setup_lpm(int socketid)
{
struct rte_lpm6_config config;
+ struct rte_lpm_config lpm_ipv4_config;
unsigned i;
int ret;
char s[64];
/* create the LPM table */
snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socketid);
- ipv4_l3fwd_lookup_struct[socketid] = rte_lpm_create(s, socketid,
- IPV4_L3FWD_LPM_MAX_RULES, 0);
+ lpm_ipv4_config.max_rules = IPV4_L3FWD_LPM_MAX_RULES;
+ lpm_ipv4_config.number_tbl8s = 256;
+ lpm_ipv4_config.flags = 0;
+ ipv4_l3fwd_lookup_struct[socketid] =
+ rte_lpm_create(s, socketid, &lpm_ipv4_config);
if (ipv4_l3fwd_lookup_struct[socketid] == NULL)
rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table"
" on socket %d\n", socketid);
VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);
struct rte_lpm *
-rte_lpm_create_v1604(const char *name, int socket_id, int max_rules,
- __rte_unused int flags)
+rte_lpm_create_v1604(const char *name, int socket_id,
+ const struct rte_lpm_config *config)
{
char mem_name[RTE_LPM_NAMESIZE];
struct rte_lpm *lpm = NULL;
struct rte_tailq_entry *te;
- uint32_t mem_size;
+ uint32_t mem_size, rules_size, tbl8s_size;
struct rte_lpm_list *lpm_list;
lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);
/* Check user arguments. */
- if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
+ if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
+ || config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
rte_errno = EINVAL;
return NULL;
}
snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
/* Determine the amount of memory to allocate. */
- mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);
+ mem_size = sizeof(*lpm);
+ rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
+ tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
goto exit;
}
+ lpm->rules_tbl = (struct rte_lpm_rule *)rte_zmalloc_socket(NULL,
+ (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
+
+ if (lpm->rules_tbl == NULL) {
+ RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
+ rte_free(lpm);
+ lpm = NULL;
+ rte_free(te);
+ goto exit;
+ }
+
+ lpm->tbl8 = (struct rte_lpm_tbl_entry *)rte_zmalloc_socket(NULL,
+ (size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);
+
+ if (lpm->tbl8 == NULL) {
+ RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
+ rte_free(lpm->rules_tbl);
+ rte_free(lpm);
+ lpm = NULL;
+ rte_free(te);
+ goto exit;
+ }
+
/* Save user arguments. */
- lpm->max_rules = max_rules;
+ lpm->max_rules = config->max_rules;
+ lpm->number_tbl8s = config->number_tbl8s;
snprintf(lpm->name, sizeof(lpm->name), "%s", name);
te->data = (void *) lpm;
BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04);
MAP_STATIC_SYMBOL(
struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
- int max_rules, int flags), rte_lpm_create_v1604);
+ const struct rte_lpm_config *config), rte_lpm_create_v1604);
/*
* Deallocates memory for given LPM table.
}
static inline int32_t
-tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8)
+tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
{
uint32_t group_idx; /* tbl8 group index. */
struct rte_lpm_tbl_entry *tbl8_entry;
/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
- for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
- group_idx++) {
+ for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
/* If a free tbl8 group is found clean it and set as VALID. */
if (!tbl8_entry->valid_group) {
if (!lpm->tbl24[tbl24_index].valid) {
/* Search for a free tbl8 group. */
- tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8);
+ tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);
/* Check tbl8 allocation was successful. */
if (tbl8_group_index < 0) {
} /* If valid entry but not extended calculate the index into Table8. */
else if (lpm->tbl24[tbl24_index].valid_group == 0) {
/* Search for free tbl8 group. */
- tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8);
+ tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);
if (tbl8_group_index < 0) {
return tbl8_group_index;
memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
/* Zero tbl8. */
- memset(lpm->tbl8, 0, sizeof(lpm->tbl8));
+ memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
+ * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
/* Delete all rules from the rules table. */
memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
/** @internal Number of entries in a tbl8 group. */
#define RTE_LPM_TBL8_GROUP_NUM_ENTRIES 256
+/** @internal Max number of tbl8 groups in the tbl8. */
+#define RTE_LPM_MAX_TBL8_NUM_GROUPS (1 << 24)
+
/** @internal Total number of tbl8 groups in the tbl8. */
#define RTE_LPM_TBL8_NUM_GROUPS 256
#endif
+/** LPM configuration structure. */
+struct rte_lpm_config {
+ uint32_t max_rules; /**< Max number of rules. */
+ uint32_t number_tbl8s; /**< Number of tbl8s to allocate. */
+ int flags; /**< This field is currently unused. */
+};
+
/** @internal Rule structure. */
struct rte_lpm_rule_v20 {
uint32_t ip; /**< Rule IP address. */
/* LPM metadata. */
char name[RTE_LPM_NAMESIZE]; /**< Name of the lpm. */
uint32_t max_rules; /**< Max. balanced rules per lpm. */
+ uint32_t number_tbl8s; /**< Number of tbl8s. */
struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */
/* LPM Tables. */
struct rte_lpm_tbl_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
__rte_cache_aligned; /**< LPM tbl24 table. */
- struct rte_lpm_tbl_entry tbl8[RTE_LPM_TBL8_NUM_ENTRIES]
- __rte_cache_aligned; /**< LPM tbl8 table. */
- struct rte_lpm_rule rules_tbl[0] \
- __rte_cache_aligned; /**< LPM rules. */
+ struct rte_lpm_tbl_entry *tbl8; /**< LPM tbl8 table. */
+ struct rte_lpm_rule *rules_tbl; /**< LPM rules. */
};
/**
* LPM object name
* @param socket_id
* NUMA socket ID for LPM table memory allocation
- * @param max_rules
- * Maximum number of LPM rules that can be added
- * @param flags
- * This parameter is currently unused
+ * @param config
+ * Structure containing the configuration
* @return
* Handle to LPM object on success, NULL otherwise with rte_errno set
*   to an appropriate value. Possible rte_errno values include:
* - ENOMEM - no appropriate memory area found in which to create memzone
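+ *
+ *   Example (illustrative; the name and the rule/tbl8 counts are arbitrary):
+ *
+ * @code
+ *     struct rte_lpm_config config = {
+ *             .max_rules = 1024,
+ *             .number_tbl8s = 256,
+ *             .flags = 0,
+ *     };
+ *     struct rte_lpm *lpm = rte_lpm_create("example", SOCKET_ID_ANY, &config);
+ *     if (lpm == NULL)
+ *             rte_panic("Cannot create LPM table\n");
+ * @endcode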
*/
struct rte_lpm *
-rte_lpm_create(const char *name, int socket_id, int max_rules, int flags);
+rte_lpm_create(const char *name, int socket_id,
+ const struct rte_lpm_config *config);
struct rte_lpm_v20 *
rte_lpm_create_v20(const char *name, int socket_id, int max_rules, int flags);
struct rte_lpm *
-rte_lpm_create_v1604(const char *name, int socket_id, int max_rules, int flags);
+rte_lpm_create_v1604(const char *name, int socket_id,
+ const struct rte_lpm_config *config);
/**
* Find an existing LPM object and return a pointer to it.
{
struct rte_table_lpm_params *p = (struct rte_table_lpm_params *) params;
struct rte_table_lpm *lpm;
+ struct rte_lpm_config lpm_config;
+
uint32_t total_size, nht_size;
/* Check input parameters */
RTE_LOG(ERR, TABLE, "%s: Invalid n_rules\n", __func__);
return NULL;
}
+ if (p->number_tbl8s == 0) {
+ RTE_LOG(ERR, TABLE, "%s: Invalid number_tbl8s\n", __func__);
+ return NULL;
+ }
if (p->entry_unique_size == 0) {
RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
__func__);
}
/* LPM low-level table creation */
- lpm->lpm = rte_lpm_create(p->name, socket_id, p->n_rules, 0);
+ lpm_config.max_rules = p->n_rules;
+ lpm_config.number_tbl8s = p->number_tbl8s;
+ lpm_config.flags = p->flags;
+ lpm->lpm = rte_lpm_create(p->name, socket_id, &lpm_config);
+
if (lpm->lpm == NULL) {
rte_free(lpm);
RTE_LOG(ERR, TABLE, "Unable to create low-level LPM table\n");
/** Maximum number of LPM rules (i.e. IP routes) */
uint32_t n_rules;
+ /** Number of tbl8s to allocate. */
+ uint32_t number_tbl8s;
+
+ /** This field is currently unused. */
+ int flags;
+
/** Number of bytes at the start of the table entry that uniquely
identify the entry. Cannot be bigger than table entry size. */
uint32_t entry_unique_size;