if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
break;
}
- if (te != NULL)
+ lpm = NULL;
+ if (te != NULL) {
+ rte_errno = EEXIST;
goto exit;
+ }
/* allocate tailq entry */
te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
VERSION_SYMBOL(rte_lpm_create, _v20, 2.0);
struct rte_lpm *
-rte_lpm_create_v1604(const char *name, int socket_id, int max_rules,
- __rte_unused int flags)
+rte_lpm_create_v1604(const char *name, int socket_id,
+ const struct rte_lpm_config *config)
{
char mem_name[RTE_LPM_NAMESIZE];
struct rte_lpm *lpm = NULL;
struct rte_tailq_entry *te;
- uint32_t mem_size;
+ uint32_t mem_size, rules_size, tbl8s_size;
struct rte_lpm_list *lpm_list;
lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);
/* Check user arguments. */
- if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) {
+ if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
+ || config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
rte_errno = EINVAL;
return NULL;
}
snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
/* Determine the amount of memory to allocate. */
- mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);
+ mem_size = sizeof(*lpm);
+ rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
+ tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
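Since the rules table and the tbl8 groups are now carved out as separate allocations, their sizes are computed independently of the main structure. As a sketch of the arithmetic (hypothetical numbers, not from the patch), using the 4-byte entry size asserted by the RTE_BUILD_BUG_ON above, 256 entries per tbl8 group, and assuming an 8-byte rte_lpm_rule (uint32_t ip plus uint32_t next_hop in this version):

/* Illustrative sizing only. */
struct rte_lpm_config cfg = { .max_rules = 1024, .number_tbl8s = 256 };
size_t rules = sizeof(struct rte_lpm_rule) * cfg.max_rules;	/* 8 * 1024 = 8 KiB */
size_t tbl8s = sizeof(struct rte_lpm_tbl_entry)
		* RTE_LPM_TBL8_GROUP_NUM_ENTRIES * cfg.number_tbl8s;	/* 4 * 256 * 256 = 256 KiB */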
rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
break;
}
- if (te != NULL)
+ lpm = NULL;
+ if (te != NULL) {
+ rte_errno = EEXIST;
goto exit;
+ }
/* allocate tailq entry */
te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
goto exit;
}
+ lpm->rules_tbl = (struct rte_lpm_rule *)rte_zmalloc_socket(NULL,
+ (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
+
+ if (lpm->rules_tbl == NULL) {
+ RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
+ rte_free(lpm);
+ lpm = NULL;
+ rte_free(te);
+ goto exit;
+ }
+
+ lpm->tbl8 = (struct rte_lpm_tbl_entry *)rte_zmalloc_socket(NULL,
+ (size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);
+
+ if (lpm->tbl8 == NULL) {
+ RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
+ rte_free(lpm->rules_tbl);
+ rte_free(lpm);
+ lpm = NULL;
+ rte_free(te);
+ goto exit;
+ }
+
/* Save user arguments. */
- lpm->max_rules = max_rules;
+ lpm->max_rules = config->max_rules;
+ lpm->number_tbl8s = config->number_tbl8s;
snprintf(lpm->name, sizeof(lpm->name), "%s", name);
te->data = (void *) lpm;
BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04);
MAP_STATIC_SYMBOL(
struct rte_lpm *rte_lpm_create(const char *name, int socket_id,
- int max_rules, int flags), rte_lpm_create_v1604);
+ const struct rte_lpm_config *config), rte_lpm_create_v1604);
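For callers migrating from the old four-argument create, a minimal sketch of the new interface (hypothetical names and sizes; error handling abbreviated). Note that a duplicate name is now reported through rte_errno == EEXIST, so a create-or-lookup pattern becomes possible:

#include <rte_lpm.h>
#include <rte_errno.h>

struct rte_lpm_config config = {
	.max_rules = 1024,
	.number_tbl8s = 256,
	.flags = 0,			/* currently unused */
};
struct rte_lpm *lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY, &config);
if (lpm == NULL && rte_errno == EEXIST)
	lpm = rte_lpm_find_existing("example_lpm");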
/*
* Deallocates memory for given LPM table.
if (te->data == (void *) lpm)
break;
}
- if (te == NULL) {
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
- return;
- }
-
- TAILQ_REMOVE(lpm_list, te, next);
+ if (te != NULL)
+ TAILQ_REMOVE(lpm_list, te, next);
rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
if (te->data == (void *) lpm)
break;
}
- if (te == NULL) {
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
- return;
- }
-
- TAILQ_REMOVE(lpm_list, te, next);
+ if (te != NULL)
+ TAILQ_REMOVE(lpm_list, te, next);
rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_free(lpm->tbl8);
+ rte_free(lpm->rules_tbl);
rte_free(lpm);
rte_free(te);
}
}
static inline int32_t
-tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8)
+tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
{
uint32_t group_idx; /* tbl8 group index. */
struct rte_lpm_tbl_entry *tbl8_entry;
/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
- for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
- group_idx++) {
+ for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
/* If a free tbl8 group is found clean it and set as VALID. */
if (!tbl8_entry->valid_group) {
lpm->tbl24[i].depth <= depth)) {
struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
- { .next_hop = next_hop, },
.valid = VALID,
.valid_group = 0,
.depth = depth,
};
+ new_tbl24_entry.next_hop = next_hop;
/* Setting tbl24 entry in one go to avoid race
* conditions
.valid = VALID,
.valid_group = VALID,
.depth = depth,
- .next_hop = next_hop,
};
+ new_tbl8_entry.next_hop = next_hop;
/*
* Setting tbl8 entry in one go to avoid
struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
.valid = VALID,
.depth = depth,
- .next_hop = next_hop,
.valid_group = lpm->tbl8[i].valid_group,
};
-
+ new_tbl8_entry.next_hop = next_hop;
/*
* Setting tbl8 entry in one go to avoid race
* condition
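The next_hop assignments above are hoisted out of the designated initializers because next_hop sits inside an anonymous union in the v20 entry layout, and designated initialization of anonymous union members trips up some of the older supported compilers; assigning after aggregate initialization sidesteps that. Roughly (abbreviated little-endian layout, as we read it):

struct rte_lpm_tbl_entry_v20 {
	union {
		uint8_t next_hop;	/* terminal entry */
		uint8_t group_idx;	/* entry extended into a tbl8 */
	};
	uint8_t valid		:1;
	uint8_t valid_group	:1;
	uint8_t depth		:6;
};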
if (!lpm->tbl24[tbl24_index].valid) {
/* Search for a free tbl8 group. */
- tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8);
+ tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);
/* Check tbl8 allocation was successful. */
if (tbl8_group_index < 0) {
} /* If valid entry but not extended calculate the index into Table8. */
else if (lpm->tbl24[tbl24_index].valid_group == 0) {
/* Search for free tbl8 group. */
- tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8);
+ tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s);
if (tbl8_group_index < 0) {
return tbl8_group_index;
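With the group count now bounded by config->number_tbl8s rather than a build-time constant, tbl8 exhaustion becomes a configuration matter; the negative value from tbl8_alloc_v1604 propagates out of rte_lpm_add, so a caller can check it along these lines (hypothetical values):

uint32_t ip = (uint32_t)(10 << 24);	/* 10.0.0.0 */
int ret = rte_lpm_add(lpm, ip, 25, /* next_hop */ 1);
if (ret < 0)
	/* e.g. -ENOSPC once all number_tbl8s groups are in use */
	printf("route add failed: %s\n", strerror(-ret));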
.valid = VALID,
.valid_group = VALID,
.depth = sub_rule_depth,
- .next_hop = lpm->rules_tbl
- [sub_rule_index].next_hop,
};
+ new_tbl8_entry.next_hop =
+ lpm->rules_tbl[sub_rule_index].next_hop;
for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
.valid = VALID,
.depth = sub_rule_depth,
.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
- .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
};
+ new_tbl8_entry.next_hop =
+ lpm->rules_tbl[sub_rule_index].next_hop;
/*
* Loop through the range of entries on tbl8 for which the
* rule_to_delete must be modified.
memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
/* Zero tbl8. */
- memset(lpm->tbl8, 0, sizeof(lpm->tbl8));
+ memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
+ * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
/* Delete all rules from the rules table. */
memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);