Right now we allocate indexes for all types of nodes, except MATCH,
at the 'gen final RT table' stage. For MATCH type nodes we do it at
the 'build temporary tree' stage instead. This split is unnecessary
and makes the code more complex and error prone. Rework the code so
that MATCH indexes are allocated at the same stage as all the others.
Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
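---

For reference, a minimal C sketch of the new allocation scheme
(illustration only, not librte_acl code; struct node and
assign_match_indexes() are made-up stand-ins). Index 0 is reserved for
the all-zero "no match" sentinel entry, which is why numbering starts
at 1, mirroring "indices->match_index = 1" in the patch below.

#include <stdint.h>
#include <stddef.h>

struct node {
	struct node **kids;   /* fan-out pointers, NULL-terminated */
	uint32_t is_match;    /* non-zero for MATCH nodes */
	uint32_t match_index; /* now assigned while generating the RT table */
};

/*
 * Single pass over the final tree: hand out MATCH indexes in order,
 * instead of tracking them while the temporary tree is being built.
 */
static uint32_t
assign_match_indexes(struct node *nd, uint32_t next_index)
{
	size_t i;

	if (nd->is_match != 0) {
		nd->match_index = next_index;
		return next_index + 1;
	}
	for (i = 0; nd->kids != NULL && nd->kids[i] != NULL; i++)
		next_index = assign_match_indexes(nd->kids[i], next_index);
	return next_index;
}

/* usage: number of MATCH entries = assign_match_indexes(root, 1) - 1 */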

 struct rte_acl_trie {
 	uint32_t type;
 	uint32_t count;
-	int32_t smallest; /* smallest rule in this trie */
 	uint32_t root_index;
 	const uint32_t *data_index;
 	uint32_t num_data_indexes;

 int rte_acl_gen(struct rte_acl_ctx *ctx, struct rte_acl_trie *trie,
 	struct rte_acl_bld_trie *node_bld_trie, uint32_t num_tries,
-	uint32_t num_categories, uint32_t data_index_sz, int match_num);
+	uint32_t num_categories, uint32_t data_index_sz);

 typedef int (*rte_acl_classify_t)
 (const struct rte_acl_ctx *, const uint8_t **, uint32_t *, uint32_t, uint32_t);

 		context->tries[n].type = RTE_ACL_UNUSED_TRIE;
 		context->bld_tries[n].trie = NULL;
 		context->tries[n].count = 0;
-		context->tries[n].smallest = INT32_MAX;
 	}

 	context->tries[0].type = RTE_ACL_FULL_TRIE;

 	rc = rte_acl_gen(ctx, bcx.tries, bcx.bld_tries,
 			bcx.num_tries, bcx.cfg.num_categories,
 			RTE_ACL_MAX_FIELDS * RTE_DIM(bcx.tries) *
-			sizeof(ctx->data_indexes[0]),
-			bcx.num_build_rules + 1);
+			sizeof(ctx->data_indexes[0]));
 	if (rc == 0) {
 		/* set data indexes. */

 	int32_t quad_vectors;
 	int32_t dfa;
 	int32_t dfa_gr64;
-	int32_t smallest_match;
 };

 struct rte_acl_indices {
-	int dfa_index;
-	int quad_index;
-	int single_index;
-	int match_index;
+	int32_t dfa_index;
+	int32_t quad_index;
+	int32_t single_index;
+	int32_t match_index;
+	int32_t match_start;
 };

 /*
  * Determine the type of nodes and count each type
  */
 acl_count_trie_types(struct acl_node_counters *counts,
-	struct rte_acl_node *node, uint64_t no_match, int match, int force_dfa)
+	struct rte_acl_node *node, uint64_t no_match, int force_dfa)
 {
 	uint32_t n;
 	int num_ptrs;

 	/* skip if this node has been counted */
 	if (node->node_type != (uint32_t)RTE_ACL_NODE_UNDEFINED)
-		return match;
+		return;

 	if (node->match_flag != 0 || node->num_ptrs == 0) {
 		counts->match++;
-		if (node->match_flag == -1)
-			node->match_flag = match++;
 		node->node_type = RTE_ACL_NODE_MATCH;
-		if (counts->smallest_match > node->match_flag)
-			counts->smallest_match = node->match_flag;
-		return match;
+		return;
 	}

 	num_ptrs = acl_count_fanout(node);

 	 */
 	for (n = 0; n < node->num_ptrs; n++) {
 		if (node->ptrs[n].ptr != NULL)
-			match = acl_count_trie_types(counts, node->ptrs[n].ptr,
-				no_match, match, 0);
+			acl_count_trie_types(counts, node->ptrs[n].ptr,
+				no_match, 0);

 		break;
 	case RTE_ACL_NODE_MATCH:
 		match = ((struct rte_acl_match_results *)
-			(node_array + index->match_index));
-		memcpy(match + node->match_flag, node->mrt, sizeof(*node->mrt));
-		node->node_index = node->match_flag | node->node_type;
+			(node_array + index->match_start));
+		for (n = 0; n != RTE_DIM(match->results); n++)
+			RTE_ACL_VERIFY(match->results[0] == 0);
+		memcpy(match + index->match_index, node->mrt,
+			sizeof(*node->mrt));
+		node->node_index = index->match_index | node->node_type;
+		index->match_index += 1;
 		break;
 	case RTE_ACL_NODE_UNDEFINED:
 		RTE_ACL_VERIFY(node->node_type !=

 acl_calc_counts_indices(struct acl_node_counters *counts,
-	struct rte_acl_indices *indices, struct rte_acl_trie *trie,
+	struct rte_acl_indices *indices,
 	struct rte_acl_bld_trie *node_bld_trie, uint32_t num_tries,
-	int match_num, uint64_t no_match)
+	uint64_t no_match)

 	/* Get stats on nodes */
 	for (n = 0; n < num_tries; n++) {
-		counts->smallest_match = INT32_MAX;
-		match_num = acl_count_trie_types(counts, node_bld_trie[n].trie,
-			no_match, match_num, 1);
-		trie[n].smallest = counts->smallest_match;
+		acl_count_trie_types(counts, node_bld_trie[n].trie,
+			no_match, 1);
 	}

 	indices->dfa_index = RTE_ACL_DFA_SIZE + 1;
 	indices->quad_index = indices->dfa_index +
 		counts->dfa_gr64 * RTE_ACL_DFA_GR64_SIZE;
 	indices->single_index = indices->quad_index + counts->quad_vectors;
-	indices->match_index = indices->single_index + counts->single + 1;
-	indices->match_index = RTE_ALIGN(indices->match_index,
-		(XMM_SIZE / sizeof(uint64_t)));
+	indices->match_start = indices->single_index + counts->single + 1;
+	indices->match_start = RTE_ALIGN(indices->match_start,
+		(XMM_SIZE / sizeof(uint64_t)));
+	indices->match_index = 1;
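
As I read the index computation above, the runtime node array ends up
laid out as below (annotation only, not part of the patch; offsets are
in 64-bit words):

/*
 * [0 .. RTE_ACL_DFA_SIZE]        sentinel no-match DFA block
 * [dfa_index, quad_index)        DFA nodes (dfa_gr64 groups of 64)
 * [quad_index, single_index)     QUAD transition vectors
 * [single_index, match_start)    single-transition nodes, +1 slot and
 *                                XMM alignment padding
 * [match_start, ...)             rte_acl_match_results array; entry 0
 *                                stays zero-filled, real matches are
 *                                numbered from match_index = 1
 */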

 int
 rte_acl_gen(struct rte_acl_ctx *ctx, struct rte_acl_trie *trie,
 	struct rte_acl_bld_trie *node_bld_trie, uint32_t num_tries,
-	uint32_t num_categories, uint32_t data_index_sz, int match_num)
+	uint32_t num_categories, uint32_t data_index_sz)
 {
 	void *mem;
 	size_t total_size;

 	no_match = RTE_ACL_NODE_MATCH;

 	/* Fill counts and indices arrays from the nodes. */
-	match_num = acl_calc_counts_indices(&counts, &indices, trie,
-		node_bld_trie, num_tries, match_num, no_match);
+	acl_calc_counts_indices(&counts, &indices,
+		node_bld_trie, num_tries, no_match);

 	/* Allocate runtime memory (align to cache boundary) */
 	total_size = RTE_ALIGN(data_index_sz, RTE_CACHE_LINE_SIZE) +
-		indices.match_index * sizeof(uint64_t) +
-		(match_num + 2) * sizeof(struct rte_acl_match_results) +
+		indices.match_start * sizeof(uint64_t) +
+		(counts.match + 1) * sizeof(struct rte_acl_match_results) +
 		XMM_SIZE;

 	mem = rte_zmalloc_socket(ctx->name, total_size, RTE_CACHE_LINE_SIZE,
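
A quick sanity check on the new sizing (my arithmetic, not from the
patch): MATCH entries are numbered 1 .. counts.match with entry 0 kept
as the zero-filled sentinel, so counts.match + 1 entries of struct
rte_acl_match_results suffice. For example, a build that produced 100
MATCH nodes reserves

	(100 + 1) * sizeof(struct rte_acl_match_results)

bytes past the transition nodes, plus XMM_SIZE of alignment slack.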

 	}

 	/* Fill the runtime structure */
-	match_index = indices.match_index;
+	match_index = indices.match_start;
 	node_array = (uint64_t *)((uintptr_t)mem +
 		RTE_ALIGN(data_index_sz, RTE_CACHE_LINE_SIZE));