diff --git a/lib/librte_acl/acl_bld.c b/lib/librte_acl/acl_bld.c
index fe7b82430e..3801843909 100644
--- a/lib/librte_acl/acl_bld.c
+++ b/lib/librte_acl/acl_bld.c
@@ -41,10 +41,9 @@
 /* number of pointers per alloc */
 #define ACL_PTR_ALLOC	32
 
-/* variable for dividing rule sets */
-#define NODE_MAX 2500
-#define NODE_PERCENTAGE (0.40)
-#define RULE_PERCENTAGE (0.40)
+/* heuristic macros for dividing rule sets */
+#define NODE_MAX	0x4000
+#define NODE_MIN	0x800
 
 /* TALLY are statistics per field */
 enum {
@@ -97,6 +96,7 @@ struct acl_build_context {
 	const struct rte_acl_ctx *acx;
 	struct rte_acl_build_rule *build_rules;
 	struct rte_acl_config cfg;
+	int32_t node_max;
 	uint32_t node;
 	uint32_t num_nodes;
 	uint32_t category_mask;
@@ -117,7 +117,7 @@ struct acl_build_context {
 static int acl_merge_trie(struct acl_build_context *context,
 	struct rte_acl_node *node_a, struct rte_acl_node *node_b,
-	uint32_t level, uint32_t subtree_id, struct rte_acl_node **node_c);
+	uint32_t level, struct rte_acl_node **node_c);
 static int acl_merge(struct acl_build_context *context,
 	struct rte_acl_node *node_a, struct rte_acl_node *node_b,
@@ -302,8 +302,6 @@ acl_add_ptr(struct acl_build_context *context,
 	/* add room for more pointers */
 	num_ptrs = node->max_ptrs + ACL_PTR_ALLOC;
 	ptrs = acl_build_alloc(context, num_ptrs, sizeof(*ptrs));
-	if (ptrs == NULL)
-		return -ENOMEM;
 
 	/* copy current points to new memory allocation */
 	if (node->ptrs != NULL) {
@@ -388,8 +386,8 @@ acl_gen_mask(struct rte_acl_bitset *bitset, uint32_t value, uint32_t mask)
  * Determine if A and/or B are supersets of the intersection.
  */
 static int
-acl_intersect_type(struct rte_acl_bitset *a_bits,
-	struct rte_acl_bitset *b_bits,
+acl_intersect_type(const struct rte_acl_bitset *a_bits,
+	const struct rte_acl_bitset *b_bits,
 	struct rte_acl_bitset *intersect)
 {
 	uint32_t n;
@@ -477,16 +475,12 @@ acl_dup_node(struct acl_build_context *context, struct rte_acl_node *node)
 	struct rte_acl_node *next;
 
 	next = acl_alloc_node(context, node->level);
-	if (next == NULL)
-		return NULL;
 
 	/* allocate the pointers */
 	if (node->num_ptrs > 0) {
 		next->ptrs = acl_build_alloc(context,
 			node->max_ptrs,
 			sizeof(struct rte_acl_ptr_set));
-		if (next->ptrs == NULL)
-			return NULL;
 		next->max_ptrs = node->max_ptrs;
 	}
 
@@ -669,8 +663,6 @@ acl_merge_intersect(struct acl_build_context *context,
 	/* Duplicate A for intersection */
 	node_c = acl_dup_node(context, node_a->ptrs[idx_a].ptr);
-	if (node_c == NULL)
-		return -1;
 
 	/* Remove intersection from A */
 	acl_exclude_ptr(context, node_a, idx_a, intersect_ptr);
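The hunks above delete per-call NULL checks (acl_add_ptr(), acl_dup_node(), acl_merge_intersect()) because the build-phase allocator no longer reports failure through its return value: when the pool is exhausted it jumps back to the sigsetjmp() that this patch adds in acl_bld() further down. A minimal sketch of that convention, assuming a tb_alloc()-style pool allocator that siglongjmp()s on failure; the names and the trivial calloc() backing are illustrative, not the DPDK implementation:

#include <errno.h>
#include <setjmp.h>
#include <stdlib.h>

struct mem_pool {
	sigjmp_buf fail;	/* armed by sigsetjmp() in the build entry point */
};

static void *
pool_alloc(struct mem_pool *pool, size_t sz)
{
	void *p = calloc(1, sz);

	/* on failure, unwind straight back to the build entry point */
	if (p == NULL)
		siglongjmp(pool->fail, -ENOMEM);
	return p;	/* callers may assume a valid pointer */
}

With this scheme every intermediate caller can treat allocation as infallible, which is what makes the deletions above safe.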
@@ -908,94 +900,6 @@ acl_resolve_leaf(struct acl_build_context *context,
 	return 0;
 }
 
-/*
-* Within the existing trie structure, determine which nodes are
-* part of the subtree of the trie to be merged.
-*
-* For these purposes, a subtree is defined as the set of nodes that
-* are 1) not a superset of the intersection with the same level of
-* the merging tree, and 2) do not have any references from a node
-* outside of the subtree.
-*/
-static void
-mark_subtree(struct rte_acl_node *node,
-	struct rte_acl_bitset *level_bits,
-	uint32_t level,
-	uint32_t id)
-{
-	uint32_t n;
-
-	/* mark this node as part of the subtree */
-	node->subtree_id = id | RTE_ACL_SUBTREE_NODE;
-
-	for (n = 0; n < node->num_ptrs; n++) {
-
-		if (node->ptrs[n].ptr != NULL) {
-
-			struct rte_acl_bitset intersect_bits;
-			int intersect;
-
-			/*
-			 * Item 1) :
-			 * check if this child pointer is not a superset of the
-			 * same level of the merging tree.
-			 */
-			intersect = acl_intersect_type(&node->ptrs[n].values,
-				&level_bits[level],
-				&intersect_bits);
-
-			if ((intersect & ACL_INTERSECT_A) == 0) {
-
-				struct rte_acl_node *child = node->ptrs[n].ptr;
-
-				/*
-				 * reset subtree reference if this is
-				 * the first visit by this subtree.
-				 */
-				if (child->subtree_id != id) {
-					child->subtree_id = id;
-					child->subtree_ref_count = 0;
-				}
-
-				/*
-				 * Item 2) :
-				 * increment the subtree reference count and if
-				 * all references are from this subtree then
-				 * recurse to that child
-				 */
-				child->subtree_ref_count++;
-				if (child->subtree_ref_count ==
-						child->ref_count)
-					mark_subtree(child, level_bits,
-						level + 1, id);
-			}
-		}
-	}
-}
-
-/*
- * Build the set of bits that define the set of transitions
- * for each level of a trie.
- */
-static void
-build_subset_mask(struct rte_acl_node *node,
-	struct rte_acl_bitset *level_bits,
-	int level)
-{
-	uint32_t n;
-
-	/* Add this node's transitions to the set for this level */
-	for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
-		level_bits[level].bits[n] &= node->values.bits[n];
-
-	/* For each child, add the transitions for the next level */
-	for (n = 0; n < node->num_ptrs; n++)
-		if (node->ptrs[n].ptr != NULL)
-			build_subset_mask(node->ptrs[n].ptr, level_bits,
-				level + 1);
-}
-
-
 /*
  * Merge nodes A and B together,
  * returns a node that is the path for the intersection
@@ -1022,7 +926,7 @@ build_subset_mask(struct rte_acl_node *node,
 static int
 acl_merge_trie(struct acl_build_context *context,
 	struct rte_acl_node *node_a, struct rte_acl_node *node_b,
-	uint32_t level, uint32_t subtree_id, struct rte_acl_node **return_c)
+	uint32_t level, struct rte_acl_node **return_c)
 {
 	uint32_t n, m, ptrs_c, ptrs_b;
 	uint32_t min_add_c, min_add_b;
@@ -1048,16 +952,12 @@ acl_merge_trie(struct acl_build_context *context,
 	}
 
 	/*
-	 * Create node C as a copy of node A if node A is not part of
-	 * a subtree of the merging tree (node B side). Otherwise,
-	 * just use node A.
+	 * Create node C as a copy of node A, and do: C = merge(A,B);
+	 * If node A can be used instead (A==C), then later we'll
+	 * destroy C and return A.
 	 */
-	if (level > 0 &&
-		node_a->subtree_id !=
-		(subtree_id | RTE_ACL_SUBTREE_NODE)) {
+	if (level > 0)
 		node_c = acl_dup_node(context, node_a);
-		node_c->subtree_id = subtree_id | RTE_ACL_SUBTREE_NODE;
-	}
 
 	/*
 	 * If the two node transitions intersect then merge the transitions.
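The rewritten comment describes a copy-then-merge strategy: C starts as a duplicate of A, B is merged into it, and if the merge turns out to add nothing, C is discarded and A is reused. A toy illustration of that idea using plain bitmasks in place of trie nodes (hedged sketch; the real acl_merge_trie() operates on transition sets and child pointers, not integers):

#include <stdint.h>
#include <stdio.h>

static uint64_t
merge_sets(uint64_t a, uint64_t b, int *reuse_a)
{
	uint64_t c = a | b;	/* C = merge(A, B) */

	*reuse_a = (c == a);	/* B added nothing: caller keeps A, drops C */
	return c;
}

int
main(void)
{
	int reuse_a;
	uint64_t c = merge_sets(0xff, 0x0f, &reuse_a);

	printf("merged=%#jx, reuse A=%d\n", (uintmax_t)c, reuse_a);
	return 0;
}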
@@ -1104,7 +1004,7 @@ acl_merge_trie(struct acl_build_context *context,
 			if (acl_merge_trie(context,
 					node_c->ptrs[n].ptr,
 					node_b->ptrs[m].ptr,
-					level + 1, subtree_id,
+					level + 1,
 					&child_node_c))
 				return 1;
 
@@ -1322,20 +1222,16 @@ build_trie(struct acl_build_context *context, struct rte_acl_build_rule *head,
 	struct rte_acl_build_rule *prev, *rule;
 	struct rte_acl_node *end, *merge, *root, *end_prev;
 	const struct rte_acl_field *fld;
-	struct rte_acl_bitset level_bits[RTE_ACL_MAX_LEVELS];
 
 	prev = head;
 	rule = head;
+	*last = prev;
 
 	trie = acl_alloc_node(context, 0);
-	if (trie == NULL)
-		return NULL;
 
 	while (rule != NULL) {
 
 		root = acl_alloc_node(context, 0);
-		if (root == NULL)
-			return NULL;
 
 		root->ref_count = 1;
 		end = root;
@@ -1408,7 +1304,7 @@ build_trie(struct acl_build_context *context, struct rte_acl_build_rule *head,
 
 			/* merge this field on to the end of the rule */
 			if (acl_merge_trie(context, end_prev, merge, 0,
-				0, NULL) != 0) {
+				NULL) != 0) {
 				return NULL;
 			}
 		}
@@ -1419,12 +1315,11 @@ build_trie(struct acl_build_context *context, struct rte_acl_build_rule *head,
 		 * Setup the results for this rule.
 		 * The result and priority of each category.
 		 */
-		if (end->mrt == NULL &&
-			(end->mrt = acl_build_alloc(context, 1,
-			sizeof(*end->mrt))) == NULL)
-			return NULL;
+		if (end->mrt == NULL)
+			end->mrt = acl_build_alloc(context, 1,
+				sizeof(*end->mrt));
 
-		for (m = 0; m < context->cfg.num_categories; m++) {
+		for (m = context->cfg.num_categories; 0 != m--; ) {
 			if (rule->f->data.category_mask & (1 << m)) {
 				end->mrt->results[m] = rule->f->data.userdata;
 				end->mrt->priority[m] = rule->f->data.priority;
@@ -1435,19 +1330,14 @@ build_trie(struct acl_build_context *context, struct rte_acl_build_rule *head,
 		}
 
 		node_count = context->num_nodes;
-
-		memset(&level_bits[0], UINT8_MAX, sizeof(level_bits));
-		build_subset_mask(root, &level_bits[0], 0);
-		mark_subtree(trie, &level_bits[0], 0, end->match_flag);
 		(*count)++;
 
 		/* merge this rule into the trie */
-		if (acl_merge_trie(context, trie, root, 0, end->match_flag,
-			NULL))
+		if (acl_merge_trie(context, trie, root, 0, NULL))
 			return NULL;
 
 		node_count = context->num_nodes - node_count;
-		if (node_count > NODE_MAX) {
+		if (node_count > context->node_max) {
 			*last = prev;
 			return trie;
 		}
@@ -1479,8 +1369,8 @@ acl_calc_wildness(struct rte_acl_build_rule *head,
 
 			switch (rule->config->defs[n].type) {
 			case RTE_ACL_FIELD_TYPE_BITMASK:
-				wild = (size -
-					_mm_popcnt_u32(fld->mask_range.u8)) /
+				wild = (size - __builtin_popcount(
+					fld->mask_range.u8)) /
 					size;
 				break;
@@ -1539,11 +1429,9 @@ acl_calc_wildness(struct rte_acl_build_rule *head,
 	return 0;
 }
 
-static int
-acl_rule_stats(struct rte_acl_build_rule *head, struct rte_acl_config *config,
-	uint32_t *wild_limit)
+static void
+acl_rule_stats(struct rte_acl_build_rule *head, struct rte_acl_config *config)
 {
-	int min;
 	struct rte_acl_build_rule *rule;
 	uint32_t n, m, fields_deactivated = 0;
 	uint32_t start = 0, deactivate = 0;
@@ -1604,129 +1492,62 @@ acl_rule_stats(struct rte_acl_build_rule *head, struct rte_acl_config *config,
 
 		for (k = 0; k < config->num_fields; k++) {
 			if (tally[k][TALLY_DEACTIVATED] == 0) {
-				memcpy(&tally[l][0], &tally[k][0],
+				memmove(&tally[l][0], &tally[k][0],
 					TALLY_NUM * sizeof(tally[0][0]));
-				memcpy(&config->defs[l++],
+				memmove(&config->defs[l++],
 					&config->defs[k],
 					sizeof(struct rte_acl_field_def));
 			}
 		}
 		config->num_fields = l;
 	}
-
-	min = RTE_ACL_SINGLE_TRIE_SIZE;
-	if (config->num_fields == 2)
-		min *= 4;
-	else if (config->num_fields == 3)
-		min *= 3;
-	else if (config->num_fields == 4)
-		min *= 2;
-
-	if (tally[0][TALLY_0] < min)
-		return 0;
-	for (n = 0; n < config->num_fields; n++)
-		wild_limit[n] = 0;
-
-	/*
-	 * If trailing fields are 100% wild, group those together.
-	 * This allows the search length of the trie to be shortened.
-	 */
-	for (n = 1; n < config->num_fields; n++) {
-
-		double rule_percentage = (double)tally[n][TALLY_DEPTH] /
-			tally[n][0];
-
-		if (rule_percentage > RULE_PERCENTAGE) {
-			/* if it crosses an input boundary then round up */
-			while (config->defs[n - 1].input_index ==
-					config->defs[n].input_index)
-				n++;
-
-			/* set the limit for selecting rules */
-			while (n < config->num_fields)
-				wild_limit[n++] = 100;
-
-			if (wild_limit[n - 1] == 100)
-				return 1;
-		}
-	}
-
-	/* look for the most wild that's 40% or more of the rules */
-	for (n = 1; n < config->num_fields; n++) {
-		for (m = TALLY_100; m > 0; m--) {
-
-			double rule_percentage = (double)tally[n][m] /
-				tally[n][0];
-
-			if (tally[n][TALLY_DEACTIVATED] == 0 &&
-					tally[n][TALLY_0] >
-					RTE_ACL_SINGLE_TRIE_SIZE &&
-					rule_percentage > NODE_PERCENTAGE &&
-					rule_percentage < 0.80) {
-				wild_limit[n] = wild_limits[m];
-				return 1;
-			}
-		}
-	}
-	return 0;
 }
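The popcount change above (SSE4.2 _mm_popcnt_u32() replaced by the portable __builtin_popcount()) feeds the per-field "wildness" metric: for a bitmask field, the fraction of bits the mask leaves uncovered. A standalone check of that formula with an illustrative 8-bit mask (GCC/clang builtin, as used in the patch):

#include <stdio.h>

int
main(void)
{
	unsigned int size = 8;		/* field width in bits (u8) */
	unsigned int mask = 0xf0;	/* upper nibble significant */
	double wild = (double)(size - __builtin_popcount(mask)) / size;

	/* half the bits are wild: prints 0.50 */
	printf("wildness = %.2f\n", wild);
	return 0;
}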
 
 static int
-order(struct rte_acl_build_rule **insert, struct rte_acl_build_rule *rule)
+rule_cmp_wildness(struct rte_acl_build_rule *r1, struct rte_acl_build_rule *r2)
 {
 	uint32_t n;
-	struct rte_acl_build_rule *left = *insert;
 
-	if (left == NULL)
-		return 0;
-
-	for (n = 1; n < left->config->num_fields; n++) {
-		int field_index = left->config->defs[n].field_index;
+	for (n = 1; n < r1->config->num_fields; n++) {
+		int field_index = r1->config->defs[n].field_index;
 
-		if (left->wildness[field_index] != rule->wildness[field_index])
-			return (left->wildness[field_index] >=
-				rule->wildness[field_index]);
+		if (r1->wildness[field_index] != r2->wildness[field_index])
+			return (r1->wildness[field_index] -
+				r2->wildness[field_index]);
 	}
 	return 0;
 }
 
-static struct rte_acl_build_rule *
-ordered_insert_rule(struct rte_acl_build_rule *head,
-	struct rte_acl_build_rule *rule)
-{
-	struct rte_acl_build_rule **insert;
-
-	if (rule == NULL)
-		return head;
-
-	rule->next = head;
-	if (head == NULL)
-		return rule;
-
-	insert = &head;
-	while (order(insert, rule))
-		insert = &(*insert)->next;
-
-	rule->next = *insert;
-	*insert = rule;
-	return head;
-}
-
+/*
+ * Sort the list of rules based on the rules' wildness.
+ */
 static struct rte_acl_build_rule *
 sort_rules(struct rte_acl_build_rule *head)
 {
-	struct rte_acl_build_rule *rule, *reordered_head = NULL;
-	struct rte_acl_build_rule *last_rule = NULL;
-
-	for (rule = head; rule != NULL; rule = rule->next) {
-		reordered_head = ordered_insert_rule(reordered_head, last_rule);
-		last_rule = rule;
+	struct rte_acl_build_rule *new_head;
+	struct rte_acl_build_rule *l, *r, **p;
+
+	new_head = NULL;
+	while (head != NULL) {
+
+		/* remove element from the head of the old list. */
+		r = head;
+		head = r->next;
+		r->next = NULL;
+
+		/* walk through new sorted list to find a proper place. */
+		for (p = &new_head;
+				(l = *p) != NULL &&
+				rule_cmp_wildness(l, r) >= 0;
+				p = &l->next)
+			;
+
+		/* insert element into the new sorted list. */
+		r->next = *p;
+		*p = r;
 	}
-	if (last_rule != reordered_head)
-		reordered_head = ordered_insert_rule(reordered_head, last_rule);
-
-	return reordered_head;
+	return new_head;
 }
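The new sort_rules() replaces the old order()/ordered_insert_rule() pair with a single insertion sort over the linked list, ordered by rule_cmp_wildness() (larger wildness first; the >= 0 test keeps equal keys in their original relative order). The same walk-and-splice pattern on a plain int list, as a self-contained demo (hedged: a numeric compare stands in for rule_cmp_wildness()):

#include <stdio.h>

struct item {
	int v;
	struct item *next;
};

static struct item *
sort_items(struct item *head)
{
	struct item *new_head = NULL, *l, *r, **p;

	while (head != NULL) {
		/* pop one element off the unsorted list */
		r = head;
		head = r->next;
		r->next = NULL;

		/* walk the sorted list to find the insertion point */
		for (p = &new_head; (l = *p) != NULL && l->v >= r->v;
				p = &l->next)
			;

		/* splice it in */
		r->next = *p;
		*p = r;
	}
	return new_head;
}

int
main(void)
{
	struct item c = {1, NULL}, b = {3, &c}, a = {2, &b}, *it;

	for (it = sort_items(&a); it != NULL; it = it->next)
		printf("%d ", it->v);	/* prints: 3 2 1 */
	printf("\n");
	return 0;
}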
@@ -1748,28 +1569,50 @@ acl_build_index(const struct rte_acl_config *config, uint32_t *data_index)
 	return m;
 }
 
+static struct rte_acl_build_rule *
+build_one_trie(struct acl_build_context *context,
+	struct rte_acl_build_rule *rule_sets[RTE_ACL_MAX_TRIES],
+	uint32_t n)
+{
+	struct rte_acl_build_rule *last;
+	struct rte_acl_config *config;
+
+	config = rule_sets[n]->config;
+
+	acl_rule_stats(rule_sets[n], config);
+	rule_sets[n] = sort_rules(rule_sets[n]);
+
+	context->tries[n].type = RTE_ACL_FULL_TRIE;
+	context->tries[n].count = 0;
+
+	context->tries[n].num_data_indexes = acl_build_index(config,
+		context->data_indexes[n]);
+	context->tries[n].data_index = context->data_indexes[n];
+
+	context->bld_tries[n].trie = build_trie(context, rule_sets[n],
+		&last, &context->tries[n].count);
+
+	return last;
+}
+
 static int
 acl_build_tries(struct acl_build_context *context,
 	struct rte_acl_build_rule *head)
 {
 	int32_t rc;
-	uint32_t n, m, num_tries;
+	uint32_t n, num_tries;
 	struct rte_acl_config *config;
-	struct rte_acl_build_rule *last, *rule;
-	uint32_t wild_limit[RTE_ACL_MAX_LEVELS];
+	struct rte_acl_build_rule *last;
 	struct rte_acl_build_rule *rule_sets[RTE_ACL_MAX_TRIES];
 
 	config = head->config;
-	rule = head;
 	rule_sets[0] = head;
-	num_tries = 1;
 
 	/* initialize tries */
 	for (n = 0; n < RTE_DIM(context->tries); n++) {
 		context->tries[n].type = RTE_ACL_UNUSED_TRIE;
 		context->bld_tries[n].trie = NULL;
 		context->tries[n].count = 0;
-		context->tries[n].smallest = INT32_MAX;
 	}
 
 	context->tries[0].type = RTE_ACL_FULL_TRIE;
@@ -1779,91 +1622,48 @@ acl_build_tries(struct acl_build_context *context,
 	if (rc != 0)
 		return rc;
 
-	n = acl_rule_stats(head, config, &wild_limit[0]);
-
-	/* put all rules that fit the wildness criteria into a seperate trie */
-	while (n > 0 && num_tries < RTE_ACL_MAX_TRIES) {
+	for (n = 0;; n = num_tries) {
 
-		struct rte_acl_config *new_config;
-		struct rte_acl_build_rule **prev = &rule_sets[num_tries - 1];
-		struct rte_acl_build_rule *next = head->next;
+		num_tries = n + 1;
 
-		new_config = acl_build_alloc(context, 1, sizeof(*new_config));
-		if (new_config == NULL) {
-			RTE_LOG(ERR, ACL,
-				"Failed to geti space for new config\n");
+		last = build_one_trie(context, rule_sets, n);
+		if (context->bld_tries[n].trie == NULL) {
+			RTE_LOG(ERR, ACL, "Build of %u-th trie failed\n", n);
 			return -ENOMEM;
 		}
 
-		memcpy(new_config, config, sizeof(*new_config));
-		config = new_config;
-		rule_sets[num_tries] = NULL;
-
-		for (rule = head; rule != NULL; rule = next) {
-
-			int move = 1;
-
-			next = rule->next;
-			for (m = 0; m < config->num_fields; m++) {
-				int x = config->defs[m].field_index;
-				if (rule->wildness[x] < wild_limit[m]) {
-					move = 0;
-					break;
-				}
-			}
+		/* Build of the last trie completed. */
+		if (last == NULL)
+			break;
 
-			if (move) {
-				rule->config = new_config;
-				rule->next = rule_sets[num_tries];
-				rule_sets[num_tries] = rule;
-				*prev = next;
-			} else
-				prev = &rule->next;
+		if (num_tries == RTE_DIM(context->tries)) {
+			RTE_LOG(ERR, ACL,
+				"Exceeded max number of tries: %u\n",
+				num_tries);
+			return -ENOMEM;
 		}
 
-		head = rule_sets[num_tries];
-		n = acl_rule_stats(rule_sets[num_tries], config,
-			&wild_limit[0]);
-		num_tries++;
-	}
+		/* Trie is getting too big, split remaining rule set. */
+		rule_sets[num_tries] = last->next;
+		last->next = NULL;
+		acl_free_node(context, context->bld_tries[n].trie);
 
-	if (n > 0)
-		RTE_LOG(DEBUG, ACL,
-			"Number of tries(%d) exceeded.\n", RTE_ACL_MAX_TRIES);
+		/* Create a new copy of config for remaining rules. */
+		config = acl_build_alloc(context, 1, sizeof(*config));
+		memcpy(config, rule_sets[n]->config, sizeof(*config));
 
-	for (n = 0; n < num_tries; n++) {
+		/* Make remaining rules use new config. */
+		for (head = rule_sets[num_tries]; head != NULL;
+				head = head->next)
+			head->config = config;
 
-		rule_sets[n] = sort_rules(rule_sets[n]);
-		context->tries[n].type = RTE_ACL_FULL_TRIE;
-		context->tries[n].count = 0;
-		context->tries[n].num_data_indexes =
-			acl_build_index(rule_sets[n]->config,
-			context->data_indexes[n]);
-		context->tries[n].data_index = context->data_indexes[n];
-
-		context->bld_tries[n].trie =
-			build_trie(context, rule_sets[n],
-			&last, &context->tries[n].count);
-		if (context->bld_tries[n].trie == NULL) {
+		/* Rebuild the trie for the reduced rule-set. */
+		last = build_one_trie(context, rule_sets, n);
+		if (context->bld_tries[n].trie == NULL || last != NULL) {
 			RTE_LOG(ERR, ACL,
 				"Build of %u-th trie failed\n", n);
 			return -ENOMEM;
 		}
-		if (last != NULL) {
-			rule_sets[num_tries++] = last->next;
-			last->next = NULL;
-			acl_free_node(context, context->bld_tries[n].trie);
-			context->tries[n].count = 0;
-
-			context->bld_tries[n].trie =
-				build_trie(context, rule_sets[n],
-				&last, &context->tries[n].count);
-			if (context->bld_tries[n].trie == NULL) {
-				RTE_LOG(ERR, ACL,
-					"Build of %u-th trie failed\n", n);
-				return -ENOMEM;
-			}
-		}
 	}
 
 	context->num_tries = num_tries;
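build_one_trie() factors out the stats/sort/build sequence so the loop above can call it twice per trie: once to discover where the trie exceeds context->node_max (build_trie() returns early and reports the last rule that fit through *last), and once more to rebuild the trie for the reduced rule set. A toy model of that budget-driven splitting, with made-up per-rule node costs (hedged; the real code rebuilds the trie rather than counting costs up front):

#include <stdio.h>

#define MAX_TRIES 4

int
main(void)
{
	int rule_cost[] = {3, 4, 2, 5, 1, 6};	/* nodes added per rule */
	int n_rules = 6, node_max = 8;
	int trie = 0, nodes = 0, i;

	for (i = 0; i < n_rules && trie < MAX_TRIES; i++) {
		if (nodes + rule_cost[i] > node_max) {
			printf("trie %d full at %d nodes, splitting\n",
				trie, nodes);
			trie++;		/* remaining rules go to a new trie */
			nodes = 0;
		}
		nodes += rule_cost[i];
	}
	printf("trie %d ends with %d nodes; %d tries used\n",
		trie, nodes, trie + 1);
	return 0;
}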
@@ -1876,15 +1676,20 @@ acl_build_log(const struct acl_build_context *ctx)
 	uint32_t n;
 
 	RTE_LOG(DEBUG, ACL, "Build phase for ACL \"%s\":\n"
+		"node limit for tree split: %u\n"
+		"nodes created: %u\n"
 		"memory consumed: %zu\n",
 		ctx->acx->name,
+		ctx->node_max,
+		ctx->num_nodes,
 		ctx->pool.alloc);
 
 	for (n = 0; n < RTE_DIM(ctx->tries); n++) {
 		if (ctx->tries[n].count != 0)
 			RTE_LOG(DEBUG, ACL,
-				"trie %u: number of rules: %u\n",
-				n, ctx->tries[n].count);
+				"trie %u: number of rules: %u, indexes: %u\n",
+				n, ctx->tries[n].count,
+				ctx->tries[n].num_data_indexes);
 	}
 }
 
@@ -1903,12 +1708,6 @@ acl_build_rules(struct acl_build_context *bcx)
 	sz = ofs + n * fn * sizeof(*wp);
 
 	br = tb_alloc(&bcx->pool, sz);
-	if (br == NULL) {
-		RTE_LOG(ERR, ACL, "ACL conext %s: failed to create a copy "
-			"of %u build rules (%zu bytes)\n",
-			bcx->acx->name, n, sz);
-		return -ENOMEM;
-	}
 
 	wp = (uint32_t *)((uintptr_t)br + ofs);
 	num = 0;
@@ -1948,15 +1747,62 @@ acl_set_data_indexes(struct rte_acl_ctx *ctx)
 		memcpy(ctx->data_indexes + ofs, ctx->trie[i].data_index,
 			n * sizeof(ctx->data_indexes[0]));
 		ctx->trie[i].data_index = ctx->data_indexes + ofs;
-		ofs += n;
+		ofs += RTE_ACL_MAX_FIELDS;
 	}
 }
 
+/*
+ * Internal routine, performs 'build' phase of trie generation:
+ * - sets up the build context.
+ * - analyzes the given set of rules.
+ * - builds internal tree(s).
+ */
+static int
+acl_bld(struct acl_build_context *bcx, struct rte_acl_ctx *ctx,
+	const struct rte_acl_config *cfg, uint32_t node_max)
+{
+	int32_t rc;
+
+	/* set up the build context. */
+	memset(bcx, 0, sizeof(*bcx));
+	bcx->acx = ctx;
+	bcx->pool.alignment = ACL_POOL_ALIGN;
+	bcx->pool.min_alloc = ACL_POOL_ALLOC_MIN;
+	bcx->cfg = *cfg;
+	bcx->category_mask = LEN2MASK(bcx->cfg.num_categories);
+	bcx->node_max = node_max;
+
+	rc = sigsetjmp(bcx->pool.fail, 0);
+
+	/* build phase ran out of memory. */
+	if (rc != 0) {
+		RTE_LOG(ERR, ACL,
+			"ACL context: %s, %s() failed with error code: %d\n",
+			bcx->acx->name, __func__, rc);
+		return rc;
+	}
+
+	/* Create a build rules copy. */
+	rc = acl_build_rules(bcx);
+	if (rc != 0)
+		return rc;
+
+	/* No rules to build for that context+config */
+	if (bcx->build_rules == NULL) {
+		rc = -EINVAL;
+	} else {
+		/* build internal trie representation. */
+		rc = acl_build_tries(bcx, bcx->build_rules);
+	}
+	return rc;
+}
 
 int
 rte_acl_build(struct rte_acl_ctx *ctx, const struct rte_acl_config *cfg)
 {
-	int rc;
+	int32_t rc;
+	uint32_t n;
+	size_t max_size;
 	struct acl_build_context bcx;
 
 	if (ctx == NULL || cfg == NULL || cfg->num_categories == 0 ||
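Two details in the hunks above deserve a note. First, acl_set_data_indexes() now advances the offset by RTE_ACL_MAX_FIELDS instead of the per-trie index count, which pairs with rte_acl_gen() below being handed a data-index array sized at RTE_ACL_MAX_FIELDS entries per trie: every trie appears to get a fixed-stride slot, so a trie's indexes can be located without knowing its neighbours' sizes. A toy layout with a stand-in stride (hedged; 4 replaces the real RTE_ACL_MAX_FIELDS only to keep the output short):

#include <stdio.h>

#define MAX_FIELDS 4	/* stand-in for RTE_ACL_MAX_FIELDS */

int
main(void)
{
	unsigned int counts[3] = {2, 4, 3};	/* indexes used per trie */
	unsigned int t, ofs = 0;

	for (t = 0; t < 3; t++) {
		printf("trie %u: slot at offset %u, %u entries used\n",
			t, ofs, counts[t]);
		ofs += MAX_FIELDS;	/* fixed stride, not counts[t] */
	}
	return 0;
}

Second, the sigsetjmp() in acl_bld() is the catch side of the pool-allocator convention sketched earlier: any allocation failure during the build lands here with a nonzero rc.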
@@ -1965,44 +1811,39 @@ rte_acl_build(struct rte_acl_ctx *ctx, const struct rte_acl_config *cfg)
 
 	acl_build_reset(ctx);
 
-	memset(&bcx, 0, sizeof(bcx));
-	bcx.acx = ctx;
-	bcx.pool.alignment = ACL_POOL_ALIGN;
-	bcx.pool.min_alloc = ACL_POOL_ALLOC_MIN;
-	bcx.cfg = *cfg;
-	bcx.category_mask = LEN2MASK(bcx.cfg.num_categories);
-
-
-	/* Create a buid rules copy. */
-	rc = acl_build_rules(&bcx);
-	if (rc != 0)
-		return rc;
+	if (cfg->max_size == 0) {
+		n = NODE_MIN;
+		max_size = SIZE_MAX;
+	} else {
+		n = NODE_MAX;
+		max_size = cfg->max_size;
+	}
 
-	/* No rules to build for that context+config */
-	if (bcx.build_rules == NULL) {
-		rc = -EINVAL;
+	for (rc = -ERANGE; n >= NODE_MIN && rc == -ERANGE; n /= 2) {
 
-	/* build internal trie representation. */
-	} else if ((rc = acl_build_tries(&bcx, bcx.build_rules)) == 0) {
+		/* perform build phase. */
+		rc = acl_bld(&bcx, ctx, cfg, n);
 
-		/* allocate and fill run-time structures. */
-		rc = rte_acl_gen(ctx, bcx.tries, bcx.bld_tries,
-				bcx.num_tries, bcx.cfg.num_categories,
-				RTE_ACL_IPV4VLAN_NUM * RTE_DIM(bcx.tries),
-				bcx.num_build_rules);
 		if (rc == 0) {
+			/* allocate and fill run-time structures. */
+			rc = rte_acl_gen(ctx, bcx.tries, bcx.bld_tries,
+				bcx.num_tries, bcx.cfg.num_categories,
+				RTE_ACL_MAX_FIELDS * RTE_DIM(bcx.tries) *
+				sizeof(ctx->data_indexes[0]), max_size);
+			if (rc == 0) {
+				/* set data indexes. */
+				acl_set_data_indexes(ctx);
+
+				/* copy in build config. */
+				ctx->config = *cfg;
+			}
+		}
 
-			/* set data indexes. */
-			acl_set_data_indexes(ctx);
+		acl_build_log(&bcx);
 
-			/* copy in build config. */
-			ctx->config = *cfg;
-		}
+		/* cleanup after build. */
+		tb_free_pool(&bcx.pool);
 	}
 
-	acl_build_log(&bcx);
-
-	/* cleanup after build. */
-	tb_free_pool(&bcx.pool);
 	return rc;
 }
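The reworked rte_acl_build() wraps build and generation in a retry loop. When cfg->max_size is zero the loop runs once with the conservative NODE_MIN budget and no size check; otherwise it starts at NODE_MAX (fewer, larger tries) and halves the budget while rte_acl_gen() reports -ERANGE, meaning the runtime image would exceed cfg->max_size. The control flow in isolation, with a stub standing in for the build-plus-generate step (hedged sketch, not DPDK code):

#include <errno.h>
#include <stdio.h>

#define NODE_MAX	0x4000
#define NODE_MIN	0x800

/* stub: pretend builds with a big node budget exceed max_size */
static int
try_build(unsigned int node_max)
{
	return node_max > 0x1000 ? -ERANGE : 0;
}

int
main(void)
{
	int rc = -ERANGE;
	unsigned int n;

	for (n = NODE_MAX; n >= NODE_MIN && rc == -ERANGE; n /= 2) {
		rc = try_build(n);
		printf("node_max=%#x -> rc=%d\n", n, rc);
	}
	return rc == 0 ? 0 : 1;
}

Smaller node budgets make build_trie() split the rule set into more, smaller tries, which shrinks the generated runtime image at some cost in classification speed.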