/* number of pointers per alloc */
#define ACL_PTR_ALLOC 32
-/* variable for dividing rule sets */
-#define NODE_MAX 2500
-#define NODE_PERCENTAGE (0.40)
-#define RULE_PERCENTAGE (0.40)
+/* heuristic macros for dividing rule sets */
+#define NODE_MAX 0x4000
+#define NODE_MIN 0x800
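+/*
+ * NODE_MAX is the starting per-trie node limit when cfg->max_size is
+ * set; NODE_MIN is used when no size limit is given and is the lowest
+ * limit the build is retried with before giving up.
+ */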
/* TALLY entries are per-field statistics */
enum {
const struct rte_acl_ctx *acx;
struct rte_acl_build_rule *build_rules;
struct rte_acl_config cfg;
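+	/* per-trie node limit for the current build attempt. */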
+ int32_t node_max;
uint32_t node;
uint32_t num_nodes;
uint32_t category_mask;
static int acl_merge_trie(struct acl_build_context *context,
struct rte_acl_node *node_a, struct rte_acl_node *node_b,
- uint32_t level, uint32_t subtree_id, struct rte_acl_node **node_c);
+ uint32_t level, struct rte_acl_node **node_c);
static int acl_merge(struct acl_build_context *context,
struct rte_acl_node *node_a, struct rte_acl_node *node_b,
/* add room for more pointers */
num_ptrs = node->max_ptrs + ACL_PTR_ALLOC;
ptrs = acl_build_alloc(context, num_ptrs, sizeof(*ptrs));
- if (ptrs == NULL)
- return -ENOMEM;
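+	/*
+	 * No NULL check needed: on failure the build pool
+	 * siglongjmp()-s back to acl_bld().
+	 */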
/* copy current pointers to the new memory allocation */
if (node->ptrs != NULL) {
* Determine if A and/or B are supersets of the intersection.
*/
static int
-acl_intersect_type(struct rte_acl_bitset *a_bits,
- struct rte_acl_bitset *b_bits,
+acl_intersect_type(const struct rte_acl_bitset *a_bits,
+ const struct rte_acl_bitset *b_bits,
struct rte_acl_bitset *intersect)
{
uint32_t n;
struct rte_acl_node *next;
next = acl_alloc_node(context, node->level);
- if (next == NULL)
- return NULL;
/* allocate the pointers */
if (node->num_ptrs > 0) {
next->ptrs = acl_build_alloc(context,
node->max_ptrs,
sizeof(struct rte_acl_ptr_set));
- if (next->ptrs == NULL)
- return NULL;
next->max_ptrs = node->max_ptrs;
}
/* Duplicate A for intersection */
node_c = acl_dup_node(context, node_a->ptrs[idx_a].ptr);
- if (node_c == NULL)
- return -1;
/* Remove intersection from A */
acl_exclude_ptr(context, node_a, idx_a, intersect_ptr);
return 0;
}
-/*
-* Within the existing trie structure, determine which nodes are
-* part of the subtree of the trie to be merged.
-*
-* For these purposes, a subtree is defined as the set of nodes that
-* are 1) not a superset of the intersection with the same level of
-* the merging tree, and 2) do not have any references from a node
-* outside of the subtree.
-*/
-static void
-mark_subtree(struct rte_acl_node *node,
- struct rte_acl_bitset *level_bits,
- uint32_t level,
- uint32_t id)
-{
- uint32_t n;
-
- /* mark this node as part of the subtree */
- node->subtree_id = id | RTE_ACL_SUBTREE_NODE;
-
- for (n = 0; n < node->num_ptrs; n++) {
-
- if (node->ptrs[n].ptr != NULL) {
-
- struct rte_acl_bitset intersect_bits;
- int intersect;
-
- /*
- * Item 1) :
- * check if this child pointer is not a superset of the
- * same level of the merging tree.
- */
- intersect = acl_intersect_type(&node->ptrs[n].values,
- &level_bits[level],
- &intersect_bits);
-
- if ((intersect & ACL_INTERSECT_A) == 0) {
-
- struct rte_acl_node *child = node->ptrs[n].ptr;
-
- /*
- * reset subtree reference if this is
- * the first visit by this subtree.
- */
- if (child->subtree_id != id) {
- child->subtree_id = id;
- child->subtree_ref_count = 0;
- }
-
- /*
- * Item 2) :
- * increment the subtree reference count and if
- * all references are from this subtree then
- * recurse to that child
- */
- child->subtree_ref_count++;
- if (child->subtree_ref_count ==
- child->ref_count)
- mark_subtree(child, level_bits,
- level + 1, id);
- }
- }
- }
-}
-
-/*
- * Build the set of bits that define the set of transitions
- * for each level of a trie.
- */
-static void
-build_subset_mask(struct rte_acl_node *node,
- struct rte_acl_bitset *level_bits,
- int level)
-{
- uint32_t n;
-
- /* Add this node's transitions to the set for this level */
- for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
- level_bits[level].bits[n] &= node->values.bits[n];
-
- /* For each child, add the transitions for the next level */
- for (n = 0; n < node->num_ptrs; n++)
- if (node->ptrs[n].ptr != NULL)
- build_subset_mask(node->ptrs[n].ptr, level_bits,
- level + 1);
-}
-
-
/*
* Merge nodes A and B together,
* returns a node that is the path for the intersection
static int
acl_merge_trie(struct acl_build_context *context,
struct rte_acl_node *node_a, struct rte_acl_node *node_b,
- uint32_t level, uint32_t subtree_id, struct rte_acl_node **return_c)
+ uint32_t level, struct rte_acl_node **return_c)
{
uint32_t n, m, ptrs_c, ptrs_b;
uint32_t min_add_c, min_add_b;
}
/*
- * Create node C as a copy of node A if node A is not part of
- * a subtree of the merging tree (node B side). Otherwise,
- * just use node A.
+ * Create node C as a copy of node A and merge B into it:
+ * C = merge(A, B). If node A can be used as-is (A == C),
+ * C is destroyed later and A is returned.
*/
- if (level > 0 &&
- node_a->subtree_id !=
- (subtree_id | RTE_ACL_SUBTREE_NODE)) {
+ if (level > 0)
node_c = acl_dup_node(context, node_a);
- node_c->subtree_id = subtree_id | RTE_ACL_SUBTREE_NODE;
- }
/*
* If the two node transitions intersect then merge the transitions.
if (acl_merge_trie(context,
node_c->ptrs[n].ptr,
node_b->ptrs[m].ptr,
- level + 1, subtree_id,
+ level + 1,
&child_node_c))
return 1;
struct rte_acl_build_rule *prev, *rule;
struct rte_acl_node *end, *merge, *root, *end_prev;
const struct rte_acl_field *fld;
- struct rte_acl_bitset level_bits[RTE_ACL_MAX_LEVELS];
prev = head;
rule = head;
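+	/* make sure *last is set on every return path. */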
+ *last = prev;
trie = acl_alloc_node(context, 0);
- if (trie == NULL)
- return NULL;
while (rule != NULL) {
root = acl_alloc_node(context, 0);
- if (root == NULL)
- return NULL;
root->ref_count = 1;
end = root;
/* merge this field on to the end of the rule */
if (acl_merge_trie(context, end_prev, merge, 0,
- 0, NULL) != 0) {
+ NULL) != 0) {
return NULL;
}
}
* Setup the results for this rule.
* The result and priority of each category.
*/
- if (end->mrt == NULL &&
- (end->mrt = acl_build_alloc(context, 1,
- sizeof(*end->mrt))) == NULL)
- return NULL;
+ if (end->mrt == NULL)
+ end->mrt = acl_build_alloc(context, 1,
+ sizeof(*end->mrt));
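+
+	/* set the result and priority for each category in the rule's mask. */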
- for (m = 0; m < context->cfg.num_categories; m++) {
+ for (m = context->cfg.num_categories; 0 != m--; ) {
if (rule->f->data.category_mask & (1 << m)) {
end->mrt->results[m] = rule->f->data.userdata;
end->mrt->priority[m] = rule->f->data.priority;
}
node_count = context->num_nodes;
-
- memset(&level_bits[0], UINT8_MAX, sizeof(level_bits));
- build_subset_mask(root, &level_bits[0], 0);
- mark_subtree(trie, &level_bits[0], 0, end->match_flag);
(*count)++;
/* merge this rule into the trie */
- if (acl_merge_trie(context, trie, root, 0, end->match_flag,
- NULL))
+ if (acl_merge_trie(context, trie, root, 0, NULL))
return NULL;
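+	/*
+	 * See how many nodes this merge added; once the trie grows
+	 * past the node limit, stop here and leave the remaining
+	 * rules to the next trie.
+	 */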
node_count = context->num_nodes - node_count;
- if (node_count > NODE_MAX) {
+ if (node_count > context->node_max) {
*last = prev;
return trie;
}
return 0;
}
+/*
+ * Sort the list of rules by rule wildness, using a simple
+ * insertion sort.
+ */
static struct rte_acl_build_rule *
sort_rules(struct rte_acl_build_rule *head)
{
new_head = NULL;
while (head != NULL) {
+
+ /* remove element from the head of the old list. */
r = head;
head = r->next;
r->next = NULL;
- if (new_head == NULL) {
- new_head = r;
- } else {
- for (p = &new_head;
- (l = *p) != NULL &&
- rule_cmp_wildness(l, r) >= 0;
- p = &l->next)
- ;
-
- r->next = *p;
- *p = r;
- }
+
+ /* walk through new sorted list to find a proper place. */
+ for (p = &new_head;
+ (l = *p) != NULL &&
+ rule_cmp_wildness(l, r) >= 0;
+ p = &l->next)
+ ;
+
+ /* insert element into the new sorted list. */
+ r->next = *p;
+ *p = r;
}
return new_head;
context->tries[n].type = RTE_ACL_UNUSED_TRIE;
context->bld_tries[n].trie = NULL;
context->tries[n].count = 0;
- context->tries[n].smallest = INT32_MAX;
}
context->tries[0].type = RTE_ACL_FULL_TRIE;
/* Create a new copy of config for remaining rules. */
config = acl_build_alloc(context, 1, sizeof(*config));
- if (config == NULL) {
- RTE_LOG(ERR, ACL,
- "New config allocation for %u-th "
- "trie failed\n", num_tries);
- return -ENOMEM;
- }
-
memcpy(config, rule_sets[n]->config, sizeof(*config));
/* Make remaining rules use new config. */
uint32_t n;
RTE_LOG(DEBUG, ACL, "Build phase for ACL \"%s\":\n"
+ "node limit for tree split: %u\n"
"nodes created: %u\n"
"memory consumed: %zu\n",
ctx->acx->name,
+ ctx->node_max,
ctx->num_nodes,
ctx->pool.alloc);
sz = ofs + n * fn * sizeof(*wp);
br = tb_alloc(&bcx->pool, sz);
- if (br == NULL) {
- RTE_LOG(ERR, ACL, "ACL context %s: failed to create a copy "
- "of %u build rules (%zu bytes)\n",
- bcx->acx->name, n, sz);
- return -ENOMEM;
- }
wp = (uint32_t *)((uintptr_t)br + ofs);
num = 0;
}
}
+/*
+ * Internal routine, performs the 'build' phase of trie generation:
+ * - sets up the build context.
+ * - analyzes the given set of rules.
+ * - builds the internal tree(s).
+ */
+static int
+acl_bld(struct acl_build_context *bcx, struct rte_acl_ctx *ctx,
+ const struct rte_acl_config *cfg, uint32_t node_max)
+{
+ int32_t rc;
+
+	/* set up the build context. */
+ memset(bcx, 0, sizeof(*bcx));
+ bcx->acx = ctx;
+ bcx->pool.alignment = ACL_POOL_ALIGN;
+ bcx->pool.min_alloc = ACL_POOL_ALLOC_MIN;
+ bcx->cfg = *cfg;
+ bcx->category_mask = LEN2MASK(bcx->cfg.num_categories);
+ bcx->node_max = node_max;
+
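+	/*
+	 * Set the recovery point for the build allocator: tb_alloc()
+	 * siglongjmp()-s back here when the pool runs out of memory.
+	 */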
+ rc = sigsetjmp(bcx->pool.fail, 0);
+
+	/* the build phase ran out of memory. */
+ if (rc != 0) {
+ RTE_LOG(ERR, ACL,
+ "ACL context: %s, %s() failed with error code: %d\n",
+ bcx->acx->name, __func__, rc);
+ return rc;
+ }
+
+	/* Create a copy of the build rules. */
+ rc = acl_build_rules(bcx);
+ if (rc != 0)
+ return rc;
+
+ /* No rules to build for that context+config */
+ if (bcx->build_rules == NULL) {
+ rc = -EINVAL;
+ } else {
+ /* build internal trie representation. */
+ rc = acl_build_tries(bcx, bcx->build_rules);
+ }
+ return rc;
+}
int
rte_acl_build(struct rte_acl_ctx *ctx, const struct rte_acl_config *cfg)
{
- int rc;
+ int32_t rc;
+ uint32_t n;
+ size_t max_size;
struct acl_build_context bcx;
if (ctx == NULL || cfg == NULL || cfg->num_categories == 0 ||
acl_build_reset(ctx);
- memset(&bcx, 0, sizeof(bcx));
- bcx.acx = ctx;
- bcx.pool.alignment = ACL_POOL_ALIGN;
- bcx.pool.min_alloc = ACL_POOL_ALLOC_MIN;
- bcx.cfg = *cfg;
- bcx.category_mask = LEN2MASK(bcx.cfg.num_categories);
-
-
- /* Create a build rules copy. */
- rc = acl_build_rules(&bcx);
- if (rc != 0)
- return rc;
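+	/*
+	 * Without a size limit, build once with the smallest node limit
+	 * and accept any image size; with a limit, start from the largest
+	 * node limit and let the loop below shrink it until the image fits.
+	 */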
+ if (cfg->max_size == 0) {
+ n = NODE_MIN;
+ max_size = SIZE_MAX;
+ } else {
+ n = NODE_MAX;
+ max_size = cfg->max_size;
+ }
- /* No rules to build for that context+config */
- if (bcx.build_rules == NULL) {
- rc = -EINVAL;
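+	/*
+	 * Retry the build, halving the node limit each time rte_acl_gen()
+	 * reports (-ERANGE) that the run-time image would exceed max_size.
+	 */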
+ for (rc = -ERANGE; n >= NODE_MIN && rc == -ERANGE; n /= 2) {
- /* build internal trie representation. */
- } else if ((rc = acl_build_tries(&bcx, bcx.build_rules)) == 0) {
+ /* perform build phase. */
+ rc = acl_bld(&bcx, ctx, cfg, n);
- /* allocate and fill run-time structures. */
- rc = rte_acl_gen(ctx, bcx.tries, bcx.bld_tries,
+ if (rc == 0) {
+			/*
+			 * allocate and fill run-time structures;
+			 * fails with -ERANGE if they would exceed max_size.
+			 */
+ rc = rte_acl_gen(ctx, bcx.tries, bcx.bld_tries,
bcx.num_tries, bcx.cfg.num_categories,
RTE_ACL_MAX_FIELDS * RTE_DIM(bcx.tries) *
- sizeof(ctx->data_indexes[0]),
- bcx.num_build_rules);
- if (rc == 0) {
+ sizeof(ctx->data_indexes[0]), max_size);
+ if (rc == 0) {
+ /* set data indexes. */
+ acl_set_data_indexes(ctx);
- /* set data indexes. */
- acl_set_data_indexes(ctx);
-
- /* copy in build config. */
- ctx->config = *cfg;
+ /* copy in build config. */
+ ctx->config = *cfg;
+ }
}
- }
- acl_build_log(&bcx);
+ acl_build_log(&bcx);
+
+ /* cleanup after build. */
+ tb_free_pool(&bcx.pool);
+ }
- /* cleanup after build. */
- tb_free_pool(&bcx.pool);
return rc;
}