/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <nmmintrin.h>
#include <rte_acl.h>
#include "tb_mem.h"
#include "acl.h"

#define	ACL_POOL_ALIGN		8
#define	ACL_POOL_ALLOC_MIN	0x800000

/* number of pointers per alloc */
#define	ACL_PTR_ALLOC	32

/* variable for dividing rule sets */
#define	NODE_MAX	2500
#define	NODE_PERCENTAGE	(0.40)
#define	RULE_PERCENTAGE	(0.40)

/* TALLY are statistics per field */
enum {
	TALLY_0 = 0,       /* number of rules that are 0% or more wild. */
	TALLY_25,          /* number of rules that are 25% or more wild. */
	TALLY_50,
	TALLY_75,
	TALLY_100,
	TALLY_DEACTIVATED, /* deactivated fields (100% wild in all rules). */
	TALLY_DEPTH,
	/* number of rules that are 100% wild for this field and higher. */
	TALLY_NUM
};

static const uint32_t wild_limits[TALLY_DEACTIVATED] = {0, 25, 50, 75, 100};

enum {
	ACL_INTERSECT_NONE = 0,
	ACL_INTERSECT_A = 1,	/* set A has bits outside the A/B intersection */
	ACL_INTERSECT_B = 2,	/* set B has bits outside the A/B intersection */
	ACL_INTERSECT = 4,	/* sets A and B intersect */
};

enum {
	ACL_PRIORITY_EQUAL = 0,
	ACL_PRIORITY_NODE_A = 1,
	ACL_PRIORITY_NODE_B = 2,
	ACL_PRIORITY_MIXED = 3
};

struct acl_mem_block {
	uint32_t block_size;
	void     *mem_ptr;
};

#define	MEM_BLOCK_NUM	16

/* Single ACL rule, build representation. */
struct rte_acl_build_rule {
	struct rte_acl_build_rule   *next;
	struct rte_acl_config       *config;
	/**< configuration for each field in the rule. */
	const struct rte_acl_rule   *f;
	uint32_t                    *wildness;
};

/* Context for build phase */
struct acl_build_context {
	const struct rte_acl_ctx  *acx;
	struct rte_acl_build_rule *build_rules;
	struct rte_acl_config     cfg;
	uint32_t                  num_nodes;
	uint32_t                  category_mask;
	uint32_t                  num_rules;
	uint32_t                  node_id;
	uint32_t                  num_tries;
	uint32_t                  num_build_rules;
	struct tb_mem_pool        pool;
	struct rte_acl_trie       tries[RTE_ACL_MAX_TRIES];
	struct rte_acl_bld_trie   bld_tries[RTE_ACL_MAX_TRIES];
	uint32_t            data_indexes[RTE_ACL_MAX_TRIES][RTE_ACL_MAX_FIELDS];

	/* memory free lists for nodes and blocks used for node ptrs */
	struct acl_mem_block      blocks[MEM_BLOCK_NUM];
	struct rte_acl_node       *node_free_list;
};

static int acl_merge_trie(struct acl_build_context *context,
	struct rte_acl_node *node_a, struct rte_acl_node *node_b,
	uint32_t level, uint32_t subtree_id, struct rte_acl_node **node_c);

static int acl_merge(struct acl_build_context *context,
	struct rte_acl_node *node_a, struct rte_acl_node *node_b,
	int move, int a_subset, int level);

static void
acl_deref_ptr(struct acl_build_context *context,
	struct rte_acl_node *node, int index);

static void *
acl_build_alloc(struct acl_build_context *context, size_t n, size_t s)
{
	uint32_t m;
	void *p;
	size_t alloc_size = n * s;

	/*
	 * look for memory in free lists
	 */
	for (m = 0; m < RTE_DIM(context->blocks); m++) {
		if (context->blocks[m].block_size ==
				alloc_size && context->blocks[m].mem_ptr != NULL) {
			p = context->blocks[m].mem_ptr;
			context->blocks[m].mem_ptr = *((void **)p);
			memset(p, 0, alloc_size);
			return p;
		}
	}

	/*
	 * return allocation from memory pool
	 */
	p = tb_alloc(&context->pool, alloc_size);
	return p;
}

/*
 * Free memory blocks (kept in context for reuse).
 */
static void
acl_build_free(struct acl_build_context *context, size_t s, void *p)
{
	uint32_t n;

	for (n = 0; n < RTE_DIM(context->blocks); n++) {
		if (context->blocks[n].block_size == s) {
			*((void **)p) = context->blocks[n].mem_ptr;
			context->blocks[n].mem_ptr = p;
			return;
		}
	}
	for (n = 0; n < RTE_DIM(context->blocks); n++) {
		if (context->blocks[n].block_size == 0) {
			context->blocks[n].block_size = s;
			*((void **)p) = NULL;
			context->blocks[n].mem_ptr = p;
			return;
		}
	}
}

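/*
 * Note: the free lists above are intrusive -- the first sizeof(void *)
 * bytes of a freed block store the pointer to the next free block of
 * the same size (hence the *((void **)p) casts). This assumes every
 * block is at least pointer-sized, which holds for the node and
 * pointer-array allocations made in this file.
 */
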
/*
 * Allocate and initialize a new node.
 */
static struct rte_acl_node *
acl_alloc_node(struct acl_build_context *context, int level)
{
	struct rte_acl_node *node;

	if (context->node_free_list != NULL) {
		node = context->node_free_list;
		context->node_free_list = node->next;
		memset(node, 0, sizeof(struct rte_acl_node));
	} else {
		node = acl_build_alloc(context, sizeof(struct rte_acl_node), 1);
	}

	if (node != NULL) {
		node->num_ptrs = 0;
		node->level = level;
		node->node_type = RTE_ACL_NODE_UNDEFINED;
		node->node_index = RTE_ACL_NODE_UNDEFINED;
		context->num_nodes++;
		node->id = context->node_id++;
	}
	return node;
}

/*
 * Dereference all nodes to which this node points
 */
static void
acl_free_node(struct acl_build_context *context,
	struct rte_acl_node *node)
{
	uint32_t n;

	if (node->prev != NULL)
		node->prev->next = NULL;
	for (n = 0; n < node->num_ptrs; n++)
		acl_deref_ptr(context, node, n);

	/* free mrt if this is a match node */
	if (node->mrt != NULL) {
		acl_build_free(context, sizeof(struct rte_acl_match_results),
			node->mrt);
		node->mrt = NULL;
	}

	/* free transitions to other nodes */
	if (node->ptrs != NULL) {
		acl_build_free(context,
			node->max_ptrs * sizeof(struct rte_acl_ptr_set),
			node->ptrs);
		node->ptrs = NULL;
	}

	/* put it on the free list */
	context->num_nodes--;
	node->next = context->node_free_list;
	context->node_free_list = node;
}

/*
 * Include src bitset in dst bitset
 */
static void
acl_include(struct rte_acl_bitset *dst, struct rte_acl_bitset *src, bits_t mask)
{
	uint32_t n;

	for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
		dst->bits[n] = (dst->bits[n] & mask) | src->bits[n];
}

/*
 * Set dst to bits of src1 that are not in src2
 */
static int
acl_exclude(struct rte_acl_bitset *dst,
	struct rte_acl_bitset *src1,
	struct rte_acl_bitset *src2)
{
	uint32_t n;
	bits_t all_bits = 0;

	for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++) {
		dst->bits[n] = src1->bits[n] & ~src2->bits[n];
		all_bits |= dst->bits[n];
	}
	return all_bits != 0;
}

/*
 * Add a pointer (ptr) to a node.
 */
static int
acl_add_ptr(struct acl_build_context *context,
	struct rte_acl_node *node,
	struct rte_acl_node *ptr,
	struct rte_acl_bitset *bits)
{
	uint32_t n, num_ptrs;
	struct rte_acl_ptr_set *ptrs = NULL;

	/*
	 * If there's already a pointer to the same node, just add to the
	 * bitset.
	 */
	for (n = 0; n < node->num_ptrs; n++) {
		if (node->ptrs[n].ptr != NULL) {
			if (node->ptrs[n].ptr == ptr) {
				acl_include(&node->ptrs[n].values, bits, -1);
				acl_include(&node->values, bits, -1);
				return 0;
			}
		}
	}

	/* if there's no room for another pointer, make room */
	if (node->num_ptrs >= node->max_ptrs) {

		/* add room for more pointers */
		num_ptrs = node->max_ptrs + ACL_PTR_ALLOC;
		ptrs = acl_build_alloc(context, num_ptrs, sizeof(*ptrs));
		if (ptrs == NULL)
			return -ENOMEM;

		/* copy current pointers to the new memory allocation */
		if (node->ptrs != NULL) {
			memcpy(ptrs, node->ptrs,
				node->num_ptrs * sizeof(*ptrs));
			acl_build_free(context, node->max_ptrs * sizeof(*ptrs),
				node->ptrs);
		}

		node->ptrs = ptrs;
		node->max_ptrs = num_ptrs;
	}

	/* Find available ptr and add a new pointer to this node */
	for (n = node->min_add; n < node->max_ptrs; n++) {
		if (node->ptrs[n].ptr == NULL) {
			node->ptrs[n].ptr = ptr;
			acl_include(&node->ptrs[n].values, bits, 0);
			acl_include(&node->values, bits, -1);
			if (ptr != NULL)
				ptr->ref_count++;
			if (node->num_ptrs <= n)
				node->num_ptrs = n + 1;
			return 0;
		}
	}

	return 0;
}

/*
 * Add a pointer for a range of values
 */
static int
acl_add_ptr_range(struct acl_build_context *context,
	struct rte_acl_node *root,
	struct rte_acl_node *node,
	uint8_t low,
	uint8_t high)
{
	uint32_t n;
	struct rte_acl_bitset bitset;

	/* clear the bitset values */
	for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
		bitset.bits[n] = 0;

	/* for each bit in range, add bit to set */
	for (n = 0; n < UINT8_MAX + 1; n++)
		if (n >= low && n <= high)
			bitset.bits[n / (sizeof(bits_t) * 8)] |=
				1 << (n % (sizeof(bits_t) * 8));

	return acl_add_ptr(context, root, node, &bitset);
}

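/*
 * Bitset layout example (assuming a 32-bit bits_t): the 256 possible
 * byte values map onto 8 words, so byte value 200 lands in
 * bits[200 / 32] == bits[6], bit 200 % 32 == 8.
 */
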
/*
 * Generate a bitset from a byte value and mask.
 */
static int
acl_gen_mask(struct rte_acl_bitset *bitset, uint32_t value, uint32_t mask)
{
	int range = 0;
	uint32_t n;

	/* clear the bitset values */
	for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
		bitset->bits[n] = 0;

	/* for each bit in value/mask, add bit to set */
	for (n = 0; n < UINT8_MAX + 1; n++) {
		if ((n & mask) == value) {
			range++;
			bitset->bits[n / (sizeof(bits_t) * 8)] |=
				1 << (n % (sizeof(bits_t) * 8));
		}
	}
	return range;
}

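/*
 * Example: value = 0x50, mask = 0xf0 sets the 16 bits for byte values
 * 0x50-0x5f and returns 16; a full mask of 0xff selects exactly one
 * byte value and returns 1.
 */
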
/*
 * Determine how A and B intersect.
 * Determine if A and/or B are supersets of the intersection.
 */
static int
acl_intersect_type(struct rte_acl_bitset *a_bits,
	struct rte_acl_bitset *b_bits,
	struct rte_acl_bitset *intersect)
{
	uint32_t n;
	bits_t intersect_bits = 0;
	bits_t a_superset = 0;
	bits_t b_superset = 0;

	/*
	 * calculate and store intersection and check if A and/or B have
	 * bits outside the intersection (superset)
	 */
	for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++) {
		intersect->bits[n] = a_bits->bits[n] & b_bits->bits[n];
		a_superset |= a_bits->bits[n] ^ intersect->bits[n];
		b_superset |= b_bits->bits[n] ^ intersect->bits[n];
		intersect_bits |= intersect->bits[n];
	}

	n = (intersect_bits == 0 ? ACL_INTERSECT_NONE : ACL_INTERSECT) |
		(b_superset == 0 ? 0 : ACL_INTERSECT_B) |
		(a_superset == 0 ? 0 : ACL_INTERSECT_A);

	return n;
}

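/*
 * Example: A = {1,2,3}, B = {2,3,4} returns
 * ACL_INTERSECT | ACL_INTERSECT_A | ACL_INTERSECT_B (both sets own bits
 * outside the intersection), while A = {1,2}, B = {1,2,3} returns
 * ACL_INTERSECT | ACL_INTERSECT_B.
 */
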
/*
 * Check if all bits in the bitset are on
 */
static int
acl_full(struct rte_acl_node *node)
{
	uint32_t n;
	bits_t all_bits = -1;

	for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
		all_bits &= node->values.bits[n];
	return all_bits == -1;
}

/*
 * Check if all bits in the bitset are off
 */
static int
acl_empty(struct rte_acl_node *node)
{
	uint32_t n;

	if (node->ref_count == 0) {
		for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++) {
			if (0 != node->values.bits[n])
				return 0;
		}
		return 1;
	}
	return 0;
}

/*
 * Compute intersection of A and B
 * return 1 if there is an intersection else 0.
 */
static int
acl_intersect(struct rte_acl_bitset *a_bits,
	struct rte_acl_bitset *b_bits,
	struct rte_acl_bitset *intersect)
{
	uint32_t n;
	bits_t all_bits = 0;

	for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++) {
		intersect->bits[n] = a_bits->bits[n] & b_bits->bits[n];
		all_bits |= intersect->bits[n];
	}
	return all_bits != 0;
}

/*
 * Duplicate a node
 */
static struct rte_acl_node *
acl_dup_node(struct acl_build_context *context, struct rte_acl_node *node)
{
	uint32_t n;
	struct rte_acl_node *next;

	next = acl_alloc_node(context, node->level);
	if (next == NULL)
		return NULL;

	/* allocate the pointers */
	if (node->num_ptrs > 0) {
		next->ptrs = acl_build_alloc(context,
			node->max_ptrs,
			sizeof(struct rte_acl_ptr_set));
		if (next->ptrs == NULL)
			return NULL;
		next->max_ptrs = node->max_ptrs;
	}

	/* copy over the pointers */
	for (n = 0; n < node->num_ptrs; n++) {
		if (node->ptrs[n].ptr != NULL) {
			next->ptrs[n].ptr = node->ptrs[n].ptr;
			next->ptrs[n].ptr->ref_count++;
			acl_include(&next->ptrs[n].values,
				&node->ptrs[n].values, -1);
		}
	}

	next->num_ptrs = node->num_ptrs;

	/* copy over node's match results */
	if (node->match_flag == 0)
		next->match_flag = 0;
	else {
		next->match_flag = -1;
		next->mrt = acl_build_alloc(context, 1, sizeof(*next->mrt));
		memcpy(next->mrt, node->mrt, sizeof(*next->mrt));
	}

	/* copy over node's bitset */
	acl_include(&next->values, &node->values, -1);

	node->next = next;
	next->prev = node;

	return next;
}

/*
 * Dereference a pointer from a node
 */
static void
acl_deref_ptr(struct acl_build_context *context,
	struct rte_acl_node *node, int index)
{
	struct rte_acl_node *ref_node;

	/* De-reference the node at the specified pointer */
	if (node != NULL && node->ptrs[index].ptr != NULL) {
		ref_node = node->ptrs[index].ptr;
		ref_node->ref_count--;
		if (ref_node->ref_count == 0)
			acl_free_node(context, ref_node);
	}
}

/*
 * Exclude bitset from a node pointer
 * returns  0 if pointer was deref'd
 *          1 otherwise.
 */
static int
acl_exclude_ptr(struct acl_build_context *context,
	struct rte_acl_node *node,
	int index,
	struct rte_acl_bitset *b_bits)
{
	int retval = 1;

	/*
	 * remove bitset from node pointer and deref
	 * if the bitset becomes empty.
	 */
	if (!acl_exclude(&node->ptrs[index].values,
			&node->ptrs[index].values,
			b_bits)) {
		acl_deref_ptr(context, node, index);
		node->ptrs[index].ptr = NULL;
		retval = 0;
	}

	/* exclude bits from the composite bits for the node */
	acl_exclude(&node->values, &node->values, b_bits);
	return retval;
}

/*
 * Remove a bitset from src ptr and move remaining ptr to dst
 */
static int
acl_move_ptr(struct acl_build_context *context,
	struct rte_acl_node *dst,
	struct rte_acl_node *src,
	int index,
	struct rte_acl_bitset *b_bits)
{
	int rc;

	if (b_bits != NULL)
		if (!acl_exclude_ptr(context, src, index, b_bits))
			return 0;

	/* add src pointer to dst node */
	rc = acl_add_ptr(context, dst, src->ptrs[index].ptr,
		&src->ptrs[index].values);
	if (rc < 0)
		return rc;

	/* remove ptr from src */
	acl_exclude_ptr(context, src, index, &src->ptrs[index].values);
	return 1;
}

/*
 * acl_exclude rte_acl_bitset from src and copy remaining pointer to dst
 */
static int
acl_copy_ptr(struct acl_build_context *context,
	struct rte_acl_node *dst,
	struct rte_acl_node *src,
	int index,
	struct rte_acl_bitset *b_bits)
{
	int rc;
	struct rte_acl_bitset bits;

	if (b_bits != NULL)
		if (!acl_exclude(&bits, &src->ptrs[index].values, b_bits))
			return 0;

	rc = acl_add_ptr(context, dst, src->ptrs[index].ptr, &bits);
	if (rc < 0)
		return rc;
	return 1;
}

/*
 * Fill in gaps in ptrs list with the ptr at the end of the list
 */
static void
acl_compact_node_ptrs(struct rte_acl_node *node_a)
{
	uint32_t n;
	int min_add = node_a->min_add;

	while (node_a->num_ptrs > 0  &&
			node_a->ptrs[node_a->num_ptrs - 1].ptr == NULL)
		node_a->num_ptrs--;

	for (n = min_add; n + 1 < node_a->num_ptrs; n++) {

		/* if this entry is empty */
		if (node_a->ptrs[n].ptr == NULL) {

			/* move the last pointer to this entry */
			acl_include(&node_a->ptrs[n].values,
				&node_a->ptrs[node_a->num_ptrs - 1].values,
				0);
			node_a->ptrs[n].ptr =
				node_a->ptrs[node_a->num_ptrs - 1].ptr;

			/*
			 * mark the end as empty and adjust the number
			 * of used pointer entries
			 */
			node_a->ptrs[node_a->num_ptrs - 1].ptr = NULL;
			while (node_a->num_ptrs > 0  &&
					node_a->ptrs[node_a->num_ptrs - 1].ptr == NULL)
				node_a->num_ptrs--;
		}
	}
}

/*
 * acl_merge helper routine.
 */
static int
acl_merge_intersect(struct acl_build_context *context,
	struct rte_acl_node *node_a, uint32_t idx_a,
	struct rte_acl_node *node_b, uint32_t idx_b,
	int next_move, int level,
	struct rte_acl_bitset *intersect_ptr)
{
	struct rte_acl_node *node_c;

	/* Duplicate A for intersection */
	node_c = acl_dup_node(context, node_a->ptrs[idx_a].ptr);
	if (node_c == NULL)
		return -1;

	/* Remove intersection from A */
	acl_exclude_ptr(context, node_a, idx_a, intersect_ptr);

	/*
	 * Add link from A to C for all transitions
	 * in the intersection
	 */
	if (acl_add_ptr(context, node_a, node_c, intersect_ptr) < 0)
		return -1;

	/* merge B->node into C */
	return acl_merge(context, node_c, node_b->ptrs[idx_b].ptr, next_move,
		0, level + 1);
}

/*
 * Merge the children of nodes A and B together.
 *
 * If match node (leaf on trie)
 *	For each category
 *		node A result = highest priority result
 * if any pointers in A intersect with any in B
 *	For each intersection
 *		C = copy of node that A points to
 *		remove intersection from A pointer
 *		add a pointer to A that points to C for the intersection
 *		Merge C and node that B points to
 * Compact the pointers in A and B
 * if move flag
 *	If B has only one reference
 *		Move B pointers to A
 *	else
 *		Copy B pointers to A
 */
static int
acl_merge(struct acl_build_context *context,
	struct rte_acl_node *node_a, struct rte_acl_node *node_b,
	int move, int a_subset, int level)
{
	uint32_t n, m, ptrs_a, ptrs_b;
	uint32_t min_add_a, min_add_b;
	int intersect_type;
	int node_intersect_type;
	int b_full, next_move, rc;
	struct rte_acl_bitset intersect_values;
	struct rte_acl_bitset intersect_ptr;

	min_add_a = 0;
	min_add_b = 0;
	intersect_type = 0;
	node_intersect_type = 0;

	if (level == 0)
		a_subset = 1;

	/*
	 * Resolve match priorities
	 */
	if (node_a->match_flag != 0 || node_b->match_flag != 0) {

		if (node_a->match_flag == 0 || node_b->match_flag == 0)
			RTE_LOG(ERR, ACL, "Not both matches\n");

		if (node_b->match_flag < node_a->match_flag)
			RTE_LOG(ERR, ACL, "Not same match\n");

		for (n = 0; n < context->cfg.num_categories; n++) {
			if (node_a->mrt->priority[n] <
					node_b->mrt->priority[n]) {
				node_a->mrt->priority[n] =
					node_b->mrt->priority[n];
				node_a->mrt->results[n] =
					node_b->mrt->results[n];
			}
		}
	}

	/*
	 * If the two node transitions intersect then merge the transitions.
	 * Check intersection for entire node (all pointers)
	 */
	node_intersect_type = acl_intersect_type(&node_a->values,
		&node_b->values,
		&intersect_values);

	if (node_intersect_type & ACL_INTERSECT) {

		b_full = acl_full(node_b);

		min_add_b = node_b->min_add;
		node_b->min_add = node_b->num_ptrs;
		ptrs_b = node_b->num_ptrs;

		min_add_a = node_a->min_add;
		node_a->min_add = node_a->num_ptrs;
		ptrs_a = node_a->num_ptrs;

		for (n = 0; n < ptrs_a; n++) {
			for (m = 0; m < ptrs_b; m++) {

				if (node_a->ptrs[n].ptr == NULL ||
						node_b->ptrs[m].ptr == NULL ||
						node_a->ptrs[n].ptr ==
						node_b->ptrs[m].ptr)
					continue;

				intersect_type = acl_intersect_type(
					&node_a->ptrs[n].values,
					&node_b->ptrs[m].values,
					&intersect_ptr);

				/* If this node is not a 'match' node */
				if ((intersect_type & ACL_INTERSECT) &&
					(context->cfg.num_categories != 1 ||
					!(node_a->ptrs[n].ptr->match_flag))) {

					/*
					 * next merge is a 'move' pointer,
					 * if this one is and B is a
					 * subset of the intersection.
					 */
					next_move = move &&
						(intersect_type &
						ACL_INTERSECT_B) == 0;

					if (a_subset && b_full) {
						rc = acl_merge(context,
							node_a->ptrs[n].ptr,
							node_b->ptrs[m].ptr,
							next_move,
							1, level + 1);
						if (rc != 0)
							return rc;
					} else {
						rc = acl_merge_intersect(
							context, node_a, n,
							node_b, m, next_move,
							level, &intersect_ptr);
						if (rc != 0)
							return rc;
					}
				}
			}
		}
	}

	/* Compact pointers */
	node_a->min_add = min_add_a;
	acl_compact_node_ptrs(node_a);
	node_b->min_add = min_add_b;
	acl_compact_node_ptrs(node_b);

	/*
	 * Either COPY or MOVE pointers from B to A
	 */
	acl_intersect(&node_a->values, &node_b->values, &intersect_values);

	if (move && node_b->ref_count == 1) {
		for (m = 0; m < node_b->num_ptrs; m++) {
			if (node_b->ptrs[m].ptr != NULL &&
					acl_move_ptr(context, node_a, node_b, m,
					&intersect_values) < 0)
				return -1;
		}
	} else {
		for (m = 0; m < node_b->num_ptrs; m++) {
			if (node_b->ptrs[m].ptr != NULL &&
					acl_copy_ptr(context, node_a, node_b, m,
					&intersect_values) < 0)
				return -1;
		}
	}

	/*
	 * Free node if it is empty (no longer used)
	 */
	if (acl_empty(node_b))
		acl_free_node(context, node_b);

	return 0;
}

static int
acl_resolve_leaf(struct acl_build_context *context,
	struct rte_acl_node *node_a,
	struct rte_acl_node *node_b,
	struct rte_acl_node **node_c)
{
	uint32_t n;
	int combined_priority = ACL_PRIORITY_EQUAL;

	for (n = 0; n < context->cfg.num_categories; n++) {
		if (node_a->mrt->priority[n] != node_b->mrt->priority[n]) {
			combined_priority |= (node_a->mrt->priority[n] >
				node_b->mrt->priority[n]) ?
				ACL_PRIORITY_NODE_A : ACL_PRIORITY_NODE_B;
		}
	}

	/*
	 * if node a is higher or equal priority for all categories,
	 * then return node_a.
	 */
	if (combined_priority == ACL_PRIORITY_NODE_A ||
			combined_priority == ACL_PRIORITY_EQUAL) {
		*node_c = node_a;
		return 0;
	}

	/*
	 * if node b is higher or equal priority for all categories,
	 * then return node_b.
	 */
	if (combined_priority == ACL_PRIORITY_NODE_B) {
		*node_c = node_b;
		return 0;
	}

	/*
	 * mixed priorities - create a new node with the highest priority
	 * for each category.
	 */

	/* force new duplication. */
	node_a->next = NULL;

	*node_c = acl_dup_node(context, node_a);
	for (n = 0; n < context->cfg.num_categories; n++) {
		if ((*node_c)->mrt->priority[n] < node_b->mrt->priority[n]) {
			(*node_c)->mrt->priority[n] = node_b->mrt->priority[n];
			(*node_c)->mrt->results[n] = node_b->mrt->results[n];
		}
	}
	return 0;
}

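/*
 * Example: with two categories, priorities A = {30, 10} and B = {20, 20}
 * give combined_priority == ACL_PRIORITY_MIXED; the duplicated node then
 * keeps A's result for category 0 and takes B's result for category 1,
 * ending with priorities {30, 20}.
 */
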
/*
 * Within the existing trie structure, determine which nodes are
 * part of the subtree of the trie to be merged.
 *
 * For these purposes, a subtree is defined as the set of nodes that
 * are 1) not a superset of the intersection with the same level of
 * the merging tree, and 2) do not have any references from a node
 * outside of the subtree.
 */
static void
mark_subtree(struct rte_acl_node *node,
	struct rte_acl_bitset *level_bits,
	uint32_t level,
	uint32_t id)
{
	uint32_t n;

	/* mark this node as part of the subtree */
	node->subtree_id = id | RTE_ACL_SUBTREE_NODE;

	for (n = 0; n < node->num_ptrs; n++) {

		if (node->ptrs[n].ptr != NULL) {

			struct rte_acl_bitset intersect_bits;
			int intersect;

			/*
			 * Item 1) :
			 * check if this child pointer is not a superset of the
			 * same level of the merging tree.
			 */
			intersect = acl_intersect_type(&node->ptrs[n].values,
				&level_bits[level],
				&intersect_bits);

			if ((intersect & ACL_INTERSECT_A) == 0) {

				struct rte_acl_node *child = node->ptrs[n].ptr;

				/*
				 * reset subtree reference if this is
				 * the first visit by this subtree.
				 */
				if (child->subtree_id != id) {
					child->subtree_id = id;
					child->subtree_ref_count = 0;
				}

				/*
				 * Item 2) :
				 * increment the subtree reference count and if
				 * all references are from this subtree then
				 * recurse to that child
				 */
				child->subtree_ref_count++;
				if (child->subtree_ref_count ==
						child->ref_count)
					mark_subtree(child, level_bits,
						level + 1, id);
			}
		}
	}
}

/*
 * Build the set of bits that define the set of transitions
 * for each level of a trie.
 */
static void
build_subset_mask(struct rte_acl_node *node,
	struct rte_acl_bitset *level_bits,
	int level)
{
	uint32_t n;

	/* Add this node's transitions to the set for this level */
	for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
		level_bits[level].bits[n] &= node->values.bits[n];

	/* For each child, add the transitions for the next level */
	for (n = 0; n < node->num_ptrs; n++)
		if (node->ptrs[n].ptr != NULL)
			build_subset_mask(node->ptrs[n].ptr, level_bits,
				level + 1);
}

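/*
 * Note: build_subset_mask() only clears bits (it ANDs each node's
 * transitions into level_bits), so the caller must seed level_bits with
 * all ones first -- see the memset(..., UINT8_MAX, ...) before the
 * build_subset_mask()/mark_subtree() pair in build_trie() below.
 */
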
/*
 * Merge nodes A and B together,
 *   returns a node that is the path for the intersection
 *
 * If match node (leaf on trie)
 *	For each category
 *		return node = highest priority result
 *
 * Create C as a duplicate of A to point to child intersections
 * If any pointers in C intersect with any in B
 *	For each intersection
 *		merge children
 *		remove intersection from C pointer
 *		add a pointer from C to child intersection node
 * Compact the pointers in A and B
 * Copy any B pointers that are outside of the intersection to C
 * If C has no references to the B trie
 *	free C and return A
 * Else If C has no references to the A trie
 *	free C and return B
 * Else
 *	return C
 */
static int
acl_merge_trie(struct acl_build_context *context,
	struct rte_acl_node *node_a, struct rte_acl_node *node_b,
	uint32_t level, uint32_t subtree_id, struct rte_acl_node **return_c)
{
	uint32_t n, m, ptrs_c, ptrs_b;
	uint32_t min_add_c, min_add_b;
	int node_intersect_type;
	struct rte_acl_bitset node_intersect;
	struct rte_acl_node *node_c;
	struct rte_acl_node *node_a_next;
	int node_b_refs;
	int node_a_refs;

	node_c = node_a;
	node_a_next = node_a->next;
	node_b_refs = node_b->num_ptrs;
	node_a_refs = node_a->num_ptrs;

	node_intersect_type = 0;

	/* Resolve leaf nodes (matches) */
	if (node_a->match_flag != 0) {
		acl_resolve_leaf(context, node_a, node_b, return_c);
		return 0;
	}

	/*
	 * Create node C as a copy of node A if node A is not part of
	 * a subtree of the merging tree (node B side). Otherwise,
	 * just use node A.
	 */
	if (level > 0 &&
			node_a->subtree_id !=
			(subtree_id | RTE_ACL_SUBTREE_NODE)) {
		node_c = acl_dup_node(context, node_a);
		node_c->subtree_id = subtree_id | RTE_ACL_SUBTREE_NODE;
	}

	/*
	 * If the two node transitions intersect then merge the transitions.
	 * Check intersection for entire node (all pointers)
	 */
	node_intersect_type = acl_intersect_type(&node_c->values,
		&node_b->values,
		&node_intersect);

	if (node_intersect_type & ACL_INTERSECT) {

		min_add_b = node_b->min_add;
		node_b->min_add = node_b->num_ptrs;
		ptrs_b = node_b->num_ptrs;

		min_add_c = node_c->min_add;
		node_c->min_add = node_c->num_ptrs;
		ptrs_c = node_c->num_ptrs;

		for (n = 0; n < ptrs_c; n++) {
			if (node_c->ptrs[n].ptr == NULL) {
				node_a_refs--;
				continue;
			}
			node_c->ptrs[n].ptr->next = NULL;
			for (m = 0; m < ptrs_b; m++) {

				struct rte_acl_bitset child_intersect;
				int child_intersect_type;
				struct rte_acl_node *child_node_c = NULL;

				if (node_b->ptrs[m].ptr == NULL ||
						node_c->ptrs[n].ptr ==
						node_b->ptrs[m].ptr)
					continue;

				child_intersect_type = acl_intersect_type(
					&node_c->ptrs[n].values,
					&node_b->ptrs[m].values,
					&child_intersect);

				if ((child_intersect_type & ACL_INTERSECT) !=
						0) {
					if (acl_merge_trie(context,
							node_c->ptrs[n].ptr,
							node_b->ptrs[m].ptr,
							level + 1, subtree_id,
							&child_node_c))
						return 1;

					if (child_node_c != NULL &&
							child_node_c !=
							node_c->ptrs[n].ptr) {

						node_b_refs++;

						/*
						 * Add link from C to
						 * child_C for all transitions
						 * in the intersection.
						 */
						acl_add_ptr(context, node_c,
							child_node_c,
							&child_intersect);

						/*
						 * inc refs if pointer is not
						 * to node b.
						 */
						node_a_refs += (child_node_c !=
							node_b->ptrs[m].ptr);

						/*
						 * Remove intersection from C
						 * pointer.
						 */
						if (!acl_exclude(
							&node_c->ptrs[n].values,
							&node_c->ptrs[n].values,
							&child_intersect)) {
							acl_deref_ptr(context,
								node_c, n);
							node_c->ptrs[n].ptr =
								NULL;
							node_a_refs--;
						}
					}
				}
			}
		}

		/* Compact pointers */
		node_c->min_add = min_add_c;
		acl_compact_node_ptrs(node_c);
		node_b->min_add = min_add_b;
		acl_compact_node_ptrs(node_b);
	}

	/*
	 * Copy pointers outside of the intersection from B to C
	 */
	if ((node_intersect_type & ACL_INTERSECT_B) != 0) {
		node_b_refs++;
		for (m = 0; m < node_b->num_ptrs; m++)
			if (node_b->ptrs[m].ptr != NULL)
				acl_copy_ptr(context, node_c,
					node_b, m, &node_intersect);
	}

	/*
	 * Free node C if top of trie is contained in A or B
	 *  if node C is a duplicate of node A &&
	 *     node C was not an existing duplicate
	 */
	if (node_c != node_a && node_c != node_a_next) {

		/*
		 * if the intersection has no references to the
		 * B side, then it is contained in A
		 */
		if (node_b_refs == 0) {
			acl_free_node(context, node_c);
			node_c = node_a;
		} else {
			/*
			 * if the intersection has no references to the
			 * A side, then it is contained in B.
			 */
			if (node_a_refs == 0) {
				acl_free_node(context, node_c);
				node_c = node_b;
			}
		}
	}

	if (return_c != NULL)
		*return_c = node_c;

	if (level == 0)
		acl_free_node(context, node_b);

	return 0;
}

/*
 * Reset current runtime fields before next build:
 *  - free allocated RT memory.
 *  - reset all RT related fields to zero.
 */
static void
acl_build_reset(struct rte_acl_ctx *ctx)
{
	rte_free(ctx->mem);
	memset(&ctx->num_categories, 0,
		sizeof(*ctx) - offsetof(struct rte_acl_ctx, num_categories));
}

static void
acl_gen_range(struct acl_build_context *context,
	const uint8_t *hi, const uint8_t *lo, int size, int level,
	struct rte_acl_node *root, struct rte_acl_node *end)
{
	struct rte_acl_node *node, *prev;
	uint32_t n;

	prev = root;
	for (n = size - 1; n > 0; n--) {
		node = acl_alloc_node(context, level++);
		acl_add_ptr_range(context, prev, node, lo[n], hi[n]);
		prev = node;
	}
	acl_add_ptr_range(context, prev, end, lo[0], hi[0]);
}

static struct rte_acl_node *
acl_gen_range_trie(struct acl_build_context *context,
	const void *min, const void *max,
	int size, int level, struct rte_acl_node **pend)
{
	int32_t n;
	struct rte_acl_node *root;
	const uint8_t *lo = (const uint8_t *)min;
	const uint8_t *hi = (const uint8_t *)max;

	*pend = acl_alloc_node(context, level+size);
	root = acl_alloc_node(context, level++);

	if (lo[size - 1] == hi[size - 1]) {
		acl_gen_range(context, hi, lo, size, level, root, *pend);
	} else {
		uint8_t limit_lo[64];
		uint8_t limit_hi[64];
		uint8_t hi_ff = UINT8_MAX;
		uint8_t lo_00 = 0;

		memset(limit_lo, 0, RTE_DIM(limit_lo));
		memset(limit_hi, UINT8_MAX, RTE_DIM(limit_hi));

		for (n = size - 2; n >= 0; n--) {
			hi_ff = (uint8_t)(hi_ff & hi[n]);
			lo_00 = (uint8_t)(lo_00 | lo[n]);
		}

		if (hi_ff != UINT8_MAX) {
			limit_lo[size - 1] = hi[size - 1];
			acl_gen_range(context, hi, limit_lo, size, level,
				root, *pend);
		}

		if (lo_00 != 0) {
			limit_hi[size - 1] = lo[size - 1];
			acl_gen_range(context, limit_hi, lo, size, level,
				root, *pend);
		}

		if (hi[size - 1] - lo[size - 1] > 1 ||
				lo_00 == 0 ||
				hi_ff == UINT8_MAX) {
			limit_lo[size-1] = (uint8_t)(lo[size-1] + (lo_00 != 0));
			limit_hi[size-1] = (uint8_t)(hi[size-1] -
				(hi_ff != UINT8_MAX));
			acl_gen_range(context, limit_hi, limit_lo, size,
				level, root, *pend);
		}
	}
	return root;
}

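/*
 * Example of the split above: for the 2-byte range 0x1234-0x5678 the
 * most significant bytes differ, so up to three sub-ranges are added:
 * 0x5600-0x5678 (upper edge, hi_ff != 0xff), 0x1234-0x12ff (lower edge,
 * lo_00 != 0) and the full-byte middle 0x1300-0x55ff.
 */
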
static struct rte_acl_node *
acl_gen_mask_trie(struct acl_build_context *context,
	const void *value, const void *mask,
	int size, int level, struct rte_acl_node **pend)
{
	int32_t n;
	struct rte_acl_node *root;
	struct rte_acl_node *node, *prev;
	struct rte_acl_bitset bits;
	const uint8_t *val = (const uint8_t *)value;
	const uint8_t *msk = (const uint8_t *)mask;

	root = acl_alloc_node(context, level++);
	prev = root;

	for (n = size - 1; n >= 0; n--) {
		node = acl_alloc_node(context, level++);
		acl_gen_mask(&bits, val[n] & msk[n], msk[n]);
		acl_add_ptr(context, prev, node, &bits);
		prev = node;
	}

	*pend = prev;
	return root;
}

static struct rte_acl_node *
build_trie(struct acl_build_context *context, struct rte_acl_build_rule *head,
	struct rte_acl_build_rule **last, uint32_t *count)
{
	uint32_t n, m;
	int field_index, node_count;
	struct rte_acl_node *trie;
	struct rte_acl_build_rule *prev, *rule;
	struct rte_acl_node *end, *merge, *root, *end_prev;
	const struct rte_acl_field *fld;
	struct rte_acl_bitset level_bits[RTE_ACL_MAX_LEVELS];

	prev = head;
	rule = head;

	trie = acl_alloc_node(context, 0);
	if (trie == NULL)
		return NULL;

	while (rule != NULL) {

		root = acl_alloc_node(context, 0);
		if (root == NULL)
			return NULL;

		root->ref_count = 1;
		end = root;

		for (n = 0; n < rule->config->num_fields; n++) {

			field_index = rule->config->defs[n].field_index;
			fld = rule->f->field + field_index;
			end_prev = end;

			/* build a mini-trie for this field */
			switch (rule->config->defs[n].type) {

			case RTE_ACL_FIELD_TYPE_BITMASK:
				merge = acl_gen_mask_trie(context,
					&fld->value,
					&fld->mask_range,
					rule->config->defs[n].size,
					end->level + 1,
					&end);
				break;

			case RTE_ACL_FIELD_TYPE_MASK:
			{
				/*
				 * set msb for the size of the field and
				 * all higher bits.
				 */
				uint64_t mask;

				if (fld->mask_range.u32 == 0) {
					mask = 0;

				/*
				 * arithmetic right shift for the length of
				 * the mask less the msb.
				 */
				} else {
					mask = -1ULL <<
						(rule->config->defs[n].size *
						CHAR_BIT - fld->mask_range.u32);
				}

				/* gen a mini-trie for this field */
				merge = acl_gen_mask_trie(context,
					&fld->value,
					(char *)&mask,
					rule->config->defs[n].size,
					end->level + 1,
					&end);
			}
				break;

			case RTE_ACL_FIELD_TYPE_RANGE:
				merge = acl_gen_range_trie(context,
					&rule->f->field[field_index].value,
					&rule->f->field[field_index].mask_range,
					rule->config->defs[n].size,
					end->level + 1,
					&end);
				break;

			default:
				RTE_LOG(ERR, ACL,
					"Error in rule[%u] type - %hhu\n",
					rule->f->data.userdata,
					rule->config->defs[n].type);
				return NULL;
			}

			/* merge this field on to the end of the rule */
			if (acl_merge_trie(context, end_prev, merge, 0,
					0, NULL) != 0)
				return NULL;
		}

		end->match_flag = ++context->num_build_rules;

		/*
		 * Setup the results for this rule.
		 * The result and priority of each category.
		 */
		if (end->mrt == NULL &&
				(end->mrt = acl_build_alloc(context, 1,
				sizeof(*end->mrt))) == NULL)
			return NULL;

		for (m = 0; m < context->cfg.num_categories; m++) {
			if (rule->f->data.category_mask & (1 << m)) {
				end->mrt->results[m] = rule->f->data.userdata;
				end->mrt->priority[m] = rule->f->data.priority;
			} else {
				end->mrt->results[m] = 0;
				end->mrt->priority[m] = 0;
			}
		}

		node_count = context->num_nodes;

		memset(&level_bits[0], UINT8_MAX, sizeof(level_bits));
		build_subset_mask(root, &level_bits[0], 0);
		mark_subtree(trie, &level_bits[0], 0, end->match_flag);
		(*count)++;

		/* merge this rule into the trie */
		if (acl_merge_trie(context, trie, root, 0, end->match_flag,
				NULL))
			return NULL;

		node_count = context->num_nodes - node_count;
		if (node_count > NODE_MAX) {
			*last = prev;
			return trie;
		}

		prev = rule;
		rule = rule->next;
	}

	*last = NULL;
	return trie;
}

static int
acl_calc_wildness(struct rte_acl_build_rule *head,
	const struct rte_acl_config *config)
{
	uint32_t n;
	struct rte_acl_build_rule *rule;

	for (rule = head; rule != NULL; rule = rule->next) {

		for (n = 0; n < config->num_fields; n++) {

			double wild = 0;
			double size = CHAR_BIT * config->defs[n].size;
			int field_index = config->defs[n].field_index;
			const struct rte_acl_field *fld = rule->f->field +
				field_index;

			switch (rule->config->defs[n].type) {
			case RTE_ACL_FIELD_TYPE_BITMASK:
				wild = (size -
					_mm_popcnt_u32(fld->mask_range.u8)) /
					size;
				break;

			case RTE_ACL_FIELD_TYPE_MASK:
				wild = (size - fld->mask_range.u32) / size;
				break;

			case RTE_ACL_FIELD_TYPE_RANGE:
				switch (rule->config->defs[n].size) {
				case sizeof(uint8_t):
					wild = ((double)fld->mask_range.u8 -
						fld->value.u8) / UINT8_MAX;
					break;
				case sizeof(uint16_t):
					wild = ((double)fld->mask_range.u16 -
						fld->value.u16) / UINT16_MAX;
					break;
				case sizeof(uint32_t):
					wild = ((double)fld->mask_range.u32 -
						fld->value.u32) / UINT32_MAX;
					break;
				case sizeof(uint64_t):
					wild = ((double)fld->mask_range.u64 -
						fld->value.u64) / UINT64_MAX;
					break;
				default:
					RTE_LOG(ERR, ACL,
						"%s(rule: %u) invalid %u-th "
						"field, type: %hhu, "
						"unknown size: %hhu\n",
						__func__,
						rule->f->data.userdata,
						n,
						rule->config->defs[n].type,
						rule->config->defs[n].size);
					return -EINVAL;
				}
				break;

			default:
				RTE_LOG(ERR, ACL,
					"%s(rule: %u) invalid %u-th "
					"field, unknown type: %hhu\n",
					__func__,
					rule->f->data.userdata,
					n,
					rule->config->defs[n].type);
				return -EINVAL;
			}

			rule->wildness[field_index] = (uint32_t)(wild * 100);
		}
	}

	return 0;
}

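/*
 * Example: a 32-bit MASK field holding a /24 prefix yields
 * wild = (32 - 24) / 32 = 0.25, stored as wildness = 25; an exact-match
 * field scores 0 and a completely wild field scores 100.
 */
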
static int
acl_rule_stats(struct rte_acl_build_rule *head, struct rte_acl_config *config,
	uint32_t *wild_limit)
{
	int min;
	struct rte_acl_build_rule *rule;
	uint32_t n, m, fields_deactivated = 0;
	uint32_t start = 0, deactivate = 0;
	int tally[RTE_ACL_MAX_LEVELS][TALLY_NUM];

	memset(tally, 0, sizeof(tally));

	for (rule = head; rule != NULL; rule = rule->next) {

		for (n = 0; n < config->num_fields; n++) {
			uint32_t field_index = config->defs[n].field_index;

			tally[n][TALLY_0]++;
			for (m = 1; m < RTE_DIM(wild_limits); m++) {
				if (rule->wildness[field_index] >=
						wild_limits[m])
					tally[n][m]++;
			}
		}

		for (n = config->num_fields - 1; n > 0; n--) {
			uint32_t field_index = config->defs[n].field_index;

			if (rule->wildness[field_index] == 100)
				tally[n][TALLY_DEPTH]++;
			else
				break;
		}
	}

	/*
	 * Look for any field that is always wild and drop it from the config.
	 * Only deactivate if all fields for a given input loop are deactivated.
	 */
	for (n = 1; n < config->num_fields; n++) {
		if (config->defs[n].input_index !=
				config->defs[n - 1].input_index) {
			for (m = start; m < n; m++)
				tally[m][TALLY_DEACTIVATED] = deactivate;
			fields_deactivated += deactivate;
			start = n;
			deactivate = 1;
		}

		/* if the field is not always completely wild */
		if (tally[n][TALLY_100] != tally[n][TALLY_0])
			deactivate = 0;
	}

	for (m = start; m < n; m++)
		tally[m][TALLY_DEACTIVATED] = deactivate;

	fields_deactivated += deactivate;

	/* remove deactivated fields */
	if (fields_deactivated) {
		uint32_t k, l = 0;

		for (k = 0; k < config->num_fields; k++) {
			if (tally[k][TALLY_DEACTIVATED] == 0) {
				memcpy(&tally[l][0], &tally[k][0],
					TALLY_NUM * sizeof(tally[0][0]));
				memcpy(&config->defs[l++],
					&config->defs[k],
					sizeof(struct rte_acl_field_def));
			}
		}
		config->num_fields = l;
	}

	min = RTE_ACL_SINGLE_TRIE_SIZE;
	if (config->num_fields == 2)
		min *= 4;
	else if (config->num_fields == 3)
		min *= 3;
	else if (config->num_fields == 4)
		min *= 2;

	if (tally[0][TALLY_0] < min)
		return 0;
	for (n = 0; n < config->num_fields; n++)
		wild_limit[n] = 0;

	/*
	 * If trailing fields are 100% wild, group those together.
	 * This allows the search length of the trie to be shortened.
	 */
	for (n = 1; n < config->num_fields; n++) {

		double rule_percentage = (double)tally[n][TALLY_DEPTH] /
			tally[n][0];

		if (rule_percentage > RULE_PERCENTAGE) {
			/* if it crosses an input boundary then round up */
			while (config->defs[n - 1].input_index ==
					config->defs[n].input_index)
				n++;

			/* set the limit for selecting rules */
			while (n < config->num_fields)
				wild_limit[n++] = 100;

			if (wild_limit[n - 1] == 100)
				return 1;
		}
	}

	/* look for the most wild that's 40% or more of the rules */
	for (n = 1; n < config->num_fields; n++) {
		for (m = TALLY_100; m > 0; m--) {

			double rule_percentage = (double)tally[n][m] /
				tally[n][0];

			if (tally[n][TALLY_DEACTIVATED] == 0 &&
					tally[n][TALLY_0] >
					RTE_ACL_SINGLE_TRIE_SIZE &&
					rule_percentage > NODE_PERCENTAGE &&
					rule_percentage < 0.80) {
				wild_limit[n] = wild_limits[m];
				return 1;
			}
		}
	}
	return 0;
}

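/*
 * A non-zero return from acl_rule_stats() means splitting the rule set
 * is worthwhile: wild_limit[] then holds, per field, the minimum
 * wildness a rule must have in every field to be moved into the next
 * trie (see the split loop in acl_build_tries() below).
 */
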
static int
order(struct rte_acl_build_rule **insert, struct rte_acl_build_rule *rule)
{
	uint32_t n;
	struct rte_acl_build_rule *left = *insert;

	if (left == NULL)
		return 0;

	for (n = 1; n < left->config->num_fields; n++) {
		int field_index = left->config->defs[n].field_index;

		if (left->wildness[field_index] != rule->wildness[field_index])
			return (left->wildness[field_index] >=
				rule->wildness[field_index]);
	}
	return 0;
}

static struct rte_acl_build_rule *
ordered_insert_rule(struct rte_acl_build_rule *head,
	struct rte_acl_build_rule *rule)
{
	struct rte_acl_build_rule **insert;

	if (rule == NULL)
		return head;

	rule->next = NULL;
	insert = &head;
	while (order(insert, rule))
		insert = &(*insert)->next;

	rule->next = *insert;
	*insert = rule;
	return head;
}

static struct rte_acl_build_rule *
sort_rules(struct rte_acl_build_rule *head)
{
	struct rte_acl_build_rule *rule, *reordered_head = NULL;
	struct rte_acl_build_rule *last_rule = NULL;

	for (rule = head; rule != NULL; rule = rule->next) {
		reordered_head = ordered_insert_rule(reordered_head, last_rule);
		last_rule = rule;
	}

	if (last_rule != reordered_head)
		reordered_head = ordered_insert_rule(reordered_head, last_rule);

	return reordered_head;
}

static uint32_t
acl_build_index(const struct rte_acl_config *config, uint32_t *data_index)
{
	uint32_t n, m;
	int32_t last_header;

	m = 0;
	last_header = -1;

	for (n = 0; n < config->num_fields; n++) {
		if (last_header != config->defs[n].input_index) {
			last_header = config->defs[n].input_index;
			data_index[m++] = config->defs[n].offset;
		}
	}

	return m;
}

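/*
 * Example: if defs[] holds two 2-byte fields sharing input_index 1,
 * followed by a 4-byte field with input_index 2, only two offsets are
 * recorded -- one per distinct input_index -- since fields with the
 * same input_index are loaded together at classify time.
 */
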
static int
acl_build_tries(struct acl_build_context *context,
	struct rte_acl_build_rule *head)
{
	int32_t rc;
	uint32_t n, m, num_tries;
	struct rte_acl_config *config;
	struct rte_acl_build_rule *last, *rule;
	uint32_t wild_limit[RTE_ACL_MAX_LEVELS];
	struct rte_acl_build_rule *rule_sets[RTE_ACL_MAX_TRIES];

	config = head->config;
	rule = head;
	rule_sets[0] = head;
	num_tries = 1;

	/* initialize tries */
	for (n = 0; n < RTE_DIM(context->tries); n++) {
		context->tries[n].type = RTE_ACL_UNUSED_TRIE;
		context->bld_tries[n].trie = NULL;
		context->tries[n].count = 0;
		context->tries[n].smallest = INT32_MAX;
	}

	context->tries[0].type = RTE_ACL_FULL_TRIE;

	/* calc wildness of each field of each rule */
	rc = acl_calc_wildness(head, config);
	if (rc != 0)
		return rc;

	n = acl_rule_stats(head, config, &wild_limit[0]);

	/* put all rules that fit the wildness criteria into a separate trie */
	while (n > 0 && num_tries < RTE_ACL_MAX_TRIES) {

		struct rte_acl_config *new_config;
		struct rte_acl_build_rule **prev = &rule_sets[num_tries - 1];
		struct rte_acl_build_rule *next = head->next;

		new_config = acl_build_alloc(context, 1, sizeof(*new_config));
		if (new_config == NULL) {
			RTE_LOG(ERR, ACL,
				"Failed to get space for new config\n");
			return -ENOMEM;
		}

		memcpy(new_config, config, sizeof(*new_config));
		config = new_config;
		rule_sets[num_tries] = NULL;

		for (rule = head; rule != NULL; rule = next) {

			int move = 1;

			next = rule->next;
			for (m = 0; m < config->num_fields; m++) {
				int x = config->defs[m].field_index;
				if (rule->wildness[x] < wild_limit[m]) {
					move = 0;
					break;
				}
			}

			if (move) {
				rule->config = new_config;
				rule->next = rule_sets[num_tries];
				rule_sets[num_tries] = rule;
				*prev = next;
			} else
				prev = &rule->next;
		}

		head = rule_sets[num_tries];
		n = acl_rule_stats(rule_sets[num_tries], config,
			&wild_limit[0]);
		num_tries++;
	}

	if (n > 0)
		RTE_LOG(DEBUG, ACL,
			"Number of tries(%d) exceeded.\n", RTE_ACL_MAX_TRIES);

	for (n = 0; n < num_tries; n++) {

		rule_sets[n] = sort_rules(rule_sets[n]);
		context->tries[n].type = RTE_ACL_FULL_TRIE;
		context->tries[n].count = 0;
		context->tries[n].num_data_indexes =
			acl_build_index(rule_sets[n]->config,
			context->data_indexes[n]);
		context->tries[n].data_index = context->data_indexes[n];

		context->bld_tries[n].trie =
			build_trie(context, rule_sets[n],
			&last, &context->tries[n].count);
		if (context->bld_tries[n].trie == NULL) {
			RTE_LOG(ERR, ACL, "Build of %u-th trie failed\n", n);
			return -ENOMEM;
		}

		if (last != NULL) {
			rule_sets[num_tries++] = last->next;
			last->next = NULL;
			acl_free_node(context, context->bld_tries[n].trie);
			context->tries[n].count = 0;

			context->bld_tries[n].trie =
				build_trie(context, rule_sets[n],
				&last, &context->tries[n].count);
			if (context->bld_tries[n].trie == NULL) {
				RTE_LOG(ERR, ACL,
					"Build of %u-th trie failed\n", n);
				return -ENOMEM;
			}
		}
	}

	context->num_tries = num_tries;
	return 0;
}

static void
acl_build_log(const struct acl_build_context *ctx)
{
	uint32_t n;

	RTE_LOG(DEBUG, ACL, "Build phase for ACL \"%s\":\n"
		"memory consumed: %zu\n",
		ctx->acx->name,
		ctx->pool.alloc);

	for (n = 0; n < RTE_DIM(ctx->tries); n++) {
		if (ctx->tries[n].count != 0)
			RTE_LOG(DEBUG, ACL,
				"trie %u: number of rules: %u\n",
				n, ctx->tries[n].count);
	}
}

static int
acl_build_rules(struct acl_build_context *bcx)
{
	struct rte_acl_build_rule *br, *head;
	const struct rte_acl_rule *rule;
	uint32_t *wp;
	uint32_t fn, i, n, num;
	size_t ofs, sz;

	fn = bcx->cfg.num_fields;
	n = bcx->acx->num_rules;
	ofs = n * sizeof(*br);
	sz = ofs + n * fn * sizeof(*wp);

	br = tb_alloc(&bcx->pool, sz);
	if (br == NULL) {
		RTE_LOG(ERR, ACL, "ACL context %s: failed to create a copy "
			"of %u build rules (%zu bytes)\n",
			bcx->acx->name, n, sz);
		return -ENOMEM;
	}

	wp = (uint32_t *)((uintptr_t)br + ofs);
	num = 0;
	head = NULL;

	for (i = 0; i != n; i++) {
		rule = (const struct rte_acl_rule *)
			((uintptr_t)bcx->acx->rules + bcx->acx->rule_sz * i);
		if ((rule->data.category_mask & bcx->category_mask) != 0) {
			br[num].next = head;
			br[num].config = &bcx->cfg;
			br[num].f = rule;
			br[num].wildness = wp;
			wp += fn;
			head = br + num;
			num++;
		}
	}

	bcx->num_rules = num;
	bcx->build_rules = head;

	return 0;
}

/*
 * Copy data_indexes for each trie into RT location.
 */
static void
acl_set_data_indexes(struct rte_acl_ctx *ctx)
{
	uint32_t i, n, ofs;

	ofs = 0;
	for (i = 0; i != ctx->num_tries; i++) {
		n = ctx->trie[i].num_data_indexes;
		memcpy(ctx->data_indexes + ofs, ctx->trie[i].data_index,
			n * sizeof(ctx->data_indexes[0]));
		ctx->trie[i].data_index = ctx->data_indexes + ofs;
		ofs += n;
	}
}

int
rte_acl_build(struct rte_acl_ctx *ctx, const struct rte_acl_config *cfg)
{
	int rc;
	struct acl_build_context bcx;

	if (ctx == NULL || cfg == NULL || cfg->num_categories == 0 ||
			cfg->num_categories > RTE_ACL_MAX_CATEGORIES)
		return -EINVAL;

	acl_build_reset(ctx);

	memset(&bcx, 0, sizeof(bcx));
	bcx.acx = ctx;
	bcx.pool.alignment = ACL_POOL_ALIGN;
	bcx.pool.min_alloc = ACL_POOL_ALLOC_MIN;
	bcx.cfg = *cfg;
	bcx.category_mask = LEN2MASK(bcx.cfg.num_categories);

	/* Create a build rules copy. */
	rc = acl_build_rules(&bcx);
	if (rc != 0)
		return rc;

	/* No rules to build for that context+config */
	if (bcx.build_rules == NULL) {
		rc = -EINVAL;

	/* build internal trie representation. */
	} else if ((rc = acl_build_tries(&bcx, bcx.build_rules)) == 0) {

		/* allocate and fill run-time structures. */
		rc = rte_acl_gen(ctx, bcx.tries, bcx.bld_tries,
				bcx.num_tries, bcx.cfg.num_categories,
				RTE_ACL_IPV4VLAN_NUM * RTE_DIM(bcx.tries),
				bcx.num_build_rules);
		if (rc == 0) {

			/* set data indexes. */
			acl_set_data_indexes(ctx);

			/* copy in build config. */
			ctx->config = *cfg;
		}
	}

	acl_build_log(&bcx);

	/* cleanup after build. */
	tb_free_pool(&bcx.pool);
	return rc;
}

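/*
 * Typical build-time usage (a sketch; "ipv4_defs" stands for an
 * application-supplied rte_acl_field_def array, as in the DPDK ACL
 * sample code, and "acx" for a context created with rte_acl_create()):
 *
 *	struct rte_acl_config cfg;
 *
 *	memset(&cfg, 0, sizeof(cfg));
 *	cfg.num_categories = 1;
 *	cfg.num_fields = RTE_DIM(ipv4_defs);
 *	memcpy(cfg.defs, ipv4_defs, sizeof(ipv4_defs));
 *
 *	if (rte_acl_build(acx, &cfg) != 0)
 *		rte_exit(EXIT_FAILURE, "ACL build failed\n");
 */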