/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_acl.h>
#include "acl.h"

#define	QRANGE_MIN	((uint8_t)INT8_MIN)

#define	RTE_ACL_VERIFY(exp)	do {                                          \
	if (!(exp))                                                           \
		rte_panic("line %d\tassert \"" #exp "\" failed\n", __LINE__); \
} while (0)
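/*
 * Per-type node counters and start offsets into the runtime transition
 * array, filled in by the counting pass below. The field sets of both
 * structures follow their use in this file.
 */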
struct acl_node_counters {
	int32_t match;
	int32_t match_used;
	int32_t single;
	int32_t quad;
	int32_t quad_vectors;
	int32_t dfa;
	int32_t dfa_gr64;
};
struct rte_acl_indices {
	int32_t dfa_index;
	int32_t quad_index;
	int32_t single_index;
	int32_t match_index;
	int32_t match_start;
};
static void
acl_gen_log_stats(const struct rte_acl_ctx *ctx,
	const struct acl_node_counters *counts,
	const struct rte_acl_indices *indices,
	size_t max_size)
{
	RTE_LOG(DEBUG, ACL, "Gen phase for ACL \"%s\":\n"
		"runtime memory footprint on socket %d:\n"
		"single nodes/bytes used: %d/%zu\n"
		"quad nodes/vectors/bytes used: %d/%d/%zu\n"
		"DFA nodes/group64/bytes used: %d/%d/%zu\n"
		"match nodes/bytes used: %d/%zu\n"
		"total: %zu bytes\n"
		"max limit: %zu bytes\n",
		ctx->name, ctx->socket_id,
		counts->single, counts->single * sizeof(uint64_t),
		counts->quad, counts->quad_vectors,
		(indices->quad_index - indices->dfa_index) * sizeof(uint64_t),
		counts->dfa, counts->dfa_gr64,
		indices->dfa_index * sizeof(uint64_t),
		counts->match,
		counts->match * sizeof(struct rte_acl_match_results),
		ctx->mem_sz,
		max_size);
}
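/*
 * Build the 64-bit transition word for a DFA node: the lower 32 bits
 * carry the node's position in the runtime array plus its type bits,
 * while the upper bits store, for every 64-entry group, the offset
 * (i - dfa_gr64[i]) that compensates for identical groups being stored
 * only once.
 */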
static uint64_t
acl_dfa_gen_idx(const struct rte_acl_node *node, uint32_t index)
{
	uint64_t idx;
	uint32_t i;

	idx = 0;
	for (i = 0; i != RTE_DIM(node->dfa_gr64); i++) {
		RTE_ACL_VERIFY(node->dfa_gr64[i] < RTE_ACL_DFA_GR64_NUM);
		RTE_ACL_VERIFY(node->dfa_gr64[i] < node->fanout);
		idx |= (i - node->dfa_gr64[i]) <<
			(6 + RTE_ACL_DFA_GR64_BIT * i);
	}

	return idx << (CHAR_BIT * sizeof(index)) | index | node->node_type;
}
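/*
 * Scatter the 64-entry groups of the expanded transition table (src)
 * into their final slots in the runtime array (dst), as selected by the
 * node's dfa_gr64[] mapping; duplicated groups land on the same slot
 * and are stored only once.
 */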
static void
acl_dfa_fill_gr64(const struct rte_acl_node *node,
	const uint64_t src[RTE_ACL_DFA_SIZE], uint64_t dst[RTE_ACL_DFA_SIZE])
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(node->dfa_gr64); i++) {
		memcpy(dst + node->dfa_gr64[i] * RTE_ACL_DFA_GR64_SIZE,
			src + i * RTE_ACL_DFA_GR64_SIZE,
			RTE_ACL_DFA_GR64_SIZE * sizeof(dst[0]));
	}
}
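/*
 * Deduplicate the 64-entry groups of an expanded DFA node: a group that
 * matches an earlier one reuses that group's slot number in gr64[],
 * otherwise it gets the next free slot. Returns the number of distinct
 * groups, i.e. how many 64-entry blocks the node occupies at runtime.
 */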
static uint32_t
acl_dfa_count_gr64(const uint64_t array_ptr[RTE_ACL_DFA_SIZE],
	uint8_t gr64[RTE_ACL_DFA_GR64_NUM])
{
	uint32_t i, j, k;

	k = 0;
	for (i = 0; i != RTE_ACL_DFA_GR64_NUM; i++) {

		for (j = 0; j != i; j++) {
			if (memcmp(array_ptr + i * RTE_ACL_DFA_GR64_SIZE,
					array_ptr + j * RTE_ACL_DFA_GR64_SIZE,
					RTE_ACL_DFA_GR64_SIZE *
					sizeof(array_ptr[0])) == 0)
				break;
		}
		gr64[i] = (j != i) ? gr64[j] : k++;
	}

	return k;
}
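/*
 * Expand a node's child pointers and their byte-value bitsets into a
 * flat RTE_ACL_DFA_SIZE entry table: bytes with no child keep the
 * no_match transition; when "resolved" is set the child's final node
 * index is stored, otherwise just its ordinal. Returns the number of
 * contiguous runs of set bits seen while filling the table.
 */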
static uint32_t
acl_node_fill_dfa(const struct rte_acl_node *node,
	uint64_t dfa[RTE_ACL_DFA_SIZE], uint64_t no_match, int32_t resolved)
{
	uint32_t n, x;
	uint32_t ranges, last_bit;
	struct rte_acl_node *child;
	struct rte_acl_bitset *bits;

	ranges = 0;
	last_bit = 0;

	for (n = 0; n < RTE_ACL_DFA_SIZE; n++)
		dfa[n] = no_match;

	for (x = 0; x < node->num_ptrs; x++) {

		child = node->ptrs[x].ptr;
		if (child == NULL)
			continue;

		bits = &node->ptrs[x].values;
		for (n = 0; n < RTE_ACL_DFA_SIZE; n++) {

			if (bits->bits[n / (sizeof(bits_t) * CHAR_BIT)] &
				(1 << (n % (sizeof(bits_t) * CHAR_BIT)))) {

				dfa[n] = resolved ? child->node_index : x;
				ranges += (last_bit == 0);
				last_bit = 1;
			} else {
				last_bit = 0;
			}
		}
	}

	return ranges;
}
/*
 * Counts the number of groups of sequential bits that are
 * either 0 or 1, as specified by the zero_one parameter. This is used to
 * calculate the number of ranges in a node to see if it fits in a quad
 * range node.
 */
static int
acl_count_sequential_groups(struct rte_acl_bitset *bits, int zero_one)
{
	int n, ranges, last_bit;

	ranges = 0;
	last_bit = zero_one ^ 1;

	for (n = QRANGE_MIN; n < UINT8_MAX + 1; n++) {
		if (bits->bits[n / (sizeof(bits_t) * 8)] &
			(1 << (n % (sizeof(bits_t) * 8)))) {
			if (zero_one == 1 && last_bit != 1)
				ranges++;
			last_bit = 1;
		} else {
			if (zero_one == 0 && last_bit != 0)
				ranges++;
			last_bit = 0;
		}
	}
	for (n = 0; n < QRANGE_MIN; n++) {
		if (bits->bits[n / (sizeof(bits_t) * 8)] &
			(1 << (n % (sizeof(bits_t) * 8)))) {
			if (zero_one == 1 && last_bit != 1)
				ranges++;
			last_bit = 1;
		} else {
			if (zero_one == 0 && last_bit != 0)
				ranges++;
			last_bit = 0;
		}
	}

	return ranges;
}
/*
 * Count the number of ranges spanned by the node's pointers.
 */
static int
acl_count_fanout(struct rte_acl_node *node)
{
	uint32_t n;
	int ranges;

	if (node->fanout != 0)
		return node->fanout;

	ranges = acl_count_sequential_groups(&node->values, 0);

	for (n = 0; n < node->num_ptrs; n++) {
		if (node->ptrs[n].ptr != NULL)
			ranges += acl_count_sequential_groups(
				&node->ptrs[n].values, 1);
	}

	node->fanout = ranges;
	return node->fanout;
}
/*
 * Determine the type of each node and count each type.
 */
static void
acl_count_trie_types(struct acl_node_counters *counts,
	struct rte_acl_node *node, uint64_t no_match, int force_dfa)
{
	uint32_t n;
	int num_ptrs;
	uint64_t dfa[RTE_ACL_DFA_SIZE];

	/* skip if this node has been counted */
	if (node->node_type != (uint32_t)RTE_ACL_NODE_UNDEFINED)
		return;

	if (node->match_flag != 0 || node->num_ptrs == 0) {
		counts->match++;
		node->node_type = RTE_ACL_NODE_MATCH;
		return;
	}

	num_ptrs = acl_count_fanout(node);

	/* Force type to DFA */
	if (force_dfa)
		num_ptrs = RTE_ACL_DFA_SIZE;

	/* determine node type based on number of ranges */
	if (num_ptrs == 1) {
		counts->single++;
		node->node_type = RTE_ACL_NODE_SINGLE;
	} else if (num_ptrs <= RTE_ACL_QUAD_MAX) {
		counts->quad++;
		counts->quad_vectors += node->fanout;
		node->node_type = RTE_ACL_NODE_QRANGE;
	} else {
		counts->dfa++;
		node->node_type = RTE_ACL_NODE_DFA;
		if (force_dfa != 0) {
			/* always expand to the max number of groups. */
			for (n = 0; n != RTE_DIM(node->dfa_gr64); n++)
				node->dfa_gr64[n] = n;
			node->fanout = n;
		} else {
			acl_node_fill_dfa(node, dfa, no_match, 0);
			node->fanout = acl_dfa_count_gr64(dfa, node->dfa_gr64);
		}
		counts->dfa_gr64 += node->fanout;
	}

	/*
	 * recursively count the types of all children
	 */
	for (n = 0; n < node->num_ptrs; n++) {
		if (node->ptrs[n].ptr != NULL)
			acl_count_trie_types(counts, node->ptrs[n].ptr,
				no_match, 0);
	}
}
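/*
 * Emit the transition entries for a node into the runtime array.
 * A QRANGE node is collapsed into at most RTE_ACL_QUAD_SIZE ranges:
 * one 64-bit transition per distinct target plus the upper byte value
 * of each range in node->transitions[]. A resolved DFA node has its
 * expanded table copied group by group via acl_dfa_fill_gr64().
 */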
static void
acl_add_ptrs(struct rte_acl_node *node, uint64_t *node_array, uint64_t no_match,
	int resolved)
{
	uint32_t x;
	int32_t m;
	uint64_t *node_a, index, dfa[RTE_ACL_DFA_SIZE];

	acl_node_fill_dfa(node, dfa, no_match, resolved);

	/*
	 * Rather than going from 0 to 255, the range count and the layout
	 * run from 0x80-0xff and then 0x00-0x7f, because quad ranges are
	 * matched with a signed byte compare at classification time.
	 */
	if (node->node_type == RTE_ACL_NODE_QRANGE) {

		m = 0;
		node_a = node_array;
		index = dfa[QRANGE_MIN];
		*node_a++ = index;

		for (x = QRANGE_MIN + 1; x < UINT8_MAX + 1; x++) {
			if (dfa[x] != index) {
				index = dfa[x];
				*node_a++ = index;
				node->transitions[m++] = (uint8_t)(x - 1);
			}
		}

		for (x = 0; x < INT8_MAX + 1; x++) {
			if (dfa[x] != index) {
				index = dfa[x];
				*node_a++ = index;
				node->transitions[m++] = (uint8_t)(x - 1);
			}
		}

		/* fill unused locations with max value - nothing is greater */
		for (; m < RTE_ACL_QUAD_SIZE; m++)
			node->transitions[m] = INT8_MAX;

		RTE_ACL_VERIFY(m <= RTE_ACL_QUAD_SIZE);

	} else if (node->node_type == RTE_ACL_NODE_DFA && resolved) {
		acl_dfa_fill_gr64(node, dfa, node_array);
	}
}
/*
 * Routine that allocates space for this node and recursively calls
 * itself to allocate space for each child. Once all the children are
 * allocated, it resolves all transitions for this node.
 */
static void
acl_gen_node(struct rte_acl_node *node, uint64_t *node_array,
	uint64_t no_match, struct rte_acl_indices *index, int num_categories)
{
	uint32_t n, sz, *qtrp;
	uint64_t *array_ptr;
	struct rte_acl_match_results *match;

	if (node->node_index != RTE_ACL_NODE_UNDEFINED)
		return;

	array_ptr = NULL;

	switch (node->node_type) {
	case RTE_ACL_NODE_DFA:
		array_ptr = &node_array[index->dfa_index];
		node->node_index = acl_dfa_gen_idx(node, index->dfa_index);
		sz = node->fanout * RTE_ACL_DFA_GR64_SIZE;
		index->dfa_index += sz;
		for (n = 0; n < sz; n++)
			array_ptr[n] = no_match;
		break;
	case RTE_ACL_NODE_SINGLE:
		node->node_index = RTE_ACL_QUAD_SINGLE | index->single_index |
			node->node_type;
		array_ptr = &node_array[index->single_index];
		index->single_index += 1;
		array_ptr[0] = no_match;
		break;
	case RTE_ACL_NODE_QRANGE:
		array_ptr = &node_array[index->quad_index];
		acl_add_ptrs(node, array_ptr, no_match, 0);
		qtrp = (uint32_t *)node->transitions;
		node->node_index = qtrp[0];
		node->node_index <<= sizeof(index->quad_index) * CHAR_BIT;
		node->node_index |= index->quad_index | node->node_type;
		index->quad_index += node->fanout;
		break;
	case RTE_ACL_NODE_MATCH:
		match = ((struct rte_acl_match_results *)
			(node_array + index->match_start));
		for (n = 0; n != RTE_DIM(match->results); n++)
			RTE_ACL_VERIFY(match->results[n] == 0);
		memcpy(match + index->match_index, node->mrt,
			sizeof(*node->mrt));
		node->node_index = index->match_index | node->node_type;
		index->match_index += 1;
		break;
	case RTE_ACL_NODE_UNDEFINED:
		RTE_ACL_VERIFY(node->node_type !=
			(uint32_t)RTE_ACL_NODE_UNDEFINED);
		break;
	}

	/* recursively allocate space for all children */
	for (n = 0; n < node->num_ptrs; n++) {
		if (node->ptrs[n].ptr != NULL)
			acl_gen_node(node->ptrs[n].ptr,
				node_array,
				no_match,
				index,
				num_categories);
	}

	/* All children are resolved, resolve this node's pointers */
	switch (node->node_type) {
	case RTE_ACL_NODE_DFA:
		acl_add_ptrs(node, array_ptr, no_match, 1);
		break;
	case RTE_ACL_NODE_SINGLE:
		for (n = 0; n < node->num_ptrs; n++) {
			if (node->ptrs[n].ptr != NULL)
				array_ptr[0] = node->ptrs[n].ptr->node_index;
		}
		break;
	case RTE_ACL_NODE_QRANGE:
		acl_add_ptrs(node, array_ptr, no_match, 1);
		break;
	case RTE_ACL_NODE_MATCH:
		break;
	case RTE_ACL_NODE_UNDEFINED:
		RTE_ACL_VERIFY(node->node_type !=
			(uint32_t)RTE_ACL_NODE_UNDEFINED);
		break;
	}
}
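/*
 * Classify and count every node in all tries, then turn the counts into
 * start offsets for each region of the runtime image: DFA groups first,
 * then quad vectors, then single entries, then the match results.
 */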
static void
acl_calc_counts_indices(struct acl_node_counters *counts,
	struct rte_acl_indices *indices,
	struct rte_acl_bld_trie *node_bld_trie, uint32_t num_tries,
	uint64_t no_match)
{
	uint32_t n;

	memset(indices, 0, sizeof(*indices));
	memset(counts, 0, sizeof(*counts));

	/* Get stats on nodes */
	for (n = 0; n < num_tries; n++) {
		acl_count_trie_types(counts, node_bld_trie[n].trie,
			no_match, 1);
	}

	indices->dfa_index = RTE_ACL_DFA_SIZE + 1;
	indices->quad_index = indices->dfa_index +
		counts->dfa_gr64 * RTE_ACL_DFA_GR64_SIZE;
	indices->single_index = indices->quad_index + counts->quad_vectors;
	indices->match_start = indices->single_index + counts->single + 1;
	indices->match_start = RTE_ALIGN(indices->match_start,
		(XMM_SIZE / sizeof(uint64_t)));
	indices->match_index = 1;
}
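/*
 * Resulting layout of the transition array built below, in 64-bit words
 * (as implied by the offsets computed above):
 *   [0 .. RTE_ACL_DFA_SIZE-1]  DFA filled with no_match, root of empty tries
 *   [RTE_ACL_DFA_SIZE]         NOMATCH single node pointing to itself
 *   [dfa_index ..]             deduplicated 64-entry DFA groups
 *   [quad_index ..]            QRANGE transition vectors
 *   [single_index ..]          SINGLE node transitions
 *   [match_start ..]           rte_acl_match_results array, entry 0 = NOMATCH
 */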
/*
 * Generate the runtime structure from the build-phase trie structure.
 */
int
rte_acl_gen(struct rte_acl_ctx *ctx, struct rte_acl_trie *trie,
	struct rte_acl_bld_trie *node_bld_trie, uint32_t num_tries,
	uint32_t num_categories, uint32_t data_index_sz, size_t max_size)
{
	void *mem;
	size_t total_size;
	uint64_t *node_array, no_match;
	uint32_t n, match_index;
	struct rte_acl_match_results *match;
	struct acl_node_counters counts;
	struct rte_acl_indices indices;

	no_match = RTE_ACL_NODE_MATCH;

	/* Fill counts and indices arrays from the nodes. */
	acl_calc_counts_indices(&counts, &indices,
		node_bld_trie, num_tries, no_match);

	/* Allocate runtime memory (align to cache boundary) */
	total_size = RTE_ALIGN(data_index_sz, RTE_CACHE_LINE_SIZE) +
		indices.match_start * sizeof(uint64_t) +
		(counts.match + 1) * sizeof(struct rte_acl_match_results) +
		XMM_SIZE;

	if (total_size > max_size) {
		RTE_LOG(DEBUG, ACL,
			"Gen phase for ACL ctx \"%s\" exceeds max_size limit, "
			"bytes required: %zu, allowed: %zu\n",
			ctx->name, total_size, max_size);
		return -ERANGE;
	}

	mem = rte_zmalloc_socket(ctx->name, total_size, RTE_CACHE_LINE_SIZE,
			ctx->socket_id);
	if (mem == NULL) {
		RTE_LOG(ERR, ACL,
			"allocation of %zu bytes on socket %d for %s failed\n",
			total_size, ctx->socket_id, ctx->name);
		return -ENOMEM;
	}

	/* Fill the runtime structure */
	match_index = indices.match_start;
	node_array = (uint64_t *)((uintptr_t)mem +
		RTE_ALIGN(data_index_sz, RTE_CACHE_LINE_SIZE));

	/*
	 * Set up the NOMATCH node (a SINGLE at the
	 * highest index, pointing to itself).
	 */
	node_array[RTE_ACL_DFA_SIZE] = RTE_ACL_DFA_SIZE | RTE_ACL_NODE_SINGLE;

	for (n = 0; n < RTE_ACL_DFA_SIZE; n++)
		node_array[n] = no_match;

	/* NOMATCH result at index 0 */
	match = ((struct rte_acl_match_results *)(node_array + match_index));
	memset(match, 0, sizeof(*match));

	for (n = 0; n < num_tries; n++) {

		acl_gen_node(node_bld_trie[n].trie, node_array, no_match,
			&indices, num_categories);

		if (node_bld_trie[n].trie->node_index == no_match)
			trie[n].root_index = 0;
		else
			trie[n].root_index = node_bld_trie[n].trie->node_index;
	}

	ctx->mem = mem;
	ctx->mem_sz = total_size;
	ctx->data_indexes = mem;
	ctx->num_tries = num_tries;
	ctx->num_categories = num_categories;
	ctx->match_index = match_index;
	ctx->no_match = no_match;
	ctx->idle = node_array[RTE_ACL_DFA_SIZE];
	ctx->trans_table = node_array;
	memcpy(ctx->trie, trie, sizeof(ctx->trie));

	acl_gen_log_stats(ctx, &counts, &indices, max_size);
	return 0;
}