X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_acl%2Facl_gen.c;h=f1b9d12f1eac970566b067b1faf2a831e281b8dd;hb=84c39beb2fdbef86b917a18ade8d2e2baa3de732;hp=c9b78394f83b5616f62c0af6cff611093b232561;hpb=ec51901a0b6632556b3c2d10998d72eaf1a54141;p=dpdk.git diff --git a/lib/librte_acl/acl_gen.c b/lib/librte_acl/acl_gen.c index c9b78394f8..f1b9d12f1e 100644 --- a/lib/librte_acl/acl_gen.c +++ b/lib/librte_acl/acl_gen.c @@ -1,38 +1,8 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation */ #include <rte_acl.h> -#include "acl_vect.h" #include "acl.h" #define QRANGE_MIN ((uint8_t)INT8_MIN) @@ -50,20 +20,21 @@ struct acl_node_counters { int32_t quad_vectors; int32_t dfa; int32_t dfa_gr64; - int32_t smallest_match; }; struct rte_acl_indices { - int dfa_index; - int quad_index; - int single_index; - int match_index; + int32_t dfa_index; + int32_t quad_index; + int32_t single_index; + int32_t match_index; + int32_t match_start; }; static void acl_gen_log_stats(const struct rte_acl_ctx *ctx, const struct acl_node_counters *counts, - const struct rte_acl_indices *indices) + const struct rte_acl_indices *indices, + size_t max_size) { RTE_LOG(DEBUG, ACL, "Gen phase for ACL \"%s\":\n" "runtime memory footprint on socket %d:\n" @@ -71,7 +42,8 @@ acl_gen_log_stats(const struct rte_acl_ctx *ctx, "quad nodes/vectors/bytes used: %d/%d/%zu\n" "DFA nodes/group64/bytes used: %d/%d/%zu\n" "match nodes/bytes used: %d/%zu\n" - "total: %zu bytes\n", + "total: %zu bytes\n" + "max limit: %zu bytes\n", ctx->name, ctx->socket_id, counts->single, counts->single * sizeof(uint64_t), counts->quad, counts->quad_vectors, @@ -80,7 +52,8 @@ acl_gen_log_stats(const struct rte_acl_ctx *ctx, indices->dfa_index * sizeof(uint64_t), counts->match, counts->match * sizeof(struct rte_acl_match_results), - ctx->mem_sz); + ctx->mem_sz, + max_size); } static uint64_t @@ -160,7 +133,7 @@ 
acl_node_fill_dfa(const struct rte_acl_node *node, for (n = 0; n < RTE_ACL_DFA_SIZE; n++) { if (bits->bits[n / (sizeof(bits_t) * CHAR_BIT)] & - (1 << (n % (sizeof(bits_t) * CHAR_BIT)))) { + (1U << (n % (sizeof(bits_t) * CHAR_BIT)))) { dfa[n] = resolved ? child->node_index : x; ranges += (last_bit == 0); @@ -190,7 +163,7 @@ acl_count_sequential_groups(struct rte_acl_bitset *bits, int zero_one) for (n = QRANGE_MIN; n < UINT8_MAX + 1; n++) { if (bits->bits[n / (sizeof(bits_t) * 8)] & - (1 << (n % (sizeof(bits_t) * 8)))) { + (1U << (n % (sizeof(bits_t) * 8)))) { if (zero_one == 1 && last_bit != 1) ranges++; last_bit = 1; @@ -202,7 +175,7 @@ acl_count_sequential_groups(struct rte_acl_bitset *bits, int zero_one) } for (n = 0; n < QRANGE_MIN; n++) { if (bits->bits[n / (sizeof(bits_t) * 8)] & - (1 << (n % (sizeof(bits_t) * 8)))) { + (1U << (n % (sizeof(bits_t) * 8)))) { if (zero_one == 1 && last_bit != 1) ranges++; last_bit = 1; @@ -243,9 +216,9 @@ acl_count_fanout(struct rte_acl_node *node) /* * Determine the type of nodes and count each type */ -static int +static void acl_count_trie_types(struct acl_node_counters *counts, - struct rte_acl_node *node, uint64_t no_match, int match, int force_dfa) + struct rte_acl_node *node, uint64_t no_match, int force_dfa) { uint32_t n; int num_ptrs; @@ -253,16 +226,12 @@ acl_count_trie_types(struct acl_node_counters *counts, /* skip if this node has been counted */ if (node->node_type != (uint32_t)RTE_ACL_NODE_UNDEFINED) - return match; + return; if (node->match_flag != 0 || node->num_ptrs == 0) { counts->match++; - if (node->match_flag == -1) - node->match_flag = match++; node->node_type = RTE_ACL_NODE_MATCH; - if (counts->smallest_match > node->match_flag) - counts->smallest_match = node->match_flag; - return match; + return; } num_ptrs = acl_count_fanout(node); @@ -299,11 +268,9 @@ acl_count_trie_types(struct acl_node_counters *counts, */ for (n = 0; n < node->num_ptrs; n++) { if (node->ptrs[n].ptr != NULL) - match = 
acl_count_trie_types(counts, node->ptrs[n].ptr, - no_match, match, 0); + acl_count_trie_types(counts, node->ptrs[n].ptr, + no_match, 0); } - - return match; } static void @@ -400,9 +367,13 @@ acl_gen_node(struct rte_acl_node *node, uint64_t *node_array, break; case RTE_ACL_NODE_MATCH: match = ((struct rte_acl_match_results *) - (node_array + index->match_index)); - memcpy(match + node->match_flag, node->mrt, sizeof(*node->mrt)); - node->node_index = node->match_flag | node->node_type; + (node_array + index->match_start)); + for (n = 0; n != RTE_DIM(match->results); n++) + RTE_ACL_VERIFY(match->results[n] == 0); + memcpy(match + index->match_index, node->mrt, + sizeof(*node->mrt)); + node->node_index = index->match_index | node->node_type; + index->match_index += 1; break; case RTE_ACL_NODE_UNDEFINED: RTE_ACL_VERIFY(node->node_type != @@ -443,11 +414,11 @@ acl_gen_node(struct rte_acl_node *node, uint64_t *node_array, } } -static int +static void acl_calc_counts_indices(struct acl_node_counters *counts, - struct rte_acl_indices *indices, struct rte_acl_trie *trie, + struct rte_acl_indices *indices, struct rte_acl_bld_trie *node_bld_trie, uint32_t num_tries, - int match_num, uint64_t no_match) + uint64_t no_match) { uint32_t n; @@ -456,21 +427,18 @@ acl_calc_counts_indices(struct acl_node_counters *counts, /* Get stats on nodes */ for (n = 0; n < num_tries; n++) { - counts->smallest_match = INT32_MAX; - match_num = acl_count_trie_types(counts, node_bld_trie[n].trie, - no_match, match_num, 1); - trie[n].smallest = counts->smallest_match; + acl_count_trie_types(counts, node_bld_trie[n].trie, + no_match, 1); } indices->dfa_index = RTE_ACL_DFA_SIZE + 1; indices->quad_index = indices->dfa_index + counts->dfa_gr64 * RTE_ACL_DFA_GR64_SIZE; indices->single_index = indices->quad_index + counts->quad_vectors; - indices->match_index = indices->single_index + counts->single + 1; - indices->match_index = RTE_ALIGN(indices->match_index, + indices->match_start = 
indices->single_index + counts->single + 1; + indices->match_start = RTE_ALIGN(indices->match_start, (XMM_SIZE / sizeof(uint64_t))); - - return match_num; + indices->match_index = 1; } /* @@ -479,7 +447,7 @@ acl_calc_counts_indices(struct acl_node_counters *counts, int rte_acl_gen(struct rte_acl_ctx *ctx, struct rte_acl_trie *trie, struct rte_acl_bld_trie *node_bld_trie, uint32_t num_tries, - uint32_t num_categories, uint32_t data_index_sz, int match_num) + uint32_t num_categories, uint32_t data_index_sz, size_t max_size) { void *mem; size_t total_size; @@ -492,15 +460,23 @@ rte_acl_gen(struct rte_acl_ctx *ctx, struct rte_acl_trie *trie, no_match = RTE_ACL_NODE_MATCH; /* Fill counts and indices arrays from the nodes. */ - match_num = acl_calc_counts_indices(&counts, &indices, trie, - node_bld_trie, num_tries, match_num, no_match); + acl_calc_counts_indices(&counts, &indices, + node_bld_trie, num_tries, no_match); /* Allocate runtime memory (align to cache boundary) */ total_size = RTE_ALIGN(data_index_sz, RTE_CACHE_LINE_SIZE) + - indices.match_index * sizeof(uint64_t) + - (match_num + 2) * sizeof(struct rte_acl_match_results) + + indices.match_start * sizeof(uint64_t) + + (counts.match + 1) * sizeof(struct rte_acl_match_results) + XMM_SIZE; + if (total_size > max_size) { + RTE_LOG(DEBUG, ACL, + "Gen phase for ACL ctx \"%s\" exceeds max_size limit, " + "bytes required: %zu, allowed: %zu\n", + ctx->name, total_size, max_size); + return -ERANGE; + } + mem = rte_zmalloc_socket(ctx->name, total_size, RTE_CACHE_LINE_SIZE, ctx->socket_id); if (mem == NULL) { @@ -511,7 +487,7 @@ rte_acl_gen(struct rte_acl_ctx *ctx, struct rte_acl_trie *trie, } /* Fill the runtime structure */ - match_index = indices.match_index; + match_index = indices.match_start; node_array = (uint64_t *)((uintptr_t)mem + RTE_ALIGN(data_index_sz, RTE_CACHE_LINE_SIZE)); @@ -551,6 +527,6 @@ rte_acl_gen(struct rte_acl_ctx *ctx, struct rte_acl_trie *trie, ctx->trans_table = node_array; memcpy(ctx->trie, 
trie, sizeof(ctx->trie)); - acl_gen_log_stats(ctx, &counts, &indices); + acl_gen_log_stats(ctx, &counts, &indices, max_size); return 0; }