From 074f54ad03ee0c84dcae235933e2b426208fe591 Mon Sep 17 00:00:00 2001
From: Konstantin Ananyev
Date: Mon, 1 Sep 2014 16:28:44 +0100
Subject: [PATCH] acl: fix build and runtime for default target

Make the ACL library build/work on the 'default' architecture:
- make rte_acl_classify_scalar really scalar (make sure it doesn't use
  SSE4 intrinsics through resolve_priority()).
- Provide two versions of the rte_acl_classify code path:
  rte_acl_classify_sse() - can be built and used only on systems with
  SSE4.2 and above; returns -ENOTSUP on a lower arch.
  rte_acl_classify_scalar() - a slower version, but one that can be
  built and used on all systems.
- Add a new function, rte_acl_classify_alg. This function lets you
  specify an enum value to override the ACL context's default algorithm
  when doing a classification. This allows an application to specify a
  classification algorithm without needing to publicize each method.
  I know there was concern over keeping those methods public, but we
  don't have a static ABI at the moment, so this seems to me a
  reasonable thing to do, as it gives us less of an ABI surface to
  worry about.
- Keep common code shared between these two code paths.

Signed-off-by: Konstantin Ananyev
Acked-by: Neil Horman
---
 app/test-acl/main.c                         |  20 +-
 app/test/test_acl.c                         |  19 +-
 examples/l3fwd-acl/main.c                   |  22 +-
 lib/librte_acl/Makefile                     |   5 +-
 lib/librte_acl/acl.h                        |  15 +
 lib/librte_acl/acl_bld.c                    |   5 +-
 lib/librte_acl/acl_run.h                    | 268 +++++++++++++++
 lib/librte_acl/acl_run_scalar.c             | 193 +++++++++++
 lib/librte_acl/{acl_run.c => acl_run_sse.c} | 362 ++------------------
 lib/librte_acl/rte_acl.c                    |  55 +++
 lib/librte_acl/rte_acl.h                    |  56 ++-
 11 files changed, 635 insertions(+), 385 deletions(-)
 create mode 100644 lib/librte_acl/acl_run.h
 create mode 100644 lib/librte_acl/acl_run_scalar.c
 rename lib/librte_acl/{acl_run.c => acl_run_sse.c} (64%)

diff --git a/app/test-acl/main.c b/app/test-acl/main.c
index d65440977b..44add100ff 100644
--- a/app/test-acl/main.c
+++ b/app/test-acl/main.c
@@ -772,6 +772,15 @@ acx_init(void)
 	if (config.acx == NULL)
 		rte_exit(rte_errno, "failed to create ACL context\n");
 
+	/* set default classify method to scalar for this context. */
+	if (config.scalar) {
+		ret = rte_acl_set_ctx_classify(config.acx,
+			RTE_ACL_CLASSIFY_SCALAR);
+		if (ret != 0)
+			rte_exit(ret, "failed to setup classify method "
+				"for ACL context\n");
+	}
+
 	/* add ACL rules.
*/ f = fopen(config.rule_file, "r"); if (f == NULL) @@ -780,7 +789,7 @@ acx_init(void) ret = add_cb_rules(f, config.acx); if (ret != 0) - rte_exit(rte_errno, "failed to add rules into ACL context\n"); + rte_exit(ret, "failed to add rules into ACL context\n"); fclose(f); @@ -815,13 +824,8 @@ search_ip5tuples_once(uint32_t categories, uint32_t step, int scalar) v += config.trace_sz; } - if (scalar != 0) - ret = rte_acl_classify_scalar(config.acx, data, - results, n, categories); - - else - ret = rte_acl_classify(config.acx, data, - results, n, categories); + ret = rte_acl_classify(config.acx, data, results, + n, categories); if (ret != 0) rte_exit(ret, "classify for ipv%c_5tuples returns %d\n", diff --git a/app/test/test_acl.c b/app/test/test_acl.c index c6b3f860d2..356d6206b8 100644 --- a/app/test/test_acl.c +++ b/app/test/test_acl.c @@ -146,8 +146,9 @@ test_classify_run(struct rte_acl_ctx *acx) } /* make a quick check for scalar */ - ret = rte_acl_classify_scalar(acx, data, results, - RTE_DIM(acl_test_data), RTE_ACL_MAX_CATEGORIES); + ret = rte_acl_classify_alg(acx, data, results, + RTE_DIM(acl_test_data), RTE_ACL_MAX_CATEGORIES, + RTE_ACL_CLASSIFY_SCALAR); if (ret != 0) { printf("Line %i: SSE classify failed!\n", __LINE__); goto err; @@ -341,8 +342,8 @@ test_invalid_layout(void) } /* classify tuples */ - ret = rte_acl_classify(acx, data, results, - RTE_DIM(results), 1); + ret = rte_acl_classify_alg(acx, data, results, + RTE_DIM(results), 1, RTE_ACL_CLASSIFY_SCALAR); if (ret != 0) { printf("Line %i: SSE classify failed!\n", __LINE__); rte_acl_free(acx); @@ -360,8 +361,9 @@ test_invalid_layout(void) } /* classify tuples (scalar) */ - ret = rte_acl_classify_scalar(acx, data, results, - RTE_DIM(results), 1); + ret = rte_acl_classify_alg(acx, data, results, RTE_DIM(results), 1, + RTE_ACL_CLASSIFY_SCALAR); + if (ret != 0) { printf("Line %i: Scalar classify failed!\n", __LINE__); rte_acl_free(acx); @@ -848,7 +850,8 @@ test_invalid_parameters(void) /* scalar classify test */ /* cover zero categories in classify (should not fail) */ - result = rte_acl_classify_scalar(acx, NULL, NULL, 0, 0); + result = rte_acl_classify_alg(acx, NULL, NULL, 0, 0, + RTE_ACL_CLASSIFY_SCALAR); if (result != 0) { printf("Line %i: Scalar classify with zero categories " "failed!\n", __LINE__); @@ -857,7 +860,7 @@ test_invalid_parameters(void) } /* cover invalid but positive categories in classify */ - result = rte_acl_classify_scalar(acx, NULL, NULL, 0, 3); + result = rte_acl_classify(acx, NULL, NULL, 0, 3); if (result == 0) { printf("Line %i: Scalar classify with 3 categories " "should have failed!\n", __LINE__); diff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c index 9b2c21bec8..eac0eab61b 100644 --- a/examples/l3fwd-acl/main.c +++ b/examples/l3fwd-acl/main.c @@ -278,15 +278,6 @@ send_single_packet(struct rte_mbuf *m, uint8_t port); (in) = end + 1; \ } while (0) -#define CLASSIFY(context, data, res, num, cat) do { \ - if (scalar) \ - rte_acl_classify_scalar((context), (data), \ - (res), (num), (cat)); \ - else \ - rte_acl_classify((context), (data), \ - (res), (num), (cat)); \ -} while (0) - /* * ACL rules should have higher priorities than route ones to ensure ACL rule * always be found when input packets have multi-matches in the database. 
@@ -1216,6 +1207,11 @@ setup_acl(struct rte_acl_rule *route_base, if ((context = rte_acl_create(&acl_param)) == NULL) rte_exit(EXIT_FAILURE, "Failed to create ACL context\n"); + if (parm_config.scalar && rte_acl_set_ctx_classify(context, + RTE_ACL_CLASSIFY_SCALAR) != 0) + rte_exit(EXIT_FAILURE, + "Failed to setup classify method for ACL context\n"); + if (rte_acl_add_rules(context, route_base, route_num) < 0) rte_exit(EXIT_FAILURE, "add rules failed\n"); @@ -1436,10 +1432,8 @@ main_loop(__attribute__((unused)) void *dummy) int socketid; const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US; - int scalar = parm_config.scalar; prev_tsc = 0; - lcore_id = rte_lcore_id(); qconf = &lcore_conf[lcore_id]; socketid = rte_lcore_to_socket_id(lcore_id); @@ -1503,7 +1497,8 @@ main_loop(__attribute__((unused)) void *dummy) nb_rx); if (acl_search.num_ipv4) { - CLASSIFY(acl_config.acx_ipv4[socketid], + rte_acl_classify( + acl_config.acx_ipv4[socketid], acl_search.data_ipv4, acl_search.res_ipv4, acl_search.num_ipv4, @@ -1515,7 +1510,8 @@ main_loop(__attribute__((unused)) void *dummy) } if (acl_search.num_ipv6) { - CLASSIFY(acl_config.acx_ipv6[socketid], + rte_acl_classify( + acl_config.acx_ipv6[socketid], acl_search.data_ipv6, acl_search.res_ipv6, acl_search.num_ipv6, diff --git a/lib/librte_acl/Makefile b/lib/librte_acl/Makefile index 4fe4593a0d..65e566df2c 100644 --- a/lib/librte_acl/Makefile +++ b/lib/librte_acl/Makefile @@ -43,7 +43,10 @@ SRCS-$(CONFIG_RTE_LIBRTE_ACL) += tb_mem.c SRCS-$(CONFIG_RTE_LIBRTE_ACL) += rte_acl.c SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_bld.c SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_gen.c -SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_run.c +SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_run_scalar.c +SRCS-$(CONFIG_RTE_LIBRTE_ACL) += acl_run_sse.c + +CFLAGS_acl_run_sse.o += -msse4.1 # install this header file SYMLINK-$(CONFIG_RTE_LIBRTE_ACL)-include := rte_acl_osdep.h diff --git a/lib/librte_acl/acl.h b/lib/librte_acl/acl.h index b9d63fd1d2..102fa51a46 100644 --- a/lib/librte_acl/acl.h +++ b/lib/librte_acl/acl.h @@ -153,6 +153,7 @@ struct rte_acl_ctx { /** Name of the ACL context. */ int32_t socket_id; /** Socket ID to allocate memory from. */ + enum rte_acl_classify_alg alg; void *rules; uint32_t max_rules; uint32_t rule_sz; @@ -174,6 +175,20 @@ int rte_acl_gen(struct rte_acl_ctx *ctx, struct rte_acl_trie *trie, struct rte_acl_bld_trie *node_bld_trie, uint32_t num_tries, uint32_t num_categories, uint32_t data_index_sz, int match_num); +typedef int (*rte_acl_classify_t) +(const struct rte_acl_ctx *, const uint8_t **, uint32_t *, uint32_t, uint32_t); + +/* + * Different implementations of ACL classify. + */ +int +rte_acl_classify_scalar(const struct rte_acl_ctx *ctx, const uint8_t **data, + uint32_t *results, uint32_t num, uint32_t categories); + +int +rte_acl_classify_sse(const struct rte_acl_ctx *ctx, const uint8_t **data, + uint32_t *results, uint32_t num, uint32_t categories); + #ifdef __cplusplus } #endif /* __cplusplus */ diff --git a/lib/librte_acl/acl_bld.c b/lib/librte_acl/acl_bld.c index 873447b874..09d58ea04f 100644 --- a/lib/librte_acl/acl_bld.c +++ b/lib/librte_acl/acl_bld.c @@ -31,7 +31,6 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */
 
-#include <nmmintrin.h>
 #include <rte_acl.h>
 #include "tb_mem.h"
 #include "acl.h"
@@ -1480,8 +1479,8 @@ acl_calc_wildness(struct rte_acl_build_rule *head,
 
 		switch (rule->config->defs[n].type) {
 		case RTE_ACL_FIELD_TYPE_BITMASK:
-			wild = (size -
-				_mm_popcnt_u32(fld->mask_range.u8)) /
+			wild = (size - __builtin_popcount(
+				fld->mask_range.u8)) /
 				size;
 			break;
 
diff --git a/lib/librte_acl/acl_run.h b/lib/librte_acl/acl_run.h
new file mode 100644
index 0000000000..c191053c08
--- /dev/null
+++ b/lib/librte_acl/acl_run.h
@@ -0,0 +1,268 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ACL_RUN_H_
+#define _ACL_RUN_H_
+
+#include <rte_acl.h>
+#include "acl_vect.h"
+#include "acl.h"
+
+#define MAX_SEARCHES_SSE8	8
+#define MAX_SEARCHES_SSE4	4
+#define MAX_SEARCHES_SSE2	2
+#define MAX_SEARCHES_SCALAR	2
+
+#define GET_NEXT_4BYTES(prm, idx)	\
+	(*((const int32_t *)((prm)[(idx)].data + *(prm)[idx].data_index++)))
+
+
+#define RTE_ACL_NODE_INDEX	((uint32_t)~RTE_ACL_NODE_TYPE)
+
+#define SCALAR_QRANGE_MULT	0x01010101
+#define SCALAR_QRANGE_MASK	0x7f7f7f7f
+#define SCALAR_QRANGE_MIN	0x80808080
+
+/*
+ * Structure to manage N parallel trie traversals.
+ * The runtime trie traversal routines can process 8, 4, or 2 tries
+ * in parallel. Each packet may require multiple trie traversals (up to 4).
+ * This structure is used to fill the slots (0 to n-1) for parallel processing
+ * with the trie traversals needed for each packet.
+ */
+struct acl_flow_data {
+	uint32_t num_packets;
+	/* number of packets processed */
+	uint32_t started;
+	/* number of trie traversals in progress */
+	uint32_t trie;
+	/* current trie index (0 to N-1) */
+	uint32_t cmplt_size;
+	uint32_t total_packets;
+	uint32_t categories;
+	/* number of result categories per packet. */
+	/* maximum number of packets to process */
+	const uint64_t *trans;
+	const uint8_t **data;
+	uint32_t *results;
+	struct completion *last_cmplt;
+	struct completion *cmplt_array;
+};
+
+/*
+ * Structure to maintain running results for
+ * a single packet (up to 4 tries).
+ */
+struct completion {
+	uint32_t *results; /* running results. */
+	int32_t priority[RTE_ACL_MAX_CATEGORIES]; /* running priorities. */
+	uint32_t count; /* num of remaining tries */
+	/* true for allocated struct */
+} __attribute__((aligned(XMM_SIZE)));
+
+/*
+ * One parms structure for each slot in the search engine.
+ */
+struct parms {
+	const uint8_t *data;
+	/* input data for this packet */
+	const uint32_t *data_index;
+	/* data indirection for this trie */
+	struct completion *cmplt;
+	/* completion data for this packet */
+};
+
+/*
+ * Define a global idle node for unused engine slots
+ */
+static const uint32_t idle[UINT8_MAX + 1];
+
+/*
+ * Allocate a completion structure to manage the tries for a packet.
+ */
+static inline struct completion *
+alloc_completion(struct completion *p, uint32_t size, uint32_t tries,
+	uint32_t *results)
+{
+	uint32_t n;
+
+	for (n = 0; n < size; n++) {
+
+		if (p[n].count == 0) {
+
+			/* mark as allocated and set number of tries. */
+			p[n].count = tries;
+			p[n].results = results;
+			return &(p[n]);
+		}
+	}
+
+	/* should never get here */
+	return NULL;
+}
+
+/*
+ * Resolve priority for a single result trie.
+ */
+static inline void
+resolve_single_priority(uint64_t transition, int n,
+	const struct rte_acl_ctx *ctx, struct parms *parms,
+	const struct rte_acl_match_results *p)
+{
+	if (parms[n].cmplt->count == ctx->num_tries ||
+			parms[n].cmplt->priority[0] <=
+			p[transition].priority[0]) {
+
+		parms[n].cmplt->priority[0] = p[transition].priority[0];
+		parms[n].cmplt->results[0] = p[transition].results[0];
+	}
+}
+
+/*
+ * Routine to fill a slot in the parallel trie traversal array (parms) from
+ * the list of packets (flows).
+ */
+static inline uint64_t
+acl_start_next_trie(struct acl_flow_data *flows, struct parms *parms, int n,
+	const struct rte_acl_ctx *ctx)
+{
+	uint64_t transition;
+
+	/* if there are any more packets to process */
+	if (flows->num_packets < flows->total_packets) {
+		parms[n].data = flows->data[flows->num_packets];
+		parms[n].data_index = ctx->trie[flows->trie].data_index;
+
+		/* if this is the first trie for this packet */
+		if (flows->trie == 0) {
+			flows->last_cmplt = alloc_completion(flows->cmplt_array,
+				flows->cmplt_size, ctx->num_tries,
+				flows->results +
+				flows->num_packets * flows->categories);
+		}
+
+		/* set completion parameters and starting index for this slot */
+		parms[n].cmplt = flows->last_cmplt;
+		transition =
+			flows->trans[parms[n].data[*parms[n].data_index++] +
+			ctx->trie[flows->trie].root_index];
+
+		/*
+		 * if this is the last trie for this packet,
+		 * then set up the next packet.
+		 */
+		flows->trie++;
+		if (flows->trie >= ctx->num_tries) {
+			flows->trie = 0;
+			flows->num_packets++;
+		}
+
+		/* keep track of number of active trie traversals */
+		flows->started++;
+
+	/* no more tries to process, set slot to an idle position */
+	} else {
+		transition = ctx->idle;
+		parms[n].data = (const uint8_t *)idle;
+		parms[n].data_index = idle;
+	}
+	return transition;
+}
+
+static inline void
+acl_set_flow(struct acl_flow_data *flows, struct completion *cmplt,
+	uint32_t cmplt_size, const uint8_t **data, uint32_t *results,
+	uint32_t data_num, uint32_t categories, const uint64_t *trans)
+{
+	flows->num_packets = 0;
+	flows->started = 0;
+	flows->trie = 0;
+	flows->last_cmplt = NULL;
+	flows->cmplt_array = cmplt;
+	flows->total_packets = data_num;
+	flows->categories = categories;
+	flows->cmplt_size = cmplt_size;
+	flows->data = data;
+	flows->results = results;
+	flows->trans = trans;
+}
+
+typedef void (*resolve_priority_t)
+(uint64_t transition, int n, const struct rte_acl_ctx *ctx,
+	struct parms *parms, const struct rte_acl_match_results *p,
+	uint32_t categories);
+
+/*
+ * Detect matches. If a match node transition is found, then this trie
+ * traversal is complete, so fill the slot with the next trie
+ * to be processed.
+ */
+static inline uint64_t
+acl_match_check(uint64_t transition, int slot,
+	const struct rte_acl_ctx *ctx, struct parms *parms,
+	struct acl_flow_data *flows, resolve_priority_t resolve_priority)
+{
+	const struct rte_acl_match_results *p;
+
+	p = (const struct rte_acl_match_results *)
+		(flows->trans + ctx->match_index);
+
+	if (transition & RTE_ACL_NODE_MATCH) {
+
+		/* Remove flags from index and decrement active traversals */
+		transition &= RTE_ACL_NODE_INDEX;
+		flows->started--;
+
+		/* Resolve priorities for this trie and running results */
+		if (flows->categories == 1)
+			resolve_single_priority(transition, slot, ctx,
+				parms, p);
+		else
+			resolve_priority(transition, slot, ctx, parms,
+				p, flows->categories);
+
+		/* Count down completed tries for this search request */
+		parms[slot].cmplt->count--;
+
+		/* Fill the slot with the next trie or idle trie */
+		transition = acl_start_next_trie(flows, parms, slot, ctx);
+
+	} else if (transition == ctx->idle) {
+		/* reset indirection table for idle slots */
+		parms[slot].data_index = idle;
+	}
+
+	return transition;
+}
+
+#endif /* _ACL_RUN_H_ */
diff --git a/lib/librte_acl/acl_run_scalar.c b/lib/librte_acl/acl_run_scalar.c
new file mode 100644
index 0000000000..43c8fc3e98
--- /dev/null
+++ b/lib/librte_acl/acl_run_scalar.c
@@ -0,0 +1,193 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "acl_run.h"
+
+/*
+ * Resolve priority for multiple results (scalar version).
+ * This consists of comparing the priority of the current traversal with the
+ * running set of results for the packet.
+ * For each result, keep a running array of the result (rule number) and
+ * its priority for each category.
+ */
+static inline void
+resolve_priority_scalar(uint64_t transition, int n,
+	const struct rte_acl_ctx *ctx, struct parms *parms,
+	const struct rte_acl_match_results *p, uint32_t categories)
+{
+	uint32_t i;
+	int32_t *saved_priority;
+	uint32_t *saved_results;
+	const int32_t *priority;
+	const uint32_t *results;
+
+	saved_results = parms[n].cmplt->results;
+	saved_priority = parms[n].cmplt->priority;
+
+	/* results and priorities for completed trie */
+	results = p[transition].results;
+	priority = p[transition].priority;
+
+	/* if this is not the first completed trie */
+	if (parms[n].cmplt->count != ctx->num_tries) {
+		for (i = 0; i < categories; i += RTE_ACL_RESULTS_MULTIPLIER) {
+
+			if (saved_priority[i] <= priority[i]) {
+				saved_priority[i] = priority[i];
+				saved_results[i] = results[i];
+			}
+			if (saved_priority[i + 1] <= priority[i + 1]) {
+				saved_priority[i + 1] = priority[i + 1];
+				saved_results[i + 1] = results[i + 1];
+			}
+			if (saved_priority[i + 2] <= priority[i + 2]) {
+				saved_priority[i + 2] = priority[i + 2];
+				saved_results[i + 2] = results[i + 2];
+			}
+			if (saved_priority[i + 3] <= priority[i + 3]) {
+				saved_priority[i + 3] = priority[i + 3];
+				saved_results[i + 3] = results[i + 3];
+			}
+		}
+	} else {
+		for (i = 0; i < categories; i += RTE_ACL_RESULTS_MULTIPLIER) {
+			saved_priority[i] = priority[i];
+			saved_priority[i + 1] = priority[i + 1];
+			saved_priority[i + 2] = priority[i + 2];
+			saved_priority[i + 3] = priority[i + 3];
+
+			saved_results[i] = results[i];
+			saved_results[i + 1] = results[i + 1];
+			saved_results[i + 2] = results[i + 2];
+			saved_results[i + 3] = results[i + 3];
+		}
+	}
+}
+
+/*
+ * When processing the transition, rather than using an if/else
+ * construct, the offset is calculated for DFA and QRANGE and
+ * then conditionally added to the address based on node type.
+ * This is done to avoid branch mis-predictions. Since the
+ * offset is a rather simple calculation, it is more efficient
+ * to do the calculation and do a conditional move rather than
+ * a conditional branch to determine which calculation to do.
+ */
+static inline uint32_t
+scan_forward(uint32_t input, uint32_t max)
+{
+	return (input == 0) ? max : rte_bsf32(input);
+}
+
+static inline uint64_t
+scalar_transition(const uint64_t *trans_table, uint64_t transition,
+	uint8_t input)
+{
+	uint32_t addr, index, ranges, x, a, b, c;
+
+	/* break transition into component parts */
+	ranges = transition >> (sizeof(index) * CHAR_BIT);
+
+	/* calc address for a QRANGE node */
+	c = input * SCALAR_QRANGE_MULT;
+	a = ranges | SCALAR_QRANGE_MIN;
+	index = transition & ~RTE_ACL_NODE_INDEX;
+	a -= (c & SCALAR_QRANGE_MASK);
+	b = c & SCALAR_QRANGE_MIN;
+	addr = transition ^ index;
+	a &= SCALAR_QRANGE_MIN;
+	a ^= (ranges ^ b) & (a ^ b);
+	x = scan_forward(a, 32) >> 3;
+	addr += (index == RTE_ACL_NODE_DFA) ? input : x;
+
+	/* pickup next transition */
+	transition = *(trans_table + addr);
+	return transition;
+}
+
+int
+rte_acl_classify_scalar(const struct rte_acl_ctx *ctx, const uint8_t **data,
+	uint32_t *results, uint32_t num, uint32_t categories)
+{
+	int n;
+	uint64_t transition0, transition1;
+	uint32_t input0, input1;
+	struct acl_flow_data flows;
+	uint64_t index_array[MAX_SEARCHES_SCALAR];
+	struct completion cmplt[MAX_SEARCHES_SCALAR];
+	struct parms parms[MAX_SEARCHES_SCALAR];
+
+	if (categories != 1 &&
+		((RTE_ACL_RESULTS_MULTIPLIER - 1) & categories) != 0)
+		return -EINVAL;
+
+	acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results, num,
+		categories, ctx->trans_table);
+
+	for (n = 0; n < MAX_SEARCHES_SCALAR; n++) {
+		cmplt[n].count = 0;
+		index_array[n] = acl_start_next_trie(&flows, parms, n, ctx);
+	}
+
+	transition0 = index_array[0];
+	transition1 = index_array[1];
+
+	while (flows.started > 0) {
+
+		input0 = GET_NEXT_4BYTES(parms, 0);
+		input1 = GET_NEXT_4BYTES(parms, 1);
+
+		for (n = 0; n < 4; n++) {
+			if (likely((transition0 & RTE_ACL_NODE_MATCH) == 0))
+				transition0 = scalar_transition(flows.trans,
+					transition0, (uint8_t)input0);
+
+			input0 >>= CHAR_BIT;
+
+			if (likely((transition1 & RTE_ACL_NODE_MATCH) == 0))
+				transition1 = scalar_transition(flows.trans,
+					transition1, (uint8_t)input1);
+
+			input1 >>= CHAR_BIT;
+
+		}
+		if ((transition0 | transition1) & RTE_ACL_NODE_MATCH) {
+			transition0 = acl_match_check(transition0,
+				0, ctx, parms, &flows, resolve_priority_scalar);
+			transition1 = acl_match_check(transition1,
+				1, ctx, parms, &flows, resolve_priority_scalar);
+
+		}
+	}
+	return 0;
+}
diff --git a/lib/librte_acl/acl_run.c b/lib/librte_acl/acl_run_sse.c
similarity index 64%
rename from lib/librte_acl/acl_run.c
rename to lib/librte_acl/acl_run_sse.c
index e3d9fc11b4..4f3f1158a7 100644
--- a/lib/librte_acl/acl_run.c
+++ b/lib/librte_acl/acl_run_sse.c
@@ -31,24 +31,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <rte_acl.h>
-#include "acl_vect.h"
-#include "acl.h"
-
-#define MAX_SEARCHES_SSE8	8
-#define MAX_SEARCHES_SSE4	4
-#define MAX_SEARCHES_SSE2	2
-#define MAX_SEARCHES_SCALAR	2
-
-#define GET_NEXT_4BYTES(prm, idx)	\
-	(*((const int32_t *)((prm)[(idx)].data + *(prm)[idx].data_index++)))
-
-
-#define RTE_ACL_NODE_INDEX	((uint32_t)~RTE_ACL_NODE_TYPE)
-
-#define SCALAR_QRANGE_MULT	0x01010101
-#define SCALAR_QRANGE_MASK	0x7f7f7f7f
-#define SCALAR_QRANGE_MIN	0x80808080
+#include "acl_run.h"
 
 enum {
 	SHUFFLE32_SLOT1 = 0xe5,
@@ -57,60 +40,6 @@ enum {
 	SHUFFLE32_SLOT2 = 0xe6,
 	SHUFFLE32_SLOT3 = 0xe7,
 	SHUFFLE32_SWAP64 = 0x4e,
 };
 
-/*
- * Structure to manage N parallel trie traversals.
- * The runtime trie traversal routines can process 8, 4, or 2 tries
- * in parallel. Each packet may require multiple trie traversals (up to 4).
- * This structure is used to fill the slots (0 to n-1) for parallel processing
- * with the trie traversals needed for each packet.
- */
-struct acl_flow_data {
-	uint32_t num_packets;
-	/* number of packets processed */
-	uint32_t started;
-	/* number of trie traversals in progress */
-	uint32_t trie;
-	/* current trie index (0 to N-1) */
-	uint32_t cmplt_size;
-	uint32_t total_packets;
-	uint32_t categories;
-	/* number of result categories per packet. */
-	/* maximum number of packets to process */
-	const uint64_t *trans;
-	const uint8_t **data;
-	uint32_t *results;
-	struct completion *last_cmplt;
-	struct completion *cmplt_array;
-};
-
-/*
- * Structure to maintain running results for
- * a single packet (up to 4 tries).
- */
-struct completion {
-	uint32_t *results; /* running results. */
-	int32_t priority[RTE_ACL_MAX_CATEGORIES]; /* running priorities. */
-	uint32_t count; /* num of remaining tries */
-	/* true for allocated struct */
-} __attribute__((aligned(XMM_SIZE)));
-
-/*
- * One parms structure for each slot in the search engine.
- */
-struct parms {
-	const uint8_t *data;
-	/* input data for this packet */
-	const uint32_t *data_index;
-	/* data indirection for this trie */
-	struct completion *cmplt;
-	/* completion data for this packet */
-};
-
-/*
- * Define an global idle node for unused engine slots
- */
-static const uint32_t idle[UINT8_MAX + 1];
-
 static const rte_xmm_t mm_type_quad_range = {
 	.u32 = {
 		RTE_ACL_NODE_QRANGE,
 		RTE_ACL_NODE_QRANGE,
 		RTE_ACL_NODE_QRANGE,
 		RTE_ACL_NODE_QRANGE,
 	},
 };
@@ -185,57 +114,16 @@ static const rte_xmm_t mm_index_mask64 = {
 	},
 };
 
-/*
- * Allocate a completion structure to manage the tries for a packet.
- */
-static inline struct completion *
-alloc_completion(struct completion *p, uint32_t size, uint32_t tries,
-	uint32_t *results)
-{
-	uint32_t n;
-
-	for (n = 0; n < size; n++) {
-
-		if (p[n].count == 0) {
-
-			/* mark as allocated and set number of tries. */
-			p[n].count = tries;
-			p[n].results = results;
-			return &(p[n]);
-		}
-	}
-
-	/* should never get here */
-	return NULL;
-}
-
-/*
- * Resolve priority for a single result trie.
- */
-static inline void
-resolve_single_priority(uint64_t transition, int n,
-	const struct rte_acl_ctx *ctx, struct parms *parms,
-	const struct rte_acl_match_results *p)
-{
-	if (parms[n].cmplt->count == ctx->num_tries ||
-			parms[n].cmplt->priority[0] <=
-			p[transition].priority[0]) {
-
-		parms[n].cmplt->priority[0] = p[transition].priority[0];
-		parms[n].cmplt->results[0] = p[transition].results[0];
-	}
-
-	parms[n].cmplt->count--;
-}
 
 /*
- * Resolve priority for multiple results. This consists comparing
- * the priority of the current traversal with the running set of
- * results for the packet. For each result, keep a running array of
- * the result (rule number) and its priority for each category.
+ * Resolve priority for multiple results (sse version).
+ * This consists of comparing the priority of the current traversal with the
+ * running set of results for the packet.
+ * For each result, keep a running array of the result (rule number) and
+ * its priority for each category.
*/ static inline void -resolve_priority(uint64_t transition, int n, const struct rte_acl_ctx *ctx, +resolve_priority_sse(uint64_t transition, int n, const struct rte_acl_ctx *ctx, struct parms *parms, const struct rte_acl_match_results *p, uint32_t categories) { @@ -270,100 +158,6 @@ resolve_priority(uint64_t transition, int n, const struct rte_acl_ctx *ctx, MM_STOREU(saved_results, results); MM_STOREU(saved_priority, priority); } - - /* Count down completed tries for this search request */ - parms[n].cmplt->count--; -} - -/* - * Routine to fill a slot in the parallel trie traversal array (parms) from - * the list of packets (flows). - */ -static inline uint64_t -acl_start_next_trie(struct acl_flow_data *flows, struct parms *parms, int n, - const struct rte_acl_ctx *ctx) -{ - uint64_t transition; - - /* if there are any more packets to process */ - if (flows->num_packets < flows->total_packets) { - parms[n].data = flows->data[flows->num_packets]; - parms[n].data_index = ctx->trie[flows->trie].data_index; - - /* if this is the first trie for this packet */ - if (flows->trie == 0) { - flows->last_cmplt = alloc_completion(flows->cmplt_array, - flows->cmplt_size, ctx->num_tries, - flows->results + - flows->num_packets * flows->categories); - } - - /* set completion parameters and starting index for this slot */ - parms[n].cmplt = flows->last_cmplt; - transition = - flows->trans[parms[n].data[*parms[n].data_index++] + - ctx->trie[flows->trie].root_index]; - - /* - * if this is the last trie for this packet, - * then setup next packet. - */ - flows->trie++; - if (flows->trie >= ctx->num_tries) { - flows->trie = 0; - flows->num_packets++; - } - - /* keep track of number of active trie traversals */ - flows->started++; - - /* no more tries to process, set slot to an idle position */ - } else { - transition = ctx->idle; - parms[n].data = (const uint8_t *)idle; - parms[n].data_index = idle; - } - return transition; -} - -/* - * Detect matches. If a match node transition is found, then this trie - * traversal is complete and fill the slot with the next trie - * to be processed. 
- */ -static inline uint64_t -acl_match_check_transition(uint64_t transition, int slot, - const struct rte_acl_ctx *ctx, struct parms *parms, - struct acl_flow_data *flows) -{ - const struct rte_acl_match_results *p; - - p = (const struct rte_acl_match_results *) - (flows->trans + ctx->match_index); - - if (transition & RTE_ACL_NODE_MATCH) { - - /* Remove flags from index and decrement active traversals */ - transition &= RTE_ACL_NODE_INDEX; - flows->started--; - - /* Resolve priorities for this trie and running results */ - if (flows->categories == 1) - resolve_single_priority(transition, slot, ctx, - parms, p); - else - resolve_priority(transition, slot, ctx, parms, p, - flows->categories); - - /* Fill the slot with the next trie or idle trie */ - transition = acl_start_next_trie(flows, parms, slot, ctx); - - } else if (transition == ctx->idle) { - /* reset indirection table for idle slots */ - parms[slot].data_index = idle; - } - - return transition; } /* @@ -382,10 +176,10 @@ acl_process_matches(xmm_t *indicies, int slot, const struct rte_acl_ctx *ctx, *indicies = MM_SHUFFLE32(*indicies, SHUFFLE32_SWAP64); transition2 = MM_CVT64(*indicies); - transition1 = acl_match_check_transition(transition1, slot, ctx, - parms, flows); - transition2 = acl_match_check_transition(transition2, slot + 1, ctx, - parms, flows); + transition1 = acl_match_check(transition1, slot, ctx, + parms, flows, resolve_priority_sse); + transition2 = acl_match_check(transition2, slot + 1, ctx, + parms, flows, resolve_priority_sse); /* update indicies with new transitions. */ *indicies = MM_SET64(transition2, transition1); @@ -551,28 +345,10 @@ transition4(xmm_t index_mask, xmm_t next_input, xmm_t shuffle_input, return MM_SRL32(next_input, 8); } -static inline void -acl_set_flow(struct acl_flow_data *flows, struct completion *cmplt, - uint32_t cmplt_size, const uint8_t **data, uint32_t *results, - uint32_t data_num, uint32_t categories, const uint64_t *trans) -{ - flows->num_packets = 0; - flows->started = 0; - flows->trie = 0; - flows->last_cmplt = NULL; - flows->cmplt_array = cmplt; - flows->total_packets = data_num; - flows->categories = categories; - flows->cmplt_size = cmplt_size; - flows->data = data; - flows->results = results; - flows->trans = trans; -} - /* * Execute trie traversal with 8 traversals in parallel */ -static inline void +static inline int search_sse_8(const struct rte_acl_ctx *ctx, const uint8_t **data, uint32_t *results, uint32_t total_packets, uint32_t categories) { @@ -676,12 +452,14 @@ search_sse_8(const struct rte_acl_ctx *ctx, const uint8_t **data, acl_match_check_x4(4, ctx, parms, &flows, &indicies3, &indicies4, mm_match_mask.m); } + + return 0; } /* * Execute trie traversal with 4 traversals in parallel */ -static inline void +static inline int search_sse_4(const struct rte_acl_ctx *ctx, const uint8_t **data, uint32_t *results, int total_packets, uint32_t categories) { @@ -740,6 +518,8 @@ search_sse_4(const struct rte_acl_ctx *ctx, const uint8_t **data, acl_match_check_x4(0, ctx, parms, &flows, &indicies1, &indicies2, mm_match_mask.m); } + + return 0; } static inline xmm_t @@ -769,7 +549,7 @@ transition2(xmm_t index_mask, xmm_t next_input, xmm_t shuffle_input, /* * Execute trie traversal with 2 traversals in parallel. 
*/ -static inline void +static inline int search_sse_2(const struct rte_acl_ctx *ctx, const uint8_t **data, uint32_t *results, uint32_t total_packets, uint32_t categories) { @@ -825,108 +605,12 @@ search_sse_2(const struct rte_acl_ctx *ctx, const uint8_t **data, acl_match_check_x2(0, ctx, parms, &flows, &indicies, mm_match_mask64.m); } -} - -/* - * When processing the transition, rather than using if/else - * construct, the offset is calculated for DFA and QRANGE and - * then conditionally added to the address based on node type. - * This is done to avoid branch mis-predictions. Since the - * offset is rather simple calculation it is more efficient - * to do the calculation and do a condition move rather than - * a conditional branch to determine which calculation to do. - */ -static inline uint32_t -scan_forward(uint32_t input, uint32_t max) -{ - return (input == 0) ? max : rte_bsf32(input); -} - -static inline uint64_t -scalar_transition(const uint64_t *trans_table, uint64_t transition, - uint8_t input) -{ - uint32_t addr, index, ranges, x, a, b, c; - - /* break transition into component parts */ - ranges = transition >> (sizeof(index) * CHAR_BIT); - - /* calc address for a QRANGE node */ - c = input * SCALAR_QRANGE_MULT; - a = ranges | SCALAR_QRANGE_MIN; - index = transition & ~RTE_ACL_NODE_INDEX; - a -= (c & SCALAR_QRANGE_MASK); - b = c & SCALAR_QRANGE_MIN; - addr = transition ^ index; - a &= SCALAR_QRANGE_MIN; - a ^= (ranges ^ b) & (a ^ b); - x = scan_forward(a, 32) >> 3; - addr += (index == RTE_ACL_NODE_DFA) ? input : x; - - /* pickup next transition */ - transition = *(trans_table + addr); - return transition; -} - -int -rte_acl_classify_scalar(const struct rte_acl_ctx *ctx, const uint8_t **data, - uint32_t *results, uint32_t num, uint32_t categories) -{ - int n; - uint64_t transition0, transition1; - uint32_t input0, input1; - struct acl_flow_data flows; - uint64_t index_array[MAX_SEARCHES_SCALAR]; - struct completion cmplt[MAX_SEARCHES_SCALAR]; - struct parms parms[MAX_SEARCHES_SCALAR]; - - if (categories != 1 && - ((RTE_ACL_RESULTS_MULTIPLIER - 1) & categories) != 0) - return -EINVAL; - - acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results, num, - categories, ctx->trans_table); - - for (n = 0; n < MAX_SEARCHES_SCALAR; n++) { - cmplt[n].count = 0; - index_array[n] = acl_start_next_trie(&flows, parms, n, ctx); - } - transition0 = index_array[0]; - transition1 = index_array[1]; - - while (flows.started > 0) { - - input0 = GET_NEXT_4BYTES(parms, 0); - input1 = GET_NEXT_4BYTES(parms, 1); - - for (n = 0; n < 4; n++) { - if (likely((transition0 & RTE_ACL_NODE_MATCH) == 0)) - transition0 = scalar_transition(flows.trans, - transition0, (uint8_t)input0); - - input0 >>= CHAR_BIT; - - if (likely((transition1 & RTE_ACL_NODE_MATCH) == 0)) - transition1 = scalar_transition(flows.trans, - transition1, (uint8_t)input1); - - input1 >>= CHAR_BIT; - - } - if ((transition0 | transition1) & RTE_ACL_NODE_MATCH) { - transition0 = acl_match_check_transition(transition0, - 0, ctx, parms, &flows); - transition1 = acl_match_check_transition(transition1, - 1, ctx, parms, &flows); - - } - } return 0; } int -rte_acl_classify(const struct rte_acl_ctx *ctx, const uint8_t **data, +rte_acl_classify_sse(const struct rte_acl_ctx *ctx, const uint8_t **data, uint32_t *results, uint32_t num, uint32_t categories) { if (categories != 1 && @@ -934,11 +618,9 @@ rte_acl_classify(const struct rte_acl_ctx *ctx, const uint8_t **data, return -EINVAL; if (likely(num >= MAX_SEARCHES_SSE8)) - search_sse_8(ctx, data, results, 
num, categories);
+		return search_sse_8(ctx, data, results, num, categories);
 	else if (num >= MAX_SEARCHES_SSE4)
-		search_sse_4(ctx, data, results, num, categories);
+		return search_sse_4(ctx, data, results, num, categories);
 	else
-		search_sse_2(ctx, data, results, num, categories);
-
-	return 0;
+		return search_sse_2(ctx, data, results, num, categories);
 }
diff --git a/lib/librte_acl/rte_acl.c b/lib/librte_acl/rte_acl.c
index 7c288bdd4f..ea23220c94 100644
--- a/lib/librte_acl/rte_acl.c
+++ b/lib/librte_acl/rte_acl.c
@@ -38,6 +38,58 @@
 TAILQ_HEAD(rte_acl_list, rte_tailq_entry);
 
+static const rte_acl_classify_t classify_fns[] = {
+	[RTE_ACL_CLASSIFY_DEFAULT] = rte_acl_classify_scalar,
+	[RTE_ACL_CLASSIFY_SCALAR] = rte_acl_classify_scalar,
+	[RTE_ACL_CLASSIFY_SSE] = rte_acl_classify_sse,
+};
+
+/* by default, use the always available scalar code path. */
+static enum rte_acl_classify_alg rte_acl_default_classify =
+	RTE_ACL_CLASSIFY_SCALAR;
+
+static void
+rte_acl_set_default_classify(enum rte_acl_classify_alg alg)
+{
+	rte_acl_default_classify = alg;
+}
+
+extern int
+rte_acl_set_ctx_classify(struct rte_acl_ctx *ctx, enum rte_acl_classify_alg alg)
+{
+	if (ctx == NULL || (uint32_t)alg >= RTE_DIM(classify_fns))
+		return -EINVAL;
+
+	ctx->alg = alg;
+	return 0;
+}
+
+static void __attribute__((constructor))
+rte_acl_init(void)
+{
+	enum rte_acl_classify_alg alg = RTE_ACL_CLASSIFY_DEFAULT;
+
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+		alg = RTE_ACL_CLASSIFY_SSE;
+
+	rte_acl_set_default_classify(alg);
+}
+
+int
+rte_acl_classify(const struct rte_acl_ctx *ctx, const uint8_t **data,
+	uint32_t *results, uint32_t num, uint32_t categories)
+{
+	return classify_fns[ctx->alg](ctx, data, results, num, categories);
+}
+
+int
+rte_acl_classify_alg(const struct rte_acl_ctx *ctx, const uint8_t **data,
+	uint32_t *results, uint32_t num, uint32_t categories,
+	enum rte_acl_classify_alg alg)
+{
+	return classify_fns[alg](ctx, data, results, num, categories);
+}
+
 struct rte_acl_ctx *
 rte_acl_find_existing(const char *name)
 {
@@ -165,6 +217,7 @@ rte_acl_create(const struct rte_acl_param *param)
 		ctx->max_rules = param->max_rule_num;
 		ctx->rule_sz = param->rule_size;
 		ctx->socket_id = param->socket_id;
+		ctx->alg = rte_acl_default_classify;
 		snprintf(ctx->name, sizeof(ctx->name), "%s", param->name);
 
 		te->data = (void *) ctx;
@@ -261,6 +314,8 @@ rte_acl_dump(const struct rte_acl_ctx *ctx)
 	if (!ctx)
 		return;
 	printf("acl context <%s>@%p\n", ctx->name, ctx);
+	printf("  socket_id=%"PRId32"\n", ctx->socket_id);
+	printf("  alg=%"PRId32"\n", ctx->alg);
 	printf("  max_rules=%"PRIu32"\n", ctx->max_rules);
 	printf("  rule_size=%"PRIu32"\n", ctx->rule_sz);
 	printf("  num_rules=%"PRIu32"\n", ctx->num_rules);
diff --git a/lib/librte_acl/rte_acl.h b/lib/librte_acl/rte_acl.h
index afc0f69090..0e82339048 100644
--- a/lib/librte_acl/rte_acl.h
+++ b/lib/librte_acl/rte_acl.h
@@ -259,7 +259,16 @@ void
 rte_acl_reset(struct rte_acl_ctx *ctx);
 
 /**
- * Search for a matching ACL rule for each input data buffer.
+ * Available implementations of ACL classify.
+ */
+enum rte_acl_classify_alg {
+	RTE_ACL_CLASSIFY_DEFAULT = 0,
+	RTE_ACL_CLASSIFY_SCALAR = 1,  /**< generic implementation. */
+	RTE_ACL_CLASSIFY_SSE = 2,     /**< requires SSE4.1 support. */
+};
+
+/**
+ * Perform search for a matching ACL rule for each input data buffer.
  * Each input data buffer can have up to *categories* matches.
  * That implies that results array should be big enough to hold
  * (categories * num) elements.
@@ -267,7 +276,7 @@ rte_acl_reset(struct rte_acl_ctx *ctx);
  * RTE_ACL_RESULTS_MULTIPLIER and can't be bigger than RTE_ACL_MAX_CATEGORIES.
  * If more than one rule is applicable for given input buffer and
  * given category, then rule with highest priority will be returned as a match.
- * Note, that it is a caller responsibility to ensure that input parameters
+ * Note, that it is a caller's responsibility to ensure that input parameters
  * are valid and point to correct memory locations.
  *
  * @param ctx
@@ -287,15 +296,15 @@
  *   zero on successful completion.
  *   -EINVAL for incorrect arguments.
  */
-int
-rte_acl_classify(const struct rte_acl_ctx *ctx, const uint8_t **data,
-	uint32_t *results, uint32_t num, uint32_t categories);
+extern int
+rte_acl_classify(const struct rte_acl_ctx *ctx,
+	const uint8_t **data,
+	uint32_t *results, uint32_t num,
+	uint32_t categories);
 
 /**
- * Perform scalar search for a matching ACL rule for each input data buffer.
- * Note, that while the search itself will avoid explicit use of SSE/AVX
- * intrinsics, code for comparing matching results/priorities sill might use
- * vector intrinsics (for categories > 1).
+ * Perform a search using the specified algorithm for a matching ACL rule for
+ * each input data buffer.
  * Each input data buffer can have up to *categories* matches.
  * That implies that results array should be big enough to hold
  * (categories * num) elements.
@@ -319,13 +328,36 @@ rte_acl_classify(const struct rte_acl_ctx *ctx, const uint8_t **data,
  * @param categories
  *   Number of maximum possible matches for each input buffer, one possible
  *   match per category.
+ * @param alg
+ *   Algorithm to be used for the search.
+ *   It is the caller's responsibility to ensure that the value refers to an
+ *   existing algorithm, and that it can be run on the given CPU.
  * @return
  *   zero on successful completion.
  *   -EINVAL for incorrect arguments.
  */
-int
-rte_acl_classify_scalar(const struct rte_acl_ctx *ctx, const uint8_t **data,
-	uint32_t *results, uint32_t num, uint32_t categories);
+extern int
+rte_acl_classify_alg(const struct rte_acl_ctx *ctx,
+	const uint8_t **data,
+	uint32_t *results, uint32_t num,
+	uint32_t categories,
+	enum rte_acl_classify_alg alg);
+
+/*
+ * Override the default classifier function for a given ACL context.
+ * @param ctx
+ *   ACL context to change classify function for.
+ * @param alg
+ *   New default classify algorithm for given ACL context.
+ *   It is the caller's responsibility to ensure that the value refers to an
+ *   existing algorithm, and that it can be run on the given CPU.
+ * @return
+ *   - -EINVAL if the parameters are invalid.
+ *   - Zero if operation completed successfully.
+ */
+extern int
+rte_acl_set_ctx_classify(struct rte_acl_ctx *ctx,
+	enum rte_acl_classify_alg alg);
 
 /**
  * Dump an ACL context structure to the console.
-- 
2.20.1
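
To make the new entry points concrete, here is a minimal usage sketch
against the API added above. It is not part of the patch: the helper
name classify_burst and the variables acx/data/results/num are
illustrative, assumed to be prepared by the caller exactly as for any
rte_acl_classify() call (categories is fixed at 1 here).

#include <rte_acl.h>

/*
 * Sketch: classify one burst of input buffers, either with a one-off
 * algorithm override or by changing the context's default algorithm.
 */
static int
classify_burst(struct rte_acl_ctx *acx, const uint8_t **data,
	uint32_t *results, uint32_t num, int force_scalar)
{
	int ret;

	if (force_scalar)
		/*
		 * One-off override: only this call uses the scalar code
		 * path; the context's default algorithm stays untouched.
		 */
		return rte_acl_classify_alg(acx, data, results, num, 1,
			RTE_ACL_CLASSIFY_SCALAR);

	/*
	 * Per-context override: after this succeeds, every plain
	 * rte_acl_classify() call on acx dispatches to the SSE code
	 * path. Per the API docs above, the caller must ensure the
	 * chosen algorithm can actually run on this CPU.
	 */
	ret = rte_acl_set_ctx_classify(acx, RTE_ACL_CLASSIFY_SSE);
	if (ret != 0)
		return ret;

	return rte_acl_classify(acx, data, results, num, 1);
}

Note that contexts created after the library constructor rte_acl_init()
has run already default to the best algorithm it detected (SSE when
RTE_CPUFLAG_SSE4_1 is present, scalar otherwise), so most applications
never need either override.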