/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "acl_run.h"
#include "acl_vect.h"

enum {
	SHUFFLE32_SLOT1 = 0xe5,
	SHUFFLE32_SLOT2 = 0xe6,
	SHUFFLE32_SLOT3 = 0xe7,
	SHUFFLE32_SWAP64 = 0x4e,
};
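/*
 * These are immediates for _mm_shuffle_epi32: each 2-bit field selects the
 * source dword for one result lane (lane i = src[(imm >> (2 * i)) & 3]).
 * 0x4e produces {x2, x3, x0, x1}, swapping the two 64-bit halves of the
 * register, while 0xe5/0xe6/0xe7 bring dword 1/2/3 down into dword 0 and
 * leave the upper three dwords unchanged.
 */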
static const rte_xmm_t xmm_shuffle_input = {
	.u32 = {0x00000000, 0x04040404, 0x08080808, 0x0c0c0c0c},
};

static const rte_xmm_t xmm_ones_16 = {
	.u16 = {1, 1, 1, 1, 1, 1, 1, 1},
};

static const rte_xmm_t xmm_match_mask = {
	.u32 = {
		RTE_ACL_NODE_MATCH,
		RTE_ACL_NODE_MATCH,
		RTE_ACL_NODE_MATCH,
		RTE_ACL_NODE_MATCH,
	},
};

static const rte_xmm_t xmm_index_mask = {
	.u32 = {
		RTE_ACL_NODE_INDEX,
		RTE_ACL_NODE_INDEX,
		RTE_ACL_NODE_INDEX,
		RTE_ACL_NODE_INDEX,
	},
};
/*
 * Resolve priority for multiple results (sse version).
 * This consists of comparing the priority of the current traversal with the
 * running set of results for the packet.
 * For each result, keep a running array of the result (rule number) and
 * its priority for each category.
 */
static inline void
resolve_priority_sse(uint64_t transition, int n, const struct rte_acl_ctx *ctx,
	struct parms *parms, const struct rte_acl_match_results *p,
	uint32_t categories)
{
	uint32_t x;
	xmm_t results, priority, results1, priority1, selector;
	xmm_t *saved_results, *saved_priority;

	for (x = 0; x < categories; x += RTE_ACL_RESULTS_MULTIPLIER) {

		saved_results = (xmm_t *)(&parms[n].cmplt->results[x]);
		saved_priority =
			(xmm_t *)(&parms[n].cmplt->priority[x]);

		/* get results and priorities for completed trie */
		results = MM_LOADU((const xmm_t *)&p[transition].results[x]);
		priority = MM_LOADU((const xmm_t *)&p[transition].priority[x]);

		/* if this is not the first completed trie */
		if (parms[n].cmplt->count != ctx->num_tries) {

			/* get running best results and their priorities */
			results1 = MM_LOADU(saved_results);
			priority1 = MM_LOADU(saved_priority);

			/* select results that are highest priority */
			selector = MM_CMPGT32(priority1, priority);
			results = MM_BLENDV8(results, results1, selector);
			priority = MM_BLENDV8(priority, priority1, selector);
		}

		/* save running best results and their priorities */
		MM_STOREU(saved_results, results);
		MM_STOREU(saved_priority, priority);
	}
}
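/*
 * Scalar sketch of the merge above, for reference only (not part of the
 * original code; guarded by the hypothetical ACL_SCALAR_REF macro so it
 * never builds by default). Per category, the running (result, priority)
 * pair is kept unless the newly completed trie reports an equal or higher
 * priority, which matches the MM_CMPGT32/MM_BLENDV8 selection above.
 */
#ifdef ACL_SCALAR_REF
static inline void
resolve_priority_scalar(uint64_t transition, int n,
	const struct rte_acl_ctx *ctx, struct parms *parms,
	const struct rte_acl_match_results *p, uint32_t categories)
{
	uint32_t x;

	for (x = 0; x < categories; x++) {
		/* keep the running result unless this is the first
		 * completed trie or the new priority is >= the saved one */
		if (parms[n].cmplt->count != ctx->num_tries &&
				parms[n].cmplt->priority[x] >
				p[transition].priority[x])
			continue;
		parms[n].cmplt->results[x] = p[transition].results[x];
		parms[n].cmplt->priority[x] = p[transition].priority[x];
	}
}
#endif /* ACL_SCALAR_REF */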
/*
 * Extract transitions from an XMM register and check for any matches.
 */
static void
acl_process_matches(xmm_t *indices, int slot, const struct rte_acl_ctx *ctx,
	struct parms *parms, struct acl_flow_data *flows)
{
	uint64_t transition1, transition2;

	/* extract transition from low 64 bits. */
	transition1 = MM_CVT64(*indices);

	/* extract transition from high 64 bits. */
	*indices = MM_SHUFFLE32(*indices, SHUFFLE32_SWAP64);
	transition2 = MM_CVT64(*indices);

	transition1 = acl_match_check(transition1, slot, ctx,
		parms, flows, resolve_priority_sse);
	transition2 = acl_match_check(transition2, slot + 1, ctx,
		parms, flows, resolve_priority_sse);

	/* update indices with new transitions. */
	*indices = MM_SET64(transition2, transition1);
}
/*
 * Check for any match in 4 transitions (contained in 2 SSE registers)
 */
static inline __attribute__((always_inline)) void
acl_match_check_x4(int slot, const struct rte_acl_ctx *ctx, struct parms *parms,
	struct acl_flow_data *flows, xmm_t *indices1, xmm_t *indices2,
	xmm_t match_mask)
{
	xmm_t temp;

	/* put low 32 bits of each transition into one register */
	temp = (xmm_t)MM_SHUFFLEPS((__m128)*indices1, (__m128)*indices2,
		0x88);
	/* test for match node */
	temp = MM_AND(match_mask, temp);

	while (!MM_TESTZ(temp, temp)) {
		acl_process_matches(indices1, slot, ctx, parms, flows);
		acl_process_matches(indices2, slot + 2, ctx, parms, flows);

		temp = (xmm_t)MM_SHUFFLEPS((__m128)*indices1,
					(__m128)*indices2,
					0x88);
		temp = MM_AND(match_mask, temp);
	}
}
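/*
 * Note on the mechanics above: xmm_match_mask holds RTE_ACL_NODE_MATCH
 * replicated in every dword, and MM_SHUFFLEPS with immediate 0x88 packs
 * the low dword of each of the 4 transitions into one register
 * ({a0, a2, b0, b2}). MM_TESTZ (PTEST) returns non-zero only when no
 * packed transition has its match bit set, so the loop keeps processing
 * matches until all 4 slots point at non-match nodes.
 */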
/*
 * Calculate the address of the next transition for
 * all types of nodes. Note that only DFA nodes and range
 * nodes actually transition to another node. Match
 * nodes don't move.
 */
static inline __attribute__((always_inline)) xmm_t
calc_addr_sse(xmm_t index_mask, xmm_t next_input, xmm_t shuffle_input,
	xmm_t ones_16, xmm_t indices1, xmm_t indices2)
{
	xmm_t addr, node_types, range, temp;
	xmm_t dfa_msk, dfa_ofs, quad_ofs;
	xmm_t in, r, t;

	const xmm_t range_base = _mm_set_epi32(0xffffff0c, 0xffffff08,
		0xffffff04, 0xffffff00);

	/*
	 * Note that no transition is done for a match
	 * node and therefore a stream freezes when
	 * it reaches a match.
	 */

	/* Shuffle low 32 bits into temp and high 32 bits into range. */
	temp = (xmm_t)MM_SHUFFLEPS((__m128)indices1, (__m128)indices2, 0x88);
	range = (xmm_t)MM_SHUFFLEPS((__m128)indices1, (__m128)indices2, 0xdd);

	t = MM_XOR(index_mask, index_mask);

	/* shuffle input byte to all 4 positions of 32 bit value */
	in = MM_SHUFFLE8(next_input, shuffle_input);

	/* Calc node type and node addr */
	node_types = MM_ANDNOT(index_mask, temp);
	addr = MM_AND(index_mask, temp);

	/*
	 * Calc addr for DFAs - addr = dfa_index + input_byte
	 */

	/* mask for DFA type (0) nodes */
	dfa_msk = MM_CMPEQ32(node_types, t);

	/* quadrant index (input_byte >> 6) for each lane */
	r = _mm_srli_epi32(in, 30);
	r = _mm_add_epi8(r, range_base);

	/* zero-extend the input byte, pick that quadrant's base byte */
	t = _mm_srli_epi32(in, 24);
	r = _mm_shuffle_epi8(range, r);

	dfa_ofs = _mm_sub_epi32(t, r);

	/*
	 * Calculate the number of range boundaries that are less than the
	 * input value. Range boundaries for each node are in signed 8 bit,
	 * ordered from -128 to 127 in the range register.
	 * This is effectively a popcnt of the positions where the input
	 * byte is greater than the boundary.
	 */

	/* check ranges */
	temp = MM_CMPGT8(in, range);

	/* convert -1 to 1 (bytes greater than input byte) */
	temp = MM_SIGN8(temp, temp);

	/* horizontal add pairs of bytes into words */
	temp = MM_MADD8(temp, temp);

	/* horizontal add pairs of words into dwords */
	quad_ofs = MM_MADD16(temp, ones_16);

	/* select the DFA offset for DFA nodes, the range offset otherwise */
	temp = _mm_blendv_epi8(quad_ofs, dfa_ofs, dfa_msk);

	/* add offset to node position */
	return MM_ADD32(addr, temp);
}
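/*
 * Scalar sketch of the address calculation above for a single flow, for
 * reference only (hypothetical helper, guarded so it never builds by
 * default). tr_lo/tr_hi are the low/high 32 bits of the flow's current
 * 64-bit transition word, mirroring the temp/range registers above.
 */
#ifdef ACL_SCALAR_REF
static inline uint32_t
calc_addr_scalar(uint32_t index_mask, uint8_t input_byte,
	uint32_t tr_lo, uint32_t tr_hi)
{
	uint32_t addr = tr_lo & index_mask;
	uint32_t node_type = tr_lo & ~index_mask;
	uint32_t ofs;
	int i;

	if (node_type == 0) {
		/* DFA node: offset = input byte minus the base byte
		 * stored for the input's 64-entry quadrant */
		uint8_t base = (uint8_t)(tr_hi >> (8 * (input_byte >> 6)));
		ofs = (uint32_t)(input_byte - base);
	} else {
		/* range node: count the signed 8-bit boundaries that the
		 * input byte exceeds (the CMPGT8/MADD popcnt above) */
		ofs = 0;
		for (i = 0; i < 4; i++)
			if ((int8_t)input_byte > (int8_t)(tr_hi >> (8 * i)))
				ofs++;
	}
	return addr + ofs;
}
#endif /* ACL_SCALAR_REF */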
/*
 * Process 4 transitions (in 2 SIMD registers) in parallel
 */
static inline __attribute__((always_inline)) xmm_t
transition4(xmm_t next_input, const uint64_t *trans,
	xmm_t *indices1, xmm_t *indices2)
{
	xmm_t addr;
	uint64_t trans0, trans2;

	/* Calculate the address (array index) for all 4 transitions. */
	addr = calc_addr_sse(xmm_index_mask.x, next_input, xmm_shuffle_input.x,
		xmm_ones_16.x, *indices1, *indices2);

	/* Gather 64 bit transitions and pack back into 2 registers. */

	trans0 = trans[MM_CVT32(addr)];

	/* get slot 2 */

	/* {x0, x1, x2, x3} -> {x2, x1, x2, x3} */
	addr = MM_SHUFFLE32(addr, SHUFFLE32_SLOT2);
	trans2 = trans[MM_CVT32(addr)];

	/* get slot 1 */

	/* {x2, x1, x2, x3} -> {x1, x1, x2, x3} */
	addr = MM_SHUFFLE32(addr, SHUFFLE32_SLOT1);
	*indices1 = MM_SET64(trans[MM_CVT32(addr)], trans0);

	/* get slot 3 */

	/* {x1, x1, x2, x3} -> {x3, x1, x2, x3} */
	addr = MM_SHUFFLE32(addr, SHUFFLE32_SLOT3);
	*indices2 = MM_SET64(trans[MM_CVT32(addr)], trans2);

	/* advance input: shift each 32-bit lane right by one byte */
	return MM_SRL32(next_input, CHAR_BIT);
}
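/*
 * Note: SSE has no gather instruction (VPGATHERQQ arrived with AVX2), so
 * each of the 4 table lookups above is a scalar load: the next index is
 * rotated into the low dword with a shuffle, extracted with MM_CVT32 and
 * used to index the transition table directly.
 */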
/*
 * Execute trie traversal with 8 traversals in parallel
 */
static inline int
search_sse_8(const struct rte_acl_ctx *ctx, const uint8_t **data,
	uint32_t *results, uint32_t total_packets, uint32_t categories)
{
	int n;
	struct acl_flow_data flows;
	uint64_t index_array[MAX_SEARCHES_SSE8];
	struct completion cmplt[MAX_SEARCHES_SSE8];
	struct parms parms[MAX_SEARCHES_SSE8];
	xmm_t input0, input1;
	xmm_t indices1, indices2, indices3, indices4;

	acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results,
		total_packets, categories, ctx->trans_table);

	for (n = 0; n < MAX_SEARCHES_SSE8; n++) {
		cmplt[n].count = 0;
		index_array[n] = acl_start_next_trie(&flows, parms, n, ctx);
	}

	/*
	 * indices1 contains index_array[0,1]
	 * indices2 contains index_array[2,3]
	 * indices3 contains index_array[4,5]
	 * indices4 contains index_array[6,7]
	 */

	indices1 = MM_LOADU((xmm_t *) &index_array[0]);
	indices2 = MM_LOADU((xmm_t *) &index_array[2]);

	indices3 = MM_LOADU((xmm_t *) &index_array[4]);
	indices4 = MM_LOADU((xmm_t *) &index_array[6]);

	/* Check for any matches. */
	acl_match_check_x4(0, ctx, parms, &flows,
		&indices1, &indices2, xmm_match_mask.x);
	acl_match_check_x4(4, ctx, parms, &flows,
		&indices3, &indices4, xmm_match_mask.x);
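
	/*
	 * Each loop iteration below consumes the 4 gathered input bytes per
	 * flow: every transition4() call processes one byte and shifts the
	 * input dword right by CHAR_BIT so the next call sees the next byte.
	 */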
	while (flows.started > 0) {

		/* Gather 4 bytes of input data for each stream. */
		input0 = _mm_cvtsi32_si128(GET_NEXT_4BYTES(parms, 0));
		input1 = _mm_cvtsi32_si128(GET_NEXT_4BYTES(parms, 4));

		input0 = MM_INSERT32(input0, GET_NEXT_4BYTES(parms, 1), 1);
		input1 = MM_INSERT32(input1, GET_NEXT_4BYTES(parms, 5), 1);

		input0 = MM_INSERT32(input0, GET_NEXT_4BYTES(parms, 2), 2);
		input1 = MM_INSERT32(input1, GET_NEXT_4BYTES(parms, 6), 2);

		input0 = MM_INSERT32(input0, GET_NEXT_4BYTES(parms, 3), 3);
		input1 = MM_INSERT32(input1, GET_NEXT_4BYTES(parms, 7), 3);

		/* Process the 4 bytes of input on each stream. */

		input0 = transition4(input0, flows.trans,
			&indices1, &indices2);
		input1 = transition4(input1, flows.trans,
			&indices3, &indices4);

		input0 = transition4(input0, flows.trans,
			&indices1, &indices2);
		input1 = transition4(input1, flows.trans,
			&indices3, &indices4);

		input0 = transition4(input0, flows.trans,
			&indices1, &indices2);
		input1 = transition4(input1, flows.trans,
			&indices3, &indices4);

		input0 = transition4(input0, flows.trans,
			&indices1, &indices2);
		input1 = transition4(input1, flows.trans,
			&indices3, &indices4);

		/* Check for any matches. */
		acl_match_check_x4(0, ctx, parms, &flows,
			&indices1, &indices2, xmm_match_mask.x);
		acl_match_check_x4(4, ctx, parms, &flows,
			&indices3, &indices4, xmm_match_mask.x);
	}

	return 0;
}
/*
 * Execute trie traversal with 4 traversals in parallel
 */
static inline int
search_sse_4(const struct rte_acl_ctx *ctx, const uint8_t **data,
	uint32_t *results, uint32_t total_packets, uint32_t categories)
{
	int n;
	struct acl_flow_data flows;
	uint64_t index_array[MAX_SEARCHES_SSE4];
	struct completion cmplt[MAX_SEARCHES_SSE4];
	struct parms parms[MAX_SEARCHES_SSE4];
	xmm_t input, indices1, indices2;

	acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results,
		total_packets, categories, ctx->trans_table);

	for (n = 0; n < MAX_SEARCHES_SSE4; n++) {
		cmplt[n].count = 0;
		index_array[n] = acl_start_next_trie(&flows, parms, n, ctx);
	}

	indices1 = MM_LOADU((xmm_t *) &index_array[0]);
	indices2 = MM_LOADU((xmm_t *) &index_array[2]);

	/* Check for any matches. */
	acl_match_check_x4(0, ctx, parms, &flows,
		&indices1, &indices2, xmm_match_mask.x);
	while (flows.started > 0) {

		/* Gather 4 bytes of input data for each stream. */
		input = _mm_cvtsi32_si128(GET_NEXT_4BYTES(parms, 0));
		input = MM_INSERT32(input, GET_NEXT_4BYTES(parms, 1), 1);
		input = MM_INSERT32(input, GET_NEXT_4BYTES(parms, 2), 2);
		input = MM_INSERT32(input, GET_NEXT_4BYTES(parms, 3), 3);

		/* Process the 4 bytes of input on each stream. */
		input = transition4(input, flows.trans, &indices1, &indices2);
		input = transition4(input, flows.trans, &indices1, &indices2);
		input = transition4(input, flows.trans, &indices1, &indices2);
		input = transition4(input, flows.trans, &indices1, &indices2);

		/* Check for any matches. */
		acl_match_check_x4(0, ctx, parms, &flows,
			&indices1, &indices2, xmm_match_mask.x);
	}

	return 0;
}
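
/*
 * Usage sketch (hypothetical, not part of this file): a classify entry
 * point would dispatch a burst to the widest search it can fill, along
 * these lines. The thresholds and the fallback are assumptions made for
 * illustration only.
 */
#ifdef ACL_SCALAR_REF
static int
classify_sse_sketch(const struct rte_acl_ctx *ctx, const uint8_t **data,
	uint32_t *results, uint32_t num, uint32_t categories)
{
	if (num >= MAX_SEARCHES_SSE8)
		return search_sse_8(ctx, data, results, num, categories);
	if (num >= MAX_SEARCHES_SSE4)
		return search_sse_4(ctx, data, results, num, categories);
	/* narrower (2-wide/scalar) searches omitted from this sketch */
	return -1;
}
#endif /* ACL_SCALAR_REF */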