/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "acl_run.h"

enum {
	SHUFFLE32_SLOT1 = 0xe5,
	SHUFFLE32_SLOT2 = 0xe6,
	SHUFFLE32_SLOT3 = 0xe7,
	SHUFFLE32_SWAP64 = 0x4e,
};

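/*
 * The values above are _mm_shuffle_epi32() control bytes (two bits per
 * destination lane, lane 0 in the lowest bits): SLOT1/2/3
 * (0xe5/0xe6/0xe7) copy dword 1/2/3 of the source into lane 0 while
 * leaving the upper lanes unchanged, so MM_CVT64() can pull each packed
 * address out in turn; SWAP64 (0x4e) exchanges the two 64-bit halves.
 */
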
static const rte_xmm_t mm_shuffle_input = {
	.u32 = {0x00000000, 0x04040404, 0x08080808, 0x0c0c0c0c},
};

static const rte_xmm_t mm_shuffle_input64 = {
	.u32 = {0x00000000, 0x04040404, 0x80808080, 0x80808080},
};

static const rte_xmm_t mm_ones_16 = {
	.u16 = {1, 1, 1, 1, 1, 1, 1, 1},
};

static const rte_xmm_t mm_match_mask = {
	.u32 = {RTE_ACL_NODE_MATCH, RTE_ACL_NODE_MATCH,
		RTE_ACL_NODE_MATCH, RTE_ACL_NODE_MATCH},
};

static const rte_xmm_t mm_match_mask64 = {
	.u32 = {RTE_ACL_NODE_MATCH, 0, RTE_ACL_NODE_MATCH, 0},
};

static const rte_xmm_t mm_index_mask = {
	.u32 = {RTE_ACL_NODE_INDEX, RTE_ACL_NODE_INDEX,
		RTE_ACL_NODE_INDEX, RTE_ACL_NODE_INDEX},
};

static const rte_xmm_t mm_index_mask64 = {
	.u32 = {RTE_ACL_NODE_INDEX, RTE_ACL_NODE_INDEX, 0, 0},
};

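/*
 * Layout note (as used by acl_calc_addr() below): each 64-bit
 * transition keeps the next-node index in its low 32 bits (extracted
 * with mm_index_mask, match flag tested with mm_match_mask) and the
 * node's range data in the high 32 bits. The *64 variants mask only
 * the dword lanes that carry live transitions when just two flows
 * share one register.
 */
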
/*
 * Resolve priority for multiple results (sse version).
 * This consists of comparing the priority of the current traversal with the
 * running set of results for the packet.
 * For each result, keep a running array of the result (rule number) and
 * its priority for each category.
 */
static inline void
resolve_priority_sse(uint64_t transition, int n, const struct rte_acl_ctx *ctx,
	struct parms *parms, const struct rte_acl_match_results *p,
	uint32_t categories)
{
	uint32_t x;
	xmm_t results, priority, results1, priority1, selector;
	xmm_t *saved_results, *saved_priority;
	for (x = 0; x < categories; x += RTE_ACL_RESULTS_MULTIPLIER) {

		saved_results = (xmm_t *)(&parms[n].cmplt->results[x]);
		saved_priority =
			(xmm_t *)(&parms[n].cmplt->priority[x]);
		/* get results and priorities for completed trie */
		results = MM_LOADU((const xmm_t *)&p[transition].results[x]);
		priority = MM_LOADU((const xmm_t *)&p[transition].priority[x]);
		/* if this is not the first completed trie */
		if (parms[n].cmplt->count != ctx->num_tries) {

			/* get running best results and their priorities */
			results1 = MM_LOADU(saved_results);
			priority1 = MM_LOADU(saved_priority);

			/* select results that are highest priority */
			selector = MM_CMPGT32(priority1, priority);
			results = MM_BLENDV8(results, results1, selector);
			priority = MM_BLENDV8(priority, priority1, selector);
		}
		/* save running best results and their priorities */
		MM_STOREU(saved_results, results);
		MM_STOREU(saved_priority, priority);
	}
}

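/*
 * Scalar sketch of the blend above (illustrative only): for each
 * category lane c,
 *	if (saved_priority[c] > priority[c]) {
 *		results[c] = saved_results[c];
 *		priority[c] = saved_priority[c];
 *	}
 * MM_CMPGT32 builds the per-lane mask and MM_BLENDV8 applies it to
 * all RTE_ACL_RESULTS_MULTIPLIER categories at once.
 */
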
/*
 * Extract transitions from an XMM register and check for any matches
 */
static inline void
acl_process_matches(xmm_t *indices, int slot, const struct rte_acl_ctx *ctx,
	struct parms *parms, struct acl_flow_data *flows)
{
	uint64_t transition1, transition2;
	/* extract transition from low 64 bits. */
	transition1 = MM_CVT64(*indices);

	/* extract transition from high 64 bits. */
	*indices = MM_SHUFFLE32(*indices, SHUFFLE32_SWAP64);
	transition2 = MM_CVT64(*indices);

	transition1 = acl_match_check(transition1, slot, ctx,
		parms, flows, resolve_priority_sse);
	transition2 = acl_match_check(transition2, slot + 1, ctx,
		parms, flows, resolve_priority_sse);

	/* update indices with new transitions. */
	*indices = MM_SET64(transition2, transition1);
}

/*
 * Check for a match in 2 transitions (contained in SSE register)
 */
static inline void
acl_match_check_x2(int slot, const struct rte_acl_ctx *ctx, struct parms *parms,
	struct acl_flow_data *flows, xmm_t *indices, xmm_t match_mask)
{
	xmm_t temp;

	temp = MM_AND(match_mask, *indices);
	while (!MM_TESTZ(temp, temp)) {
		acl_process_matches(indices, slot, ctx, parms, flows);
		temp = MM_AND(match_mask, *indices);
	}
}

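/*
 * The loop above re-tests after processing because a flow that
 * finishes one trie is restarted on the next one by acl_match_check(),
 * and the fresh transition may itself point at a match node.
 */
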
/*
 * Check for any match in 4 transitions (contained in 2 SSE registers)
 */
static inline void
acl_match_check_x4(int slot, const struct rte_acl_ctx *ctx, struct parms *parms,
	struct acl_flow_data *flows, xmm_t *indices1, xmm_t *indices2,
	xmm_t match_mask)
{
	xmm_t temp;

	/* put low 32 bits of each transition into one register */
	temp = (xmm_t)MM_SHUFFLEPS((__m128)*indices1, (__m128)*indices2,
		0x88);
	/* test for match node */
	temp = MM_AND(match_mask, temp);
	while (!MM_TESTZ(temp, temp)) {
		acl_process_matches(indices1, slot, ctx, parms, flows);
		acl_process_matches(indices2, slot + 2, ctx, parms, flows);

		temp = (xmm_t)MM_SHUFFLEPS((__m128)*indices1,
					(__m128)*indices2,
					0x88);
		temp = MM_AND(match_mask, temp);
	}
}

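/*
 * SHUFPS with immediate 0x88 picks dwords {0, 2} from each source
 * register, i.e. the low 32 bits of all four 64-bit transitions,
 * which is where the match-node flag resides.
 */
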
/*
 * Calculate the address of the next transition for
 * all types of nodes. Note that only DFA nodes and range
 * nodes actually transition to another node. Match
 * nodes don't move.
 */
static inline xmm_t
acl_calc_addr(xmm_t index_mask, xmm_t next_input, xmm_t shuffle_input,
	xmm_t ones_16, xmm_t indices1, xmm_t indices2)
{
	xmm_t addr, node_types, range, temp;
	xmm_t dfa_msk, dfa_ofs, quad_ofs;
	xmm_t in, r, t;
	const xmm_t range_base = _mm_set_epi32(0xffffff0c, 0xffffff08,
		0xffffff04, 0xffffff00);

	/*
	 * Note that no transition is done for a match
	 * node and therefore a stream freezes when
	 * it reaches a match.
	 */
	/* Shuffle low 32 bits into temp and high 32 bits into range */
	temp = (xmm_t)MM_SHUFFLEPS((__m128)indices1, (__m128)indices2, 0x88);
	range = (xmm_t)MM_SHUFFLEPS((__m128)indices1, (__m128)indices2, 0xdd);

	t = MM_XOR(index_mask, index_mask);

	/* shuffle input byte to all 4 positions of 32 bit value */
	in = MM_SHUFFLE8(next_input, shuffle_input);

	/* Calc node type and node addr */
	node_types = MM_ANDNOT(index_mask, temp);
	addr = MM_AND(index_mask, temp);
	/*
	 * Calc addr for DFAs - addr = dfa_index + input_byte
	 */

	/* mask for DFA type (0) nodes */
	dfa_msk = MM_CMPEQ32(node_types, t);

	r = _mm_srli_epi32(in, 30);
	r = _mm_add_epi8(r, range_base);

	t = _mm_srli_epi32(in, 24);
	r = _mm_shuffle_epi8(range, r);

	dfa_ofs = _mm_sub_epi32(t, r);
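	/*
	 * Scalar reading of the DFA offset math above (illustrative):
	 * the 256 input values are split into four 64-wide groups whose
	 * base values sit in the node's range bytes; range_base turns
	 * (input >> 6) into a per-lane byte selector for
	 * _mm_shuffle_epi8 (the 0xff bytes zero the other positions),
	 * giving, for each of the four lanes:
	 *	dfa_ofs = input_byte - base[input_byte >> 6]
	 */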
	/*
	 * Calculate the number of range boundaries that are less than the
	 * input value. Range boundaries for each node are signed 8-bit
	 * values, ordered from -128 to 127, in the range register.
	 * This is effectively a popcnt of the boundary bytes that the
	 * input byte is greater than.
	 */

	/* check ranges */
	temp = MM_CMPGT8(in, range);

	/* convert -1 to 1 (bytes greater than input byte) */
	temp = MM_SIGN8(temp, temp);
	/* horizontal add pairs of bytes into words */
	temp = MM_MADD8(temp, temp);

	/* horizontal add pairs of words into dwords */
	quad_ofs = MM_MADD16(temp, ones_16);

	/* blend DFA and QUAD offsets: take dfa_ofs for DFA (type 0) nodes */
	temp = _mm_blendv_epi8(quad_ofs, dfa_ofs, dfa_msk);

	/* add index into node position */
	return MM_ADD32(addr, temp);
}

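/*
 * Sketch of the MADD-based popcnt above (illustrative): after
 * MM_SIGN8 every boundary byte the input exceeds holds 1, so
 * MM_MADD8 (pmaddubsw) sums byte pairs into words (1*1 + 1*1) and
 * MM_MADD16 (pmaddwd) against ones_16 folds word pairs into one
 * 32-bit count per lane: the number of boundaries below the input.
 */
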
/*
 * Process 4 transitions (in 2 SIMD registers) in parallel
 */
static inline xmm_t
transition4(xmm_t index_mask, xmm_t next_input, xmm_t shuffle_input,
	xmm_t ones_16, const uint64_t *trans,
	xmm_t *indices1, xmm_t *indices2)
{
	xmm_t addr;
	uint64_t trans0, trans2;

	/* Calculate the address (array index) for all 4 transitions. */

	addr = acl_calc_addr(index_mask, next_input, shuffle_input, ones_16,
		*indices1, *indices2);
	/* Gather 64 bit transitions and pack back into 2 registers. */

	trans0 = trans[MM_CVT32(addr)];

	/* get slot 2 */

	/* {x0, x1, x2, x3} -> {x2, x1, x2, x3} */
	addr = MM_SHUFFLE32(addr, SHUFFLE32_SLOT2);
	trans2 = trans[MM_CVT32(addr)];

	/* get slot 1 */

	/* {x2, x1, x2, x3} -> {x1, x1, x2, x3} */
	addr = MM_SHUFFLE32(addr, SHUFFLE32_SLOT1);
	*indices1 = MM_SET64(trans[MM_CVT32(addr)], trans0);

	/* get slot 3 */

	/* {x1, x1, x2, x3} -> {x3, x1, x2, x3} */
	addr = MM_SHUFFLE32(addr, SHUFFLE32_SLOT3);
	*indices2 = MM_SET64(trans[MM_CVT32(addr)], trans2);

	return MM_SRL32(next_input, 8);
}

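/*
 * Design note: transition4() returns next_input with every 32-bit
 * lane shifted right by 8, discarding the byte just consumed; four
 * back-to-back calls therefore walk all 4 bytes gathered by
 * GET_NEXT_4BYTES() before the next match check.
 */
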
/*
 * Execute trie traversal with 8 traversals in parallel
 */
static inline int
search_sse_8(const struct rte_acl_ctx *ctx, const uint8_t **data,
	uint32_t *results, uint32_t total_packets, uint32_t categories)
{
	int n;
	struct acl_flow_data flows;
	uint64_t index_array[MAX_SEARCHES_SSE8];
	struct completion cmplt[MAX_SEARCHES_SSE8];
	struct parms parms[MAX_SEARCHES_SSE8];
	xmm_t input0, input1;
	xmm_t indices1, indices2, indices3, indices4;

	acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results,
		total_packets, categories, ctx->trans_table);

	for (n = 0; n < MAX_SEARCHES_SSE8; n++) {
		cmplt[n].count = 0;
		index_array[n] = acl_start_next_trie(&flows, parms, n, ctx);
	}
	/*
	 * indices1 contains index_array[0,1]
	 * indices2 contains index_array[2,3]
	 * indices3 contains index_array[4,5]
	 * indices4 contains index_array[6,7]
	 */

	indices1 = MM_LOADU((xmm_t *) &index_array[0]);
	indices2 = MM_LOADU((xmm_t *) &index_array[2]);

	indices3 = MM_LOADU((xmm_t *) &index_array[4]);
	indices4 = MM_LOADU((xmm_t *) &index_array[6]);
	/* Check for any matches. */
	acl_match_check_x4(0, ctx, parms, &flows,
		&indices1, &indices2, mm_match_mask.x);
	acl_match_check_x4(4, ctx, parms, &flows,
		&indices3, &indices4, mm_match_mask.x);
	while (flows.started > 0) {

		/* Gather 4 bytes of input data for each stream. */
		input0 = MM_INSERT32(mm_ones_16.x, GET_NEXT_4BYTES(parms, 0),
			0);
		input1 = MM_INSERT32(mm_ones_16.x, GET_NEXT_4BYTES(parms, 4),
			0);

		input0 = MM_INSERT32(input0, GET_NEXT_4BYTES(parms, 1), 1);
		input1 = MM_INSERT32(input1, GET_NEXT_4BYTES(parms, 5), 1);

		input0 = MM_INSERT32(input0, GET_NEXT_4BYTES(parms, 2), 2);
		input1 = MM_INSERT32(input1, GET_NEXT_4BYTES(parms, 6), 2);

		input0 = MM_INSERT32(input0, GET_NEXT_4BYTES(parms, 3), 3);
		input1 = MM_INSERT32(input1, GET_NEXT_4BYTES(parms, 7), 3);
		/* Process the 4 bytes of input on each stream. */

		input0 = transition4(mm_index_mask.x, input0,
			mm_shuffle_input.x, mm_ones_16.x,
			flows.trans, &indices1, &indices2);

		input1 = transition4(mm_index_mask.x, input1,
			mm_shuffle_input.x, mm_ones_16.x,
			flows.trans, &indices3, &indices4);

		input0 = transition4(mm_index_mask.x, input0,
			mm_shuffle_input.x, mm_ones_16.x,
			flows.trans, &indices1, &indices2);

		input1 = transition4(mm_index_mask.x, input1,
			mm_shuffle_input.x, mm_ones_16.x,
			flows.trans, &indices3, &indices4);

		input0 = transition4(mm_index_mask.x, input0,
			mm_shuffle_input.x, mm_ones_16.x,
			flows.trans, &indices1, &indices2);

		input1 = transition4(mm_index_mask.x, input1,
			mm_shuffle_input.x, mm_ones_16.x,
			flows.trans, &indices3, &indices4);

		input0 = transition4(mm_index_mask.x, input0,
			mm_shuffle_input.x, mm_ones_16.x,
			flows.trans, &indices1, &indices2);

		input1 = transition4(mm_index_mask.x, input1,
			mm_shuffle_input.x, mm_ones_16.x,
			flows.trans, &indices3, &indices4);
		/* Check for any matches. */
		acl_match_check_x4(0, ctx, parms, &flows,
			&indices1, &indices2, mm_match_mask.x);
		acl_match_check_x4(4, ctx, parms, &flows,
			&indices3, &indices4, mm_match_mask.x);
	}

	return 0;
}

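/*
 * Each iteration of the loop above consumes exactly the 4 bytes
 * gathered per stream (one byte per transition4() call), so the
 * relatively expensive match check runs once per 4 transitions
 * instead of once per byte.
 */
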
/*
 * Execute trie traversal with 4 traversals in parallel
 */
static inline int
search_sse_4(const struct rte_acl_ctx *ctx, const uint8_t **data,
	uint32_t *results, uint32_t total_packets, uint32_t categories)
{
	int n;
	struct acl_flow_data flows;
	uint64_t index_array[MAX_SEARCHES_SSE4];
	struct completion cmplt[MAX_SEARCHES_SSE4];
	struct parms parms[MAX_SEARCHES_SSE4];
	xmm_t input, indices1, indices2;

	acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results,
		total_packets, categories, ctx->trans_table);

	for (n = 0; n < MAX_SEARCHES_SSE4; n++) {
		cmplt[n].count = 0;
		index_array[n] = acl_start_next_trie(&flows, parms, n, ctx);
	}
	indices1 = MM_LOADU((xmm_t *) &index_array[0]);
	indices2 = MM_LOADU((xmm_t *) &index_array[2]);

	/* Check for any matches. */
	acl_match_check_x4(0, ctx, parms, &flows,
		&indices1, &indices2, mm_match_mask.x);
	while (flows.started > 0) {

		/* Gather 4 bytes of input data for each stream. */
		input = MM_INSERT32(mm_ones_16.x, GET_NEXT_4BYTES(parms, 0), 0);
		input = MM_INSERT32(input, GET_NEXT_4BYTES(parms, 1), 1);
		input = MM_INSERT32(input, GET_NEXT_4BYTES(parms, 2), 2);
		input = MM_INSERT32(input, GET_NEXT_4BYTES(parms, 3), 3);
		/* Process the 4 bytes of input on each stream. */
		input = transition4(mm_index_mask.x, input,
			mm_shuffle_input.x, mm_ones_16.x,
			flows.trans, &indices1, &indices2);

		input = transition4(mm_index_mask.x, input,
			mm_shuffle_input.x, mm_ones_16.x,
			flows.trans, &indices1, &indices2);

		input = transition4(mm_index_mask.x, input,
			mm_shuffle_input.x, mm_ones_16.x,
			flows.trans, &indices1, &indices2);

		input = transition4(mm_index_mask.x, input,
			mm_shuffle_input.x, mm_ones_16.x,
			flows.trans, &indices1, &indices2);
		/* Check for any matches. */
		acl_match_check_x4(0, ctx, parms, &flows,
			&indices1, &indices2, mm_match_mask.x);
	}

	return 0;
}

static inline xmm_t
transition2(xmm_t index_mask, xmm_t next_input, xmm_t shuffle_input,
	xmm_t ones_16, const uint64_t *trans, xmm_t *indices1)
{
	uint64_t t;
	xmm_t addr, indices2;

	indices2 = MM_XOR(ones_16, ones_16);

	addr = acl_calc_addr(index_mask, next_input, shuffle_input, ones_16,
		*indices1, indices2);

	/* Gather 64 bit transitions and pack 2 per register. */

	t = trans[MM_CVT32(addr)];

	/* get slot 1 */
	addr = MM_SHUFFLE32(addr, SHUFFLE32_SLOT1);
	*indices1 = MM_SET64(trans[MM_CVT32(addr)], t);

	return MM_SRL32(next_input, 8);
}

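/*
 * Note: transition2() reuses acl_calc_addr() by passing an all-zero
 * indices2 (a register XORed with itself), so only the two live
 * transitions in *indices1 contribute to the computed addresses; the
 * matching *64 masks above keep the dead lanes inert.
 */
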
/*
 * Execute trie traversal with 2 traversals in parallel.
 */
static inline int
search_sse_2(const struct rte_acl_ctx *ctx, const uint8_t **data,
	uint32_t *results, uint32_t total_packets, uint32_t categories)
{
	int n;
	struct acl_flow_data flows;
	uint64_t index_array[MAX_SEARCHES_SSE2];
	struct completion cmplt[MAX_SEARCHES_SSE2];
	struct parms parms[MAX_SEARCHES_SSE2];
	xmm_t input, indices;

	acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results,
		total_packets, categories, ctx->trans_table);

	for (n = 0; n < MAX_SEARCHES_SSE2; n++) {
		cmplt[n].count = 0;
		index_array[n] = acl_start_next_trie(&flows, parms, n, ctx);
	}

	indices = MM_LOADU((xmm_t *) &index_array[0]);

	/* Check for any matches. */
	acl_match_check_x2(0, ctx, parms, &flows, &indices, mm_match_mask64.x);
	while (flows.started > 0) {

		/* Gather 4 bytes of input data for each stream. */
		input = MM_INSERT32(mm_ones_16.x, GET_NEXT_4BYTES(parms, 0), 0);
		input = MM_INSERT32(input, GET_NEXT_4BYTES(parms, 1), 1);
		/* Process the 4 bytes of input on each stream. */

		input = transition2(mm_index_mask64.x, input,
			mm_shuffle_input64.x, mm_ones_16.x,
			flows.trans, &indices);

		input = transition2(mm_index_mask64.x, input,
			mm_shuffle_input64.x, mm_ones_16.x,
			flows.trans, &indices);

		input = transition2(mm_index_mask64.x, input,
			mm_shuffle_input64.x, mm_ones_16.x,
			flows.trans, &indices);

		input = transition2(mm_index_mask64.x, input,
			mm_shuffle_input64.x, mm_ones_16.x,
			flows.trans, &indices);
		/* Check for any matches. */
		acl_match_check_x2(0, ctx, parms, &flows, &indices,
			mm_match_mask64.x);
	}

	return 0;
}

int
rte_acl_classify_sse(const struct rte_acl_ctx *ctx, const uint8_t **data,
	uint32_t *results, uint32_t num, uint32_t categories)
{
	if (likely(num >= MAX_SEARCHES_SSE8))
		return search_sse_8(ctx, data, results, num, categories);
	else if (num >= MAX_SEARCHES_SSE4)
		return search_sse_4(ctx, data, results, num, categories);
	else
		return search_sse_2(ctx, data, results, num, categories);
}
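
/*
 * Usage sketch (illustrative; in practice rte_acl_classify()
 * dispatches here when the SSE classify method is selected):
 *
 *	uint32_t results[num * categories];
 *	const uint8_t *data[num];	// one search-key buffer per packet
 *
 *	rte_acl_classify_sse(ctx, data, results, num, categories);
 */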