.u32 = {0x00000000, 0x04040404, 0x08080808, 0x0c0c0c0c},
};
-static const rte_xmm_t xmm_shuffle_input64 = {
- .u32 = {0x00000000, 0x04040404, 0x80808080, 0x80808080},
-};
-
static const rte_xmm_t xmm_ones_16 = {
.u16 = {1, 1, 1, 1, 1, 1, 1, 1},
};
},
};
-static const rte_xmm_t xmm_match_mask64 = {
- .u32 = {
- RTE_ACL_NODE_MATCH,
- 0,
- RTE_ACL_NODE_MATCH,
- 0,
- },
-};
-
static const rte_xmm_t xmm_index_mask = {
.u32 = {
RTE_ACL_NODE_INDEX,
},
};
-static const rte_xmm_t xmm_index_mask64 = {
- .u32 = {
- RTE_ACL_NODE_INDEX,
- RTE_ACL_NODE_INDEX,
- 0,
- 0,
- },
-};
-
-
/*
 * Resolve priority for multiple results (sse version).
 * This consists of comparing the priority of the current traversal with the
*indices = MM_SET64(transition2, transition1);
}
-/*
- * Check for a match in 2 transitions (contained in SSE register)
- */
-static inline __attribute__((always_inline)) void
-acl_match_check_x2(int slot, const struct rte_acl_ctx *ctx, struct parms *parms,
- struct acl_flow_data *flows, xmm_t *indices, xmm_t match_mask)
-{
- xmm_t temp;
-
- temp = MM_AND(match_mask, *indices);
- while (!MM_TESTZ(temp, temp)) {
- acl_process_matches(indices, slot, ctx, parms, flows);
- temp = MM_AND(match_mask, *indices);
- }
-}
-
/*
* Check for any match in 4 transitions (contained in 2 SSE registers)
*/
*/
static inline __attribute__((always_inline)) xmm_t
calc_addr_sse(xmm_t index_mask, xmm_t next_input, xmm_t shuffle_input,
- xmm_t ones_16, xmm_t indices1, xmm_t indices2)
+ xmm_t ones_16, xmm_t tr_lo, xmm_t tr_hi)
{
- xmm_t addr, node_types, range, temp;
+ xmm_t addr, node_types;
xmm_t dfa_msk, dfa_ofs, quad_ofs;
xmm_t in, r, t;
* it reaches a match.
*/
- /* Shuffle low 32 into temp and high 32 into indices2 */
- temp = (xmm_t)MM_SHUFFLEPS((__m128)indices1, (__m128)indices2, 0x88);
- range = (xmm_t)MM_SHUFFLEPS((__m128)indices1, (__m128)indices2, 0xdd);
-
t = MM_XOR(index_mask, index_mask);
/* shuffle input byte to all 4 positions of 32 bit value */
in = MM_SHUFFLE8(next_input, shuffle_input);
/* Calc node type and node addr */
- node_types = MM_ANDNOT(index_mask, temp);
- addr = MM_AND(index_mask, temp);
+ node_types = MM_ANDNOT(index_mask, tr_lo);
+ addr = MM_AND(index_mask, tr_lo);
/*
* Calc addr for DFAs - addr = dfa_index + input_byte
r = _mm_add_epi8(r, range_base);
t = _mm_srli_epi32(in, 24);
- r = _mm_shuffle_epi8(range, r);
+ r = _mm_shuffle_epi8(tr_hi, r);
dfa_ofs = _mm_sub_epi32(t, r);
*/
/* check ranges */
- temp = MM_CMPGT8(in, range);
+ t = MM_CMPGT8(in, tr_hi);
/* convert -1 to 1 (bytes greater than input byte */
- temp = MM_SIGN8(temp, temp);
+ t = MM_SIGN8(t, t);
/* horizontal add pairs of bytes into words */
- temp = MM_MADD8(temp, temp);
+ t = MM_MADD8(t, t);
/* horizontal add pairs of words into dwords */
- quad_ofs = MM_MADD16(temp, ones_16);
+ quad_ofs = MM_MADD16(t, ones_16);
- /* mask to range type nodes */
- temp = _mm_blendv_epi8(quad_ofs, dfa_ofs, dfa_msk);
+ /* blend DFA and QUAD/SINGLE. */
+ t = _mm_blendv_epi8(quad_ofs, dfa_ofs, dfa_msk);
/* add index into node position */
- return MM_ADD32(addr, temp);
+ return MM_ADD32(addr, t);
}
/*
transition4(xmm_t next_input, const uint64_t *trans,
xmm_t *indices1, xmm_t *indices2)
{
- xmm_t addr;
+ xmm_t addr, tr_lo, tr_hi;
uint64_t trans0, trans2;
+ /* Shuffle low 32 into tr_lo and high 32 into tr_hi */
+ tr_lo = (xmm_t)_mm_shuffle_ps((__m128)*indices1, (__m128)*indices2,
+ 0x88);
+ tr_hi = (xmm_t)_mm_shuffle_ps((__m128)*indices1, (__m128)*indices2,
+ 0xdd);
+
/* Calculate the address (array index) for all 4 transitions. */
addr = calc_addr_sse(xmm_index_mask.x, next_input, xmm_shuffle_input.x,
- xmm_ones_16.x, *indices1, *indices2);
+ xmm_ones_16.x, tr_lo, tr_hi);
/* Gather 64 bit transitions and pack back into 2 registers. */
return 0;
}
-
-static inline __attribute__((always_inline)) xmm_t
-transition2(xmm_t next_input, const uint64_t *trans, xmm_t *indices1)
-{
- uint64_t t;
- xmm_t addr, indices2;
-
- indices2 = _mm_setzero_si128();
-
- addr = calc_addr_sse(xmm_index_mask.x, next_input, xmm_shuffle_input.x,
- xmm_ones_16.x, *indices1, indices2);
-
- /* Gather 64 bit transitions and pack 2 per register. */
-
- t = trans[MM_CVT32(addr)];
-
- /* get slot 1 */
- addr = MM_SHUFFLE32(addr, SHUFFLE32_SLOT1);
- *indices1 = MM_SET64(trans[MM_CVT32(addr)], t);
-
- return MM_SRL32(next_input, CHAR_BIT);
-}
-
-/*
- * Execute trie traversal with 2 traversals in parallel.
- */
-static inline int
-search_sse_2(const struct rte_acl_ctx *ctx, const uint8_t **data,
- uint32_t *results, uint32_t total_packets, uint32_t categories)
-{
- int n;
- struct acl_flow_data flows;
- uint64_t index_array[MAX_SEARCHES_SSE2];
- struct completion cmplt[MAX_SEARCHES_SSE2];
- struct parms parms[MAX_SEARCHES_SSE2];
- xmm_t input, indices;
-
- acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results,
- total_packets, categories, ctx->trans_table);
-
- for (n = 0; n < MAX_SEARCHES_SSE2; n++) {
- cmplt[n].count = 0;
- index_array[n] = acl_start_next_trie(&flows, parms, n, ctx);
- }
-
- indices = MM_LOADU((xmm_t *) &index_array[0]);
-
- /* Check for any matches. */
- acl_match_check_x2(0, ctx, parms, &flows, &indices,
- xmm_match_mask64.x);
-
- while (flows.started > 0) {
-
- /* Gather 4 bytes of input data for each stream. */
- input = _mm_cvtsi32_si128(GET_NEXT_4BYTES(parms, 0));
- input = MM_INSERT32(input, GET_NEXT_4BYTES(parms, 1), 1);
-
- /* Process the 4 bytes of input on each stream. */
-
- input = transition2(input, flows.trans, &indices);
- input = transition2(input, flows.trans, &indices);
- input = transition2(input, flows.trans, &indices);
- input = transition2(input, flows.trans, &indices);
-
- /* Check for any matches. */
- acl_match_check_x2(0, ctx, parms, &flows, &indices,
- xmm_match_mask64.x);
- }
-
- return 0;
-}