/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef _NET_CRC_NEON_H_
#define _NET_CRC_NEON_H_

#include <string.h>

#include <rte_branch_prediction.h>
#include <rte_net_crc.h>
#include <rte_vect.h>
#include <rte_cpuflags.h>

#ifdef __cplusplus
extern "C" {
#endif

/** PMULL CRC computation context structure */
struct crc_pmull_ctx {
	uint64x2_t rk1_rk2;
	uint64x2_t rk5_rk6;
	uint64x2_t rk7_rk8;
};

struct crc_pmull_ctx crc32_eth_pmull __rte_aligned(16);
struct crc_pmull_ctx crc16_ccitt_pmull __rte_aligned(16);
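
/*
 * Role of the precomputed constants, assuming the standard CLMUL CRC
 * scheme (Intel's "Fast CRC Computation for Generic Polynomials Using
 * PCLMULQDQ" method):
 *   - rk1/rk2 fold each 16-byte input block into the running remainder,
 *   - rk5/rk6 reduce the final 128-bit remainder to 64 bits,
 *   - rk7/rk8 drive the Barrett reduction from 64 bits to the 32-bit CRC.
 */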
/**
 * @brief Performs one folding round
 *
 * Logically the function operates as follows:
 *     DATA = READ_NEXT_16BYTES();
 *     T1 = CLMUL(MSB8(FOLD), RK1)
 *     T2 = CLMUL(LSB8(FOLD), RK2)
 *     FOLD = XOR(T1, T2, DATA)
 *
 * @param data_block 16 byte data block
 * @param precomp precomputed rk1 and rk2 constants
 * @param fold running 16 byte folded data
 *
 * @return New 16 byte folded data
 */
static inline uint64x2_t
crcr32_folding_round(uint64x2_t data_block, uint64x2_t precomp,
	uint64x2_t fold)
{
	uint64x2_t tmp0 = vreinterpretq_u64_p128(vmull_p64(
			vgetq_lane_p64(vreinterpretq_p64_u64(fold), 1),
			vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 0)));

	uint64x2_t tmp1 = vreinterpretq_u64_p128(vmull_p64(
			vgetq_lane_p64(vreinterpretq_p64_u64(fold), 0),
			vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 1)));

	return veorq_u64(tmp1, veorq_u64(data_block, tmp0));
}

/**
 * Performs reduction from 128 bits to 64 bits
 *
 * @param data128 128 bits of data to be reduced
 * @param precomp rk5 and rk6 precomputed constants
 *
 * @return data reduced to 64 bits
 */
static inline uint64x2_t
crcr32_reduce_128_to_64(uint64x2_t data128,
	uint64x2_t precomp)
{
	uint64x2_t tmp0, tmp1, tmp2;

	/* 64b fold */
	tmp0 = vreinterpretq_u64_p128(vmull_p64(
			vgetq_lane_p64(vreinterpretq_p64_u64(data128), 0),
			vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 0)));
	tmp1 = vshift_bytes_right(data128, 8);
	tmp0 = veorq_u64(tmp0, tmp1);

	/* 32b fold */
	tmp2 = vshift_bytes_left(tmp0, 4);
	tmp1 = vreinterpretq_u64_p128(vmull_p64(
			vgetq_lane_p64(vreinterpretq_p64_u64(tmp2), 0),
			vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 1)));

	return veorq_u64(tmp1, tmp0);
}

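/*
 * Reading of the two steps above (inferred from the constants' roles):
 * the low 64 bits are folded with rk5 and XORed into the high 64 bits,
 * then the upper 32 bits of that result are folded with rk6, leaving a
 * 64-bit remainder ready for Barrett reduction.
 */
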
/**
 * Performs Barrett's reduction from 64 bits to 32 bits
 *
 * @param data64 64 bits of data to be reduced
 * @param precomp rk7 and rk8 precomputed constants
 *
 * @return data reduced to 32 bits
 */
static inline uint32_t
crcr32_reduce_64_to_32(uint64x2_t data64,
	uint64x2_t precomp)
{
	static uint32_t mask1[4] __rte_aligned(16) = {
		0xffffffff, 0xffffffff, 0x00000000, 0x00000000
	};
	static uint32_t mask2[4] __rte_aligned(16) = {
		0x00000000, 0xffffffff, 0xffffffff, 0xffffffff
	};
	uint64x2_t tmp0, tmp1, tmp2;

	tmp0 = vandq_u64(data64, vld1q_u64((uint64_t *)mask2));

	tmp1 = vreinterpretq_u64_p128(vmull_p64(
			vgetq_lane_p64(vreinterpretq_p64_u64(tmp0), 0),
			vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 0)));
	tmp1 = veorq_u64(tmp1, tmp0);
	tmp1 = vandq_u64(tmp1, vld1q_u64((uint64_t *)mask1));

	tmp2 = vreinterpretq_u64_p128(vmull_p64(
			vgetq_lane_p64(vreinterpretq_p64_u64(tmp1), 0),
			vgetq_lane_p64(vreinterpretq_p64_u64(precomp), 1)));
	tmp2 = veorq_u64(tmp2, tmp1);
	tmp2 = veorq_u64(tmp2, tmp0);

	return vgetq_lane_u32(vreinterpretq_u32_u64(tmp2), 2);
}

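/*
 * Barrett reduction sketch, assuming the standard CLMUL CRC constants
 * (rk7 ~ floor(x^64 / P), rk8 = P, in the reflected polynomial domain):
 *     T1 = floor(R / x^32) * rk7
 *     T2 = floor(T1 / x^32) * rk8
 *     CRC = (R XOR T2) mod x^32
 * The mask1/mask2 loads above isolate the 32-bit terms of each step.
 */
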
static inline uint32_t
crc32_eth_calc_pmull(
	const uint8_t *data,
	uint32_t data_len,
	uint32_t crc,
	const struct crc_pmull_ctx *params)
{
	uint64x2_t temp, fold, k;
	uint32_t n;

	/* Get CRC init value */
	temp = vreinterpretq_u64_u32(vsetq_lane_u32(crc, vmovq_n_u32(0), 0));

	/**
	 * Folding all data into single 16 byte data block
	 * Assumes: fold holds first 16 bytes of data
	 */
	if (unlikely(data_len < 32)) {
		if (unlikely(data_len == 16)) {
			/* 16 bytes */
			fold = vld1q_u64((const uint64_t *)data);
			fold = veorq_u64(fold, temp);
			goto reduction_128_64;
		}

		if (unlikely(data_len < 16)) {
			/* 0 to 15 bytes */
			uint8_t buffer[16] __rte_aligned(16);

			memset(buffer, 0, sizeof(buffer));
			memcpy(buffer, data, data_len);

			fold = vld1q_u64((uint64_t *)buffer);
			fold = veorq_u64(fold, temp);
			if (unlikely(data_len < 4)) {
				fold = vshift_bytes_left(fold, 8 - data_len);
				goto barret_reduction;
			}
			fold = vshift_bytes_left(fold, 16 - data_len);
			goto reduction_128_64;
		}
		/* 17 to 31 bytes */
		fold = vld1q_u64((const uint64_t *)data);
		fold = veorq_u64(fold, temp);
		n = 16;
		k = params->rk1_rk2;
		goto partial_bytes;
	}

	/** At least 32 bytes in the buffer */
	/** Apply CRC initial value */
	fold = vld1q_u64((const uint64_t *)data);
	fold = veorq_u64(fold, temp);

	/** Main folding loop - the last 16 bytes are processed separately */
	k = params->rk1_rk2;
	for (n = 16; (n + 16) <= data_len; n += 16) {
		temp = vld1q_u64((const uint64_t *)&data[n]);
		fold = crcr32_folding_round(temp, k, fold);
	}
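
	/*
	 * Tail handling: when data_len is not a multiple of 16, the final
	 * (unaligned) 16-byte load overlaps bytes that were already folded.
	 * The shifts below splice the `rem` fresh tail bytes together with
	 * the folded state so one last fold round can absorb them.
	 */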
partial_bytes:
	if (likely(n < data_len)) {
		uint64x2_t last16, a, b, mask;
		uint32_t rem = data_len & 15;

		last16 = vld1q_u64((const uint64_t *)&data[data_len - 16]);
		a = vshift_bytes_left(fold, 16 - rem);
		b = vshift_bytes_right(fold, rem);
		mask = vshift_bytes_left(vdupq_n_u64(-1), 16 - rem);
		b = vorrq_u64(b, vandq_u64(mask, last16));

		/* fold the spliced block: CLMUL with rk1/rk2, XOR in tail */
		temp = vreinterpretq_u64_p128(vmull_p64(
				vgetq_lane_p64(vreinterpretq_p64_u64(a), 1),
				vgetq_lane_p64(vreinterpretq_p64_u64(k), 0)));
		fold = vreinterpretq_u64_p128(vmull_p64(
				vgetq_lane_p64(vreinterpretq_p64_u64(a), 0),
				vgetq_lane_p64(vreinterpretq_p64_u64(k), 1)));
		fold = veorq_u64(fold, temp);
		fold = veorq_u64(fold, b);
	}

	/** Reduction 128 -> 32 Assumes: fold holds 128-bit folded data */
reduction_128_64:
	k = params->rk5_rk6;
	fold = crcr32_reduce_128_to_64(fold, k);

barret_reduction:
	k = params->rk7_rk8;
	n = crcr32_reduce_64_to_32(fold, k);

	return n;
}

static inline void
rte_net_crc_neon_init(void)
{
	/* Initialize CRC16 data */
	uint64_t ccitt_k1_k2[2] = {0x189aeLLU, 0x8e10LLU};
	uint64_t ccitt_k5_k6[2] = {0x189aeLLU, 0x114aaLLU};
	uint64_t ccitt_k7_k8[2] = {0x11c581910LLU, 0x10811LLU};

	/* Initialize CRC32 data */
	uint64_t eth_k1_k2[2] = {0xccaa009eLLU, 0x1751997d0LLU};
	uint64_t eth_k5_k6[2] = {0xccaa009eLLU, 0x163cd6124LLU};
	uint64_t eth_k7_k8[2] = {0x1f7011640LLU, 0x1db710641LLU};

	/** Save the CRC16 params in the context structure */
	crc16_ccitt_pmull.rk1_rk2 = vld1q_u64(ccitt_k1_k2);
	crc16_ccitt_pmull.rk5_rk6 = vld1q_u64(ccitt_k5_k6);
	crc16_ccitt_pmull.rk7_rk8 = vld1q_u64(ccitt_k7_k8);

	/** Save the CRC32 params in the context structure */
	crc32_eth_pmull.rk1_rk2 = vld1q_u64(eth_k1_k2);
	crc32_eth_pmull.rk5_rk6 = vld1q_u64(eth_k5_k6);
	crc32_eth_pmull.rk7_rk8 = vld1q_u64(eth_k7_k8);
}

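/*
 * Assuming the standard CLMUL CRC derivation (not spelled out in this
 * file): k1..k6 are folding constants of the form x^n mod P in the
 * bit-reflected domain, while k7/k8 hold the Barrett quotient and the
 * polynomial itself. For example, 0x1db710641 in eth_k7_k8 is the
 * reflected CRC32 Ethernet polynomial.
 */
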
static inline uint32_t
rte_crc16_ccitt_neon_handler(const uint8_t *data,
	uint32_t data_len)
{
	/* Negate and truncate the 32-bit result to the 16-bit CCITT CRC */
	return (uint16_t)~crc32_eth_calc_pmull(data,
		data_len,
		0xffff,
		&crc16_ccitt_pmull);
}

static inline uint32_t
rte_crc32_eth_neon_handler(const uint8_t *data,
	uint32_t data_len)
{
	return ~crc32_eth_calc_pmull(data,
		data_len,
		0xffffffff,
		&crc32_eth_pmull);
}
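
/*
 * Usage sketch (illustrative; `buf` and `buf_len` are hypothetical
 * caller-supplied values). rte_net_crc_neon_init() must run once before
 * the handlers are called:
 *
 *     rte_net_crc_neon_init();
 *     uint32_t crc = rte_crc32_eth_neon_handler(buf, buf_len);
 *
 * In DPDK these handlers are normally reached through the public
 * rte_net_crc_calc() dispatch rather than called directly.
 */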

#ifdef __cplusplus
}
#endif

#endif /* _NET_CRC_NEON_H_ */