1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2019 Vladimir Medvedkin <medvedkinv@gmail.com>
5 #include <rte_common.h>
8 #include <rte_random.h>
9 #include <rte_malloc.h>
13 #include <rte_thash.h>
/* Mask keeping the low reta_sz bits of a hash — used to reduce a 32-bit
 * RSS hash to a RETA (redirection table) index of 2^reta_sz entries. */
15 #define HASH_MSK(reta_sz) ((1 << reta_sz) - 1)
/* Tuple size in bytes; RTE_THASH_V4_L4_LEN is expressed in 32-bit words. */
16 #define TUPLE_SZ (RTE_THASH_V4_L4_LEN * 4)
/* IPv4 test vector: a 5-tuple plus the expected L3-only and L3+L4 Toeplitz
 * hashes. NOTE(review): the struct body is not visible in this chunk; from
 * the usage below the fields appear to be src_ip, dst_ip, src_port,
 * dst_port, hash_l3, hash_l3l4 — confirm against the full file. */
18 struct test_thash_v4 {
/* IPv6 counterpart: 16-byte src/dst addresses, ports, expected hashes.
 * NOTE(review): body not visible in this chunk. */
27 struct test_thash_v6 {
11› /* Known-answer vectors for IPv4: {src_ip, dst_ip, src_port, dst_port,
 * expected hash_l3, expected hash_l3l4} per the 82599 RSS suite. */
36 /*From 82599 Datasheet 7.1.2.8.3 RSS Verification Suite*/
37 struct test_thash_v4 v4_tbl[] = {
38 {RTE_IPV4(161, 142, 100, 80), RTE_IPV4(66, 9, 149, 187),
39 1766, 2794, 0x323e8fc2, 0x51ccc178},
40 {RTE_IPV4(65, 69, 140, 83), RTE_IPV4(199, 92, 111, 2),
41 4739, 14230, 0xd718262a, 0xc626b0ea},
42 {RTE_IPV4(12, 22, 207, 184), RTE_IPV4(24, 19, 198, 95),
43 38024, 12898, 0xd2d0a5de, 0x5c2b394a},
44 {RTE_IPV4(209, 142, 163, 6), RTE_IPV4(38, 27, 205, 30),
45 2217, 48228, 0x82989176, 0xafc7327f},
46 {RTE_IPV4(202, 188, 127, 2), RTE_IPV4(153, 39, 163, 191),
47 1303, 44251, 0x5d1809c5, 0x10e828a2},
/* Known-answer vectors for IPv6 (same layout as v4_tbl, byte-array
 * addresses in network order). */
50 struct test_thash_v6 v6_tbl[] = {
51 /*3ffe:2501:200:3::1*/
52 {{0x3f, 0xfe, 0x25, 0x01, 0x02, 0x00, 0x00, 0x03,
53 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,},
54 /*3ffe:2501:200:1fff::7*/
55 {0x3f, 0xfe, 0x25, 0x01, 0x02, 0x00, 0x1f, 0xff,
56 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,},
57 1766, 2794, 0x2cc18cd5, 0x40207d3d},
/* NOTE(review): source comment for this address is not visible in this
 * chunk; the bytes decode to ff02::1 (all-nodes multicast). */
59 {{0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
60 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,},
61 /*3ffe:501:8::260:97ff:fe40:efab*/
62 {0x3f, 0xfe, 0x05, 0x01, 0x00, 0x08, 0x00, 0x00,
63 0x02, 0x60, 0x97, 0xff, 0xfe, 0x40, 0xef, 0xab,},
64 4739, 14230, 0x0f0c461c, 0xdde51bbf},
65 /*fe80::200:f8ff:fe21:67cf*/
66 {{0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
67 0x02, 0x00, 0xf8, 0xff, 0xfe, 0x21, 0x67, 0xcf,},
68 /*3ffe:1900:4545:3:200:f8ff:fe21:67cf*/
69 {0x3f, 0xfe, 0x19, 0x00, 0x45, 0x45, 0x00, 0x03,
70 0x02, 0x00, 0xf8, 0xff, 0xfe, 0x21, 0x67, 0xcf,},
71 38024, 44251, 0x4b61e985, 0x02d1feef},
/* 40-byte RSS key used by most tests; presumably the well-known default
 * key from the 82599 RSS verification suite (matches the datasheet
 * reference above) — the expected hashes in v4_tbl/v6_tbl depend on it. */
74 uint8_t default_rss_key[] = {
75 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
76 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
77 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
78 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
79 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
/* An oversized key (default_rss_key repeated 6x = 240 bytes) used by
 * test_big_tuple_gfni to exercise GFNI hashing of tuples larger than a
 * standard 40-byte key allows. */
82 static const uint8_t big_rss_key[] = {
83 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
84 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
85 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
86 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
87 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
88 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
89 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
90 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
91 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
92 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
93 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
94 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
95 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
96 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
97 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
98 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
99 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
100 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
101 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
102 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
103 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
104 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
105 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
106 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
107 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
/* Verify the software Toeplitz implementations (rte_softrss and
 * rte_softrss_be) against the 82599 known-answer vectors, for both IPv4
 * and IPv6 tuples, with both the original key and the byte-swapped key
 * produced by rte_convert_rss_key().
 * NOTE(review): this chunk is sampled — signature prefix, failure
 * returns and closing braces are not visible here. */
111 test_toeplitz_hash_calc(void)
114 union rte_thash_tuple tuple;
115 uint32_t rss_l3, rss_l3l4;
116 uint8_t rss_key_be[RTE_DIM(default_rss_key)];
117 struct rte_ipv6_hdr ipv6_hdr;
/* Pre-convert the key once; rte_softrss_be expects a host-order key. */
120 rte_convert_rss_key((uint32_t *)&default_rss_key,
121 (uint32_t *)rss_key_be, RTE_DIM(default_rss_key));
/* IPv4 vectors: fill the tuple in host byte order for rte_softrss. */
124 for (i = 0; i < RTE_DIM(v4_tbl); i++) {
125 tuple.v4.src_addr = v4_tbl[i].src_ip;
126 tuple.v4.dst_addr = v4_tbl[i].dst_ip;
127 tuple.v4.sport = v4_tbl[i].src_port;
128 tuple.v4.dport = v4_tbl[i].dst_port;
129 /*Calculate hash with original key*/
130 rss_l3 = rte_softrss((uint32_t *)&tuple,
131 RTE_THASH_V4_L3_LEN, default_rss_key);
132 rss_l3l4 = rte_softrss((uint32_t *)&tuple,
133 RTE_THASH_V4_L4_LEN, default_rss_key);
134 if ((rss_l3 != v4_tbl[i].hash_l3) ||
135 (rss_l3l4 != v4_tbl[i].hash_l3l4))
137 /*Calculate hash with converted key*/
138 rss_l3 = rte_softrss_be((uint32_t *)&tuple,
139 RTE_THASH_V4_L3_LEN, rss_key_be);
140 rss_l3l4 = rte_softrss_be((uint32_t *)&tuple,
141 RTE_THASH_V4_L4_LEN, rss_key_be);
142 if ((rss_l3 != v4_tbl[i].hash_l3) ||
143 (rss_l3l4 != v4_tbl[i].hash_l3l4))
/* IPv6 vectors: build an ipv6 header, then let the library helper
 * load/convert the addresses into the tuple. */
146 for (i = 0; i < RTE_DIM(v6_tbl); i++) {
148 for (j = 0; j < RTE_DIM(ipv6_hdr.src_addr); j++)
149 ipv6_hdr.src_addr[j] = v6_tbl[i].src_ip[j];
150 for (j = 0; j < RTE_DIM(ipv6_hdr.dst_addr); j++)
151 ipv6_hdr.dst_addr[j] = v6_tbl[i].dst_ip[j];
152 /*Load and convert ipv6 address into tuple*/
153 rte_thash_load_v6_addrs(&ipv6_hdr, &tuple);
154 tuple.v6.sport = v6_tbl[i].src_port;
155 tuple.v6.dport = v6_tbl[i].dst_port;
156 /*Calculate hash with original key*/
157 rss_l3 = rte_softrss((uint32_t *)&tuple,
158 RTE_THASH_V6_L3_LEN, default_rss_key);
159 rss_l3l4 = rte_softrss((uint32_t *)&tuple,
160 RTE_THASH_V6_L4_LEN, default_rss_key);
161 if ((rss_l3 != v6_tbl[i].hash_l3) ||
162 (rss_l3l4 != v6_tbl[i].hash_l3l4))
164 /*Calculate hash with converted key*/
165 rss_l3 = rte_softrss_be((uint32_t *)&tuple,
166 RTE_THASH_V6_L3_LEN, rss_key_be);
167 rss_l3l4 = rte_softrss_be((uint32_t *)&tuple,
168 RTE_THASH_V6_L4_LEN, rss_key_be);
169 if ((rss_l3 != v6_tbl[i].hash_l3) ||
170 (rss_l3l4 != v6_tbl[i].hash_l3l4))
/* Verify the GFNI (Galois Field New Instructions) Toeplitz path against
 * the same known-answer vectors. Skipped when the CPU lacks GFNI.
 * Unlike rte_softrss, rte_thash_gfni consumes the tuple as raw bytes in
 * network (big-endian) order, hence the rte_cpu_to_be_* conversions. */
177 test_toeplitz_hash_gfni(void)
180 union rte_thash_tuple tuple;
181 uint32_t rss_l3, rss_l3l4;
182 uint64_t rss_key_matrixes[RTE_DIM(default_rss_key)];
184 if (!rte_thash_gfni_supported())
187 /* Convert RSS key into matrixes */
188 rte_thash_complete_matrix(rss_key_matrixes, default_rss_key,
189 RTE_DIM(default_rss_key));
191 for (i = 0; i < RTE_DIM(v4_tbl); i++) {
192 tuple.v4.src_addr = rte_cpu_to_be_32(v4_tbl[i].src_ip);
193 tuple.v4.dst_addr = rte_cpu_to_be_32(v4_tbl[i].dst_ip);
/* NOTE(review): sport/dport assignments look swapped but appear
 * intentional — in rte_thash_tuple the dport field precedes sport in
 * memory, so this ordering yields wire-order bytes (src_port first).
 * Confirm against the rte_ipv4_tuple layout in rte_thash.h. */
194 tuple.v4.sport = rte_cpu_to_be_16(v4_tbl[i].dst_port);
195 tuple.v4.dport = rte_cpu_to_be_16(v4_tbl[i].src_port);
/* Lengths are in 32-bit words; GFNI API takes a byte count. */
197 rss_l3 = rte_thash_gfni(rss_key_matrixes, (uint8_t *)&tuple,
198 RTE_THASH_V4_L3_LEN * 4);
199 rss_l3l4 = rte_thash_gfni(rss_key_matrixes, (uint8_t *)&tuple,
200 RTE_THASH_V4_L4_LEN * 4);
201 if ((rss_l3 != v4_tbl[i].hash_l3) ||
202 (rss_l3l4 != v4_tbl[i].hash_l3l4))
206 for (i = 0; i < RTE_DIM(v6_tbl); i++) {
207 for (j = 0; j < RTE_DIM(tuple.v6.src_addr); j++)
208 tuple.v6.src_addr[j] = v6_tbl[i].src_ip[j];
209 for (j = 0; j < RTE_DIM(tuple.v6.dst_addr); j++)
210 tuple.v6.dst_addr[j] = v6_tbl[i].dst_ip[j];
211 tuple.v6.sport = rte_cpu_to_be_16(v6_tbl[i].dst_port);
212 tuple.v6.dport = rte_cpu_to_be_16(v6_tbl[i].src_port);
213 rss_l3 = rte_thash_gfni(rss_key_matrixes, (uint8_t *)&tuple,
214 RTE_THASH_V6_L3_LEN * 4);
215 rss_l3l4 = rte_thash_gfni(rss_key_matrixes, (uint8_t *)&tuple,
216 RTE_THASH_V6_L4_LEN * 4);
217 if ((rss_l3 != v6_tbl[i].hash_l3) ||
218 (rss_l3l4 != v6_tbl[i].hash_l3l4))
/* Indices into the hash[] array of test_toeplitz_hash_rand_data: one
 * slot per (implementation, buffer) pair so scalar, GFNI and GFNI-bulk
 * results over the same data can be cross-checked. The two bulk slots
 * must stay adjacent — rte_thash_gfni_bulk writes them contiguously. */
229 SCALAR_DATA_BUF_1_HASH_IDX = 0,
230 SCALAR_DATA_BUF_2_HASH_IDX,
231 GFNI_DATA_BUF_1_HASH_IDX,
232 GFNI_DATA_BUF_2_HASH_IDX,
233 GFNI_BULK_DATA_BUF_1_HASH_IDX,
234 GFNI_BULK_DATA_BUF_2_HASH_IDX,
/* Cross-check scalar vs GFNI vs GFNI-bulk implementations on random
 * data: all three must produce the same hash for the same input.
 * The scalar path needs host-order words, the GFNI paths raw BE bytes,
 * so two copies of each buffer are kept. Skipped without GFNI support. */
239 test_toeplitz_hash_rand_data(void)
241 uint32_t data[2][DATA_SZ];
242 uint32_t scalar_data[2][DATA_SZ];
243 uint32_t hash[HASH_IDXES] = { 0 };
244 uint64_t rss_key_matrixes[RTE_DIM(default_rss_key)];
246 uint8_t *bulk_data[2];
248 if (!rte_thash_gfni_supported())
251 rte_thash_complete_matrix(rss_key_matrixes, default_rss_key,
252 RTE_DIM(default_rss_key));
/* Bulk API takes an array of byte pointers over the raw (BE) buffers. */
254 for (i = 0; i < 2; i++)
255 bulk_data[i] = (uint8_t *)data[i];
257 for (i = 0; i < ITER; i++) {
258 for (j = 0; j < DATA_SZ; j++) {
259 data[0][j] = rte_rand();
260 data[1][j] = rte_rand();
/* Scalar copy is byte-swapped so both views describe the same bytes. */
261 scalar_data[0][j] = rte_cpu_to_be_32(data[0][j]);
262 scalar_data[1][j] = rte_cpu_to_be_32(data[1][j]);
265 hash[SCALAR_DATA_BUF_1_HASH_IDX] = rte_softrss(scalar_data[0],
266 DATA_SZ, default_rss_key);
267 hash[SCALAR_DATA_BUF_2_HASH_IDX] = rte_softrss(scalar_data[1],
268 DATA_SZ, default_rss_key);
269 hash[GFNI_DATA_BUF_1_HASH_IDX] = rte_thash_gfni(
270 rss_key_matrixes, (uint8_t *)data[0],
271 DATA_SZ * sizeof(uint32_t));
272 hash[GFNI_DATA_BUF_2_HASH_IDX] = rte_thash_gfni(
273 rss_key_matrixes, (uint8_t *)data[1],
274 DATA_SZ * sizeof(uint32_t));
/* Bulk call fills the two GFNI_BULK_* slots in one shot. */
275 rte_thash_gfni_bulk(rss_key_matrixes,
276 DATA_SZ * sizeof(uint32_t), bulk_data,
277 &hash[GFNI_BULK_DATA_BUF_1_HASH_IDX], 2);
279 if ((hash[SCALAR_DATA_BUF_1_HASH_IDX] !=
280 hash[GFNI_DATA_BUF_1_HASH_IDX]) ||
281 (hash[SCALAR_DATA_BUF_1_HASH_IDX] !=
282 hash[GFNI_BULK_DATA_BUF_1_HASH_IDX]) ||
283 (hash[SCALAR_DATA_BUF_2_HASH_IDX] !=
284 hash[GFNI_DATA_BUF_2_HASH_IDX]) ||
285 (hash[SCALAR_DATA_BUF_2_HASH_IDX] !=
286 hash[GFNI_BULK_DATA_BUF_2_HASH_IDX]))
/* Verify rte_thash_gfni_bulk() against the known-answer vectors by
 * hashing one IPv4 and one IPv6 tuple per call. Both buffers are sized
 * for the largest (IPv6 L4) tuple; the IPv4 buffer is zero-padded
 * beyond its own length (tuples[] come from rte_zmalloc), which matches
 * Toeplitz semantics since the bulk call hashes a fixed length.
 * Skipped without GFNI support. */
300 test_toeplitz_hash_gfni_bulk(void)
303 union rte_thash_tuple tuple[2];
305 uint32_t rss[2] = { 0 };
306 uint64_t rss_key_matrixes[RTE_DIM(default_rss_key)];
308 if (!rte_thash_gfni_supported())
311 /* Convert RSS key into matrixes */
312 rte_thash_complete_matrix(rss_key_matrixes, default_rss_key,
313 RTE_DIM(default_rss_key));
315 for (i = 0; i < RTE_DIM(tuples); i++) {
316 /* allocate memory enough for a biggest tuple */
317 tuples[i] = rte_zmalloc(NULL, RTE_THASH_V6_L4_LEN * 4, 0);
318 if (tuples[i] == NULL)
322 for (i = 0; i < RTE_MIN(RTE_DIM(v4_tbl), RTE_DIM(v6_tbl)); i++) {
323 /*Load IPv4 headers and copy it into the corresponding tuple*/
324 tuple[0].v4.src_addr = rte_cpu_to_be_32(v4_tbl[i].src_ip);
325 tuple[0].v4.dst_addr = rte_cpu_to_be_32(v4_tbl[i].dst_ip);
/* NOTE(review): intentional sport/dport cross-assignment — see the
 * byte-layout note in test_toeplitz_hash_gfni. */
326 tuple[0].v4.sport = rte_cpu_to_be_16(v4_tbl[i].dst_port);
327 tuple[0].v4.dport = rte_cpu_to_be_16(v4_tbl[i].src_port);
328 rte_memcpy(tuples[0], &tuple[0], RTE_THASH_V4_L4_LEN * 4);
330 /*Load IPv6 headers and copy it into the corresponding tuple*/
331 for (j = 0; j < RTE_DIM(tuple[1].v6.src_addr); j++)
332 tuple[1].v6.src_addr[j] = v6_tbl[i].src_ip[j];
333 for (j = 0; j < RTE_DIM(tuple[1].v6.dst_addr); j++)
334 tuple[1].v6.dst_addr[j] = v6_tbl[i].dst_ip[j];
335 tuple[1].v6.sport = rte_cpu_to_be_16(v6_tbl[i].dst_port);
336 tuple[1].v6.dport = rte_cpu_to_be_16(v6_tbl[i].src_port);
337 rte_memcpy(tuples[1], &tuple[1], RTE_THASH_V6_L4_LEN * 4);
339 rte_thash_gfni_bulk(rss_key_matrixes, RTE_THASH_V6_L4_LEN * 4,
342 if ((rss[RSS_V4_IDX] != v4_tbl[i].hash_l3l4) ||
343 (rss[RSS_V6_IDX] != v6_tbl[i].hash_l3l4))
/* Verify GFNI hashing with a tuple/key larger than the standard 40
 * bytes: hash the same data with scalar rte_softrss (host-order copy)
 * and rte_thash_gfni (raw BE bytes) under big_rss_key; results must
 * match. Skipped without GFNI support.
 * NOTE(review): declaration/initialization of arr[] is not visible in
 * this chunk — presumably a 16-word random/fixed BE buffer. */
351 test_big_tuple_gfni(void)
354 uint32_t arr_softrss[16];
355 uint32_t hash_1, hash_2;
356 uint64_t rss_key_matrixes[RTE_DIM(big_rss_key)];
357 unsigned int i, size = RTE_DIM(arr) * sizeof(uint32_t);
359 if (!rte_thash_gfni_supported())
362 /* Convert RSS key into matrixes */
363 rte_thash_complete_matrix(rss_key_matrixes, big_rss_key,
364 RTE_DIM(big_rss_key));
/* Scalar path wants host byte order; make a converted copy. */
366 for (i = 0; i < RTE_DIM(arr); i++) {
368 arr_softrss[i] = rte_be_to_cpu_32(arr[i]);
371 hash_1 = rte_softrss(arr_softrss, RTE_DIM(arr), big_rss_key);
372 hash_2 = rte_thash_gfni(rss_key_matrixes, (uint8_t *)arr, size);
374 if (hash_1 != hash_2)
/* Negative tests for rte_thash_init_ctx(): NULL name, zero key length,
 * and out-of-range reta_sz (valid range appears to be [2, 16] given the
 * 1 and 17 probes) must all fail with a NULL context. */
381 test_create_invalid(void)
383 struct rte_thash_ctx *ctx;
387 ctx = rte_thash_init_ctx(NULL, key_len, reta_sz, NULL, 0);
388 RTE_TEST_ASSERT(ctx == NULL,
389 "Call succeeded with invalid parameters\n");
391 ctx = rte_thash_init_ctx("test", 0, reta_sz, NULL, 0);
392 RTE_TEST_ASSERT(ctx == NULL,
393 "Call succeeded with invalid parameters\n");
395 ctx = rte_thash_init_ctx(NULL, key_len, 1, NULL, 0);
396 RTE_TEST_ASSERT(ctx == NULL,
397 "Call succeeded with invalid parameters\n");
399 ctx = rte_thash_init_ctx(NULL, key_len, 17, NULL, 0);
400 RTE_TEST_ASSERT(ctx == NULL,
401 "Call succeeded with invalid parameters\n");
/* Repeatedly create and free a context with the same name; checks for
 * leaks/stale registrations that would make a later create fail. */
407 test_multiple_create(void)
409 struct rte_thash_ctx *ctx;
414 for (i = 0; i < 100; i++) {
415 ctx = rte_thash_init_ctx("test", key_len, reta_sz, NULL, 0);
416 RTE_TEST_ASSERT(ctx != NULL, "Can not create CTX\n");
418 rte_thash_free_ctx(ctx);
/* NOTE(review): signature line not visible in this chunk; from the
 * TEST_CASE table this is test_free_null — checks that
 * rte_thash_free_ctx(NULL) is a harmless no-op. */
427 struct rte_thash_ctx *ctx;
429 ctx = rte_thash_init_ctx("test", 40, 7, NULL, 0);
430 RTE_TEST_ASSERT(ctx != NULL, "Can not create CTX\n");
432 rte_thash_free_ctx(ctx);
/* Freeing NULL must not crash. */
433 rte_thash_free_ctx(NULL);
/* Negative tests for rte_thash_add_helper(): NULL ctx/name, too-small
 * length, out-of-key offset, duplicate names, and overlapping key
 * ranges must all be rejected with the documented error codes. */
439 test_add_invalid_helper(void)
441 struct rte_thash_ctx *ctx;
442 const int key_len = 40;
446 ctx = rte_thash_init_ctx("test", key_len, reta_sz, NULL, 0);
447 RTE_TEST_ASSERT(ctx != NULL, "can not create thash ctx\n");
449 ret = rte_thash_add_helper(NULL, "test", reta_sz, 0);
450 RTE_TEST_ASSERT(ret == -EINVAL,
451 "Call succeeded with invalid parameters\n");
453 ret = rte_thash_add_helper(ctx, NULL, reta_sz, 0);
454 RTE_TEST_ASSERT(ret == -EINVAL,
455 "Call succeeded with invalid parameters\n");
/* Helper length must be at least reta_sz bits. */
457 ret = rte_thash_add_helper(ctx, "test", reta_sz - 1, 0);
458 RTE_TEST_ASSERT(ret == -EINVAL,
459 "Call succeeded with invalid parameters\n");
/* Offset at the very end of the key (in bits) leaves no room. */
461 ret = rte_thash_add_helper(ctx, "test", reta_sz, key_len * 8);
462 RTE_TEST_ASSERT(ret == -EINVAL,
463 "Call succeeded with invalid parameters\n");
465 ret = rte_thash_add_helper(ctx, "first_range", reta_sz, 0);
466 RTE_TEST_ASSERT(ret == 0, "Can not create helper\n");
/* Same name again must be rejected. */
468 ret = rte_thash_add_helper(ctx, "first_range", reta_sz, 0);
469 RTE_TEST_ASSERT(ret == -EEXIST,
470 "Call succeeded with duplicated name\n");
473 * Create second helper with offset 3 * reta_sz.
474 * Note first_range helper created range in key:
475 * [0, 32 + length{= reta_sz} - 1), i.e [0, 37).
476 * second range is [44, 81)
478 ret = rte_thash_add_helper(ctx, "second_range", reta_sz,
480 RTE_TEST_ASSERT(ret == 0, "Can not create helper\n");
483 * Try to create overlapping with first_ and second_ ranges,
486 ret = rte_thash_add_helper(ctx, "third_range", 2 * reta_sz, reta_sz);
487 RTE_TEST_ASSERT(ret == -EEXIST,
488 "Call succeeded with overlapping ranges\n");
490 rte_thash_free_ctx(ctx);
/* rte_thash_find_existing() must return a previously created context
 * when looked up by name. */
496 test_find_existing(void)
498 struct rte_thash_ctx *ctx, *ret_ctx;
500 ctx = rte_thash_init_ctx("test", 40, 7, NULL, 0);
501 RTE_TEST_ASSERT(ctx != NULL, "can not create thash ctx\n");
503 ret_ctx = rte_thash_find_existing("test");
504 RTE_TEST_ASSERT(ret_ctx != NULL, "can not find existing ctx\n");
506 rte_thash_free_ctx(ctx);
/* rte_thash_get_helper(): NULL ctx or NULL name must fail; a helper
 * added under a name must then be retrievable by that name. */
512 test_get_helper(void)
514 struct rte_thash_ctx *ctx;
515 struct rte_thash_subtuple_helper *h;
518 ctx = rte_thash_init_ctx("test", 40, 7, NULL, 0);
519 RTE_TEST_ASSERT(ctx != NULL, "Can not create thash ctx\n");
521 h = rte_thash_get_helper(NULL, "first_range");
522 RTE_TEST_ASSERT(h == NULL, "Call succeeded with invalid parameters\n");
524 h = rte_thash_get_helper(ctx, NULL);
525 RTE_TEST_ASSERT(h == NULL, "Call succeeded with invalid parameters\n");
527 ret = rte_thash_add_helper(ctx, "first_range", 8, 0);
528 RTE_TEST_ASSERT(ret == 0, "Can not create helper\n");
530 h = rte_thash_get_helper(ctx, "first_range");
531 RTE_TEST_ASSERT(h != NULL, "Can not find helper\n");
533 rte_thash_free_ctx(ctx);
/* Check m-sequence period-overflow handling: a helper range longer than
 * the (2^reta_sz - 1) period must be rejected with -ENOSPC by default,
 * and accepted when the context was created with
 * RTE_THASH_IGNORE_PERIOD_OVERFLOW. */
539 test_period_overflow(void)
541 struct rte_thash_ctx *ctx;
542 int reta_sz = 7; /* reflects polynomial degree */
545 /* first create without RTE_THASH_IGNORE_PERIOD_OVERFLOW flag */
546 ctx = rte_thash_init_ctx("test", 40, reta_sz, NULL, 0);
547 RTE_TEST_ASSERT(ctx != NULL, "Can not create thash ctx\n");
549 /* requested range > (2^reta_sz) - 1 */
550 ret = rte_thash_add_helper(ctx, "test", (1 << reta_sz), 0);
551 RTE_TEST_ASSERT(ret == -ENOSPC,
552 "Call succeeded with invalid parameters\n");
554 /* requested range == len + 32 - 1, smaller than (2^reta_sz) - 1 */
555 ret = rte_thash_add_helper(ctx, "test", (1 << reta_sz) - 32, 0);
556 RTE_TEST_ASSERT(ret == 0, "Can not create helper\n");
558 rte_thash_free_ctx(ctx);
560 /* create with RTE_THASH_IGNORE_PERIOD_OVERFLOW flag */
561 ctx = rte_thash_init_ctx("test", 40, reta_sz, NULL,
562 RTE_THASH_IGNORE_PERIOD_OVERFLOW);
563 RTE_TEST_ASSERT(ctx != NULL, "Can not create thash ctx\n");
565 /* requested range > (2^reta_sz - 1) */
566 ret = rte_thash_add_helper(ctx, "test", (1 << reta_sz) + 10, 0);
567 RTE_TEST_ASSERT(ret == 0, "Can not create helper\n");
569 rte_thash_free_ctx(ctx);
/* Predictable-RSS end-to-end test with RTE_THASH_MINIMAL_SEQ: create a
 * helper over the source-port bits of an IPv4 tuple, compute the hash,
 * get the complement needed to steer the hash's low reta_sz bits to a
 * desired value, XOR it into the port, and verify the adjusted hash
 * lands on the desired RETA index. */
575 test_predictable_rss_min_seq(void)
577 struct rte_thash_ctx *ctx;
578 struct rte_thash_subtuple_helper *h;
579 const int key_len = 40;
581 uint8_t initial_key[key_len];
582 const uint8_t *new_key;
584 union rte_thash_tuple tuple;
585 uint32_t orig_hash, adj_hash, adj;
586 unsigned int desired_value = 27 & HASH_MSK(reta_sz);
587 uint16_t port_value = 22;
589 memset(initial_key, 0, key_len);
591 ctx = rte_thash_init_ctx("test", key_len, reta_sz, initial_key,
592 RTE_THASH_MINIMAL_SEQ);
593 RTE_TEST_ASSERT(ctx != NULL, "can not create thash ctx\n");
/* Helper covers exactly the 16 sport bits at their bit offset in the
 * tuple, so the complement can be applied via the port alone. */
595 ret = rte_thash_add_helper(ctx, "snat", sizeof(uint16_t) * 8,
596 offsetof(union rte_thash_tuple, v4.sport) * 8);
597 RTE_TEST_ASSERT(ret == 0, "can not add helper, ret %d\n", ret);
599 h = rte_thash_get_helper(ctx, "snat");
600 RTE_TEST_ASSERT(h != NULL, "can not find helper\n");
602 new_key = rte_thash_get_key(ctx);
603 tuple.v4.src_addr = RTE_IPV4(0, 0, 0, 0);
604 tuple.v4.dst_addr = RTE_IPV4(0, 0, 0, 0);
606 tuple.v4.sport = rte_cpu_to_be_16(port_value);
/* Byte-order dance: sctp_tag overlays the sport/dport pair, so
 * swapping it as a 32-bit word puts the ports in the host-order word
 * layout that rte_softrss expects. */
608 tuple.v4.sctp_tag = rte_be_to_cpu_32(tuple.v4.sctp_tag);
610 orig_hash = rte_softrss((uint32_t *)&tuple,
611 RTE_THASH_V4_L4_LEN, new_key);
612 adj = rte_thash_get_complement(h, orig_hash, desired_value);
/* Convert back to BE, apply the complement to the port bits, then
 * return to host order for the second hash computation. */
614 tuple.v4.sctp_tag = rte_cpu_to_be_32(tuple.v4.sctp_tag);
615 tuple.v4.sport ^= rte_cpu_to_be_16(adj);
616 tuple.v4.sctp_tag = rte_be_to_cpu_32(tuple.v4.sctp_tag);
618 adj_hash = rte_softrss((uint32_t *)&tuple,
619 RTE_THASH_V4_L4_LEN, new_key);
620 RTE_TEST_ASSERT((adj_hash & HASH_MSK(reta_sz)) ==
621 desired_value, "bad desired value\n");
623 rte_thash_free_ctx(ctx);
629 * This test creates 7 subranges in the following order:
630 * range_one = [56, 95), len = 8, offset = 56
631 * range_two = [64, 103), len = 8, offset = 64
632 * range_three = [120, 159), len = 8, offset = 120
633 * range_four = [48, 87), len = 8, offset = 48
634 * range_five = [57, 95), len = 7, offset = 57
635 * range_six = [40, 111), len = 40, offset = 40
636 * range_seven = [0, 39), len = 8, offset = 0
/* Subranges for the multirange test, per the layout described in the
 * comment above. NOTE(review): only one entry ("three") is visible in
 * this chunk; fields appear to be {name, len, offset, byte_idx} based
 * on their usage in test_predictable_rss_multirange. */
645 struct range rng_arr[] = {
648 {"three", 8, 120, 15},
/* Predictable-RSS stress test over multiple (partially overlapping)
 * helper subranges: for each helper, 100 random 16-byte tuples are
 * hashed, complemented via rte_thash_get_complement(), adjusted at the
 * helper's byte index, and re-hashed; every adjusted hash must hit the
 * same randomly chosen desired RETA index. */
656 test_predictable_rss_multirange(void)
658 struct rte_thash_ctx *ctx;
659 struct rte_thash_subtuple_helper *h[RTE_DIM(rng_arr)];
660 const uint8_t *new_key;
661 const int key_len = 40;
663 unsigned int i, j, k;
665 uint32_t desired_value = rte_rand() & HASH_MSK(reta_sz);
666 uint8_t tuples[RTE_DIM(rng_arr)][16] = { {0} };
668 uint32_t hashes[RTE_DIM(rng_arr)];
669 uint32_t adj_hashes[RTE_DIM(rng_arr)];
672 ctx = rte_thash_init_ctx("test", key_len, reta_sz, NULL, 0);
673 RTE_TEST_ASSERT(ctx != NULL, "can not create thash ctx\n");
675 for (i = 0; i < RTE_DIM(rng_arr); i++) {
676 ret = rte_thash_add_helper(ctx, rng_arr[i].name,
677 rng_arr[i].len, rng_arr[i].offset);
678 RTE_TEST_ASSERT(ret == 0, "can not add helper\n");
680 h[i] = rte_thash_get_helper(ctx, rng_arr[i].name);
681 RTE_TEST_ASSERT(h[i] != NULL, "can not find helper\n");
683 new_key = rte_thash_get_key(ctx);
686 * calculate hashes, complements, then adjust keys with
687 * complements and recalculate hashes
689 for (i = 0; i < RTE_DIM(rng_arr); i++) {
690 for (k = 0; k < 100; k++) {
691 /* init with random keys */
692 ptr = (uint32_t *)&tuples[i][0];
693 for (j = 0; j < 4; j++)
695 /* convert keys from BE to CPU byte order */
696 for (j = 0; j < 4; j++)
697 ptr[j] = rte_be_to_cpu_32(ptr[j]);
699 hashes[i] = rte_softrss(ptr, 4, new_key);
700 adj = rte_thash_get_complement(h[i], hashes[i],
702 /* convert back to BE to adjust the value */
703 for (j = 0; j < 4; j++)
704 ptr[j] = rte_cpu_to_be_32(ptr[j]);
/* Apply the complement at the byte the helper's range covers. */
706 tuples[i][rng_arr[i].byte_idx] ^= adj;
708 for (j = 0; j < 4; j++)
709 ptr[j] = rte_be_to_cpu_32(ptr[j]);
711 adj_hashes[i] = rte_softrss(ptr, 4, new_key);
712 RTE_TEST_ASSERT((adj_hashes[i] & HASH_MSK(reta_sz)) ==
714 "bad desired value for %d tuple\n", i);
718 rte_thash_free_ctx(ctx);
/* Callback for rte_thash_adjust_tuple(): treats userdata as a reference
 * tuple and reports a "collision" (non-match is non-zero per memcmp
 * semantics; 0 means equal, which the adjust loop treats as occupied). */
724 cmp_tuple_eq(void *userdata, uint8_t *tuple)
726 return memcmp(userdata, tuple, TUPLE_SZ);
/* Tests rte_thash_adjust_tuple(): (1) plain adjustment steers the hash
 * to desired_value; (2) with a collision callback that always matches
 * and only 1 attempt, the call must report -EEXIST; (3) with 2 attempts
 * the function may randomize the subtuple bits and succeed. */
730 test_adjust_tuple(void)
732 struct rte_thash_ctx *ctx;
733 struct rte_thash_subtuple_helper *h;
734 const int key_len = 40;
735 const uint8_t *new_key;
736 uint8_t tuple[TUPLE_SZ];
737 uint32_t tmp_tuple[TUPLE_SZ / sizeof(uint32_t)];
738 uint32_t tuple_copy[TUPLE_SZ / sizeof(uint32_t)];
740 int reta_sz = CHAR_BIT;
742 unsigned int i, desired_value = rte_rand() & HASH_MSK(reta_sz);
744 memset(tuple, 0xab, TUPLE_SZ);
746 ctx = rte_thash_init_ctx("test", key_len, reta_sz, NULL, 0);
747 RTE_TEST_ASSERT(ctx != NULL, "can not create thash ctx\n");
750 * set offset to be in the middle of a byte
751 * set size of the subtuple to be 2 * rets_sz
752 * to have the room for random bits
754 ret = rte_thash_add_helper(ctx, "test", reta_sz * 2,
756 RTE_TEST_ASSERT(ret == 0, "can not add helper, ret %d\n", ret);
758 new_key = rte_thash_get_key(ctx);
760 h = rte_thash_get_helper(ctx, "test");
761 RTE_TEST_ASSERT(h != NULL, "can not find helper\n");
763 ret = rte_thash_adjust_tuple(ctx, h, tuple, TUPLE_SZ, desired_value,
765 RTE_TEST_ASSERT(ret == 0, "can not adjust tuple, ret %d\n", ret);
/* Re-hash the adjusted tuple (converted to host order words) and
 * check its low reta_sz bits match the desired RETA index. */
767 for (i = 0; i < (TUPLE_SZ / 4); i++)
769 rte_be_to_cpu_32(*(uint32_t *)&tuple[i * 4]);
771 hash = rte_softrss(tmp_tuple, TUPLE_SZ / 4, new_key);
772 RTE_TEST_ASSERT((hash & HASH_MSK(reta_sz)) ==
773 desired_value, "bad desired value\n");
776 /* Pass previously calculated tuple to callback function */
777 memcpy(tuple_copy, tuple, TUPLE_SZ);
779 memset(tuple, 0xab, TUPLE_SZ);
/* With only one attempt and a callback that reports the result as
 * already present, the call must fail with -EEXIST. */
780 ret = rte_thash_adjust_tuple(ctx, h, tuple, TUPLE_SZ, desired_value,
781 1, cmp_tuple_eq, tuple_copy);
782 RTE_TEST_ASSERT(ret == -EEXIST,
783 "adjust tuple didn't indicate collision\n");
786 * Make the function to generate random bits into subtuple
787 * after first adjustment attempt.
789 memset(tuple, 0xab, TUPLE_SZ);
790 ret = rte_thash_adjust_tuple(ctx, h, tuple, TUPLE_SZ, desired_value,
791 2, cmp_tuple_eq, tuple_copy);
792 RTE_TEST_ASSERT(ret == 0, "can not adjust tuple, ret %d\n", ret);
794 for (i = 0; i < (TUPLE_SZ / 4); i++)
796 rte_be_to_cpu_32(*(uint32_t *)&tuple[i * 4]);
798 hash = rte_softrss(tmp_tuple, TUPLE_SZ / 4, new_key);
799 RTE_TEST_ASSERT((hash & HASH_MSK(reta_sz)) ==
800 desired_value, "bad desired value\n");
802 rte_thash_free_ctx(ctx);
/* Test-suite table consumed by the DPDK unit-test runner; one TEST_CASE
 * per function above, executed in order. */
807 static struct unit_test_suite thash_tests = {
808 .suite_name = "thash autotest",
812 TEST_CASE(test_toeplitz_hash_calc),
813 TEST_CASE(test_toeplitz_hash_gfni),
814 TEST_CASE(test_toeplitz_hash_rand_data),
815 TEST_CASE(test_toeplitz_hash_gfni_bulk),
816 TEST_CASE(test_big_tuple_gfni),
817 TEST_CASE(test_create_invalid),
818 TEST_CASE(test_multiple_create),
819 TEST_CASE(test_free_null),
820 TEST_CASE(test_add_invalid_helper),
821 TEST_CASE(test_find_existing),
822 TEST_CASE(test_get_helper),
823 TEST_CASE(test_period_overflow),
824 TEST_CASE(test_predictable_rss_min_seq),
825 TEST_CASE(test_predictable_rss_multirange),
826 TEST_CASE(test_adjust_tuple),
/* Suite entry point (enclosing function signature not visible here). */
834 return unit_test_suite_runner(&thash_tests);
/* Registers the suite under the "thash_autotest" command. */
837 REGISTER_TEST_COMMAND(thash_autotest, test_thash);