/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_prefetch.h>
#include <rte_random.h>
#include <rte_vect.h>

#include "rte_member.h"
#include "rte_member_ht.h"

#if defined(RTE_ARCH_X86)
#include "rte_member_x86.h"
#endif

/* Search a bucket for an entry with tmp_sig and update its set_id */
static int
update_entry_search(uint32_t bucket_id, member_sig_t tmp_sig,
		struct member_ht_bucket *buckets,
		member_set_t set_id)
{
	uint32_t i;

	for (i = 0; i < RTE_MEMBER_BUCKET_ENTRIES; i++) {
		if (buckets[bucket_id].sigs[i] == tmp_sig) {
			buckets[bucket_id].sets[i] = set_id;
			return 1;
		}
	}
	return 0;
}

/* Search a bucket for an entry with tmp_sig and return its set_id */
static int
search_bucket_single(uint32_t bucket_id, member_sig_t tmp_sig,
		struct member_ht_bucket *buckets,
		member_set_t *set_id)
{
	uint32_t iter;

	for (iter = 0; iter < RTE_MEMBER_BUCKET_ENTRIES; iter++) {
		if (tmp_sig == buckets[bucket_id].sigs[iter] &&
				buckets[bucket_id].sets[iter] !=
				RTE_MEMBER_NO_MATCH) {
			*set_id = buckets[bucket_id].sets[iter];
			return 1;
		}
	}
	return 0;
}

/* Collect the set_ids of all entries in a bucket that match tmp_sig */
static void
search_bucket_multi(uint32_t bucket_id, member_sig_t tmp_sig,
		struct member_ht_bucket *buckets,
		uint32_t *counter,
		uint32_t matches_per_key,
		member_set_t *set_id)
{
	uint32_t iter;

	for (iter = 0; iter < RTE_MEMBER_BUCKET_ENTRIES; iter++) {
		if (tmp_sig == buckets[bucket_id].sigs[iter] &&
				buckets[bucket_id].sets[iter] !=
				RTE_MEMBER_NO_MATCH) {
			set_id[*counter] = buckets[bucket_id].sets[iter];
			(*counter)++;
			if (*counter >= matches_per_key)
				return;
		}
	}
}

int
rte_member_create_ht(struct rte_member_setsum *ss,
		const struct rte_member_parameters *params)
{
	uint32_t i, j;
	uint32_t size_bucket_t;
	uint32_t num_entries = rte_align32pow2(params->num_keys);

	if ((num_entries > RTE_MEMBER_ENTRIES_MAX) ||
			!rte_is_power_of_2(RTE_MEMBER_BUCKET_ENTRIES) ||
			num_entries < RTE_MEMBER_BUCKET_ENTRIES) {
		rte_errno = EINVAL;
		RTE_MEMBER_LOG(ERR,
			"Membership HT create with invalid parameters\n");
		return -EINVAL;
	}

	uint32_t num_buckets = num_entries / RTE_MEMBER_BUCKET_ENTRIES;

	size_bucket_t = sizeof(struct member_ht_bucket);

	struct member_ht_bucket *buckets = rte_zmalloc_socket(NULL,
			num_buckets * size_bucket_t,
			RTE_CACHE_LINE_SIZE, ss->socket_id);

	if (buckets == NULL) {
		RTE_MEMBER_LOG(ERR, "memory allocation failed for HT "
					"setsummary\n");
		return -ENOMEM;
	}

	ss->table = buckets;
	ss->bucket_cnt = num_buckets;
	ss->bucket_mask = num_buckets - 1;
	ss->cache = params->is_cache;

	/* Mark every slot as empty */
	for (i = 0; i < num_buckets; i++) {
		for (j = 0; j < RTE_MEMBER_BUCKET_ENTRIES; j++)
			buckets[i].sets[j] = RTE_MEMBER_NO_MATCH;
	}
#if defined(RTE_ARCH_X86)
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) &&
			RTE_MEMBER_BUCKET_ENTRIES == 16 &&
			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
		ss->sig_cmp_fn = RTE_MEMBER_COMPARE_AVX2;
	else
#endif
		ss->sig_cmp_fn = RTE_MEMBER_COMPARE_SCALAR;

	RTE_MEMBER_LOG(DEBUG, "Hash table based filter created, "
		"the table has %u entries, %u buckets\n",
		num_entries, num_buckets);
	return 0;
}

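/*
 * Illustrative sizing example (not part of the original source): with
 * params->num_keys = 1000, num_entries = rte_align32pow2(1000) = 1024.
 * Assuming RTE_MEMBER_BUCKET_ENTRIES == 16, this gives
 * num_buckets = 1024 / 16 = 64 and bucket_mask = 63, so bucket indices
 * below are derived as "hash & 63".
 */
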
static void
get_buckets_index(const struct rte_member_setsum *ss, const void *key,
		uint32_t *prim_bkt, uint32_t *sec_bkt, member_sig_t *sig)
{
	uint32_t first_hash = MEMBER_HASH_FUNC(key, ss->key_len,
					ss->prim_hash_seed);
	uint32_t sec_hash = MEMBER_HASH_FUNC(&first_hash, sizeof(uint32_t),
					ss->sec_hash_seed);
	/*
	 * We use the first hash value for the signature, and the second hash
	 * value to derive the primary and secondary bucket locations.
	 *
	 * For non-cache mode, we use the lower bits of the second hash for
	 * the primary bucket location, then xor the primary bucket location
	 * with the signature to get the secondary bucket location. This is
	 * the "partial-key cuckoo hashing" proposed in B. Fan et al.'s paper
	 * "Cuckoo Filter: Practically Better Than Bloom". The benefit of the
	 * xor is that either bucket location can be derived from the other
	 * one and the signature alone, which is required by non-cache mode's
	 * eviction and deletion process, without storing the alternative
	 * hash value or the full key. (A worked example follows this
	 * function.)
	 *
	 * For cache mode, we use the lower bits for the primary bucket
	 * location and the higher bits for the secondary bucket location. In
	 * cache mode, keys are simply overwritten if a bucket is full, and we
	 * do not use xor since the lower/higher bits are more independent
	 * hash values and thus should provide a slightly better table load.
	 */
	*sig = first_hash;
	if (ss->cache) {
		*prim_bkt = sec_hash & ss->bucket_mask;
		*sec_bkt = (sec_hash >> 16) & ss->bucket_mask;
	} else {
		*prim_bkt = sec_hash & ss->bucket_mask;
		*sec_bkt = (*prim_bkt ^ *sig) & ss->bucket_mask;
	}
}

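/*
 * Worked example of the partial-key scheme above (illustration only, the
 * values are arbitrary): with bucket_mask = 0xFF, sig = 0xBEEF and
 * sec_hash = 0x12345678, the primary bucket is 0x12345678 & 0xFF = 0x78 and
 * the secondary bucket is (0x78 ^ 0xBEEF) & 0xFF = 0x97. Either location can
 * be recovered from the other plus the signature, since
 * (0x97 ^ 0xBEEF) & 0xFF = 0x78 again.
 */
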
int
rte_member_lookup_ht(const struct rte_member_setsum *ss,
		const void *key, member_set_t *set_id)
{
	uint32_t prim_bucket, sec_bucket;
	member_sig_t tmp_sig;
	struct member_ht_bucket *buckets = ss->table;

	*set_id = RTE_MEMBER_NO_MATCH;
	get_buckets_index(ss, key, &prim_bucket, &sec_bucket, &tmp_sig);

	switch (ss->sig_cmp_fn) {
#if defined(RTE_ARCH_X86) && defined(__AVX2__)
	case RTE_MEMBER_COMPARE_AVX2:
		if (search_bucket_single_avx(prim_bucket, tmp_sig, buckets,
				set_id) ||
			search_bucket_single_avx(sec_bucket, tmp_sig,
				buckets, set_id))
			return 1;
		break;
#endif
	default:
		if (search_bucket_single(prim_bucket, tmp_sig, buckets,
				set_id) ||
			search_bucket_single(sec_bucket, tmp_sig,
				buckets, set_id))
			return 1;
	}

	return 0;
}

uint32_t
rte_member_lookup_bulk_ht(const struct rte_member_setsum *ss,
		const void **keys, uint32_t num_keys, member_set_t *set_id)
{
	uint32_t i;
	uint32_t num_matches = 0;
	struct member_ht_bucket *buckets = ss->table;
	member_sig_t tmp_sig[RTE_MEMBER_LOOKUP_BULK_MAX];
	uint32_t prim_buckets[RTE_MEMBER_LOOKUP_BULK_MAX];
	uint32_t sec_buckets[RTE_MEMBER_LOOKUP_BULK_MAX];

	/* Compute all bucket locations first and prefetch them */
	for (i = 0; i < num_keys; i++) {
		get_buckets_index(ss, keys[i], &prim_buckets[i],
				&sec_buckets[i], &tmp_sig[i]);
		rte_prefetch0(&buckets[prim_buckets[i]]);
		rte_prefetch0(&buckets[sec_buckets[i]]);
	}

	for (i = 0; i < num_keys; i++) {
		switch (ss->sig_cmp_fn) {
#if defined(RTE_ARCH_X86) && defined(__AVX2__)
		case RTE_MEMBER_COMPARE_AVX2:
			if (search_bucket_single_avx(prim_buckets[i],
					tmp_sig[i], buckets, &set_id[i]) ||
				search_bucket_single_avx(sec_buckets[i],
					tmp_sig[i], buckets, &set_id[i]))
				num_matches++;
			else
				set_id[i] = RTE_MEMBER_NO_MATCH;
			break;
#endif
		default:
			if (search_bucket_single(prim_buckets[i], tmp_sig[i],
					buckets, &set_id[i]) ||
					search_bucket_single(sec_buckets[i],
					tmp_sig[i], buckets, &set_id[i]))
				num_matches++;
			else
				set_id[i] = RTE_MEMBER_NO_MATCH;
		}
	}
	return num_matches;
}

uint32_t
rte_member_lookup_multi_ht(const struct rte_member_setsum *ss,
		const void *key, uint32_t match_per_key,
		member_set_t *set_id)
{
	uint32_t num_matches = 0;
	uint32_t prim_bucket, sec_bucket;
	member_sig_t tmp_sig;
	struct member_ht_bucket *buckets = ss->table;

	get_buckets_index(ss, key, &prim_bucket, &sec_bucket, &tmp_sig);

	switch (ss->sig_cmp_fn) {
#if defined(RTE_ARCH_X86) && defined(__AVX2__)
	case RTE_MEMBER_COMPARE_AVX2:
		search_bucket_multi_avx(prim_bucket, tmp_sig, buckets,
			&num_matches, match_per_key, set_id);
		if (num_matches < match_per_key)
			search_bucket_multi_avx(sec_bucket, tmp_sig,
				buckets, &num_matches, match_per_key, set_id);
		return num_matches;
#endif
	default:
		search_bucket_multi(prim_bucket, tmp_sig, buckets, &num_matches,
				match_per_key, set_id);
		if (num_matches < match_per_key)
			search_bucket_multi(sec_bucket, tmp_sig,
				buckets, &num_matches, match_per_key, set_id);
		return num_matches;
	}
}

uint32_t
rte_member_lookup_multi_bulk_ht(const struct rte_member_setsum *ss,
		const void **keys, uint32_t num_keys, uint32_t match_per_key,
		uint32_t *match_count,
		member_set_t *set_ids)
{
	uint32_t i;
	uint32_t num_matches = 0;
	struct member_ht_bucket *buckets = ss->table;
	uint32_t match_cnt_tmp;
	member_sig_t tmp_sig[RTE_MEMBER_LOOKUP_BULK_MAX];
	uint32_t prim_buckets[RTE_MEMBER_LOOKUP_BULK_MAX];
	uint32_t sec_buckets[RTE_MEMBER_LOOKUP_BULK_MAX];

	/* Compute all bucket locations first and prefetch them */
	for (i = 0; i < num_keys; i++) {
		get_buckets_index(ss, keys[i], &prim_buckets[i],
				&sec_buckets[i], &tmp_sig[i]);
		rte_prefetch0(&buckets[prim_buckets[i]]);
		rte_prefetch0(&buckets[sec_buckets[i]]);
	}
	for (i = 0; i < num_keys; i++) {
		match_cnt_tmp = 0;

		switch (ss->sig_cmp_fn) {
#if defined(RTE_ARCH_X86) && defined(__AVX2__)
		case RTE_MEMBER_COMPARE_AVX2:
			search_bucket_multi_avx(prim_buckets[i], tmp_sig[i],
				buckets, &match_cnt_tmp, match_per_key,
				&set_ids[i*match_per_key]);
			if (match_cnt_tmp < match_per_key)
				search_bucket_multi_avx(sec_buckets[i],
					tmp_sig[i], buckets, &match_cnt_tmp,
					match_per_key,
					&set_ids[i*match_per_key]);
			match_count[i] = match_cnt_tmp;
			if (match_cnt_tmp != 0)
				num_matches++;
			break;
#endif
		default:
			search_bucket_multi(prim_buckets[i], tmp_sig[i],
				buckets, &match_cnt_tmp, match_per_key,
				&set_ids[i*match_per_key]);
			if (match_cnt_tmp < match_per_key)
				search_bucket_multi(sec_buckets[i], tmp_sig[i],
					buckets, &match_cnt_tmp, match_per_key,
					&set_ids[i*match_per_key]);
			match_count[i] = match_cnt_tmp;
			if (match_cnt_tmp != 0)
				num_matches++;
		}
	}
	return num_matches;
}

static int
try_insert(struct member_ht_bucket *buckets, uint32_t prim, uint32_t sec,
		member_sig_t sig, member_set_t set_id)
{
	int i;
	/* If the primary bucket is not full, insert into a free slot */
	for (i = 0; i < RTE_MEMBER_BUCKET_ENTRIES; i++) {
		if (buckets[prim].sets[i] == RTE_MEMBER_NO_MATCH) {
			buckets[prim].sigs[i] = sig;
			buckets[prim].sets[i] = set_id;
			return 0;
		}
	}
	/* If the primary bucket is full, try the secondary bucket */
	for (i = 0; i < RTE_MEMBER_BUCKET_ENTRIES; i++) {
		if (buckets[sec].sets[i] == RTE_MEMBER_NO_MATCH) {
			buckets[sec].sigs[i] = sig;
			buckets[sec].sets[i] = set_id;
			return 0;
		}
	}
	return -1;
}

/* Update an existing entry with a matching signature in either bucket */
static int
try_update(struct member_ht_bucket *buckets, uint32_t prim, uint32_t sec,
		member_sig_t sig, member_set_t set_id,
		enum rte_member_sig_compare_function cmp_fn)
{
	switch (cmp_fn) {
#if defined(RTE_ARCH_X86) && defined(__AVX2__)
	case RTE_MEMBER_COMPARE_AVX2:
		if (update_entry_search_avx(prim, sig, buckets, set_id) ||
				update_entry_search_avx(sec, sig, buckets,
					set_id))
			return 0;
		break;
#endif
	default:
		if (update_entry_search(prim, sig, buckets, set_id) ||
				update_entry_search(sec, sig, buckets,
					set_id))
			return 0;
	}
	return -1;
}

static int
evict_from_bucket(void)
{
	/* For now, we randomly pick one entry to evict */
	return rte_rand() & (RTE_MEMBER_BUCKET_ENTRIES - 1);
}

/*
 * This function is similar to the cuckoo hash make_space function in the
 * hash library: it recursively displaces entries until a free slot is found.
 */
static int
make_space_bucket(const struct rte_member_setsum *ss, uint32_t bkt_idx,
			unsigned int *nr_pushes)
{
	unsigned int i, j;
	int ret;
	struct member_ht_bucket *buckets = ss->table;
	uint32_t next_bucket_idx;
	struct member_ht_bucket *next_bkt[RTE_MEMBER_BUCKET_ENTRIES];
	struct member_ht_bucket *bkt = &buckets[bkt_idx];
	/* The MSB is set to indicate that an entry has already been pushed */
	member_set_t flag_mask = 1U << (sizeof(member_set_t) * 8 - 1);

	/*
	 * Push an existing item to its alternative location, searching for
	 * a bucket with spare room among the alternative locations.
	 * (An illustration of this displacement follows the function.)
	 */
	for (i = 0; i < RTE_MEMBER_BUCKET_ENTRIES; i++) {
		/* Search for space in the alternative location */
		next_bucket_idx = (bkt->sigs[i] ^ bkt_idx) & ss->bucket_mask;
		next_bkt[i] = &buckets[next_bucket_idx];
		for (j = 0; j < RTE_MEMBER_BUCKET_ENTRIES; j++) {
			if (next_bkt[i]->sets[j] == RTE_MEMBER_NO_MATCH)
				break;
		}

		if (j != RTE_MEMBER_BUCKET_ENTRIES)
			break;
	}

	/* An alternative location has spare room (end of the recursion) */
	if (i != RTE_MEMBER_BUCKET_ENTRIES) {
		next_bkt[i]->sigs[j] = bkt->sigs[i];
		next_bkt[i]->sets[j] = bkt->sets[i];
		return i;
	}

	/* Pick an entry that has not been pushed yet */
	for (i = 0; i < RTE_MEMBER_BUCKET_ENTRIES; i++)
		if ((bkt->sets[i] & flag_mask) == 0)
			break;

	/* All entries have been pushed, so the entry cannot be added */
	if (i == RTE_MEMBER_BUCKET_ENTRIES ||
			++(*nr_pushes) > RTE_MEMBER_MAX_PUSHES)
		return -ENOSPC;

	next_bucket_idx = (bkt->sigs[i] ^ bkt_idx) & ss->bucket_mask;
	/* Set the flag to indicate that this entry is going to be pushed */
	bkt->sets[i] |= flag_mask;

	/* Make room in the alternative bucket for the pushed entry */
	ret = make_space_bucket(ss, next_bucket_idx, nr_pushes);
	/*
	 * After the recursive call, clear the flag and insert the pushed
	 * entry into its alternative location if the call succeeded,
	 * otherwise return the error.
	 */
	bkt->sets[i] &= ~flag_mask;
	if (ret >= 0) {
		next_bkt[i]->sigs[ret] = bkt->sigs[i];
		next_bkt[i]->sets[ret] = bkt->sets[i];
		return i;
	} else
		return ret;
}

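/*
 * Illustration of the displacement above (not part of the original source):
 * when bucket B is full, each occupant's alternative bucket is
 * (sig ^ B) & bucket_mask. If one of those buckets has a free slot, that
 * occupant is moved there and its now-free slot index is returned to the
 * caller. Otherwise one occupant is marked with the set_id MSB flag (so it
 * is not pushed twice) and the same procedure is applied recursively to its
 * alternative bucket, up to RTE_MEMBER_MAX_PUSHES displacements.
 */
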
int
rte_member_add_ht(const struct rte_member_setsum *ss,
		const void *key, member_set_t set_id)
{
	int ret;
	unsigned int nr_pushes = 0;
	uint32_t prim_bucket, sec_bucket;
	member_sig_t tmp_sig;
	struct member_ht_bucket *buckets = ss->table;
	member_set_t flag_mask = 1U << (sizeof(member_set_t) * 8 - 1);

	if (set_id == RTE_MEMBER_NO_MATCH || (set_id & flag_mask) != 0)
		return -EINVAL;

	get_buckets_index(ss, key, &prim_bucket, &sec_bucket, &tmp_sig);

	/*
	 * For a cache-based setsummary, we try to overwrite (update) an
	 * existing entry with the same signature first. In cache mode we
	 * allow false negatives and only cache the most recent keys.
	 *
	 * For non-cache mode, we do not update an existing entry with the
	 * same signature, because if two keys with the same signature updated
	 * each other, a false negative could occur, which is not the expected
	 * behavior for a non-cache setsummary. (See the illustrative note
	 * after this function.)
	 */
	if (ss->cache) {
		ret = try_update(buckets, prim_bucket, sec_bucket, tmp_sig,
				set_id, ss->sig_cmp_fn);
		if (ret != -1)
			return ret;
	}
	/* If a bucket is not full, insert into a free slot */
	ret = try_insert(buckets, prim_bucket, sec_bucket, tmp_sig, set_id);
	if (ret != -1)
		return ret;

	/*
	 * Both buckets are full: pick prim or sec pseudo-randomly, using the
	 * low bit of the signature, for the recursive displacement.
	 */
	uint32_t select_bucket = (tmp_sig & 1U) ? prim_bucket : sec_bucket;
	if (ss->cache) {
		ret = evict_from_bucket();
		buckets[select_bucket].sigs[ret] = tmp_sig;
		buckets[select_bucket].sets[ret] = set_id;
		return 0;
	}

	ret = make_space_bucket(ss, select_bucket, &nr_pushes);
	if (ret >= 0) {
		buckets[select_bucket].sigs[ret] = tmp_sig;
		buckets[select_bucket].sets[ret] = set_id;
		return 0;
	}

	return ret;
}

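/*
 * Illustration of the update policy above (not part of the original source):
 * in cache mode, adding key A with set 1 and then a key B that happens to
 * produce the same signature and bucket overwrites A's entry, so a later
 * lookup of A may miss (an accepted false negative). In non-cache mode the
 * entry for A is left untouched and B goes into another slot, so neither key
 * can be lost because of the other.
 */
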
void
rte_member_free_ht(struct rte_member_setsum *ss)
{
	rte_free(ss->table);
}

int
rte_member_delete_ht(const struct rte_member_setsum *ss, const void *key,
		member_set_t set_id)
{
	int i;
	uint32_t prim_bucket, sec_bucket;
	member_sig_t tmp_sig;
	struct member_ht_bucket *buckets = ss->table;

	get_buckets_index(ss, key, &prim_bucket, &sec_bucket, &tmp_sig);

	for (i = 0; i < RTE_MEMBER_BUCKET_ENTRIES; i++) {
		if (tmp_sig == buckets[prim_bucket].sigs[i] &&
				set_id == buckets[prim_bucket].sets[i]) {
			buckets[prim_bucket].sets[i] = RTE_MEMBER_NO_MATCH;
			return 0;
		}
	}

	for (i = 0; i < RTE_MEMBER_BUCKET_ENTRIES; i++) {
		if (tmp_sig == buckets[sec_bucket].sigs[i] &&
				set_id == buckets[sec_bucket].sets[i]) {
			buckets[sec_bucket].sets[i] = RTE_MEMBER_NO_MATCH;
			return 0;
		}
	}
	return -ENOENT;
}

void
rte_member_reset_ht(const struct rte_member_setsum *ss)
{
	uint32_t i, j;
	struct member_ht_bucket *buckets = ss->table;

	for (i = 0; i < ss->bucket_cnt; i++) {
		for (j = 0; j < RTE_MEMBER_BUCKET_ENTRIES; j++)
			buckets[i].sets[j] = RTE_MEMBER_NO_MATCH;
	}
}