/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <math.h>
#include <string.h>

#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_errno.h>

#include "rte_member.h"
#include "rte_member_vbf.h"
/*
 * vBF currently implemented as a big array.
 * The BFs have a vertical layout. Bits in same location of all bfs will stay
 * in the same cache line.
 * For example, if we have 32 bloom filters, we use a uint32_t array to
 * represent all of them. array[0] represent the first location of all the
 * bloom filters, array[1] represents the second location of all the
 * bloom filters, etc. The advantage of this layout is to minimize the average
 * number of memory accesses to test all bloom filters.
 *
 * Currently the implementation supports vBF containing 1,2,4,8,16,32 BFs.
 */
58 rte_member_create_vbf(struct rte_member_setsum *ss,
59 const struct rte_member_parameters *params)
62 if (params->num_set > RTE_MEMBER_MAX_BF ||
63 !rte_is_power_of_2(params->num_set) ||
64 params->num_keys == 0 ||
65 params->false_positive_rate == 0 ||
66 params->false_positive_rate > 1) {
68 RTE_MEMBER_LOG(ERR, "Membership vBF create with invalid parameters\n");
72 /* We assume expected keys evenly distribute to all BFs */
73 uint32_t num_keys_per_bf = 1 + (params->num_keys - 1) / ss->num_set;
76 * Note that the false positive rate is for all BFs in the vBF
77 * such that the single BF's false positive rate needs to be
79 * Assume each BF's False positive rate is fp_one_bf. The total false
80 * positive rate is fp = 1-(1-fp_one_bf)^n.
81 * => fp_one_bf = 1 - (1-fp)^(1/n)
84 float fp_one_bf = 1 - pow((1 - params->false_positive_rate),
89 RTE_MEMBER_LOG(ERR, "Membership BF false positive rate is too small\n");
93 uint32_t bits = ceil((num_keys_per_bf *
95 log(1.0 / (pow(2.0, log(2.0)))));
97 /* We round to power of 2 for performance during lookup */
98 ss->bits = rte_align32pow2(bits);
100 ss->num_hashes = (uint32_t)(log(2.0) * bits / num_keys_per_bf);
101 ss->bit_mask = ss->bits - 1;
104 * Since we round the bits to power of 2, the final false positive
105 * rate will probably not be same as the user specified. We log the
106 * new value as debug message.
108 float new_fp = pow((1 - pow((1 - 1.0 / ss->bits), num_keys_per_bf *
109 ss->num_hashes)), ss->num_hashes);
110 new_fp = 1 - pow((1 - new_fp), ss->num_set);
113 * Reduce hash function count, until we approach the user specified
114 * false-positive rate. Otherwise it is too conservative
116 int tmp_num_hash = ss->num_hashes;
118 while (tmp_num_hash > 1) {
119 float tmp_fp = new_fp;
122 new_fp = pow((1 - pow((1 - 1.0 / ss->bits), num_keys_per_bf *
123 tmp_num_hash)), tmp_num_hash);
124 new_fp = 1 - pow((1 - new_fp), ss->num_set);
126 if (new_fp > params->false_positive_rate) {
133 ss->num_hashes = tmp_num_hash;
136 * To avoid multiplication and division:
137 * mul_shift is used for multiplication shift during bit test
138 * div_shift is used for division shift, to be divided by number of bits
139 * represented by a uint32_t variable
141 ss->mul_shift = __builtin_ctzl(ss->num_set);
142 ss->div_shift = __builtin_ctzl(32 >> ss->mul_shift);
144 RTE_MEMBER_LOG(DEBUG, "vector bloom filter created, "
145 "each bloom filter expects %u keys, needs %u bits, %u hashes, "
146 "with false positive rate set as %.5f, "
147 "The new calculated vBF false positive rate is %.5f\n",
148 num_keys_per_bf, ss->bits, ss->num_hashes, fp_one_bf, new_fp);
150 ss->table = rte_zmalloc_socket(NULL, ss->num_set * (ss->bits >> 3),
151 RTE_CACHE_LINE_SIZE, ss->socket_id);
152 if (ss->table == NULL)
158 static inline uint32_t
159 test_bit(uint32_t bit_loc, const struct rte_member_setsum *ss)
161 uint32_t *vbf = ss->table;
162 uint32_t n = ss->num_set;
163 uint32_t div_shift = ss->div_shift;
164 uint32_t mul_shift = ss->mul_shift;
166 * a is how many bits in one BF are represented by one 32bit
169 uint32_t a = 32 >> mul_shift;
171 * x>>b is the divide, x & (a-1) is the mod, & (1<<n-1) to mask out bits
174 return (vbf[bit_loc >> div_shift] >>
175 ((bit_loc & (a - 1)) << mul_shift)) & ((1ULL << n) - 1);
179 set_bit(uint32_t bit_loc, const struct rte_member_setsum *ss, int32_t set)
181 uint32_t *vbf = ss->table;
182 uint32_t div_shift = ss->div_shift;
183 uint32_t mul_shift = ss->mul_shift;
184 uint32_t a = 32 >> mul_shift;
186 vbf[bit_loc >> div_shift] |=
187 1UL << (((bit_loc & (a - 1)) << mul_shift) + set - 1);
191 rte_member_lookup_vbf(const struct rte_member_setsum *ss, const void *key,
192 member_set_t *set_id)
195 uint32_t h1 = MEMBER_HASH_FUNC(key, ss->key_len, ss->prim_hash_seed);
196 uint32_t h2 = MEMBER_HASH_FUNC(&h1, sizeof(uint32_t),
201 for (j = 0; j < ss->num_hashes; j++) {
202 bit_loc = (h1 + j * h2) & ss->bit_mask;
203 mask &= test_bit(bit_loc, ss);
207 *set_id = __builtin_ctzl(mask) + 1;
211 *set_id = RTE_MEMBER_NO_MATCH;
216 rte_member_lookup_bulk_vbf(const struct rte_member_setsum *ss,
217 const void **keys, uint32_t num_keys, member_set_t *set_ids)
220 uint32_t num_matches = 0;
221 uint32_t mask[RTE_MEMBER_LOOKUP_BULK_MAX];
222 uint32_t h1[RTE_MEMBER_LOOKUP_BULK_MAX], h2[RTE_MEMBER_LOOKUP_BULK_MAX];
225 for (i = 0; i < num_keys; i++)
226 h1[i] = MEMBER_HASH_FUNC(keys[i], ss->key_len,
228 for (i = 0; i < num_keys; i++)
229 h2[i] = MEMBER_HASH_FUNC(&h1[i], sizeof(uint32_t),
231 for (i = 0; i < num_keys; i++) {
233 for (k = 0; k < ss->num_hashes; k++) {
234 bit_loc = (h1[i] + k * h2[i]) & ss->bit_mask;
235 mask[i] &= test_bit(bit_loc, ss);
238 for (i = 0; i < num_keys; i++) {
240 set_ids[i] = __builtin_ctzl(mask[i]) + 1;
243 set_ids[i] = RTE_MEMBER_NO_MATCH;
249 rte_member_lookup_multi_vbf(const struct rte_member_setsum *ss,
250 const void *key, uint32_t match_per_key,
251 member_set_t *set_id)
253 uint32_t num_matches = 0;
255 uint32_t h1 = MEMBER_HASH_FUNC(key, ss->key_len, ss->prim_hash_seed);
256 uint32_t h2 = MEMBER_HASH_FUNC(&h1, sizeof(uint32_t),
261 for (j = 0; j < ss->num_hashes; j++) {
262 bit_loc = (h1 + j * h2) & ss->bit_mask;
263 mask &= test_bit(bit_loc, ss);
266 uint32_t loc = __builtin_ctzl(mask);
267 set_id[num_matches] = loc + 1;
269 if (num_matches >= match_per_key)
271 mask &= ~(1UL << loc);
277 rte_member_lookup_multi_bulk_vbf(const struct rte_member_setsum *ss,
278 const void **keys, uint32_t num_keys, uint32_t match_per_key,
279 uint32_t *match_count,
280 member_set_t *set_ids)
283 uint32_t num_matches = 0;
284 uint32_t match_cnt_t;
285 uint32_t mask[RTE_MEMBER_LOOKUP_BULK_MAX];
286 uint32_t h1[RTE_MEMBER_LOOKUP_BULK_MAX], h2[RTE_MEMBER_LOOKUP_BULK_MAX];
289 for (i = 0; i < num_keys; i++)
290 h1[i] = MEMBER_HASH_FUNC(keys[i], ss->key_len,
292 for (i = 0; i < num_keys; i++)
293 h2[i] = MEMBER_HASH_FUNC(&h1[i], sizeof(uint32_t),
295 for (i = 0; i < num_keys; i++) {
297 for (k = 0; k < ss->num_hashes; k++) {
298 bit_loc = (h1[i] + k * h2[i]) & ss->bit_mask;
299 mask[i] &= test_bit(bit_loc, ss);
302 for (i = 0; i < num_keys; i++) {
305 uint32_t loc = __builtin_ctzl(mask[i]);
306 set_ids[i * match_per_key + match_cnt_t] = loc + 1;
308 if (match_cnt_t >= match_per_key)
310 mask[i] &= ~(1UL << loc);
312 match_count[i] = match_cnt_t;
313 if (match_cnt_t != 0)
320 rte_member_add_vbf(const struct rte_member_setsum *ss,
321 const void *key, member_set_t set_id)
326 if (set_id > ss->num_set || set_id == RTE_MEMBER_NO_MATCH)
329 h1 = MEMBER_HASH_FUNC(key, ss->key_len, ss->prim_hash_seed);
330 h2 = MEMBER_HASH_FUNC(&h1, sizeof(uint32_t), ss->sec_hash_seed);
332 for (i = 0; i < ss->num_hashes; i++) {
333 bit_loc = (h1 + i * h2) & ss->bit_mask;
334 set_bit(bit_loc, ss, set_id);
340 rte_member_free_vbf(struct rte_member_setsum *ss)
346 rte_member_reset_vbf(const struct rte_member_setsum *ss)
348 uint32_t *vbf = ss->table;
349 memset(vbf, 0, (ss->num_set * ss->bits) >> 3);