#include "rte_hash.h"
#include "rte_cuckoo_hash.h"
+/*
+ * Mask of all extra_flag bits supported by this version.
+ * rte_hash_create() rejects any extra_flag bit outside this mask
+ * (rte_errno = EINVAL), so adding a new flag requires extending it.
+ */
+#define RTE_HASH_EXTRA_FLAGS_MASK (RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT | \
+ RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD | \
+ RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY | \
+ RTE_HASH_EXTRA_FLAGS_EXT_TABLE | \
+ RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL | \
+ RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF)
+
#define FOR_EACH_BUCKET(CURRENT_BKT, START_BUCKET) \
for (CURRENT_BKT = START_BUCKET; \
CURRENT_BKT != NULL; \
return NULL;
}
+ if (params->extra_flag & ~RTE_HASH_EXTRA_FLAGS_MASK) {
+ rte_errno = EINVAL;
+ RTE_LOG(ERR, HASH, "rte_hash_create: unsupported extra flags\n");
+ return NULL;
+ }
+
/* Validate correct usage of extra options */
if ((params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) &&
(params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF)) {
return h->hash_func(key, h->key_len, h->hash_func_init_val);
}
+/*
+ * Return an upper bound on the key index values this table can hand out,
+ * or -EINVAL if @h is NULL.
+ *
+ * With per-lcore free-slot caches enabled, key slots parked in those
+ * caches can push indices past the configured entry count, so the bound
+ * is raised by the worst-case number of cached indices.
+ */
+int32_t
+rte_hash_max_key_id(const struct rte_hash *h)
+{
+ RETURN_IF_TRUE((h == NULL), -EINVAL);
+ if (h->use_local_cache)
+ /*
+ * Increase number of slots by total number of indices
+ * that can be stored in the lcore caches
+ */
+ return (h->entries + ((RTE_MAX_LCORE - 1) *
+ (LCORE_CACHE_SIZE - 1)));
+ else
+ return h->entries;
+}
+
int32_t
rte_hash_count(const struct rte_hash *h)
{
uint32_t prim_bucket_idx, sec_bucket_idx;
struct rte_hash_bucket *prim_bkt, *sec_bkt, *cur_bkt;
struct rte_hash_key *new_k, *keys = h->key_store;
+ uint32_t ext_bkt_id = 0;
uint32_t slot_id;
- uint32_t ext_bkt_id;
int ret;
unsigned n_slots;
unsigned lcore_id;
* extendable bucket. We first get a free bucket from ring.
*/
if (rte_ring_sc_dequeue_elem(h->free_ext_bkts, &ext_bkt_id,
- sizeof(uint32_t)) != 0) {
+ sizeof(uint32_t)) != 0 ||
+ ext_bkt_id == 0) {
ret = -ENOSPC;
goto failure;
}
/* Check if key is in primary location */
bkt = &h->buckets[prim_bucket_idx];
ret = search_one_bucket_lf(h, key, short_sig, data, bkt);
- if (ret != -1) {
- __hash_rw_reader_unlock(h);
+ if (ret != -1)
return ret;
- }
/* Calculate secondary hash */
bkt = &h->buckets[sec_bucket_idx];
FOR_EACH_BUCKET(cur_bkt, bkt) {
ret = search_one_bucket_lf(h, key, short_sig,
data, cur_bkt);
- if (ret != -1) {
- __hash_rw_reader_unlock(h);
+ if (ret != -1)
return ret;
- }
}
/* The loads of sig_current in search_one_bucket
}
}
-#define PREFETCH_OFFSET 4
static inline void
-__rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
- int32_t num_keys, int32_t *positions,
- uint64_t *hit_mask, void *data[])
+__bulk_lookup_l(const struct rte_hash *h, const void **keys,
+ const struct rte_hash_bucket **primary_bkt,
+ const struct rte_hash_bucket **secondary_bkt,
+ uint16_t *sig, int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
{
uint64_t hits = 0;
int32_t i;
int32_t ret;
- uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
- uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
- uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
- uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
- const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
- const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
struct rte_hash_bucket *cur_bkt, *next_bkt;
- /* Prefetch first keys */
- for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
- rte_prefetch0(keys[i]);
-
- /*
- * Prefetch rest of the keys, calculate primary and
- * secondary bucket and prefetch them
- */
- for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
- rte_prefetch0(keys[i + PREFETCH_OFFSET]);
-
- prim_hash[i] = rte_hash_hash(h, keys[i]);
-
- sig[i] = get_short_sig(prim_hash[i]);
- prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
- sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
-
- primary_bkt[i] = &h->buckets[prim_index[i]];
- secondary_bkt[i] = &h->buckets[sec_index[i]];
-
- rte_prefetch0(primary_bkt[i]);
- rte_prefetch0(secondary_bkt[i]);
- }
-
- /* Calculate and prefetch rest of the buckets */
- for (; i < num_keys; i++) {
- prim_hash[i] = rte_hash_hash(h, keys[i]);
-
- sig[i] = get_short_sig(prim_hash[i]);
- prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
- sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
-
- primary_bkt[i] = &h->buckets[prim_index[i]];
- secondary_bkt[i] = &h->buckets[sec_index[i]];
-
- rte_prefetch0(primary_bkt[i]);
- rte_prefetch0(secondary_bkt[i]);
- }
-
__hash_rw_reader_lock(h);
/* Compare signatures and prefetch key slot of first hit */
}
static inline void
-__rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
- int32_t num_keys, int32_t *positions,
- uint64_t *hit_mask, void *data[])
+__bulk_lookup_lf(const struct rte_hash *h, const void **keys,
+ const struct rte_hash_bucket **primary_bkt,
+ const struct rte_hash_bucket **secondary_bkt,
+ uint16_t *sig, int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
{
uint64_t hits = 0;
int32_t i;
int32_t ret;
- uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
- uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
- uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
- uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
- const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
- const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
struct rte_hash_bucket *cur_bkt, *next_bkt;
uint32_t cnt_b, cnt_a;
- /* Prefetch first keys */
- for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
- rte_prefetch0(keys[i]);
-
- /*
- * Prefetch rest of the keys, calculate primary and
- * secondary bucket and prefetch them
- */
- for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
- rte_prefetch0(keys[i + PREFETCH_OFFSET]);
-
- prim_hash[i] = rte_hash_hash(h, keys[i]);
-
- sig[i] = get_short_sig(prim_hash[i]);
- prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
- sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
-
- primary_bkt[i] = &h->buckets[prim_index[i]];
- secondary_bkt[i] = &h->buckets[sec_index[i]];
-
- rte_prefetch0(primary_bkt[i]);
- rte_prefetch0(secondary_bkt[i]);
- }
-
- /* Calculate and prefetch rest of the buckets */
- for (; i < num_keys; i++) {
- prim_hash[i] = rte_hash_hash(h, keys[i]);
-
- sig[i] = get_short_sig(prim_hash[i]);
- prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
- sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
-
- primary_bkt[i] = &h->buckets[prim_index[i]];
- secondary_bkt[i] = &h->buckets[sec_index[i]];
-
- rte_prefetch0(primary_bkt[i]);
- rte_prefetch0(secondary_bkt[i]);
- }
-
for (i = 0; i < num_keys; i++)
positions[i] = -ENOENT;
*hit_mask = hits;
}
+/* Number of keys to prefetch ahead of the hash-computation loop */
+#define PREFETCH_OFFSET 4
+/*
+ * Shared front end for the bulk-lookup paths: for each of the @num_keys
+ * keys, compute the primary hash, derive the short signature and the
+ * primary/secondary bucket indices, and fill @sig, @primary_bkt and
+ * @secondary_bkt (each an array of at least @num_keys entries provided
+ * by the caller). Keys and both candidate buckets are prefetched so the
+ * subsequent compare loop runs against warm cache lines.
+ */
+static inline void
+__bulk_lookup_prefetching_loop(const struct rte_hash *h,
+ const void **keys, int32_t num_keys,
+ uint16_t *sig,
+ const struct rte_hash_bucket **primary_bkt,
+ const struct rte_hash_bucket **secondary_bkt)
+{
+ int32_t i;
+ uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
+
+ /* Prefetch first keys */
+ for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
+ rte_prefetch0(keys[i]);
+
+ /*
+ * Prefetch rest of the keys, calculate primary and
+ * secondary bucket and prefetch them
+ */
+ for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
+ /* Stay PREFETCH_OFFSET keys ahead of the hash computation */
+ rte_prefetch0(keys[i + PREFETCH_OFFSET]);
+
+ prim_hash[i] = rte_hash_hash(h, keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ /* Calculate and prefetch rest of the buckets */
+ for (; i < num_keys; i++) {
+ prim_hash[i] = rte_hash_hash(h, keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+}
+
+
+/*
+ * Bulk lookup, lock-protected ("_l") variant: hash and prefetch all keys
+ * up front, then search buckets via __bulk_lookup_l (which takes the
+ * reader lock). @num_keys must not exceed RTE_HASH_LOOKUP_BULK_MAX.
+ */
+static inline void
+__rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+
+ __bulk_lookup_prefetching_loop(h, keys, num_keys, sig,
+ primary_bkt, secondary_bkt);
+
+ __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
+ positions, hit_mask, data);
+}
+
+/*
+ * Bulk lookup, lock-free ("_lf") variant: identical front end to the
+ * "_l" path, but bucket searching is delegated to __bulk_lookup_lf.
+ * @num_keys must not exceed RTE_HASH_LOOKUP_BULK_MAX.
+ */
+static inline void
+__rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+
+ __bulk_lookup_prefetching_loop(h, keys, num_keys, sig,
+ primary_bkt, secondary_bkt);
+
+ __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
+ positions, hit_mask, data);
+}
+
static inline void
__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
int32_t num_keys, int32_t *positions,
return __builtin_popcountl(*hit_mask);
}
+
+/*
+ * Bulk lookup with caller-supplied primary hashes, lock-protected
+ * variant. Since @prim_hash is precomputed there is no staged
+ * PREFETCH_OFFSET pipeline: a single pass derives short signatures and
+ * bucket pointers and prefetches keys and buckets, then
+ * __bulk_lookup_l performs the search under the reader lock.
+ */
+static inline void
+__rte_hash_lookup_with_hash_bulk_l(const struct rte_hash *h,
+ const void **keys, hash_sig_t *prim_hash,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ int32_t i;
+ uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+
+ /*
+ * Prefetch keys, calculate primary and
+ * secondary bucket and prefetch them
+ */
+ for (i = 0; i < num_keys; i++) {
+ rte_prefetch0(keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
+ positions, hit_mask, data);
+}
+
+/*
+ * Bulk lookup with caller-supplied primary hashes, lock-free variant.
+ * Same single-pass prefetch setup as the "_l" variant above; the
+ * search itself is delegated to __bulk_lookup_lf.
+ */
+static inline void
+__rte_hash_lookup_with_hash_bulk_lf(const struct rte_hash *h,
+ const void **keys, hash_sig_t *prim_hash,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ int32_t i;
+ uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+
+ /*
+ * Prefetch keys, calculate primary and
+ * secondary bucket and prefetch them
+ */
+ for (i = 0; i < num_keys; i++) {
+ rte_prefetch0(keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys,
+ positions, hit_mask, data);
+}
+
+/*
+ * Dispatch a precomputed-hash bulk lookup to the lock-free or
+ * lock-protected implementation, depending on how the table was
+ * configured at creation time.
+ */
+static inline void
+__rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys,
+ hash_sig_t *prim_hash, int32_t num_keys,
+ int32_t *positions, uint64_t *hit_mask, void *data[])
+{
+ if (h->readwrite_concur_lf_support)
+ __rte_hash_lookup_with_hash_bulk_lf(h, keys, prim_hash,
+ num_keys, positions, hit_mask, data);
+ else
+ __rte_hash_lookup_with_hash_bulk_l(h, keys, prim_hash,
+ num_keys, positions, hit_mask, data);
+}
+
+/*
+ * Public API: bulk lookup with caller-supplied primary hashes.
+ * On success returns 0 and fills positions[i] for each key with its key
+ * slot index, or a negative value (e.g. -ENOENT) when the key is absent.
+ * Returns -EINVAL on bad arguments, including num_keys outside
+ * (0, RTE_HASH_LOOKUP_BULK_MAX].
+ */
+int
+rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys,
+ hash_sig_t *sig, uint32_t num_keys, int32_t *positions)
+{
+ RETURN_IF_TRUE(((h == NULL) || (keys == NULL) ||
+ (sig == NULL) || (num_keys == 0) ||
+ (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
+ (positions == NULL)), -EINVAL);
+
+ /* hit_mask/data are unused in the positions-only flavour */
+ __rte_hash_lookup_with_hash_bulk(h, keys, sig, num_keys,
+ positions, NULL, NULL);
+ return 0;
+}
+
+/*
+ * Public API: bulk lookup with caller-supplied primary hashes,
+ * returning user data. Bit i of *hit_mask is set when keys[i] was
+ * found and data[i] receives its stored value. Returns the number of
+ * hits (popcount of *hit_mask), or -EINVAL on bad arguments.
+ */
+int
+rte_hash_lookup_with_hash_bulk_data(const struct rte_hash *h,
+ const void **keys, hash_sig_t *sig,
+ uint32_t num_keys, uint64_t *hit_mask, void *data[])
+{
+ RETURN_IF_TRUE(((h == NULL) || (keys == NULL) ||
+ (sig == NULL) || (num_keys == 0) ||
+ (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
+ (hit_mask == NULL)), -EINVAL);
+
+ /* VLA is safe: num_keys <= RTE_HASH_LOOKUP_BULK_MAX checked above */
+ int32_t positions[num_keys];
+
+ __rte_hash_lookup_with_hash_bulk(h, keys, sig, num_keys,
+ positions, hit_mask, data);
+
+ /* Return number of hits */
+ return __builtin_popcountl(*hit_mask);
+}
+
int32_t
rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
{