#include <rte_common.h>
#include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_log.h>
-#include <rte_memcpy.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>
#include <rte_ring.h>
#include <rte_compat.h>
+#include <rte_vect.h>
+#include <rte_tailq.h>
#include "rte_hash.h"
#include "rte_cuckoo_hash.h"
hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
- rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_read_lock();
TAILQ_FOREACH(te, hash_list, next) {
h = (struct rte_hash *) te->data;
if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
break;
}
- rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_read_unlock();
if (te == NULL) {
rte_errno = ENOENT;
return NULL;
}
unsigned int readwrite_concur_support = 0;
unsigned int writer_takes_lock = 0;
unsigned int no_free_on_del = 0;
+ uint32_t *ext_bkt_to_free = NULL;
uint32_t *tbl_chng_cnt = NULL;
unsigned int readwrite_concur_lf_support = 0;
- if ((params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF) &&
- (params->extra_flag & RTE_HASH_EXTRA_FLAGS_EXT_TABLE)) {
- rte_errno = EINVAL;
- RTE_LOG(ERR, HASH, "rte_hash_create: extendable bucket "
- "feature not supported with rw concurrency "
- "lock free\n");
- return NULL;
- }
-
/* Check extra flags field to check extra options. */
if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
hw_trans_mem_support = 1;
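/* With the EINVAL guard above removed, lock-free read/write concurrency
 * may now be combined with extendable buckets. A minimal caller-side
 * sketch (names and sizes are illustrative, not part of this patch):
 *
 *	struct rte_hash_parameters params = {
 *		.name = "lf_ext_hash",
 *		.entries = 1024,
 *		.key_len = sizeof(uint32_t),
 *		.hash_func = rte_jhash,
 *		.socket_id = rte_socket_id(),
 *		.extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF |
 *			RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
 *	};
 *	struct rte_hash *h = rte_hash_create(&params);
 */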
snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_lock();
/* guarantee there's no existing: this is normally already checked
 * by ring creation above
 */
for (i = 1; i <= num_buckets; i++)
rte_ring_sp_enqueue(r_ext, (void *)((uintptr_t) i));
+
+ if (readwrite_concur_lf_support) {
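+ /* Sized per key slot, not per ext bucket: with lock-free readers an
+ * emptied ext bucket cannot be recycled immediately, so its index is
+ * parked here and freed together with the owning key index.
+ */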
+ ext_bkt_to_free = rte_zmalloc(NULL, sizeof(uint32_t) *
+ num_key_slots, 0);
+ if (ext_bkt_to_free == NULL) {
+ RTE_LOG(ERR, HASH, "ext bkt to free memory allocation "
+ "failed\n");
+ goto err_unlock;
+ }
+ }
}
const uint32_t key_entry_size =
RTE_ALIGN(sizeof(struct rte_hash_key) + params->key_len,
KEY_ALIGNMENT);
default_hash_func = (rte_hash_function)rte_hash_crc;
#endif
/* Setup hash context */
- snprintf(h->name, sizeof(h->name), "%s", params->name);
+ strlcpy(h->name, params->name, sizeof(h->name));
h->entries = params->entries;
h->key_len = params->key_len;
h->key_entry_size = key_entry_size;
h->hash_func = (params->hash_func == NULL) ?
default_hash_func : params->hash_func;
h->key_store = k;
h->free_slots = r;
+ h->ext_bkt_to_free = ext_bkt_to_free;
h->tbl_chng_cnt = tbl_chng_cnt;
*h->tbl_chng_cnt = 0;
h->hw_trans_mem_support = hw_trans_mem_support;
#if defined(RTE_ARCH_X86)
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE2))
h->sig_cmp_fn = RTE_HASH_COMPARE_SSE;
else
+#elif defined(RTE_ARCH_ARM64)
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
+ h->sig_cmp_fn = RTE_HASH_COMPARE_NEON;
+ else
#endif
h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;
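/* sig_cmp_fn is resolved once at create time from the runtime CPU flags,
 * so compare_signatures() further below can simply switch on it instead
 * of re-probing the CPU on the lookup hot path.
 */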
te->data = (void *) h;
TAILQ_INSERT_TAIL(hash_list, te, next);
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_unlock();
return h;
err_unlock:
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_unlock();
err:
rte_ring_free(r);
rte_ring_free(r_ext);
rte_free(buckets_ext);
rte_free(k);
rte_free(tbl_chng_cnt);
+ rte_free(ext_bkt_to_free);
return NULL;
}
hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_lock();
/* find out tailq entry */
TAILQ_FOREACH(te, hash_list, next) {
}
if (te == NULL) {
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_unlock();
return;
}
TAILQ_REMOVE(hash_list, te, next);
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_unlock();
if (h->use_local_cache)
rte_free(h->local_free_slots);
rte_free(h->buckets);
rte_free(h->buckets_ext);
rte_free(h->tbl_chng_cnt);
+ rte_free(h->ext_bkt_to_free);
rte_free(h);
rte_free(te);
}
__atomic_store_n(h->tbl_chng_cnt,
*h->tbl_chng_cnt + 1,
__ATOMIC_RELEASE);
- /* The stores to sig_alt and sig_current should not
+ /* The store to sig_current should not
* move above the store to tbl_chng_cnt.
*/
__atomic_thread_fence(__ATOMIC_RELEASE);
__atomic_store_n(h->tbl_chng_cnt,
*h->tbl_chng_cnt + 1,
__ATOMIC_RELEASE);
- /* The stores to sig_alt and sig_current should not
+ /* The store to sig_current should not
* move above the store to tbl_chng_cnt.
*/
__atomic_thread_fence(__ATOMIC_RELEASE);
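/* The counter bumps above pair with the retry loop in the lock-free
 * lookup path further below. The protocol in miniature (a sketch, not
 * an addition to the patch):
 *
 *	do {
 *		cnt_b = __atomic_load_n(h->tbl_chng_cnt, __ATOMIC_ACQUIRE);
 *		... search primary and secondary buckets ...
 *		__atomic_thread_fence(__ATOMIC_ACQUIRE);
 *		cnt_a = __atomic_load_n(h->tbl_chng_cnt, __ATOMIC_ACQUIRE);
 *	} while (cnt_b != cnt_a);
 *
 * A reader that raced with a key displacement observes cnt_b != cnt_a
 * and retries instead of reporting a false miss.
 */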
new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
new_idx = (uint32_t)((uintptr_t) slot_id);
/* Copy key */
- rte_memcpy(new_k->key, key, h->key_len);
+ memcpy(new_k->key, key, h->key_len);
/* Key can be of arbitrary length, so it is not possible to store
 * it atomically. Hence the new key element's memory stores
 * (key as well as data) should be complete before it is referenced.
 */
/* Check if slot is available */
if (likely(cur_bkt->key_idx[i] == EMPTY_SLOT)) {
cur_bkt->sig_current[i] = short_sig;
- cur_bkt->key_idx[i] = new_idx;
+ /* Store to signature and key should not leak
+ * after the store to key_idx, i.e. key_idx is
+ * the guard variable for signature and key
+ */
+ __atomic_store_n(&cur_bkt->key_idx[i],
+ new_idx,
+ __ATOMIC_RELEASE);
__hash_rw_writer_unlock(h);
return new_idx - 1;
}
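/* The release store above makes key_idx the guard variable: a reader that
 * acquire-loads key_idx and sees a non-empty slot is guaranteed to also
 * observe the completed signature, key and data stores. Matching
 * reader-side sketch (as in the lock-free bucket search):
 *
 *	key_idx = __atomic_load_n(&bkt->key_idx[i], __ATOMIC_ACQUIRE);
 *	if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT)
 *		... safe to compare the stored key ...
 */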
bkt_id = (uint32_t)((uintptr_t)ext_bkt_id) - 1;
/* Use the first location of the new bucket */
(h->buckets_ext[bkt_id]).sig_current[0] = short_sig;
- (h->buckets_ext[bkt_id]).key_idx[0] = new_idx;
+ /* Store to signature and key should not leak
+ * after the store to key_idx, i.e. key_idx is
+ * the guard variable for signature and key
+ */
+ __atomic_store_n(&(h->buckets_ext[bkt_id]).key_idx[0],
+ new_idx,
+ __ATOMIC_RELEASE);
/* Link the new bucket to sec bucket linked list */
last = rte_hash_get_last_bkt(sec_bkt);
last->next = &h->buckets_ext[bkt_id];
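/* Ordering note: the entry is fully populated before the bucket is linked
 * into the chain, so a lock-free reader walking the list either misses the
 * new bucket entirely or finds a complete entry.
 */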
return ret;
}
+/* Search one bucket to find the match key - uses rw lock */
+static inline int32_t
+search_one_bucket_l(const struct rte_hash *h, const void *key,
+ uint16_t sig, void **data,
+ const struct rte_hash_bucket *bkt)
+{
+ int i;
+ struct rte_hash_key *k, *keys = h->key_store;
+
+ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
+ if (bkt->sig_current[i] == sig &&
+ bkt->key_idx[i] != EMPTY_SLOT) {
+ k = (struct rte_hash_key *) ((char *)keys +
+ bkt->key_idx[i] * h->key_entry_size);
+
+ if (rte_hash_cmp_eq(key, k->key, h) == 0) {
+ if (data != NULL)
+ *data = k->pdata;
+ /*
+ * Return index where key is stored,
+ * subtracting the first dummy index
+ */
+ return bkt->key_idx[i] - 1;
+ }
+ }
+ }
+ return -1;
+}
+
/* Search one bucket to find the match key - uses lock-free mechanism */
static inline int32_t
-search_one_bucket(const struct rte_hash *h, const void *key, uint16_t sig,
+search_one_bucket_lf(const struct rte_hash *h, const void *key, uint16_t sig,
void **data, const struct rte_hash_bucket *bkt)
{
int i;
}
static inline int32_t
-__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
- hash_sig_t sig, void **data)
+__rte_hash_lookup_with_hash_l(const struct rte_hash *h, const void *key,
+ hash_sig_t sig, void **data)
{
uint32_t prim_bucket_idx, sec_bucket_idx;
struct rte_hash_bucket *bkt, *cur_bkt;
- uint32_t cnt_b, cnt_a;
int ret;
uint16_t short_sig;
prim_bucket_idx = get_prim_bucket_index(h, sig);
sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
+ bkt = &h->buckets[prim_bucket_idx];
+
__hash_rw_reader_lock(h);
+ /* Check if key is in primary location */
+ ret = search_one_bucket_l(h, key, short_sig, data, bkt);
+ if (ret != -1) {
+ __hash_rw_reader_unlock(h);
+ return ret;
+ }
+ /* Calculate secondary hash */
+ bkt = &h->buckets[sec_bucket_idx];
+
+ /* Check if key is in secondary location */
+ FOR_EACH_BUCKET(cur_bkt, bkt) {
+ ret = search_one_bucket_l(h, key, short_sig,
+ data, cur_bkt);
+ if (ret != -1) {
+ __hash_rw_reader_unlock(h);
+ return ret;
+ }
+ }
+
+ __hash_rw_reader_unlock(h);
+
+ return -ENOENT;
+}
+
+static inline int32_t
+__rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
+ hash_sig_t sig, void **data)
+{
+ uint32_t prim_bucket_idx, sec_bucket_idx;
+ struct rte_hash_bucket *bkt, *cur_bkt;
+ uint32_t cnt_b, cnt_a;
+ int ret;
+ uint16_t short_sig;
+
+ short_sig = get_short_sig(sig);
+ prim_bucket_idx = get_prim_bucket_index(h, sig);
+ sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
+
do {
/* Load the table change counter before the lookup
 * starts. Acquire semantics will make sure that
 * loads in search_one_bucket_lf are not hoisted.
 */
/* Check if key is in primary location */
bkt = &h->buckets[prim_bucket_idx];
- ret = search_one_bucket(h, key, short_sig, data, bkt);
+ ret = search_one_bucket_lf(h, key, short_sig, data, bkt);
if (ret != -1) {
__hash_rw_reader_unlock(h);
return ret;
/* Check if key is in secondary location */
FOR_EACH_BUCKET(cur_bkt, bkt) {
- ret = search_one_bucket(h, key, short_sig,
+ ret = search_one_bucket_lf(h, key, short_sig,
data, cur_bkt);
if (ret != -1) {
__hash_rw_reader_unlock(h);
__ATOMIC_ACQUIRE);
} while (cnt_b != cnt_a);
- __hash_rw_reader_unlock(h);
-
return -ENOENT;
}
+static inline int32_t
+__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
+ hash_sig_t sig, void **data)
+{
+ if (h->readwrite_concur_lf_support)
+ return __rte_hash_lookup_with_hash_lf(h, key, sig, data);
+ else
+ return __rte_hash_lookup_with_hash_l(h, key, sig, data);
+}
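/* The _l/_lf split is internal; callers keep using the existing API.
 * Minimal usage sketch (key variable illustrative):
 *
 *	void *data;
 *	int pos = rte_hash_lookup_data(h, &key, &data);
 *	if (pos >= 0)
 *		... hit: data holds the stored pointer ...
 */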
+
int32_t
rte_hash_lookup_with_hash(const struct rte_hash *h,
const void *key, hash_sig_t sig)
n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
cached_free_slots->objs,
LCORE_CACHE_SIZE, NULL);
+ ERR_IF_TRUE((n_slots == 0),
+ "%s: could not enqueue free slots in global ring\n",
+ __func__);
cached_free_slots->len -= n_slots;
}
/* Put index of new free slot in cache. */
* empty slot.
*/
static inline void
-__rte_hash_compact_ll(struct rte_hash_bucket *cur_bkt, int pos) {
+__rte_hash_compact_ll(const struct rte_hash *h,
+ struct rte_hash_bucket *cur_bkt, int pos) {
int i;
struct rte_hash_bucket *last_bkt;
for (i = RTE_HASH_BUCKET_ENTRIES - 1; i >= 0; i--) {
if (last_bkt->key_idx[i] != EMPTY_SLOT) {
- cur_bkt->key_idx[pos] = last_bkt->key_idx[i];
cur_bkt->sig_current[pos] = last_bkt->sig_current[i];
+ __atomic_store_n(&cur_bkt->key_idx[pos],
+ last_bkt->key_idx[i],
+ __ATOMIC_RELEASE);
+ if (h->readwrite_concur_lf_support) {
+ /* Inform the readers that the table has changed.
+ * Since there is one writer, load acquire on
+ * tbl_chng_cnt is not required.
+ */
+ __atomic_store_n(h->tbl_chng_cnt,
+ *h->tbl_chng_cnt + 1,
+ __ATOMIC_RELEASE);
+ /* The store to sig_current should
+ * not move above the store to tbl_chng_cnt.
+ */
+ __atomic_thread_fence(__ATOMIC_RELEASE);
+ }
last_bkt->sig_current[i] = NULL_SIGNATURE;
- last_bkt->key_idx[i] = EMPTY_SLOT;
+ __atomic_store_n(&last_bkt->key_idx[i],
+ EMPTY_SLOT,
+ __ATOMIC_RELEASE);
return;
}
}
/* look for key in primary bucket */
ret = search_and_remove(h, key, prim_bkt, short_sig, &pos);
if (ret != -1) {
- __rte_hash_compact_ll(prim_bkt, pos);
+ __rte_hash_compact_ll(h, prim_bkt, pos);
last_bkt = prim_bkt->next;
prev_bkt = prim_bkt;
goto return_bkt;
FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
ret = search_and_remove(h, key, cur_bkt, short_sig, &pos);
if (ret != -1) {
- __rte_hash_compact_ll(cur_bkt, pos);
+ __rte_hash_compact_ll(h, cur_bkt, pos);
last_bkt = sec_bkt->next;
prev_bkt = sec_bkt;
goto return_bkt;
}
/* found empty bucket and recycle */
if (i == RTE_HASH_BUCKET_ENTRIES) {
- prev_bkt->next = last_bkt->next = NULL;
+ prev_bkt->next = NULL;
uint32_t index = last_bkt - h->buckets_ext + 1;
- rte_ring_sp_enqueue(h->free_ext_bkts, (void *)(uintptr_t)index);
+ /* Recycle the empty bkt if
+ * no_free_on_del is disabled.
+ */
+ if (h->no_free_on_del)
+ /* Store index of an empty ext bkt to be recycled
+ * on calling rte_hash_del_xxx APIs.
+ * When lock free read-write concurrency is enabled,
+ * an empty ext bkt cannot be put into free list
+ * immediately (as readers might be using it still).
+ * Hence freeing of the ext bkt is piggy-backed to
+ * freeing of the key index.
+ */
+ h->ext_bkt_to_free[ret] = index;
+ else
+ rte_ring_sp_enqueue(h->free_ext_bkts, (void *)(uintptr_t)index);
}
-
__hash_rw_writer_unlock(h);
return ret;
}
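/* With lock-free concurrency enabled, deletion is two-phase: the del call
 * only unlinks the entry, and the key slot (plus any empty ext bucket
 * parked in ext_bkt_to_free) is recycled later, once the application knows
 * no reader still holds a reference. Caller-side sketch (the quiescence
 * step, e.g. RCU, is the application's responsibility):
 *
 *	int32_t pos = rte_hash_del_key(h, &key);
 *	if (pos >= 0) {
 *		... wait for readers to quiesce ...
 *		rte_hash_free_key_with_position(h, pos);
 *	}
 */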
return 0;
}
-int __rte_experimental
+int
rte_hash_free_key_with_position(const struct rte_hash *h,
const int32_t position)
{
- RETURN_IF_TRUE(((h == NULL) || (position == EMPTY_SLOT)), -EINVAL);
+ /* Key index where key is stored, adding the first dummy index */
+ uint32_t key_idx = position + 1;
+
+ RETURN_IF_TRUE(((h == NULL) || (key_idx == EMPTY_SLOT)), -EINVAL);
unsigned int lcore_id, n_slots;
struct lcore_cache *cached_free_slots;
- const int32_t total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
+ const uint32_t total_entries = h->use_local_cache ?
+ h->entries + (RTE_MAX_LCORE - 1) * (LCORE_CACHE_SIZE - 1) + 1
+ : h->entries + 1;
/* Out of bounds */
- if (position >= total_entries)
+ if (key_idx >= total_entries)
return -EINVAL;
+ if (h->ext_table_support && h->readwrite_concur_lf_support) {
+ uint32_t index = h->ext_bkt_to_free[position];
+ if (index) {
+ /* Recycle empty ext bkt to free list. */
+ rte_ring_sp_enqueue(h->free_ext_bkts, (void *)(uintptr_t)index);
+ h->ext_bkt_to_free[position] = 0;
+ }
+ }
if (h->use_local_cache) {
lcore_id = rte_lcore_id();
n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
cached_free_slots->objs,
LCORE_CACHE_SIZE, NULL);
+ RETURN_IF_TRUE((n_slots == 0), -EFAULT);
cached_free_slots->len -= n_slots;
}
/* Put index of new free slot in cache. */
cached_free_slots->objs[cached_free_slots->len] =
- (void *)((uintptr_t)position);
+ (void *)((uintptr_t)key_idx);
cached_free_slots->len++;
} else {
rte_ring_sp_enqueue(h->free_slots,
- (void *)((uintptr_t)position));
+ (void *)((uintptr_t)key_idx));
}
return 0;
/* For match mask the first bit of every two bits indicates the match */
switch (sig_cmp_fn) {
-#ifdef RTE_MACHINE_CPUFLAG_SSE2
+#if defined(RTE_MACHINE_CPUFLAG_SSE2)
case RTE_HASH_COMPARE_SSE:
/* Compare all signatures in the bucket */
*prim_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
(__m128i const *)sec_bkt->sig_current),
_mm_set1_epi16(sig)));
break;
+#elif defined(RTE_MACHINE_CPUFLAG_NEON)
+ case RTE_HASH_COMPARE_NEON: {
+ uint16x8_t vmat, vsig, x;
+ int16x8_t shift = {-15, -13, -11, -9, -7, -5, -3, -1};
+
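+ /* NEON lacks a movemask instruction: vceqq_u16 yields 0xFFFF per
+ * matching lane, the AND below keeps only bit 15 of each lane, and the
+ * per-lane right shifts (vshlq_u16 with negative counts) move lane j's
+ * bit to bit 2*j, so vaddvq_u16 folds the disjoint bits into the
+ * 2-bits-per-entry mask layout the callers expect (first bit of each
+ * pair indicating a match).
+ */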
+ vsig = vld1q_dup_u16((uint16_t const *)&sig);
+ /* Compare all signatures in the primary bucket */
+ vmat = vceqq_u16(vsig,
+ vld1q_u16((uint16_t const *)prim_bkt->sig_current));
+ x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
+ *prim_hash_matches = (uint32_t)(vaddvq_u16(x));
+ /* Compare all signatures in the secondary bucket */
+ vmat = vceqq_u16(vsig,
+ vld1q_u16((uint16_t const *)sec_bkt->sig_current));
+ x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift);
+ *sec_hash_matches = (uint32_t)(vaddvq_u16(x));
+ }
+ break;
#endif
default:
for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
#define PREFETCH_OFFSET 4
static inline void
-__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
+__rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ uint64_t hits = 0;
+ int32_t i;
+ int32_t ret;
+ uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
+ uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
+ uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
+ struct rte_hash_bucket *cur_bkt, *next_bkt;
+
+ /* Prefetch first keys */
+ for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
+ rte_prefetch0(keys[i]);
+
+ /*
+ * Prefetch rest of the keys, calculate primary and
+ * secondary bucket and prefetch them
+ */
+ for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
+ rte_prefetch0(keys[i + PREFETCH_OFFSET]);
+
+ prim_hash[i] = rte_hash_hash(h, keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ /* Calculate and prefetch rest of the buckets */
+ for (; i < num_keys; i++) {
+ prim_hash[i] = rte_hash_hash(h, keys[i]);
+
+ sig[i] = get_short_sig(prim_hash[i]);
+ prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
+ sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
+
+ primary_bkt[i] = &h->buckets[prim_index[i]];
+ secondary_bkt[i] = &h->buckets[sec_index[i]];
+
+ rte_prefetch0(primary_bkt[i]);
+ rte_prefetch0(secondary_bkt[i]);
+ }
+
+ __hash_rw_reader_lock(h);
+
+ /* Compare signatures and prefetch key slot of first hit */
+ for (i = 0; i < num_keys; i++) {
+ compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
+ primary_bkt[i], secondary_bkt[i],
+ sig[i], h->sig_cmp_fn);
+
+ if (prim_hitmask[i]) {
+ uint32_t first_hit =
+ __builtin_ctzl(prim_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ primary_bkt[i]->key_idx[first_hit];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+ rte_prefetch0(key_slot);
+ continue;
+ }
+
+ if (sec_hitmask[i]) {
+ uint32_t first_hit =
+ __builtin_ctzl(sec_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ secondary_bkt[i]->key_idx[first_hit];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+ rte_prefetch0(key_slot);
+ }
+ }
+
+ /* Compare keys, first hits in primary first */
+ for (i = 0; i < num_keys; i++) {
+ positions[i] = -ENOENT;
+ while (prim_hitmask[i]) {
+ uint32_t hit_index =
+ __builtin_ctzl(prim_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ primary_bkt[i]->key_idx[hit_index];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+
+ /*
+ * If key index is 0, do not compare key,
+ * as it is checking the dummy slot
+ */
+ if (!!key_idx &
+ !rte_hash_cmp_eq(
+ key_slot->key, keys[i], h)) {
+ if (data != NULL)
+ data[i] = key_slot->pdata;
+
+ hits |= 1ULL << i;
+ positions[i] = key_idx - 1;
+ goto next_key;
+ }
+ prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ }
+
+ while (sec_hitmask[i]) {
+ uint32_t hit_index =
+ __builtin_ctzl(sec_hitmask[i])
+ >> 1;
+ uint32_t key_idx =
+ secondary_bkt[i]->key_idx[hit_index];
+ const struct rte_hash_key *key_slot =
+ (const struct rte_hash_key *)(
+ (const char *)h->key_store +
+ key_idx * h->key_entry_size);
+
+ /*
+ * If key index is 0, do not compare key,
+ * as it is checking the dummy slot
+ */
+
+ if (!!key_idx &
+ !rte_hash_cmp_eq(
+ key_slot->key, keys[i], h)) {
+ if (data != NULL)
+ data[i] = key_slot->pdata;
+
+ hits |= 1ULL << i;
+ positions[i] = key_idx - 1;
+ goto next_key;
+ }
+ sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
+ }
+next_key:
+ continue;
+ }
+
+ /* all found, do not need to go through ext bkt */
+ if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
+ if (hit_mask != NULL)
+ *hit_mask = hits;
+ __hash_rw_reader_unlock(h);
+ return;
+ }
+
+ /* need to check ext buckets for match */
+ for (i = 0; i < num_keys; i++) {
+ if ((hits & (1ULL << i)) != 0)
+ continue;
+ next_bkt = secondary_bkt[i]->next;
+ FOR_EACH_BUCKET(cur_bkt, next_bkt) {
+ if (data != NULL)
+ ret = search_one_bucket_l(h, keys[i],
+ sig[i], &data[i], cur_bkt);
+ else
+ ret = search_one_bucket_l(h, keys[i],
+ sig[i], NULL, cur_bkt);
+ if (ret != -1) {
+ positions[i] = ret;
+ hits |= 1ULL << i;
+ break;
+ }
+ }
+ }
+
+ __hash_rw_reader_unlock(h);
+
+ if (hit_mask != NULL)
+ *hit_mask = hits;
+}
+
+static inline void
+__rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
int32_t num_keys, int32_t *positions,
uint64_t *hit_mask, void *data[])
{
rte_prefetch0(secondary_bkt[i]);
}
- __hash_rw_reader_lock(h);
+ for (i = 0; i < num_keys; i++)
+ positions[i] = -ENOENT;
+
do {
/* Load the table change counter before the lookup
 * starts. Acquire semantics will make sure that
 * loads in compare_signatures are not hoisted.
 */
/* Compare keys, first hits in primary first */
for (i = 0; i < num_keys; i++) {
- positions[i] = -ENOENT;
while (prim_hitmask[i]) {
uint32_t hit_index =
__builtin_ctzl(prim_hitmask[i])
continue;
}
+ /* all found, do not need to go through ext bkt */
+ if (hits == ((1ULL << num_keys) - 1)) {
+ if (hit_mask != NULL)
+ *hit_mask = hits;
+ return;
+ }
+ /* need to check ext buckets for match */
+ if (h->ext_table_support) {
+ for (i = 0; i < num_keys; i++) {
+ if ((hits & (1ULL << i)) != 0)
+ continue;
+ next_bkt = secondary_bkt[i]->next;
+ FOR_EACH_BUCKET(cur_bkt, next_bkt) {
+ if (data != NULL)
+ ret = search_one_bucket_lf(h,
+ keys[i], sig[i],
+ &data[i], cur_bkt);
+ else
+ ret = search_one_bucket_lf(h,
+ keys[i], sig[i],
+ NULL, cur_bkt);
+ if (ret != -1) {
+ positions[i] = ret;
+ hits |= 1ULL << i;
+ break;
+ }
+ }
+ }
+ }
/* The loads of sig_current in compare_signatures
* should not move below the load from tbl_chng_cnt.
*/
__ATOMIC_ACQUIRE);
} while (cnt_b != cnt_a);
- /* all found, do not need to go through ext bkt */
- if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
- if (hit_mask != NULL)
- *hit_mask = hits;
- __hash_rw_reader_unlock(h);
- return;
- }
-
- /* need to check ext buckets for match */
- for (i = 0; i < num_keys; i++) {
- if ((hits & (1ULL << i)) != 0)
- continue;
- next_bkt = secondary_bkt[i]->next;
- FOR_EACH_BUCKET(cur_bkt, next_bkt) {
- if (data != NULL)
- ret = search_one_bucket(h, keys[i],
- sig[i], &data[i], cur_bkt);
- else
- ret = search_one_bucket(h, keys[i],
- sig[i], NULL, cur_bkt);
- if (ret != -1) {
- positions[i] = ret;
- hits |= 1ULL << i;
- break;
- }
- }
- }
-
- __hash_rw_reader_unlock(h);
-
if (hit_mask != NULL)
*hit_mask = hits;
}
+static inline void
+__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
+ int32_t num_keys, int32_t *positions,
+ uint64_t *hit_mask, void *data[])
+{
+ if (h->readwrite_concur_lf_support)
+ __rte_hash_lookup_bulk_lf(h, keys, num_keys, positions,
+ hit_mask, data);
+ else
+ __rte_hash_lookup_bulk_l(h, keys, num_keys, positions,
+ hit_mask, data);
+}
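/* Bulk lookups likewise dispatch internally; the public API is unchanged.
 * Usage sketch (sizes illustrative):
 *
 *	const void *keys[8];
 *	void *data[8];
 *	uint64_t hit_mask;
 *	... point keys[] at the lookup keys ...
 *	rte_hash_lookup_bulk_data(h, keys, 8, &hit_mask, data);
 *	for (i = 0; i < 8; i++)
 *		if (hit_mask & (1ULL << i))
 *			... data[i] holds the value for keys[i] ...
 */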
+
int
rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
uint32_t num_keys, int32_t *positions)