#include <rte_cpuflags.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
-#include <rte_ring.h>
+#include <rte_ring_elem.h>
#include <rte_compat.h>
#include <rte_vect.h>
+#include <rte_tailq.h>
#include "rte_hash.h"
#include "rte_cuckoo_hash.h"
hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
- rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_read_lock();
TAILQ_FOREACH(te, hash_list, next) {
h = (struct rte_hash *) te->data;
if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
break;
}
- rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_read_unlock();
if (te == NULL) {
rte_errno = ENOENT;
char ring_name[RTE_RING_NAMESIZE];
char ext_ring_name[RTE_RING_NAMESIZE];
unsigned num_key_slots;
- unsigned i;
unsigned int hw_trans_mem_support = 0, use_local_cache = 0;
unsigned int ext_table_support = 0;
unsigned int readwrite_concur_support = 0;
uint32_t *ext_bkt_to_free = NULL;
uint32_t *tbl_chng_cnt = NULL;
unsigned int readwrite_concur_lf_support = 0;
+ uint32_t i;
rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;
snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
/* Create ring (Dummy slot index is not enqueued) */
- r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
- params->socket_id, 0);
+ r = rte_ring_create_elem(ring_name, sizeof(uint32_t),
+ rte_align32pow2(num_key_slots), params->socket_id, 0);
if (r == NULL) {
RTE_LOG(ERR, HASH, "memory allocation failed\n");
goto err;
if (ext_table_support) {
snprintf(ext_ring_name, sizeof(ext_ring_name), "HT_EXT_%s",
params->name);
- r_ext = rte_ring_create(ext_ring_name,
+ r_ext = rte_ring_create_elem(ext_ring_name, sizeof(uint32_t),
rte_align32pow2(num_buckets + 1),
params->socket_id, 0);
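/*
 * Illustrative sketch, not part of the patch: rte_ring_create_elem() takes
 * the element size (a multiple of 4 bytes), so the ring can carry 32-bit
 * slot indices directly instead of pointer-sized objects, halving the ring
 * footprint on 64-bit targets. The ring name below is hypothetical.
 */
static int
demo_elem_ring_roundtrip(int socket_id)
{
	uint32_t in = 42, out = 0;
	struct rte_ring *ring = rte_ring_create_elem("DEMO_RING",
			sizeof(uint32_t), 1024, socket_id, 0);

	if (ring == NULL)
		return -rte_errno;
	/* enqueue/dequeue copy esize bytes from/to the caller's buffer */
	rte_ring_sp_enqueue_elem(ring, &in, sizeof(uint32_t));
	rte_ring_sc_dequeue_elem(ring, &out, sizeof(uint32_t));
	rte_ring_free(ring);
	return out == 42 ? 0 : -1;
}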
snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_lock();
/* guarantee there's no existing entry with this name: this is normally
 * already checked by the ring creation above */
/* Populate the ext bkt ring. Index 0 is reserved so that 0 can mean NULL
 * for next bucket
 */
for (i = 1; i <= num_buckets; i++)
- rte_ring_sp_enqueue(r_ext, (void *)((uintptr_t) i));
+ rte_ring_sp_enqueue_elem(r_ext, &i, sizeof(uint32_t));
if (readwrite_concur_lf_support) {
ext_bkt_to_free = rte_zmalloc(NULL, sizeof(uint32_t) *
/* Populate free slots ring. Entry zero is reserved for key misses. */
for (i = 1; i < num_key_slots; i++)
- rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));
+ rte_ring_sp_enqueue_elem(r, &i, sizeof(uint32_t));
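/*
 * Sketch, not part of the patch: the elem API copies the value out of the
 * supplied buffer at call time, so passing &i on every iteration is safe.
 * The old call encoded the index in the pointer value itself and never
 * dereferenced it.
 */
static inline void
demo_enqueue_index(struct rte_ring *ring, uint32_t idx)
{
	/* old: rte_ring_sp_enqueue(ring, (void *)(uintptr_t)idx); */
	rte_ring_sp_enqueue_elem(ring, &idx, sizeof(idx));
}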
te->data = (void *) h;
TAILQ_INSERT_TAIL(hash_list, te, next);
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_unlock();
return h;
err_unlock:
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_unlock();
err:
rte_ring_free(r);
rte_ring_free(r_ext);
hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_lock();
/* find out tailq entry */
TAILQ_FOREACH(te, hash_list, next) {
}
if (te == NULL) {
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_unlock();
return;
}
TAILQ_REMOVE(hash_list, te, next);
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_unlock();
if (h->use_local_cache)
rte_free(h->local_free_slots);
return h->hash_func(key, h->key_len, h->hash_func_init_val);
}
+int32_t
+rte_hash_max_key_id(const struct rte_hash *h)
+{
+ RETURN_IF_TRUE((h == NULL), -EINVAL);
+ if (h->use_local_cache)
+ /*
+ * Increase number of slots by total number of indices
+ * that can be stored in the lcore caches
+ */
+ return (h->entries + ((RTE_MAX_LCORE - 1) *
+ (LCORE_CACHE_SIZE - 1)));
+ else
+ return h->entries;
+}
+
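/*
 * Usage sketch with hypothetical application code: rte_hash_max_key_id()
 * bounds the positions rte_hash_add_key() can return, so a caller can size
 * a per-key shadow array once at init time. Allocating max + 1 elements is
 * safe whether the bound is read as inclusive or exclusive.
 */
static void *
demo_alloc_user_data(const struct rte_hash *h, size_t elt_sz)
{
	int32_t max = rte_hash_max_key_id(h);

	if (max < 0)
		return NULL;
	return rte_zmalloc(NULL, elt_sz * ((size_t)max + 1), 0);
}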
int32_t
rte_hash_count(const struct rte_hash *h)
{
void
rte_hash_reset(struct rte_hash *h)
{
- void *ptr;
uint32_t tot_ring_cnt, i;
if (h == NULL)
memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
*h->tbl_chng_cnt = 0;
- /* clear the free ring */
- while (rte_ring_dequeue(h->free_slots, &ptr) == 0)
- continue;
+ /* reset the free ring */
+ rte_ring_reset(h->free_slots);
- /* clear free extendable bucket ring and memory */
+ /* flush free extendable bucket ring and memory */
if (h->ext_table_support) {
memset(h->buckets_ext, 0, h->num_buckets *
sizeof(struct rte_hash_bucket));
- while (rte_ring_dequeue(h->free_ext_bkts, &ptr) == 0)
- continue;
+ rte_ring_reset(h->free_ext_bkts);
}
/* Repopulate the free slots ring. Entry zero is reserved for key misses */
tot_ring_cnt = h->entries;
for (i = 1; i < tot_ring_cnt + 1; i++)
- rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));
+ rte_ring_sp_enqueue_elem(h->free_slots, &i, sizeof(uint32_t));
/* Repopulate the free ext bkt ring. */
if (h->ext_table_support) {
for (i = 1; i <= h->num_buckets; i++)
- rte_ring_sp_enqueue(h->free_ext_bkts,
- (void *)((uintptr_t) i));
+ rte_ring_sp_enqueue_elem(h->free_ext_bkts, &i,
+ sizeof(uint32_t));
}
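/*
 * Sketch, not part of the patch: rte_ring_reset() rewinds the ring's head
 * and tail in O(1), replacing the old O(n) dequeue-until-empty drain. Like
 * rte_hash_reset() itself, it is not multi-thread safe, so callers must
 * quiesce readers and writers first.
 */
static void
demo_drain_vs_reset(struct rte_ring *ring)
{
	void *obj;

	/* old pattern: drain one object at a time */
	while (rte_ring_dequeue(ring, &obj) == 0)
		continue;
	/* new pattern: constant-time flush */
	rte_ring_reset(ring);
}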
if (h->use_local_cache) {
static inline void
enqueue_slot_back(const struct rte_hash *h,
struct lcore_cache *cached_free_slots,
- void *slot_id)
+ uint32_t slot_id)
{
if (h->use_local_cache) {
cached_free_slots->objs[cached_free_slots->len] = slot_id;
cached_free_slots->len++;
} else
- rte_ring_sp_enqueue(h->free_slots, slot_id);
+ rte_ring_sp_enqueue_elem(h->free_slots, &slot_id,
+ sizeof(uint32_t));
}
/* Search a key from bucket and update its data.
k = (struct rte_hash_key *) ((char *)keys +
bkt->key_idx[i] * h->key_entry_size);
if (rte_hash_cmp_eq(key, k->key, h) == 0) {
- /* 'pdata' acts as the synchronization point
- * when an existing hash entry is updated.
- * Key is not updated in this case.
+ /* The store to application data at *data
+ * should not leak after the store to pdata
+ * in the key store. i.e. pdata is the guard
+ * variable. Release the application data
+ * to the readers.
*/
__atomic_store_n(&k->pdata,
data,
/* Check if slot is available */
if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
prim_bkt->sig_current[i] = sig;
- /* Key can be of arbitrary length, so it is
- * not possible to store it atomically.
- * Hence the new key element's memory stores
- * (key as well as data) should be complete
- * before it is referenced.
+ /* Store to signature and key should not
+ * leak after the store to key_idx. i.e.
+ * key_idx is the guard variable for signature
+ * and key.
*/
__atomic_store_n(&prim_bkt->key_idx[i],
new_idx,
uint32_t prim_bucket_idx, sec_bucket_idx;
struct rte_hash_bucket *prim_bkt, *sec_bkt, *cur_bkt;
struct rte_hash_key *new_k, *keys = h->key_store;
- void *slot_id = NULL;
- void *ext_bkt_id = NULL;
- uint32_t new_idx, bkt_id;
+ uint32_t slot_id;
+ uint32_t ext_bkt_id;
int ret;
unsigned n_slots;
unsigned lcore_id;
/* Try to get a free slot from the local cache */
if (cached_free_slots->len == 0) {
/* Need to get another burst of free slots from global ring */
- n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
+ n_slots = rte_ring_mc_dequeue_burst_elem(h->free_slots,
cached_free_slots->objs,
+ sizeof(uint32_t),
LCORE_CACHE_SIZE, NULL);
if (n_slots == 0) {
return -ENOSPC;
cached_free_slots->len--;
slot_id = cached_free_slots->objs[cached_free_slots->len];
} else {
- if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0) {
+ if (rte_ring_sc_dequeue_elem(h->free_slots, &slot_id,
+ sizeof(uint32_t)) != 0) {
return -ENOSPC;
}
}
- new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
- new_idx = (uint32_t)((uintptr_t) slot_id);
- /* Copy key */
- memcpy(new_k->key, key, h->key_len);
- /* Key can be of arbitrary length, so it is not possible to store
- * it atomically. Hence the new key element's memory stores
- * (key as well as data) should be complete before it is referenced.
- * 'pdata' acts as the synchronization point when an existing hash
- * entry is updated.
+ new_k = RTE_PTR_ADD(keys, slot_id * h->key_entry_size);
+ /* The store to application data (by the application) at *data should
+ * not leak after the store of pdata in the key store. i.e. pdata is
+ * the guard variable. Release the application data to the readers.
*/
__atomic_store_n(&new_k->pdata,
data,
__ATOMIC_RELEASE);
+ /* Copy key */
+ memcpy(new_k->key, key, h->key_len);
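/*
 * Ordering sketch with hypothetical types, not part of the patch: the patch
 * splits the guard variables. pdata release-guards only the application
 * data (so the key bytes may legally be copied after it), while key_idx
 * release-guards the signature and the key and is stored last.
 */
struct demo_slot { void *pdata; uint8_t key[16]; };

static void
demo_publish(struct demo_slot *s, uint32_t *key_idx_slot, uint32_t idx,
		void *data, const uint8_t *key)
{
	/* 1. publish application data; guarded by pdata */
	__atomic_store_n(&s->pdata, data, __ATOMIC_RELEASE);
	/* 2. write the key; guarded by key_idx, not by pdata */
	memcpy(s->key, key, sizeof(s->key));
	/* 3. make the entry visible; key_idx guards signature and key */
	__atomic_store_n(key_idx_slot, idx, __ATOMIC_RELEASE);
}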
/* Find an empty slot and insert */
ret = rte_hash_cuckoo_insert_mw(h, prim_bkt, sec_bkt, key, data,
- short_sig, new_idx, &ret_val);
+ short_sig, slot_id, &ret_val);
if (ret == 0)
- return new_idx - 1;
+ return slot_id - 1;
else if (ret == 1) {
enqueue_slot_back(h, cached_free_slots, slot_id);
return ret_val;
/* Primary bucket full, need to make space for new entry */
ret = rte_hash_cuckoo_make_space_mw(h, prim_bkt, sec_bkt, key, data,
- short_sig, prim_bucket_idx, new_idx, &ret_val);
+ short_sig, prim_bucket_idx, slot_id, &ret_val);
if (ret == 0)
- return new_idx - 1;
+ return slot_id - 1;
else if (ret == 1) {
enqueue_slot_back(h, cached_free_slots, slot_id);
return ret_val;
/* Also search secondary bucket to get better occupancy */
ret = rte_hash_cuckoo_make_space_mw(h, sec_bkt, prim_bkt, key, data,
- short_sig, sec_bucket_idx, new_idx, &ret_val);
+ short_sig, sec_bucket_idx, slot_id, &ret_val);
if (ret == 0)
- return new_idx - 1;
+ return slot_id - 1;
else if (ret == 1) {
enqueue_slot_back(h, cached_free_slots, slot_id);
return ret_val;
/* Check if slot is available */
if (likely(cur_bkt->key_idx[i] == EMPTY_SLOT)) {
cur_bkt->sig_current[i] = short_sig;
- /* Store to signature should not leak after
- * the store to key_idx
+ /* Store to signature and key should not
+ * leak after the store to key_idx. i.e.
+ * key_idx is the guard variable for signature
+ * and key.
*/
__atomic_store_n(&cur_bkt->key_idx[i],
- new_idx,
+ slot_id,
__ATOMIC_RELEASE);
__hash_rw_writer_unlock(h);
- return new_idx - 1;
+ return slot_id - 1;
}
}
}
/* Failed to get an empty entry from extendable buckets. Link a new
* extendable bucket. We first get a free bucket from ring.
*/
- if (rte_ring_sc_dequeue(h->free_ext_bkts, &ext_bkt_id) != 0) {
+ if (rte_ring_sc_dequeue_elem(h->free_ext_bkts, &ext_bkt_id,
+ sizeof(uint32_t)) != 0) {
ret = -ENOSPC;
goto failure;
}
- bkt_id = (uint32_t)((uintptr_t)ext_bkt_id) - 1;
/* Use the first location of the new bucket */
- (h->buckets_ext[bkt_id]).sig_current[0] = short_sig;
- /* Store to signature should not leak after
- * the store to key_idx
+ (h->buckets_ext[ext_bkt_id - 1]).sig_current[0] = short_sig;
+ /* Store to signature and key should not leak after
+ * the store to key_idx. i.e. key_idx is the guard variable
+ * for signature and key.
*/
- __atomic_store_n(&(h->buckets_ext[bkt_id]).key_idx[0],
- new_idx,
+ __atomic_store_n(&(h->buckets_ext[ext_bkt_id - 1]).key_idx[0],
+ slot_id,
__ATOMIC_RELEASE);
/* Link the new bucket to sec bucket linked list */
last = rte_hash_get_last_bkt(sec_bkt);
- last->next = &h->buckets_ext[bkt_id];
+ last->next = &h->buckets_ext[ext_bkt_id - 1];
__hash_rw_writer_unlock(h);
- return new_idx - 1;
+ return slot_id - 1;
failure:
__hash_rw_writer_unlock(h);
{
int i;
uint32_t key_idx;
- void *pdata;
struct rte_hash_key *k, *keys = h->key_store;
for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
- key_idx = __atomic_load_n(&bkt->key_idx[i],
+ /* Signature comparison is done before the acquire-load
+ * of the key index to achieve better performance.
+ * This can result in the reader loading an old signature
+ * (which matches) while key_idx has been updated to a
+ * value that belongs to a new key. However, the full
+ * key comparison will ensure that the lookup fails.
+ */
+ if (bkt->sig_current[i] == sig) {
+ key_idx = __atomic_load_n(&bkt->key_idx[i],
__ATOMIC_ACQUIRE);
- if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
- k = (struct rte_hash_key *) ((char *)keys +
- key_idx * h->key_entry_size);
- pdata = __atomic_load_n(&k->pdata,
- __ATOMIC_ACQUIRE);
-
- if (rte_hash_cmp_eq(key, k->key, h) == 0) {
- if (data != NULL)
- *data = pdata;
- /*
- * Return index where key is stored,
- * subtracting the first dummy index
- */
- return key_idx - 1;
+ if (key_idx != EMPTY_SLOT) {
+ k = (struct rte_hash_key *) ((char *)keys +
+ key_idx * h->key_entry_size);
+
+ if (rte_hash_cmp_eq(key, k->key, h) == 0) {
+ if (data != NULL) {
+ *data = __atomic_load_n(
+ &k->pdata,
+ __ATOMIC_ACQUIRE);
+ }
+ /*
+ * Return index where key is stored,
+ * subtracting the first dummy index
+ */
+ return key_idx - 1;
+ }
}
}
}
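/*
 * Reader-side counterpart to demo_publish() above, not part of the patch:
 * acquire-load key_idx, let the full key comparison reject any race with a
 * concurrent writer (including the stale-signature case, with the signature
 * filter omitted here), and acquire-load pdata only once the key has
 * compared equal.
 */
static void *
demo_lookup(struct demo_slot *s, uint32_t *key_idx_slot, const uint8_t *key)
{
	uint32_t idx = __atomic_load_n(key_idx_slot, __ATOMIC_ACQUIRE);

	if (idx == 0)	/* analogue of EMPTY_SLOT */
		return NULL;
	if (memcmp(s->key, key, sizeof(s->key)) != 0)
		return NULL;	/* full compare is authoritative */
	/* load the value only after the key matched */
	return __atomic_load_n(&s->pdata, __ATOMIC_ACQUIRE);
}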
/* Check if key is in primary location */
bkt = &h->buckets[prim_bucket_idx];
ret = search_one_bucket_lf(h, key, short_sig, data, bkt);
- if (ret != -1) {
- __hash_rw_reader_unlock(h);
+ if (ret != -1)
return ret;
- }
/* Calculate secondary hash */
bkt = &h->buckets[sec_bucket_idx];
FOR_EACH_BUCKET(cur_bkt, bkt) {
ret = search_one_bucket_lf(h, key, short_sig,
data, cur_bkt);
- if (ret != -1) {
- __hash_rw_reader_unlock(h);
+ if (ret != -1)
return ret;
- }
}
/* The loads of sig_current in search_one_bucket
/* Cache full, need to free it. */
if (cached_free_slots->len == LCORE_CACHE_SIZE) {
/* Need to enqueue the free slots in global ring. */
- n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
+ n_slots = rte_ring_mp_enqueue_burst_elem(h->free_slots,
cached_free_slots->objs,
+ sizeof(uint32_t),
LCORE_CACHE_SIZE, NULL);
ERR_IF_TRUE((n_slots == 0),
"%s: could not enqueue free slots in global ring\n",
}
/* Put index of new free slot in cache. */
cached_free_slots->objs[cached_free_slots->len] =
- (void *)((uintptr_t)bkt->key_idx[i]);
+ bkt->key_idx[i];
cached_free_slots->len++;
} else {
- rte_ring_sp_enqueue(h->free_slots,
- (void *)((uintptr_t)bkt->key_idx[i]));
+ rte_ring_sp_enqueue_elem(h->free_slots,
+ &bkt->key_idx[i], sizeof(uint32_t));
}
}
*/
h->ext_bkt_to_free[ret] = index;
else
- rte_ring_sp_enqueue(h->free_ext_bkts, (void *)(uintptr_t)index);
+ rte_ring_sp_enqueue_elem(h->free_ext_bkts, &index,
+ sizeof(uint32_t));
}
__hash_rw_writer_unlock(h);
return ret;
uint32_t index = h->ext_bkt_to_free[position];
if (index) {
/* Recycle empty ext bkt to free list. */
- rte_ring_sp_enqueue(h->free_ext_bkts, (void *)(uintptr_t)index);
+ rte_ring_sp_enqueue_elem(h->free_ext_bkts, &index,
+ sizeof(uint32_t));
h->ext_bkt_to_free[position] = 0;
}
}
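/*
 * Sketch of the index convention, not part of the patch: the free-bucket
 * ring carries 1-based bucket indices so that 0 can serve as a "no bucket"
 * sentinel, which both ext_bkt_to_free[] and the recycle path above rely on.
 */
static inline struct rte_hash_bucket *
demo_ext_bkt(const struct rte_hash *h, uint32_t one_based_idx)
{
	return one_based_idx ? &h->buckets_ext[one_based_idx - 1] : NULL;
}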
/* Cache full, need to free it. */
if (cached_free_slots->len == LCORE_CACHE_SIZE) {
/* Need to enqueue the free slots in global ring. */
- n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
+ n_slots = rte_ring_mp_enqueue_burst_elem(h->free_slots,
cached_free_slots->objs,
+ sizeof(uint32_t),
LCORE_CACHE_SIZE, NULL);
RETURN_IF_TRUE((n_slots == 0), -EFAULT);
cached_free_slots->len -= n_slots;
}
/* Put index of new free slot in cache. */
- cached_free_slots->objs[cached_free_slots->len] =
- (void *)((uintptr_t)key_idx);
+ cached_free_slots->objs[cached_free_slots->len] = key_idx;
cached_free_slots->len++;
} else {
- rte_ring_sp_enqueue(h->free_slots,
- (void *)((uintptr_t)key_idx));
+ rte_ring_sp_enqueue_elem(h->free_slots, &key_idx,
+ sizeof(uint32_t));
}
return 0;
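/*
 * Sketch, not part of the patch: the burst elem calls copy n * esize bytes
 * between the ring and the object table and return the count actually
 * transferred, which is why the cache length above is adjusted by n_slots
 * rather than by the requested burst size.
 */
static unsigned int
demo_flush_cache(struct rte_ring *ring, uint32_t *objs, unsigned int n)
{
	/* may enqueue fewer than n if the ring is nearly full */
	return rte_ring_mp_enqueue_burst_elem(ring, objs,
			sizeof(uint32_t), n, NULL);
}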
uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
struct rte_hash_bucket *cur_bkt, *next_bkt;
- void *pdata[RTE_HASH_LOOKUP_BULK_MAX];
uint32_t cnt_b, cnt_a;
/* Prefetch first keys */
(const char *)h->key_store +
key_idx * h->key_entry_size);
- if (key_idx != EMPTY_SLOT)
- pdata[i] = __atomic_load_n(
- &key_slot->pdata,
- __ATOMIC_ACQUIRE);
/*
* If key index is 0, do not compare key,
* as it is checking the dummy slot
!rte_hash_cmp_eq(
key_slot->key, keys[i], h)) {
if (data != NULL)
- data[i] = pdata[i];
+ data[i] = __atomic_load_n(
+ &key_slot->pdata,
+ __ATOMIC_ACQUIRE);
hits |= 1ULL << i;
positions[i] = key_idx - 1;
(const char *)h->key_store +
key_idx * h->key_entry_size);
- if (key_idx != EMPTY_SLOT)
- pdata[i] = __atomic_load_n(
- &key_slot->pdata,
- __ATOMIC_ACQUIRE);
/*
* If key index is 0, do not compare key,
* as it is checking the dummy slot
!rte_hash_cmp_eq(
key_slot->key, keys[i], h)) {
if (data != NULL)
- data[i] = pdata[i];
+ data[i] = __atomic_load_n(
+ &key_slot->pdata,
+ __ATOMIC_ACQUIRE);
hits |= 1ULL << i;
positions[i] = key_idx - 1;
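/*
 * Usage sketch with hypothetical application code: the bulk path above now
 * defers the acquire-load of pdata until a key actually matches, but the
 * caller-visible contract of rte_hash_lookup_bulk_data() is unchanged.
 */
static void
demo_bulk_lookup(const struct rte_hash *h, const void **keys, uint32_t n)
{
	uint64_t hit_mask = 0;
	void *data[RTE_HASH_LOOKUP_BULK_MAX];
	uint32_t i;

	/* n must be <= RTE_HASH_LOOKUP_BULK_MAX */
	if (rte_hash_lookup_bulk_data(h, keys, n, &hit_mask, data) < 0)
		return;
	for (i = 0; i < n; i++) {
		if ((hit_mask & (1ULL << i)) == 0)
			continue;	/* miss: data[i] is undefined */
		/* hit: data[i] holds the value stored with keys[i] */
	}
}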