X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_hash%2Frte_cuckoo_hash.c;h=5f701d5792bfdbd5385cf8612c92904966ecc733;hb=d4d85aa6f13aed654b2cd8344fe30ec7b7aba7c7;hp=3539a10625482e2eba1dc04f83ea8e671b59c92b;hpb=40f8e9c28c4e432a0a1300008a25097042382654;p=dpdk.git diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c index 3539a10625..5f701d5792 100644 --- a/lib/librte_hash/rte_cuckoo_hash.c +++ b/lib/librte_hash/rte_cuckoo_hash.c @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: BSD-3-Clause * Copyright(c) 2010-2016 Intel Corporation + * Copyright(c) 2018 Arm Limited */ #include @@ -12,7 +13,6 @@ #include #include /* for definition of RTE_CACHE_LINE_SIZE */ #include -#include #include #include #include @@ -24,13 +24,22 @@ #include #include #include -#include +#include #include -#include +#include +#include #include "rte_hash.h" #include "rte_cuckoo_hash.h" +/* Mask of all flags supported by this version */ +#define RTE_HASH_EXTRA_FLAGS_MASK (RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT | \ + RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD | \ + RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY | \ + RTE_HASH_EXTRA_FLAGS_EXT_TABLE | \ + RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL | \ + RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF) + #define FOR_EACH_BUCKET(CURRENT_BKT, START_BUCKET) \ for (CURRENT_BKT = START_BUCKET; \ CURRENT_BKT != NULL; \ @@ -52,13 +61,13 @@ rte_hash_find_existing(const char *name) hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list); - rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_read_lock(); TAILQ_FOREACH(te, hash_list, next) { h = (struct rte_hash *) te->data; if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0) break; } - rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_read_unlock(); if (te == NULL) { rte_errno = ENOENT; @@ -135,11 +144,15 @@ rte_hash_create(const struct rte_hash_parameters *params) char ring_name[RTE_RING_NAMESIZE]; char ext_ring_name[RTE_RING_NAMESIZE]; unsigned num_key_slots; - unsigned i; unsigned int hw_trans_mem_support = 0, use_local_cache = 0; unsigned int ext_table_support = 0; unsigned int readwrite_concur_support = 0; unsigned int writer_takes_lock = 0; + unsigned int no_free_on_del = 0; + uint32_t *ext_bkt_to_free = NULL; + uint32_t *tbl_chng_cnt = NULL; + unsigned int readwrite_concur_lf_support = 0; + uint32_t i; rte_hash_function default_hash_func = (rte_hash_function)rte_jhash; @@ -159,6 +172,21 @@ rte_hash_create(const struct rte_hash_parameters *params) return NULL; } + if (params->extra_flag & ~RTE_HASH_EXTRA_FLAGS_MASK) { + rte_errno = EINVAL; + RTE_LOG(ERR, HASH, "rte_hash_create: unsupported extra flags\n"); + return NULL; + } + + /* Validate correct usage of extra options */ + if ((params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) && + (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF)) { + rte_errno = EINVAL; + RTE_LOG(ERR, HASH, "rte_hash_create: choose rw concurrency or " + "rw concurrency lock free\n"); + return NULL; + } + /* Check extra flags field to check extra options. 
*/ if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT) hw_trans_mem_support = 1; @@ -176,6 +204,15 @@ rte_hash_create(const struct rte_hash_parameters *params) if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_EXT_TABLE) ext_table_support = 1; + if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL) + no_free_on_del = 1; + + if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF) { + readwrite_concur_lf_support = 1; + /* Enable not freeing internal memory/index on delete */ + no_free_on_del = 1; + } + /* Store all keys and leave the first entry as a dummy entry for lookup_bulk */ if (use_local_cache) /* @@ -190,8 +227,8 @@ rte_hash_create(const struct rte_hash_parameters *params) snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name); /* Create ring (Dummy slot index is not enqueued) */ - r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots), - params->socket_id, 0); + r = rte_ring_create_elem(ring_name, sizeof(uint32_t), + rte_align32pow2(num_key_slots), params->socket_id, 0); if (r == NULL) { RTE_LOG(ERR, HASH, "memory allocation failed\n"); goto err; @@ -204,7 +241,7 @@ rte_hash_create(const struct rte_hash_parameters *params) if (ext_table_support) { snprintf(ext_ring_name, sizeof(ext_ring_name), "HT_EXT_%s", params->name); - r_ext = rte_ring_create(ext_ring_name, + r_ext = rte_ring_create_elem(ext_ring_name, sizeof(uint32_t), rte_align32pow2(num_buckets + 1), params->socket_id, 0); @@ -217,7 +254,7 @@ rte_hash_create(const struct rte_hash_parameters *params) snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name); - rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_write_lock(); /* guarantee there's no existing: this is normally already checked * by ring creation above */ @@ -272,10 +309,22 @@ rte_hash_create(const struct rte_hash_parameters *params) * for next bucket */ for (i = 1; i <= num_buckets; i++) - rte_ring_sp_enqueue(r_ext, (void *)((uintptr_t) i)); + rte_ring_sp_enqueue_elem(r_ext, &i, sizeof(uint32_t)); + + if (readwrite_concur_lf_support) { + ext_bkt_to_free = rte_zmalloc(NULL, sizeof(uint32_t) * + num_key_slots, 0); + if (ext_bkt_to_free == NULL) { + RTE_LOG(ERR, HASH, "ext bkt to free memory allocation " + "failed\n"); + goto err_unlock; + } + } } - const uint32_t key_entry_size = sizeof(struct rte_hash_key) + params->key_len; + const uint32_t key_entry_size = + RTE_ALIGN(sizeof(struct rte_hash_key) + params->key_len, + KEY_ALIGNMENT); const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots; k = rte_zmalloc_socket(NULL, key_tbl_size, @@ -286,6 +335,14 @@ rte_hash_create(const struct rte_hash_parameters *params) goto err_unlock; } + tbl_chng_cnt = rte_zmalloc_socket(NULL, sizeof(uint32_t), + RTE_CACHE_LINE_SIZE, params->socket_id); + + if (tbl_chng_cnt == NULL) { + RTE_LOG(ERR, HASH, "memory allocation failed\n"); + goto err_unlock; + } + /* * If x86 architecture is used, select appropriate compare function, * which may use x86 intrinsics, otherwise use memcmp @@ -339,7 +396,7 @@ rte_hash_create(const struct rte_hash_parameters *params) default_hash_func = (rte_hash_function)rte_hash_crc; #endif /* Setup hash context */ - snprintf(h->name, sizeof(h->name), "%s", params->name); + strlcpy(h->name, params->name, sizeof(h->name)); h->entries = params->entries; h->key_len = params->key_len; h->key_entry_size = key_entry_size; @@ -354,16 +411,25 @@ rte_hash_create(const struct rte_hash_parameters *params) default_hash_func : params->hash_func; h->key_store = k; h->free_slots = r; + h->ext_bkt_to_free = 
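For reference, a minimal caller-side sketch of requesting the behaviour configured above (lock-free read/write concurrency plus extendable buckets). The table name, sizes and hash function below are illustrative, not taken from this patch.

#include <stdint.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_lcore.h>

static struct rte_hash *
create_lf_table(void)
{
        struct rte_hash_parameters params = {
                .name = "example_lf_table",     /* hypothetical name */
                .entries = 1024,                /* illustrative size */
                .key_len = sizeof(uint32_t),
                .hash_func = rte_jhash,
                .hash_func_init_val = 0,
                .socket_id = (int)rte_socket_id(),
                /* RW_CONCURRENCY_LF also turns on no_free_on_del above, so
                 * deleted key slots must later be returned with
                 * rte_hash_free_key_with_position().
                 */
                .extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF |
                              RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
        };

        /* Fails with rte_errno = EINVAL if RW_CONCURRENCY and
         * RW_CONCURRENCY_LF are both requested (see the check above).
         */
        return rte_hash_create(&params);
}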
ext_bkt_to_free; + h->tbl_chng_cnt = tbl_chng_cnt; + *h->tbl_chng_cnt = 0; h->hw_trans_mem_support = hw_trans_mem_support; h->use_local_cache = use_local_cache; h->readwrite_concur_support = readwrite_concur_support; h->ext_table_support = ext_table_support; h->writer_takes_lock = writer_takes_lock; + h->no_free_on_del = no_free_on_del; + h->readwrite_concur_lf_support = readwrite_concur_lf_support; #if defined(RTE_ARCH_X86) if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE2)) h->sig_cmp_fn = RTE_HASH_COMPARE_SSE; else +#elif defined(RTE_ARCH_ARM64) + if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) + h->sig_cmp_fn = RTE_HASH_COMPARE_NEON; + else #endif h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR; @@ -382,15 +448,15 @@ rte_hash_create(const struct rte_hash_parameters *params) /* Populate free slots ring. Entry zero is reserved for key misses. */ for (i = 1; i < num_key_slots; i++) - rte_ring_sp_enqueue(r, (void *)((uintptr_t) i)); + rte_ring_sp_enqueue_elem(r, &i, sizeof(uint32_t)); te->data = (void *) h; TAILQ_INSERT_TAIL(hash_list, te, next); - rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_write_unlock(); return h; err_unlock: - rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_write_unlock(); err: rte_ring_free(r); rte_ring_free(r_ext); @@ -399,6 +465,8 @@ err: rte_free(buckets); rte_free(buckets_ext); rte_free(k); + rte_free(tbl_chng_cnt); + rte_free(ext_bkt_to_free); return NULL; } @@ -413,7 +481,7 @@ rte_hash_free(struct rte_hash *h) hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list); - rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_write_lock(); /* find out tailq entry */ TAILQ_FOREACH(te, hash_list, next) { @@ -422,13 +490,13 @@ rte_hash_free(struct rte_hash *h) } if (te == NULL) { - rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_write_unlock(); return; } TAILQ_REMOVE(hash_list, te, next); - rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); + rte_mcfg_tailq_write_unlock(); if (h->use_local_cache) rte_free(h->local_free_slots); @@ -439,6 +507,8 @@ rte_hash_free(struct rte_hash *h) rte_free(h->key_store); rte_free(h->buckets); rte_free(h->buckets_ext); + rte_free(h->tbl_chng_cnt); + rte_free(h->ext_bkt_to_free); rte_free(h); rte_free(te); } @@ -450,6 +520,21 @@ rte_hash_hash(const struct rte_hash *h, const void *key) return h->hash_func(key, h->key_len, h->hash_func_init_val); } +int32_t +rte_hash_max_key_id(const struct rte_hash *h) +{ + RETURN_IF_TRUE((h == NULL), -EINVAL); + if (h->use_local_cache) + /* + * Increase number of slots by total number of indices + * that can be stored in the lcore caches + */ + return (h->entries + ((RTE_MAX_LCORE - 1) * + (LCORE_CACHE_SIZE - 1))); + else + return h->entries; +} + int32_t rte_hash_count(const struct rte_hash *h) { @@ -514,7 +599,6 @@ __hash_rw_reader_unlock(const struct rte_hash *h) void rte_hash_reset(struct rte_hash *h) { - void *ptr; uint32_t tot_ring_cnt, i; if (h == NULL) @@ -523,17 +607,16 @@ rte_hash_reset(struct rte_hash *h) __hash_rw_writer_lock(h); memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket)); memset(h->key_store, 0, h->key_entry_size * (h->entries + 1)); + *h->tbl_chng_cnt = 0; - /* clear the free ring */ - while (rte_ring_dequeue(h->free_slots, &ptr) == 0) - rte_pause(); + /* reset the free ring */ + rte_ring_reset(h->free_slots); - /* clear free extendable bucket ring and memory */ + /* flush free extendable bucket ring and memory */ if (h->ext_table_support) { memset(h->buckets_ext, 0, h->num_buckets * sizeof(struct 
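The free-slot and free-extendable-bucket rings above now carry 32-bit indices through the rte_ring "_elem" calls instead of pointer-sized objects. A small standalone sketch of that API as it is used in this file; the ring name and size are made up.

#include <stdint.h>
#include <rte_ring.h>
#include <rte_ring_elem.h>

static int
slot_ring_demo(int socket_id)
{
        uint32_t idx, out;
        /* 4-byte elements; the default ring size must be a power of two. */
        struct rte_ring *r = rte_ring_create_elem("demo_slots",
                        sizeof(uint32_t), 1024, socket_id, 0);

        if (r == NULL)
                return -1;

        /* Indices are enqueued by value, no uintptr_t casts required. */
        for (idx = 1; idx < 8; idx++)
                rte_ring_sp_enqueue_elem(r, &idx, sizeof(uint32_t));

        if (rte_ring_sc_dequeue_elem(r, &out, sizeof(uint32_t)) != 0)
                out = 0;        /* ring empty */

        rte_ring_free(r);
        return 0;
}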
rte_hash_bucket)); - while (rte_ring_dequeue(h->free_ext_bkts, &ptr) == 0) - rte_pause(); + rte_ring_reset(h->free_ext_bkts); } /* Repopulate the free slots ring. Entry zero is reserved for key misses */ @@ -544,13 +627,13 @@ rte_hash_reset(struct rte_hash *h) tot_ring_cnt = h->entries; for (i = 1; i < tot_ring_cnt + 1; i++) - rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i)); + rte_ring_sp_enqueue_elem(h->free_slots, &i, sizeof(uint32_t)); /* Repopulate the free ext bkt ring. */ if (h->ext_table_support) { for (i = 1; i <= h->num_buckets; i++) - rte_ring_sp_enqueue(h->free_ext_bkts, - (void *)((uintptr_t) i)); + rte_ring_sp_enqueue_elem(h->free_ext_bkts, &i, + sizeof(uint32_t)); } if (h->use_local_cache) { @@ -569,16 +652,19 @@ rte_hash_reset(struct rte_hash *h) static inline void enqueue_slot_back(const struct rte_hash *h, struct lcore_cache *cached_free_slots, - void *slot_id) + uint32_t slot_id) { if (h->use_local_cache) { cached_free_slots->objs[cached_free_slots->len] = slot_id; cached_free_slots->len++; } else - rte_ring_sp_enqueue(h->free_slots, slot_id); + rte_ring_sp_enqueue_elem(h->free_slots, &slot_id, + sizeof(uint32_t)); } -/* Search a key from bucket and update its data */ +/* Search a key from bucket and update its data. + * Writer holds the lock before calling this. + */ static inline int32_t search_and_update(const struct rte_hash *h, void *data, const void *key, struct rte_hash_bucket *bkt, uint16_t sig) @@ -591,8 +677,15 @@ search_and_update(const struct rte_hash *h, void *data, const void *key, k = (struct rte_hash_key *) ((char *)keys + bkt->key_idx[i] * h->key_entry_size); if (rte_hash_cmp_eq(key, k->key, h) == 0) { - /* Update data */ - k->pdata = data; + /* The store to application data at *data + * should not leak after the store to pdata + * in the key store. i.e. pdata is the guard + * variable. Release the application data + * to the readers. + */ + __atomic_store_n(&k->pdata, + data, + __ATOMIC_RELEASE); /* * Return index where key is stored, * subtracting the first dummy index @@ -648,7 +741,14 @@ rte_hash_cuckoo_insert_mw(const struct rte_hash *h, /* Check if slot is available */ if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) { prim_bkt->sig_current[i] = sig; - prim_bkt->key_idx[i] = new_idx; + /* Store to signature and key should not + * leak after the store to key_idx. i.e. + * key_idx is the guard variable for signature + * and key. + */ + __atomic_store_n(&prim_bkt->key_idx[i], + new_idx, + __ATOMIC_RELEASE); break; } } @@ -721,27 +821,66 @@ rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h, if (unlikely(&h->buckets[prev_alt_bkt_idx] != curr_bkt)) { /* revert it to empty, otherwise duplicated keys */ - curr_bkt->key_idx[curr_slot] = EMPTY_SLOT; + __atomic_store_n(&curr_bkt->key_idx[curr_slot], + EMPTY_SLOT, + __ATOMIC_RELEASE); __hash_rw_writer_unlock(h); return -1; } + if (h->readwrite_concur_lf_support) { + /* Inform the previous move. The current move need + * not be informed now as the current bucket entry + * is present in both primary and secondary. + * Since there is one writer, load acquires on + * tbl_chng_cnt are not required. + */ + __atomic_store_n(h->tbl_chng_cnt, + *h->tbl_chng_cnt + 1, + __ATOMIC_RELEASE); + /* The store to sig_current should not + * move above the store to tbl_chng_cnt. 
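As background for the ordering comments in this hunk, a self-contained sketch of the publication pattern being used: the key index acts as the guard variable, so the payload is written first and the index is release-stored, while a reader acquire-loads the index before touching the payload. The struct and function names below are invented for illustration.

#include <stdint.h>
#include <stddef.h>

struct slot {
        void *pdata;            /* payload, written before the guard */
        uint32_t key_idx;       /* guard variable, 0 means empty */
};

/* Writer: make the payload visible before publishing the index. */
static void
slot_publish(struct slot *s, void *data, uint32_t idx)
{
        s->pdata = data;
        /* Release: the pdata store cannot be reordered past this store. */
        __atomic_store_n(&s->key_idx, idx, __ATOMIC_RELEASE);
}

/* Reader: acquire-load the guard first; only then is pdata safe to read. */
static void *
slot_read(const struct slot *s)
{
        uint32_t idx = __atomic_load_n(&s->key_idx, __ATOMIC_ACQUIRE);

        if (idx == 0)
                return NULL;
        return s->pdata;
}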
+ */ + __atomic_thread_fence(__ATOMIC_RELEASE); + } + /* Need to swap current/alt sig to allow later * Cuckoo insert to move elements back to its * primary bucket if available */ curr_bkt->sig_current[curr_slot] = prev_bkt->sig_current[prev_slot]; - curr_bkt->key_idx[curr_slot] = - prev_bkt->key_idx[prev_slot]; + /* Release the updated bucket entry */ + __atomic_store_n(&curr_bkt->key_idx[curr_slot], + prev_bkt->key_idx[prev_slot], + __ATOMIC_RELEASE); curr_slot = prev_slot; curr_node = prev_node; curr_bkt = curr_node->bkt; } + if (h->readwrite_concur_lf_support) { + /* Inform the previous move. The current move need + * not be informed now as the current bucket entry + * is present in both primary and secondary. + * Since there is one writer, load acquires on + * tbl_chng_cnt are not required. + */ + __atomic_store_n(h->tbl_chng_cnt, + *h->tbl_chng_cnt + 1, + __ATOMIC_RELEASE); + /* The store to sig_current should not + * move above the store to tbl_chng_cnt. + */ + __atomic_thread_fence(__ATOMIC_RELEASE); + } + curr_bkt->sig_current[curr_slot] = sig; - curr_bkt->key_idx[curr_slot] = new_idx; + /* Release the new bucket entry */ + __atomic_store_n(&curr_bkt->key_idx[curr_slot], + new_idx, + __ATOMIC_RELEASE); __hash_rw_writer_unlock(h); @@ -814,9 +953,8 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key, uint32_t prim_bucket_idx, sec_bucket_idx; struct rte_hash_bucket *prim_bkt, *sec_bkt, *cur_bkt; struct rte_hash_key *new_k, *keys = h->key_store; - void *slot_id = NULL; - void *ext_bkt_id = NULL; - uint32_t new_idx, bkt_id; + uint32_t ext_bkt_id = 0; + uint32_t slot_id; int ret; unsigned n_slots; unsigned lcore_id; @@ -859,8 +997,9 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key, /* Try to get a free slot from the local cache */ if (cached_free_slots->len == 0) { /* Need to get another burst of free slots from global ring */ - n_slots = rte_ring_mc_dequeue_burst(h->free_slots, + n_slots = rte_ring_mc_dequeue_burst_elem(h->free_slots, cached_free_slots->objs, + sizeof(uint32_t), LCORE_CACHE_SIZE, NULL); if (n_slots == 0) { return -ENOSPC; @@ -873,23 +1012,28 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key, cached_free_slots->len--; slot_id = cached_free_slots->objs[cached_free_slots->len]; } else { - if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0) { + if (rte_ring_sc_dequeue_elem(h->free_slots, &slot_id, + sizeof(uint32_t)) != 0) { return -ENOSPC; } } - new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size); - new_idx = (uint32_t)((uintptr_t) slot_id); + new_k = RTE_PTR_ADD(keys, slot_id * h->key_entry_size); + /* The store to application data (by the application) at *data should + * not leak after the store of pdata in the key store. i.e. pdata is + * the guard variable. Release the application data to the readers. 
+ */ + __atomic_store_n(&new_k->pdata, + data, + __ATOMIC_RELEASE); /* Copy key */ - rte_memcpy(new_k->key, key, h->key_len); - new_k->pdata = data; - + memcpy(new_k->key, key, h->key_len); /* Find an empty slot and insert */ ret = rte_hash_cuckoo_insert_mw(h, prim_bkt, sec_bkt, key, data, - short_sig, new_idx, &ret_val); + short_sig, slot_id, &ret_val); if (ret == 0) - return new_idx - 1; + return slot_id - 1; else if (ret == 1) { enqueue_slot_back(h, cached_free_slots, slot_id); return ret_val; @@ -897,9 +1041,9 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key, /* Primary bucket full, need to make space for new entry */ ret = rte_hash_cuckoo_make_space_mw(h, prim_bkt, sec_bkt, key, data, - short_sig, prim_bucket_idx, new_idx, &ret_val); + short_sig, prim_bucket_idx, slot_id, &ret_val); if (ret == 0) - return new_idx - 1; + return slot_id - 1; else if (ret == 1) { enqueue_slot_back(h, cached_free_slots, slot_id); return ret_val; @@ -907,10 +1051,10 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key, /* Also search secondary bucket to get better occupancy */ ret = rte_hash_cuckoo_make_space_mw(h, sec_bkt, prim_bkt, key, data, - short_sig, sec_bucket_idx, new_idx, &ret_val); + short_sig, sec_bucket_idx, slot_id, &ret_val); if (ret == 0) - return new_idx - 1; + return slot_id - 1; else if (ret == 1) { enqueue_slot_back(h, cached_free_slots, slot_id); return ret_val; @@ -947,9 +1091,16 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key, /* Check if slot is available */ if (likely(cur_bkt->key_idx[i] == EMPTY_SLOT)) { cur_bkt->sig_current[i] = short_sig; - cur_bkt->key_idx[i] = new_idx; + /* Store to signature and key should not + * leak after the store to key_idx. i.e. + * key_idx is the guard variable for signature + * and key. + */ + __atomic_store_n(&cur_bkt->key_idx[i], + slot_id, + __ATOMIC_RELEASE); __hash_rw_writer_unlock(h); - return new_idx - 1; + return slot_id - 1; } } } @@ -957,20 +1108,27 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key, /* Failed to get an empty entry from extendable buckets. Link a new * extendable bucket. We first get a free bucket from ring. */ - if (rte_ring_sc_dequeue(h->free_ext_bkts, &ext_bkt_id) != 0) { + if (rte_ring_sc_dequeue_elem(h->free_ext_bkts, &ext_bkt_id, + sizeof(uint32_t)) != 0 || + ext_bkt_id == 0) { ret = -ENOSPC; goto failure; } - bkt_id = (uint32_t)((uintptr_t)ext_bkt_id) - 1; /* Use the first location of the new bucket */ - (h->buckets_ext[bkt_id]).sig_current[0] = short_sig; - (h->buckets_ext[bkt_id]).key_idx[0] = new_idx; + (h->buckets_ext[ext_bkt_id - 1]).sig_current[0] = short_sig; + /* Store to signature and key should not leak after + * the store to key_idx. i.e. key_idx is the guard variable + * for signature and key. 
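A short usage sketch for the insert path above; the flow key and context types, and the helper name, are hypothetical. The -ENOSPC case corresponds to the failure path above, reached when no free key slot or extendable bucket can be found.

#include <errno.h>
#include <stdint.h>
#include <rte_hash.h>

/* Hypothetical key/value types; key_len at create time would be
 * sizeof(struct flow_key).
 */
struct flow_key { uint32_t src_ip, dst_ip; uint16_t src_port, dst_port; };
struct flow_ctx { uint64_t packets; };

static int
flow_table_add(struct rte_hash *h, const struct flow_key *key,
                struct flow_ctx *ctx)
{
        /* Adds the key, or just updates the data pointer if it exists. */
        int ret = rte_hash_add_key_data(h, key, ctx);

        if (ret == -ENOSPC)
                return -1;      /* table and extendable buckets are full */
        return ret;             /* 0 on success, -EINVAL on bad parameters */
}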
+ */ + __atomic_store_n(&(h->buckets_ext[ext_bkt_id - 1]).key_idx[0], + slot_id, + __ATOMIC_RELEASE); /* Link the new bucket to sec bucket linked list */ last = rte_hash_get_last_bkt(sec_bkt); - last->next = &h->buckets_ext[bkt_id]; + last->next = &h->buckets_ext[ext_bkt_id - 1]; __hash_rw_writer_unlock(h); - return new_idx - 1; + return slot_id - 1; failure: __hash_rw_writer_unlock(h); @@ -1021,10 +1179,11 @@ rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data) return ret; } -/* Search one bucket to find the match key */ +/* Search one bucket to find the match key - uses rw lock */ static inline int32_t -search_one_bucket(const struct rte_hash *h, const void *key, uint16_t sig, - void **data, const struct rte_hash_bucket *bkt) +search_one_bucket_l(const struct rte_hash *h, const void *key, + uint16_t sig, void **data, + const struct rte_hash_bucket *bkt) { int i; struct rte_hash_key *k, *keys = h->key_store; @@ -1034,6 +1193,7 @@ search_one_bucket(const struct rte_hash *h, const void *key, uint16_t sig, bkt->key_idx[i] != EMPTY_SLOT) { k = (struct rte_hash_key *) ((char *)keys + bkt->key_idx[i] * h->key_entry_size); + if (rte_hash_cmp_eq(key, k->key, h) == 0) { if (data != NULL) *data = k->pdata; @@ -1048,9 +1208,51 @@ search_one_bucket(const struct rte_hash *h, const void *key, uint16_t sig, return -1; } +/* Search one bucket to find the match key */ static inline int32_t -__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key, - hash_sig_t sig, void **data) +search_one_bucket_lf(const struct rte_hash *h, const void *key, uint16_t sig, + void **data, const struct rte_hash_bucket *bkt) +{ + int i; + uint32_t key_idx; + struct rte_hash_key *k, *keys = h->key_store; + + for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) { + /* Signature comparison is done before the acquire-load + * of the key index to achieve better performance. + * This can result in the reader loading old signature + * (which matches), while the key_idx is updated to a + * value that belongs to a new key. However, the full + * key comparison will ensure that the lookup fails. 
+ */ + if (bkt->sig_current[i] == sig) { + key_idx = __atomic_load_n(&bkt->key_idx[i], + __ATOMIC_ACQUIRE); + if (key_idx != EMPTY_SLOT) { + k = (struct rte_hash_key *) ((char *)keys + + key_idx * h->key_entry_size); + + if (rte_hash_cmp_eq(key, k->key, h) == 0) { + if (data != NULL) { + *data = __atomic_load_n( + &k->pdata, + __ATOMIC_ACQUIRE); + } + /* + * Return index where key is stored, + * subtracting the first dummy index + */ + return key_idx - 1; + } + } + } + } + return -1; +} + +static inline int32_t +__rte_hash_lookup_with_hash_l(const struct rte_hash *h, const void *key, + hash_sig_t sig, void **data) { uint32_t prim_bucket_idx, sec_bucket_idx; struct rte_hash_bucket *bkt, *cur_bkt; @@ -1060,12 +1262,13 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key, short_sig = get_short_sig(sig); prim_bucket_idx = get_prim_bucket_index(h, sig); sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig); + bkt = &h->buckets[prim_bucket_idx]; __hash_rw_reader_lock(h); /* Check if key is in primary location */ - ret = search_one_bucket(h, key, short_sig, data, bkt); + ret = search_one_bucket_l(h, key, short_sig, data, bkt); if (ret != -1) { __hash_rw_reader_unlock(h); return ret; @@ -1075,16 +1278,86 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key, /* Check if key is in secondary location */ FOR_EACH_BUCKET(cur_bkt, bkt) { - ret = search_one_bucket(h, key, short_sig, data, cur_bkt); + ret = search_one_bucket_l(h, key, short_sig, + data, cur_bkt); if (ret != -1) { __hash_rw_reader_unlock(h); return ret; } } + __hash_rw_reader_unlock(h); + + return -ENOENT; +} + +static inline int32_t +__rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key, + hash_sig_t sig, void **data) +{ + uint32_t prim_bucket_idx, sec_bucket_idx; + struct rte_hash_bucket *bkt, *cur_bkt; + uint32_t cnt_b, cnt_a; + int ret; + uint16_t short_sig; + + short_sig = get_short_sig(sig); + prim_bucket_idx = get_prim_bucket_index(h, sig); + sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig); + + do { + /* Load the table change counter before the lookup + * starts. Acquire semantics will make sure that + * loads in search_one_bucket are not hoisted. + */ + cnt_b = __atomic_load_n(h->tbl_chng_cnt, + __ATOMIC_ACQUIRE); + + /* Check if key is in primary location */ + bkt = &h->buckets[prim_bucket_idx]; + ret = search_one_bucket_lf(h, key, short_sig, data, bkt); + if (ret != -1) + return ret; + /* Calculate secondary hash */ + bkt = &h->buckets[sec_bucket_idx]; + + /* Check if key is in secondary location */ + FOR_EACH_BUCKET(cur_bkt, bkt) { + ret = search_one_bucket_lf(h, key, short_sig, + data, cur_bkt); + if (ret != -1) + return ret; + } + + /* The loads of sig_current in search_one_bucket + * should not move below the load from tbl_chng_cnt. + */ + __atomic_thread_fence(__ATOMIC_ACQUIRE); + /* Re-read the table change counter to check if the + * table has changed during search. If yes, re-do + * the search. + * This load should not get hoisted. The load + * acquires on cnt_b, key index in primary bucket + * and key index in secondary bucket will make sure + * that it does not get hoisted. 
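A stripped-down model of the change-counter retry implemented above: the single writer bumps tbl_chng_cnt with release semantics whenever it moves an entry, and a reader only trusts a miss if the counter did not change while it was searching. The table layout and function names below are invented; the real code additionally acquire-loads the per-entry key index as shown in search_one_bucket_lf().

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

#define NB_ENTRIES 8

struct lf_entry { uint32_t key; void *data; };

struct lf_table {
        uint32_t tbl_chng_cnt;  /* bumped by the single writer on each move */
        struct lf_entry entries[NB_ENTRIES];
};

static bool
search_entries(const struct lf_table *t, uint32_t key, void **data)
{
        size_t i;

        for (i = 0; i < NB_ENTRIES; i++) {
                if (t->entries[i].key == key) {
                        *data = t->entries[i].data;
                        return true;
                }
        }
        return false;
}

static bool
lookup_entry(const struct lf_table *t, uint32_t key, void **data)
{
        uint32_t cnt_before, cnt_after;

        do {
                /* Acquire: the searches below cannot be hoisted above this. */
                cnt_before = __atomic_load_n(&t->tbl_chng_cnt,
                                __ATOMIC_ACQUIRE);

                if (search_entries(t, key, data))
                        return true;

                /* Keep the entry loads above the counter re-read. */
                __atomic_thread_fence(__ATOMIC_ACQUIRE);
                cnt_after = __atomic_load_n(&t->tbl_chng_cnt,
                                __ATOMIC_ACQUIRE);
                /* A miss is only final if no move happened meanwhile. */
        } while (cnt_before != cnt_after);

        return false;
}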
+ */ + cnt_a = __atomic_load_n(h->tbl_chng_cnt, + __ATOMIC_ACQUIRE); + } while (cnt_b != cnt_a); + return -ENOENT; } +static inline int32_t +__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key, + hash_sig_t sig, void **data) +{ + if (h->readwrite_concur_lf_support) + return __rte_hash_lookup_with_hash_lf(h, key, sig, data); + else + return __rte_hash_lookup_with_hash_l(h, key, sig, data); +} + int32_t rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key, hash_sig_t sig) @@ -1121,25 +1394,28 @@ remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i) unsigned lcore_id, n_slots; struct lcore_cache *cached_free_slots; - bkt->sig_current[i] = NULL_SIGNATURE; if (h->use_local_cache) { lcore_id = rte_lcore_id(); cached_free_slots = &h->local_free_slots[lcore_id]; /* Cache full, need to free it. */ if (cached_free_slots->len == LCORE_CACHE_SIZE) { /* Need to enqueue the free slots in global ring. */ - n_slots = rte_ring_mp_enqueue_burst(h->free_slots, + n_slots = rte_ring_mp_enqueue_burst_elem(h->free_slots, cached_free_slots->objs, + sizeof(uint32_t), LCORE_CACHE_SIZE, NULL); + ERR_IF_TRUE((n_slots == 0), + "%s: could not enqueue free slots in global ring\n", + __func__); cached_free_slots->len -= n_slots; } /* Put index of new free slot in cache. */ cached_free_slots->objs[cached_free_slots->len] = - (void *)((uintptr_t)bkt->key_idx[i]); + bkt->key_idx[i]; cached_free_slots->len++; } else { - rte_ring_sp_enqueue(h->free_slots, - (void *)((uintptr_t)bkt->key_idx[i])); + rte_ring_sp_enqueue_elem(h->free_slots, + &bkt->key_idx[i], sizeof(uint32_t)); } } @@ -1147,7 +1423,8 @@ remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i) * empty slot. */ static inline void -__rte_hash_compact_ll(struct rte_hash_bucket *cur_bkt, int pos) { +__rte_hash_compact_ll(const struct rte_hash *h, + struct rte_hash_bucket *cur_bkt, int pos) { int i; struct rte_hash_bucket *last_bkt; @@ -1158,40 +1435,69 @@ __rte_hash_compact_ll(struct rte_hash_bucket *cur_bkt, int pos) { for (i = RTE_HASH_BUCKET_ENTRIES - 1; i >= 0; i--) { if (last_bkt->key_idx[i] != EMPTY_SLOT) { - cur_bkt->key_idx[pos] = last_bkt->key_idx[i]; cur_bkt->sig_current[pos] = last_bkt->sig_current[i]; + __atomic_store_n(&cur_bkt->key_idx[pos], + last_bkt->key_idx[i], + __ATOMIC_RELEASE); + if (h->readwrite_concur_lf_support) { + /* Inform the readers that the table has changed + * Since there is one writer, load acquire on + * tbl_chng_cnt is not required. + */ + __atomic_store_n(h->tbl_chng_cnt, + *h->tbl_chng_cnt + 1, + __ATOMIC_RELEASE); + /* The store to sig_current should + * not move above the store to tbl_chng_cnt. + */ + __atomic_thread_fence(__ATOMIC_RELEASE); + } last_bkt->sig_current[i] = NULL_SIGNATURE; - last_bkt->key_idx[i] = EMPTY_SLOT; + __atomic_store_n(&last_bkt->key_idx[i], + EMPTY_SLOT, + __ATOMIC_RELEASE); return; } } } -/* Search one bucket and remove the matched key */ +/* Search one bucket and remove the matched key. + * Writer is expected to hold the lock while calling this + * function. 
+ */ static inline int32_t search_and_remove(const struct rte_hash *h, const void *key, struct rte_hash_bucket *bkt, uint16_t sig, int *pos) { struct rte_hash_key *k, *keys = h->key_store; unsigned int i; - int32_t ret; + uint32_t key_idx; /* Check if key is in bucket */ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) { - if (bkt->sig_current[i] == sig && - bkt->key_idx[i] != EMPTY_SLOT) { + key_idx = __atomic_load_n(&bkt->key_idx[i], + __ATOMIC_ACQUIRE); + if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) { k = (struct rte_hash_key *) ((char *)keys + - bkt->key_idx[i] * h->key_entry_size); + key_idx * h->key_entry_size); if (rte_hash_cmp_eq(key, k->key, h) == 0) { - remove_entry(h, bkt, i); + bkt->sig_current[i] = NULL_SIGNATURE; + /* Free the key store index if + * no_free_on_del is disabled. + */ + if (!h->no_free_on_del) + remove_entry(h, bkt, i); + + __atomic_store_n(&bkt->key_idx[i], + EMPTY_SLOT, + __ATOMIC_RELEASE); - /* Return index where key is stored, + *pos = i; + /* + * Return index where key is stored, * subtracting the first dummy index */ - ret = bkt->key_idx[i] - 1; - bkt->key_idx[i] = EMPTY_SLOT; - *pos = i; - return ret; + return key_idx - 1; } } } @@ -1218,7 +1524,7 @@ __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key, /* look for key in primary bucket */ ret = search_and_remove(h, key, prim_bkt, short_sig, &pos); if (ret != -1) { - __rte_hash_compact_ll(prim_bkt, pos); + __rte_hash_compact_ll(h, prim_bkt, pos); last_bkt = prim_bkt->next; prev_bkt = prim_bkt; goto return_bkt; @@ -1230,7 +1536,7 @@ __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key, FOR_EACH_BUCKET(cur_bkt, sec_bkt) { ret = search_and_remove(h, key, cur_bkt, short_sig, &pos); if (ret != -1) { - __rte_hash_compact_ll(cur_bkt, pos); + __rte_hash_compact_ll(h, cur_bkt, pos); last_bkt = sec_bkt->next; prev_bkt = sec_bkt; goto return_bkt; @@ -1257,11 +1563,25 @@ return_bkt: } /* found empty bucket and recycle */ if (i == RTE_HASH_BUCKET_ENTRIES) { - prev_bkt->next = last_bkt->next = NULL; + prev_bkt->next = NULL; uint32_t index = last_bkt - h->buckets_ext + 1; - rte_ring_sp_enqueue(h->free_ext_bkts, (void *)(uintptr_t)index); + /* Recycle the empty bkt if + * no_free_on_del is disabled. + */ + if (h->no_free_on_del) + /* Store index of an empty ext bkt to be recycled + * on calling rte_hash_del_xxx APIs. + * When lock free read-write concurrency is enabled, + * an empty ext bkt cannot be put into free list + * immediately (as readers might be using it still). + * Hence freeing of the ext bkt is piggy-backed to + * freeing of the key index. + */ + h->ext_bkt_to_free[ret] = index; + else + rte_ring_sp_enqueue_elem(h->free_ext_bkts, &index, + sizeof(uint32_t)); } - __hash_rw_writer_unlock(h); return ret; } @@ -1301,6 +1621,58 @@ rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position, return 0; } +int +rte_hash_free_key_with_position(const struct rte_hash *h, + const int32_t position) +{ + /* Key index where key is stored, adding the first dummy index */ + uint32_t key_idx = position + 1; + + RETURN_IF_TRUE(((h == NULL) || (key_idx == EMPTY_SLOT)), -EINVAL); + + unsigned int lcore_id, n_slots; + struct lcore_cache *cached_free_slots; + const uint32_t total_entries = h->use_local_cache ? 
+ h->entries + (RTE_MAX_LCORE - 1) * (LCORE_CACHE_SIZE - 1) + 1 + : h->entries + 1; + + /* Out of bounds */ + if (key_idx >= total_entries) + return -EINVAL; + if (h->ext_table_support && h->readwrite_concur_lf_support) { + uint32_t index = h->ext_bkt_to_free[position]; + if (index) { + /* Recycle empty ext bkt to free list. */ + rte_ring_sp_enqueue_elem(h->free_ext_bkts, &index, + sizeof(uint32_t)); + h->ext_bkt_to_free[position] = 0; + } + } + + if (h->use_local_cache) { + lcore_id = rte_lcore_id(); + cached_free_slots = &h->local_free_slots[lcore_id]; + /* Cache full, need to free it. */ + if (cached_free_slots->len == LCORE_CACHE_SIZE) { + /* Need to enqueue the free slots in global ring. */ + n_slots = rte_ring_mp_enqueue_burst_elem(h->free_slots, + cached_free_slots->objs, + sizeof(uint32_t), + LCORE_CACHE_SIZE, NULL); + RETURN_IF_TRUE((n_slots == 0), -EFAULT); + cached_free_slots->len -= n_slots; + } + /* Put index of new free slot in cache. */ + cached_free_slots->objs[cached_free_slots->len] = key_idx; + cached_free_slots->len++; + } else { + rte_ring_sp_enqueue_elem(h->free_slots, &key_idx, + sizeof(uint32_t)); + } + + return 0; +} + static inline void compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches, const struct rte_hash_bucket *prim_bkt, @@ -1312,7 +1684,7 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches, /* For match mask the first bit of every two bits indicates the match */ switch (sig_cmp_fn) { -#ifdef RTE_MACHINE_CPUFLAG_SSE2 +#if defined(RTE_MACHINE_CPUFLAG_SSE2) case RTE_HASH_COMPARE_SSE: /* Compare all signatures in the bucket */ *prim_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16( @@ -1325,6 +1697,24 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches, (__m128i const *)sec_bkt->sig_current), _mm_set1_epi16(sig))); break; +#elif defined(RTE_MACHINE_CPUFLAG_NEON) + case RTE_HASH_COMPARE_NEON: { + uint16x8_t vmat, vsig, x; + int16x8_t shift = {-15, -13, -11, -9, -7, -5, -3, -1}; + + vsig = vld1q_dup_u16((uint16_t const *)&sig); + /* Compare all signatures in the primary bucket */ + vmat = vceqq_u16(vsig, + vld1q_u16((uint16_t const *)prim_bkt->sig_current)); + x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift); + *prim_hash_matches = (uint32_t)(vaddvq_u16(x)); + /* Compare all signatures in the secondary bucket */ + vmat = vceqq_u16(vsig, + vld1q_u16((uint16_t const *)sec_bkt->sig_current)); + x = vshlq_u16(vandq_u16(vmat, vdupq_n_u16(0x8000)), shift); + *sec_hash_matches = (uint32_t)(vaddvq_u16(x)); + } + break; #endif default: for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) { @@ -1336,75 +1726,34 @@ compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches, } } -#define PREFETCH_OFFSET 4 static inline void -__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys, - int32_t num_keys, int32_t *positions, - uint64_t *hit_mask, void *data[]) +__bulk_lookup_l(const struct rte_hash *h, const void **keys, + const struct rte_hash_bucket **primary_bkt, + const struct rte_hash_bucket **secondary_bkt, + uint16_t *sig, int32_t num_keys, int32_t *positions, + uint64_t *hit_mask, void *data[]) { uint64_t hits = 0; int32_t i; int32_t ret; - uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX]; - uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX]; - uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX]; - uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX]; - const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX]; - const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX]; 
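With RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF (which forces no_free_on_del), rte_hash_del_key() only unlinks the entry; the key-store slot, and any empty extendable bucket recorded in ext_bkt_to_free above, is recycled later through rte_hash_free_key_with_position(). A hedged usage sketch; wait_for_readers() stands in for whatever RCU or quiescence mechanism the application uses and is not part of this library.

#include <stdint.h>
#include <rte_hash.h>

/* Application-defined: returns once no reader can still be referencing
 * entries that were visible before the delete (e.g. an RCU grace period).
 */
extern void wait_for_readers(void);

static int
flow_table_del(struct rte_hash *h, const void *key)
{
        int32_t pos = rte_hash_del_key(h, key);

        if (pos < 0)
                return pos;     /* -ENOENT or -EINVAL */

        /* Lock-free readers may still hold this slot; defer the free. */
        wait_for_readers();

        /* Now the slot (and any ext bucket tied to it) can be recycled. */
        return rte_hash_free_key_with_position(h, pos);
}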
uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0}; uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0}; struct rte_hash_bucket *cur_bkt, *next_bkt; - /* Prefetch first keys */ - for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++) - rte_prefetch0(keys[i]); - - /* - * Prefetch rest of the keys, calculate primary and - * secondary bucket and prefetch them - */ - for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) { - rte_prefetch0(keys[i + PREFETCH_OFFSET]); - - prim_hash[i] = rte_hash_hash(h, keys[i]); - - sig[i] = get_short_sig(prim_hash[i]); - prim_index[i] = get_prim_bucket_index(h, prim_hash[i]); - sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]); - - primary_bkt[i] = &h->buckets[prim_index[i]]; - secondary_bkt[i] = &h->buckets[sec_index[i]]; - - rte_prefetch0(primary_bkt[i]); - rte_prefetch0(secondary_bkt[i]); - } - - /* Calculate and prefetch rest of the buckets */ - for (; i < num_keys; i++) { - prim_hash[i] = rte_hash_hash(h, keys[i]); - - sig[i] = get_short_sig(prim_hash[i]); - prim_index[i] = get_prim_bucket_index(h, prim_hash[i]); - sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]); - - primary_bkt[i] = &h->buckets[prim_index[i]]; - secondary_bkt[i] = &h->buckets[sec_index[i]]; - - rte_prefetch0(primary_bkt[i]); - rte_prefetch0(secondary_bkt[i]); - } - __hash_rw_reader_lock(h); + /* Compare signatures and prefetch key slot of first hit */ for (i = 0; i < num_keys; i++) { compare_signatures(&prim_hitmask[i], &sec_hitmask[i], - primary_bkt[i], secondary_bkt[i], - sig[i], h->sig_cmp_fn); + primary_bkt[i], secondary_bkt[i], + sig[i], h->sig_cmp_fn); if (prim_hitmask[i]) { uint32_t first_hit = - __builtin_ctzl(prim_hitmask[i]) >> 1; - uint32_t key_idx = primary_bkt[i]->key_idx[first_hit]; + __builtin_ctzl(prim_hitmask[i]) + >> 1; + uint32_t key_idx = + primary_bkt[i]->key_idx[first_hit]; const struct rte_hash_key *key_slot = (const struct rte_hash_key *)( (const char *)h->key_store + @@ -1415,8 +1764,10 @@ __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys, if (sec_hitmask[i]) { uint32_t first_hit = - __builtin_ctzl(sec_hitmask[i]) >> 1; - uint32_t key_idx = secondary_bkt[i]->key_idx[first_hit]; + __builtin_ctzl(sec_hitmask[i]) + >> 1; + uint32_t key_idx = + secondary_bkt[i]->key_idx[first_hit]; const struct rte_hash_key *key_slot = (const struct rte_hash_key *)( (const char *)h->key_store + @@ -1430,18 +1781,22 @@ __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys, positions[i] = -ENOENT; while (prim_hitmask[i]) { uint32_t hit_index = - __builtin_ctzl(prim_hitmask[i]) >> 1; - - uint32_t key_idx = primary_bkt[i]->key_idx[hit_index]; + __builtin_ctzl(prim_hitmask[i]) + >> 1; + uint32_t key_idx = + primary_bkt[i]->key_idx[hit_index]; const struct rte_hash_key *key_slot = (const struct rte_hash_key *)( (const char *)h->key_store + key_idx * h->key_entry_size); + /* * If key index is 0, do not compare key, * as it is checking the dummy slot */ - if (!!key_idx & !rte_hash_cmp_eq(key_slot->key, keys[i], h)) { + if (!!key_idx & + !rte_hash_cmp_eq( + key_slot->key, keys[i], h)) { if (data != NULL) data[i] = key_slot->pdata; @@ -1454,19 +1809,23 @@ __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys, while (sec_hitmask[i]) { uint32_t hit_index = - __builtin_ctzl(sec_hitmask[i]) >> 1; - - uint32_t key_idx = secondary_bkt[i]->key_idx[hit_index]; + __builtin_ctzl(sec_hitmask[i]) + >> 1; + uint32_t key_idx = + secondary_bkt[i]->key_idx[hit_index]; const struct rte_hash_key *key_slot = (const struct rte_hash_key *)( (const 
char *)h->key_store + key_idx * h->key_entry_size); + /* * If key index is 0, do not compare key, * as it is checking the dummy slot */ - if (!!key_idx & !rte_hash_cmp_eq(key_slot->key, keys[i], h)) { + if (!!key_idx & + !rte_hash_cmp_eq( + key_slot->key, keys[i], h)) { if (data != NULL) data[i] = key_slot->pdata; @@ -1476,7 +1835,6 @@ __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys, } sec_hitmask[i] &= ~(3ULL << (hit_index << 1)); } - next_key: continue; } @@ -1496,10 +1854,10 @@ next_key: next_bkt = secondary_bkt[i]->next; FOR_EACH_BUCKET(cur_bkt, next_bkt) { if (data != NULL) - ret = search_one_bucket(h, keys[i], + ret = search_one_bucket_l(h, keys[i], sig[i], &data[i], cur_bkt); else - ret = search_one_bucket(h, keys[i], + ret = search_one_bucket_l(h, keys[i], sig[i], NULL, cur_bkt); if (ret != -1) { positions[i] = ret; @@ -1515,6 +1873,284 @@ next_key: *hit_mask = hits; } +static inline void +__bulk_lookup_lf(const struct rte_hash *h, const void **keys, + const struct rte_hash_bucket **primary_bkt, + const struct rte_hash_bucket **secondary_bkt, + uint16_t *sig, int32_t num_keys, int32_t *positions, + uint64_t *hit_mask, void *data[]) +{ + uint64_t hits = 0; + int32_t i; + int32_t ret; + uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0}; + uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0}; + struct rte_hash_bucket *cur_bkt, *next_bkt; + uint32_t cnt_b, cnt_a; + + for (i = 0; i < num_keys; i++) + positions[i] = -ENOENT; + + do { + /* Load the table change counter before the lookup + * starts. Acquire semantics will make sure that + * loads in compare_signatures are not hoisted. + */ + cnt_b = __atomic_load_n(h->tbl_chng_cnt, + __ATOMIC_ACQUIRE); + + /* Compare signatures and prefetch key slot of first hit */ + for (i = 0; i < num_keys; i++) { + compare_signatures(&prim_hitmask[i], &sec_hitmask[i], + primary_bkt[i], secondary_bkt[i], + sig[i], h->sig_cmp_fn); + + if (prim_hitmask[i]) { + uint32_t first_hit = + __builtin_ctzl(prim_hitmask[i]) + >> 1; + uint32_t key_idx = + primary_bkt[i]->key_idx[first_hit]; + const struct rte_hash_key *key_slot = + (const struct rte_hash_key *)( + (const char *)h->key_store + + key_idx * h->key_entry_size); + rte_prefetch0(key_slot); + continue; + } + + if (sec_hitmask[i]) { + uint32_t first_hit = + __builtin_ctzl(sec_hitmask[i]) + >> 1; + uint32_t key_idx = + secondary_bkt[i]->key_idx[first_hit]; + const struct rte_hash_key *key_slot = + (const struct rte_hash_key *)( + (const char *)h->key_store + + key_idx * h->key_entry_size); + rte_prefetch0(key_slot); + } + } + + /* Compare keys, first hits in primary first */ + for (i = 0; i < num_keys; i++) { + while (prim_hitmask[i]) { + uint32_t hit_index = + __builtin_ctzl(prim_hitmask[i]) + >> 1; + uint32_t key_idx = + __atomic_load_n( + &primary_bkt[i]->key_idx[hit_index], + __ATOMIC_ACQUIRE); + const struct rte_hash_key *key_slot = + (const struct rte_hash_key *)( + (const char *)h->key_store + + key_idx * h->key_entry_size); + + /* + * If key index is 0, do not compare key, + * as it is checking the dummy slot + */ + if (!!key_idx & + !rte_hash_cmp_eq( + key_slot->key, keys[i], h)) { + if (data != NULL) + data[i] = __atomic_load_n( + &key_slot->pdata, + __ATOMIC_ACQUIRE); + + hits |= 1ULL << i; + positions[i] = key_idx - 1; + goto next_key; + } + prim_hitmask[i] &= ~(3ULL << (hit_index << 1)); + } + + while (sec_hitmask[i]) { + uint32_t hit_index = + __builtin_ctzl(sec_hitmask[i]) + >> 1; + uint32_t key_idx = + __atomic_load_n( + &secondary_bkt[i]->key_idx[hit_index], + 
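The hit masks decoded above carry two bits per bucket entry (a side effect of using byte-wise movemask on 16-bit signature compares), which is why the code shifts __builtin_ctzl() right by one and clears bit pairs. A tiny standalone illustration with an invented mask:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        /* Pretend entries 1 and 5 matched: each match sets a pair of bits. */
        uint32_t hitmask = (3U << (1 * 2)) | (3U << (5 * 2));

        while (hitmask) {
                uint32_t entry = __builtin_ctzl(hitmask) >> 1;

                printf("match at bucket entry %u\n", entry);
                hitmask &= ~(3U << (entry * 2)); /* clear this entry's pair */
        }
        return 0;
}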
__ATOMIC_ACQUIRE); + const struct rte_hash_key *key_slot = + (const struct rte_hash_key *)( + (const char *)h->key_store + + key_idx * h->key_entry_size); + + /* + * If key index is 0, do not compare key, + * as it is checking the dummy slot + */ + + if (!!key_idx & + !rte_hash_cmp_eq( + key_slot->key, keys[i], h)) { + if (data != NULL) + data[i] = __atomic_load_n( + &key_slot->pdata, + __ATOMIC_ACQUIRE); + + hits |= 1ULL << i; + positions[i] = key_idx - 1; + goto next_key; + } + sec_hitmask[i] &= ~(3ULL << (hit_index << 1)); + } +next_key: + continue; + } + + /* all found, do not need to go through ext bkt */ + if (hits == ((1ULL << num_keys) - 1)) { + if (hit_mask != NULL) + *hit_mask = hits; + return; + } + /* need to check ext buckets for match */ + if (h->ext_table_support) { + for (i = 0; i < num_keys; i++) { + if ((hits & (1ULL << i)) != 0) + continue; + next_bkt = secondary_bkt[i]->next; + FOR_EACH_BUCKET(cur_bkt, next_bkt) { + if (data != NULL) + ret = search_one_bucket_lf(h, + keys[i], sig[i], + &data[i], cur_bkt); + else + ret = search_one_bucket_lf(h, + keys[i], sig[i], + NULL, cur_bkt); + if (ret != -1) { + positions[i] = ret; + hits |= 1ULL << i; + break; + } + } + } + } + /* The loads of sig_current in compare_signatures + * should not move below the load from tbl_chng_cnt. + */ + __atomic_thread_fence(__ATOMIC_ACQUIRE); + /* Re-read the table change counter to check if the + * table has changed during search. If yes, re-do + * the search. + * This load should not get hoisted. The load + * acquires on cnt_b, primary key index and secondary + * key index will make sure that it does not get + * hoisted. + */ + cnt_a = __atomic_load_n(h->tbl_chng_cnt, + __ATOMIC_ACQUIRE); + } while (cnt_b != cnt_a); + + if (hit_mask != NULL) + *hit_mask = hits; +} + +#define PREFETCH_OFFSET 4 +static inline void +__bulk_lookup_prefetching_loop(const struct rte_hash *h, + const void **keys, int32_t num_keys, + uint16_t *sig, + const struct rte_hash_bucket **primary_bkt, + const struct rte_hash_bucket **secondary_bkt) +{ + int32_t i; + uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX]; + uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX]; + uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX]; + + /* Prefetch first keys */ + for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++) + rte_prefetch0(keys[i]); + + /* + * Prefetch rest of the keys, calculate primary and + * secondary bucket and prefetch them + */ + for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) { + rte_prefetch0(keys[i + PREFETCH_OFFSET]); + + prim_hash[i] = rte_hash_hash(h, keys[i]); + + sig[i] = get_short_sig(prim_hash[i]); + prim_index[i] = get_prim_bucket_index(h, prim_hash[i]); + sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]); + + primary_bkt[i] = &h->buckets[prim_index[i]]; + secondary_bkt[i] = &h->buckets[sec_index[i]]; + + rte_prefetch0(primary_bkt[i]); + rte_prefetch0(secondary_bkt[i]); + } + + /* Calculate and prefetch rest of the buckets */ + for (; i < num_keys; i++) { + prim_hash[i] = rte_hash_hash(h, keys[i]); + + sig[i] = get_short_sig(prim_hash[i]); + prim_index[i] = get_prim_bucket_index(h, prim_hash[i]); + sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]); + + primary_bkt[i] = &h->buckets[prim_index[i]]; + secondary_bkt[i] = &h->buckets[sec_index[i]]; + + rte_prefetch0(primary_bkt[i]); + rte_prefetch0(secondary_bkt[i]); + } +} + + +static inline void +__rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys, + int32_t num_keys, int32_t *positions, + uint64_t *hit_mask, void *data[]) +{ + 
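For completeness, a caller-side sketch of the bulk lookup path built on the helpers above; the burst size and the way hits are consumed are illustrative.

#include <stdint.h>
#include <rte_hash.h>

#define BURST 32        /* must not exceed RTE_HASH_LOOKUP_BULK_MAX (64) */

static void
lookup_burst(const struct rte_hash *h, const void *keys[BURST])
{
        void *data[BURST];
        uint64_t hit_mask = 0;
        int nb_hits, i;

        /* Returns the number of hits; per-key results are in hit_mask/data. */
        nb_hits = rte_hash_lookup_bulk_data(h, keys, BURST, &hit_mask, data);
        if (nb_hits <= 0)
                return;

        for (i = 0; i < BURST; i++) {
                if (hit_mask & (1ULL << i)) {
                        /* data[i] was stored by rte_hash_add_key_data() */
                }
        }
}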
uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX]; + const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX]; + const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX]; + + __bulk_lookup_prefetching_loop(h, keys, num_keys, sig, + primary_bkt, secondary_bkt); + + __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys, + positions, hit_mask, data); +} + +static inline void +__rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys, + int32_t num_keys, int32_t *positions, + uint64_t *hit_mask, void *data[]) +{ + uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX]; + const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX]; + const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX]; + + __bulk_lookup_prefetching_loop(h, keys, num_keys, sig, + primary_bkt, secondary_bkt); + + __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys, + positions, hit_mask, data); +} + +static inline void +__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys, + int32_t num_keys, int32_t *positions, + uint64_t *hit_mask, void *data[]) +{ + if (h->readwrite_concur_lf_support) + __rte_hash_lookup_bulk_lf(h, keys, num_keys, positions, + hit_mask, data); + else + __rte_hash_lookup_bulk_l(h, keys, num_keys, positions, + hit_mask, data); +} + int rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys, uint32_t num_keys, int32_t *positions) @@ -1543,6 +2179,123 @@ rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys, return __builtin_popcountl(*hit_mask); } + +static inline void +__rte_hash_lookup_with_hash_bulk_l(const struct rte_hash *h, + const void **keys, hash_sig_t *prim_hash, + int32_t num_keys, int32_t *positions, + uint64_t *hit_mask, void *data[]) +{ + int32_t i; + uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX]; + uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX]; + uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX]; + const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX]; + const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX]; + + /* + * Prefetch keys, calculate primary and + * secondary bucket and prefetch them + */ + for (i = 0; i < num_keys; i++) { + rte_prefetch0(keys[i]); + + sig[i] = get_short_sig(prim_hash[i]); + prim_index[i] = get_prim_bucket_index(h, prim_hash[i]); + sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]); + + primary_bkt[i] = &h->buckets[prim_index[i]]; + secondary_bkt[i] = &h->buckets[sec_index[i]]; + + rte_prefetch0(primary_bkt[i]); + rte_prefetch0(secondary_bkt[i]); + } + + __bulk_lookup_l(h, keys, primary_bkt, secondary_bkt, sig, num_keys, + positions, hit_mask, data); +} + +static inline void +__rte_hash_lookup_with_hash_bulk_lf(const struct rte_hash *h, + const void **keys, hash_sig_t *prim_hash, + int32_t num_keys, int32_t *positions, + uint64_t *hit_mask, void *data[]) +{ + int32_t i; + uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX]; + uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX]; + uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX]; + const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX]; + const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX]; + + /* + * Prefetch keys, calculate primary and + * secondary bucket and prefetch them + */ + for (i = 0; i < num_keys; i++) { + rte_prefetch0(keys[i]); + + sig[i] = get_short_sig(prim_hash[i]); + prim_index[i] = get_prim_bucket_index(h, prim_hash[i]); + sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]); + + primary_bkt[i] = &h->buckets[prim_index[i]]; + secondary_bkt[i] = 
&h->buckets[sec_index[i]]; + + rte_prefetch0(primary_bkt[i]); + rte_prefetch0(secondary_bkt[i]); + } + + __bulk_lookup_lf(h, keys, primary_bkt, secondary_bkt, sig, num_keys, + positions, hit_mask, data); +} + +static inline void +__rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys, + hash_sig_t *prim_hash, int32_t num_keys, + int32_t *positions, uint64_t *hit_mask, void *data[]) +{ + if (h->readwrite_concur_lf_support) + __rte_hash_lookup_with_hash_bulk_lf(h, keys, prim_hash, + num_keys, positions, hit_mask, data); + else + __rte_hash_lookup_with_hash_bulk_l(h, keys, prim_hash, + num_keys, positions, hit_mask, data); +} + +int +rte_hash_lookup_with_hash_bulk(const struct rte_hash *h, const void **keys, + hash_sig_t *sig, uint32_t num_keys, int32_t *positions) +{ + RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || + (sig == NULL) || (num_keys == 0) || + (num_keys > RTE_HASH_LOOKUP_BULK_MAX) || + (positions == NULL)), -EINVAL); + + __rte_hash_lookup_with_hash_bulk(h, keys, sig, num_keys, + positions, NULL, NULL); + return 0; +} + +int +rte_hash_lookup_with_hash_bulk_data(const struct rte_hash *h, + const void **keys, hash_sig_t *sig, + uint32_t num_keys, uint64_t *hit_mask, void *data[]) +{ + RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || + (sig == NULL) || (num_keys == 0) || + (num_keys > RTE_HASH_LOOKUP_BULK_MAX) || + (hit_mask == NULL)), -EINVAL); + + int32_t positions[num_keys]; + + __rte_hash_lookup_with_hash_bulk(h, keys, sig, num_keys, + positions, hit_mask, data); + + /* Return number of hits */ + return __builtin_popcountl(*hit_mask); +} + int32_t rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next) { @@ -1564,7 +2317,8 @@ rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32 idx = *next % RTE_HASH_BUCKET_ENTRIES; /* If current position is empty, go to the next one */ - while ((position = h->buckets[bucket_idx].key_idx[idx]) == EMPTY_SLOT) { + while ((position = __atomic_load_n(&h->buckets[bucket_idx].key_idx[idx], + __ATOMIC_ACQUIRE)) == EMPTY_SLOT) { (*next)++; /* End of table */ if (*next == total_entries_main)
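Finally, a minimal sketch of walking the table with rte_hash_iterate(), which the acquire-load in the hunk above ties into the lock-free read path; the function name and the empty loop body are placeholders.

#include <stdint.h>
#include <rte_hash.h>

/* Visit every entry currently stored in the table. */
static void
dump_table(const struct rte_hash *h)
{
        const void *key;
        void *data;
        uint32_t next = 0;
        int32_t pos;

        /* Returns the entry position, or -ENOENT once the table is done. */
        while ((pos = rte_hash_iterate(h, &key, &data, &next)) >= 0) {
                /* key/data belong to the entry at 'pos'; consume them here. */
        }
}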