-	uint32_t idx;
-	struct mlx5_hlist_head *first;
-	struct mlx5_hlist_entry *node;
-
-	MLX5_ASSERT(h && entry && cb && ctx);
-	idx = rte_hash_crc_8byte(entry->key, 0) & h->mask;
-	first = &h->heads[idx];
-	/* No need to reuse the lookup function. */
-	LIST_FOREACH(node, first, next) {
-		if (!cb(node, ctx))
-			return -EEXIST;
+	struct mlx5_cache_entry *entry;
+
+	rte_rwlock_read_lock(&list->lock);
+	entry = __cache_lookup(list, ctx, reuse);
+	rte_rwlock_read_unlock(&list->lock);
+	return entry;
+}
+
+struct mlx5_cache_entry *
+mlx5_cache_lookup(struct mlx5_cache_list *list, void *ctx)
+{
+	return cache_lookup(list, ctx, false);
+}
+
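For context, a minimal caller-side sketch of the new API visible in this hunk (mlx5_cache_lookup()/mlx5_cache_register() and struct mlx5_cache_entry): the example_* names and the match context layout are hypothetical, and the list's create/match/remove callbacks are assumed to be supplied when the list is initialized elsewhere in this series.

#include <rte_common.h>	/* container_of() */
#include "mlx5_utils.h"	/* mlx5_cache_* API (header assumed). */

/* Hypothetical lookup context interpreted by the list's match callback. */
struct example_match_ctx {
	uint32_t key;
};

/* Hypothetical resource embedding the cache list entry. */
struct example_resource {
	struct mlx5_cache_entry entry;	/* Cache list linkage. */
	uint32_t key;			/* Caller-specific payload. */
};

static struct example_resource *
example_resource_get(struct mlx5_cache_list *list, uint32_t key)
{
	struct example_match_ctx ctx = { .key = key };
	struct mlx5_cache_entry *entry;

	/* Reuses a matching entry or creates one via the list callbacks. */
	entry = mlx5_cache_register(list, &ctx);
	if (!entry)
		return NULL;
	return container_of(entry, struct example_resource, entry);
}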
+struct mlx5_cache_entry *
+mlx5_cache_register(struct mlx5_cache_list *list, void *ctx)
+{
+	struct mlx5_cache_entry *entry;
+	uint32_t prev_gen_cnt = 0;
+
+	MLX5_ASSERT(list);
+	prev_gen_cnt = __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE);
+	/* Lookup with read lock, reuse if found. */
+	entry = cache_lookup(list, ctx, true);
+	if (entry)
+		return entry;
+	/* Not found, append with write lock - block read from other threads. */
+	rte_rwlock_write_lock(&list->lock);
+	/* If list changed by other threads before lock, search again. */
+	if (prev_gen_cnt != __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE)) {
+		/* Lookup and reuse w/o read lock. */
+		entry = __cache_lookup(list, ctx, true);
+		if (entry)
+			goto done;