struct lcore_cache {
unsigned len; /**< Cache len */
- void *objs[LCORE_CACHE_SIZE]; /**< Cache objects */
+ uint32_t objs[LCORE_CACHE_SIZE]; /**< Cached free-slot indexes (indexes, not pointers) */
} __rte_cache_aligned;
/* Structure that stores key-value pair */
struct lcore_cache *local_free_slots;
/**< Local cache per lcore, storing some indexes of the free slots */
+ /* RCU config */
+ struct rte_hash_rcu_config *hash_rcu_cfg;
+ /**< HASH RCU QSBR configuration structure */
+ struct rte_rcu_qsbr_dq *dq; /**< RCU QSBR defer queue. */
+
/* Fields used in lookup */
uint32_t key_len __rte_cache_aligned;
rte_rwlock_t *readwrite_lock; /**< Read-write lock thread-safety. */
struct rte_hash_bucket *buckets_ext; /**< Extra buckets array */
struct rte_ring *free_ext_bkts; /**< Ring of indexes of free buckets */
+ /* Stores index of an empty ext bkt to be recycled on calling
+ * rte_hash_del_xxx APIs. When lock free read-write concurrency is
+ * enabled, an empty ext bkt cannot be put into free list immediately
+ * (as readers might be using it still). Hence freeing of the ext bkt
+ * is piggy-backed to freeing of the key index.
+ */
+ uint32_t *ext_bkt_to_free;
uint32_t *tbl_chng_cnt;
/**< Indicates if the hash table changed from last read. */
} __rte_cache_aligned;
int prev_slot; /* Parent(slot) in search path */
};
+/** @internal Default maximum number of RCU defer queue entries reclaimed in one pass. */
+#define RTE_HASH_RCU_DQ_RECLAIM_MAX 16
+
#endif