#include "mlx5_utils.h"
+/********************* Hash List **********************/
+
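+/*
+ * Default hooks: zero-allocate h->entry_sz bytes for a new entry and
+ * free the entry on removal. They are installed when the caller
+ * provides neither cb_create nor cb_remove.
+ */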
+static struct mlx5_hlist_entry *
+mlx5_hlist_default_create_cb(struct mlx5_hlist *h, uint64_t key __rte_unused,
+ void *ctx __rte_unused)
+{
+ return mlx5_malloc(MLX5_MEM_ZERO, h->entry_sz, 0, SOCKET_ID_ANY);
+}
+
+static void
+mlx5_hlist_default_remove_cb(struct mlx5_hlist *h __rte_unused,
+ struct mlx5_hlist_entry *entry)
+{
+ mlx5_free(entry);
+}
+
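+/*
+ * Minimal usage sketch (my_match_cb and struct my_tag are hypothetical;
+ * user structs embed struct mlx5_hlist_entry as their first member and
+ * the match callback returns 0 on a hit):
+ *
+ *	h = mlx5_hlist_create("tags", 64, sizeof(struct my_tag), 0,
+ *			      NULL, my_match_cb, NULL);
+ *	entry = mlx5_hlist_register(h, key, NULL);
+ *	...
+ *	mlx5_hlist_unregister(h, entry);
+ *	mlx5_hlist_destroy(h);
+ */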
struct mlx5_hlist *
-mlx5_hlist_create(const char *name, uint32_t size)
+mlx5_hlist_create(const char *name, uint32_t size, uint32_t entry_size,
+ uint32_t flags, mlx5_hlist_create_cb cb_create,
+ mlx5_hlist_match_cb cb_match, mlx5_hlist_remove_cb cb_remove)
{
struct mlx5_hlist *h;
uint32_t act_size;
uint32_t alloc_size;
+ uint32_t i;
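+
+	/* cb_create and cb_remove must be either both set or both NULL. */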
- if (!size)
+ if (!size || !cb_match || (!cb_create ^ !cb_remove))
return NULL;
	/* Align to the next power of 2, 32bits integer is enough now. */
	if (!rte_is_power_of_2(size)) {
		act_size = rte_align32pow2(size);
		DRV_LOG(DEBUG, "Size 0x%" PRIX32 " is not power of 2, "
			"will be aligned to 0x%" PRIX32 ".", size, act_size);
	} else {
		act_size = size;
	}
alloc_size = sizeof(struct mlx5_hlist) +
- sizeof(struct mlx5_hlist_head) * act_size;
+ sizeof(struct mlx5_hlist_bucket) * act_size;
/* Using zmalloc, then no need to initialize the heads. */
	h = mlx5_malloc(MLX5_MEM_ZERO, alloc_size, RTE_CACHE_LINE_SIZE,
			SOCKET_ID_ANY);
	if (!h) {
		DRV_LOG(ERR, "No memory for hash list %s creation.",
			name ? name : "None");
		return NULL;
	}
	snprintf(h->name, MLX5_HLIST_NAMESIZE, "%s", name);
h->table_sz = act_size;
h->mask = act_size - 1;
+ h->entry_sz = entry_size;
+ h->direct_key = !!(flags & MLX5_HLIST_DIRECT_KEY);
+ h->write_most = !!(flags & MLX5_HLIST_WRITE_MOST);
+ h->cb_create = cb_create ? cb_create : mlx5_hlist_default_create_cb;
+ h->cb_match = cb_match;
+ h->cb_remove = cb_remove ? cb_remove : mlx5_hlist_default_remove_cb;
+ for (i = 0; i < act_size; i++)
+ rte_rwlock_init(&h->buckets[i].lock);
DRV_LOG(DEBUG, "Hash list with %s size 0x%" PRIX32 " is created.",
h->name, act_size);
return h;
}
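+
+/*
+ * Single-bucket lookup; the caller must hold the bucket's lock, either
+ * read or write. With reuse set, a hit also takes an extra reference
+ * on the entry.
+ */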
-struct mlx5_hlist_entry *
-mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key)
+static struct mlx5_hlist_entry *
+__hlist_lookup(struct mlx5_hlist *h, uint64_t key, uint32_t idx,
+ void *ctx, bool reuse)
{
- uint32_t idx;
struct mlx5_hlist_head *first;
struct mlx5_hlist_entry *node;
MLX5_ASSERT(h);
- idx = rte_hash_crc_8byte(key, 0) & h->mask;
- first = &h->heads[idx];
+ first = &h->buckets[idx].head;
LIST_FOREACH(node, first, next) {
- if (node->key == key)
- return node;
+ if (!h->cb_match(h, node, key, ctx)) {
+ if (reuse) {
+ __atomic_add_fetch(&node->ref_cnt, 1,
+ __ATOMIC_RELAXED);
+ DRV_LOG(DEBUG, "Hash list %s entry %p "
+ "reuse: %u.",
+ h->name, (void *)node, node->ref_cnt);
+ }
+ break;
+ }
}
- return NULL;
+ return node;
}
-int
-mlx5_hlist_insert(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry)
+static struct mlx5_hlist_entry *
+hlist_lookup(struct mlx5_hlist *h, uint64_t key, uint32_t idx,
+ void *ctx, bool reuse)
{
- uint32_t idx;
- struct mlx5_hlist_head *first;
struct mlx5_hlist_entry *node;
- MLX5_ASSERT(h && entry);
- idx = rte_hash_crc_8byte(entry->key, 0) & h->mask;
- first = &h->heads[idx];
- /* No need to reuse the lookup function. */
- LIST_FOREACH(node, first, next) {
- if (node->key == entry->key)
- return -EEXIST;
- }
- LIST_INSERT_HEAD(first, entry, next);
- return 0;
+ MLX5_ASSERT(h);
+ rte_rwlock_read_lock(&h->buckets[idx].lock);
+ node = __hlist_lookup(h, key, idx, ctx, reuse);
+ rte_rwlock_read_unlock(&h->buckets[idx].lock);
+ return node;
}
struct mlx5_hlist_entry *
-mlx5_hlist_lookup_ex(struct mlx5_hlist *h, uint64_t key,
- mlx5_hlist_match_callback_fn cb, void *ctx)
+mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
uint32_t idx;
- struct mlx5_hlist_head *first;
- struct mlx5_hlist_entry *node;
- MLX5_ASSERT(h && cb && ctx);
- idx = rte_hash_crc_8byte(key, 0) & h->mask;
- first = &h->heads[idx];
- LIST_FOREACH(node, first, next) {
- if (!cb(node, ctx))
- return node;
- }
- return NULL;
+ if (h->direct_key)
+ idx = (uint32_t)(key & h->mask);
+ else
+ idx = rte_hash_crc_8byte(key, 0) & h->mask;
+ return hlist_lookup(h, key, idx, ctx, false);
}
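+
+/*
+ * Lookup with reuse, or create a new entry. Unless the list is
+ * write-most, a read-locked lookup runs first; on a miss the write
+ * lock is taken and the bucket generation counter is re-checked to
+ * catch concurrently inserted entries before a new one is created
+ * with ref_cnt 1.
+ */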
-int
-mlx5_hlist_insert_ex(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry,
- mlx5_hlist_match_callback_fn cb, void *ctx)
+struct mlx5_hlist_entry *
+mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key, void *ctx)
{
uint32_t idx;
struct mlx5_hlist_head *first;
- struct mlx5_hlist_entry *node;
+ struct mlx5_hlist_bucket *b;
+ struct mlx5_hlist_entry *entry;
+ uint32_t prev_gen_cnt = 0;
- MLX5_ASSERT(h && entry && cb && ctx);
- idx = rte_hash_crc_8byte(entry->key, 0) & h->mask;
- first = &h->heads[idx];
- /* No need to reuse the lookup function. */
- LIST_FOREACH(node, first, next) {
- if (!cb(node, ctx))
- return -EEXIST;
+	MLX5_ASSERT(h);
+	if (h->direct_key)
+		idx = (uint32_t)(key & h->mask);
+	else
+		idx = rte_hash_crc_8byte(key, 0) & h->mask;
+ b = &h->buckets[idx];
+ /* Use write lock directly for write-most list. */
+ if (!h->write_most) {
+ prev_gen_cnt = __atomic_load_n(&b->gen_cnt, __ATOMIC_ACQUIRE);
+ entry = hlist_lookup(h, key, idx, ctx, true);
+ if (entry)
+ return entry;
}
+ rte_rwlock_write_lock(&b->lock);
+ /* Check if the list changed by other threads. */
+ if (h->write_most ||
+ prev_gen_cnt != __atomic_load_n(&b->gen_cnt, __ATOMIC_ACQUIRE)) {
+ entry = __hlist_lookup(h, key, idx, ctx, true);
+ if (entry)
+ goto done;
+ }
+ first = &b->head;
+ entry = h->cb_create(h, key, ctx);
+ if (!entry) {
+ rte_errno = ENOMEM;
+ DRV_LOG(DEBUG, "Can't allocate hash list %s entry.", h->name);
+ goto done;
+ }
+ entry->idx = idx;
+ entry->ref_cnt = 1;
LIST_INSERT_HEAD(first, entry, next);
- return 0;
+ __atomic_add_fetch(&b->gen_cnt, 1, __ATOMIC_ACQ_REL);
+ DRV_LOG(DEBUG, "Hash list %s entry %p new: %u.",
+ h->name, (void *)entry, entry->ref_cnt);
+done:
+ rte_rwlock_write_unlock(&b->lock);
+ return entry;
}
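+
+/*
+ * Release one reference. The entry is unlinked from its bucket and
+ * handed to cb_remove only when the last reference is dropped.
+ * Returns 1 if the entry is still in use, 0 once it has been freed.
+ */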
-void
-mlx5_hlist_remove(struct mlx5_hlist *h __rte_unused,
- struct mlx5_hlist_entry *entry)
+int
+mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry)
{
-	MLX5_ASSERT(entry && entry->next.le_prev);
+	uint32_t idx;
+
+	MLX5_ASSERT(entry && entry->ref_cnt && entry->next.le_prev);
+	idx = entry->idx;
+	rte_rwlock_write_lock(&h->buckets[idx].lock);
+ DRV_LOG(DEBUG, "Hash list %s entry %p deref: %u.",
+ h->name, (void *)entry, entry->ref_cnt);
+ if (--entry->ref_cnt) {
+ rte_rwlock_write_unlock(&h->buckets[idx].lock);
+ return 1;
+ }
LIST_REMOVE(entry, next);
/* Set to NULL to get rid of removing action for more than once. */
entry->next.le_prev = NULL;
+ h->cb_remove(h, entry);
+ rte_rwlock_write_unlock(&h->buckets[idx].lock);
+ DRV_LOG(DEBUG, "Hash list %s entry %p removed.",
+ h->name, (void *)entry);
+ return 0;
}
void
-mlx5_hlist_destroy(struct mlx5_hlist *h,
- mlx5_hlist_destroy_callback_fn cb, void *ctx)
+mlx5_hlist_destroy(struct mlx5_hlist *h)
{
uint32_t idx;
struct mlx5_hlist_entry *entry;
MLX5_ASSERT(h);
for (idx = 0; idx < h->table_sz; ++idx) {
- /* no LIST_FOREACH_SAFE, using while instead */
- while (!LIST_EMPTY(&h->heads[idx])) {
- entry = LIST_FIRST(&h->heads[idx]);
+ /* No LIST_FOREACH_SAFE, using while instead. */
+ while (!LIST_EMPTY(&h->buckets[idx].head)) {
+ entry = LIST_FIRST(&h->buckets[idx].head);
LIST_REMOVE(entry, next);
			/*
			 * The owner of the whole element which contains the
			 * data entry is the user, so it's the user's duty to
			 * do the free work. The metadata of the list should
			 * be removed by the caller in advance if needed
			 * (e.g. the data entry is not at the beginning).
			 * Or else the default free function will be used.
			 */
- if (cb)
- cb(entry, ctx);
- else
- mlx5_free(entry);
+ h->cb_remove(h, entry);
}
}
mlx5_free(h);
}
+/********************* Cache list ************************/
+
+static struct mlx5_cache_entry *
+mlx5_clist_default_create_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry __rte_unused,
+ void *ctx __rte_unused)
+{
+ return mlx5_malloc(MLX5_MEM_ZERO, list->entry_sz, 0, SOCKET_ID_ANY);
+}
+
+static void
+mlx5_clist_default_remove_cb(struct mlx5_cache_list *list __rte_unused,
+ struct mlx5_cache_entry *entry)
+{
+ mlx5_free(entry);
+}
+
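+/*
+ * Minimal usage sketch (my_match_cb and struct my_action are
+ * hypothetical; user structs embed struct mlx5_cache_entry as their
+ * first member and the match callback returns 0 on a hit):
+ *
+ *	struct mlx5_cache_list cl;
+ *
+ *	mlx5_cache_list_init(&cl, "actions", sizeof(struct my_action),
+ *			     NULL, NULL, my_match_cb, NULL);
+ *	entry = mlx5_cache_register(&cl, ctx);
+ *	...
+ *	mlx5_cache_unregister(&cl, entry);
+ *	mlx5_cache_list_destroy(&cl);
+ */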
+int
+mlx5_cache_list_init(struct mlx5_cache_list *list, const char *name,
+ uint32_t entry_size, void *ctx,
+ mlx5_cache_create_cb cb_create,
+ mlx5_cache_match_cb cb_match,
+ mlx5_cache_remove_cb cb_remove)
+{
+ MLX5_ASSERT(list);
+ if (!cb_match || (!cb_create ^ !cb_remove))
+ return -1;
+ if (name)
+ snprintf(list->name, sizeof(list->name), "%s", name);
+ list->entry_sz = entry_size;
+ list->ctx = ctx;
+ list->cb_create = cb_create ? cb_create : mlx5_clist_default_create_cb;
+ list->cb_match = cb_match;
+ list->cb_remove = cb_remove ? cb_remove : mlx5_clist_default_remove_cb;
+ rte_rwlock_init(&list->lock);
+ DRV_LOG(DEBUG, "Cache list %s initialized.", list->name);
+ LIST_INIT(&list->head);
+ return 0;
+}
+
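+/*
+ * List-wide lookup; the caller must hold list->lock. With reuse set,
+ * a hit also takes an extra reference on the entry.
+ */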
+static struct mlx5_cache_entry *
+__cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
+{
+ struct mlx5_cache_entry *entry;
+
+ LIST_FOREACH(entry, &list->head, next) {
+ if (list->cb_match(list, entry, ctx))
+ continue;
+ if (reuse) {
+ __atomic_add_fetch(&entry->ref_cnt, 1,
+ __ATOMIC_RELAXED);
+ DRV_LOG(DEBUG, "Cache list %s entry %p ref++: %u.",
+ list->name, (void *)entry, entry->ref_cnt);
+ }
+ break;
+ }
+ return entry;
+}
+
+static struct mlx5_cache_entry *
+cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
+{
+ struct mlx5_cache_entry *entry;
+
+ rte_rwlock_read_lock(&list->lock);
+ entry = __cache_lookup(list, ctx, reuse);
+ rte_rwlock_read_unlock(&list->lock);
+ return entry;
+}
+
+struct mlx5_cache_entry *
+mlx5_cache_lookup(struct mlx5_cache_list *list, void *ctx)
+{
+ return cache_lookup(list, ctx, false);
+}
+
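+/*
+ * Lookup with reuse, or create a new entry. The read-locked lookup
+ * runs first; on a miss the write lock is taken and the list
+ * generation counter is re-checked to catch concurrent insertions
+ * before a new entry is created with one reference.
+ */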
+struct mlx5_cache_entry *
+mlx5_cache_register(struct mlx5_cache_list *list, void *ctx)
+{
+ struct mlx5_cache_entry *entry;
+ uint32_t prev_gen_cnt = 0;
+
+ MLX5_ASSERT(list);
+ prev_gen_cnt = __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE);
+ /* Lookup with read lock, reuse if found. */
+ entry = cache_lookup(list, ctx, true);
+ if (entry)
+ return entry;
+	/* Not found: insert under the write lock, blocking other readers. */
+ rte_rwlock_write_lock(&list->lock);
+ /* If list changed by other threads before lock, search again. */
+ if (prev_gen_cnt != __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE)) {
+ /* Lookup and reuse w/o read lock. */
+ entry = __cache_lookup(list, ctx, true);
+ if (entry)
+ goto done;
+ }
+ entry = list->cb_create(list, entry, ctx);
+ if (!entry) {
+ DRV_LOG(ERR, "Failed to init cache list %s entry %p.",
+ list->name, (void *)entry);
+ goto done;
+ }
+ entry->ref_cnt = 1;
+ LIST_INSERT_HEAD(&list->head, entry, next);
+ __atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_RELEASE);
+ __atomic_add_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
+ DRV_LOG(DEBUG, "Cache list %s entry %p new: %u.",
+ list->name, (void *)entry, entry->ref_cnt);
+done:
+ rte_rwlock_write_unlock(&list->lock);
+ return entry;
+}
+
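+/*
+ * Release one reference; the entry is unlinked and handed to cb_remove
+ * only on the last reference. Returns 1 while still in use, 0 once
+ * freed.
+ */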
+int
+mlx5_cache_unregister(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry)
+{
+ rte_rwlock_write_lock(&list->lock);
+ MLX5_ASSERT(entry && entry->next.le_prev);
+ DRV_LOG(DEBUG, "Cache list %s entry %p ref--: %u.",
+ list->name, (void *)entry, entry->ref_cnt);
+ if (--entry->ref_cnt) {
+ rte_rwlock_write_unlock(&list->lock);
+ return 1;
+ }
+ __atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_ACQUIRE);
+ __atomic_sub_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
+ LIST_REMOVE(entry, next);
+ list->cb_remove(list, entry);
+ rte_rwlock_write_unlock(&list->lock);
+ DRV_LOG(DEBUG, "Cache list %s entry %p removed.",
+ list->name, (void *)entry);
+ return 0;
+}
+
+void
+mlx5_cache_list_destroy(struct mlx5_cache_list *list)
+{
+ struct mlx5_cache_entry *entry;
+
+ MLX5_ASSERT(list);
+	/* No LIST_FOREACH_SAFE, using while instead. */
+ while (!LIST_EMPTY(&list->head)) {
+ entry = LIST_FIRST(&list->head);
+ LIST_REMOVE(entry, next);
+ list->cb_remove(list, entry);
+ DRV_LOG(DEBUG, "Cache list %s entry %p destroyed.",
+ list->name, (void *)entry);
+ }
+ memset(list, 0, sizeof(*list));
+}
+
+uint32_t
+mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list)
+{
+ MLX5_ASSERT(list);
+ return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
+}
+
+/********************* Indexed pool **********************/
+
static inline void
mlx5_ipool_lock(struct mlx5_indexed_pool *pool)
{
	if (pool->cfg.need_lock)
		rte_spinlock_lock(&pool->lock);
}

	MLX5_ASSERT(iidx < mlx5_trunk_size_get(pool, trunk->idx));
rte_bitmap_clear(trunk->bmp, iidx);
p = &trunk->data[iidx * pool->cfg.size];
+	/*
+	 * The ipool index should grow continually from small to big;
+	 * some features, such as metering, only accept a limited number
+	 * of index bits, and a random index with the MSB set may be
+	 * rejected.
+	 */
iidx += mlx5_trunk_idx_offset_get(pool, trunk->idx);
iidx += 1; /* non-zero index. */
trunk->free--;