#include "mlx5_utils.h"

/********************* mlx5 list ************************/

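/*
 * Initialize a mlx5 list: store the callbacks and user context and init
 * the per-lcore cache heads (index RTE_MAX_LCORE holds the global list).
 * Returns 0 on success, -1 when a mandatory callback is missing.
 */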
int
mlx5_list_create(struct mlx5_list *list, const char *name, void *ctx,
		 mlx5_list_create_cb cb_create,
		 mlx5_list_match_cb cb_match,
		 mlx5_list_remove_cb cb_remove,
		 mlx5_list_clone_cb cb_clone,
		 mlx5_list_clone_free_cb cb_clone_free)
{
	int i;

	MLX5_ASSERT(list);
	if (!cb_match || !cb_create || !cb_remove || !cb_clone ||
	    !cb_clone_free)
		return -1;
	if (name)
		snprintf(list->name, sizeof(list->name), "%s", name);
	list->ctx = ctx;
	list->cb_create = cb_create;
	list->cb_match = cb_match;
	list->cb_remove = cb_remove;
	list->cb_clone = cb_clone;
	list->cb_clone_free = cb_clone_free;
	rte_rwlock_init(&list->lock);
	DRV_LOG(DEBUG, "mlx5 list %s initialized.", list->name);
	/* One cache per lcore plus one extra slot for the global list. */
	for (i = 0; i <= RTE_MAX_LCORE; i++)
		LIST_INIT(&list->cache[i].h);
	return 0;
}

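/*
 * Search the cache selected by lcore_index (RTE_MAX_LCORE addresses the
 * global list) for an entry matching ctx. With reuse set, a reference is
 * taken on the match; a local entry whose refcount already dropped to
 * zero is treated as invalid and skipped.
 */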
static struct mlx5_list_entry *
__list_lookup(struct mlx5_list *list, int lcore_index, void *ctx, bool reuse)
{
	struct mlx5_list_entry *entry = LIST_FIRST(&list->cache[lcore_index].h);
	uint32_t ret = 0; /* Keep defined on paths where neither branch sets it. */

	while (entry != NULL) {
		if (list->cb_match(list, entry, ctx) == 0) {
			if (reuse) {
				ret = __atomic_add_fetch(&entry->ref_cnt, 1,
							 __ATOMIC_ACQUIRE) - 1;
				DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
					list->name, (void *)entry,
					entry->ref_cnt);
			} else if (lcore_index < RTE_MAX_LCORE) {
				ret = __atomic_load_n(&entry->ref_cnt,
						      __ATOMIC_ACQUIRE);
			}
			if (likely(ret != 0 || lcore_index == RTE_MAX_LCORE))
				return entry;
			if (reuse && ret == 0)
				entry->ref_cnt--; /* Invalid entry, roll back. */
		}
		entry = LIST_NEXT(entry, next);
	}
	return NULL;
}

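/*
 * Read-only lookup across all lcore caches under the read lock; no
 * reference is taken on the returned entry.
 */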
struct mlx5_list_entry *
mlx5_list_lookup(struct mlx5_list *list, void *ctx)
{
	struct mlx5_list_entry *entry = NULL;
	int i;

	rte_rwlock_read_lock(&list->lock);
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		entry = __list_lookup(list, i, ctx, false);
		if (entry)
			break;
	}
	rte_rwlock_read_unlock(&list->lock);
	return entry;
}

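/*
 * Clone the global entry gentry into the local cache of lcore_index so
 * that subsequent lookups on this lcore are lock-free.
 */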
static struct mlx5_list_entry *
mlx5_list_cache_insert(struct mlx5_list *list, int lcore_index,
		       struct mlx5_list_entry *gentry, void *ctx)
{
	struct mlx5_list_entry *lentry = list->cb_clone(list, gentry, ctx);

	if (unlikely(!lentry))
		return NULL;
	lentry->ref_cnt = 1u;
	lentry->gentry = gentry;
	lentry->lcore_idx = (uint32_t)lcore_index;
	LIST_INSERT_HEAD(&list->cache[lcore_index].h, lentry, next);
	return lentry;
}

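/*
 * Free local entries that other lcores flagged as invalidated: consume
 * the pending inv_cnt and release up to that many zero-reference clones.
 */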
static void
__list_cache_clean(struct mlx5_list *list, int lcore_index)
{
	struct mlx5_list_cache *c = &list->cache[lcore_index];
	struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
	uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
					       __ATOMIC_RELAXED);

	while (inv_cnt != 0 && entry != NULL) {
		struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);

		if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
			LIST_REMOVE(entry, next);
			list->cb_clone_free(list, entry);
			inv_cnt--;
		}
		entry = nentry;
	}
}

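/*
 * Take a reference on the entry matching ctx, creating it if needed.
 * Order: local lcore cache first, then the global list under the read
 * lock, then creation with a generation-counter re-check under the
 * write lock to resolve races with concurrent creators.
 *
 * Typical lifecycle (a sketch only; the "flow_*" callbacks and "spec"
 * context below are hypothetical, not part of this file):
 *
 *	struct mlx5_list flows;
 *	struct mlx5_list_entry *e;
 *
 *	mlx5_list_create(&flows, "flows", NULL, flow_create_cb,
 *			 flow_match_cb, flow_remove_cb,
 *			 flow_clone_cb, flow_clone_free_cb);
 *	e = mlx5_list_register(&flows, &spec);   (reference taken or created)
 *	mlx5_list_unregister(&flows, e);         (reference dropped, maybe freed)
 *	mlx5_list_destroy(&flows);
 */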
struct mlx5_list_entry *
mlx5_list_register(struct mlx5_list *list, void *ctx)
{
	struct mlx5_list_entry *entry, *local_entry;
	volatile uint32_t prev_gen_cnt = 0;
	int lcore_index = rte_lcore_index(rte_lcore_id());

	MLX5_ASSERT(list);
	MLX5_ASSERT(lcore_index < RTE_MAX_LCORE);
	if (unlikely(lcore_index == -1)) {
		rte_errno = ENOTSUP;
		return NULL;
	}
	/* 0. Free entries that were invalidated by other lcores. */
	__list_cache_clean(list, lcore_index);
	/* 1. Lookup in local cache. */
	local_entry = __list_lookup(list, lcore_index, ctx, true);
	if (local_entry)
		return local_entry;
	/* 2. Lookup with read lock on global list, reuse if found. */
	rte_rwlock_read_lock(&list->lock);
	entry = __list_lookup(list, RTE_MAX_LCORE, ctx, true);
	if (likely(entry)) {
		rte_rwlock_read_unlock(&list->lock);
		return mlx5_list_cache_insert(list, lcore_index, entry, ctx);
	}
	prev_gen_cnt = list->gen_cnt;
	rte_rwlock_read_unlock(&list->lock);
	/* 3. Prepare new entry for global list and for cache. */
	entry = list->cb_create(list, entry, ctx);
	if (unlikely(!entry))
		return NULL;
	local_entry = list->cb_clone(list, entry, ctx);
	if (unlikely(!local_entry)) {
		list->cb_remove(list, entry);
		return NULL;
	}
	entry->ref_cnt = 1u;
	local_entry->ref_cnt = 1u;
	local_entry->gentry = entry;
	local_entry->lcore_idx = (uint32_t)lcore_index;
	rte_rwlock_write_lock(&list->lock);
	/* 4. Make sure the same entry was not created before the write lock. */
	if (unlikely(prev_gen_cnt != list->gen_cnt)) {
		struct mlx5_list_entry *oentry = __list_lookup(list,
							       RTE_MAX_LCORE,
							       ctx, true);

		if (unlikely(oentry)) {
			/* 4.5. Found a real race, reuse the old entry. */
			rte_rwlock_write_unlock(&list->lock);
			list->cb_remove(list, entry);
			list->cb_clone_free(list, local_entry);
			return mlx5_list_cache_insert(list, lcore_index, oentry,
						      ctx);
		}
	}
	/* 5. Update lists. */
	LIST_INSERT_HEAD(&list->cache[RTE_MAX_LCORE].h, entry, next);
	list->gen_cnt++;
	rte_rwlock_write_unlock(&list->lock);
	LIST_INSERT_HEAD(&list->cache[lcore_index].h, local_entry, next);
	__atomic_add_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
	DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", list->name,
		(void *)entry, entry->ref_cnt);
	return local_entry;
}

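/*
 * Drop one reference. A local clone is freed directly on its owning
 * lcore, otherwise its owner is notified through inv_cnt. Once the
 * global entry refcount hits zero it is unlinked under the write lock
 * and removed. Returns 1 while references remain, 0 otherwise.
 */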
int
mlx5_list_unregister(struct mlx5_list *list,
		     struct mlx5_list_entry *entry)
{
	struct mlx5_list_entry *gentry = entry->gentry;
	int lcore_idx;

	if (__atomic_sub_fetch(&entry->ref_cnt, 1, __ATOMIC_ACQUIRE) != 0)
		return 1;
	lcore_idx = rte_lcore_index(rte_lcore_id());
	MLX5_ASSERT(lcore_idx < RTE_MAX_LCORE);
	if (entry->lcore_idx == (uint32_t)lcore_idx) {
		LIST_REMOVE(entry, next);
		list->cb_clone_free(list, entry);
	} else if (likely(lcore_idx != -1)) {
		__atomic_add_fetch(&list->cache[entry->lcore_idx].inv_cnt, 1,
				   __ATOMIC_RELAXED);
	} else {
		return 0;
	}
	if (__atomic_sub_fetch(&gentry->ref_cnt, 1, __ATOMIC_ACQUIRE) != 0)
		return 1;
	rte_rwlock_write_lock(&list->lock);
	if (likely(gentry->ref_cnt == 0)) {
		LIST_REMOVE(gentry, next);
		rte_rwlock_write_unlock(&list->lock);
		list->cb_remove(list, gentry);
		__atomic_sub_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
		DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
			list->name, (void *)gentry);
		return 0;
	}
	rte_rwlock_write_unlock(&list->lock);
	return 1;
}

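/*
 * Release everything unconditionally: global entries via cb_remove,
 * local clones via cb_clone_free, ignoring any remaining references.
 */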
void
mlx5_list_destroy(struct mlx5_list *list)
{
	struct mlx5_list_entry *entry;
	int i;

	MLX5_ASSERT(list);
	for (i = 0; i <= RTE_MAX_LCORE; i++) {
		while (!LIST_EMPTY(&list->cache[i].h)) {
			entry = LIST_FIRST(&list->cache[i].h);
			LIST_REMOVE(entry, next);
			if (i == RTE_MAX_LCORE) {
				list->cb_remove(list, entry);
				DRV_LOG(DEBUG, "mlx5 list %s entry %p "
					"destroyed.", list->name,
					(void *)entry);
			} else {
				list->cb_clone_free(list, entry);
			}
		}
	}
	memset(list, 0, sizeof(*list));
}

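/*
 * Number of entries currently in the global list; a relaxed atomic
 * read, so only a snapshot.
 */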
uint32_t
mlx5_list_get_entry_num(struct mlx5_list *list)
{
	MLX5_ASSERT(list);
	return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
}

/********************* Indexed pool **********************/
static inline void