+static inline struct mlx5_list_entry *
+_mlx5_list_register(struct mlx5_list_inconst *l_inconst,
+ struct mlx5_list_const *l_const,
+ void *ctx, int lcore_index)
+{
+ struct mlx5_list_entry *entry = NULL, *local_entry;
+ volatile uint32_t prev_gen_cnt = 0;
+ MLX5_ASSERT(l_inconst);
+ if (unlikely(!l_inconst->cache[lcore_index])) {
+ l_inconst->cache[lcore_index] = mlx5_malloc(0,
+ sizeof(struct mlx5_list_cache),
+ RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+ if (!l_inconst->cache[lcore_index]) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ l_inconst->cache[lcore_index]->inv_cnt = 0;
+ LIST_INIT(&l_inconst->cache[lcore_index]->h);
+ }
+	/* 0. Free entries that were invalidated by other lcores. */
+ __list_cache_clean(l_inconst, l_const, lcore_index);
+ /* 1. Lookup in local cache. */
+ local_entry = __list_lookup(l_inconst, l_const, lcore_index, ctx, true);
+ if (local_entry)
+ return local_entry;
+ if (l_const->lcores_share) {
+ /* 2. Lookup with read lock on global list, reuse if found. */
+ rte_rwlock_read_lock(&l_inconst->lock);
+ entry = __list_lookup(l_inconst, l_const, MLX5_LIST_GLOBAL,
+ ctx, true);
+ if (likely(entry)) {
+ rte_rwlock_read_unlock(&l_inconst->lock);
+ return mlx5_list_cache_insert(l_inconst, l_const,
+ lcore_index,
+ entry, ctx);
+ }
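+		/*
+		 * Snapshot the global generation counter so the write-locked
+		 * check in step 4 can detect entries inserted by other lcores
+		 * after the read lock is dropped.
+		 */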
+ prev_gen_cnt = l_inconst->gen_cnt;
+ rte_rwlock_read_unlock(&l_inconst->lock);
+ }
+ /* 3. Prepare new entry for global list and for cache. */
+ entry = l_const->cb_create(l_const->ctx, ctx);
+ if (unlikely(!entry))
+ return NULL;
+ entry->ref_cnt = 1u;
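+	/*
+	 * Without lcores_share the new entry belongs only to this lcore's
+	 * cache; no clone and no global list insertion is needed.
+	 */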
+ if (!l_const->lcores_share) {
+ entry->lcore_idx = (uint32_t)lcore_index;
+ LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h,
+ entry, next);
+ __atomic_add_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ DRV_LOG(DEBUG, "MLX5 list %s c%d entry %p new: %u.",
+ l_const->name, lcore_index,
+ (void *)entry, entry->ref_cnt);
+ return entry;
+ }
+ local_entry = l_const->cb_clone(l_const->ctx, entry, ctx);
+ if (unlikely(!local_entry)) {
+ l_const->cb_remove(l_const->ctx, entry);
+ return NULL;
+ }
+ local_entry->ref_cnt = 1u;
+ local_entry->gentry = entry;
+ local_entry->lcore_idx = (uint32_t)lcore_index;
+ rte_rwlock_write_lock(&l_inconst->lock);
+ /* 4. Make sure the same entry was not created before the write lock. */
+ if (unlikely(prev_gen_cnt != l_inconst->gen_cnt)) {
+ struct mlx5_list_entry *oentry = __list_lookup(l_inconst,
+ l_const,
+ MLX5_LIST_GLOBAL,
+ ctx, true);
+
+ if (unlikely(oentry)) {
+			/* 4.5. Found a real race; reuse the old entry. */
+ rte_rwlock_write_unlock(&l_inconst->lock);
+ l_const->cb_remove(l_const->ctx, entry);
+ l_const->cb_clone_free(l_const->ctx, local_entry);
+ return mlx5_list_cache_insert(l_inconst, l_const,
+ lcore_index,
+ oentry, ctx);
+ }
+ }
+ /* 5. Update lists. */
+ LIST_INSERT_HEAD(&l_inconst->cache[MLX5_LIST_GLOBAL]->h, entry, next);
+ l_inconst->gen_cnt++;
+ rte_rwlock_write_unlock(&l_inconst->lock);
+ LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, local_entry, next);
+ __atomic_add_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", l_const->name,
+ (void *)entry, entry->ref_cnt);
+ return local_entry;
+}
+
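+/*
+ * Register an entry in the list. Non-EAL threads (lcore index -1) share the
+ * MLX5_LIST_NLCORE cache slot, which is serialized by lcore_lock.
+ */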
+struct mlx5_list_entry *
+mlx5_list_register(struct mlx5_list *list, void *ctx)
+{
+ struct mlx5_list_entry *entry;
+ int lcore_index = rte_lcore_index(rte_lcore_id());
+
+ if (unlikely(lcore_index == -1)) {
+ lcore_index = MLX5_LIST_NLCORE;
+ rte_spinlock_lock(&list->l_const.lcore_lock);
+ }
+ entry = _mlx5_list_register(&list->l_inconst, &list->l_const, ctx,
+ lcore_index);
+ if (unlikely(lcore_index == MLX5_LIST_NLCORE))
+ rte_spinlock_unlock(&list->l_const.lcore_lock);
+ return entry;
+}
+
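+/*
+ * Drop one reference from a list entry. Returns 1 while references remain,
+ * 0 once the entry has been released.
+ */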
+static inline int
+_mlx5_list_unregister(struct mlx5_list_inconst *l_inconst,
+ struct mlx5_list_const *l_const,
+ struct mlx5_list_entry *entry,
+ int lcore_idx)
+{
+ struct mlx5_list_entry *gentry = entry->gentry;
+
+ if (__atomic_sub_fetch(&entry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
+ return 1;
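+	/*
+	 * Only the owning lcore unlinks the entry from its cache directly;
+	 * other lcores just bump inv_cnt so the owner reclaims it lazily
+	 * in __list_cache_clean().
+	 */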
+ if (entry->lcore_idx == (uint32_t)lcore_idx) {
+ LIST_REMOVE(entry, next);
+ if (l_const->lcores_share)
+ l_const->cb_clone_free(l_const->ctx, entry);
+ else
+ l_const->cb_remove(l_const->ctx, entry);
+ } else if (likely(lcore_idx != -1)) {
+ __atomic_add_fetch(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
+ 1, __ATOMIC_RELAXED);
+ } else {
+ return 0;
+ }
+ if (!l_const->lcores_share) {
+ __atomic_sub_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
+ l_const->name, (void *)entry);
+ return 0;
+ }
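+	/* Release the reference on the shared global entry as well. */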
+ if (__atomic_sub_fetch(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) != 0)
+ return 1;
+ rte_rwlock_write_lock(&l_inconst->lock);
+ if (likely(gentry->ref_cnt == 0)) {
+ LIST_REMOVE(gentry, next);
+ rte_rwlock_write_unlock(&l_inconst->lock);
+ l_const->cb_remove(l_const->ctx, gentry);
+ __atomic_sub_fetch(&l_inconst->count, 1, __ATOMIC_RELAXED);
+ DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
+ l_const->name, (void *)gentry);
+ return 0;
+ }
+ rte_rwlock_write_unlock(&l_inconst->lock);
+ return 1;
+}
+
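+/*
+ * Unregister an entry, serializing non-EAL threads through the shared
+ * MLX5_LIST_NLCORE cache slot, same as registration.
+ */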
+int
+mlx5_list_unregister(struct mlx5_list *list,
+ struct mlx5_list_entry *entry)
+{
+ int ret;
+ int lcore_index = rte_lcore_index(rte_lcore_id());
+
+ if (unlikely(lcore_index == -1)) {
+ lcore_index = MLX5_LIST_NLCORE;
+ rte_spinlock_lock(&list->l_const.lcore_lock);
+ }
+ ret = _mlx5_list_unregister(&list->l_inconst, &list->l_const, entry,
+ lcore_index);
+ if (unlikely(lcore_index == MLX5_LIST_NLCORE))
+ rte_spinlock_unlock(&list->l_const.lcore_lock);
+ return ret;
+}
+
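+/*
+ * Release every remaining entry: global entries go through cb_remove(),
+ * per-lcore clones through cb_clone_free(), then the per-lcore caches
+ * allocated on demand at registration are freed.
+ */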
+static void
+mlx5_list_uninit(struct mlx5_list_inconst *l_inconst,
+ struct mlx5_list_const *l_const)
+{
+ struct mlx5_list_entry *entry;
+ int i;
+
+ MLX5_ASSERT(l_inconst);
+ for (i = 0; i < MLX5_LIST_MAX; i++) {
+ if (!l_inconst->cache[i])
+ continue;
+ while (!LIST_EMPTY(&l_inconst->cache[i]->h)) {
+ entry = LIST_FIRST(&l_inconst->cache[i]->h);
+ LIST_REMOVE(entry, next);
+ if (i == MLX5_LIST_GLOBAL) {
+ l_const->cb_remove(l_const->ctx, entry);
+ DRV_LOG(DEBUG, "mlx5 list %s entry %p "
+ "destroyed.", l_const->name,
+ (void *)entry);
+ } else {
+ l_const->cb_clone_free(l_const->ctx, entry);
+ }
+ }
+ if (i != MLX5_LIST_GLOBAL)
+ mlx5_free(l_inconst->cache[i]);
+ }
+}
+
+void
+mlx5_list_destroy(struct mlx5_list *list)
+{
+ mlx5_list_uninit(&list->l_inconst, &list->l_const);
+ mlx5_free(list);
+}
+
+uint32_t
+mlx5_list_get_entry_num(struct mlx5_list *list)
+{
+ MLX5_ASSERT(list);
+ return __atomic_load_n(&list->l_inconst.count, __ATOMIC_RELAXED);
+}
+
+/********************* Hash List **********************/
+