mlx5_free(h);
}
+/********************* Cache list ************************/
+
+static struct mlx5_cache_entry *
+mlx5_clist_default_create_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry __rte_unused,
+ void *ctx __rte_unused)
+{
+ return mlx5_malloc(MLX5_MEM_ZERO, list->entry_sz, 0, SOCKET_ID_ANY);
+}
+
+static void
+mlx5_clist_default_remove_cb(struct mlx5_cache_list *list __rte_unused,
+ struct mlx5_cache_entry *entry)
+{
+ mlx5_free(entry);
+}
+
+int
+mlx5_cache_list_init(struct mlx5_cache_list *list, const char *name,
+ uint32_t entry_size, void *ctx,
+ mlx5_cache_create_cb cb_create,
+ mlx5_cache_match_cb cb_match,
+ mlx5_cache_remove_cb cb_remove)
+{
+ MLX5_ASSERT(list);
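+ /* cb_create and cb_remove must be provided together or not at all. */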
+ if (!cb_match || (!cb_create ^ !cb_remove))
+ return -1;
+ if (name)
+ snprintf(list->name, sizeof(list->name), "%s", name);
+ list->entry_sz = entry_size;
+ list->ctx = ctx;
+ list->cb_create = cb_create ? cb_create : mlx5_clist_default_create_cb;
+ list->cb_match = cb_match;
+ list->cb_remove = cb_remove ? cb_remove : mlx5_clist_default_remove_cb;
+ rte_rwlock_init(&list->lock);
+ DRV_LOG(DEBUG, "Cache list %s initialized.", list->name);
+ LIST_INIT(&list->head);
+ return 0;
+}
+
+static struct mlx5_cache_entry *
+__cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
+{
+ struct mlx5_cache_entry *entry;
+
+ LIST_FOREACH(entry, &list->head, next) {
+ if (list->cb_match(list, entry, ctx))
+ continue;
+ if (reuse) {
+ __atomic_add_fetch(&entry->ref_cnt, 1,
+ __ATOMIC_RELAXED);
+ DRV_LOG(DEBUG, "Cache list %s entry %p ref++: %u.",
+ list->name, (void *)entry, entry->ref_cnt);
+ }
+ break;
+ }
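+ /* LIST_FOREACH leaves entry as NULL when no match was found. */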
+ return entry;
+}
+
+static struct mlx5_cache_entry *
+cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
+{
+ struct mlx5_cache_entry *entry;
+
+ rte_rwlock_read_lock(&list->lock);
+ entry = __cache_lookup(list, ctx, reuse);
+ rte_rwlock_read_unlock(&list->lock);
+ return entry;
+}
+
+struct mlx5_cache_entry *
+mlx5_cache_lookup(struct mlx5_cache_list *list, void *ctx)
+{
+ return cache_lookup(list, ctx, false);
+}
+
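+/*
+ * Reuse or create an entry. Fast path: look up under the read lock and
+ * take a reference if found. Slow path: take the write lock, re-check the
+ * generation counter to catch list modifications that raced with us, then
+ * create and insert a new entry.
+ */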
+struct mlx5_cache_entry *
+mlx5_cache_register(struct mlx5_cache_list *list, void *ctx)
+{
+ struct mlx5_cache_entry *entry;
+ uint32_t prev_gen_cnt = 0;
+
+ MLX5_ASSERT(list);
+ prev_gen_cnt = __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE);
+ /* Lookup with read lock, reuse if found. */
+ entry = cache_lookup(list, ctx, true);
+ if (entry)
+ return entry;
+ /* Not found, insert with write lock - block reads from other threads. */
+ rte_rwlock_write_lock(&list->lock);
+ /* If list changed by other threads before lock, search again. */
+ if (prev_gen_cnt != __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE)) {
+ /* Lookup and reuse w/o read lock. */
+ entry = __cache_lookup(list, ctx, true);
+ if (entry)
+ goto done;
+ }
+ entry = list->cb_create(list, entry, ctx);
+ if (!entry) {
+ DRV_LOG(ERR, "Failed to create cache list %s entry.",
+ list->name);
+ goto done;
+ }
+ entry->ref_cnt = 1;
+ LIST_INSERT_HEAD(&list->head, entry, next);
+ __atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_RELEASE);
+ __atomic_add_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
+ DRV_LOG(DEBUG, "Cache list %s entry %p new: %u.",
+ list->name, (void *)entry, entry->ref_cnt);
+done:
+ rte_rwlock_write_unlock(&list->lock);
+ return entry;
+}
+
+int
+mlx5_cache_unregister(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry)
+{
+ rte_rwlock_write_lock(&list->lock);
+ MLX5_ASSERT(entry && entry->next.le_prev);
+ DRV_LOG(DEBUG, "Cache list %s entry %p ref--: %u.",
+ list->name, (void *)entry, entry->ref_cnt);
+ if (--entry->ref_cnt) {
+ rte_rwlock_write_unlock(&list->lock);
+ return 1;
+ }
+ __atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_ACQUIRE);
+ __atomic_sub_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
+ LIST_REMOVE(entry, next);
+ list->cb_remove(list, entry);
+ rte_rwlock_write_unlock(&list->lock);
+ DRV_LOG(DEBUG, "Cache list %s entry %p removed.",
+ list->name, (void *)entry);
+ return 0;
+}
+
+void
+mlx5_cache_list_destroy(struct mlx5_cache_list *list)
+{
+ struct mlx5_cache_entry *entry;
+
+ MLX5_ASSERT(list);
+ /* No LIST_FOREACH_SAFE, pop entries with a while loop instead. */
+ while (!LIST_EMPTY(&list->head)) {
+ entry = LIST_FIRST(&list->head);
+ LIST_REMOVE(entry, next);
+ list->cb_remove(list, entry);
+ DRV_LOG(DEBUG, "Cache list %s entry %p destroyed.",
+ list->name, (void *)entry);
+ }
+ memset(list, 0, sizeof(*list));
+}
+
+uint32_t
+mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list)
+{
+ MLX5_ASSERT(list);
+ return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
+}
+
/********************* Indexed pool **********************/
static inline void
*/
void mlx5_hlist_destroy(struct mlx5_hlist *h);
+/************************ cache list *****************************/
+
+/** Maximum size of string for naming. */
+#define MLX5_NAME_SIZE 32
+
+struct mlx5_cache_list;
+
+/**
+ * Structure of an entry in the cache list. The user should define its own
+ * struct that embeds this one in order to store the data.
+ */
+struct mlx5_cache_entry {
+ LIST_ENTRY(mlx5_cache_entry) next; /* Entry pointers in the list. */
+ uint32_t ref_cnt; /* Reference count. */
+};
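+
+/*
+ * Illustration only (hypothetical struct, not part of this patch): a user
+ * entry type embeds struct mlx5_cache_entry, typically as its first member
+ * so the callbacks can cast between the two directly:
+ *
+ *   struct example_cached_obj {
+ *       struct mlx5_cache_entry entry; // must come first for the cast
+ *       uint32_t idx;                  // user private data
+ *   };
+ */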
+
+/**
+ * Type of callback function for entry removal.
+ *
+ * @param list
+ * The cache list.
+ * @param entry
+ * The entry in the list.
+ */
+typedef void (*mlx5_cache_remove_cb)(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry);
+
+/**
+ * Type of function for user defined matching.
+ *
+ * @param list
+ * The cache list.
+ * @param entry
+ * The entry in the list.
+ * @param ctx
+ * The pointer to new entry context.
+ *
+ * @return
+ * 0 if matching, non-zero otherwise.
+ */
+typedef int (*mlx5_cache_match_cb)(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry, void *ctx);
+
+/**
+ * Type of function for user defined cache list entry creation.
+ *
+ * @param list
+ * The cache list.
+ * @param entry
+ * The newly allocated entry, NULL if the list entry size is unspecified;
+ * in that case the new entry has to be allocated in the callback and
+ * returned.
+ * @param ctx
+ * The pointer to new entry context.
+ *
+ * @return
+ * Pointer of entry on success, NULL otherwise.
+ */
+typedef struct mlx5_cache_entry *(*mlx5_cache_create_cb)
+ (struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry,
+ void *ctx);
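+
+/*
+ * A minimal sketch of the mandatory match callback (hypothetical names,
+ * assuming the example_cached_obj layout above and a context pointing to
+ * the key being looked up):
+ *
+ *   static int
+ *   example_match_cb(struct mlx5_cache_list *list __rte_unused,
+ *                    struct mlx5_cache_entry *entry, void *ctx)
+ *   {
+ *       struct example_cached_obj *obj = (struct example_cached_obj *)entry;
+ *
+ *       return obj->idx != *(uint32_t *)ctx; // 0 means "match"
+ *   }
+ */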
+
+/**
+ * Linked cache list structure.
+ *
+ * An entry in the cache list is reused if it already exists: its
+ * reference count is increased and the existing entry is returned.
+ *
+ * When an entry is removed from the list, its reference count is
+ * decreased and it is only destroyed once no references remain.
+ *
+ * The linked list cache is designed for caches with a limited number
+ * of entries that are mostly read and rarely modified.
+ *
+ * For caches with a huge number of entries, consider the hash list
+ * cache instead.
+ */
+struct mlx5_cache_list {
+ char name[MLX5_NAME_SIZE]; /**< Name of the cache list. */
+ uint32_t entry_sz; /**< Entry size, 0: use create callback. */
+ rte_rwlock_t lock; /**< Read/write lock. */
+ uint32_t gen_cnt; /**< Generation count, updated on list modification. */
+ uint32_t count; /**< Number of entries in the list. */
+ void *ctx; /**< User context passed to the callbacks. */
+ mlx5_cache_create_cb cb_create; /**< Entry create callback. */
+ mlx5_cache_match_cb cb_match; /**< Entry match callback. */
+ mlx5_cache_remove_cb cb_remove; /**< Entry remove callback. */
+ LIST_HEAD(mlx5_cache_head, mlx5_cache_entry) head;
+};
+
+/**
+ * Initialize a cache list.
+ *
+ * @param list
+ * Pointer to the cache list.
+ * @param name
+ * Name of the cache list.
+ * @param entry_size
+ * Entry size to allocate, 0 to allocate by creation callback.
+ * @param ctx
+ * Pointer to the list context data.
+ * @param cb_create
+ * Callback function for entry create.
+ * @param cb_match
+ * Callback function for entry match.
+ * @param cb_remove
+ * Callback function for entry remove.
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+int mlx5_cache_list_init(struct mlx5_cache_list *list,
+ const char *name, uint32_t entry_size, void *ctx,
+ mlx5_cache_create_cb cb_create,
+ mlx5_cache_match_cb cb_match,
+ mlx5_cache_remove_cb cb_remove);
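+
+/*
+ * Usage sketch (hypothetical, continuing the example above). Passing NULL
+ * for cb_create and cb_remove together with a non-zero entry_size selects
+ * the default zero-filling malloc/free callbacks:
+ *
+ *   struct mlx5_cache_list objs;
+ *
+ *   if (mlx5_cache_list_init(&objs, "example_objs",
+ *                            sizeof(struct example_cached_obj), NULL,
+ *                            NULL, example_match_cb, NULL))
+ *       return -1; // invalid callback combination
+ */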
+
+/**
+ * Search for an entry matching the key.
+ *
+ * The returned result might be destroyed by another thread, so this
+ * function must only be used in the main thread.
+ *
+ * @param list
+ * Pointer to the cache list.
+ * @param ctx
+ * Common context parameter used by entry callback function.
+ *
+ * @return
+ * Pointer of the cache entry if found, NULL otherwise.
+ */
+struct mlx5_cache_entry *mlx5_cache_lookup(struct mlx5_cache_list *list,
+ void *ctx);
+
+/**
+ * Reuse or create an entry in the cache list.
+ *
+ * @param list
+ * Pointer to the cache list.
+ * @param ctx
+ * Common context parameter used by callback function.
+ *
+ * @return
+ * Registered entry on success, NULL otherwise.
+ */
+struct mlx5_cache_entry *mlx5_cache_register(struct mlx5_cache_list *list,
+ void *ctx);
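+
+/*
+ * Lifecycle sketch (hypothetical, continuing the example above). Note that
+ * the default create callback only zero-allocates the entry; a custom
+ * cb_create would typically fill the key from ctx:
+ *
+ *   uint32_t key = 7;
+ *   struct mlx5_cache_entry *e = mlx5_cache_register(&objs, &key);
+ *
+ *   if (e) {
+ *       // ... use the entry; an existing one is reused with ref_cnt++ ...
+ *       mlx5_cache_unregister(&objs, e); // destroyed once ref_cnt hits 0
+ *   }
+ */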
+
+/**
+ * Remove an entry from the cache list.
+ *
+ * The user must guarantee the validity of the entry.
+ *
+ * @param list
+ * Pointer to the cache list.
+ * @param entry
+ * Entry to be removed from the cache list.
+ * @return
+ * 0 on entry removed, 1 on entry still referenced.
+ */
+int mlx5_cache_unregister(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry);
+
+/**
+ * Destroy the cache list.
+ *
+ * @param list
+ * Pointer to the cache list.
+ */
+void mlx5_cache_list_destroy(struct mlx5_cache_list *list);
+
+/**
+ * Get the number of entries in the cache list.
+ *
+ * @param list
+ * Pointer to the cache list.
+ * @return
+ * Number of entries in the cache list.
+ */
+uint32_t
+mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list);
+
+/********************************* indexed pool *************************/
+
/**
* This function allocates non-initialized memory entry from pool.
* In NUMA systems, the memory entry allocated resides on the same