diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index d041b07801..18fe23e4fb 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -3,256 +3,170 @@
  */
 
 #include <rte_malloc.h>
-#include <rte_hash_crc.h>
 
 #include "mlx5_utils.h"
 
-/********************* Hash List **********************/
-static struct mlx5_hlist_entry *
-mlx5_hlist_default_create_cb(struct mlx5_hlist *h, uint64_t key __rte_unused,
-			     void *ctx __rte_unused)
+/********************* Cache list ************************/
+
+static struct mlx5_cache_entry *
+mlx5_clist_default_create_cb(struct mlx5_cache_list *list,
+			     struct mlx5_cache_entry *entry __rte_unused,
+			     void *ctx __rte_unused)
 {
-	return mlx5_malloc(MLX5_MEM_ZERO, h->entry_sz, 0, SOCKET_ID_ANY);
+	return mlx5_malloc(MLX5_MEM_ZERO, list->entry_sz, 0, SOCKET_ID_ANY);
 }
 
 static void
-mlx5_hlist_default_remove_cb(struct mlx5_hlist *h __rte_unused,
-			     struct mlx5_hlist_entry *entry)
+mlx5_clist_default_remove_cb(struct mlx5_cache_list *list __rte_unused,
+			     struct mlx5_cache_entry *entry)
 {
 	mlx5_free(entry);
 }
 
-static int
-mlx5_hlist_default_match_cb(struct mlx5_hlist *h __rte_unused,
-			    struct mlx5_hlist_entry *entry,
-			    uint64_t key, void *ctx __rte_unused)
-{
-	return entry->key != key;
-}
-
-struct mlx5_hlist *
-mlx5_hlist_create(const char *name, uint32_t size, uint32_t entry_size,
-		  uint32_t flags, mlx5_hlist_create_cb cb_create,
-		  mlx5_hlist_match_cb cb_match, mlx5_hlist_remove_cb cb_remove)
-{
-	struct mlx5_hlist *h;
-	uint32_t act_size;
-	uint32_t alloc_size;
-
-	if (!size || (!cb_create ^ !cb_remove))
-		return NULL;
-	/* Align to the next power of 2, 32bits integer is enough now. */
-	if (!rte_is_power_of_2(size)) {
-		act_size = rte_align32pow2(size);
-		DRV_LOG(WARNING, "Size 0x%" PRIX32 " is not power of 2, will "
-			"be aligned to 0x%" PRIX32 ".", size, act_size);
-	} else {
-		act_size = size;
-	}
-	alloc_size = sizeof(struct mlx5_hlist) +
-		     sizeof(struct mlx5_hlist_head) * act_size;
-	/* Using zmalloc, then no need to initialize the heads. */
-	h = mlx5_malloc(MLX5_MEM_ZERO, alloc_size, RTE_CACHE_LINE_SIZE,
-			SOCKET_ID_ANY);
-	if (!h) {
-		DRV_LOG(ERR, "No memory for hash list %s creation",
-			name ? name : "None");
-		return NULL;
-	}
+int
+mlx5_cache_list_init(struct mlx5_cache_list *list, const char *name,
+		     uint32_t entry_size, void *ctx,
+		     mlx5_cache_create_cb cb_create,
+		     mlx5_cache_match_cb cb_match,
+		     mlx5_cache_remove_cb cb_remove)
+{
+	MLX5_ASSERT(list);
+	if (!cb_match || (!cb_create ^ !cb_remove))
+		return -1;
 	if (name)
-		snprintf(h->name, MLX5_HLIST_NAMESIZE, "%s", name);
-	h->table_sz = act_size;
-	h->mask = act_size - 1;
-	h->entry_sz = entry_size;
-	h->direct_key = !!(flags & MLX5_HLIST_DIRECT_KEY);
-	h->write_most = !!(flags & MLX5_HLIST_WRITE_MOST);
-	h->cb_create = cb_create ? cb_create : mlx5_hlist_default_create_cb;
-	h->cb_match = cb_match ? cb_match : mlx5_hlist_default_match_cb;
-	h->cb_remove = cb_remove ? cb_remove : mlx5_hlist_default_remove_cb;
-	rte_rwlock_init(&h->lock);
-	DRV_LOG(DEBUG, "Hash list with %s size 0x%" PRIX32 " is created.",
-		h->name, act_size);
-	return h;
+		snprintf(list->name, sizeof(list->name), "%s", name);
+	list->entry_sz = entry_size;
+	list->ctx = ctx;
+	list->cb_create = cb_create ? cb_create : mlx5_clist_default_create_cb;
+	list->cb_match = cb_match;
+	list->cb_remove = cb_remove ? cb_remove : mlx5_clist_default_remove_cb;
+	rte_rwlock_init(&list->lock);
+	DRV_LOG(DEBUG, "Cache list %s initialized.", list->name);
+	LIST_INIT(&list->head);
+	return 0;
 }
 
-static struct mlx5_hlist_entry *
-__hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx, bool reuse)
+static struct mlx5_cache_entry *
+__cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
 {
-	uint32_t idx;
-	struct mlx5_hlist_head *first;
-	struct mlx5_hlist_entry *node;
+	struct mlx5_cache_entry *entry;
 
-	MLX5_ASSERT(h);
-	if (h->direct_key)
-		idx = (uint32_t)(key & h->mask);
-	else
-		idx = rte_hash_crc_8byte(key, 0) & h->mask;
-	first = &h->heads[idx];
-	LIST_FOREACH(node, first, next) {
-		if (!h->cb_match(h, node, key, ctx)) {
-			if (reuse) {
-				__atomic_add_fetch(&node->ref_cnt, 1,
-						   __ATOMIC_RELAXED);
-				DRV_LOG(DEBUG, "Hash list %s entry %p "
-					"reuse: %u.",
-					h->name, (void *)node, node->ref_cnt);
-			}
-			break;
+	LIST_FOREACH(entry, &list->head, next) {
+		if (list->cb_match(list, entry, ctx))
+			continue;
+		if (reuse) {
+			__atomic_add_fetch(&entry->ref_cnt, 1,
+					   __ATOMIC_RELAXED);
+			DRV_LOG(DEBUG, "Cache list %s entry %p ref++: %u.",
+				list->name, (void *)entry, entry->ref_cnt);
 		}
+		break;
 	}
-	return node;
+	return entry;
 }
 
-static struct mlx5_hlist_entry *
-hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx, bool reuse)
+static struct mlx5_cache_entry *
+cache_lookup(struct mlx5_cache_list *list, void *ctx, bool reuse)
 {
-	struct mlx5_hlist_entry *node;
+	struct mlx5_cache_entry *entry;
 
-	MLX5_ASSERT(h);
-	rte_rwlock_read_lock(&h->lock);
-	node = __hlist_lookup(h, key, ctx, reuse);
-	rte_rwlock_read_unlock(&h->lock);
-	return node;
+	rte_rwlock_read_lock(&list->lock);
+	entry = __cache_lookup(list, ctx, reuse);
+	rte_rwlock_read_unlock(&list->lock);
+	return entry;
 }
 
-struct mlx5_hlist_entry *
-mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key, void *ctx)
+struct mlx5_cache_entry *
+mlx5_cache_lookup(struct mlx5_cache_list *list, void *ctx)
 {
-	return hlist_lookup(h, key, ctx, false);
+	return cache_lookup(list, ctx, false);
 }
 
-struct mlx5_hlist_entry*
-mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key, void *ctx)
+struct mlx5_cache_entry *
+mlx5_cache_register(struct mlx5_cache_list *list, void *ctx)
 {
-	uint32_t idx;
-	struct mlx5_hlist_head *first;
-	struct mlx5_hlist_entry *entry;
+	struct mlx5_cache_entry *entry;
 	uint32_t prev_gen_cnt = 0;
 
-	MLX5_ASSERT(h && entry);
-	/* Use write lock directly for write-most list. */
-	if (!h->write_most) {
-		prev_gen_cnt = __atomic_load_n(&h->gen_cnt, __ATOMIC_ACQUIRE);
-		entry = hlist_lookup(h, key, ctx, true);
-		if (entry)
-			return entry;
-	}
-	rte_rwlock_write_lock(&h->lock);
-	/* Check if the list changed by other threads. */
-	if (h->write_most ||
-	    prev_gen_cnt != __atomic_load_n(&h->gen_cnt, __ATOMIC_ACQUIRE)) {
-		entry = __hlist_lookup(h, key, ctx, true);
+	MLX5_ASSERT(list);
+	prev_gen_cnt = __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE);
+	/* Lookup with read lock, reuse if found. */
+	entry = cache_lookup(list, ctx, true);
+	if (entry)
+		return entry;
+	/* Not found, append with write lock - block read from other threads. */
+	rte_rwlock_write_lock(&list->lock);
+	/* If list changed by other threads before lock, search again. */
+	if (prev_gen_cnt != __atomic_load_n(&list->gen_cnt, __ATOMIC_ACQUIRE)) {
+		/* Lookup and reuse w/o read lock. */
+		entry = __cache_lookup(list, ctx, true);
 		if (entry)
 			goto done;
 	}
-	if (h->direct_key)
-		idx = (uint32_t)(key & h->mask);
-	else
-		idx = rte_hash_crc_8byte(key, 0) & h->mask;
-	first = &h->heads[idx];
-	entry = h->cb_create(h, key, ctx);
+	entry = list->cb_create(list, entry, ctx);
 	if (!entry) {
-		rte_errno = ENOMEM;
-		DRV_LOG(ERR, "Can't allocate hash list %s entry.", h->name);
+		DRV_LOG(ERR, "Failed to init cache list %s entry %p.",
+			list->name, (void *)entry);
 		goto done;
 	}
-	entry->key = key;
 	entry->ref_cnt = 1;
-	LIST_INSERT_HEAD(first, entry, next);
-	__atomic_add_fetch(&h->gen_cnt, 1, __ATOMIC_ACQ_REL);
-	DRV_LOG(DEBUG, "Hash list %s entry %p new: %u.",
-		h->name, (void *)entry, entry->ref_cnt);
+	LIST_INSERT_HEAD(&list->head, entry, next);
+	__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_RELEASE);
+	__atomic_add_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
+	DRV_LOG(DEBUG, "Cache list %s entry %p new: %u.",
+		list->name, (void *)entry, entry->ref_cnt);
 done:
-	rte_rwlock_write_unlock(&h->lock);
+	rte_rwlock_write_unlock(&list->lock);
 	return entry;
 }
 
-struct mlx5_hlist_entry *
-mlx5_hlist_lookup_ex(struct mlx5_hlist *h, uint64_t key,
-		     mlx5_hlist_match_callback_fn cb, void *ctx)
-{
-	uint32_t idx;
-	struct mlx5_hlist_head *first;
-	struct mlx5_hlist_entry *node;
-
-	MLX5_ASSERT(h && cb && ctx);
-	idx = rte_hash_crc_8byte(key, 0) & h->mask;
-	first = &h->heads[idx];
-	LIST_FOREACH(node, first, next) {
-		if (!cb(node, ctx))
-			return node;
-	}
-	return NULL;
-}
-
 int
-mlx5_hlist_insert_ex(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry,
-		     mlx5_hlist_match_callback_fn cb, void *ctx)
+mlx5_cache_unregister(struct mlx5_cache_list *list,
+		      struct mlx5_cache_entry *entry)
 {
-	uint32_t idx;
-	struct mlx5_hlist_head *first;
-	struct mlx5_hlist_entry *node;
-
-	MLX5_ASSERT(h && entry && cb && ctx);
-	idx = rte_hash_crc_8byte(entry->key, 0) & h->mask;
-	first = &h->heads[idx];
-	/* No need to reuse the lookup function. */
-	LIST_FOREACH(node, first, next) {
-		if (!cb(node, ctx))
-			return -EEXIST;
-	}
-	LIST_INSERT_HEAD(first, entry, next);
-	return 0;
-}
-
-int
-mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry)
-{
-	rte_rwlock_write_lock(&h->lock);
-	MLX5_ASSERT(entry && entry->ref_cnt && entry->next.le_prev);
-	DRV_LOG(DEBUG, "Hash list %s entry %p deref: %u.",
-		h->name, (void *)entry, entry->ref_cnt);
+	rte_rwlock_write_lock(&list->lock);
+	MLX5_ASSERT(entry && entry->next.le_prev);
+	DRV_LOG(DEBUG, "Cache list %s entry %p ref--: %u.",
+		list->name, (void *)entry, entry->ref_cnt);
 	if (--entry->ref_cnt) {
-		rte_rwlock_write_unlock(&h->lock);
+		rte_rwlock_write_unlock(&list->lock);
 		return 1;
 	}
+	__atomic_add_fetch(&list->gen_cnt, 1, __ATOMIC_ACQUIRE);
+	__atomic_sub_fetch(&list->count, 1, __ATOMIC_ACQUIRE);
 	LIST_REMOVE(entry, next);
-	/* Set to NULL to get rid of removing action for more than once. */
-	entry->next.le_prev = NULL;
-	h->cb_remove(h, entry);
-	rte_rwlock_write_unlock(&h->lock);
-	DRV_LOG(DEBUG, "Hash list %s entry %p removed.",
-		h->name, (void *)entry);
+	list->cb_remove(list, entry);
+	rte_rwlock_write_unlock(&list->lock);
+	DRV_LOG(DEBUG, "Cache list %s entry %p removed.",
+		list->name, (void *)entry);
 	return 0;
 }
 
 void
-mlx5_hlist_destroy(struct mlx5_hlist *h)
+mlx5_cache_list_destroy(struct mlx5_cache_list *list)
 {
-	uint32_t idx;
-	struct mlx5_hlist_entry *entry;
+	struct mlx5_cache_entry *entry;
 
-	MLX5_ASSERT(h);
-	for (idx = 0; idx < h->table_sz; ++idx) {
-		/* No LIST_FOREACH_SAFE, using while instead. */
-		while (!LIST_EMPTY(&h->heads[idx])) {
-			entry = LIST_FIRST(&h->heads[idx]);
-			LIST_REMOVE(entry, next);
-			/*
-			 * The owner of whole element which contains data entry
-			 * is the user, so it's the user's duty to do the clean
-			 * up and the free work because someone may not put the
-			 * hlist entry at the beginning(suggested to locate at
-			 * the beginning). Or else the default free function
-			 * will be used.
-			 */
-			h->cb_remove(h, entry);
-		}
+	MLX5_ASSERT(list);
+	/* no LIST_FOREACH_SAFE, using while instead */
+	while (!LIST_EMPTY(&list->head)) {
+		entry = LIST_FIRST(&list->head);
+		LIST_REMOVE(entry, next);
+		list->cb_remove(list, entry);
+		DRV_LOG(DEBUG, "Cache list %s entry %p destroyed.",
+			list->name, (void *)entry);
 	}
-	mlx5_free(h);
+	memset(list, 0, sizeof(*list));
+}
+
+uint32_t
+mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list)
+{
+	MLX5_ASSERT(list);
+	return __atomic_load_n(&list->count, __ATOMIC_RELAXED);
 }
 
 /********************* Indexed pool **********************/
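
Usage note (editorial, not part of the patch): the diff replaces the hashed buckets with a single rwlock-protected, reference-counted linked list in which matching is fully delegated to the caller's cb_match callback (returning 0 on a match, as __cache_lookup() expects). Below is a minimal sketch of how a caller might embed struct mlx5_cache_entry in its own type; the "tunnel" entry structure and callback names are hypothetical illustrations, not code from dpdk.git.

struct tunnel_entry {
	struct mlx5_cache_entry entry;	/* Must stay first so casts work. */
	uint32_t tunnel_id;		/* User payload guarded by the list. */
};

static int
tunnel_match_cb(struct mlx5_cache_list *list __rte_unused,
		struct mlx5_cache_entry *entry, void *ctx)
{
	/* Return 0 on match, non-zero otherwise. */
	return ((struct tunnel_entry *)entry)->tunnel_id != *(uint32_t *)ctx;
}

static struct mlx5_cache_entry *
tunnel_create_cb(struct mlx5_cache_list *list,
		 struct mlx5_cache_entry *entry __rte_unused, void *ctx)
{
	/* Called under the write lock when no matching entry was found. */
	struct tunnel_entry *te = mlx5_malloc(MLX5_MEM_ZERO, list->entry_sz,
					      0, SOCKET_ID_ANY);

	if (!te)
		return NULL;
	te->tunnel_id = *(uint32_t *)ctx;
	return &te->entry;
}

static void
tunnel_remove_cb(struct mlx5_cache_list *list __rte_unused,
		 struct mlx5_cache_entry *entry)
{
	mlx5_free(entry);
}

/* Hypothetical caller; create/remove callbacks must be given in pairs. */
static int
tunnel_demo(void)
{
	struct mlx5_cache_list tunnels;
	uint32_t id = 42;
	struct mlx5_cache_entry *e;

	if (mlx5_cache_list_init(&tunnels, "tunnels",
				 sizeof(struct tunnel_entry), NULL,
				 tunnel_create_cb, tunnel_match_cb,
				 tunnel_remove_cb))
		return -1;
	e = mlx5_cache_register(&tunnels, &id);	/* Created, ref_cnt == 1. */
	if (!e)
		return -1;
	mlx5_cache_unregister(&tunnels, e);	/* Last deref frees the entry. */
	mlx5_cache_list_destroy(&tunnels);
	return 0;
}

Because every insertion and removal takes the write lock and bumps gen_cnt, mlx5_cache_register() can probe optimistically under the read lock first and re-scan after taking the write lock only if gen_cnt moved, keeping the common lookup path read-only.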