From: Olivier Matz
Date: Wed, 6 Apr 2016 13:28:00 +0000 (+0200)
Subject: hash: fix race condition at creation
X-Git-Tag: spdx-start~7064
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=5d7bfb7337e917c8d97b43a75264789b01b6cfce;p=dpdk.git

hash: fix race condition at creation

To avoid a race condition while creating a new hash object, the list
has to be locked before the lookup, and released only once the new
object is added to the list.

As the same lock is taken inside rte_ring_create(), move the ring
creation to the beginning of the function and only take the lock after
the ring is created, to avoid a deadlock.

Fixes: 48a3991196 ("hash: replace with cuckoo hash implementation")

Signed-off-by: Olivier Matz
Acked-by: Pablo de Lara
---

diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index b00cc12428..7b7d1f85e7 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -295,19 +295,48 @@ rte_hash_create(const struct rte_hash_parameters *params)
         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
                 hw_trans_mem_support = 1;
 
+        /* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
+        if (hw_trans_mem_support)
+                /*
+                 * Increase number of slots by total number of indices
+                 * that can be stored in the lcore caches
+                 * except for the first cache
+                 */
+                num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
+                                        LCORE_CACHE_SIZE + 1;
+        else
+                num_key_slots = params->entries + 1;
+
+        snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
+        r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
+                        params->socket_id, 0);
+        if (r == NULL) {
+                RTE_LOG(ERR, HASH, "memory allocation failed\n");
+                goto err;
+        }
+
         snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
 
-        /* Guarantee there's no existing */
-        h = rte_hash_find_existing(params->name);
-        if (h != NULL) {
+        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+        /* guarantee there's no existing: this is normally already checked
+         * by ring creation above */
+        TAILQ_FOREACH(te, hash_list, next) {
+                h = (struct rte_hash *) te->data;
+                if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
+                        break;
+        }
+        h = NULL;
+        if (te != NULL) {
                 rte_errno = EEXIST;
-                return NULL;
+                te = NULL;
+                goto err_unlock;
         }
 
         te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
         if (te == NULL) {
                 RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
-                goto err;
+                goto err_unlock;
         }
 
         h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
@@ -315,7 +344,7 @@ rte_hash_create(const struct rte_hash_parameters *params)
 
         if (h == NULL) {
                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
-                goto err;
+                goto err_unlock;
         }
 
         const uint32_t num_buckets = rte_align32pow2(params->entries)
@@ -327,23 +356,10 @@ rte_hash_create(const struct rte_hash_parameters *params)
 
         if (buckets == NULL) {
                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
-                goto err;
+                goto err_unlock;
         }
 
         const uint32_t key_entry_size = sizeof(struct rte_hash_key) + params->key_len;
-
-        /* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
-        if (hw_trans_mem_support)
-                /*
-                 * Increase number of slots by total number of indices
-                 * that can be stored in the lcore caches
-                 * except for the first cache
-                 */
-                num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
-                                        LCORE_CACHE_SIZE + 1;
-        else
-                num_key_slots = params->entries + 1;
-
         const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;
 
         k = rte_zmalloc_socket(NULL, key_tbl_size,
@@ -351,7 +367,7 @@ rte_hash_create(const struct rte_hash_parameters *params)
 
         if (k == NULL) {
                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
-                goto err;
+                goto err_unlock;
         }
 
         /*
@@ -393,14 +409,6 @@ rte_hash_create(const struct rte_hash_parameters *params)
                 h->cmp_jump_table_idx = KEY_OTHER_BYTES;
 #endif
 
-        snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
-        r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
-                        params->socket_id, 0);
-        if (r == NULL) {
-                RTE_LOG(ERR, HASH, "memory allocation failed\n");
-                goto err;
-        }
-
         if (hw_trans_mem_support) {
                 h->local_free_slots = rte_zmalloc_socket(NULL,
                                 sizeof(struct lcore_cache) * RTE_MAX_LCORE,
@@ -427,13 +435,15 @@ rte_hash_create(const struct rte_hash_parameters *params)
         for (i = 1; i < params->entries + 1; i++)
                 rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));
 
-        rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
         te->data = (void *) h;
         TAILQ_INSERT_TAIL(hash_list, te, next);
         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
 
         return h;
+err_unlock:
+        rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
 err:
+        rte_ring_free(r);
         rte_free(te);
         rte_free(h);
         rte_free(buckets);
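
For reference, below is a minimal, self-contained sketch of the create-under-lock
pattern this patch applies. It is not DPDK code: the names (my_obj, my_obj_create,
make_backing_store, obj_lock) are illustrative, and a POSIX rwlock stands in for
the EAL tailq rwlock. The point it demonstrates is the same as in the patch: a
sub-allocation that itself takes the registry lock is created first, and the lock
is then held from the duplicate-name lookup until the new object is linked into
the list, so two concurrent creators cannot both miss the lookup and insert
duplicates.

/* Sketch only: illustrative names, not DPDK APIs. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

struct my_obj {
        char name[32];
        void *backing;                  /* stands in for the rte_ring */
        TAILQ_ENTRY(my_obj) next;
};

static TAILQ_HEAD(, my_obj) obj_list = TAILQ_HEAD_INITIALIZER(obj_list);
static pthread_rwlock_t obj_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-ins for rte_ring_create()/rte_ring_free(); in DPDK these take the
 * same tailq lock internally, which is why the patch creates the ring
 * before locking. */
static void *make_backing_store(void) { return malloc(1); }
static void free_backing_store(void *b) { free(b); }

static struct my_obj *
my_obj_create(const char *name)
{
        struct my_obj *obj = NULL, *it;
        void *backing;

        /* 1) Create the lock-taking sub-resource before taking obj_lock,
         *    otherwise the non-recursive lock would deadlock. */
        backing = make_backing_store();
        if (backing == NULL)
                return NULL;

        /* 2) Lock before the duplicate-name lookup... */
        pthread_rwlock_wrlock(&obj_lock);
        TAILQ_FOREACH(it, &obj_list, next) {
                if (strcmp(it->name, name) == 0) {
                        errno = EEXIST;
                        goto err_unlock;
                }
        }

        obj = calloc(1, sizeof(*obj));
        if (obj == NULL)
                goto err_unlock;
        snprintf(obj->name, sizeof(obj->name), "%s", name);
        obj->backing = backing;

        /* 3) ...and release it only once the object is in the list. */
        TAILQ_INSERT_TAIL(&obj_list, obj, next);
        pthread_rwlock_unlock(&obj_lock);
        return obj;

err_unlock:
        pthread_rwlock_unlock(&obj_lock);
        free_backing_store(backing);
        free(obj);
        return NULL;
}

int main(void)
{
        return my_obj_create("HT_demo") != NULL ? 0 : 1;
}

The error path mirrors the patch: a single unlock label runs before the common
cleanup, and the backing resource created before the lock is freed there as well.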