Now that freeing a ring is possible, a hash table's internal
ring can be freed when the table itself is freed.
Therefore, when a new table is created with the same name as a
previously freed table, there is no need to look up (and reuse)
the previously allocated ring.
Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
struct rte_hash_list *hash_list;
struct rte_ring *r = NULL;
char hash_name[RTE_HASH_NAMESIZE];
struct rte_hash_list *hash_list;
struct rte_ring *r = NULL;
char hash_name[RTE_HASH_NAMESIZE];
void *buckets = NULL;
char ring_name[RTE_RING_NAMESIZE];
unsigned i;
void *buckets = NULL;
char ring_name[RTE_RING_NAMESIZE];
unsigned i;
#endif
snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
#endif
snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
- r = rte_ring_lookup(ring_name);
- if (r != NULL) {
- /* clear the free ring */
- while (rte_ring_dequeue(r, &ptr) == 0)
- rte_pause();
- } else
- r = rte_ring_create(ring_name, rte_align32pow2(params->entries + 1),
+ r = rte_ring_create(ring_name, rte_align32pow2(params->entries + 1),
params->socket_id, 0);
if (r == NULL) {
RTE_LOG(ERR, HASH, "memory allocation failed\n");
params->socket_id, 0);
if (r == NULL) {
RTE_LOG(ERR, HASH, "memory allocation failed\n");
rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_ring_free(h->free_slots);
rte_free(h->key_store);
rte_free(h->buckets);
rte_free(h);
rte_free(h->key_store);
rte_free(h->buckets);
rte_free(h);