#include <sys/queue.h>
#include <rte_common.h>
-#include <rte_memory.h> /* for definition of CACHE_LINE_SIZE */
+#include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_log.h>
#include <rte_memcpy.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include "rte_hash.h"
-TAILQ_HEAD(rte_hash_list, rte_hash);
+TAILQ_HEAD(rte_hash_list, rte_tailq_entry);
+static struct rte_tailq_elem rte_hash_tailq = {
+ .name = "RTE_HASH",
+};
+EAL_REGISTER_TAILQ(rte_hash_tailq)
/* Macro to enable/disable run-time checking of function parameters */
#if defined(RTE_LIBRTE_HASH_DEBUG)
static inline hash_sig_t *
get_sig_tbl_bucket(const struct rte_hash *h, uint32_t bucket_index)
{
- return (hash_sig_t *)
- &(h->sig_tbl[bucket_index * h->sig_tbl_bucket_size]);
+ return RTE_PTR_ADD(h->sig_tbl, (bucket_index *
+ h->sig_tbl_bucket_size));
}
/* Returns a pointer to the first key in specified bucket. */
static inline uint8_t *
get_key_tbl_bucket(const struct rte_hash *h, uint32_t bucket_index)
{
- return (uint8_t *) &(h->key_tbl[bucket_index * h->bucket_entries *
- h->key_tbl_key_size]);
+ return RTE_PTR_ADD(h->key_tbl, (bucket_index * h->bucket_entries *
+ h->key_tbl_key_size));
}
/* Returns a pointer to a key at a specific position in a specified bucket. */
static inline void *
get_key_from_bucket(const struct rte_hash *h, uint8_t *bkt, uint32_t pos)
{
- return (void *) &bkt[pos * h->key_tbl_key_size];
+ return RTE_PTR_ADD(bkt, pos * h->key_tbl_key_size);
}
/* Does integer division with rounding-up of result. */
struct rte_hash *
rte_hash_find_existing(const char *name)
{
- struct rte_hash *h;
+ struct rte_hash *h = NULL;
+ struct rte_tailq_entry *te;
struct rte_hash_list *hash_list;
- /* check that we have an initialised tail queue */
- if ((hash_list = RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_HASH, rte_hash_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return NULL;
- }
+ hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
- TAILQ_FOREACH(h, hash_list, next) {
+ TAILQ_FOREACH(te, hash_list, next) {
+ h = (struct rte_hash *) te->data;
if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
break;
}
rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
- if (h == NULL)
+ if (te == NULL) {
rte_errno = ENOENT;
+ return NULL;
+ }
return h;
}
rte_hash_create(const struct rte_hash_parameters *params)
{
struct rte_hash *h = NULL;
+ struct rte_tailq_entry *te;
uint32_t num_buckets, sig_bucket_size, key_size,
hash_tbl_size, sig_tbl_size, key_tbl_size, mem_size;
char hash_name[RTE_HASH_NAMESIZE];
struct rte_hash_list *hash_list;
- /* check that we have an initialised tail queue */
- if ((hash_list =
- RTE_TAILQ_LOOKUP_BY_IDX(RTE_TAILQ_HASH, rte_hash_list)) == NULL) {
- rte_errno = E_RTE_NO_TAILQ;
- return NULL;
- }
+ hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
/* Check for valid parameters */
if ((params == NULL) ||
sizeof(hash_sig_t), SIG_BUCKET_ALIGNMENT);
key_size = align_size(params->key_len, KEY_ALIGNMENT);
- hash_tbl_size = align_size(sizeof(struct rte_hash), CACHE_LINE_SIZE);
+ hash_tbl_size = align_size(sizeof(struct rte_hash), RTE_CACHE_LINE_SIZE);
sig_tbl_size = align_size(num_buckets * sig_bucket_size,
- CACHE_LINE_SIZE);
+ RTE_CACHE_LINE_SIZE);
key_tbl_size = align_size(num_buckets * key_size *
- params->bucket_entries, CACHE_LINE_SIZE);
+ params->bucket_entries, RTE_CACHE_LINE_SIZE);
/* Total memory required for hash context */
mem_size = hash_tbl_size + sig_tbl_size + key_tbl_size;
rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
/* guarantee there's no existing */
- TAILQ_FOREACH(h, hash_list, next) {
+ TAILQ_FOREACH(te, hash_list, next) {
+ h = (struct rte_hash *) te->data;
if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
break;
}
- if (h != NULL)
+ if (te != NULL)
+ goto exit;
+
+ te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
+ if (te == NULL) {
+ RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
goto exit;
+ }
h = (struct rte_hash *)rte_zmalloc_socket(hash_name, mem_size,
- CACHE_LINE_SIZE, params->socket_id);
+ RTE_CACHE_LINE_SIZE, params->socket_id);
if (h == NULL) {
RTE_LOG(ERR, HASH, "memory allocation failed\n");
+ rte_free(te);
goto exit;
}
h->hash_func = (params->hash_func == NULL) ?
DEFAULT_HASH_FUNC : params->hash_func;
- TAILQ_INSERT_TAIL(hash_list, h, next);
+ te->data = (void *) h;
+
+ TAILQ_INSERT_TAIL(hash_list, te, next);
exit:
rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
void
rte_hash_free(struct rte_hash *h)
{
+ struct rte_tailq_entry *te;
+ struct rte_hash_list *hash_list;
+
if (h == NULL)
return;
- RTE_EAL_TAILQ_REMOVE(RTE_TAILQ_HASH, rte_hash_list, h);
+ hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /* find out tailq entry */
+ TAILQ_FOREACH(te, hash_list, next) {
+ if (te->data == (void *) h)
+ break;
+ }
+
+ if (te == NULL) {
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ return;
+ }
+
+ TAILQ_REMOVE(hash_list, te, next);
+
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
rte_free(h);
+ rte_free(te);
}
static inline int32_t