#include "rte_table_hash.h"
#include "rte_lru.h"
-#define RTE_TABLE_HASH_KEY_SIZE 8
+#define KEY_SIZE 8
+
+#define KEYS_PER_BUCKET 4
#ifdef RTE_TABLE_STATS_COLLECT
/* Input parameters */
uint32_t n_buckets;
- uint32_t n_entries_per_bucket;
uint32_t key_size;
uint32_t entry_size;
uint32_t bucket_size;
- uint32_t signature_offset;
uint32_t key_offset;
uint64_t key_mask;
- rte_table_hash_op_hash_nomask f_hash;
+ rte_table_hash_op_hash f_hash;
uint64_t seed;
/* Extendible buckets */
};
static int
-check_params_create_lru(struct rte_table_hash_key8_lru_params *params) {
- /* n_entries */
- if (params->n_entries == 0) {
- RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
+keycmp(void *a, void *b, void *b_mask)
+{
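+	/* Compare a single 8-byte key; b is masked by b_mask; returns 0 on match. */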
+ uint64_t *a64 = a, *b64 = b, *b_mask64 = b_mask;
+
+ return a64[0] != (b64[0] & b_mask64[0]);
+}
+
+static void
+keycpy(void *dst, void *src, void *src_mask)
+{
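+	/* Copy a single 8-byte key, applying the mask to the source key. */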
+ uint64_t *dst64 = dst, *src64 = src, *src_mask64 = src_mask;
+
+ dst64[0] = src64[0] & src_mask64[0];
+}
+
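+/* Validate the create-time parameters shared by the LRU and EXT variants. */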
+static int
+check_params_create(struct rte_table_hash_params *params)
+{
+ /* name */
+ if (params->name == NULL) {
+ RTE_LOG(ERR, TABLE, "%s: name invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ /* key_size */
+ if (params->key_size != KEY_SIZE) {
+ RTE_LOG(ERR, TABLE, "%s: key_size invalid value\n", __func__);
+ return -EINVAL;
+ }
+
+ /* n_keys */
+ if (params->n_keys == 0) {
+ RTE_LOG(ERR, TABLE, "%s: n_keys is zero\n", __func__);
+ return -EINVAL;
+ }
+
+ /* n_buckets */
+ if ((params->n_buckets == 0) ||
+ (!rte_is_power_of_2(params->n_buckets))) {
+ RTE_LOG(ERR, TABLE, "%s: n_buckets invalid value\n", __func__);
return -EINVAL;
}
static void *
rte_table_hash_create_key8_lru(void *params, int socket_id, uint32_t entry_size)
{
- struct rte_table_hash_key8_lru_params *p =
- (struct rte_table_hash_key8_lru_params *) params;
+ struct rte_table_hash_params *p = params;
struct rte_table_hash *f;
- uint32_t n_buckets, n_entries_per_bucket, key_size, bucket_size_cl;
- uint32_t total_size, i;
+ uint64_t bucket_size, total_size;
+ uint32_t n_buckets, i;
/* Check input parameters */
- if ((check_params_create_lru(p) != 0) ||
+ if ((check_params_create(p) != 0) ||
((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
- ((sizeof(struct rte_bucket_4_8) % 64) != 0)) {
+ ((sizeof(struct rte_bucket_4_8) % 64) != 0))
return NULL;
- }
- n_entries_per_bucket = 4;
- key_size = 8;
+
+ /*
+ * Table dimensioning
+ *
+ * Objective: Pick the number of buckets (n_buckets) so that there is a chance
+ * to store n_keys keys in the table.
+ *
+ * Note: Since the buckets do not get extended, it is not possible to
+ * guarantee that n_keys keys can be stored in the table at any time. In the
+ * worst case scenario, when all the n_keys keys fall into the same bucket, only
+ * a maximum of KEYS_PER_BUCKET keys will be stored in the table. This case
+ * defeats the purpose of the hash table and indicates an unsuitable f_hash
+ * function or n_keys to n_buckets ratio.
+ *
+ * MIN(n_buckets) = (n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET
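+ *
+ * For example, n_keys = 1000 and KEYS_PER_BUCKET = 4 give MIN(n_buckets) = 250,
+ * which is then rounded up to the next power of two (256) and never allowed
+ * to drop below p->n_buckets.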
+ */
+ n_buckets = rte_align32pow2(
+ (p->n_keys + KEYS_PER_BUCKET - 1) / KEYS_PER_BUCKET);
+ n_buckets = RTE_MAX(n_buckets, p->n_buckets);
/* Memory allocation */
- n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
- n_entries_per_bucket);
- bucket_size_cl = (sizeof(struct rte_bucket_4_8) + n_entries_per_bucket *
- entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
- total_size = sizeof(struct rte_table_hash) + n_buckets *
- bucket_size_cl * RTE_CACHE_LINE_SIZE;
-
- f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
+ bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_8) +
+ KEYS_PER_BUCKET * entry_size);
+ total_size = sizeof(struct rte_table_hash) + n_buckets * bucket_size;
+
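+	/* total_size is 64-bit; reject sizes that do not fit into size_t. */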
+ if (total_size > SIZE_MAX) {
+ RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes"
+ " for hash table %s\n",
+ __func__, total_size, p->name);
+ return NULL;
+ }
+
+ f = rte_zmalloc_socket(p->name,
+ (size_t)total_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
if (f == NULL) {
- RTE_LOG(ERR, TABLE,
- "%s: Cannot allocate %u bytes for hash table\n",
- __func__, total_size);
+ RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes"
+ " for hash table %s\n",
+ __func__, total_size, p->name);
return NULL;
}
- RTE_LOG(INFO, TABLE,
- "%s: Hash table memory footprint is %u bytes\n",
- __func__, total_size);
+
+ RTE_LOG(INFO, TABLE, "%s: Hash table %s memory footprint "
+ "is %" PRIu64 " bytes\n",
+ __func__, p->name, total_size);
/* Memory initialization */
f->n_buckets = n_buckets;
- f->n_entries_per_bucket = n_entries_per_bucket;
- f->key_size = key_size;
+ f->key_size = KEY_SIZE;
f->entry_size = entry_size;
- f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
- f->signature_offset = p->signature_offset;
+ f->bucket_size = bucket_size;
f->key_offset = p->key_offset;
f->f_hash = p->f_hash;
f->seed = p->seed;
uint64_t signature, mask, pos;
uint32_t bucket_index, i;
- signature = f->f_hash(key, f->key_size, f->seed);
+ signature = f->f_hash(key, &f->key_mask, f->key_size, f->seed);
bucket_index = signature & (f->n_buckets - 1);
bucket = (struct rte_bucket_4_8 *)
&f->memory[bucket_index * f->bucket_size];
/* Key is present in the bucket */
for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
uint64_t bucket_signature = bucket->signature;
- uint64_t bucket_key = bucket->key[i];
+ uint64_t *bucket_key = &bucket->key[i];
if ((bucket_signature & mask) &&
- (*((uint64_t *) key) == bucket_key)) {
+ (keycmp(bucket_key, key, &f->key_mask) == 0)) {
uint8_t *bucket_data = &bucket->data[i * f->entry_size];
memcpy(bucket_data, entry, f->entry_size);
uint8_t *bucket_data = &bucket->data[i * f->entry_size];
bucket->signature |= mask;
- bucket->key[i] = *((uint64_t *) key);
+ keycpy(&bucket->key[i], key, &f->key_mask);
memcpy(bucket_data, entry, f->entry_size);
lru_update(bucket, i);
*key_found = 0;
/* Bucket full: replace LRU entry */
pos = lru_pos(bucket);
- bucket->key[pos] = *((uint64_t *) key);
+ keycpy(&bucket->key[pos], key, &f->key_mask);
memcpy(&bucket->data[pos * f->entry_size], entry, f->entry_size);
lru_update(bucket, pos);
- *key_found = 0;
+ *key_found = 0;
*entry_ptr = (void *) &bucket->data[pos * f->entry_size];
return 0;
uint64_t signature, mask;
uint32_t bucket_index, i;
- signature = f->f_hash(key, f->key_size, f->seed);
+ signature = f->f_hash(key, &f->key_mask, f->key_size, f->seed);
bucket_index = signature & (f->n_buckets - 1);
bucket = (struct rte_bucket_4_8 *)
&f->memory[bucket_index * f->bucket_size];
/* Key is present in the bucket */
for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
uint64_t bucket_signature = bucket->signature;
- uint64_t bucket_key = bucket->key[i];
+ uint64_t *bucket_key = &bucket->key[i];
if ((bucket_signature & mask) &&
- (*((uint64_t *) key) == bucket_key)) {
+ (keycmp(bucket_key, key, &f->key_mask) == 0)) {
uint8_t *bucket_data = &bucket->data[i * f->entry_size];
bucket->signature &= ~mask;
return 0;
}
-static int
-check_params_create_ext(struct rte_table_hash_key8_ext_params *params) {
- /* n_entries */
- if (params->n_entries == 0) {
- RTE_LOG(ERR, TABLE, "%s: n_entries is zero\n", __func__);
- return -EINVAL;
- }
-
- /* n_entries_ext */
- if (params->n_entries_ext == 0) {
- RTE_LOG(ERR, TABLE, "%s: n_entries_ext is zero\n", __func__);
- return -EINVAL;
- }
-
- /* f_hash */
- if (params->f_hash == NULL) {
- RTE_LOG(ERR, TABLE, "%s: f_hash function pointer is NULL\n",
- __func__);
- return -EINVAL;
- }
-
- return 0;
-}
-
static void *
rte_table_hash_create_key8_ext(void *params, int socket_id, uint32_t entry_size)
{
- struct rte_table_hash_key8_ext_params *p =
- (struct rte_table_hash_key8_ext_params *) params;
+ struct rte_table_hash_params *p = params;
struct rte_table_hash *f;
- uint32_t n_buckets, n_buckets_ext, n_entries_per_bucket, key_size;
- uint32_t bucket_size_cl, stack_size_cl, total_size, i;
+ uint64_t bucket_size, stack_size, total_size;
+ uint32_t n_buckets_ext, i;
/* Check input parameters */
- if ((check_params_create_ext(p) != 0) ||
+ if ((check_params_create(p) != 0) ||
((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
((sizeof(struct rte_bucket_4_8) % 64) != 0))
return NULL;
- n_entries_per_bucket = 4;
- key_size = 8;
+ /*
+ * Table dimensioning
+ *
+ * Objective: Pick the number of bucket extensions (n_buckets_ext) so that
+ * it is guaranteed that n_keys keys can be stored in the table at any time.
+ *
+ * The worst case scenario takes place when all the n_keys keys fall into
+ * the same bucket. Actually, due to the KEYS_PER_BUCKET scheme, the worst
+ * case takes place when (n_keys - KEYS_PER_BUCKET + 1) keys fall into the
+ * same bucket, while the remaining (KEYS_PER_BUCKET - 1) keys each fall
+ * into a different bucket. This case defeats the purpose of the hash table
+ * and indicates an unsuitable f_hash function or n_keys to n_buckets ratio.
+ *
+ * n_buckets_ext = n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1
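+ *
+ * For example, n_keys = 1000 and KEYS_PER_BUCKET = 4 give
+ * n_buckets_ext = 250 + 3 = 253 extension buckets, allocated in addition
+ * to the p->n_buckets regular buckets.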
+ */
+ n_buckets_ext = p->n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1;
/* Memory allocation */
- n_buckets = rte_align32pow2((p->n_entries + n_entries_per_bucket - 1) /
- n_entries_per_bucket);
- n_buckets_ext = (p->n_entries_ext + n_entries_per_bucket - 1) /
- n_entries_per_bucket;
- bucket_size_cl = (sizeof(struct rte_bucket_4_8) + n_entries_per_bucket *
- entry_size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
- stack_size_cl = (n_buckets_ext * sizeof(uint32_t) + RTE_CACHE_LINE_SIZE - 1)
- / RTE_CACHE_LINE_SIZE;
- total_size = sizeof(struct rte_table_hash) + ((n_buckets +
- n_buckets_ext) * bucket_size_cl + stack_size_cl) *
- RTE_CACHE_LINE_SIZE;
-
- f = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE, socket_id);
+ bucket_size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_bucket_4_8) +
+ KEYS_PER_BUCKET * entry_size);
+ stack_size = RTE_CACHE_LINE_ROUNDUP(n_buckets_ext * sizeof(uint32_t));
+ total_size = sizeof(struct rte_table_hash) +
+ (p->n_buckets + n_buckets_ext) * bucket_size + stack_size;
+
+ if (total_size > SIZE_MAX) {
+ RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes "
+ "for hash table %s\n",
+ __func__, total_size, p->name);
+ return NULL;
+ }
+
+ f = rte_zmalloc_socket(p->name,
+ (size_t)total_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
if (f == NULL) {
RTE_LOG(ERR, TABLE,
- "%s: Cannot allocate %u bytes for hash table\n",
- __func__, total_size);
+ "%s: Cannot allocate %" PRIu64 " bytes "
+ "for hash table %s\n",
+ __func__, total_size, p->name);
return NULL;
}
- RTE_LOG(INFO, TABLE,
- "%s: Hash table memory footprint is %u bytes\n",
- __func__, total_size);
+ RTE_LOG(INFO, TABLE, "%s: Hash table %s memory footprint "
+ "is %" PRIu64 " bytes\n",
+ __func__, p->name, total_size);
/* Memory initialization */
- f->n_buckets = n_buckets;
- f->n_entries_per_bucket = n_entries_per_bucket;
- f->key_size = key_size;
+ f->n_buckets = p->n_buckets;
+ f->key_size = KEY_SIZE;
f->entry_size = entry_size;
- f->bucket_size = bucket_size_cl * RTE_CACHE_LINE_SIZE;
- f->signature_offset = p->signature_offset;
+ f->bucket_size = bucket_size;
f->key_offset = p->key_offset;
f->f_hash = p->f_hash;
f->seed = p->seed;
f->n_buckets_ext = n_buckets_ext;
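+	/* The stack is a free list of extension bucket indexes; it starts out full. */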
f->stack_pos = n_buckets_ext;
f->stack = (uint32_t *)
- &f->memory[(n_buckets + n_buckets_ext) * f->bucket_size];
+ &f->memory[(p->n_buckets + n_buckets_ext) * f->bucket_size];
if (p->key_mask != NULL)
f->key_mask = ((uint64_t *)p->key_mask)[0];
uint64_t signature;
uint32_t bucket_index, i;
- signature = f->f_hash(key, f->key_size, f->seed);
+ signature = f->f_hash(key, &f->key_mask, f->key_size, f->seed);
bucket_index = signature & (f->n_buckets - 1);
bucket0 = (struct rte_bucket_4_8 *)
&f->memory[bucket_index * f->bucket_size];
for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
uint64_t bucket_signature = bucket->signature;
- uint64_t bucket_key = bucket->key[i];
+ uint64_t *bucket_key = &bucket->key[i];
if ((bucket_signature & mask) &&
- (*((uint64_t *) key) == bucket_key)) {
+ (keycmp(bucket_key, key, &f->key_mask) == 0)) {
uint8_t *bucket_data = &bucket->data[i *
f->entry_size];
f->entry_size];
bucket->signature |= mask;
- bucket->key[i] = *((uint64_t *) key);
+ keycpy(&bucket->key[i], key, &f->key_mask);
memcpy(bucket_data, entry, f->entry_size);
*key_found = 0;
*entry_ptr = (void *) bucket_data;
bucket_prev->next_valid = 1;
bucket->signature = 1;
- bucket->key[0] = *((uint64_t *) key);
+ keycpy(&bucket->key[0], key, &f->key_mask);
memcpy(&bucket->data[0], entry, f->entry_size);
*key_found = 0;
*entry_ptr = (void *) &bucket->data[0];
uint64_t signature;
uint32_t bucket_index, i;
- signature = f->f_hash(key, f->key_size, f->seed);
+ signature = f->f_hash(key, &f->key_mask, f->key_size, f->seed);
bucket_index = signature & (f->n_buckets - 1);
bucket0 = (struct rte_bucket_4_8 *)
&f->memory[bucket_index * f->bucket_size];
for (i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) {
uint64_t bucket_signature = bucket->signature;
- uint64_t bucket_key = bucket->key[i];
+ uint64_t *bucket_key = &bucket->key[i];
if ((bucket_signature & mask) &&
- (*((uint64_t *) key) == bucket_key)) {
+ (keycmp(bucket_key, key, &f->key_mask) == 0)) {
uint8_t *bucket_data = &bucket->data[i *
f->entry_size];
return 0;
}
-#define lookup_key8_cmp(key_in, bucket, pos) \
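+/*
+ * Compare the masked input key against the four keys of a bucket: pos is set
+ * to the index of the slot whose key matches and whose signature bit is set.
+ */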
+#define lookup_key8_cmp(key_in, bucket, pos, f) \
{ \
- uint64_t xor[4], signature; \
+ uint64_t xor[4], signature, k; \
\
signature = ~bucket->signature; \
\
- xor[0] = (key_in[0] ^ bucket->key[0]) | (signature & 1);\
- xor[1] = (key_in[0] ^ bucket->key[1]) | (signature & 2);\
- xor[2] = (key_in[0] ^ bucket->key[2]) | (signature & 4);\
- xor[3] = (key_in[0] ^ bucket->key[3]) | (signature & 8);\
+ k = key_in[0] & f->key_mask; \
+ xor[0] = (k ^ bucket->key[0]) | (signature & 1); \
+ xor[1] = (k ^ bucket->key[1]) | (signature & 2); \
+ xor[2] = (k ^ bucket->key[2]) | (signature & 4); \
+ xor[3] = (k ^ bucket->key[3]) | (signature & 8); \
\
pos = 4; \
if (xor[0] == 0) \
uint64_t *key; \
uint64_t signature; \
uint32_t bucket_index; \
- uint64_t hash_key_buffer; \
\
key = RTE_MBUF_METADATA_UINT64_PTR(mbuf1, f->key_offset);\
- hash_key_buffer = *key & f->key_mask; \
- signature = f->f_hash(&hash_key_buffer, \
- RTE_TABLE_HASH_KEY_SIZE, f->seed); \
+ signature = f->f_hash(key, &f->key_mask, KEY_SIZE, f->seed); \
bucket_index = signature & (f->n_buckets - 1); \
bucket1 = (struct rte_bucket_4_8 *) \
&f->memory[bucket_index * f->bucket_size]; \
uint64_t pkt_mask; \
uint64_t *key; \
uint32_t pos; \
- uint64_t hash_key_buffer; \
\
key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
- hash_key_buffer = key[0] & f->key_mask; \
- \
- lookup_key8_cmp((&hash_key_buffer), bucket2, pos); \
+ lookup_key8_cmp(key, bucket2, pos, f); \
\
pkt_mask = ((bucket2->signature >> pos) & 1LLU) << pkt2_index;\
pkts_mask_out |= pkt_mask; \
uint64_t pkt_mask, bucket_mask; \
uint64_t *key; \
uint32_t pos; \
- uint64_t hash_key_buffer; \
\
key = RTE_MBUF_METADATA_UINT64_PTR(mbuf2, f->key_offset);\
- hash_key_buffer = *key & f->key_mask; \
- \
- lookup_key8_cmp((&hash_key_buffer), bucket2, pos); \
+ lookup_key8_cmp(key, bucket2, pos, f); \
\
pkt_mask = ((bucket2->signature >> pos) & 1LLU) << pkt2_index;\
pkts_mask_out |= pkt_mask; \
uint64_t pkt_mask, bucket_mask; \
uint64_t *key; \
uint32_t pos; \
- uint64_t hash_key_buffer; \
\
bucket = buckets[pkt_index]; \
key = keys[pkt_index]; \
- hash_key_buffer = (*key) & f->key_mask; \
- \
- lookup_key8_cmp((&hash_key_buffer), bucket, pos); \
+ lookup_key8_cmp(key, bucket, pos, f); \
\
pkt_mask = ((bucket->signature >> pos) & 1LLU) << pkt_index;\
pkts_mask_out |= pkt_mask; \
#define lookup2_stage1(mbuf10, mbuf11, bucket10, bucket11, f)\
{ \
uint64_t *key10, *key11; \
- uint64_t hash_offset_buffer10; \
- uint64_t hash_offset_buffer11; \
uint64_t signature10, signature11; \
uint32_t bucket10_index, bucket11_index; \
- rte_table_hash_op_hash_nomask f_hash = f->f_hash; \
+ rte_table_hash_op_hash f_hash = f->f_hash; \
uint64_t seed = f->seed; \
uint32_t key_offset = f->key_offset; \
\
key10 = RTE_MBUF_METADATA_UINT64_PTR(mbuf10, key_offset);\
key11 = RTE_MBUF_METADATA_UINT64_PTR(mbuf11, key_offset);\
- hash_offset_buffer10 = *key10 & f->key_mask; \
- hash_offset_buffer11 = *key11 & f->key_mask; \
\
- signature10 = f_hash(&hash_offset_buffer10, \
- RTE_TABLE_HASH_KEY_SIZE, seed); \
+ signature10 = f_hash(key10, &f->key_mask, KEY_SIZE, seed); \
bucket10_index = signature10 & (f->n_buckets - 1); \
bucket10 = (struct rte_bucket_4_8 *) \
&f->memory[bucket10_index * f->bucket_size]; \
rte_prefetch0(bucket10); \
\
- signature11 = f_hash(&hash_offset_buffer11, \
- RTE_TABLE_HASH_KEY_SIZE, seed); \
+ signature11 = f_hash(key11, &f->key_mask, KEY_SIZE, seed); \
bucket11_index = signature11 & (f->n_buckets - 1); \
bucket11 = (struct rte_bucket_4_8 *) \
&f->memory[bucket11_index * f->bucket_size]; \
void *a20, *a21; \
uint64_t pkt20_mask, pkt21_mask; \
uint64_t *key20, *key21; \
- uint64_t hash_offset_buffer20; \
- uint64_t hash_offset_buffer21; \
uint32_t pos20, pos21; \
\
key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
- hash_offset_buffer20 = *key20 & f->key_mask; \
- hash_offset_buffer21 = *key21 & f->key_mask; \
\
- lookup_key8_cmp((&hash_offset_buffer20), bucket20, pos20);\
- lookup_key8_cmp((&hash_offset_buffer21), bucket21, pos21);\
+ lookup_key8_cmp(key20, bucket20, pos20, f); \
+ lookup_key8_cmp(key21, bucket21, pos21, f); \
\
pkt20_mask = ((bucket20->signature >> pos20) & 1LLU) << pkt20_index;\
pkt21_mask = ((bucket21->signature >> pos21) & 1LLU) << pkt21_index;\
void *a20, *a21; \
uint64_t pkt20_mask, pkt21_mask, bucket20_mask, bucket21_mask;\
uint64_t *key20, *key21; \
- uint64_t hash_offset_buffer20; \
- uint64_t hash_offset_buffer21; \
uint32_t pos20, pos21; \
\
key20 = RTE_MBUF_METADATA_UINT64_PTR(mbuf20, f->key_offset);\
key21 = RTE_MBUF_METADATA_UINT64_PTR(mbuf21, f->key_offset);\
- hash_offset_buffer20 = *key20 & f->key_mask; \
- hash_offset_buffer21 = *key21 & f->key_mask; \
\
- lookup_key8_cmp((&hash_offset_buffer20), bucket20, pos20);\
- lookup_key8_cmp((&hash_offset_buffer21), bucket21, pos21);\
+ lookup_key8_cmp(key20, bucket20, pos20, f); \
+ lookup_key8_cmp(key21, bucket21, pos21, f); \
\
pkt20_mask = ((bucket20->signature >> pos20) & 1LLU) << pkt20_index;\
pkt21_mask = ((bucket21->signature >> pos21) & 1LLU) << pkt21_index;\
*lookup_hit_mask = pkts_mask_out;
RTE_TABLE_HASH_KEY8_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - __builtin_popcountll(pkts_mask_out));
return 0;
-} /* rte_table_hash_lookup_key8_lru() */
+} /* lookup LRU */
static int
rte_table_hash_lookup_key8_ext(
*lookup_hit_mask = pkts_mask_out;
RTE_TABLE_HASH_KEY8_STATS_PKTS_LOOKUP_MISS(f, n_pkts_in - __builtin_popcountll(pkts_mask_out));
return 0;
-} /* rte_table_hash_lookup_key8_ext() */
+} /* lookup EXT */
static int
rte_table_hash_key8_stats_read(void *table, struct rte_table_stats *stats, int clear)