1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Arm Limited
8 #include <rte_cycles.h>
10 #include <rte_hash_crc.h>
11 #include <rte_jhash.h>
12 #include <rte_launch.h>
13 #include <rte_malloc.h>
14 #include <rte_random.h>
15 #include <rte_spinlock.h>
19 #ifndef RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF
20 #define RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF 0
23 #define BULK_LOOKUP_SIZE 32
25 #define RUN_WITH_HTM_DISABLED 0
27 #if (RUN_WITH_HTM_DISABLED)
29 #define TOTAL_ENTRY (5*1024)
30 #define TOTAL_INSERT (5*1024)
34 #define TOTAL_ENTRY (4*1024*1024)
35 #define TOTAL_INSERT (4*1024*1024)
40 #define READ_PASS_NO_KEY_SHIFTS 2
41 #define READ_PASS_SHIFT_PATH 4
42 #define READ_PASS_NON_SHIFT_PATH 8
43 #define BULK_LOOKUP 16
44 #define READ_PASS_KEY_SHIFTS_EXTBKT 32
46 #define WRITE_NO_KEY_SHIFT 0
47 #define WRITE_KEY_SHIFT 1
48 #define WRITE_EXT_BKT 2
/* Reader/writer core-count configurations exercised by each test case. */
51 unsigned int rwc_core_cnt[NUM_TEST] = {1, 2, 4};
/* NOTE(review): the cycles-per-lookup result arrays below appear to be
 * members of 'struct rwc_perf' whose opening line is not visible in this
 * chunk (they are accessed as rwc_lf_results.w_no_ks_r_hit[...] later).
 * First index: 0 = single lookup, 1 = bulk lookup; second: core-count case.
 * Confirm against the full file.
 */
54 uint32_t w_no_ks_r_hit[2][NUM_TEST];
55 uint32_t w_no_ks_r_miss[2][NUM_TEST];
56 uint32_t w_ks_r_hit_nsp[2][NUM_TEST];
57 uint32_t w_ks_r_hit_sp[2][NUM_TEST];
58 uint32_t w_ks_r_miss[2][NUM_TEST];
59 uint32_t multi_rw[NUM_TEST - 1][2][NUM_TEST];
60 uint32_t w_ks_r_hit_extbkt[2][NUM_TEST];
/* Aggregated results for the lock-free and non-lock-free runs. */
63 static struct rwc_perf rwc_lf_results, rwc_non_lf_results;
/* NOTE(review): the key-set pointers and counters below are presumably
 * members of the test-parameter struct referenced as tbl_rwc_test_param
 * elsewhere in this file (struct header not visible here) — verify.
 */
69 uint32_t *keys_absent;
70 uint32_t *keys_shift_path;
71 uint32_t *keys_non_shift_path;
72 uint32_t *keys_ext_bkt;
73 uint32_t *keys_ks_extbkt;
74 uint32_t count_keys_no_ks;
75 uint32_t count_keys_ks;
76 uint32_t count_keys_absent;
77 uint32_t count_keys_shift_path;
78 uint32_t count_keys_non_shift_path;
79 uint32_t count_keys_extbkt;
80 uint32_t count_keys_ks_extbkt;
81 uint32_t single_insert;
/* Totals accumulated by reader lcores: lookup cycles and lookup count. */
85 static rte_atomic64_t gread_cycles;
86 static rte_atomic64_t greads;
/* Set by the writer thread to tell looping readers to stop. */
88 static volatile uint8_t writer_done;
/* Enabled lcore ids, filled by get_enabled_cores_list(). */
90 uint16_t enabled_core_ids[RTE_MAX_LCORE];
/* Per-bucket flag array used while classifying keys (see check_bucket). */
92 uint8_t *scanned_bkts;
/* Return the 16-bit "short signature" derived from the full hash.
 * NOTE(review): body not visible in this chunk — presumably mirrors the
 * librte_hash internal get_short_sig() (upper bits of the signature);
 * confirm against lib/hash/rte_cuckoo_hash.c.
 */
94 static inline uint16_t
95 get_short_sig(const hash_sig_t hash)
100 static inline uint32_t
101 get_prim_bucket_index(__attribute__((unused)) const struct rte_hash *h,
102 const hash_sig_t hash)
104 uint32_t num_buckets;
105 uint32_t bucket_bitmask;
106 num_buckets = rte_align32pow2(TOTAL_ENTRY) / 8;
107 bucket_bitmask = num_buckets - 1;
108 return hash & bucket_bitmask;
111 static inline uint32_t
112 get_alt_bucket_index(__attribute__((unused)) const struct rte_hash *h,
113 uint32_t cur_bkt_idx, uint16_t sig)
115 uint32_t num_buckets;
116 uint32_t bucket_bitmask;
117 num_buckets = rte_align32pow2(TOTAL_ENTRY) / 8;
118 bucket_bitmask = num_buckets - 1;
119 return (cur_bkt_idx ^ sig) & bucket_bitmask;
/* Populate enabled_core_ids[] with every enabled lcore id and sanity-check
 * the resulting count against rte_lcore_count(); a mismatch is reported as
 * an error. (Return type and parts of the body not visible in this chunk.)
 */
124 get_enabled_cores_list(void)
128 uint32_t max_cores = rte_lcore_count();
129 RTE_LCORE_FOREACH(core_id) {
130 enabled_core_ids[i] = core_id;
134 if (i != max_cores) {
135 printf("Number of enabled cores in list is different from "
136 "number given by rte_lcore_count()\n");
/* Iterate the whole hash table looking at bucket bkt_idx:
 * - detect whether 'key' is already present in that bucket (duplicate),
 * - detect whether the bucket has a free slot.
 * If the bucket turns out to be full, record its 8 keys into
 * keys_shift_path (once per bucket, tracked via scanned_bkts[]), since a
 * later colliding insert would push these keys to their alternate bucket.
 * (Return type and several body lines not visible in this chunk; exact
 * return values — duplicate vs. space vs. full — to be confirmed.)
 */
143 check_bucket(uint32_t bkt_idx, uint32_t key)
149 const void *next_key;
152 /* Temporary bucket to hold the keys */
153 uint32_t keys_in_bkt[8];
157 while (rte_hash_iterate(tbl_rwc_test_param.h,
158 &next_key, &next_data, &iter) >= 0) {
160 /* Check for duplicate entries */
161 if (*(const uint32_t *)next_key == key)
164 /* Identify if there is any free entry in the bucket */
165 diff = iter - prev_iter;
170 keys_in_bkt[count] = *(const uint32_t *)next_key;
173 /* All entries in the bucket are occupied */
177 * Check if bucket was not scanned before, to avoid
180 if (scanned_bkts[bkt_idx] == 0) {
182 * Since this bucket (pointed to by bkt_idx) is
183 * full, it is likely that key(s) in this
184 * bucket will be on the shift path, when
185 * collision occurs. Thus, add it to
188 memcpy(tbl_rwc_test_param.keys_shift_path +
189 tbl_rwc_test_param.count_keys_shift_path
191 tbl_rwc_test_param.count_keys_shift_path += 8;
192 scanned_bkts[bkt_idx] = 1;
/* Interior of generate_keys() (function opening not visible in this chunk).
 * Builds all key sets used by the perf tests: inserted keys that do / do
 * not cause cuckoo key-shifts, absent keys, shift-path and non-shift-path
 * keys, extended-bucket keys, and keys whose deletion shifts ext-bkt keys.
 */
203 uint32_t *keys = NULL;
204 uint32_t *keys_no_ks = NULL;
205 uint32_t *keys_ks = NULL;
206 uint32_t *keys_absent = NULL;
207 uint32_t *keys_non_shift_path = NULL;
208 uint32_t *keys_ext_bkt = NULL;
209 uint32_t *keys_ks_extbkt = NULL;
210 uint32_t *found = NULL;
211 uint32_t count_keys_no_ks = 0;
212 uint32_t count_keys_ks = 0;
213 uint32_t count_keys_extbkt = 0;
217 * keys will consist of a) keys whose addition to the hash table
218 * will result in shifting of the existing keys to their alternate
219 * locations b) keys whose addition to the hash table will not result
220 * in shifting of the existing keys.
222 keys = rte_malloc(NULL, sizeof(uint32_t) * TOTAL_INSERT, 0);
224 printf("RTE_MALLOC failed\n");
229 * keys_no_ks (no key-shifts): Subset of 'keys' - consists of keys that
230 * will NOT result in shifting of the existing keys to their alternate
231 * locations. Roughly around 900K keys.
233 keys_no_ks = rte_malloc(NULL, sizeof(uint32_t) * TOTAL_INSERT, 0);
234 if (keys_no_ks == NULL) {
235 printf("RTE_MALLOC failed\n");
240 * keys_ks (key-shifts): Subset of 'keys' - consists of keys that will
241 * result in shifting of the existing keys to their alternate locations.
242 * Roughly around 146K keys. There might be repeating keys. More code is
243 * required to filter out these keys which will complicate the test case
245 keys_ks = rte_malloc(NULL, sizeof(uint32_t) * TOTAL_INSERT, 0);
246 if (keys_ks == NULL) {
247 printf("RTE_MALLOC failed\n");
251 /* Used to identify keys not inserted in the hash table */
252 found = rte_zmalloc(NULL, sizeof(uint32_t) * TOTAL_INSERT, 0);
254 printf("RTE_MALLOC failed\n");
259 * This consist of keys not inserted to the hash table.
260 * Used to test perf of lookup on keys that do not exist in the table.
262 keys_absent = rte_malloc(NULL, sizeof(uint32_t) * TOTAL_INSERT, 0);
263 if (keys_absent == NULL) {
264 printf("RTE_MALLOC failed\n");
269 * This consist of keys which are likely to be on the shift
270 * path (i.e. being moved to alternate location), when collision occurs
271 * on addition of a key to an already full primary bucket.
272 * Used to test perf of lookup on keys that are on the shift path.
274 tbl_rwc_test_param.keys_shift_path = rte_malloc(NULL, sizeof(uint32_t) *
276 if (tbl_rwc_test_param.keys_shift_path == NULL) {
277 printf("RTE_MALLOC failed\n");
282 * This consist of keys which are never on the shift
283 * path (i.e. being moved to alternate location), when collision occurs
284 * on addition of a key to an already full primary bucket.
285 * Used to test perf of lookup on keys that are not on the shift path.
287 keys_non_shift_path = rte_malloc(NULL, sizeof(uint32_t) * TOTAL_INSERT,
289 if (keys_non_shift_path == NULL) {
290 printf("RTE_MALLOC failed\n");
295 * This consist of keys which will be stored in extended buckets
297 keys_ext_bkt = rte_malloc(NULL, sizeof(uint32_t) * TOTAL_INSERT, 0);
298 if (keys_ext_bkt == NULL) {
299 printf("RTE_MALLOC failed\n");
304 * This consist of keys which when deleted causes shifting of keys
305 * in extended buckets to respective secondary buckets
307 keys_ks_extbkt = rte_malloc(NULL, sizeof(uint32_t) * TOTAL_INSERT, 0);
308 if (keys_ks_extbkt == NULL) {
309 printf("RTE_MALLOC failed\n");
314 uint32_t prim_bucket_idx;
315 uint32_t sec_bucket_idx;
317 uint32_t num_buckets;
/* Bucket count mirrors the helpers: entries / 8 slots, power-of-two. */
318 num_buckets = rte_align32pow2(TOTAL_ENTRY) / 8;
322 * Used to mark bkts in which at least one key was shifted to its
325 scanned_bkts = rte_malloc(NULL, sizeof(uint8_t) * num_buckets, 0);
326 if (scanned_bkts == NULL) {
327 printf("RTE_MALLOC failed\n");
/* Publish the key sets in the shared test-parameter struct. */
331 tbl_rwc_test_param.keys = keys;
332 tbl_rwc_test_param.keys_no_ks = keys_no_ks;
333 tbl_rwc_test_param.keys_ks = keys_ks;
334 tbl_rwc_test_param.keys_absent = keys_absent;
335 tbl_rwc_test_param.keys_non_shift_path = keys_non_shift_path;
336 tbl_rwc_test_param.keys_ext_bkt = keys_ext_bkt;
337 tbl_rwc_test_param.keys_ks_extbkt = keys_ks_extbkt;
338 /* Generate keys by adding previous two keys, neglect overflow */
339 printf("Generating keys...\n");
342 for (i = 2; i < TOTAL_INSERT; i++)
343 keys[i] = keys[i-1] + keys[i-2];
345 /* Segregate keys into keys_no_ks and keys_ks */
346 for (i = 0; i < TOTAL_INSERT; i++) {
347 /* Check if primary bucket has space.*/
348 sig = rte_hash_hash(tbl_rwc_test_param.h,
349 tbl_rwc_test_param.keys+i);
350 prim_bucket_idx = get_prim_bucket_index(tbl_rwc_test_param.h,
352 ret = check_bucket(prim_bucket_idx, keys[i]);
355 * Primary bucket is full, this key will result in
356 * shifting of the keys to their alternate locations.
358 keys_ks[count_keys_ks] = keys[i];
360 } else if (ret == 0) {
362 * Primary bucket has space, this key will not result in
363 * shifting of the keys. Hence, add key to the table.
365 ret = rte_hash_add_key_data(tbl_rwc_test_param.h,
367 (void *)((uintptr_t)i));
369 printf("writer failed %"PRIu32"\n", i);
372 keys_no_ks[count_keys_no_ks] = keys[i];
377 for (i = 0; i < count_keys_no_ks; i++) {
379 * Identify keys in keys_no_ks with value less than
380 * 4M (HTM enabled) OR 5K (HTM disabled)
382 if (keys_no_ks[i] < TOTAL_INSERT)
383 found[keys_no_ks[i]]++;
386 for (i = 0; i < count_keys_ks; i++) {
388 * Identify keys in keys_ks with value less than
389 * 4M (HTM enabled) OR 5K (HTM disabled)
391 if (keys_ks[i] < TOTAL_INSERT)
395 uint32_t count_keys_absent = 0;
396 for (i = 0; i < TOTAL_INSERT; i++) {
398 * Identify missing keys between 0 and
399 * 4M (HTM enabled) OR 5K (HTM disabled)
402 keys_absent[count_keys_absent++] = i;
405 /* Find keys that will not be on the shift path */
407 const void *next_key;
410 for (i = 0; i < num_buckets; i++) {
411 /* Check bucket for no keys shifted to alternate locations */
412 if (scanned_bkts[i] == 0) {
414 while (rte_hash_iterate(tbl_rwc_test_param.h,
415 &next_key, &next_data, &iter) >= 0) {
417 /* Check if key belongs to the current bucket */
419 keys_non_shift_path[count++]
420 = *(const uint32_t *)next_key;
/* Record final counts for use by the reader threads. */
427 tbl_rwc_test_param.count_keys_no_ks = count_keys_no_ks;
428 tbl_rwc_test_param.count_keys_ks = count_keys_ks;
429 tbl_rwc_test_param.count_keys_absent = count_keys_absent;
430 tbl_rwc_test_param.count_keys_non_shift_path = count;
/* Reuse scanned_bkts to now track secondary buckets of ext-bkt keys. */
432 memset(scanned_bkts, 0, num_buckets);
434 /* Find keys that will be in extended buckets */
435 for (i = 0; i < count_keys_ks; i++) {
436 ret = rte_hash_add_key(tbl_rwc_test_param.h, keys_ks + i);
438 /* Key will be added to ext bkt */
439 keys_ext_bkt[count_keys_extbkt++] = keys_ks[i];
440 /* Sec bkt to be added to keys_ks_extbkt */
441 sig = rte_hash_hash(tbl_rwc_test_param.h,
442 tbl_rwc_test_param.keys_ks + i);
443 prim_bucket_idx = get_prim_bucket_index(
444 tbl_rwc_test_param.h, sig);
445 short_sig = get_short_sig(sig);
446 sec_bucket_idx = get_alt_bucket_index(
447 tbl_rwc_test_param.h,
448 prim_bucket_idx, short_sig);
449 if (scanned_bkts[sec_bucket_idx] == 0)
450 scanned_bkts[sec_bucket_idx] = 1;
454 /* Find keys that will shift keys in ext bucket*/
455 for (i = 0; i < num_buckets; i++) {
456 if (scanned_bkts[i] == 1) {
458 while (rte_hash_iterate(tbl_rwc_test_param.h,
459 &next_key, &next_data, &iter) >= 0) {
460 /* Check if key belongs to the current bucket */
462 keys_ks_extbkt[count++]
463 = *(const uint32_t *)next_key;
470 tbl_rwc_test_param.count_keys_ks_extbkt = count;
471 tbl_rwc_test_param.count_keys_extbkt = count_keys_extbkt;
473 printf("\nCount of keys NOT causing shifting of existing keys to "
474 "alternate location: %d\n", tbl_rwc_test_param.count_keys_no_ks);
475 printf("\nCount of keys causing shifting of existing keys to alternate "
476 "locations: %d\n\n", tbl_rwc_test_param.count_keys_ks);
477 printf("Count of absent keys that will never be added to the hash "
478 "table: %d\n\n", tbl_rwc_test_param.count_keys_absent);
479 printf("Count of keys likely to be on the shift path: %d\n\n",
480 tbl_rwc_test_param.count_keys_shift_path);
481 printf("Count of keys not likely to be on the shift path: %d\n\n",
482 tbl_rwc_test_param.count_keys_non_shift_path);
483 printf("Count of keys in extended buckets: %d\n\n",
484 tbl_rwc_test_param.count_keys_extbkt);
485 printf("Count of keys shifting keys in ext buckets: %d\n\n",
486 tbl_rwc_test_param.count_keys_ks_extbkt);
/* Error/cleanup path: release the table and all key-set allocations. */
489 rte_hash_free(tbl_rwc_test_param.h);
494 rte_free(keys_no_ks);
496 rte_free(keys_absent);
498 rte_free(tbl_rwc_test_param.keys_shift_path);
499 rte_free(keys_ext_bkt);
500 rte_free(keys_ks_extbkt);
501 rte_free(scanned_bkts);
/* Create the hash table used by a test run, selecting hash function
 * (jhash vs CRC) and extra flags for the requested concurrency mode:
 * lock-free RW concurrency, HTM-based RW concurrency, or plain RW
 * concurrency; optionally enable the extendable-bucket table.
 * Stores the created handle in tbl_rwc_test_param.h.
 * (Return type and the if/else lines selecting each branch are not
 * visible in this chunk.)
 */
506 init_params(int rwc_lf, int use_jhash, int htm, int ext_bkt)
508 struct rte_hash *handle;
510 struct rte_hash_parameters hash_params = {
511 .entries = TOTAL_ENTRY,
512 .key_len = sizeof(uint32_t),
513 .hash_func_init_val = 0,
514 .socket_id = rte_socket_id(),
518 hash_params.hash_func = rte_jhash;
520 hash_params.hash_func = rte_hash_crc;
/* Lock-free read/write concurrency with multi-writer add. */
523 hash_params.extra_flag =
524 RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF |
525 RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD;
/* HTM-backed read/write concurrency. */
527 hash_params.extra_flag =
528 RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT |
529 RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY |
530 RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD;
/* Plain (lock-based) read/write concurrency. */
532 hash_params.extra_flag =
533 RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY |
534 RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD;
537 hash_params.extra_flag |= RTE_HASH_EXTRA_FLAGS_EXT_TABLE;
539 hash_params.name = "tests";
541 handle = rte_hash_create(&hash_params);
542 if (handle == NULL) {
543 printf("hash creation failed");
547 tbl_rwc_test_param.h = handle;
/* Reader worker launched on each reader lcore. 'arg' encodes the
 * read_type bitmask (READ_FAIL / READ_PASS_* / BULK_LOOKUP), which selects
 * the key set to look up and whether bulk lookups are used. Loops over
 * the key set until writer_done is set, validating that hits/misses match
 * expectation, then adds elapsed cycles and lookup count to the global
 * gread_cycles/greads accumulators.
 */
552 test_rwc_reader(__attribute__((unused)) void *arg)
556 uint64_t begin, cycles;
557 uint32_t loop_cnt = 0;
558 uint8_t read_type = (uint8_t)((uintptr_t)arg);
563 void *temp_a[BULK_LOOKUP_SIZE];
565 /* Used to identify keys not inserted in the hash table */
566 pos = rte_malloc(NULL, sizeof(uint32_t) * BULK_LOOKUP_SIZE, 0);
568 printf("RTE_MALLOC failed\n");
/* Select the key set and its size from the read_type bits. */
572 if (read_type & READ_FAIL) {
573 keys = tbl_rwc_test_param.keys_absent;
574 read_cnt = tbl_rwc_test_param.count_keys_absent;
575 } else if (read_type & READ_PASS_NO_KEY_SHIFTS) {
576 keys = tbl_rwc_test_param.keys_no_ks;
577 read_cnt = tbl_rwc_test_param.count_keys_no_ks;
578 } else if (read_type & READ_PASS_SHIFT_PATH) {
579 keys = tbl_rwc_test_param.keys_shift_path;
580 read_cnt = tbl_rwc_test_param.count_keys_shift_path;
581 } else if (read_type & READ_PASS_KEY_SHIFTS_EXTBKT) {
582 keys = tbl_rwc_test_param.keys_ext_bkt;
583 read_cnt = tbl_rwc_test_param.count_keys_extbkt;
585 keys = tbl_rwc_test_param.keys_non_shift_path;
586 read_cnt = tbl_rwc_test_param.count_keys_non_shift_path;
/* Remainder that does not fill a whole bulk-lookup batch. */
589 extra_keys = read_cnt & (BULK_LOOKUP_SIZE - 1);
591 begin = rte_rdtsc_precise();
593 if (read_type & BULK_LOOKUP) {
594 for (i = 0; i < (read_cnt - extra_keys);
595 i += BULK_LOOKUP_SIZE) {
596 /* Array of pointer to the list of keys */
597 for (j = 0; j < BULK_LOOKUP_SIZE; j++)
598 temp_a[j] = keys + i + j;
599 rte_hash_lookup_bulk(tbl_rwc_test_param.h,
602 BULK_LOOKUP_SIZE, pos);
603 /* Validate lookup result */
604 for (j = 0; j < BULK_LOOKUP_SIZE; j++)
605 if ((read_type & READ_FAIL &&
606 pos[j] != -ENOENT) ||
607 (!(read_type & READ_FAIL) &&
608 pos[j] == -ENOENT)) {
609 printf("lookup failed!"
/* Handle the trailing partial batch the same way. */
615 for (j = 0; j < extra_keys; j++)
616 temp_a[j] = keys + i + j;
618 rte_hash_lookup_bulk(tbl_rwc_test_param.h,
622 for (j = 0; j < extra_keys; j++)
623 if ((read_type & READ_FAIL &&
624 pos[j] != -ENOENT) ||
625 (!(read_type & READ_FAIL) &&
626 pos[j] == -ENOENT)) {
627 printf("lookup failed! %"PRIu32"\n",
/* Single-key lookup path. */
632 for (i = 0; i < read_cnt; i++) {
633 ret = rte_hash_lookup
634 (tbl_rwc_test_param.h, keys + i);
635 if (((read_type & READ_FAIL) &&
637 (!(read_type & READ_FAIL) &&
639 printf("lookup failed! %"PRIu32"\n",
/* Keep looking up until the writer signals completion. */
646 } while (!writer_done);
648 cycles = rte_rdtsc_precise() - begin;
649 rte_atomic64_add(&gread_cycles, cycles);
650 rte_atomic64_add(&greads, read_cnt*loop_cnt);
/* Insert one of the pre-generated key sets into the table, selected by
 * write_type: WRITE_NO_KEY_SHIFT (keys_no_ks), WRITE_KEY_SHIFT (keys_ks),
 * or WRITE_EXT_BKT (keys_ext_bkt). Only failures of the no-key-shift set
 * are reported as errors. (Return type and closing lines not visible.)
 */
655 write_keys(uint8_t write_type)
659 uint32_t key_cnt = 0;
661 if (write_type == WRITE_KEY_SHIFT) {
662 key_cnt = tbl_rwc_test_param.count_keys_ks;
663 keys = tbl_rwc_test_param.keys_ks;
664 } else if (write_type == WRITE_NO_KEY_SHIFT) {
665 key_cnt = tbl_rwc_test_param.count_keys_no_ks;
666 keys = tbl_rwc_test_param.keys_no_ks;
667 } else if (write_type == WRITE_EXT_BKT) {
668 key_cnt = tbl_rwc_test_param.count_keys_extbkt;
669 keys = tbl_rwc_test_param.keys_ext_bkt;
671 for (i = 0; i < key_cnt; i++) {
672 ret = rte_hash_add_key(tbl_rwc_test_param.h, keys + i);
673 if ((write_type == WRITE_NO_KEY_SHIFT) && ret < 0) {
674 printf("writer failed %"PRIu32"\n", i);
/* Writer worker for the multi-writer test: each writer (identified by
 * pos_core, passed via 'arg') inserts its own disjoint slice of
 * single_insert keys from the key-shift set.
 */
682 test_rwc_multi_writer(__attribute__((unused)) void *arg)
685 uint32_t pos_core = (uint32_t)((uintptr_t)arg);
686 offset = pos_core * tbl_rwc_test_param.single_insert;
687 for (i = offset; i < offset + tbl_rwc_test_param.single_insert; i++)
688 rte_hash_add_key(tbl_rwc_test_param.h,
689 tbl_rwc_test_param.keys_ks + i);
/* Perf test: writer adds keys causing no key-shifts while
695 * Reader(s) lookup keys present in the table.
 * For each reader-count case, launches reader lcores, writes the
 * no-key-shift keys, then records average cycles per lookup.
 */
698 test_hash_add_no_ks_lookup_hit(struct rwc_perf *rwc_perf_results, int rwc_lf,
699 int htm, int ext_bkt)
704 uint8_t write_type = WRITE_NO_KEY_SHIFT;
705 uint8_t read_type = READ_PASS_NO_KEY_SHIFTS;
707 rte_atomic64_init(&greads);
708 rte_atomic64_init(&gread_cycles);
710 if (init_params(rwc_lf, use_jhash, htm, ext_bkt) != 0)
712 printf("\nTest: Hash add - no key-shifts, read - hit\n");
/* m == 0: single lookups; m == 1: bulk lookups. */
713 for (m = 0; m < 2; m++) {
715 printf("\n** With bulk-lookup **\n");
716 read_type |= BULK_LOOKUP;
718 for (n = 0; n < NUM_TEST; n++) {
719 unsigned int tot_lcore = rte_lcore_count();
720 if (tot_lcore < rwc_core_cnt[n] + 1)
723 printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
725 rte_atomic64_clear(&greads);
726 rte_atomic64_clear(&gread_cycles);
728 rte_hash_reset(tbl_rwc_test_param.h);
730 if (write_keys(write_type) < 0)
733 for (i = 1; i <= rwc_core_cnt[n]; i++)
734 rte_eal_remote_launch(test_rwc_reader,
735 (void *)(uintptr_t)read_type,
736 enabled_core_ids[i]);
738 for (i = 1; i <= rwc_core_cnt[n]; i++)
739 if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
742 unsigned long long cycles_per_lookup =
743 rte_atomic64_read(&gread_cycles) /
744 rte_atomic64_read(&greads);
745 rwc_perf_results->w_no_ks_r_hit[m][n]
747 printf("Cycles per lookup: %llu\n", cycles_per_lookup);
752 rte_hash_free(tbl_rwc_test_param.h);
/* Error path: wait for all workers before freeing the table. */
756 rte_eal_mp_wait_lcore();
757 rte_hash_free(tbl_rwc_test_param.h);
/* Perf test:
763 * Reader(s) lookup keys absent in the table while
764 * 'Main' thread adds with no key-shifts.
 * Readers run concurrently with the writes (launched before write_keys).
 */
767 test_hash_add_no_ks_lookup_miss(struct rwc_perf *rwc_perf_results, int rwc_lf,
768 int htm, int ext_bkt)
773 uint8_t write_type = WRITE_NO_KEY_SHIFT;
774 uint8_t read_type = READ_FAIL;
777 rte_atomic64_init(&greads);
778 rte_atomic64_init(&gread_cycles);
780 if (init_params(rwc_lf, use_jhash, htm, ext_bkt) != 0)
782 printf("\nTest: Hash add - no key-shifts, Hash lookup - miss\n");
783 for (m = 0; m < 2; m++) {
785 printf("\n** With bulk-lookup **\n");
786 read_type |= BULK_LOOKUP;
788 for (n = 0; n < NUM_TEST; n++) {
789 unsigned int tot_lcore = rte_lcore_count();
790 if (tot_lcore < rwc_core_cnt[n] + 1)
793 printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
795 rte_atomic64_clear(&greads);
796 rte_atomic64_clear(&gread_cycles);
798 rte_hash_reset(tbl_rwc_test_param.h);
/* Launch readers first; writes happen while readers loop. */
801 for (i = 1; i <= rwc_core_cnt[n]; i++)
802 rte_eal_remote_launch(test_rwc_reader,
803 (void *)(uintptr_t)read_type,
804 enabled_core_ids[i]);
805 ret = write_keys(write_type);
810 for (i = 1; i <= rwc_core_cnt[n]; i++)
811 if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
814 unsigned long long cycles_per_lookup =
815 rte_atomic64_read(&gread_cycles) /
816 rte_atomic64_read(&greads);
817 rwc_perf_results->w_no_ks_r_miss[m][n]
819 printf("Cycles per lookup: %llu\n", cycles_per_lookup);
824 rte_hash_free(tbl_rwc_test_param.h);
828 rte_eal_mp_wait_lcore();
829 rte_hash_free(tbl_rwc_test_param.h);
/* Perf test:
835 * Reader(s) lookup keys present in the table and not likely to be on the
836 * shift path while 'Main' thread adds keys causing key-shifts.
 */
839 test_hash_add_ks_lookup_hit_non_sp(struct rwc_perf *rwc_perf_results,
840 int rwc_lf, int htm, int ext_bkt)
847 uint8_t read_type = READ_PASS_NON_SHIFT_PATH;
849 rte_atomic64_init(&greads);
850 rte_atomic64_init(&gread_cycles);
852 if (init_params(rwc_lf, use_jhash, htm, ext_bkt) != 0)
854 printf("\nTest: Hash add - key shift, Hash lookup - hit"
855 " (non-shift-path)\n");
856 for (m = 0; m < 2; m++) {
858 printf("\n** With bulk-lookup **\n");
859 read_type |= BULK_LOOKUP;
861 for (n = 0; n < NUM_TEST; n++) {
862 unsigned int tot_lcore = rte_lcore_count();
863 if (tot_lcore < rwc_core_cnt[n] + 1)
866 printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
868 rte_atomic64_clear(&greads);
869 rte_atomic64_clear(&gread_cycles);
871 rte_hash_reset(tbl_rwc_test_param.h);
/* Pre-populate with non-shifting keys, then shift concurrently. */
873 write_type = WRITE_NO_KEY_SHIFT;
874 if (write_keys(write_type) < 0)
876 for (i = 1; i <= rwc_core_cnt[n]; i++)
877 rte_eal_remote_launch(test_rwc_reader,
878 (void *)(uintptr_t)read_type,
879 enabled_core_ids[i]);
880 write_type = WRITE_KEY_SHIFT;
881 ret = write_keys(write_type);
886 for (i = 1; i <= rwc_core_cnt[n]; i++)
887 if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
890 unsigned long long cycles_per_lookup =
891 rte_atomic64_read(&gread_cycles) /
892 rte_atomic64_read(&greads);
893 rwc_perf_results->w_ks_r_hit_nsp[m][n]
895 printf("Cycles per lookup: %llu\n", cycles_per_lookup);
900 rte_hash_free(tbl_rwc_test_param.h);
904 rte_eal_mp_wait_lcore();
905 rte_hash_free(tbl_rwc_test_param.h);
/* Perf test:
911 * Reader(s) lookup keys present in the table and likely on the shift-path while
912 * 'Main' thread adds keys causing key-shifts.
 */
915 test_hash_add_ks_lookup_hit_sp(struct rwc_perf *rwc_perf_results, int rwc_lf,
916 int htm, int ext_bkt)
923 uint8_t read_type = READ_PASS_SHIFT_PATH;
925 rte_atomic64_init(&greads);
926 rte_atomic64_init(&gread_cycles);
928 if (init_params(rwc_lf, use_jhash, htm, ext_bkt) != 0)
930 printf("\nTest: Hash add - key shift, Hash lookup - hit (shift-path)"
933 for (m = 0; m < 2; m++) {
935 printf("\n** With bulk-lookup **\n");
936 read_type |= BULK_LOOKUP;
938 for (n = 0; n < NUM_TEST; n++) {
939 unsigned int tot_lcore = rte_lcore_count();
940 if (tot_lcore < rwc_core_cnt[n] + 1)
943 printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
944 rte_atomic64_clear(&greads);
945 rte_atomic64_clear(&gread_cycles);
947 rte_hash_reset(tbl_rwc_test_param.h);
/* Pre-populate, launch readers, then force shifts concurrently. */
949 write_type = WRITE_NO_KEY_SHIFT;
950 if (write_keys(write_type) < 0)
952 for (i = 1; i <= rwc_core_cnt[n]; i++)
953 rte_eal_remote_launch(test_rwc_reader,
954 (void *)(uintptr_t)read_type,
955 enabled_core_ids[i]);
956 write_type = WRITE_KEY_SHIFT;
957 ret = write_keys(write_type);
962 for (i = 1; i <= rwc_core_cnt[n]; i++)
963 if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
966 unsigned long long cycles_per_lookup =
967 rte_atomic64_read(&gread_cycles) /
968 rte_atomic64_read(&greads);
969 rwc_perf_results->w_ks_r_hit_sp[m][n]
971 printf("Cycles per lookup: %llu\n", cycles_per_lookup);
976 rte_hash_free(tbl_rwc_test_param.h);
980 rte_eal_mp_wait_lcore();
981 rte_hash_free(tbl_rwc_test_param.h);
/* Perf test:
987 * Reader(s) lookup keys absent in the table while
988 * 'Main' thread adds keys causing key-shifts.
 */
991 test_hash_add_ks_lookup_miss(struct rwc_perf *rwc_perf_results, int rwc_lf, int
999 uint8_t read_type = READ_FAIL;
1001 rte_atomic64_init(&greads);
1002 rte_atomic64_init(&gread_cycles);
1004 if (init_params(rwc_lf, use_jhash, htm, ext_bkt) != 0)
1006 printf("\nTest: Hash add - key shift, Hash lookup - miss\n");
1007 for (m = 0; m < 2; m++) {
1009 printf("\n** With bulk-lookup **\n");
1010 read_type |= BULK_LOOKUP;
1012 for (n = 0; n < NUM_TEST; n++) {
1013 unsigned int tot_lcore = rte_lcore_count();
1014 if (tot_lcore < rwc_core_cnt[n] + 1)
1017 printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
1019 rte_atomic64_clear(&greads);
1020 rte_atomic64_clear(&gread_cycles);
1022 rte_hash_reset(tbl_rwc_test_param.h);
1024 write_type = WRITE_NO_KEY_SHIFT;
1025 if (write_keys(write_type) < 0)
1027 for (i = 1; i <= rwc_core_cnt[n]; i++)
1028 rte_eal_remote_launch(test_rwc_reader,
1029 (void *)(uintptr_t)read_type,
1030 enabled_core_ids[i]);
1031 write_type = WRITE_KEY_SHIFT;
1032 ret = write_keys(write_type);
1037 for (i = 1; i <= rwc_core_cnt[n]; i++)
1038 if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
1041 unsigned long long cycles_per_lookup =
1042 rte_atomic64_read(&gread_cycles) /
1043 rte_atomic64_read(&greads);
1044 rwc_perf_results->w_ks_r_miss[m][n] = cycles_per_lookup;
1045 printf("Cycles per lookup: %llu\n", cycles_per_lookup);
1050 rte_hash_free(tbl_rwc_test_param.h);
1054 rte_eal_mp_wait_lcore();
1055 rte_hash_free(tbl_rwc_test_param.h);
/*
1060 * Test lookup perf for multi-writer:
1061 * Reader(s) lookup keys present in the table and likely on the shift-path while
 * Writers add keys causing key-shifts.
1063 * Writers are running in parallel, on different data plane cores.
 */
1066 test_hash_multi_add_lookup(struct rwc_perf *rwc_perf_results, int rwc_lf,
1067 int htm, int ext_bkt)
1069 unsigned int n, m, k;
1073 uint8_t read_type = READ_PASS_SHIFT_PATH;
1075 rte_atomic64_init(&greads);
1076 rte_atomic64_init(&gread_cycles);
1078 if (init_params(rwc_lf, use_jhash, htm, ext_bkt) != 0)
1080 printf("\nTest: Multi-add-lookup\n");
/* m selects the writer count; k toggles single vs. bulk lookup. */
1082 for (m = 1; m < NUM_TEST; m++) {
1083 /* Calculate keys added by each writer */
1084 tbl_rwc_test_param.single_insert =
1085 tbl_rwc_test_param.count_keys_ks / rwc_core_cnt[m];
1086 for (k = 0; k < 2; k++) {
1088 printf("\n** With bulk-lookup **\n");
1089 read_type |= BULK_LOOKUP;
1091 for (n = 0; n < NUM_TEST; n++) {
1092 unsigned int tot_lcore = rte_lcore_count();
1093 if (tot_lcore < (rwc_core_cnt[n] +
1094 rwc_core_cnt[m] + 1))
1097 printf("\nNumber of writers: %u",
1099 printf("\nNumber of readers: %u\n",
1102 rte_atomic64_clear(&greads);
1103 rte_atomic64_clear(&gread_cycles);
1105 rte_hash_reset(tbl_rwc_test_param.h);
1107 write_type = WRITE_NO_KEY_SHIFT;
1108 if (write_keys(write_type) < 0)
1111 /* Launch reader(s) */
1112 for (i = 1; i <= rwc_core_cnt[n]; i++)
1113 rte_eal_remote_launch(test_rwc_reader,
1114 (void *)(uintptr_t)read_type,
1115 enabled_core_ids[i]);
1116 write_type = WRITE_KEY_SHIFT;
1119 /* Launch writers */
1120 for (; i <= rwc_core_cnt[m]
1121 + rwc_core_cnt[n]; i++) {
1122 rte_eal_remote_launch
1123 (test_rwc_multi_writer,
1124 (void *)(uintptr_t)pos_core,
1125 enabled_core_ids[i]);
1129 /* Wait for writers to complete */
1130 for (i = rwc_core_cnt[n] + 1;
1131 i <= rwc_core_cnt[m] + rwc_core_cnt[n];
1133 rte_eal_wait_lcore(enabled_core_ids[i]);
/* Then wait for the readers. */
1137 for (i = 1; i <= rwc_core_cnt[n]; i++)
1138 if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
1141 unsigned long long cycles_per_lookup =
1142 rte_atomic64_read(&gread_cycles)
1143 / rte_atomic64_read(&greads);
1144 rwc_perf_results->multi_rw[m][k][n]
1145 = cycles_per_lookup;
1146 printf("Cycles per lookup: %llu\n",
1153 rte_hash_free(tbl_rwc_test_param.h);
1157 rte_eal_mp_wait_lcore();
1158 rte_hash_free(tbl_rwc_test_param.h);
/* Perf test:
1164 * Reader(s) lookup keys present in the extendable bkt.
 * Readers look up extended-bucket keys while the main thread deletes the
 * keys whose removal shifts ext-bkt entries back into secondary buckets.
 */
1167 test_hash_add_ks_lookup_hit_extbkt(struct rwc_perf *rwc_perf_results,
1168 int rwc_lf, int htm, int ext_bkt)
1174 uint8_t read_type = READ_PASS_KEY_SHIFTS_EXTBKT;
1176 rte_atomic64_init(&greads);
1177 rte_atomic64_init(&gread_cycles);
1179 if (init_params(rwc_lf, use_jhash, htm, ext_bkt) != 0)
1181 printf("\nTest: Hash add - key-shifts, read - hit (ext_bkt)\n");
1182 for (m = 0; m < 2; m++) {
1184 printf("\n** With bulk-lookup **\n");
1185 read_type |= BULK_LOOKUP;
1187 for (n = 0; n < NUM_TEST; n++) {
1188 unsigned int tot_lcore = rte_lcore_count();
1189 if (tot_lcore < rwc_core_cnt[n] + 1)
1192 printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
1194 rte_atomic64_clear(&greads);
1195 rte_atomic64_clear(&gread_cycles);
1197 rte_hash_reset(tbl_rwc_test_param.h);
/* Fill the table far enough that ext buckets are in use. */
1198 write_type = WRITE_NO_KEY_SHIFT;
1199 if (write_keys(write_type) < 0)
1201 write_type = WRITE_KEY_SHIFT;
1202 if (write_keys(write_type) < 0)
1205 for (i = 1; i <= rwc_core_cnt[n]; i++)
1206 rte_eal_remote_launch(test_rwc_reader,
1207 (void *)(uintptr_t)read_type,
1208 enabled_core_ids[i]);
/* Concurrently delete keys that shift ext-bkt entries. */
1209 for (i = 0; i < tbl_rwc_test_param.count_keys_ks_extbkt;
1211 if (rte_hash_del_key(tbl_rwc_test_param.h,
1212 tbl_rwc_test_param.keys_ks_extbkt + i)
1214 printf("Delete Failed: %u\n",
1215 tbl_rwc_test_param.keys_ks_extbkt[i]);
1221 for (i = 1; i <= rwc_core_cnt[n]; i++)
1222 if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
1225 unsigned long long cycles_per_lookup =
1226 rte_atomic64_read(&gread_cycles) /
1227 rte_atomic64_read(&greads);
1228 rwc_perf_results->w_ks_r_hit_extbkt[m][n]
1229 = cycles_per_lookup;
1230 printf("Cycles per lookup: %llu\n", cycles_per_lookup);
1235 rte_hash_free(tbl_rwc_test_param.h);
1239 rte_eal_mp_wait_lcore();
1240 rte_hash_free(tbl_rwc_test_param.h);
/* Test entry point: generates the key sets, runs every perf test first
 * with lock-free RW concurrency and then without it (HTM-backed when the
 * platform supports transactional memory), and prints a results summary
 * table of cycles-per-lookup for each configuration.
 */
1245 test_hash_readwrite_lf_main(void)
1248 * Variables used to choose different tests.
1249 * rwc_lf indicates if read-write concurrency lock-free support is
1251 * htm indicates if Hardware transactional memory support is enabled.
1258 if (rte_lcore_count() < 2) {
1259 printf("Not enough cores for hash_readwrite_lf_autotest, expecting at least 2\n");
1260 return TEST_SKIPPED;
1263 setlocale(LC_NUMERIC, "");
/* Detect HTM support to pick the non-lock-free concurrency mode. */
1265 if (rte_tm_supported())
1270 if (init_params(rwc_lf, use_jhash, htm, ext_bkt) != 0)
1272 if (generate_keys() != 0)
1274 if (get_enabled_cores_list() != 0)
/* Phase 1: lock-free read-write concurrency enabled. */
1277 if (RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF) {
1280 printf("Test lookup with read-write concurrency lock free support"
1282 if (test_hash_add_no_ks_lookup_hit(&rwc_lf_results, rwc_lf,
1285 if (test_hash_add_no_ks_lookup_miss(&rwc_lf_results, rwc_lf,
1288 if (test_hash_add_ks_lookup_hit_non_sp(&rwc_lf_results, rwc_lf,
1291 if (test_hash_add_ks_lookup_hit_sp(&rwc_lf_results, rwc_lf,
1294 if (test_hash_add_ks_lookup_miss(&rwc_lf_results, rwc_lf, htm,
1297 if (test_hash_multi_add_lookup(&rwc_lf_results, rwc_lf, htm,
1300 if (test_hash_add_ks_lookup_hit_extbkt(&rwc_lf_results, rwc_lf,
/* Phase 2: lock-free disabled (requires HTM unless explicitly allowed). */
1304 printf("\nTest lookup with read-write concurrency lock free support"
1308 printf("With HTM Disabled\n");
1309 if (!RUN_WITH_HTM_DISABLED) {
1310 printf("Enable RUN_WITH_HTM_DISABLED to test with"
1311 " lock-free disabled");
1315 printf("With HTM Enabled\n");
1316 if (test_hash_add_no_ks_lookup_hit(&rwc_non_lf_results, rwc_lf, htm,
1319 if (test_hash_add_no_ks_lookup_miss(&rwc_non_lf_results, rwc_lf, htm,
1322 if (test_hash_add_ks_lookup_hit_non_sp(&rwc_non_lf_results, rwc_lf,
1325 if (test_hash_add_ks_lookup_hit_sp(&rwc_non_lf_results, rwc_lf, htm,
1328 if (test_hash_add_ks_lookup_miss(&rwc_non_lf_results, rwc_lf, htm,
1331 if (test_hash_multi_add_lookup(&rwc_non_lf_results, rwc_lf, htm,
1334 if (test_hash_add_ks_lookup_hit_extbkt(&rwc_non_lf_results, rwc_lf,
/* Results summary: j == 0 single lookups, j == 1 bulk lookups. */
1338 printf("\n\t\t\t\t\t\t********** Results summary **********\n\n");
1340 for (j = 0; j < 2; j++) {
1342 printf("\n\t\t\t\t\t#######********** Bulk Lookup "
1343 "**********#######\n\n");
1344 printf("_______\t\t_______\t\t_________\t___\t\t_________\t\t"
1345 "\t\t\t\t_________________\n");
1346 printf("Writers\t\tReaders\t\tLock-free\tHTM\t\tTest-case\t\t\t"
1347 "\t\t\tCycles per lookup\n");
1348 printf("_______\t\t_______\t\t_________\t___\t\t_________\t\t\t"
1349 "\t\t\t_________________\n");
1350 for (i = 0; i < NUM_TEST; i++) {
1351 printf("%u\t\t%u\t\t", 1, rwc_core_cnt[i]);
1352 printf("Enabled\t\t");
1354 printf("Hash add - no key-shifts, lookup - hit\t\t\t\t"
1355 "%u\n\t\t\t\t\t\t\t\t",
1356 rwc_lf_results.w_no_ks_r_hit[j][i]);
1357 printf("Hash add - no key-shifts, lookup - miss\t\t\t\t"
1358 "%u\n\t\t\t\t\t\t\t\t",
1359 rwc_lf_results.w_no_ks_r_miss[j][i]);
1360 printf("Hash add - key-shifts, lookup - hit"
1361 "(non-shift-path)\t\t%u\n\t\t\t\t\t\t\t\t",
1362 rwc_lf_results.w_ks_r_hit_nsp[j][i]);
1363 printf("Hash add - key-shifts, lookup - hit "
1364 "(shift-path)\t\t%u\n\t\t\t\t\t\t\t\t",
1365 rwc_lf_results.w_ks_r_hit_sp[j][i]);
1366 printf("Hash add - key-shifts, Hash lookup miss\t\t\t\t"
1367 "%u\n\t\t\t\t\t\t\t\t",
1368 rwc_lf_results.w_ks_r_miss[j][i]);
1369 printf("Hash add - key-shifts, Hash lookup hit (ext_bkt)\t\t"
1371 rwc_lf_results.w_ks_r_hit_extbkt[j][i]);
1373 printf("Disabled\t");
1375 printf("Enabled\t\t");
1377 printf("Disabled\t");
1378 printf("Hash add - no key-shifts, lookup - hit\t\t\t\t"
1379 "%u\n\t\t\t\t\t\t\t\t",
1380 rwc_non_lf_results.w_no_ks_r_hit[j][i]);
1381 printf("Hash add - no key-shifts, lookup - miss\t\t\t\t"
1382 "%u\n\t\t\t\t\t\t\t\t",
1383 rwc_non_lf_results.w_no_ks_r_miss[j][i]);
1384 printf("Hash add - key-shifts, lookup - hit "
1385 "(non-shift-path)\t\t%u\n\t\t\t\t\t\t\t\t",
1386 rwc_non_lf_results.w_ks_r_hit_nsp[j][i]);
1387 printf("Hash add - key-shifts, lookup - hit "
1388 "(shift-path)\t\t%u\n\t\t\t\t\t\t\t\t",
1389 rwc_non_lf_results.w_ks_r_hit_sp[j][i]);
1390 printf("Hash add - key-shifts, Hash lookup miss\t\t\t\t"
1391 "%u\n\t\t\t\t\t\t\t\t",
1392 rwc_non_lf_results.w_ks_r_miss[j][i]);
1393 printf("Hash add - key-shifts, Hash lookup hit (ext_bkt)\t\t"
1395 rwc_non_lf_results.w_ks_r_hit_extbkt[j][i]);
1397 printf("_______\t\t_______\t\t_________\t___\t\t"
1398 "_________\t\t\t\t\t\t_________________\n");
/* Multi-writer results: i writers x k reader configurations. */
1401 for (i = 1; i < NUM_TEST; i++) {
1402 for (k = 0; k < NUM_TEST; k++) {
1403 printf("%u", rwc_core_cnt[i]);
1404 printf("\t\t%u\t\t", rwc_core_cnt[k]);
1405 printf("Enabled\t\t");
1407 printf("Multi-add-lookup\t\t\t\t\t\t%u\n\n\t\t"
1409 rwc_lf_results.multi_rw[i][j][k]);
1410 printf("Disabled\t");
1412 printf("Enabled\t\t");
1414 printf("Disabled\t");
1415 printf("Multi-add-lookup\t\t\t\t\t\t%u\n",
1416 rwc_non_lf_results.multi_rw[i][j][k]);
1418 printf("_______\t\t_______\t\t_________\t___"
1419 "\t\t_________\t\t\t\t\t\t"
1420 "_________________\n");
/* Final cleanup of key-set allocations. */
1424 rte_free(tbl_rwc_test_param.keys);
1425 rte_free(tbl_rwc_test_param.keys_no_ks);
1426 rte_free(tbl_rwc_test_param.keys_ks);
1427 rte_free(tbl_rwc_test_param.keys_absent);
1428 rte_free(tbl_rwc_test_param.keys_shift_path);
1429 rte_free(scanned_bkts);
/* Register the entry point with the DPDK test framework. */
1433 REGISTER_TEST_COMMAND(hash_readwrite_lf_autotest, test_hash_readwrite_lf_main);