#define WRITE_EXT_BKT 2
#define NUM_TEST 3
+
+#define QSBR_REPORTING_INTERVAL 1024
+
static unsigned int rwc_core_cnt[NUM_TEST] = {1, 2, 4};
struct rwc_perf {
uint32_t w_ks_r_miss[2][NUM_TEST];
uint32_t multi_rw[NUM_TEST - 1][2][NUM_TEST];
uint32_t w_ks_r_hit_extbkt[2][NUM_TEST];
+ uint32_t writer_add_del[NUM_TEST];
};
static struct rwc_perf rwc_lf_results, rwc_non_lf_results;
struct rte_hash *h;
} tbl_rwc_test_param;
-static rte_atomic64_t gread_cycles;
-static rte_atomic64_t greads;
+static uint64_t gread_cycles;
+static uint64_t greads;
+static uint64_t gwrite_cycles;
+static uint64_t gwrites;
static volatile uint8_t writer_done;
}
static inline uint32_t
-get_prim_bucket_index(__attribute__((unused)) const struct rte_hash *h,
+get_prim_bucket_index(__rte_unused const struct rte_hash *h,
const hash_sig_t hash)
{
uint32_t num_buckets;
}
static inline uint32_t
-get_alt_bucket_index(__attribute__((unused)) const struct rte_hash *h,
+get_alt_bucket_index(__rte_unused const struct rte_hash *h,
uint32_t cur_bkt_idx, uint16_t sig)
{
uint32_t num_buckets;
}
static int
-test_rwc_reader(__attribute__((unused)) void *arg)
+test_rwc_reader(__rte_unused void *arg)
{
uint32_t i, j;
int ret;
} while (!writer_done);
cycles = rte_rdtsc_precise() - begin;
- rte_atomic64_add(&gread_cycles, cycles);
- rte_atomic64_add(&greads, read_cnt*loop_cnt);
+ __atomic_fetch_add(&gread_cycles, cycles, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&greads, read_cnt*loop_cnt, __ATOMIC_RELAXED);
return 0;
}
}
static int
-test_rwc_multi_writer(__attribute__((unused)) void *arg)
+test_rwc_multi_writer(__rte_unused void *arg)
{
uint32_t i, offset;
uint32_t pos_core = (uint32_t)((uintptr_t)arg);
uint8_t write_type = WRITE_NO_KEY_SHIFT;
uint8_t read_type = READ_PASS_NO_KEY_SHIFTS;
- rte_atomic64_init(&greads);
- rte_atomic64_init(&gread_cycles);
-
if (init_params(rwc_lf, use_jhash, htm, ext_bkt) != 0)
goto err;
printf("\nTest: Hash add - no key-shifts, read - hit\n");
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- rte_atomic64_clear(&greads);
- rte_atomic64_clear(&gread_cycles);
+ __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
+ __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
goto err;
unsigned long long cycles_per_lookup =
- rte_atomic64_read(&gread_cycles) /
- rte_atomic64_read(&greads);
+ __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
+ / __atomic_load_n(&greads, __ATOMIC_RELAXED);
rwc_perf_results->w_no_ks_r_hit[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
uint8_t read_type = READ_FAIL;
int ret;
- rte_atomic64_init(&greads);
- rte_atomic64_init(&gread_cycles);
-
if (init_params(rwc_lf, use_jhash, htm, ext_bkt) != 0)
goto err;
printf("\nTest: Hash add - no key-shifts, Hash lookup - miss\n");
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- rte_atomic64_clear(&greads);
- rte_atomic64_clear(&gread_cycles);
+ __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
+ __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
goto err;
unsigned long long cycles_per_lookup =
- rte_atomic64_read(&gread_cycles) /
- rte_atomic64_read(&greads);
+ __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
+ / __atomic_load_n(&greads, __ATOMIC_RELAXED);
rwc_perf_results->w_no_ks_r_miss[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
uint8_t write_type;
uint8_t read_type = READ_PASS_NON_SHIFT_PATH;
- rte_atomic64_init(&greads);
- rte_atomic64_init(&gread_cycles);
-
if (init_params(rwc_lf, use_jhash, htm, ext_bkt) != 0)
goto err;
printf("\nTest: Hash add - key shift, Hash lookup - hit"
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- rte_atomic64_clear(&greads);
- rte_atomic64_clear(&gread_cycles);
+ __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
+ __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
goto err;
unsigned long long cycles_per_lookup =
- rte_atomic64_read(&gread_cycles) /
- rte_atomic64_read(&greads);
+ __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
+ / __atomic_load_n(&greads, __ATOMIC_RELAXED);
rwc_perf_results->w_ks_r_hit_nsp[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
uint8_t write_type;
uint8_t read_type = READ_PASS_SHIFT_PATH;
- rte_atomic64_init(&greads);
- rte_atomic64_init(&gread_cycles);
-
if (init_params(rwc_lf, use_jhash, htm, ext_bkt) != 0)
goto err;
printf("\nTest: Hash add - key shift, Hash lookup - hit (shift-path)"
goto finish;
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- rte_atomic64_clear(&greads);
- rte_atomic64_clear(&gread_cycles);
+
+ __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
+ __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
goto err;
unsigned long long cycles_per_lookup =
- rte_atomic64_read(&gread_cycles) /
- rte_atomic64_read(&greads);
+ __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
+ / __atomic_load_n(&greads, __ATOMIC_RELAXED);
rwc_perf_results->w_ks_r_hit_sp[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
uint8_t write_type;
uint8_t read_type = READ_FAIL;
- rte_atomic64_init(&greads);
- rte_atomic64_init(&gread_cycles);
-
if (init_params(rwc_lf, use_jhash, htm, ext_bkt) != 0)
goto err;
printf("\nTest: Hash add - key shift, Hash lookup - miss\n");
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- rte_atomic64_clear(&greads);
- rte_atomic64_clear(&gread_cycles);
+ __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
+ __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
goto err;
unsigned long long cycles_per_lookup =
- rte_atomic64_read(&gread_cycles) /
- rte_atomic64_read(&greads);
+ __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
+ / __atomic_load_n(&greads, __ATOMIC_RELAXED);
rwc_perf_results->w_ks_r_miss[m][n] = cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
}
uint8_t write_type;
uint8_t read_type = READ_PASS_SHIFT_PATH;
- rte_atomic64_init(&greads);
- rte_atomic64_init(&gread_cycles);
-
if (init_params(rwc_lf, use_jhash, htm, ext_bkt) != 0)
goto err;
printf("\nTest: Multi-add-lookup\n");
printf("\nNumber of readers: %u\n",
rwc_core_cnt[n]);
- rte_atomic64_clear(&greads);
- rte_atomic64_clear(&gread_cycles);
+ __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
+ __atomic_store_n(&gread_cycles, 0,
+ __ATOMIC_RELAXED);
rte_hash_reset(tbl_rwc_test_param.h);
writer_done = 0;
goto err;
unsigned long long cycles_per_lookup =
- rte_atomic64_read(&gread_cycles)
- / rte_atomic64_read(&greads);
+ __atomic_load_n(&gread_cycles,
+ __ATOMIC_RELAXED) /
+ __atomic_load_n(&greads,
+ __ATOMIC_RELAXED);
rwc_perf_results->multi_rw[m][k][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n",
uint8_t write_type;
uint8_t read_type = READ_PASS_KEY_SHIFTS_EXTBKT;
- rte_atomic64_init(&greads);
- rte_atomic64_init(&gread_cycles);
-
if (init_params(rwc_lf, use_jhash, htm, ext_bkt) != 0)
goto err;
printf("\nTest: Hash add - key-shifts, read - hit (ext_bkt)\n");
printf("\nNumber of readers: %u\n", rwc_core_cnt[n]);
- rte_atomic64_clear(&greads);
- rte_atomic64_clear(&gread_cycles);
+ __atomic_store_n(&greads, 0, __ATOMIC_RELAXED);
+ __atomic_store_n(&gread_cycles, 0, __ATOMIC_RELAXED);
rte_hash_reset(tbl_rwc_test_param.h);
write_type = WRITE_NO_KEY_SHIFT;
goto err;
unsigned long long cycles_per_lookup =
- rte_atomic64_read(&gread_cycles) /
- rte_atomic64_read(&greads);
+ __atomic_load_n(&gread_cycles, __ATOMIC_RELAXED)
+ / __atomic_load_n(&greads, __ATOMIC_RELAXED);
rwc_perf_results->w_ks_r_hit_extbkt[m][n]
= cycles_per_lookup;
printf("Cycles per lookup: %llu\n", cycles_per_lookup);
return -1;
}
+static struct rte_rcu_qsbr *rv;
+
+/*
+ * Reader thread using rte_hash data structure with RCU.
+ *
+ * Registers itself with the global QSBR variable 'rv', then repeatedly
+ * looks up the non-key-shift keys in batches of QSBR_REPORTING_INTERVAL,
+ * reporting a quiescent state after each batch. Loops until the control
+ * thread sets 'writer_done'. Lookup results are intentionally ignored —
+ * this thread only generates read-side load for the writer perf test.
+ */
+static int
+test_hash_rcu_qsbr_reader(void *arg)
+{
+	unsigned int i, j;
+	/* Stop one full batch short of the end so that i + j never
+	 * indexes past the keys_no_ks array inside the inner loop.
+	 */
+	uint32_t num_keys = tbl_rwc_test_param.count_keys_no_ks
+				- QSBR_REPORTING_INTERVAL;
+	uint32_t *keys = tbl_rwc_test_param.keys_no_ks;
+	uint32_t lcore_id = rte_lcore_id();
+	RTE_SET_USED(arg);
+
+	(void)rte_rcu_qsbr_thread_register(rv, lcore_id);
+	rte_rcu_qsbr_thread_online(rv, lcore_id);
+	do {
+		for (i = 0; i < num_keys; i += j) {
+			for (j = 0; j < QSBR_REPORTING_INTERVAL; j++)
+				rte_hash_lookup(tbl_rwc_test_param.h,
+						keys + i + j);
+			/* Update quiescent state counter */
+			rte_rcu_qsbr_quiescent(rv, lcore_id);
+		}
+	} while (!writer_done);
+	rte_rcu_qsbr_thread_offline(rv, lcore_id);
+	(void)rte_rcu_qsbr_thread_unregister(rv, lcore_id);
+
+	return 0;
+}
+
+/*
+ * Writer thread using rte_hash data structure with RCU.
+ *
+ * 'arg' carries this writer's index (0-based). Each writer churns its own
+ * disjoint slice of 'single_insert' keys, deleting and immediately
+ * re-adding each one, and accumulates its elapsed cycles / operation
+ * count into the global counters with relaxed atomics (exact ordering is
+ * not needed — totals are read only after all writers have joined).
+ * del/add return values are intentionally ignored: this measures cost,
+ * not correctness.
+ */
+static int
+test_hash_rcu_qsbr_writer(void *arg)
+{
+	uint32_t i, offset;
+	uint64_t begin, cycles;
+	uint8_t pos_core = (uint32_t)((uintptr_t)arg);
+	offset = pos_core * tbl_rwc_test_param.single_insert;
+
+	begin = rte_rdtsc_precise();
+	for (i = offset; i < offset + tbl_rwc_test_param.single_insert; i++) {
+		/* Delete element from the shared data structure */
+		rte_hash_del_key(tbl_rwc_test_param.h,
+					tbl_rwc_test_param.keys_no_ks + i);
+		rte_hash_add_key(tbl_rwc_test_param.h,
+					tbl_rwc_test_param.keys_no_ks + i);
+	}
+	cycles = rte_rdtsc_precise() - begin;
+	__atomic_fetch_add(&gwrite_cycles, cycles, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&gwrites, tbl_rwc_test_param.single_insert,
+				__ATOMIC_RELAXED);
+	return 0;
+}
+
+/*
+ * Writer perf test with RCU QSBR in DQ mode:
+ * Writer(s) delete and add keys in the table.
+ * Readers lookup keys in the hash table.
+ *
+ * For each entry in rwc_core_cnt[]: launches 2 reader lcores plus
+ * rwc_core_cnt[n] writer lcores (so rwc_core_cnt[n] + 3 lcores including
+ * the main core are required), measures cycles per delete+add pair and
+ * stores the result in rwc_perf_results->writer_add_del[n].
+ * Returns 0 on success (including early finish when not enough lcores
+ * are available), -1 on error.
+ */
+static int
+test_hash_rcu_qsbr_writer_perf(struct rwc_perf *rwc_perf_results, int rwc_lf,
+				int htm, int ext_bkt)
+{
+	unsigned int n;
+	uint64_t i;
+	uint8_t write_type;
+	int use_jhash = 0;
+	struct rte_hash_rcu_config rcu_config = {0};
+	uint32_t sz;
+	uint8_t pos_core;
+
+	printf("\nTest: Writer perf with integrated RCU\n");
+
+	if (init_params(rwc_lf, use_jhash, htm, ext_bkt) != 0)
+		goto err;
+
+	sz = rte_rcu_qsbr_get_memsize(RTE_MAX_LCORE);
+	rv = (struct rte_rcu_qsbr *)rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
+	/* Check the allocation before handing rv to the hash/QSBR APIs;
+	 * dereferencing a failed rte_zmalloc would crash the test.
+	 */
+	if (rv == NULL) {
+		printf("RCU QSBR variable allocation failed\n");
+		goto err;
+	}
+	rcu_config.v = rv;
+
+	if (rte_hash_rcu_qsbr_add(tbl_rwc_test_param.h, &rcu_config) < 0) {
+		printf("RCU init in hash failed\n");
+		goto err;
+	}
+
+	for (n = 0; n < NUM_TEST; n++) {
+		unsigned int tot_lcore = rte_lcore_count();
+		/* Need main core + 2 readers + rwc_core_cnt[n] writers */
+		if (tot_lcore < rwc_core_cnt[n] + 3)
+			goto finish;
+
+		/* Calculate keys churned (deleted + re-added) by each writer */
+		tbl_rwc_test_param.single_insert =
+			tbl_rwc_test_param.count_keys_no_ks /
+				rwc_core_cnt[n];
+		printf("\nNumber of writers: %u\n", rwc_core_cnt[n]);
+
+		__atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
+		__atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+
+		rte_hash_reset(tbl_rwc_test_param.h);
+		rte_rcu_qsbr_init(rv, RTE_MAX_LCORE);
+
+		/* Pre-populate the table so readers hit valid entries */
+		write_type = WRITE_NO_KEY_SHIFT;
+		if (write_keys(write_type) < 0)
+			goto err;
+		write_type = WRITE_KEY_SHIFT;
+		if (write_keys(write_type) < 0)
+			goto err;
+
+		/* Launch 2 readers */
+		for (i = 1; i <= 2; i++)
+			rte_eal_remote_launch(test_hash_rcu_qsbr_reader, NULL,
+					      enabled_core_ids[i]);
+		pos_core = 0;
+		/* Launch writer(s) on the cores after the readers */
+		for (; i <= rwc_core_cnt[n] + 2; i++) {
+			rte_eal_remote_launch(test_hash_rcu_qsbr_writer,
+					      (void *)(uintptr_t)pos_core,
+					      enabled_core_ids[i]);
+			pos_core++;
+		}
+
+		/* Wait for writers to complete */
+		for (i = 3; i <= rwc_core_cnt[n] + 2; i++)
+			rte_eal_wait_lcore(enabled_core_ids[i]);
+
+		/* Readers poll writer_done; setting it lets them exit */
+		writer_done = 1;
+
+		/* Wait for readers to complete */
+		rte_eal_mp_wait_lcore();
+
+		unsigned long long cycles_per_write_operation =
+			__atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
+			__atomic_load_n(&gwrites, __ATOMIC_RELAXED);
+		rwc_perf_results->writer_add_del[n]
+					= cycles_per_write_operation;
+		printf("Cycles per write operation: %llu\n",
+		       cycles_per_write_operation);
+	}
+
+finish:
+	rte_hash_free(tbl_rwc_test_param.h);
+	rte_free(rv);
+	return 0;
+
+err:
+	/* Release any launched workers before tearing down shared state;
+	 * rte_free(NULL) is a safe no-op if allocation never happened.
+	 */
+	writer_done = 1;
+	rte_eal_mp_wait_lcore();
+	rte_hash_free(tbl_rwc_test_param.h);
+	rte_free(rv);
+	return -1;
+}
+
static int
test_hash_readwrite_lf_perf_main(void)
{
if (test_hash_add_ks_lookup_hit_extbkt(&rwc_lf_results, rwc_lf,
htm, ext_bkt) < 0)
return -1;
+ if (test_hash_rcu_qsbr_writer_perf(&rwc_lf_results, rwc_lf,
+ htm, ext_bkt) < 0)
+ return -1;
}
printf("\nTest lookup with read-write concurrency lock free support"
" disabled\n");