#define WRITE_EXT_BKT 2
/* Number of core-count configurations exercised per test */
#define NUM_TEST 3

/* Lookups a reader performs between quiescent-state reports */
#define QSBR_REPORTING_INTERVAL 1024

/* Reader/writer core counts used for each of the NUM_TEST runs */
static unsigned int rwc_core_cnt[NUM_TEST] = {1, 2, 4};
/*
 * Per-scenario performance results, in cycles per operation.
 * First dimensions index test variants (presumably with/without key shift
 * and writer counts — confirm against the test functions that fill them);
 * the last dimension indexes the rwc_core_cnt[] configuration.
 */
struct rwc_perf {
	uint32_t w_ks_r_miss[2][NUM_TEST];
	uint32_t multi_rw[NUM_TEST - 1][2][NUM_TEST];
	uint32_t w_ks_r_hit_extbkt[2][NUM_TEST];
	/* Writer delete+add throughput with integrated RCU (cycles/op) */
	uint32_t writer_add_del[NUM_TEST];
};
/* Result tables for the lock-free and non-lock-free runs */
static struct rwc_perf rwc_lf_results, rwc_non_lf_results;
/* Aggregate read-side stats: total lookup cycles and lookup count */
static uint64_t gread_cycles;
static uint64_t greads;
/* Aggregate write-side stats: total cycles and add/delete op count */
static uint64_t gwrite_cycles;
static uint64_t gwrites;
/* Set by the control thread to tell reader threads to stop looping */
static volatile uint8_t writer_done;
return -1;
}
/* RCU QSBR variable shared by all reader and writer threads */
static struct rte_rcu_qsbr *rv;
+
+/*
+ * Reader thread using rte_hash data structure with RCU
+ */
+static int
+test_hash_rcu_qsbr_reader(void *arg)
+{
+ unsigned int i, j;
+ uint32_t num_keys = tbl_rwc_test_param.count_keys_no_ks
+ - QSBR_REPORTING_INTERVAL;
+ uint32_t *keys = tbl_rwc_test_param.keys_no_ks;
+ uint32_t lcore_id = rte_lcore_id();
+ RTE_SET_USED(arg);
+
+ (void)rte_rcu_qsbr_thread_register(rv, lcore_id);
+ rte_rcu_qsbr_thread_online(rv, lcore_id);
+ do {
+ for (i = 0; i < num_keys; i += j) {
+ for (j = 0; j < QSBR_REPORTING_INTERVAL; j++)
+ rte_hash_lookup(tbl_rwc_test_param.h,
+ keys + i + j);
+ /* Update quiescent state counter */
+ rte_rcu_qsbr_quiescent(rv, lcore_id);
+ }
+ } while (!writer_done);
+ rte_rcu_qsbr_thread_offline(rv, lcore_id);
+ (void)rte_rcu_qsbr_thread_unregister(rv, lcore_id);
+
+ return 0;
+}
+
+/*
+ * Writer thread using rte_hash data structure with RCU
+ */
+static int
+test_hash_rcu_qsbr_writer(void *arg)
+{
+ uint32_t i, offset;
+ uint64_t begin, cycles;
+ uint8_t pos_core = (uint32_t)((uintptr_t)arg);
+ offset = pos_core * tbl_rwc_test_param.single_insert;
+
+ begin = rte_rdtsc_precise();
+ for (i = offset; i < offset + tbl_rwc_test_param.single_insert; i++) {
+ /* Delete element from the shared data structure */
+ rte_hash_del_key(tbl_rwc_test_param.h,
+ tbl_rwc_test_param.keys_no_ks + i);
+ rte_hash_add_key(tbl_rwc_test_param.h,
+ tbl_rwc_test_param.keys_no_ks + i);
+ }
+ cycles = rte_rdtsc_precise() - begin;
+ __atomic_fetch_add(&gwrite_cycles, cycles, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&gwrites, tbl_rwc_test_param.single_insert,
+ __ATOMIC_RELAXED);
+ return 0;
+}
+
+/*
+ * Writer perf test with RCU QSBR in DQ mode:
+ * Writer(s) delete and add keys in the table.
+ * Readers lookup keys in the hash table
+ */
+static int
+test_hash_rcu_qsbr_writer_perf(struct rwc_perf *rwc_perf_results, int rwc_lf,
+ int htm, int ext_bkt)
+{
+ unsigned int n;
+ uint64_t i;
+ uint8_t write_type;
+ int use_jhash = 0;
+ struct rte_hash_rcu_config rcu_config = {0};
+ uint32_t sz;
+ uint8_t pos_core;
+
+ printf("\nTest: Writer perf with integrated RCU\n");
+
+ if (init_params(rwc_lf, use_jhash, htm, ext_bkt) != 0)
+ goto err;
+
+ sz = rte_rcu_qsbr_get_memsize(RTE_MAX_LCORE);
+ rv = (struct rte_rcu_qsbr *)rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
+ rcu_config.v = rv;
+
+ if (rte_hash_rcu_qsbr_add(tbl_rwc_test_param.h, &rcu_config) < 0) {
+ printf("RCU init in hash failed\n");
+ goto err;
+ }
+
+ for (n = 0; n < NUM_TEST; n++) {
+ unsigned int tot_lcore = rte_lcore_count();
+ if (tot_lcore < rwc_core_cnt[n] + 3)
+ goto finish;
+
+ /* Calculate keys added by each writer */
+ tbl_rwc_test_param.single_insert =
+ tbl_rwc_test_param.count_keys_no_ks /
+ rwc_core_cnt[n];
+ printf("\nNumber of writers: %u\n", rwc_core_cnt[n]);
+
+ __atomic_store_n(&gwrites, 0, __ATOMIC_RELAXED);
+ __atomic_store_n(&gwrite_cycles, 0, __ATOMIC_RELAXED);
+
+ rte_hash_reset(tbl_rwc_test_param.h);
+ rte_rcu_qsbr_init(rv, RTE_MAX_LCORE);
+
+ write_type = WRITE_NO_KEY_SHIFT;
+ if (write_keys(write_type) < 0)
+ goto err;
+ write_type = WRITE_KEY_SHIFT;
+ if (write_keys(write_type) < 0)
+ goto err;
+
+ /* Launch 2 readers */
+ for (i = 1; i <= 2; i++)
+ rte_eal_remote_launch(test_hash_rcu_qsbr_reader, NULL,
+ enabled_core_ids[i]);
+ pos_core = 0;
+ /* Launch writer(s) */
+ for (; i <= rwc_core_cnt[n] + 2; i++) {
+ rte_eal_remote_launch(test_hash_rcu_qsbr_writer,
+ (void *)(uintptr_t)pos_core,
+ enabled_core_ids[i]);
+ pos_core++;
+ }
+
+ /* Wait for writers to complete */
+ for (i = 3; i <= rwc_core_cnt[n] + 2; i++)
+ rte_eal_wait_lcore(enabled_core_ids[i]);
+
+ writer_done = 1;
+
+ /* Wait for readers to complete */
+ rte_eal_mp_wait_lcore();
+
+ unsigned long long cycles_per_write_operation =
+ __atomic_load_n(&gwrite_cycles, __ATOMIC_RELAXED) /
+ __atomic_load_n(&gwrites, __ATOMIC_RELAXED);
+ rwc_perf_results->writer_add_del[n]
+ = cycles_per_write_operation;
+ printf("Cycles per write operation: %llu\n",
+ cycles_per_write_operation);
+ }
+
+finish:
+ rte_hash_free(tbl_rwc_test_param.h);
+ rte_free(rv);
+ return 0;
+
+err:
+ writer_done = 1;
+ rte_eal_mp_wait_lcore();
+ rte_hash_free(tbl_rwc_test_param.h);
+ rte_free(rv);
+ return -1;
+}
+
static int
test_hash_readwrite_lf_perf_main(void)
{
if (test_hash_add_ks_lookup_hit_extbkt(&rwc_lf_results, rwc_lf,
htm, ext_bkt) < 0)
return -1;
+ if (test_hash_rcu_qsbr_writer_perf(&rwc_lf_results, rwc_lf,
+ htm, ext_bkt) < 0)
+ return -1;
}
printf("\nTest lookup with read-write concurrency lock free support"
" disabled\n");