-#define N 65536
-#define TIME_S 5
-
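-/* set by the master lcore to release all slave lcores at once */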
-static rte_atomic32_t synchro;
-
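-/* ring under test, shared by all test lcores */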
-static struct rte_ring *r;
-
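-/* per-lcore counters; cache-aligned so lcores do not share a cache line */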
-struct test_stats {
- unsigned enq_success;
- unsigned enq_quota;
- unsigned enq_fail;
-
- unsigned deq_success;
- unsigned deq_fail;
-} __rte_cache_aligned;
-
-static struct test_stats test_stats[RTE_MAX_LCORE];
-
-static int
-ring_enqueue_test(int (*que_func)(struct rte_ring *, void * const *, unsigned),
- void *arg, unsigned bulk_or_burst)
-{
- unsigned success = 0;
- unsigned quota = 0;
- unsigned fail = 0;
- unsigned i;
- unsigned long dummy_obj;
- void *obj_table[MAX_BULK];
- int ret;
- unsigned lcore_id = rte_lcore_id();
- unsigned count = *((unsigned*)arg);
- uint64_t start_cycles, end_cycles;
- uint64_t time_diff = 0, hz = rte_get_hpet_hz();
-
- /* init dummy object table */
- for (i = 0; i < MAX_BULK; i++) {
- dummy_obj = lcore_id + 0x1000 + i;
- obj_table[i] = (void *)dummy_obj;
- }
-
- /* slave lcores spin here until the master signals the start */
- if (lcore_id != rte_get_master_lcore())
- while (rte_atomic32_read(&synchro) == 0);
-
- start_cycles = rte_get_hpet_cycles();
-
- /* enqueue as many objects as possible */
- while (time_diff/hz < TIME_S) {
- for (i = 0; likely(i < N); i++) {
- ret = que_func(r, obj_table, count);
- /*
- * bulk_or_burst
- * 1: for bulk operation
- * 0: for burst operation
- */
- if (bulk_or_burst) {
- /* bulk calls enqueue all *count* objects or none */
- if (ret == 0)
- success += count;
- else if (ret == -EDQUOT)
- quota += count;
- else
- fail++;
- } else {
- /* mask off the status bits to count the objects actually enqueued */
- if (ret != 0)
- success += (ret & RTE_RING_SZ_MASK);
- else
- fail++;
- }
- }
- end_cycles = rte_get_hpet_cycles();
- time_diff = end_cycles - start_cycles;
- }
-
- /* write statistics in a shared structure */
- test_stats[lcore_id].enq_success = success;
- test_stats[lcore_id].enq_quota = quota;
- test_stats[lcore_id].enq_fail = fail;
-
- return 0;
-}
-
-static int
-ring_dequeue_test(int (*que_func)(struct rte_ring *, void **, unsigned),
- void *arg, unsigned bulk_or_burst)
-{
- unsigned success = 0;
- unsigned fail = 0;
- unsigned i;
- void *obj_table[MAX_BULK];
- int ret;
- unsigned lcore_id = rte_lcore_id();
- unsigned count = *((unsigned*)arg);
- uint64_t start_cycles, end_cycles;
- uint64_t time_diff = 0, hz = rte_get_hpet_hz();
-
- /* slave lcores spin here until the master signals the start */
- if (lcore_id != rte_get_master_lcore())
- while (rte_atomic32_read(&synchro) == 0);
-
- start_cycles = rte_get_hpet_cycles();
-
- /* dequeue as many objects as possible */
- while (time_diff/hz < TIME_S) {
- for (i = 0; likely(i < N); i++) {
- ret = que_func(r, obj_table, count);
- /*
- * bulk_or_burst
- * 1: for bulk operation
- * 0: for burst operation
- */
- if (bulk_or_burst) {
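- /* bulk calls dequeue all *count* objects or none */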
- if (ret == 0)
- success += count;
- else
- fail++;
- } else {
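- /* burst calls return the number of objects actually dequeued */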
- if (ret != 0)
- success += ret;
- else
- fail++;
- }
- }
- end_cycles = rte_get_hpet_cycles();
- time_diff = end_cycles - start_cycles;
- }
-
- /* write statistics in a shared structure */
- test_stats[lcore_id].deq_success = success;
- test_stats[lcore_id].deq_fail = fail;
-
- return 0;
-}
-
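-/*
- * Per-lcore entry points: each wrapper binds the generic test loop above
- * to one ring operation variant (SP/MP enqueue, SC/MC dequeue, bulk or burst).
- */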
-static int
-test_ring_per_core_sp_enqueue(void *arg)
-{
- return ring_enqueue_test(&rte_ring_sp_enqueue_bulk, arg, 1);
-}
-
-static int
-test_ring_per_core_mp_enqueue(void *arg)
-{
- return ring_enqueue_test(&rte_ring_mp_enqueue_bulk, arg, 1);
-}
-
-static int
-test_ring_per_core_mc_dequeue(void *arg)
-{
- return ring_dequeue_test(&rte_ring_mc_dequeue_bulk, arg, 1);
-}
-
-static int
-test_ring_per_core_sc_dequeue(void *arg)
-{
- return ring_dequeue_test(&rte_ring_sc_dequeue_bulk, arg, 1);
-}
-
-static int
-test_ring_per_core_sp_enqueue_burst(void *arg)
-{
- return ring_enqueue_test(&rte_ring_sp_enqueue_burst, arg, 0);
-}
-
-static int
-test_ring_per_core_mp_enqueue_burst(void *arg)
-{
- return ring_enqueue_test(&rte_ring_mp_enqueue_burst, arg, 0);
-}
-
-static int
-test_ring_per_core_mc_dequeue_burst(void *arg)
-{
- return ring_dequeue_test(&rte_ring_mc_dequeue_burst, arg, 0);
-}
-
-static int
-test_ring_per_core_sc_dequeue_burst(void *arg)
-{
- return ring_dequeue_test(&rte_ring_sc_dequeue_burst, arg, 0);
-}
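-
-/*
- * A minimal usage sketch, assuming a hypothetical bulk size of 32 and the
- * classic master/slave EAL launch API: the harness starts one wrapper per
- * slave lcore, then flips *synchro* so all lcores begin at the same time.
- *
- * static unsigned count = 32;
- *
- * rte_atomic32_set(&synchro, 0);
- * rte_eal_mp_remote_launch(test_ring_per_core_mp_enqueue, &count,
- * SKIP_MASTER);
- * rte_atomic32_set(&synchro, 1); <- releases the slave lcores
- * test_ring_per_core_mp_enqueue(&count); <- master participates too
- * rte_eal_mp_wait_lcore();
- */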