X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest%2Ftest_ring_perf.c;h=ac9bf5608daa45741ceb937dc81a14e65969bf3d;hb=a617494eeb01ff;hp=0e238c13e4d618cf61240a1db812211801d3691b;hpb=21a7f4e2646e1cb6b0dbd6643e5d64f72355af58;p=dpdk.git

diff --git a/app/test/test_ring_perf.c b/app/test/test_ring_perf.c
index 0e238c13e4..ac9bf5608d 100644
--- a/app/test/test_ring_perf.c
+++ b/app/test/test_ring_perf.c
@@ -1,34 +1,6 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2019 Arm Limited
  */
@@ -37,38 +9,62 @@
 #include <rte_ring.h>
 #include <rte_cycles.h>
 #include <rte_launch.h>
+#include <rte_pause.h>
+#include <string.h>
 
 #include "test.h"
+#include "test_ring.h"
 
 /*
- * Ring
- * ====
- *
- * Measures performance of various operations using rdtsc
- *  * Empty ring dequeue
- *  * Enqueue/dequeue of bursts in 1 threads
- *  * Enqueue/dequeue of bursts in 2 threads
+ * Ring performance test cases, measures performance of various operations
+ * using rdtsc for legacy and 16B size ring elements.
 */

 #define RING_NAME "RING_PERF"
 #define RING_SIZE 4096
 #define MAX_BURST 32
 
-/*
+/*
 * the sizes to enqueue and dequeue in testing
 * (marked volatile so they won't be seen as compile-time constants)
 */
 static const volatile unsigned bulk_sizes[] = { 8, 32 };
 
-/* The ring structure used for tests */
-static struct rte_ring *r;
-
 struct lcore_pair {
	unsigned c1, c2;
 };
 
 static volatile unsigned lcore_count = 0;
 
+static void
+test_ring_print_test_string(unsigned int api_type, int esize,
+	unsigned int bsz, double value)
+{
+	if (esize == -1)
+		printf("legacy APIs");
+	else
+		printf("elem APIs: element size %dB", esize);
+
+	if (api_type == TEST_RING_IGNORE_API_TYPE)
+		return;
+
+	if ((api_type & TEST_RING_THREAD_DEF) == TEST_RING_THREAD_DEF)
+		printf(": default enqueue/dequeue: ");
+	else if ((api_type & TEST_RING_THREAD_SPSC) == TEST_RING_THREAD_SPSC)
+		printf(": SP/SC: ");
+	else if ((api_type & TEST_RING_THREAD_MPMC) == TEST_RING_THREAD_MPMC)
+		printf(": MP/MC: ");
+
+	if ((api_type & TEST_RING_ELEM_SINGLE) == TEST_RING_ELEM_SINGLE)
+		printf("single: ");
+	else if ((api_type & TEST_RING_ELEM_BULK) == TEST_RING_ELEM_BULK)
+		printf("bulk (size: %u): ", bsz);
+	else if ((api_type & TEST_RING_ELEM_BURST) == TEST_RING_ELEM_BURST)
+		printf("burst (size: %u): ", bsz);
+
+	printf("%.2F\n", value);
+}
+
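Note: the "legacy and 16B size ring elements" wording above is the crux of this rework. rte_ring_create() fixes the element type to void * (8 bytes), while the rte_ring_*_elem() family added in DPDK 19.11 (experimental at the time of this patch) takes the element size in bytes as a parameter. A minimal sketch of the two setups follows; the ring names, counts and flags are illustrative, not taken from this patch:

	#include <stdint.h>
	#include <rte_ring.h>
	#include <rte_ring_elem.h>
	#include <rte_lcore.h>

	static int
	make_rings(void)
	{
		/* legacy ring: elements are fixed-size void * pointers */
		struct rte_ring *r8 = rte_ring_create("perf_r8", 4096,
				rte_socket_id(), 0);

		/* elem ring: 16B elements; esize must be a multiple of 4 */
		struct rte_ring *r16 = rte_ring_create_elem("perf_r16", 16,
				4096, rte_socket_id(), 0);

		if (r8 == NULL || r16 == NULL)
			return -1;

		uint64_t obj[2] = {0, 0};	/* one 16B element */
		rte_ring_enqueue_bulk_elem(r16, obj, 16, 1, NULL);
		rte_ring_dequeue_bulk_elem(r16, obj, 16, 1, NULL);
		return 0;
	}

The test_ring_enqueue()/test_ring_dequeue() wrappers called throughout the new code come from test_ring.h (not shown in this diff); they dispatch between the two families, with esize == -1 selecting the legacy calls.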
 /**** Functions to analyse our core mask to get cores for different tests ***/
 
 static int
@@ -83,10 +79,11 @@ get_two_hyperthreads(struct lcore_pair *lcp)
 	RTE_LCORE_FOREACH(id2) {
 		if (id1 == id2)
 			continue;
-		c1 = lcore_config[id1].core_id;
-		c2 = lcore_config[id2].core_id;
-		s1 = lcore_config[id1].socket_id;
-		s2 = lcore_config[id2].socket_id;
+
+		c1 = rte_lcore_to_cpu_id(id1);
+		c2 = rte_lcore_to_cpu_id(id2);
+		s1 = rte_lcore_to_socket_id(id1);
+		s2 = rte_lcore_to_socket_id(id2);
 		if ((c1 == c2) && (s1 == s2)){
 			lcp->c1 = id1;
 			lcp->c2 = id2;
@@ -106,10 +103,11 @@ get_two_cores(struct lcore_pair *lcp)
 	RTE_LCORE_FOREACH(id2) {
 		if (id1 == id2)
 			continue;
-		c1 = lcore_config[id1].core_id;
-		c2 = lcore_config[id2].core_id;
-		s1 = lcore_config[id1].socket_id;
-		s2 = lcore_config[id2].socket_id;
+
+		c1 = rte_lcore_to_cpu_id(id1);
+		c2 = rte_lcore_to_cpu_id(id2);
+		s1 = rte_lcore_to_socket_id(id1);
+		s2 = rte_lcore_to_socket_id(id2);
 		if ((c1 != c2) && (s1 == s2)){
 			lcp->c1 = id1;
 			lcp->c2 = id2;
@@ -129,8 +127,8 @@ get_two_sockets(struct lcore_pair *lcp)
 	RTE_LCORE_FOREACH(id2) {
 		if (id1 == id2)
 			continue;
-		s1 = lcore_config[id1].socket_id;
-		s2 = lcore_config[id2].socket_id;
+		s1 = rte_lcore_to_socket_id(id1);
+		s2 = rte_lcore_to_socket_id(id2);
 		if (s1 != s2){
 			lcp->c1 = id1;
 			lcp->c2 = id2;
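Note: these three helpers pick lcore pairs at increasing topological distance (hyperthread siblings, separate cores on one socket, separate sockets), so the two-thread tests below can show how shared cache levels affect ring throughput. The accessor functions replace direct reads of the lcore_config[] array, which was being withdrawn from the public API. Condensed, the selection logic amounts to the following; classify_pair() is a hypothetical helper, not part of the patch:

	#include <rte_lcore.h>

	enum pair_kind { PAIR_HYPERTHREADS, PAIR_CORES, PAIR_SOCKETS };

	static enum pair_kind
	classify_pair(unsigned int id1, unsigned int id2)
	{
		if (rte_lcore_to_socket_id(id1) != rte_lcore_to_socket_id(id2))
			return PAIR_SOCKETS;		/* cross-socket pair */
		if (rte_lcore_to_cpu_id(id1) == rte_lcore_to_cpu_id(id2))
			return PAIR_HYPERTHREADS;	/* same physical core */
		return PAIR_CORES;			/* same socket */
	}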
@@ -143,121 +141,162 @@ get_two_sockets(struct lcore_pair *lcp)
 /* Get cycle counts for dequeuing from an empty ring. Should be 2 or 3 cycles */
 static void
-test_empty_dequeue(void)
+test_empty_dequeue(struct rte_ring *r, const int esize,
+		const unsigned int api_type)
 {
-	const unsigned iter_shift = 26;
-	const unsigned iterations = 1<<iter_shift;
-	unsigned i = 0;
+	const unsigned int iter_shift = 26;
+	const unsigned int iterations = 1 << iter_shift;
+	unsigned int i = 0;
 	void *burst[MAX_BURST];
 
-	const uint64_t sc_start = rte_rdtsc();
-	for (i = 0; i < iterations; i++)
-		rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0]);
-	const uint64_t sc_end = rte_rdtsc();
-
-	const uint64_t mc_start = rte_rdtsc();
-	for (i = 0; i < iterations; i++)
-		rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[0]);
-	const uint64_t mc_end = rte_rdtsc();
-
-	printf("SC empty dequeue: %.2F\n",
-			(double)(sc_end-sc_start) / iterations);
-	printf("MC empty dequeue: %.2F\n",
-			(double)(mc_end-mc_start) / iterations);
+	const uint64_t start = rte_rdtsc();
+	for (i = 0; i < iterations; i++)
+		test_ring_dequeue(r, burst, esize, bulk_sizes[0], api_type);
+	const uint64_t end = rte_rdtsc();
+
+	test_ring_print_test_string(api_type, esize, bulk_sizes[0],
+			((double)(end - start)) / iterations);
 }
 
 /*
  * for the separate enqueue and dequeue threads they take in one param
  * and return two. Input = burst size, output = cycle average for sp/sc & mp/mc
  */
 struct thread_params {
+	struct rte_ring *r;
 	unsigned size;        /* input value, the burst size */
 	double spsc, mpmc;    /* output value, the single/multi timings */
 };
 
 /*
- * Function that uses rdtsc to measure timing for ring enqueue. Needs pair
- * thread running dequeue_bulk function
+ * Helper function to call the enqueue and dequeue bulk functions:
+ * flag == 0 -> enqueue
+ * flag == 1 -> dequeue
  */
-static int
-enqueue_bulk(void *p)
+static __rte_always_inline int
+enqueue_dequeue_bulk_helper(const unsigned int flag, const int esize,
+	struct thread_params *p)
 {
-	const unsigned iter_shift = 23;
-	const unsigned iterations = 1<<iter_shift;
-	struct thread_params *params = p;
-	const unsigned size = params->size;
-	unsigned i;
-	void *burst[MAX_BURST] = {0};
+	int ret;
+	const unsigned int iter_shift = 23;
+	const unsigned int iterations = 1 << iter_shift;
+	struct rte_ring *r = p->r;
+	unsigned int bsize = p->size;
+	unsigned int i;
+	void *burst = NULL;
 
-	if ( __sync_add_and_fetch(&lcore_count, 1) != 2 )
+#ifdef RTE_USE_C11_MEM_MODEL
+	if (__atomic_add_fetch(&lcore_count, 1, __ATOMIC_RELAXED) != 2)
+#else
+	if (__sync_add_and_fetch(&lcore_count, 1) != 2)
+#endif
 		while(lcore_count != 2)
 			rte_pause();
 
+	burst = test_ring_calloc(MAX_BURST, esize);
+	if (burst == NULL)
+		return -1;
+
 	const uint64_t sp_start = rte_rdtsc();
 	for (i = 0; i < iterations; i++)
-		while (rte_ring_sp_enqueue_bulk(r, burst, size) != 0)
-			rte_pause();
+		do {
+			if (flag == 0)
+				ret = test_ring_enqueue(r, burst, esize, bsize,
+						TEST_RING_THREAD_SPSC |
+						TEST_RING_ELEM_BULK);
+			else if (flag == 1)
+				ret = test_ring_dequeue(r, burst, esize, bsize,
+						TEST_RING_THREAD_SPSC |
+						TEST_RING_ELEM_BULK);
+			if (ret == 0)
+				rte_pause();
+		} while (!ret);
 	const uint64_t sp_end = rte_rdtsc();
 
 	const uint64_t mp_start = rte_rdtsc();
 	for (i = 0; i < iterations; i++)
-		while (rte_ring_mp_enqueue_bulk(r, burst, size) != 0)
-			rte_pause();
+		do {
+			if (flag == 0)
+				ret = test_ring_enqueue(r, burst, esize, bsize,
+						TEST_RING_THREAD_MPMC |
+						TEST_RING_ELEM_BULK);
+			else if (flag == 1)
+				ret = test_ring_dequeue(r, burst, esize, bsize,
+						TEST_RING_THREAD_MPMC |
+						TEST_RING_ELEM_BULK);
+			if (ret == 0)
+				rte_pause();
+		} while (!ret);
 	const uint64_t mp_end = rte_rdtsc();
 
-	params->spsc = ((double)(sp_end - sp_start))/(iterations*size);
-	params->mpmc = ((double)(mp_end - mp_start))/(iterations*size);
+	p->spsc = ((double)(sp_end - sp_start))/(iterations * bsize);
+	p->mpmc = ((double)(mp_end - mp_start))/(iterations * bsize);
 	return 0;
 }
 
-/*
+/*
+ * Function that uses rdtsc to measure timing for ring enqueue. Needs pair
+ * thread running dequeue_bulk function
+ */
+static int
+enqueue_bulk(void *p)
+{
+	struct thread_params *params = p;
+
+	return enqueue_dequeue_bulk_helper(0, -1, params);
+}
+
+static int
+enqueue_bulk_16B(void *p)
+{
+	struct thread_params *params = p;
+
+	return enqueue_dequeue_bulk_helper(0, 16, params);
+}
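Note: the helper's timed loops are only meaningful if the enqueue and dequeue threads enter them together, hence the rendezvous on lcore_count at the top: each thread atomically increments the counter, and any thread that does not observe the final value 2 spins until its peer arrives. RTE_USE_C11_MEM_MODEL only chooses between two compiler builtins, __atomic_add_fetch() and the older __sync_add_and_fetch(); neither is a DPDK API. The same pattern in isolation, as a plain pthreads sketch for illustration:

	#include <stdio.h>
	#include <pthread.h>

	static volatile unsigned int count;	/* two-thread rendezvous */

	static void *worker(void *arg)
	{
		/* the last thread to arrive sees 2 and falls straight through */
		if (__atomic_add_fetch(&count, 1, __ATOMIC_RELAXED) != 2)
			while (count != 2)
				;	/* spin until the peer arrives */
		puts("both threads released");
		return arg;
	}

	int main(void)
	{
		pthread_t t1, t2;
		pthread_create(&t1, NULL, worker, NULL);
		pthread_create(&t2, NULL, worker, NULL);
		pthread_join(t1, NULL);
		pthread_join(t2, NULL);
		return 0;
	}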
+
+/*
  * Function that uses rdtsc to measure timing for ring dequeue. Needs pair
- * thread running enqueue_bulk function
+ * thread running enqueue_bulk function
  */
 static int
 dequeue_bulk(void *p)
 {
-	const unsigned iter_shift = 23;
-	const unsigned iterations = 1<<iter_shift;
-	struct thread_params *params = p;
-	const unsigned size = params->size;
-	unsigned i;
-	void *burst[MAX_BURST] = {0};
+	struct thread_params *params = p;
 
-	if ( __sync_add_and_fetch(&lcore_count, 1) != 2 )
-		while(lcore_count != 2)
-			rte_pause();
-
-	const uint64_t sc_start = rte_rdtsc();
-	for (i = 0; i < iterations; i++)
-		while (rte_ring_sc_dequeue_bulk(r, burst, size) != 0)
-			rte_pause();
-	const uint64_t sc_end = rte_rdtsc();
+	return enqueue_dequeue_bulk_helper(1, -1, params);
+}
 
-	const uint64_t mc_start = rte_rdtsc();
-	for (i = 0; i < iterations; i++)
-		while (rte_ring_mc_dequeue_bulk(r, burst, size) != 0)
-			rte_pause();
-	const uint64_t mc_end = rte_rdtsc();
+static int
+dequeue_bulk_16B(void *p)
+{
+	struct thread_params *params = p;
 
-	params->spsc = ((double)(sc_end - sc_start))/(iterations*size);
-	params->mpmc = ((double)(mc_end - mc_start))/(iterations*size);
-	return 0;
+	return enqueue_dequeue_bulk_helper(1, 16, params);
 }
 
-/*
+/*
  * Function that calls the enqueue and dequeue bulk functions on pairs of cores.
  * used to measure ring perf between hyperthreads, cores and sockets.
  */
-static void
-run_on_core_pair(struct lcore_pair *cores,
-		lcore_function_t f1, lcore_function_t f2)
+static int
+run_on_core_pair(struct lcore_pair *cores, struct rte_ring *r, const int esize)
 {
-	struct thread_params param1 = {.size = 0}, param2 = {.size = 0};
+	lcore_function_t *f1, *f2;
+	struct thread_params param1 = {0}, param2 = {0};
 	unsigned i;
-	for (i = 0; i < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); i++) {
+
+	if (esize == -1) {
+		f1 = enqueue_bulk;
+		f2 = dequeue_bulk;
+	} else {
+		f1 = enqueue_bulk_16B;
+		f2 = dequeue_bulk_16B;
+	}
+
+	for (i = 0; i < RTE_DIM(bulk_sizes); i++) {
 		lcore_count = 0;
 		param1.size = param2.size = bulk_sizes[i];
+		param1.r = param2.r = r;
 		if (cores->c1 == rte_get_master_lcore()) {
 			rte_eal_remote_launch(f2, &param2, cores->c2);
 			f1(&param1);
@@ -265,151 +304,280 @@ run_on_core_pair(struct lcore_pair *cores,
 		} else {
 			rte_eal_remote_launch(f1, &param1, cores->c1);
 			rte_eal_remote_launch(f2, &param2, cores->c2);
-			rte_eal_wait_lcore(cores->c1);
-			rte_eal_wait_lcore(cores->c2);
+			if (rte_eal_wait_lcore(cores->c1) < 0)
+				return -1;
+			if (rte_eal_wait_lcore(cores->c2) < 0)
+				return -1;
 		}
-		printf("SP/SC bulk enq/dequeue (size: %u): %.2F\n", bulk_sizes[i],
-				param1.spsc + param2.spsc);
-		printf("MP/MC bulk enq/dequeue (size: %u): %.2F\n", bulk_sizes[i],
-				param1.mpmc + param2.mpmc);
+		test_ring_print_test_string(
+			TEST_RING_THREAD_SPSC | TEST_RING_ELEM_BULK,
+			esize, bulk_sizes[i], param1.spsc + param2.spsc);
+		test_ring_print_test_string(
+			TEST_RING_THREAD_MPMC | TEST_RING_ELEM_BULK,
+			esize, bulk_sizes[i], param1.mpmc + param2.mpmc);
 	}
+
+	return 0;
 }
 
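Note: run_on_core_pair() now selects f1/f2 internally from esize (legacy vs 16B variants) instead of taking them as arguments, and it checks the rte_eal_wait_lcore() return codes it previously ignored. The launch choreography itself is the standard EAL pattern: hand a lcore_function_t to a worker core, run the master's share inline when the master lcore is one of the measured pair, then collect return values. Reduced to its skeleton (hello_fn and its argument are illustrative):

	#include <stdio.h>
	#include <rte_eal.h>
	#include <rte_launch.h>
	#include <rte_lcore.h>

	static int
	hello_fn(void *arg)
	{
		printf("lcore %u: %s\n", rte_lcore_id(), (const char *)arg);
		return 0;
	}

	int
	main(int argc, char **argv)
	{
		if (rte_eal_init(argc, argv) < 0)
			return -1;

		/* first worker lcore after the master (-1 wraps around) */
		unsigned int worker = rte_get_next_lcore(-1, 1, 0);

		rte_eal_remote_launch(hello_fn, (void *)"hi", worker);
		return rte_eal_wait_lcore(worker);	/* worker's return value */
	}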
-/*
- * Test function that determines how long an enqueue + dequeue of a single item
- * takes on a single lcore. Result is for comparison with the bulk enq+deq.
- */
-static void
-test_single_enqueue_dequeue(void)
+static rte_atomic32_t synchro;
+static uint64_t queue_count[RTE_MAX_LCORE];
+
+#define TIME_MS 100
+
+static int
+load_loop_fn_helper(struct thread_params *p, const int esize)
 {
-	const unsigned iter_shift = 24;
-	const unsigned iterations = 1<<iter_shift;
-	unsigned i = 0;
+	uint64_t time_diff = 0;
+	uint64_t begin = 0;
+	uint64_t hz = rte_get_timer_hz();
+	uint64_t lcount = 0;
+	const unsigned int lcore = rte_lcore_id();
+	struct thread_params *params = p;
 	void *burst = NULL;
 
-	const uint64_t sc_start = rte_rdtsc();
-	for (i = 0; i < iterations; i++) {
-		rte_ring_sp_enqueue(r, burst);
-		rte_ring_sc_dequeue(r, &burst);
-	}
-	const uint64_t sc_end = rte_rdtsc();
+	burst = test_ring_calloc(MAX_BURST, esize);
+	if (burst == NULL)
+		return -1;
 
-	const uint64_t mc_start = rte_rdtsc();
-	for (i = 0; i < iterations; i++) {
-		rte_ring_mp_enqueue(r, burst);
-		rte_ring_mc_dequeue(r, &burst);
+	/* wait synchro for slaves */
+	if (lcore != rte_get_master_lcore())
+		while (rte_atomic32_read(&synchro) == 0)
+			rte_pause();
+
+	begin = rte_get_timer_cycles();
+	while (time_diff < hz * TIME_MS / 1000) {
+		test_ring_enqueue(params->r, burst, esize, params->size,
+				TEST_RING_THREAD_MPMC | TEST_RING_ELEM_BULK);
+		test_ring_dequeue(params->r, burst, esize, params->size,
+				TEST_RING_THREAD_MPMC | TEST_RING_ELEM_BULK);
+		lcount++;
+		time_diff = rte_get_timer_cycles() - begin;
 	}
-	const uint64_t mc_end = rte_rdtsc();
+	queue_count[lcore] = lcount;
+
+	rte_free(burst);
 
-	printf("SP/SC single enq/dequeue: %"PRIu64"\n",
-			(sc_end-sc_start) >> iter_shift);
-	printf("MP/MC single enq/dequeue: %"PRIu64"\n",
-			(mc_end-mc_start) >> iter_shift);
+	return 0;
 }
 
-/*
- * Test that does both enqueue and dequeue on a core using the burst() API calls
- * instead of the bulk() calls used in other tests. Results should be the same
- * as for the bulk function called on a single lcore.
- */
-static void
-test_burst_enqueue_dequeue(void)
+static int
+load_loop_fn(void *p)
 {
-	const unsigned iter_shift = 23;
-	const unsigned iterations = 1<<iter_shift;
-	unsigned sz, i = 0;
-	void *burst[MAX_BURST] = {0};
+	struct thread_params *params = p;
 
-	for (sz = 0; sz < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); sz++) {
-		const uint64_t sc_start = rte_rdtsc();
-		for (i = 0; i < iterations; i++) {
-			rte_ring_sp_enqueue_burst(r, burst, bulk_sizes[sz]);
-			rte_ring_sc_dequeue_burst(r, burst, bulk_sizes[sz]);
-		}
-		const uint64_t sc_end = rte_rdtsc();
+	return load_loop_fn_helper(params, -1);
+}
 
-		const uint64_t mc_start = rte_rdtsc();
-		for (i = 0; i < iterations; i++) {
-			rte_ring_mp_enqueue_burst(r, burst, bulk_sizes[sz]);
-			rte_ring_mc_dequeue_burst(r, burst, bulk_sizes[sz]);
-		}
-		const uint64_t mc_end = rte_rdtsc();
+static int
+load_loop_fn_16B(void *p)
+{
+	struct thread_params *params = p;
+
+	return load_loop_fn_helper(params, 16);
+}
 
-		uint64_t mc_avg = ((mc_end-mc_start) >> iter_shift) / bulk_sizes[sz];
-		uint64_t sc_avg = ((sc_end-sc_start) >> iter_shift) / bulk_sizes[sz];
 
+static int
+run_on_all_cores(struct rte_ring *r, const int esize)
+{
+	uint64_t total;
+	struct thread_params param;
+	lcore_function_t *lcore_f;
+	unsigned int i, c;
+
+	if (esize == -1)
+		lcore_f = load_loop_fn;
+	else
+		lcore_f = load_loop_fn_16B;
+
+	memset(&param, 0, sizeof(struct thread_params));
+	for (i = 0; i < RTE_DIM(bulk_sizes); i++) {
+		total = 0;
+		printf("\nBulk enq/dequeue count on size %u\n", bulk_sizes[i]);
+		param.size = bulk_sizes[i];
+		param.r = r;
+
+		/* clear synchro and start slaves */
+		rte_atomic32_set(&synchro, 0);
+		if (rte_eal_mp_remote_launch(lcore_f, &param, SKIP_MASTER) < 0)
+			return -1;
+
+		/* start synchro and launch test on master */
+		rte_atomic32_set(&synchro, 1);
+		lcore_f(&param);
+
+		rte_eal_mp_wait_lcore();
+
+		RTE_LCORE_FOREACH(c) {
+			printf("Core [%u] count = %"PRIu64"\n",
+					c, queue_count[c]);
+			total += queue_count[c];
+		}
 
-		printf("SP/SC burst enq/dequeue (size: %u): %"PRIu64"\n", bulk_sizes[sz],
-				sc_avg);
-		printf("MP/MC burst enq/dequeue (size: %u): %"PRIu64"\n", bulk_sizes[sz],
-				mc_avg);
+		printf("Total count (size: %u): %"PRIu64"\n",
+				bulk_sizes[i], total);
 	}
+
+	return 0;
 }
 
-/* Times enqueue and dequeue on a single lcore */
-static void
-test_bulk_enqueue_dequeue(void)
+/*
+ * Test function that determines how long an enqueue + dequeue of a single item
+ * takes on a single lcore. Result is for comparison with the bulk enq+deq.
+ */
+static int
+test_single_enqueue_dequeue(struct rte_ring *r, const int esize,
+	const unsigned int api_type)
 {
-	const unsigned iter_shift = 23;
-	const unsigned iterations = 1<<iter_shift;
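Note: unlike the cycle-per-operation tests, run_on_all_cores() is time-bounded: every participating lcore hammers one shared MP/MC ring for TIME_MS (100 ms) and reports the number of enqueue+dequeue pairs it completed, so contention between cores shows up directly in the per-core counts. Converting a reported count into a rate is plain arithmetic; print_rate() below is an illustrative helper, not part of the test:

	#include <stdio.h>
	#include <inttypes.h>

	/* each counted iteration moved 'bulk' objects in and back out */
	static void
	print_rate(uint64_t lcount, unsigned int bulk, unsigned int time_ms)
	{
		uint64_t pairs_per_sec = lcount * 1000 / time_ms;
		printf("%" PRIu64 " bulk pairs/s, %" PRIu64 " objects/s\n",
				pairs_per_sec, pairs_per_sec * bulk);
	}

With TIME_MS at 100, a printed per-core count of N corresponds to N * 10 bulk enqueue/dequeue pairs per second on that core.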