diff --git a/app/test/test_ring.c b/app/test/test_ring.c
index d6bb44b9f9..bde33ab4a1 100644
--- a/app/test/test_ring.c
+++ b/app/test/test_ring.c
@@ -1,36 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * version: DPDK.L.1.2.3-3
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2020 Arm Limited
 */
 
 #include <string.h>
 
@@ -45,943 +15,1230 @@
 #include <rte_common.h>
 #include <rte_log.h>
 #include <rte_memory.h>
-#include <rte_memzone.h>
 #include <rte_launch.h>
 #include <rte_cycles.h>
-#include <rte_tailq.h>
 #include <rte_eal.h>
 #include <rte_per_lcore.h>
 #include <rte_lcore.h>
-#include <rte_atomic.h>
 #include <rte_branch_prediction.h>
 #include <rte_malloc.h>
 #include <rte_ring.h>
+#include <rte_ring_elem.h>
 #include <rte_random.h>
-#include <rte_common.h>
 #include <rte_errno.h>
-
-#include <cmdline_parse.h>
+#include <rte_hexdump.h>
 
 #include "test.h"
+#include "test_ring.h"
 
 /*
  * Ring
  * ====
  *
- * #. Basic tests: done on one core:
- *
- * - Using single producer/single consumer functions:
- *
- * - Enqueue one object, two objects, MAX_BULK objects
- * - Dequeue one object, two objects, MAX_BULK objects
- * - Check that dequeued pointers are correct
- *
- * - Using multi producers/multi consumers functions:
- *
- * - Enqueue one object, two objects, MAX_BULK objects
- * - Dequeue one object, two objects, MAX_BULK objects
- * - Check that dequeued pointers are correct
- *
- * - Test watermark and default bulk enqueue/dequeue:
- *
- * - Set watermark
- * - Set default bulk value
- * - Enqueue objects, check that -EDQUOT is returned when
- * watermark is exceeded
- * - Check that dequeued pointers are correct
- *
- * #. Check quota and watermark
- *
- * - Start a loop on another lcore that will enqueue and dequeue
- * objects in a ring. It will monitor the value of quota (default
- * bulk count) and watermark.
- * - At the same time, change the quota and the watermark on the
- * master lcore.
- * - The slave lcore will check that bulk count changes from 4 to - * 8, and watermark changes from 16 to 32. - * - * #. Performance tests. - * - * This test is done on the following configurations: - * - * - One core enqueuing, one core dequeuing - * - One core enqueuing, other cores dequeuing - * - One core dequeuing, other cores enqueuing - * - Half of the cores enqueuing, the other half dequeuing - * - * When only one core enqueues/dequeues, the test is done with the - * SP/SC functions in addition to the MP/MC functions. - * - * The test is done with different bulk size. - * - * On each core, the test enqueues or dequeues objects during - * TIME_S seconds. The number of successes and failures are stored on - * each core, then summed and displayed. + * #. Functional tests. Tests single/bulk/burst, default/SPSC/MPMC, + * legacy/custom element size (4B, 8B, 16B, 20B) APIs. + * Some tests incorporate unaligned addresses for objects. + * The enqueued/dequeued data is validated for correctness. * - * The test checks that the number of enqueues is equal to the - * number of dequeues. + * #. Performance tests are in test_ring_perf.c */ #define RING_SIZE 4096 #define MAX_BULK 32 -#define N 65536 -#define TIME_S 5 - -static rte_atomic32_t synchro; - -static unsigned bulk_enqueue; -static unsigned bulk_dequeue; -static struct rte_ring *r; - -struct test_stats { - unsigned enq_success ; - unsigned enq_quota; - unsigned enq_fail; - - unsigned deq_success; - unsigned deq_fail; -} __rte_cache_aligned; - -static struct test_stats test_stats[RTE_MAX_LCORE]; - -#define DEFINE_ENQUEUE_FUNCTION(name, enq_code) \ -static int \ -name(__attribute__((unused)) void *arg) \ -{ \ - unsigned success = 0; \ - unsigned quota = 0; \ - unsigned fail = 0; \ - unsigned i; \ - unsigned long dummy_obj; \ - void *obj_table[MAX_BULK]; \ - int ret; \ - unsigned lcore_id = rte_lcore_id(); \ - uint64_t start_cycles, end_cycles; \ - uint64_t time_diff = 0, hz = rte_get_hpet_hz(); \ - \ - /* init dummy object table */ \ - for (i = 0; i< MAX_BULK; i++) { \ - dummy_obj = lcore_id + 0x1000 + i; \ - obj_table[i] = (void *)dummy_obj; \ - } \ - \ - /* wait synchro for slaves */ \ - if (lcore_id != rte_get_master_lcore()) \ - while (rte_atomic32_read(&synchro) == 0); \ - \ - start_cycles = rte_get_hpet_cycles(); \ - \ - /* enqueue as many object as possible */ \ - while (time_diff/hz < TIME_S) { \ - for (i = 0; likely(i < N); i++) { \ - ret = enq_code; \ - if (ret == 0) \ - success++; \ - else if (ret == -EDQUOT) \ - quota++; \ - else \ - fail++; \ - } \ - end_cycles = rte_get_hpet_cycles(); \ - time_diff = end_cycles - start_cycles; \ - } \ - \ - /* write statistics in a shared structure */ \ - test_stats[lcore_id].enq_success = success; \ - test_stats[lcore_id].enq_quota = quota; \ - test_stats[lcore_id].enq_fail = fail; \ - \ - return 0; \ -} - -#define DEFINE_DEQUEUE_FUNCTION(name, deq_code) \ -static int \ -name(__attribute__((unused)) void *arg) \ -{ \ - unsigned success = 0; \ - unsigned fail = 0; \ - unsigned i; \ - void *obj_table[MAX_BULK]; \ - int ret; \ - unsigned lcore_id = rte_lcore_id(); \ - uint64_t start_cycles, end_cycles; \ - uint64_t time_diff = 0, hz = rte_get_hpet_hz(); \ - \ - /* wait synchro for slaves */ \ - if (lcore_id != rte_get_master_lcore()) \ - while (rte_atomic32_read(&synchro) == 0); \ - \ - start_cycles = rte_get_hpet_cycles(); \ - \ - /* dequeue as many object as possible */ \ - while (time_diff/hz < TIME_S) { \ - for (i = 0; likely(i < N); i++) { \ - ret = deq_code; \ - if (ret == 0) \ - 
success++; \ - else \ - fail++; \ - } \ - end_cycles = rte_get_hpet_cycles(); \ - time_diff = end_cycles - start_cycles; \ - } \ - \ - /* write statistics in a shared structure */ \ - test_stats[lcore_id].deq_success = success; \ - test_stats[lcore_id].deq_fail = fail; \ - \ - return 0; \ -} - -DEFINE_ENQUEUE_FUNCTION(test_ring_per_core_sp_enqueue, - rte_ring_sp_enqueue_bulk(r, obj_table, bulk_enqueue)) -DEFINE_DEQUEUE_FUNCTION(test_ring_per_core_sc_dequeue, - rte_ring_sc_dequeue_bulk(r, obj_table, bulk_dequeue)) - -DEFINE_ENQUEUE_FUNCTION(test_ring_per_core_mp_enqueue, - rte_ring_mp_enqueue_bulk(r, obj_table, bulk_enqueue)) - -DEFINE_DEQUEUE_FUNCTION(test_ring_per_core_mc_dequeue, - rte_ring_mc_dequeue_bulk(r, obj_table, bulk_dequeue)) - -#define TEST_RING_VERIFY(exp) \ +/* + * Validate the return value of test cases and print details of the + * ring if validation fails + * + * @param exp + * Expression to validate return value. + * @param r + * A pointer to the ring structure. + */ +#define TEST_RING_VERIFY(exp, r, errst) do { \ if (!(exp)) { \ printf("error at %s:%d\tcondition " #exp " failed\n", \ __func__, __LINE__); \ - rte_ring_dump(r); \ - return (-1); \ - } + rte_ring_dump(stdout, (r)); \ + errst; \ + } \ +} while (0) -#define TEST_RING_FULL_EMTPY_ITER 8 +#define TEST_RING_FULL_EMPTY_ITER 8 +static const int esize[] = {-1, 4, 8, 16, 20}; -static int -launch_cores(unsigned enq_core_count, unsigned deq_core_count, int sp, int sc) +/* Wrappers around the zero-copy APIs. The wrappers match + * the normal enqueue/dequeue API declarations. + */ +static unsigned int +test_ring_enqueue_zc_bulk(struct rte_ring *r, void * const *obj_table, + unsigned int n, unsigned int *free_space) { - void *obj; - unsigned lcore_id; - unsigned rate, deq_remain = 0; - unsigned enq_total, deq_total; - struct test_stats sum; - int (*enq_f)(void *); - int (*deq_f)(void *); - unsigned cores = enq_core_count + deq_core_count; - int ret; + uint32_t ret; + struct rte_ring_zc_data zcd; - rte_atomic32_set(&synchro, 0); - - printf("ring_autotest e/d_core=%u,%u e/d_bulk=%u,%u ", - enq_core_count, deq_core_count, bulk_enqueue, bulk_dequeue); - printf("sp=%d sc=%d ", sp, sc); + ret = rte_ring_enqueue_zc_bulk_start(r, n, &zcd, free_space); + if (ret != 0) { + /* Copy the data to the ring */ + test_ring_copy_to(&zcd, obj_table, sizeof(void *), ret); + rte_ring_enqueue_zc_finish(r, ret); + } - /* set enqueue function to be used */ - if (sp) - enq_f = test_ring_per_core_sp_enqueue; - else - enq_f = test_ring_per_core_mp_enqueue; + return ret; +} - /* set dequeue function to be used */ - if (sc) - deq_f = test_ring_per_core_sc_dequeue; - else - deq_f = test_ring_per_core_mc_dequeue; +static unsigned int +test_ring_enqueue_zc_bulk_elem(struct rte_ring *r, const void *obj_table, + unsigned int esize, unsigned int n, unsigned int *free_space) +{ + unsigned int ret; + struct rte_ring_zc_data zcd; - RTE_LCORE_FOREACH_SLAVE(lcore_id) { - if (enq_core_count != 0) { - enq_core_count--; - rte_eal_remote_launch(enq_f, NULL, lcore_id); - } - if (deq_core_count != 1) { - deq_core_count--; - rte_eal_remote_launch(deq_f, NULL, lcore_id); - } + ret = rte_ring_enqueue_zc_bulk_elem_start(r, esize, n, + &zcd, free_space); + if (ret != 0) { + /* Copy the data to the ring */ + test_ring_copy_to(&zcd, obj_table, esize, ret); + rte_ring_enqueue_zc_finish(r, ret); } - memset(test_stats, 0, sizeof(test_stats)); + return ret; +} - /* start synchro and launch test on master */ - rte_atomic32_set(&synchro, 1); - ret = deq_f(NULL); +static unsigned 
int +test_ring_enqueue_zc_burst(struct rte_ring *r, void * const *obj_table, + unsigned int n, unsigned int *free_space) +{ + unsigned int ret; + struct rte_ring_zc_data zcd; - /* wait all cores */ - RTE_LCORE_FOREACH_SLAVE(lcore_id) { - if (cores == 1) - break; - cores--; - if (rte_eal_wait_lcore(lcore_id) < 0) - ret = -1; + ret = rte_ring_enqueue_zc_burst_start(r, n, &zcd, free_space); + if (ret != 0) { + /* Copy the data to the ring */ + test_ring_copy_to(&zcd, obj_table, sizeof(void *), ret); + rte_ring_enqueue_zc_finish(r, ret); } - memset(&sum, 0, sizeof(sum)); - for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { - sum.enq_success += test_stats[lcore_id].enq_success; - sum.enq_quota += test_stats[lcore_id].enq_quota; - sum.enq_fail += test_stats[lcore_id].enq_fail; - sum.deq_success += test_stats[lcore_id].deq_success; - sum.deq_fail += test_stats[lcore_id].deq_fail; - } + return ret; +} - /* empty the ring */ - while (rte_ring_sc_dequeue(r, &obj) == 0) - deq_remain += 1; +static unsigned int +test_ring_enqueue_zc_burst_elem(struct rte_ring *r, const void *obj_table, + unsigned int esize, unsigned int n, unsigned int *free_space) +{ + unsigned int ret; + struct rte_ring_zc_data zcd; - if (ret < 0) { - printf("per-lcore test returned -1\n"); - return -1; + ret = rte_ring_enqueue_zc_burst_elem_start(r, esize, n, + &zcd, free_space); + if (ret != 0) { + /* Copy the data to the ring */ + test_ring_copy_to(&zcd, obj_table, esize, ret); + rte_ring_enqueue_zc_finish(r, ret); } - enq_total = (sum.enq_success * bulk_enqueue) + - (sum.enq_quota * bulk_enqueue); - deq_total = (sum.deq_success * bulk_dequeue) + deq_remain; - - rate = deq_total/TIME_S; + return ret; +} - printf("rate_persec=%u\n", rate); +static unsigned int +test_ring_dequeue_zc_bulk(struct rte_ring *r, void **obj_table, + unsigned int n, unsigned int *available) +{ + unsigned int ret; + struct rte_ring_zc_data zcd; - if (enq_total != deq_total) { - printf("invalid enq/deq_success counter: %u %u\n", - enq_total, deq_total); - return -1; + ret = rte_ring_dequeue_zc_bulk_start(r, n, &zcd, available); + if (ret != 0) { + /* Copy the data from the ring */ + test_ring_copy_from(&zcd, obj_table, sizeof(void *), ret); + rte_ring_dequeue_zc_finish(r, ret); } - return 0; + return ret; } -static int -do_one_ring_test2(unsigned enq_core_count, unsigned deq_core_count, - unsigned n_enq_bulk, unsigned n_deq_bulk) +static unsigned int +test_ring_dequeue_zc_bulk_elem(struct rte_ring *r, void *obj_table, + unsigned int esize, unsigned int n, unsigned int *available) { - int sp, sc; - int do_sp, do_sc; - int ret; - - bulk_enqueue = n_enq_bulk; - bulk_dequeue = n_deq_bulk; + unsigned int ret; + struct rte_ring_zc_data zcd; - do_sp = (enq_core_count == 1) ? 1 : 0; - do_sc = (deq_core_count == 1) ? 
1 : 0; - - for (sp = 0; sp <= do_sp; sp ++) { - for (sc = 0; sc <= do_sc; sc ++) { - ret = launch_cores(enq_core_count, - deq_core_count, - sp, sc); - if (ret < 0) - return -1; - } + ret = rte_ring_dequeue_zc_bulk_elem_start(r, esize, n, + &zcd, available); + if (ret != 0) { + /* Copy the data from the ring */ + test_ring_copy_from(&zcd, obj_table, esize, ret); + rte_ring_dequeue_zc_finish(r, ret); } - return 0; + + return ret; } -static int -do_one_ring_test(unsigned enq_core_count, unsigned deq_core_count) +static unsigned int +test_ring_dequeue_zc_burst(struct rte_ring *r, void **obj_table, + unsigned int n, unsigned int *available) { - unsigned bulk_enqueue_tab[] = { 1, 2, 4, 32, 0 }; - unsigned bulk_dequeue_tab[] = { 1, 2, 4, 32, 0 }; - unsigned *bulk_enqueue_ptr; - unsigned *bulk_dequeue_ptr; - int ret; - - for (bulk_enqueue_ptr = bulk_enqueue_tab; - *bulk_enqueue_ptr; - bulk_enqueue_ptr++) { + unsigned int ret; + struct rte_ring_zc_data zcd; - for (bulk_dequeue_ptr = bulk_dequeue_tab; - *bulk_dequeue_ptr; - bulk_dequeue_ptr++) { - - ret = do_one_ring_test2(enq_core_count, deq_core_count, - *bulk_enqueue_ptr, - *bulk_dequeue_ptr); - if (ret < 0) - return -1; - } + ret = rte_ring_dequeue_zc_burst_start(r, n, &zcd, available); + if (ret != 0) { + /* Copy the data from the ring */ + test_ring_copy_from(&zcd, obj_table, sizeof(void *), ret); + rte_ring_dequeue_zc_finish(r, ret); } - return 0; + + return ret; } -static int -check_quota_and_watermark(__attribute__((unused)) void *dummy) +static unsigned int +test_ring_dequeue_zc_burst_elem(struct rte_ring *r, void *obj_table, + unsigned int esize, unsigned int n, unsigned int *available) { - uint64_t hz = rte_get_hpet_hz(); - void *obj_table[MAX_BULK]; - unsigned watermark, watermark_old = 16; - uint64_t cur_time, end_time; - int64_t diff = 0; - int i, ret; - unsigned quota, quota_old = 4; - - /* init the object table */ - memset(obj_table, 0, sizeof(obj_table)); - end_time = rte_get_hpet_cycles() + (hz * 2); - - /* check that bulk and watermark are 4 and 32 (respectively) */ - while (diff >= 0) { - - /* read quota, the only change allowed is from 4 to 8 */ - quota = rte_ring_get_bulk_count(r); - if (quota != quota_old && (quota_old != 4 || quota != 8)) { - printf("Bad quota change %u -> %u\n", quota_old, - quota); - return -1; - } - quota_old = quota; - - /* add in ring until we reach watermark */ - ret = 0; - for (i = 0; i < 16; i ++) { - if (ret != 0) - break; - ret = rte_ring_enqueue_bulk(r, obj_table, quota); - } - - if (ret != -EDQUOT) { - printf("Cannot enqueue objects, or watermark not " - "reached (ret=%d)\n", ret); - return -1; - } - - /* read watermark, the only change allowed is from 16 to 32 */ - watermark = i * quota; - if (watermark != watermark_old && - (watermark_old != 16 || watermark != 32)) { - printf("Bad watermark change %u -> %u\n", watermark_old, - watermark); - return -1; - } - watermark_old = watermark; - - /* dequeue objects from ring */ - while (i--) { - ret = rte_ring_dequeue_bulk(r, obj_table, quota); - if (ret != 0) { - printf("Cannot dequeue (ret=%d)\n", ret); - return -1; - } - } + unsigned int ret; + struct rte_ring_zc_data zcd; - cur_time = rte_get_hpet_cycles(); - diff = end_time - cur_time; + ret = rte_ring_dequeue_zc_burst_elem_start(r, esize, n, + &zcd, available); + if (ret != 0) { + /* Copy the data from the ring */ + test_ring_copy_from(&zcd, obj_table, esize, ret); + rte_ring_dequeue_zc_finish(r, ret); } - if (watermark_old != 32 || quota_old != 8) { - printf("quota or watermark was not updated 
(q=%u wm=%u)\n", - quota_old, watermark_old); - return -1; + return ret; +} + +static const struct { + const char *desc; + uint32_t api_type; + uint32_t create_flags; + struct { + unsigned int (*flegacy)(struct rte_ring *r, + void * const *obj_table, unsigned int n, + unsigned int *free_space); + unsigned int (*felem)(struct rte_ring *r, const void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *free_space); + } enq; + struct { + unsigned int (*flegacy)(struct rte_ring *r, + void **obj_table, unsigned int n, + unsigned int *available); + unsigned int (*felem)(struct rte_ring *r, void *obj_table, + unsigned int esize, unsigned int n, + unsigned int *available); + } deq; +} test_enqdeq_impl[] = { + { + .desc = "MP/MC sync mode", + .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF, + .create_flags = 0, + .enq = { + .flegacy = rte_ring_enqueue_bulk, + .felem = rte_ring_enqueue_bulk_elem, + }, + .deq = { + .flegacy = rte_ring_dequeue_bulk, + .felem = rte_ring_dequeue_bulk_elem, + }, + }, + { + .desc = "SP/SC sync mode", + .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_SPSC, + .create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ, + .enq = { + .flegacy = rte_ring_sp_enqueue_bulk, + .felem = rte_ring_sp_enqueue_bulk_elem, + }, + .deq = { + .flegacy = rte_ring_sc_dequeue_bulk, + .felem = rte_ring_sc_dequeue_bulk_elem, + }, + }, + { + .desc = "MP/MC sync mode", + .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_MPMC, + .create_flags = 0, + .enq = { + .flegacy = rte_ring_mp_enqueue_bulk, + .felem = rte_ring_mp_enqueue_bulk_elem, + }, + .deq = { + .flegacy = rte_ring_mc_dequeue_bulk, + .felem = rte_ring_mc_dequeue_bulk_elem, + }, + }, + { + .desc = "MP_RTS/MC_RTS sync mode", + .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF, + .create_flags = RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ, + .enq = { + .flegacy = rte_ring_enqueue_bulk, + .felem = rte_ring_enqueue_bulk_elem, + }, + .deq = { + .flegacy = rte_ring_dequeue_bulk, + .felem = rte_ring_dequeue_bulk_elem, + }, + }, + { + .desc = "MP_HTS/MC_HTS sync mode", + .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF, + .create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ, + .enq = { + .flegacy = rte_ring_enqueue_bulk, + .felem = rte_ring_enqueue_bulk_elem, + }, + .deq = { + .flegacy = rte_ring_dequeue_bulk, + .felem = rte_ring_dequeue_bulk_elem, + }, + }, + { + .desc = "MP/MC sync mode", + .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF, + .create_flags = 0, + .enq = { + .flegacy = rte_ring_enqueue_burst, + .felem = rte_ring_enqueue_burst_elem, + }, + .deq = { + .flegacy = rte_ring_dequeue_burst, + .felem = rte_ring_dequeue_burst_elem, + }, + }, + { + .desc = "SP/SC sync mode", + .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_SPSC, + .create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ, + .enq = { + .flegacy = rte_ring_sp_enqueue_burst, + .felem = rte_ring_sp_enqueue_burst_elem, + }, + .deq = { + .flegacy = rte_ring_sc_dequeue_burst, + .felem = rte_ring_sc_dequeue_burst_elem, + }, + }, + { + .desc = "MP/MC sync mode", + .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_MPMC, + .create_flags = 0, + .enq = { + .flegacy = rte_ring_mp_enqueue_burst, + .felem = rte_ring_mp_enqueue_burst_elem, + }, + .deq = { + .flegacy = rte_ring_mc_dequeue_burst, + .felem = rte_ring_mc_dequeue_burst_elem, + }, + }, + { + .desc = "MP_RTS/MC_RTS sync mode", + .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF, + .create_flags = RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ, + .enq = { + .flegacy = rte_ring_enqueue_burst, + .felem = 
rte_ring_enqueue_burst_elem, + }, + .deq = { + .flegacy = rte_ring_dequeue_burst, + .felem = rte_ring_dequeue_burst_elem, + }, + }, + { + .desc = "MP_HTS/MC_HTS sync mode", + .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF, + .create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ, + .enq = { + .flegacy = rte_ring_enqueue_burst, + .felem = rte_ring_enqueue_burst_elem, + }, + .deq = { + .flegacy = rte_ring_dequeue_burst, + .felem = rte_ring_dequeue_burst_elem, + }, + }, + { + .desc = "SP/SC sync mode (ZC)", + .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_SPSC, + .create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ, + .enq = { + .flegacy = test_ring_enqueue_zc_bulk, + .felem = test_ring_enqueue_zc_bulk_elem, + }, + .deq = { + .flegacy = test_ring_dequeue_zc_bulk, + .felem = test_ring_dequeue_zc_bulk_elem, + }, + }, + { + .desc = "MP_HTS/MC_HTS sync mode (ZC)", + .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF, + .create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ, + .enq = { + .flegacy = test_ring_enqueue_zc_bulk, + .felem = test_ring_enqueue_zc_bulk_elem, + }, + .deq = { + .flegacy = test_ring_dequeue_zc_bulk, + .felem = test_ring_dequeue_zc_bulk_elem, + }, + }, + { + .desc = "SP/SC sync mode (ZC)", + .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_SPSC, + .create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ, + .enq = { + .flegacy = test_ring_enqueue_zc_burst, + .felem = test_ring_enqueue_zc_burst_elem, + }, + .deq = { + .flegacy = test_ring_dequeue_zc_burst, + .felem = test_ring_dequeue_zc_burst_elem, + }, + }, + { + .desc = "MP_HTS/MC_HTS sync mode (ZC)", + .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF, + .create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ, + .enq = { + .flegacy = test_ring_enqueue_zc_burst, + .felem = test_ring_enqueue_zc_burst_elem, + }, + .deq = { + .flegacy = test_ring_dequeue_zc_burst, + .felem = test_ring_dequeue_zc_burst_elem, + }, } +}; - return 0; +static unsigned int +test_ring_enq_impl(struct rte_ring *r, void **obj, int esize, unsigned int n, + unsigned int test_idx) +{ + if (esize == -1) + return test_enqdeq_impl[test_idx].enq.flegacy(r, obj, n, NULL); + else + return test_enqdeq_impl[test_idx].enq.felem(r, obj, esize, n, + NULL); } -static int -test_quota_and_watermark(void) +static unsigned int +test_ring_deq_impl(struct rte_ring *r, void **obj, int esize, unsigned int n, + unsigned int test_idx) { - unsigned lcore_id = rte_lcore_id(); - unsigned lcore_id2 = rte_get_next_lcore(lcore_id, 0, 1); - - printf("Test quota and watermark live modification\n"); + if (esize == -1) + return test_enqdeq_impl[test_idx].deq.flegacy(r, obj, n, NULL); + else + return test_enqdeq_impl[test_idx].deq.felem(r, obj, esize, n, + NULL); +} - rte_ring_set_bulk_count(r, 4); - rte_ring_set_water_mark(r, 16); +static void +test_ring_mem_init(void *obj, unsigned int count, int esize) +{ + unsigned int i; - /* launch a thread that will enqueue and dequeue, checking - * watermark and quota */ - rte_eal_remote_launch(check_quota_and_watermark, NULL, lcore_id2); + /* Legacy queue APIs? 
*/ + if (esize == -1) + for (i = 0; i < count; i++) + ((void **)obj)[i] = (void *)(uintptr_t)i; + else + for (i = 0; i < (count * esize / sizeof(uint32_t)); i++) + ((uint32_t *)obj)[i] = i; +} - rte_delay_ms(1000); - rte_ring_set_bulk_count(r, 8); - rte_ring_set_water_mark(r, 32); - rte_delay_ms(1000); +static int +test_ring_mem_cmp(void *src, void *dst, unsigned int size) +{ + int ret; - if (rte_eal_wait_lcore(lcore_id2) < 0) - return -1; + ret = memcmp(src, dst, size); + if (ret) { + rte_hexdump(stdout, "src", src, size); + rte_hexdump(stdout, "dst", dst, size); + printf("data after dequeue is not the same\n"); + } - return 0; + return ret; } -/* Test for catch on invalid watermark values */ -static int -test_set_watermark( void ){ - unsigned count; - int setwm; - - struct rte_ring *r = rte_ring_lookup("test_ring_basic_ex"); - if(r == NULL){ - printf( " ring lookup failed\n" ); - goto error; - } - count = r->prod.size*2; - setwm = rte_ring_set_water_mark(r, count); - if (setwm != -EINVAL){ - printf("Test failed to detect invalid watermark count value\n"); - goto error; - } - count = 0; - setwm = rte_ring_set_water_mark(r, count); - if (r->prod.watermark != r->prod.size) { - printf("Test failed to detect invalid watermark count value\n"); - goto error; - } - return 0; +static void +test_ring_print_test_string(const char *istr, unsigned int api_type, int esize) +{ + printf("\n%s: ", istr); -error: - return -1; + if (esize == -1) + printf("legacy APIs: "); + else + printf("elem APIs: element size %dB ", esize); + + if (api_type == TEST_RING_IGNORE_API_TYPE) + return; + + if (api_type & TEST_RING_THREAD_DEF) + printf(": default enqueue/dequeue: "); + else if (api_type & TEST_RING_THREAD_SPSC) + printf(": SP/SC: "); + else if (api_type & TEST_RING_THREAD_MPMC) + printf(": MP/MC: "); + + if (api_type & TEST_RING_ELEM_SINGLE) + printf("single\n"); + else if (api_type & TEST_RING_ELEM_BULK) + printf("bulk\n"); + else if (api_type & TEST_RING_ELEM_BURST) + printf("burst\n"); } /* - * helper routine for test_ring_basic + * Various negative test cases. 
*/ static int -test_ring_basic_full_empty(void * const src[], void *dst[]) +test_ring_negative_tests(void) { - unsigned i, rand; - const unsigned rsz = RING_SIZE - 1; - - printf("Basic full/empty test\n"); - - for (i = 0; TEST_RING_FULL_EMTPY_ITER != i; i++) { - - /* random shift in the ring */ - rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL); - printf("%s: iteration %u, random shift: %u;\n", - __func__, i, rand); - TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src, - rand)); - TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rand)); - - /* fill the ring */ - TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src, - rsz)); - TEST_RING_VERIFY(0 == rte_ring_free_count(r)); - TEST_RING_VERIFY(rsz == rte_ring_count(r)); - TEST_RING_VERIFY(rte_ring_full(r)); - TEST_RING_VERIFY(0 == rte_ring_empty(r)); - - /* empty the ring */ - TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rsz)); - TEST_RING_VERIFY(rsz == rte_ring_free_count(r)); - TEST_RING_VERIFY(0 == rte_ring_count(r)); - TEST_RING_VERIFY(0 == rte_ring_full(r)); - TEST_RING_VERIFY(rte_ring_empty(r)); - - /* check data */ - TEST_RING_VERIFY(0 == memcmp(src, dst, rsz)); - rte_ring_dump(r); + struct rte_ring *rp = NULL; + struct rte_ring *rt = NULL; + unsigned int i; + + /* Test with esize not a multiple of 4 */ + rp = test_ring_create("test_bad_element_size", 23, + RING_SIZE + 1, SOCKET_ID_ANY, 0); + if (rp != NULL) { + printf("Test failed to detect invalid element size\n"); + goto test_fail; } - return (0); -} -static int -test_ring_basic(void) -{ - void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL; - int ret; - unsigned i, n; - /* alloc dummy object pointers */ - src = malloc(RING_SIZE*2*sizeof(void *)); - if (src == NULL) - goto fail; + for (i = 0; i < RTE_DIM(esize); i++) { + /* Test if ring size is not power of 2 */ + rp = test_ring_create("test_bad_ring_size", esize[i], + RING_SIZE + 1, SOCKET_ID_ANY, 0); + if (rp != NULL) { + printf("Test failed to detect odd count\n"); + goto test_fail; + } - for (i = 0; i < RING_SIZE*2 ; i++) { - src[i] = (void *)(unsigned long)i; - } - cur_src = src; - - /* alloc some room for copied objects */ - dst = malloc(RING_SIZE*2*sizeof(void *)); - if (dst == NULL) - goto fail; - - memset(dst, 0, RING_SIZE*2*sizeof(void *)); - cur_dst = dst; - - printf("enqueue 1 obj\n"); - ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1); - cur_src += 1; - if (ret != 0) - goto fail; - - printf("enqueue 2 objs\n"); - ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2); - cur_src += 2; - if (ret != 0) - goto fail; - - printf("enqueue MAX_BULK objs\n"); - ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK); - cur_src += MAX_BULK; - if (ret != 0) - goto fail; - - printf("dequeue 1 obj\n"); - ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1); - cur_dst += 1; - if (ret != 0) - goto fail; - - printf("dequeue 2 objs\n"); - ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2); - cur_dst += 2; - if (ret != 0) - goto fail; - - printf("dequeue MAX_BULK objs\n"); - ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK); - cur_dst += MAX_BULK; - if (ret != 0) - goto fail; - - /* check data */ - if (memcmp(src, dst, cur_dst - dst)) { - test_hexdump("src", src, cur_src - src); - test_hexdump("dst", dst, cur_dst - dst); - printf("data after dequeue is not the same\n"); - goto fail; - } - cur_src = src; - cur_dst = dst; - - printf("enqueue 1 obj\n"); - ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1); - cur_src += 1; - if (ret != 0) - goto fail; - - printf("enqueue 2 objs\n"); - ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2); - 
cur_src += 2; - if (ret != 0) - goto fail; - - printf("enqueue MAX_BULK objs\n"); - ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK); - cur_src += MAX_BULK; - if (ret != 0) - goto fail; - - printf("dequeue 1 obj\n"); - ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1); - cur_dst += 1; - if (ret != 0) - goto fail; - - printf("dequeue 2 objs\n"); - ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2); - cur_dst += 2; - if (ret != 0) - goto fail; - - printf("dequeue MAX_BULK objs\n"); - ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK); - cur_dst += MAX_BULK; - if (ret != 0) - goto fail; - - /* check data */ - if (memcmp(src, dst, cur_dst - dst)) { - test_hexdump("src", src, cur_src - src); - test_hexdump("dst", dst, cur_dst - dst); - printf("data after dequeue is not the same\n"); - goto fail; - } - cur_src = src; - cur_dst = dst; - - printf("fill and empty the ring\n"); - for (i = 0; i