1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
3 * Copyright(c) 2020 Arm Limited
13 #include <sys/queue.h>
15 #include <rte_common.h>
17 #include <rte_memory.h>
18 #include <rte_launch.h>
19 #include <rte_cycles.h>
21 #include <rte_per_lcore.h>
22 #include <rte_lcore.h>
23 #include <rte_branch_prediction.h>
24 #include <rte_malloc.h>
26 #include <rte_ring_elem.h>
27 #include <rte_random.h>
28 #include <rte_errno.h>
29 #include <rte_hexdump.h>
32 #include "test_ring.h"
38 * #. Functional tests. Tests single/bulk/burst, default/SPSC/MPMC,
39 * legacy/custom element size (4B, 8B, 16B, 20B) APIs.
40 * Some tests incorporate unaligned addresses for objects.
41 * The enqueued/dequeued data is validated for correctness.
43 * #. Performance tests are in test_ring_perf.c
46 #define RING_SIZE 4096
50 * Validate the return value of test cases and print details of the
51 * ring if validation fails
54 * Expression to validate return value.
56 * A pointer to the ring structure.
/* Assert a test condition: on failure, print the failing condition with
 * file/line context, dump the ring state for debugging, and run the
 * caller-supplied error statement (errst), e.g. "goto fail".
 * NOTE(review): part of the macro expansion (the if-test and do/while
 * terminator) is not visible in this extract.
 */
58 #define TEST_RING_VERIFY(exp, r, errst) do { \
60 printf("error at %s:%d\tcondition " #exp " failed\n", \
61 __func__, __LINE__); \
62 rte_ring_dump(stdout, (r)); \
67 #define TEST_RING_FULL_EMPTY_ITER 8 /* iterations of the random full/empty test */
69 static const int esize[] = {-1, 4, 8, 16, 20}; /* -1 = legacy (void *) APIs; others are element sizes in bytes */
71 /* Wrappers around the zero-copy APIs. The wrappers match
72 * the normal enqueue/dequeue API declarations.
/* Zero-copy bulk enqueue wrapper matching the rte_ring_enqueue_bulk()
 * signature: reserve space, copy the pointer objects in, then finish.
 */
75 test_ring_enqueue_zc_bulk(struct rte_ring *r, void * const *obj_table,
76 unsigned int n, unsigned int *free_space)
79 struct rte_ring_zc_data zcd;
/* Reserve ring slots; ret is the number of slots granted. */
81 ret = rte_ring_enqueue_zc_bulk_start(r, n, &zcd, free_space);
83 /* Copy the data to the ring */
84 test_ring_copy_to(&zcd, obj_table, sizeof(void *), ret);
85 rte_ring_enqueue_zc_finish(r, ret);
/* Zero-copy bulk enqueue wrapper for custom element size (esize bytes),
 * matching the rte_ring_enqueue_bulk_elem() signature.
 */
92 test_ring_enqueue_zc_bulk_elem(struct rte_ring *r, const void *obj_table,
93 unsigned int esize, unsigned int n, unsigned int *free_space)
96 struct rte_ring_zc_data zcd;
/* Reserve slots for n elements of esize bytes each. */
98 ret = rte_ring_enqueue_zc_bulk_elem_start(r, esize, n,
101 /* Copy the data to the ring */
102 test_ring_copy_to(&zcd, obj_table, esize, ret);
103 rte_ring_enqueue_zc_finish(r, ret);
/* Zero-copy burst enqueue wrapper matching the rte_ring_enqueue_burst()
 * signature: reserve as many of n slots as available, copy, finish.
 */
110 test_ring_enqueue_zc_burst(struct rte_ring *r, void * const *obj_table,
111 unsigned int n, unsigned int *free_space)
114 struct rte_ring_zc_data zcd;
/* Burst start may grant fewer than n slots. */
116 ret = rte_ring_enqueue_zc_burst_start(r, n, &zcd, free_space);
118 /* Copy the data to the ring */
119 test_ring_copy_to(&zcd, obj_table, sizeof(void *), ret);
120 rte_ring_enqueue_zc_finish(r, ret);
/* Zero-copy burst enqueue wrapper for custom element size, matching the
 * rte_ring_enqueue_burst_elem() signature.
 */
127 test_ring_enqueue_zc_burst_elem(struct rte_ring *r, const void *obj_table,
128 unsigned int esize, unsigned int n, unsigned int *free_space)
131 struct rte_ring_zc_data zcd;
/* Burst start may grant fewer than n slots. */
133 ret = rte_ring_enqueue_zc_burst_elem_start(r, esize, n,
136 /* Copy the data to the ring */
137 test_ring_copy_to(&zcd, obj_table, esize, ret);
138 rte_ring_enqueue_zc_finish(r, ret);
/* Zero-copy bulk dequeue wrapper matching the rte_ring_dequeue_bulk()
 * signature: reserve filled slots, copy the objects out, then finish.
 */
145 test_ring_dequeue_zc_bulk(struct rte_ring *r, void **obj_table,
146 unsigned int n, unsigned int *available)
149 struct rte_ring_zc_data zcd;
/* Reserve n filled slots; ret is the number granted. */
151 ret = rte_ring_dequeue_zc_bulk_start(r, n, &zcd, available);
153 /* Copy the data from the ring */
154 test_ring_copy_from(&zcd, obj_table, sizeof(void *), ret);
155 rte_ring_dequeue_zc_finish(r, ret);
/* Zero-copy bulk dequeue wrapper for custom element size, matching the
 * rte_ring_dequeue_bulk_elem() signature.
 */
162 test_ring_dequeue_zc_bulk_elem(struct rte_ring *r, void *obj_table,
163 unsigned int esize, unsigned int n, unsigned int *available)
166 struct rte_ring_zc_data zcd;
/* Reserve n filled slots of esize bytes each. */
168 ret = rte_ring_dequeue_zc_bulk_elem_start(r, esize, n,
171 /* Copy the data from the ring */
172 test_ring_copy_from(&zcd, obj_table, esize, ret);
173 rte_ring_dequeue_zc_finish(r, ret);
/* Zero-copy burst dequeue wrapper matching the rte_ring_dequeue_burst()
 * signature: reserve up to n filled slots, copy out, finish.
 */
180 test_ring_dequeue_zc_burst(struct rte_ring *r, void **obj_table,
181 unsigned int n, unsigned int *available)
184 struct rte_ring_zc_data zcd;
/* Burst start may grant fewer than n slots. */
186 ret = rte_ring_dequeue_zc_burst_start(r, n, &zcd, available);
188 /* Copy the data from the ring */
189 test_ring_copy_from(&zcd, obj_table, sizeof(void *), ret);
190 rte_ring_dequeue_zc_finish(r, ret);
/* Zero-copy burst dequeue wrapper for custom element size, matching the
 * rte_ring_dequeue_burst_elem() signature.
 */
197 test_ring_dequeue_zc_burst_elem(struct rte_ring *r, void *obj_table,
198 unsigned int esize, unsigned int n, unsigned int *available)
201 struct rte_ring_zc_data zcd;
/* Burst start may grant fewer than n slots. */
203 ret = rte_ring_dequeue_zc_burst_elem_start(r, esize, n,
206 /* Copy the data from the ring */
207 test_ring_copy_from(&zcd, obj_table, esize, ret);
208 rte_ring_dequeue_zc_finish(r, ret);
/* Table of enqueue/dequeue implementations exercised by the bulk/burst
 * tests. Each entry carries a human-readable description, the API-type
 * mask, the ring-creation flags, and an enqueue/dequeue pair in both
 * legacy (void *) and element-size-aware (elem) variants. The final
 * entries route through the zero-copy (ZC) wrappers defined above.
 */
214 static const struct {
217 uint32_t create_flags;
/* enqueue function pair (legacy + elem) */
219 unsigned int (*flegacy)(struct rte_ring *r,
220 void * const *obj_table, unsigned int n,
221 unsigned int *free_space);
222 unsigned int (*felem)(struct rte_ring *r, const void *obj_table,
223 unsigned int esize, unsigned int n,
224 unsigned int *free_space);
/* dequeue function pair (legacy + elem) */
227 unsigned int (*flegacy)(struct rte_ring *r,
228 void **obj_table, unsigned int n,
229 unsigned int *available);
230 unsigned int (*felem)(struct rte_ring *r, void *obj_table,
231 unsigned int esize, unsigned int n,
232 unsigned int *available);
234 } test_enqdeq_impl[] = {
/* Bulk entries: default, SP/SC, explicit MP/MC, RTS, HTS */
236 .desc = "MP/MC sync mode",
237 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
240 .flegacy = rte_ring_enqueue_bulk,
241 .felem = rte_ring_enqueue_bulk_elem,
244 .flegacy = rte_ring_dequeue_bulk,
245 .felem = rte_ring_dequeue_bulk_elem,
249 .desc = "SP/SC sync mode",
250 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_SPSC,
251 .create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
253 .flegacy = rte_ring_sp_enqueue_bulk,
254 .felem = rte_ring_sp_enqueue_bulk_elem,
257 .flegacy = rte_ring_sc_dequeue_bulk,
258 .felem = rte_ring_sc_dequeue_bulk_elem,
262 .desc = "MP/MC sync mode",
263 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_MPMC,
266 .flegacy = rte_ring_mp_enqueue_bulk,
267 .felem = rte_ring_mp_enqueue_bulk_elem,
270 .flegacy = rte_ring_mc_dequeue_bulk,
271 .felem = rte_ring_mc_dequeue_bulk_elem,
275 .desc = "MP_RTS/MC_RTS sync mode",
276 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
277 .create_flags = RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ,
279 .flegacy = rte_ring_enqueue_bulk,
280 .felem = rte_ring_enqueue_bulk_elem,
283 .flegacy = rte_ring_dequeue_bulk,
284 .felem = rte_ring_dequeue_bulk_elem,
288 .desc = "MP_HTS/MC_HTS sync mode",
289 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
290 .create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
292 .flegacy = rte_ring_enqueue_bulk,
293 .felem = rte_ring_enqueue_bulk_elem,
296 .flegacy = rte_ring_dequeue_bulk,
297 .felem = rte_ring_dequeue_bulk_elem,
/* Burst entries: same sync-mode coverage as the bulk entries above */
301 .desc = "MP/MC sync mode",
302 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
305 .flegacy = rte_ring_enqueue_burst,
306 .felem = rte_ring_enqueue_burst_elem,
309 .flegacy = rte_ring_dequeue_burst,
310 .felem = rte_ring_dequeue_burst_elem,
314 .desc = "SP/SC sync mode",
315 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_SPSC,
316 .create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
318 .flegacy = rte_ring_sp_enqueue_burst,
319 .felem = rte_ring_sp_enqueue_burst_elem,
322 .flegacy = rte_ring_sc_dequeue_burst,
323 .felem = rte_ring_sc_dequeue_burst_elem,
327 .desc = "MP/MC sync mode",
328 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_MPMC,
331 .flegacy = rte_ring_mp_enqueue_burst,
332 .felem = rte_ring_mp_enqueue_burst_elem,
335 .flegacy = rte_ring_mc_dequeue_burst,
336 .felem = rte_ring_mc_dequeue_burst_elem,
340 .desc = "MP_RTS/MC_RTS sync mode",
341 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
342 .create_flags = RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ,
344 .flegacy = rte_ring_enqueue_burst,
345 .felem = rte_ring_enqueue_burst_elem,
348 .flegacy = rte_ring_dequeue_burst,
349 .felem = rte_ring_dequeue_burst_elem,
353 .desc = "MP_HTS/MC_HTS sync mode",
354 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
355 .create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
357 .flegacy = rte_ring_enqueue_burst,
358 .felem = rte_ring_enqueue_burst_elem,
361 .flegacy = rte_ring_dequeue_burst,
362 .felem = rte_ring_dequeue_burst_elem,
/* Zero-copy entries: SP/SC and HTS only (ZC requires serialized access) */
366 .desc = "SP/SC sync mode (ZC)",
367 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_SPSC,
368 .create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
370 .flegacy = test_ring_enqueue_zc_bulk,
371 .felem = test_ring_enqueue_zc_bulk_elem,
374 .flegacy = test_ring_dequeue_zc_bulk,
375 .felem = test_ring_dequeue_zc_bulk_elem,
379 .desc = "MP_HTS/MC_HTS sync mode (ZC)",
380 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
381 .create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
383 .flegacy = test_ring_enqueue_zc_bulk,
384 .felem = test_ring_enqueue_zc_bulk_elem,
387 .flegacy = test_ring_dequeue_zc_bulk,
388 .felem = test_ring_dequeue_zc_bulk_elem,
392 .desc = "SP/SC sync mode (ZC)",
393 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_SPSC,
394 .create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
396 .flegacy = test_ring_enqueue_zc_burst,
397 .felem = test_ring_enqueue_zc_burst_elem,
400 .flegacy = test_ring_dequeue_zc_burst,
401 .felem = test_ring_dequeue_zc_burst_elem,
405 .desc = "MP_HTS/MC_HTS sync mode (ZC)",
406 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
407 .create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
409 .flegacy = test_ring_enqueue_zc_burst,
410 .felem = test_ring_enqueue_zc_burst_elem,
413 .flegacy = test_ring_dequeue_zc_burst,
414 .felem = test_ring_dequeue_zc_burst_elem,
/* Dispatch an enqueue of n objects through the implementation selected
 * by test_idx: the legacy (void *) variant or the element-size-aware
 * variant. NOTE(review): the selecting condition (presumably on esize)
 * is not visible in this extract — confirm against the full file.
 */
420 test_ring_enq_impl(struct rte_ring *r, void **obj, int esize, unsigned int n,
421 unsigned int test_idx)
424 return test_enqdeq_impl[test_idx].enq.flegacy(r, obj, n, NULL);
426 return test_enqdeq_impl[test_idx].enq.felem(r, obj, esize, n,
/* Dispatch a dequeue of n objects through the implementation selected
 * by test_idx: legacy or element-size-aware variant. NOTE(review): the
 * selecting condition is not visible in this extract — confirm against
 * the full file.
 */
431 test_ring_deq_impl(struct rte_ring *r, void **obj, int esize, unsigned int n,
432 unsigned int test_idx)
435 return test_enqdeq_impl[test_idx].deq.flegacy(r, obj, n, NULL);
437 return test_enqdeq_impl[test_idx].deq.felem(r, obj, esize, n,
/* Fill the source buffer with a deterministic pattern so dequeued data
 * can be validated: for the legacy path, count pointer-sized values
 * holding their own index; otherwise, a 32-bit incrementing pattern
 * covering count elements of esize bytes.
 */
442 test_ring_mem_init(void *obj, unsigned int count, int esize)
446 /* Legacy queue APIs? */
448 for (i = 0; i < count; i++)
449 ((void **)obj)[i] = (void *)(uintptr_t)i;
451 for (i = 0; i < (count * esize / sizeof(uint32_t)); i++)
452 ((uint32_t *)obj)[i] = i;
/* Compare enqueued vs dequeued data; on mismatch, hexdump both buffers
 * for diagnosis. Returns the memcmp() result (0 on equality).
 * NOTE(review): the condition guarding the dump is elided here —
 * presumably "if (ret)".
 */
456 test_ring_mem_cmp(void *src, void *dst, unsigned int size)
460 ret = memcmp(src, dst, size);
462 rte_hexdump(stdout, "src", src, size);
463 rte_hexdump(stdout, "dst", dst, size);
464 printf("data after dequeue is not the same\n");
/* Print a one-line description of the test about to run: API family
 * (legacy vs elem with its size), thread sync mode (default/SPSC/MPMC),
 * and operation granularity (single/bulk/burst). Several printf arms
 * are elided in this extract.
 */
471 test_ring_print_test_string(const char *istr, unsigned int api_type, int esize)
473 printf("\n%s: ", istr);
/* Legacy vs element-size-aware API label. */
476 printf("legacy APIs: ");
478 printf("elem APIs: element size %dB ", esize);
480 if (api_type == TEST_RING_IGNORE_API_TYPE)
/* Thread synchronization mode. */
483 if (api_type & TEST_RING_THREAD_DEF)
484 printf(": default enqueue/dequeue: ");
485 else if (api_type & TEST_RING_THREAD_SPSC)
487 else if (api_type & TEST_RING_THREAD_MPMC)
/* Operation granularity. */
490 if (api_type & TEST_RING_ELEM_SINGLE)
492 else if (api_type & TEST_RING_ELEM_BULK)
494 else if (api_type & TEST_RING_ELEM_BURST)
499 * Various negative test cases.
/* Negative tests: each creation below is expected to FAIL (or lookup to
 * miss); the test reports an error if the API accepts the bad input.
 * Covers: invalid element size, non-power-of-2 ring size, size over
 * RTE_RING_SZ_MASK, lookup of a nonexistent ring, and duplicate-name
 * creation. Error-path checks between the calls are elided here.
 */
502 test_ring_negative_tests(void)
504 struct rte_ring *rp = NULL;
505 struct rte_ring *rt = NULL;
508 /* Test with esize not a multiple of 4 */
509 rp = test_ring_create("test_bad_element_size", 23,
510 RING_SIZE + 1, SOCKET_ID_ANY, 0);
512 printf("Test failed to detect invalid element size\n");
/* Repeat the size checks for each supported element size. */
517 for (i = 0; i < RTE_DIM(esize); i++) {
518 /* Test if ring size is not power of 2 */
519 rp = test_ring_create("test_bad_ring_size", esize[i],
520 RING_SIZE + 1, SOCKET_ID_ANY, 0);
522 printf("Test failed to detect odd count\n");
526 /* Test if ring size is exceeding the limit */
527 rp = test_ring_create("test_bad_ring_size", esize[i],
528 RTE_RING_SZ_MASK + 1, SOCKET_ID_ANY, 0);
530 printf("Test failed to detect limits\n");
534 /* Tests if lookup returns NULL on non-existing ring */
535 rp = rte_ring_lookup("ring_not_found");
536 if (rp != NULL && rte_errno != ENOENT) {
537 printf("Test failed to detect NULL ring lookup\n");
541 /* Test to if a non-power of 2 count causes the create
542 * function to fail correctly
544 rp = test_ring_create("test_ring_count", esize[i], 4097,
/* A valid creation follows; subsequent checks use this ring. */
549 rp = test_ring_create("test_ring_negative", esize[i], RING_SIZE,
551 RING_F_SP_ENQ | RING_F_SC_DEQ);
553 printf("test_ring_negative fail to create ring\n");
557 TEST_RING_VERIFY(rte_ring_lookup("test_ring_negative") == rp,
560 TEST_RING_VERIFY(rte_ring_empty(rp) == 1, rp, goto test_fail);
562 /* Tests if it would always fail to create ring with an used
565 rt = test_ring_create("test_ring_negative", esize[i], RING_SIZE,
583 * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
584 * Random number of elements are enqueued and dequeued.
/* Random full/empty test for implementation test_idx: for each element
 * size, enqueue/dequeue a random count per iteration, then fill the
 * ring to capacity (rsz = RING_SIZE - 1 usable entries), verify the
 * full/empty counters, drain it, and compare dequeued data against the
 * source pattern. Error/cleanup paths are elided in this extract.
 */
587 test_ring_burst_bulk_tests1(unsigned int test_idx)
590 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
592 unsigned int i, j, temp_sz;
594 const unsigned int rsz = RING_SIZE - 1;
596 for (i = 0; i < RTE_DIM(esize); i++) {
597 test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
598 test_enqdeq_impl[test_idx].api_type, esize[i]);
600 /* Create the ring */
601 r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
602 RING_SIZE, SOCKET_ID_ANY,
603 test_enqdeq_impl[test_idx].create_flags);
605 /* alloc dummy object pointers */
606 src = test_ring_calloc(RING_SIZE * 2, esize[i]);
609 test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
612 /* alloc some room for copied objects */
613 dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
618 printf("Random full/empty test\n");
620 for (j = 0; j != TEST_RING_FULL_EMPTY_ITER; j++) {
621 /* random shift in the ring */
622 rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
623 printf("%s: iteration %u, random shift: %u;\n",
625 ret = test_ring_enq_impl(r, cur_src, esize[i], rand,
627 TEST_RING_VERIFY(ret != 0, r, goto fail);
629 ret = test_ring_deq_impl(r, cur_dst, esize[i], rand,
631 TEST_RING_VERIFY(ret == rand, r, goto fail);
/* Fill the ring completely and check its state counters. */
634 ret = test_ring_enq_impl(r, cur_src, esize[i], rsz,
636 TEST_RING_VERIFY(ret != 0, r, goto fail);
638 TEST_RING_VERIFY(rte_ring_free_count(r) == 0, r, goto fail);
639 TEST_RING_VERIFY(rsz == rte_ring_count(r), r, goto fail);
640 TEST_RING_VERIFY(rte_ring_full(r), r, goto fail);
641 TEST_RING_VERIFY(rte_ring_empty(r) == 0, r, goto fail);
/* Drain the ring and check the inverse state. */
644 ret = test_ring_deq_impl(r, cur_dst, esize[i], rsz,
646 TEST_RING_VERIFY(ret == (int)rsz, r, goto fail);
648 TEST_RING_VERIFY(rsz == rte_ring_free_count(r), r, goto fail);
649 TEST_RING_VERIFY(rte_ring_count(r) == 0, r, goto fail);
650 TEST_RING_VERIFY(rte_ring_full(r) == 0, r, goto fail);
651 TEST_RING_VERIFY(rte_ring_empty(r), r, goto fail);
/* Compare size depends on legacy (pointer) vs elem path. */
654 temp_sz = rsz * sizeof(void *);
656 temp_sz = rsz * esize[i];
657 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
658 temp_sz) == 0, r, goto fail);
661 /* Free memory before test completed */
679 * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
680 * Sequence of simple enqueues/dequeues and validate the enqueued and
/* Simple enqueue/dequeue sequence for implementation test_idx: enqueue
 * 1, 2, then MAX_BULK objects, dequeue the same counts, and verify the
 * dequeued data matches the source pattern. Error/cleanup paths are
 * elided in this extract.
 */
684 test_ring_burst_bulk_tests2(unsigned int test_idx)
687 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
691 for (i = 0; i < RTE_DIM(esize); i++) {
692 test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
693 test_enqdeq_impl[test_idx].api_type, esize[i]);
695 /* Create the ring */
696 r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
697 RING_SIZE, SOCKET_ID_ANY,
698 test_enqdeq_impl[test_idx].create_flags);
700 /* alloc dummy object pointers */
701 src = test_ring_calloc(RING_SIZE * 2, esize[i]);
704 test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
707 /* alloc some room for copied objects */
708 dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
713 printf("enqueue 1 obj\n");
714 ret = test_ring_enq_impl(r, cur_src, esize[i], 1, test_idx);
715 TEST_RING_VERIFY(ret == 1, r, goto fail);
716 cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
718 printf("enqueue 2 objs\n");
719 ret = test_ring_enq_impl(r, cur_src, esize[i], 2, test_idx);
720 TEST_RING_VERIFY(ret == 2, r, goto fail);
721 cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);
723 printf("enqueue MAX_BULK objs\n");
724 ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
726 TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
728 printf("dequeue 1 obj\n");
729 ret = test_ring_deq_impl(r, cur_dst, esize[i], 1, test_idx);
730 TEST_RING_VERIFY(ret == 1, r, goto fail);
731 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 1);
733 printf("dequeue 2 objs\n");
734 ret = test_ring_deq_impl(r, cur_dst, esize[i], 2, test_idx);
735 TEST_RING_VERIFY(ret == 2, r, goto fail);
736 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);
738 printf("dequeue MAX_BULK objs\n");
739 ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
741 TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
742 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], MAX_BULK);
/* Validate the entire dequeued region against the source. */
745 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
746 RTE_PTR_DIFF(cur_dst, dst)) == 0,
749 /* Free memory before test completed */
767 * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
768 * Enqueue and dequeue to cover the entire ring length.
/* Fill-and-empty cycle for implementation test_idx: enqueue then
 * immediately dequeue MAX_BULK objects, RING_SIZE/MAX_BULK times, so
 * the indices wrap the whole ring; then validate the dequeued data.
 * Error/cleanup paths are elided in this extract.
 */
771 test_ring_burst_bulk_tests3(unsigned int test_idx)
774 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
778 for (i = 0; i < RTE_DIM(esize); i++) {
779 test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
780 test_enqdeq_impl[test_idx].api_type, esize[i]);
782 /* Create the ring */
783 r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
784 RING_SIZE, SOCKET_ID_ANY,
785 test_enqdeq_impl[test_idx].create_flags);
787 /* alloc dummy object pointers */
788 src = test_ring_calloc(RING_SIZE * 2, esize[i]);
791 test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
794 /* alloc some room for copied objects */
795 dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
800 printf("fill and empty the ring\n");
801 for (j = 0; j < RING_SIZE / MAX_BULK; j++) {
802 ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
804 TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
805 cur_src = test_ring_inc_ptr(cur_src, esize[i],
808 ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
810 TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
811 cur_dst = test_ring_inc_ptr(cur_dst, esize[i],
/* Validate the entire dequeued region against the source. */
816 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
817 RTE_PTR_DIFF(cur_dst, dst)) == 0,
820 /* Free memory before test completed */
838 * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
839 * Enqueue till the ring is full and dequeue till the ring becomes empty.
/* Boundary test for implementation test_idx: enqueue until only one
 * free entry remains (a ring of size N holds N-1 objects), verify an
 * enqueue on a full ring returns 0, then drain symmetrically and verify
 * empty. Bulk APIs need the exact remaining count (MAX_BULK - 3);
 * burst APIs may request MAX_BULK and accept a partial result.
 * Error/cleanup paths are elided in this extract.
 */
842 test_ring_burst_bulk_tests4(unsigned int test_idx)
845 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
848 unsigned int api_type, num_elems;
850 api_type = test_enqdeq_impl[test_idx].api_type;
852 for (i = 0; i < RTE_DIM(esize); i++) {
853 test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
854 test_enqdeq_impl[test_idx].api_type, esize[i]);
856 /* Create the ring */
857 r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
858 RING_SIZE, SOCKET_ID_ANY,
859 test_enqdeq_impl[test_idx].create_flags);
861 /* alloc dummy object pointers */
862 src = test_ring_calloc(RING_SIZE * 2, esize[i]);
865 test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
868 /* alloc some room for copied objects */
869 dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
874 printf("Test enqueue without enough memory space\n");
875 for (j = 0; j < (RING_SIZE/MAX_BULK - 1); j++) {
876 ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
878 TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
879 cur_src = test_ring_inc_ptr(cur_src, esize[i],
883 printf("Enqueue 2 objects, free entries = MAX_BULK - 2\n");
884 ret = test_ring_enq_impl(r, cur_src, esize[i], 2, test_idx);
885 TEST_RING_VERIFY(ret == 2, r, goto fail);
886 cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);
888 printf("Enqueue the remaining entries = MAX_BULK - 3\n");
889 /* Bulk APIs enqueue exact number of elements */
890 if ((api_type & TEST_RING_ELEM_BULK) == TEST_RING_ELEM_BULK)
891 num_elems = MAX_BULK - 3;
893 num_elems = MAX_BULK;
894 /* Always one free entry left */
895 ret = test_ring_enq_impl(r, cur_src, esize[i], num_elems,
897 TEST_RING_VERIFY(ret == MAX_BULK - 3, r, goto fail);
898 cur_src = test_ring_inc_ptr(cur_src, esize[i], MAX_BULK - 3);
900 printf("Test if ring is full\n");
901 TEST_RING_VERIFY(rte_ring_full(r) == 1, r, goto fail);
903 printf("Test enqueue for a full entry\n");
904 ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
906 TEST_RING_VERIFY(ret == 0, r, goto fail);
908 printf("Test dequeue without enough objects\n");
909 for (j = 0; j < RING_SIZE / MAX_BULK - 1; j++) {
910 ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
912 TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
913 cur_dst = test_ring_inc_ptr(cur_dst, esize[i],
917 /* Available memory space for the exact MAX_BULK entries */
918 ret = test_ring_deq_impl(r, cur_dst, esize[i], 2, test_idx);
919 TEST_RING_VERIFY(ret == 2, r, goto fail);
920 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);
922 /* Bulk APIs enqueue exact number of elements */
923 if ((api_type & TEST_RING_ELEM_BULK) == TEST_RING_ELEM_BULK)
924 num_elems = MAX_BULK - 3;
926 num_elems = MAX_BULK;
927 ret = test_ring_deq_impl(r, cur_dst, esize[i], num_elems,
929 TEST_RING_VERIFY(ret == MAX_BULK - 3, r, goto fail);
930 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], MAX_BULK - 3);
932 printf("Test if ring is empty\n");
933 /* Check if ring is empty */
934 TEST_RING_VERIFY(rte_ring_empty(r) == 1, r, goto fail);
/* Validate the entire dequeued region against the source. */
937 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
938 RTE_PTR_DIFF(cur_dst, dst)) == 0,
941 /* Free memory before test completed */
959 * Test default, single element, bulk and burst APIs
/* Basic single/bulk/burst API exercise on an SP/SC ring: fill with
 * single-element enqueues (RING_SIZE - 1 fit), verify full, drain and
 * verify empty, compare data; then repeat a 2-object round trip with
 * the burst and bulk variants. Error/cleanup paths are elided in this
 * extract.
 */
962 test_ring_basic_ex(void)
966 struct rte_ring *rp = NULL;
967 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
969 for (i = 0; i < RTE_DIM(esize); i++) {
970 rp = test_ring_create("test_ring_basic_ex", esize[i], RING_SIZE,
972 RING_F_SP_ENQ | RING_F_SC_DEQ);
974 printf("%s: failed to create ring\n", __func__);
978 /* alloc dummy object pointers */
979 src = test_ring_calloc(RING_SIZE, esize[i]);
981 printf("%s: failed to alloc src memory\n", __func__);
984 test_ring_mem_init(src, RING_SIZE, esize[i]);
987 /* alloc some room for copied objects */
988 dst = test_ring_calloc(RING_SIZE, esize[i]);
990 printf("%s: failed to alloc dst memory\n", __func__);
/* The ring must be discoverable by name and start empty. */
995 TEST_RING_VERIFY(rte_ring_lookup("test_ring_basic_ex") == rp,
998 TEST_RING_VERIFY(rte_ring_empty(rp) == 1, rp, goto fail_test);
1000 printf("%u ring entries are now free\n",
1001 rte_ring_free_count(rp));
1003 for (j = 0; j < RING_SIZE - 1; j++) {
1004 ret = test_ring_enqueue(rp, cur_src, esize[i], 1,
1005 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1006 TEST_RING_VERIFY(ret == 0, rp, goto fail_test);
1007 cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
1010 TEST_RING_VERIFY(rte_ring_full(rp) == 1, rp, goto fail_test);
1012 for (j = 0; j < RING_SIZE - 1; j++) {
1013 ret = test_ring_dequeue(rp, cur_dst, esize[i], 1,
1014 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1015 TEST_RING_VERIFY(ret == 0, rp, goto fail_test);
1016 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 1);
1019 TEST_RING_VERIFY(rte_ring_empty(rp) == 1, rp, goto fail_test);
/* Validate dequeued data against the source pattern. */
1022 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
1023 RTE_PTR_DIFF(cur_dst, dst)) == 0,
1024 rp, goto fail_test);
1026 /* Following tests use the configured flags to decide
1029 /* reset memory of dst */
1030 memset(dst, 0, RTE_PTR_DIFF(cur_dst, dst));
1032 /* reset cur_src and cur_dst */
1036 /* Covering the ring burst operation */
1037 ret = test_ring_enqueue(rp, cur_src, esize[i], 2,
1038 TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
1039 TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
1040 cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);
1042 ret = test_ring_dequeue(rp, cur_dst, esize[i], 2,
1043 TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
1044 TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
1045 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);
1047 /* Covering the ring bulk operation */
1048 ret = test_ring_enqueue(rp, cur_src, esize[i], 2,
1049 TEST_RING_THREAD_DEF | TEST_RING_ELEM_BULK);
1050 TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
1052 ret = test_ring_dequeue(rp, cur_dst, esize[i], 2,
1053 TEST_RING_THREAD_DEF | TEST_RING_ELEM_BULK);
1054 TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
1055 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);
/* Validate the burst/bulk round-trip data. */
1058 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
1059 RTE_PTR_DIFF(cur_dst, dst)) == 0,
1060 rp, goto fail_test);
1080 * Basic test cases with exact size ring.
/* Exact-size ring tests: create a standard ring and a RING_F_EXACT_SZ
 * ring with the same requested size (16), using deliberately unaligned
 * object buffers, and verify the exact-size ring holds one more element
 * (16 vs 15), that capacity reports the requested size, and that the
 * dequeued data matches. Error/cleanup paths are elided in this
 * extract.
 */
1083 test_ring_with_exact_size(void)
1085 struct rte_ring *std_r = NULL, *exact_sz_r = NULL;
1086 void **src_orig = NULL, **dst_orig = NULL;
1087 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
1088 const unsigned int ring_sz = 16;
1092 for (i = 0; i < RTE_DIM(esize); i++) {
1093 test_ring_print_test_string("Test exact size ring",
1094 TEST_RING_IGNORE_API_TYPE,
1097 std_r = test_ring_create("std", esize[i], ring_sz,
1099 RING_F_SP_ENQ | RING_F_SC_DEQ);
1100 if (std_r == NULL) {
1101 printf("%s: error, can't create std ring\n", __func__);
1104 exact_sz_r = test_ring_create("exact sz", esize[i], ring_sz,
1106 RING_F_SP_ENQ | RING_F_SC_DEQ |
1108 if (exact_sz_r == NULL) {
1109 printf("%s: error, can't create exact size ring\n",
1114 /* alloc object pointers. Allocate one extra object
1115 * and create an unaligned address.
1117 src_orig = test_ring_calloc(17, esize[i]);
1118 if (src_orig == NULL)
1120 test_ring_mem_init(src_orig, 17, esize[i]);
1121 src = (void **)((uintptr_t)src_orig + 1);
1124 dst_orig = test_ring_calloc(17, esize[i]);
1125 if (dst_orig == NULL)
1127 dst = (void **)((uintptr_t)dst_orig + 1);
1131 * Check that the exact size ring is bigger than the
1134 TEST_RING_VERIFY(rte_ring_get_size(std_r) <=
1135 rte_ring_get_size(exact_sz_r),
1136 std_r, goto test_fail);
1139 * check that the exact_sz_ring can hold one more element
1140 * than the standard ring. (16 vs 15 elements)
1142 for (j = 0; j < ring_sz - 1; j++) {
1143 ret = test_ring_enqueue(std_r, cur_src, esize[i], 1,
1144 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1145 TEST_RING_VERIFY(ret == 0, std_r, goto test_fail);
1146 ret = test_ring_enqueue(exact_sz_r, cur_src, esize[i], 1,
1147 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1148 TEST_RING_VERIFY(ret == 0, exact_sz_r, goto test_fail);
1149 cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
/* The 16th enqueue must fail on the std ring but not on exact-size. */
1151 ret = test_ring_enqueue(std_r, cur_src, esize[i], 1,
1152 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1153 TEST_RING_VERIFY(ret == -ENOBUFS, std_r, goto test_fail);
1154 ret = test_ring_enqueue(exact_sz_r, cur_src, esize[i], 1,
1155 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1156 TEST_RING_VERIFY(ret != -ENOBUFS, exact_sz_r, goto test_fail);
1158 /* check that dequeue returns the expected number of elements */
1159 ret = test_ring_dequeue(exact_sz_r, cur_dst, esize[i], ring_sz,
1160 TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
1161 TEST_RING_VERIFY(ret == (int)ring_sz, exact_sz_r, goto test_fail);
1162 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], ring_sz);
1164 /* check that the capacity function returns expected value */
1165 TEST_RING_VERIFY(rte_ring_get_capacity(exact_sz_r) == ring_sz,
1166 exact_sz_r, goto test_fail);
/* Validate dequeued data against the source pattern. */
1169 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
1170 RTE_PTR_DIFF(cur_dst, dst)) == 0,
1171 exact_sz_r, goto test_fail);
/* Per-iteration cleanup (success path). */
1175 rte_ring_free(std_r);
1176 rte_ring_free(exact_sz_r);
/* Failure-path cleanup (label elided in this extract). */
1188 rte_ring_free(std_r);
1189 rte_ring_free(exact_sz_r);
1199 /* Negative test cases */
/* Top-level test driver body (function header elided in this extract):
 * run the negative, basic, and exact-size suites, then all four
 * burst/bulk suites for every entry in test_enqdeq_impl, and finally
 * dump ring status.
 */
1200 if (test_ring_negative_tests() < 0)
1203 /* Some basic operations */
1204 if (test_ring_basic_ex() < 0)
1207 if (test_ring_with_exact_size() < 0)
1210 /* Burst and bulk operations with sp/sc, mp/mc and default.
1211 * The test cases are split into smaller test cases to
1212 * help clang compile faster.
1214 for (i = 0; i != RTE_DIM(test_enqdeq_impl); i++) {
1217 rc = test_ring_burst_bulk_tests1(i);
1221 rc = test_ring_burst_bulk_tests2(i);
1225 rc = test_ring_burst_bulk_tests3(i);
1229 rc = test_ring_burst_bulk_tests4(i);
1234 /* dump the ring status */
1235 rte_ring_list_dump(stdout);
1244 REGISTER_TEST_COMMAND(ring_autotest, test_ring);