1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
3 * Copyright(c) 2020 Arm Limited
13 #include <sys/queue.h>
15 #include <rte_common.h>
17 #include <rte_memory.h>
18 #include <rte_launch.h>
19 #include <rte_cycles.h>
21 #include <rte_per_lcore.h>
22 #include <rte_lcore.h>
23 #include <rte_atomic.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_malloc.h>
27 #include <rte_ring_elem.h>
28 #include <rte_random.h>
29 #include <rte_errno.h>
30 #include <rte_hexdump.h>
33 #include "test_ring.h"
39 * #. Functional tests. Tests single/bulk/burst, default/SPSC/MPMC,
40 * legacy/custom element size (4B, 8B, 16B, 20B) APIs.
41 * Some tests incorporate unaligned addresses for objects.
42 * The enqueued/dequeued data is validated for correctness.
44 * #. Performance tests are in test_ring_perf.c
47 #define RING_SIZE 4096
51 * Validate the return value of test cases and print details of the
52 * ring if validation fails
55 * Expression to validate return value.
57 * A pointer to the ring structure.
59 #define TEST_RING_VERIFY(exp, r, errst) do { \
61 printf("error at %s:%d\tcondition " #exp " failed\n", \
62 __func__, __LINE__); \
63 rte_ring_dump(stdout, (r)); \
68 #define TEST_RING_FULL_EMPTY_ITER 8
70 static const int esize[] = {-1, 4, 8, 16, 20};
72 /* Wrappers around the zero-copy APIs. The wrappers match
73 * the normal enqueue/dequeue API declarations.
/*
 * Zero-copy bulk enqueue wrapper with the legacy enqueue signature:
 * reserve space in the ring, copy the object pointers in, then
 * publish the reservation with the finish call.
 */
76 test_ring_enqueue_zc_bulk(struct rte_ring *r, void * const *obj_table,
77 unsigned int n, unsigned int *free_space)
80 struct rte_ring_zc_data zcd;
82 ret = rte_ring_enqueue_zc_bulk_start(r, n, &zcd, free_space);
84 /* Copy the data to the ring */
85 test_ring_copy_to(&zcd, obj_table, sizeof(void *), ret);
86 rte_ring_enqueue_zc_finish(r, ret);
/*
 * Zero-copy bulk enqueue wrapper for esize-byte elements, matching the
 * elem enqueue signature: reserve, copy esize-sized objects, publish.
 */
93 test_ring_enqueue_zc_bulk_elem(struct rte_ring *r, const void *obj_table,
94 unsigned int esize, unsigned int n, unsigned int *free_space)
97 struct rte_ring_zc_data zcd;
99 ret = rte_ring_enqueue_zc_bulk_elem_start(r, esize, n,
102 /* Copy the data to the ring */
103 test_ring_copy_to(&zcd, obj_table, esize, ret);
104 rte_ring_enqueue_zc_finish(r, ret);
/*
 * Zero-copy burst enqueue wrapper with the legacy enqueue signature:
 * reserve as many of n slots as available, copy the pointers, publish.
 */
111 test_ring_enqueue_zc_burst(struct rte_ring *r, void * const *obj_table,
112 unsigned int n, unsigned int *free_space)
115 struct rte_ring_zc_data zcd;
117 ret = rte_ring_enqueue_zc_burst_start(r, n, &zcd, free_space);
119 /* Copy the data to the ring */
120 test_ring_copy_to(&zcd, obj_table, sizeof(void *), ret);
121 rte_ring_enqueue_zc_finish(r, ret);
/*
 * Zero-copy burst enqueue wrapper for esize-byte elements: reserve up
 * to n slots, copy esize-sized objects, publish the reservation.
 */
128 test_ring_enqueue_zc_burst_elem(struct rte_ring *r, const void *obj_table,
129 unsigned int esize, unsigned int n, unsigned int *free_space)
132 struct rte_ring_zc_data zcd;
134 ret = rte_ring_enqueue_zc_burst_elem_start(r, esize, n,
137 /* Copy the data to the ring */
138 test_ring_copy_to(&zcd, obj_table, esize, ret);
139 rte_ring_enqueue_zc_finish(r, ret);
/*
 * Zero-copy bulk dequeue wrapper with the legacy dequeue signature:
 * reserve n objects, copy the pointers out, then release the slots.
 */
146 test_ring_dequeue_zc_bulk(struct rte_ring *r, void **obj_table,
147 unsigned int n, unsigned int *available)
150 struct rte_ring_zc_data zcd;
152 ret = rte_ring_dequeue_zc_bulk_start(r, n, &zcd, available);
154 /* Copy the data from the ring */
155 test_ring_copy_from(&zcd, obj_table, sizeof(void *), ret);
156 rte_ring_dequeue_zc_finish(r, ret);
/*
 * Zero-copy bulk dequeue wrapper for esize-byte elements: reserve,
 * copy esize-sized objects out, release the ring slots.
 */
163 test_ring_dequeue_zc_bulk_elem(struct rte_ring *r, void *obj_table,
164 unsigned int esize, unsigned int n, unsigned int *available)
167 struct rte_ring_zc_data zcd;
169 ret = rte_ring_dequeue_zc_bulk_elem_start(r, esize, n,
172 /* Copy the data from the ring */
173 test_ring_copy_from(&zcd, obj_table, esize, ret);
174 rte_ring_dequeue_zc_finish(r, ret);
/*
 * Zero-copy burst dequeue wrapper with the legacy dequeue signature:
 * reserve up to n objects, copy the pointers out, release the slots.
 */
181 test_ring_dequeue_zc_burst(struct rte_ring *r, void **obj_table,
182 unsigned int n, unsigned int *available)
185 struct rte_ring_zc_data zcd;
187 ret = rte_ring_dequeue_zc_burst_start(r, n, &zcd, available);
189 /* Copy the data from the ring */
190 test_ring_copy_from(&zcd, obj_table, sizeof(void *), ret);
191 rte_ring_dequeue_zc_finish(r, ret);
/*
 * Zero-copy burst dequeue wrapper for esize-byte elements: reserve up
 * to n objects, copy esize-sized data out, release the ring slots.
 */
198 test_ring_dequeue_zc_burst_elem(struct rte_ring *r, void *obj_table,
199 unsigned int esize, unsigned int n, unsigned int *available)
202 struct rte_ring_zc_data zcd;
204 ret = rte_ring_dequeue_zc_burst_elem_start(r, esize, n,
207 /* Copy the data from the ring */
208 test_ring_copy_from(&zcd, obj_table, esize, ret);
209 rte_ring_dequeue_zc_finish(r, ret);
/*
 * Table of enqueue/dequeue implementations exercised by the burst/bulk
 * tests. Each entry carries a human-readable description, the
 * TEST_RING_* api_type flags, the RING_F_* flags used at ring
 * creation, and matched legacy/elem function pointers for enqueue
 * (.enq) and dequeue (.deq). The last four entries use the zero-copy
 * wrappers defined above.
 */
215 static const struct {
218 uint32_t create_flags;
220 unsigned int (*flegacy)(struct rte_ring *r,
221 void * const *obj_table, unsigned int n,
222 unsigned int *free_space);
223 unsigned int (*felem)(struct rte_ring *r, const void *obj_table,
224 unsigned int esize, unsigned int n,
225 unsigned int *free_space);
228 unsigned int (*flegacy)(struct rte_ring *r,
229 void **obj_table, unsigned int n,
230 unsigned int *available);
231 unsigned int (*felem)(struct rte_ring *r, void *obj_table,
232 unsigned int esize, unsigned int n,
233 unsigned int *available);
235 } test_enqdeq_impl[] = {
237 .desc = "MP/MC sync mode",
238 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
241 .flegacy = rte_ring_enqueue_bulk,
242 .felem = rte_ring_enqueue_bulk_elem,
245 .flegacy = rte_ring_dequeue_bulk,
246 .felem = rte_ring_dequeue_bulk_elem,
250 .desc = "SP/SC sync mode",
251 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_SPSC,
252 .create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
254 .flegacy = rte_ring_sp_enqueue_bulk,
255 .felem = rte_ring_sp_enqueue_bulk_elem,
258 .flegacy = rte_ring_sc_dequeue_bulk,
259 .felem = rte_ring_sc_dequeue_bulk_elem,
263 .desc = "MP/MC sync mode",
264 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_MPMC,
267 .flegacy = rte_ring_mp_enqueue_bulk,
268 .felem = rte_ring_mp_enqueue_bulk_elem,
271 .flegacy = rte_ring_mc_dequeue_bulk,
272 .felem = rte_ring_mc_dequeue_bulk_elem,
276 .desc = "MP_RTS/MC_RTS sync mode",
277 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
278 .create_flags = RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ,
280 .flegacy = rte_ring_enqueue_bulk,
281 .felem = rte_ring_enqueue_bulk_elem,
284 .flegacy = rte_ring_dequeue_bulk,
285 .felem = rte_ring_dequeue_bulk_elem,
289 .desc = "MP_HTS/MC_HTS sync mode",
290 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
291 .create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
293 .flegacy = rte_ring_enqueue_bulk,
294 .felem = rte_ring_enqueue_bulk_elem,
297 .flegacy = rte_ring_dequeue_bulk,
298 .felem = rte_ring_dequeue_bulk_elem,
302 .desc = "MP/MC sync mode",
303 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
306 .flegacy = rte_ring_enqueue_burst,
307 .felem = rte_ring_enqueue_burst_elem,
310 .flegacy = rte_ring_dequeue_burst,
311 .felem = rte_ring_dequeue_burst_elem,
315 .desc = "SP/SC sync mode",
316 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_SPSC,
317 .create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
319 .flegacy = rte_ring_sp_enqueue_burst,
320 .felem = rte_ring_sp_enqueue_burst_elem,
323 .flegacy = rte_ring_sc_dequeue_burst,
324 .felem = rte_ring_sc_dequeue_burst_elem,
328 .desc = "MP/MC sync mode",
329 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_MPMC,
332 .flegacy = rte_ring_mp_enqueue_burst,
333 .felem = rte_ring_mp_enqueue_burst_elem,
336 .flegacy = rte_ring_mc_dequeue_burst,
337 .felem = rte_ring_mc_dequeue_burst_elem,
341 .desc = "MP_RTS/MC_RTS sync mode",
342 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
343 .create_flags = RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ,
345 .flegacy = rte_ring_enqueue_burst,
346 .felem = rte_ring_enqueue_burst_elem,
349 .flegacy = rte_ring_dequeue_burst,
350 .felem = rte_ring_dequeue_burst_elem,
354 .desc = "MP_HTS/MC_HTS sync mode",
355 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
356 .create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
358 .flegacy = rte_ring_enqueue_burst,
359 .felem = rte_ring_enqueue_burst_elem,
362 .flegacy = rte_ring_dequeue_burst,
363 .felem = rte_ring_dequeue_burst_elem,
367 .desc = "SP/SC sync mode (ZC)",
368 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_SPSC,
369 .create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
371 .flegacy = test_ring_enqueue_zc_bulk,
372 .felem = test_ring_enqueue_zc_bulk_elem,
375 .flegacy = test_ring_dequeue_zc_bulk,
376 .felem = test_ring_dequeue_zc_bulk_elem,
380 .desc = "MP_HTS/MC_HTS sync mode (ZC)",
381 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
382 .create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
384 .flegacy = test_ring_enqueue_zc_bulk,
385 .felem = test_ring_enqueue_zc_bulk_elem,
388 .flegacy = test_ring_dequeue_zc_bulk,
389 .felem = test_ring_dequeue_zc_bulk_elem,
393 .desc = "SP/SC sync mode (ZC)",
394 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_SPSC,
395 .create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
397 .flegacy = test_ring_enqueue_zc_burst,
398 .felem = test_ring_enqueue_zc_burst_elem,
401 .flegacy = test_ring_dequeue_zc_burst,
402 .felem = test_ring_dequeue_zc_burst_elem,
406 .desc = "MP_HTS/MC_HTS sync mode (ZC)",
407 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
408 .create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
410 .flegacy = test_ring_enqueue_zc_burst,
411 .felem = test_ring_enqueue_zc_burst_elem,
414 .flegacy = test_ring_dequeue_zc_burst,
415 .felem = test_ring_dequeue_zc_burst_elem,
/*
 * Dispatch an enqueue through the implementation table: calls the
 * legacy or element-sized enqueue from test_enqdeq_impl[test_idx].
 * (The selection condition is elided here — presumably based on
 * esize == -1 for legacy, as in test_ring_mem_init; confirm in full
 * source.)
 */
421 test_ring_enq_impl(struct rte_ring *r, void **obj, int esize, unsigned int n,
422 unsigned int test_idx)
425 return test_enqdeq_impl[test_idx].enq.flegacy(r, obj, n, NULL);
427 return test_enqdeq_impl[test_idx].enq.felem(r, obj, esize, n,
/*
 * Dispatch a dequeue through the implementation table: calls the
 * legacy or element-sized dequeue from test_enqdeq_impl[test_idx].
 * Mirrors test_ring_enq_impl.
 */
432 test_ring_deq_impl(struct rte_ring *r, void **obj, int esize, unsigned int n,
433 unsigned int test_idx)
436 return test_enqdeq_impl[test_idx].deq.flegacy(r, obj, n, NULL);
438 return test_enqdeq_impl[test_idx].deq.felem(r, obj, esize, n,
/*
 * Fill a test buffer with a deterministic pattern so dequeued data can
 * be validated: for the legacy (pointer) APIs each slot holds its own
 * index as a pointer value; otherwise the buffer is filled with an
 * incrementing 32-bit ramp covering count * esize bytes.
 */
443 test_ring_mem_init(void *obj, unsigned int count, int esize)
447 /* Legacy queue APIs? */
449 for (i = 0; i < count; i++)
450 ((void **)obj)[i] = (void *)(uintptr_t)i;
452 for (i = 0; i < (count * esize / sizeof(uint32_t)); i++)
453 ((uint32_t *)obj)[i] = i;
/*
 * Compare src and dst buffers with memcmp; on mismatch hexdump both
 * regions and print a diagnostic so the failing data is visible.
 */
457 test_ring_mem_cmp(void *src, void *dst, unsigned int size)
461 ret = memcmp(src, dst, size);
463 rte_hexdump(stdout, "src", src, size);
464 rte_hexdump(stdout, "dst", dst, size);
465 printf("data after dequeue is not the same\n");
/*
 * Print a human-readable description of the test being run: legacy vs
 * elem APIs (with element size), then the threading mode and the
 * single/bulk/burst flavor decoded from the api_type flag bits.
 */
472 test_ring_print_test_string(const char *istr, unsigned int api_type, int esize)
474 printf("\n%s: ", istr);
477 printf("legacy APIs: ");
479 printf("elem APIs: element size %dB ", esize);
481 if (api_type == TEST_RING_IGNORE_API_TYPE)
484 if (api_type & TEST_RING_THREAD_DEF)
485 printf(": default enqueue/dequeue: ");
486 else if (api_type & TEST_RING_THREAD_SPSC)
488 else if (api_type & TEST_RING_THREAD_MPMC)
491 if (api_type & TEST_RING_ELEM_SINGLE)
493 else if (api_type & TEST_RING_ELEM_BULK)
495 else if (api_type & TEST_RING_ELEM_BURST)
500 * Various negative test cases.
/*
 * Negative test cases: verify that ring creation rejects invalid
 * element sizes, non-power-of-2 counts and oversized counts, that
 * lookup of a non-existent ring fails with ENOENT, and that creating
 * a second ring with an already-used name fails.
 */
503 test_ring_negative_tests(void)
505 struct rte_ring *rp = NULL;
506 struct rte_ring *rt = NULL;
509 /* Test with esize not a multiple of 4 */
510 rp = test_ring_create("test_bad_element_size", 23,
511 RING_SIZE + 1, SOCKET_ID_ANY, 0);
513 printf("Test failed to detect invalid element size\n");
518 for (i = 0; i < RTE_DIM(esize); i++) {
519 /* Test if ring size is not power of 2 */
520 rp = test_ring_create("test_bad_ring_size", esize[i],
521 RING_SIZE + 1, SOCKET_ID_ANY, 0);
523 printf("Test failed to detect odd count\n");
527 /* Test if ring size is exceeding the limit */
528 rp = test_ring_create("test_bad_ring_size", esize[i],
529 RTE_RING_SZ_MASK + 1, SOCKET_ID_ANY, 0);
531 printf("Test failed to detect limits\n");
535 /* Tests if lookup returns NULL on non-existing ring */
536 rp = rte_ring_lookup("ring_not_found");
537 if (rp != NULL && rte_errno != ENOENT) {
538 printf("Test failed to detect NULL ring lookup\n");
542 /* Test to if a non-power of 2 count causes the create
543 * function to fail correctly
545 rp = test_ring_create("test_ring_count", esize[i], 4097,
550 rp = test_ring_create("test_ring_negative", esize[i], RING_SIZE,
552 RING_F_SP_ENQ | RING_F_SC_DEQ);
554 printf("test_ring_negative fail to create ring\n");
558 TEST_RING_VERIFY(rte_ring_lookup("test_ring_negative") == rp,
561 TEST_RING_VERIFY(rte_ring_empty(rp) == 1, rp, goto test_fail);
563 /* Tests if it would always fail to create ring with a used
566 rt = test_ring_create("test_ring_negative", esize[i], RING_SIZE,
584 * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
585 * Random number of elements are enqueued and dequeued.
/*
 * Random full/empty test for one implementation from test_enqdeq_impl:
 * for each element size, enqueue/dequeue a random number of elements
 * per iteration, then fill the ring to capacity (RING_SIZE - 1 usable
 * slots), verify the full/empty/count accessors, drain it, and compare
 * the dequeued data against the source pattern.
 */
588 test_ring_burst_bulk_tests1(unsigned int test_idx)
591 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
593 unsigned int i, j, temp_sz;
595 const unsigned int rsz = RING_SIZE - 1;
597 for (i = 0; i < RTE_DIM(esize); i++) {
598 test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
599 test_enqdeq_impl[test_idx].api_type, esize[i]);
601 /* Create the ring */
602 r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
603 RING_SIZE, SOCKET_ID_ANY,
604 test_enqdeq_impl[test_idx].create_flags);
606 /* alloc dummy object pointers */
607 src = test_ring_calloc(RING_SIZE * 2, esize[i]);
610 test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
613 /* alloc some room for copied objects */
614 dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
619 printf("Random full/empty test\n");
621 for (j = 0; j != TEST_RING_FULL_EMPTY_ITER; j++) {
622 /* random shift in the ring; at least one element */
623 rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
624 printf("%s: iteration %u, random shift: %u;\n",
626 ret = test_ring_enq_impl(r, cur_src, esize[i], rand,
628 TEST_RING_VERIFY(ret != 0, r, goto fail);
630 ret = test_ring_deq_impl(r, cur_dst, esize[i], rand,
632 TEST_RING_VERIFY(ret == rand, r, goto fail);
635 ret = test_ring_enq_impl(r, cur_src, esize[i], rsz,
637 TEST_RING_VERIFY(ret != 0, r, goto fail);
639 TEST_RING_VERIFY(rte_ring_free_count(r) == 0, r, goto fail);
640 TEST_RING_VERIFY(rsz == rte_ring_count(r), r, goto fail);
641 TEST_RING_VERIFY(rte_ring_full(r), r, goto fail);
642 TEST_RING_VERIFY(rte_ring_empty(r) == 0, r, goto fail);
645 ret = test_ring_deq_impl(r, cur_dst, esize[i], rsz,
647 TEST_RING_VERIFY(ret == (int)rsz, r, goto fail);
649 TEST_RING_VERIFY(rsz == rte_ring_free_count(r), r, goto fail);
650 TEST_RING_VERIFY(rte_ring_count(r) == 0, r, goto fail);
651 TEST_RING_VERIFY(rte_ring_full(r) == 0, r, goto fail);
652 TEST_RING_VERIFY(rte_ring_empty(r), r, goto fail);
655 temp_sz = rsz * sizeof(void *);
657 temp_sz = rsz * esize[i];
658 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
659 temp_sz) == 0, r, goto fail);
662 /* Free memory before test completed */
680 * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
681 * Sequence of simple enqueues/dequeues and validate the enqueued and
/*
 * Simple enqueue/dequeue sequence test: for each element size, enqueue
 * 1, 2 and MAX_BULK objects, dequeue them back in the same step sizes,
 * and validate the dequeued data matches the source pattern.
 */
685 test_ring_burst_bulk_tests2(unsigned int test_idx)
688 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
692 for (i = 0; i < RTE_DIM(esize); i++) {
693 test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
694 test_enqdeq_impl[test_idx].api_type, esize[i]);
696 /* Create the ring */
697 r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
698 RING_SIZE, SOCKET_ID_ANY,
699 test_enqdeq_impl[test_idx].create_flags);
701 /* alloc dummy object pointers */
702 src = test_ring_calloc(RING_SIZE * 2, esize[i]);
705 test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
708 /* alloc some room for copied objects */
709 dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
714 printf("enqueue 1 obj\n");
715 ret = test_ring_enq_impl(r, cur_src, esize[i], 1, test_idx);
716 TEST_RING_VERIFY(ret == 1, r, goto fail);
717 cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
719 printf("enqueue 2 objs\n");
720 ret = test_ring_enq_impl(r, cur_src, esize[i], 2, test_idx);
721 TEST_RING_VERIFY(ret == 2, r, goto fail);
722 cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);
724 printf("enqueue MAX_BULK objs\n");
725 ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
727 TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
728 cur_src = test_ring_inc_ptr(cur_src, esize[i], MAX_BULK);
730 printf("dequeue 1 obj\n");
731 ret = test_ring_deq_impl(r, cur_dst, esize[i], 1, test_idx);
732 TEST_RING_VERIFY(ret == 1, r, goto fail);
733 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 1);
735 printf("dequeue 2 objs\n");
736 ret = test_ring_deq_impl(r, cur_dst, esize[i], 2, test_idx);
737 TEST_RING_VERIFY(ret == 2, r, goto fail);
738 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);
740 printf("dequeue MAX_BULK objs\n");
741 ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
743 TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
744 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], MAX_BULK);
747 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
748 RTE_PTR_DIFF(cur_dst, dst)) == 0,
751 /* Free memory before test completed */
769 * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
770 * Enqueue and dequeue to cover the entire ring length.
/*
 * Fill-and-empty test covering the entire ring length: for each
 * element size, repeatedly enqueue then immediately dequeue MAX_BULK
 * objects until RING_SIZE elements have passed through, then validate
 * the dequeued data against the source pattern.
 */
773 test_ring_burst_bulk_tests3(unsigned int test_idx)
776 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
780 for (i = 0; i < RTE_DIM(esize); i++) {
781 test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
782 test_enqdeq_impl[test_idx].api_type, esize[i]);
784 /* Create the ring */
785 r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
786 RING_SIZE, SOCKET_ID_ANY,
787 test_enqdeq_impl[test_idx].create_flags);
789 /* alloc dummy object pointers */
790 src = test_ring_calloc(RING_SIZE * 2, esize[i]);
793 test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
796 /* alloc some room for copied objects */
797 dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
802 printf("fill and empty the ring\n");
803 for (j = 0; j < RING_SIZE / MAX_BULK; j++) {
804 ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
806 TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
807 cur_src = test_ring_inc_ptr(cur_src, esize[i],
810 ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
812 TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
813 cur_dst = test_ring_inc_ptr(cur_dst, esize[i],
818 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
819 RTE_PTR_DIFF(cur_dst, dst)) == 0,
822 /* Free memory before test completed */
840 * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
841 * Enqueue till the ring is full and dequeue till the ring becomes empty.
/*
 * Boundary test: enqueue until the ring is full (exercising the
 * partially-full corner where fewer than MAX_BULK slots remain —
 * bulk APIs must request the exact remainder, burst APIs may
 * over-request), verify a full ring rejects further enqueues, then
 * dequeue until empty and validate the data.
 */
844 test_ring_burst_bulk_tests4(unsigned int test_idx)
847 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
850 unsigned int api_type, num_elems;
852 api_type = test_enqdeq_impl[test_idx].api_type;
854 for (i = 0; i < RTE_DIM(esize); i++) {
855 test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
856 test_enqdeq_impl[test_idx].api_type, esize[i]);
858 /* Create the ring */
859 r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
860 RING_SIZE, SOCKET_ID_ANY,
861 test_enqdeq_impl[test_idx].create_flags);
863 /* alloc dummy object pointers */
864 src = test_ring_calloc(RING_SIZE * 2, esize[i]);
867 test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
870 /* alloc some room for copied objects */
871 dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
876 printf("Test enqueue without enough memory space\n");
877 for (j = 0; j < (RING_SIZE/MAX_BULK - 1); j++) {
878 ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
880 TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
881 cur_src = test_ring_inc_ptr(cur_src, esize[i],
885 printf("Enqueue 2 objects, free entries = MAX_BULK - 2\n");
886 ret = test_ring_enq_impl(r, cur_src, esize[i], 2, test_idx);
887 TEST_RING_VERIFY(ret == 2, r, goto fail);
888 cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);
890 printf("Enqueue the remaining entries = MAX_BULK - 3\n");
891 /* Bulk APIs enqueue exact number of elements */
892 if ((api_type & TEST_RING_ELEM_BULK) == TEST_RING_ELEM_BULK)
893 num_elems = MAX_BULK - 3;
895 num_elems = MAX_BULK;
896 /* Always one free entry left */
897 ret = test_ring_enq_impl(r, cur_src, esize[i], num_elems,
899 TEST_RING_VERIFY(ret == MAX_BULK - 3, r, goto fail);
900 cur_src = test_ring_inc_ptr(cur_src, esize[i], MAX_BULK - 3);
902 printf("Test if ring is full\n");
903 TEST_RING_VERIFY(rte_ring_full(r) == 1, r, goto fail);
905 printf("Test enqueue for a full entry\n");
906 ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
908 TEST_RING_VERIFY(ret == 0, r, goto fail);
910 printf("Test dequeue without enough objects\n");
911 for (j = 0; j < RING_SIZE / MAX_BULK - 1; j++) {
912 ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
914 TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
915 cur_dst = test_ring_inc_ptr(cur_dst, esize[i],
919 /* Available memory space for the exact MAX_BULK entries */
920 ret = test_ring_deq_impl(r, cur_dst, esize[i], 2, test_idx);
921 TEST_RING_VERIFY(ret == 2, r, goto fail);
922 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);
924 /* Bulk APIs dequeue exact number of elements */
925 if ((api_type & TEST_RING_ELEM_BULK) == TEST_RING_ELEM_BULK)
926 num_elems = MAX_BULK - 3;
928 num_elems = MAX_BULK;
929 ret = test_ring_deq_impl(r, cur_dst, esize[i], num_elems,
931 TEST_RING_VERIFY(ret == MAX_BULK - 3, r, goto fail);
932 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], MAX_BULK - 3);
934 printf("Test if ring is empty\n");
935 /* Check if ring is empty */
936 TEST_RING_VERIFY(rte_ring_empty(r) == 1, r, goto fail);
939 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
940 RTE_PTR_DIFF(cur_dst, dst)) == 0,
943 /* Free memory before test completed */
961 * Test default, single element, bulk and burst APIs
/*
 * Basic single/bulk/burst API exercise on an SP/SC ring: fill the ring
 * one element at a time (RING_SIZE - 1 usable slots), drain it, then
 * repeat with 2-element burst and bulk operations, validating the
 * dequeued data after each phase.
 */
964 test_ring_basic_ex(void)
968 struct rte_ring *rp = NULL;
969 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
971 for (i = 0; i < RTE_DIM(esize); i++) {
972 rp = test_ring_create("test_ring_basic_ex", esize[i], RING_SIZE,
974 RING_F_SP_ENQ | RING_F_SC_DEQ);
976 printf("%s: failed to create ring\n", __func__);
980 /* alloc dummy object pointers */
981 src = test_ring_calloc(RING_SIZE, esize[i]);
983 printf("%s: failed to alloc src memory\n", __func__);
986 test_ring_mem_init(src, RING_SIZE, esize[i]);
989 /* alloc some room for copied objects */
990 dst = test_ring_calloc(RING_SIZE, esize[i]);
992 printf("%s: failed to alloc dst memory\n", __func__);
997 TEST_RING_VERIFY(rte_ring_lookup("test_ring_basic_ex") == rp,
1000 TEST_RING_VERIFY(rte_ring_empty(rp) == 1, rp, goto fail_test);
1002 printf("%u ring entries are now free\n",
1003 rte_ring_free_count(rp));
1005 for (j = 0; j < RING_SIZE - 1; j++) {
1006 ret = test_ring_enqueue(rp, cur_src, esize[i], 1,
1007 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1008 TEST_RING_VERIFY(ret == 0, rp, goto fail_test);
1009 cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
1012 TEST_RING_VERIFY(rte_ring_full(rp) == 1, rp, goto fail_test);
1014 for (j = 0; j < RING_SIZE - 1; j++) {
1015 ret = test_ring_dequeue(rp, cur_dst, esize[i], 1,
1016 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1017 TEST_RING_VERIFY(ret == 0, rp, goto fail_test);
1018 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 1);
1021 TEST_RING_VERIFY(rte_ring_empty(rp) == 1, rp, goto fail_test);
1024 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
1025 RTE_PTR_DIFF(cur_dst, dst)) == 0,
1026 rp, goto fail_test);
1028 /* Following tests use the configured flags to decide
1031 /* reset memory of dst */
1032 memset(dst, 0, RTE_PTR_DIFF(cur_dst, dst));
1034 /* reset cur_src and cur_dst */
1038 /* Covering the ring burst operation */
1039 ret = test_ring_enqueue(rp, cur_src, esize[i], 2,
1040 TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
1041 TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
1042 cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);
1044 ret = test_ring_dequeue(rp, cur_dst, esize[i], 2,
1045 TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
1046 TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
1047 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);
1049 /* Covering the ring bulk operation */
1050 ret = test_ring_enqueue(rp, cur_src, esize[i], 2,
1051 TEST_RING_THREAD_DEF | TEST_RING_ELEM_BULK);
1052 TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
1053 cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);
1055 ret = test_ring_dequeue(rp, cur_dst, esize[i], 2,
1056 TEST_RING_THREAD_DEF | TEST_RING_ELEM_BULK);
1057 TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
1058 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);
1061 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
1062 RTE_PTR_DIFF(cur_dst, dst)) == 0,
1063 rp, goto fail_test);
1083 * Basic test cases with exact size ring.
/*
 * Exact-size ring test: create a standard ring and a RING_F_EXACT_SZ
 * ring of the same requested size (16), using deliberately unaligned
 * source/destination buffers, and verify the exact-size ring holds one
 * more element than the standard ring (16 vs 15), that its capacity
 * accessor reports the requested size, and that dequeued data matches.
 */
1086 test_ring_with_exact_size(void)
1088 struct rte_ring *std_r = NULL, *exact_sz_r = NULL;
1089 void **src_orig = NULL, **dst_orig = NULL;
1090 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
1091 const unsigned int ring_sz = 16;
1095 for (i = 0; i < RTE_DIM(esize); i++) {
1096 test_ring_print_test_string("Test exact size ring",
1097 TEST_RING_IGNORE_API_TYPE,
1100 std_r = test_ring_create("std", esize[i], ring_sz,
1102 RING_F_SP_ENQ | RING_F_SC_DEQ);
1103 if (std_r == NULL) {
1104 printf("%s: error, can't create std ring\n", __func__);
1107 exact_sz_r = test_ring_create("exact sz", esize[i], ring_sz,
1109 RING_F_SP_ENQ | RING_F_SC_DEQ |
1111 if (exact_sz_r == NULL) {
1112 printf("%s: error, can't create exact size ring\n",
1117 /* alloc object pointers. Allocate one extra object
1118 * and create an unaligned address.
1120 src_orig = test_ring_calloc(17, esize[i]);
1121 if (src_orig == NULL)
1123 test_ring_mem_init(src_orig, 17, esize[i]);
1124 src = (void **)((uintptr_t)src_orig + 1);
1127 dst_orig = test_ring_calloc(17, esize[i]);
1128 if (dst_orig == NULL)
1130 dst = (void **)((uintptr_t)dst_orig + 1);
1134 * Check that the exact size ring is bigger than the
1137 TEST_RING_VERIFY(rte_ring_get_size(std_r) <=
1138 rte_ring_get_size(exact_sz_r),
1139 std_r, goto test_fail);
1142 * check that the exact_sz_ring can hold one more element
1143 * than the standard ring. (16 vs 15 elements)
1145 for (j = 0; j < ring_sz - 1; j++) {
1146 ret = test_ring_enqueue(std_r, cur_src, esize[i], 1,
1147 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1148 TEST_RING_VERIFY(ret == 0, std_r, goto test_fail);
1149 ret = test_ring_enqueue(exact_sz_r, cur_src, esize[i], 1,
1150 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1151 TEST_RING_VERIFY(ret == 0, exact_sz_r, goto test_fail);
1152 cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
1154 ret = test_ring_enqueue(std_r, cur_src, esize[i], 1,
1155 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1156 TEST_RING_VERIFY(ret == -ENOBUFS, std_r, goto test_fail);
1157 ret = test_ring_enqueue(exact_sz_r, cur_src, esize[i], 1,
1158 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1159 TEST_RING_VERIFY(ret != -ENOBUFS, exact_sz_r, goto test_fail);
1160 cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
1162 /* check that dequeue returns the expected number of elements */
1163 ret = test_ring_dequeue(exact_sz_r, cur_dst, esize[i], ring_sz,
1164 TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
1165 TEST_RING_VERIFY(ret == (int)ring_sz, exact_sz_r, goto test_fail);
1166 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], ring_sz);
1168 /* check that the capacity function returns expected value */
1169 TEST_RING_VERIFY(rte_ring_get_capacity(exact_sz_r) == ring_sz,
1170 exact_sz_r, goto test_fail);
1173 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
1174 RTE_PTR_DIFF(cur_dst, dst)) == 0,
1175 exact_sz_r, goto test_fail);
1179 rte_ring_free(std_r);
1180 rte_ring_free(exact_sz_r);
1192 rte_ring_free(std_r);
1193 rte_ring_free(exact_sz_r);
/*
 * Test-suite driver (body fragment): runs the negative tests, the
 * basic and exact-size tests, then the burst/bulk test matrix once
 * per entry in test_enqdeq_impl, and finally dumps all rings.
 */
1203 /* Negative test cases */
1204 if (test_ring_negative_tests() < 0)
1207 /* Some basic operations */
1208 if (test_ring_basic_ex() < 0)
1211 if (test_ring_with_exact_size() < 0)
1214 /* Burst and bulk operations with sp/sc, mp/mc and default.
1215 * The test cases are split into smaller test cases to
1216 * help clang compile faster.
1218 for (i = 0; i != RTE_DIM(test_enqdeq_impl); i++) {
1221 rc = test_ring_burst_bulk_tests1(i);
1225 rc = test_ring_burst_bulk_tests2(i);
1229 rc = test_ring_burst_bulk_tests3(i);
1233 rc = test_ring_burst_bulk_tests4(i);
1238 /* dump the ring status */
1239 rte_ring_list_dump(stdout);
1248 REGISTER_TEST_COMMAND(ring_autotest, test_ring);