1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
12 #include <sys/queue.h>
14 #include <rte_common.h>
16 #include <rte_memory.h>
17 #include <rte_launch.h>
18 #include <rte_cycles.h>
20 #include <rte_per_lcore.h>
21 #include <rte_lcore.h>
22 #include <rte_atomic.h>
23 #include <rte_branch_prediction.h>
24 #include <rte_malloc.h>
26 #include <rte_ring_elem.h>
27 #include <rte_random.h>
28 #include <rte_errno.h>
29 #include <rte_hexdump.h>
32 #include "test_ring.h"
38 * #. Functional tests. Tests single/bulk/burst, default/SPSC/MPMC,
39 * legacy/custom element size (4B, 8B, 16B, 20B) APIs.
40 * Some tests incorporate unaligned addresses for objects.
41 * The enqueued/dequeued data is validated for correctness.
43 * #. Performance tests are in test_ring_perf.c
/* Slot count used when creating the rings under test. */
46 #define RING_SIZE 4096
50 * Validate the return value of test cases and print details of the
51 * ring if validation fails
54 * Expression to validate return value.
56 * A pointer to the ring structure.
58 #define TEST_RING_VERIFY(exp, r, errst) do { \
60 printf("error at %s:%d\tcondition " #exp " failed\n", \
61 __func__, __LINE__); \
62 rte_ring_dump(stdout, (r)); \
67 #define TEST_RING_FULL_EMPTY_ITER 8
/* Element sizes exercised by each test; -1 selects the legacy (void *) APIs. */
69 static const int esize[] = {-1, 4, 8, 16, 20};
/*
 * Table of enqueue/dequeue implementations under test. Each entry pairs a
 * legacy (void *) API (`flegacy`) with its `_elem` counterpart (`felem`)
 * for one ring synchronization mode — default MP/MC, SP/SC, explicit
 * MP/MC, RTS or HTS — in either bulk or burst form. `create_flags` are the
 * RING_F_* flags passed to ring creation so the ring matches the APIs.
 * NOTE(review): the struct declaration head and some field lines are not
 * visible in this view of the file.
 */
74 uint32_t create_flags;
76 unsigned int (*flegacy)(struct rte_ring *r,
77 void * const *obj_table, unsigned int n,
78 unsigned int *free_space);
79 unsigned int (*felem)(struct rte_ring *r, const void *obj_table,
80 unsigned int esize, unsigned int n,
81 unsigned int *free_space);
84 unsigned int (*flegacy)(struct rte_ring *r,
85 void **obj_table, unsigned int n,
86 unsigned int *available);
87 unsigned int (*felem)(struct rte_ring *r, void *obj_table,
88 unsigned int esize, unsigned int n,
89 unsigned int *available);
91 } test_enqdeq_impl[] = {
93 .desc = "MP/MC sync mode",
94 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
97 .flegacy = rte_ring_enqueue_bulk,
98 .felem = rte_ring_enqueue_bulk_elem,
101 .flegacy = rte_ring_dequeue_bulk,
102 .felem = rte_ring_dequeue_bulk_elem,
106 .desc = "SP/SC sync mode",
107 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_SPSC,
108 .create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
110 .flegacy = rte_ring_sp_enqueue_bulk,
111 .felem = rte_ring_sp_enqueue_bulk_elem,
114 .flegacy = rte_ring_sc_dequeue_bulk,
115 .felem = rte_ring_sc_dequeue_bulk_elem,
119 .desc = "MP/MC sync mode",
120 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_MPMC,
123 .flegacy = rte_ring_mp_enqueue_bulk,
124 .felem = rte_ring_mp_enqueue_bulk_elem,
127 .flegacy = rte_ring_mc_dequeue_bulk,
128 .felem = rte_ring_mc_dequeue_bulk_elem,
132 .desc = "MP_RTS/MC_RTS sync mode",
133 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
134 .create_flags = RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ,
136 .flegacy = rte_ring_enqueue_bulk,
137 .felem = rte_ring_enqueue_bulk_elem,
140 .flegacy = rte_ring_dequeue_bulk,
141 .felem = rte_ring_dequeue_bulk_elem,
145 .desc = "MP_HTS/MC_HTS sync mode",
146 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
147 .create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
149 .flegacy = rte_ring_enqueue_bulk,
150 .felem = rte_ring_enqueue_bulk_elem,
153 .flegacy = rte_ring_dequeue_bulk,
154 .felem = rte_ring_dequeue_bulk_elem,
158 .desc = "MP/MC sync mode",
159 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
162 .flegacy = rte_ring_enqueue_burst,
163 .felem = rte_ring_enqueue_burst_elem,
166 .flegacy = rte_ring_dequeue_burst,
167 .felem = rte_ring_dequeue_burst_elem,
171 .desc = "SP/SC sync mode",
172 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_SPSC,
173 .create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
175 .flegacy = rte_ring_sp_enqueue_burst,
176 .felem = rte_ring_sp_enqueue_burst_elem,
179 .flegacy = rte_ring_sc_dequeue_burst,
180 .felem = rte_ring_sc_dequeue_burst_elem,
184 .desc = "MP/MC sync mode",
185 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_MPMC,
188 .flegacy = rte_ring_mp_enqueue_burst,
189 .felem = rte_ring_mp_enqueue_burst_elem,
192 .flegacy = rte_ring_mc_dequeue_burst,
193 .felem = rte_ring_mc_dequeue_burst_elem,
197 .desc = "MP_RTS/MC_RTS sync mode",
198 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
199 .create_flags = RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ,
201 .flegacy = rte_ring_enqueue_burst,
202 .felem = rte_ring_enqueue_burst_elem,
205 .flegacy = rte_ring_dequeue_burst,
206 .felem = rte_ring_dequeue_burst_elem,
210 .desc = "MP_HTS/MC_HTS sync mode",
211 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
212 .create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
214 .flegacy = rte_ring_enqueue_burst,
215 .felem = rte_ring_enqueue_burst_elem,
218 .flegacy = rte_ring_dequeue_burst,
219 .felem = rte_ring_dequeue_burst_elem,
/*
 * Dispatch an enqueue through the implementation selected by test_idx:
 * the legacy (void *) API on one path and the _elem variant (with the
 * given element size) on the other. NOTE(review): the selecting
 * condition and closing lines are not visible in this view — presumably
 * it branches on esize == -1 like test_ring_inc_ptr; confirm in the
 * full file.
 */
225 test_ring_enq_impl(struct rte_ring *r, void **obj, int esize, unsigned int n,
226 unsigned int test_idx)
229 return test_enqdeq_impl[test_idx].enq.flegacy(r, obj, n, NULL);
231 return test_enqdeq_impl[test_idx].enq.felem(r, obj, esize, n,
/*
 * Dispatch a dequeue through the implementation selected by test_idx:
 * legacy (void *) API vs the _elem variant. Mirror image of
 * test_ring_enq_impl. NOTE(review): the selecting condition and closing
 * lines are not visible in this view of the file.
 */
236 test_ring_deq_impl(struct rte_ring *r, void **obj, int esize, unsigned int n,
237 unsigned int test_idx)
240 return test_enqdeq_impl[test_idx].deq.flegacy(r, obj, n, NULL);
242 return test_enqdeq_impl[test_idx].deq.felem(r, obj, esize, n,
/*
 * Advance an object-buffer cursor by n elements. For legacy APIs the
 * buffer holds void pointers, so the cursor moves n pointer slots;
 * otherwise the buffer is packed esize-byte elements and the cursor
 * moves n * esize bytes (computed in uint32_t units, since esize is a
 * multiple of 4).
 */
247 test_ring_inc_ptr(void **obj, int esize, unsigned int n)
249 /* Legacy queue APIs? */
251 return ((void **)obj) + n;
253 return (void **)(((uint32_t *)obj) +
254 (n * esize / sizeof(uint32_t)));
/*
 * Fill the source buffer with a deterministic pattern so data can be
 * validated after a round trip through the ring: consecutive index
 * values stored as pointers for the legacy APIs, or a running uint32_t
 * counter across the whole byte range for the _elem APIs.
 */
258 test_ring_mem_init(void *obj, unsigned int count, int esize)
262 /* Legacy queue APIs? */
264 for (i = 0; i < count; i++)
265 ((void **)obj)[i] = (void *)(unsigned long)i;
267 for (i = 0; i < (count * esize / sizeof(uint32_t)); i++)
268 ((uint32_t *)obj)[i] = i;
/*
 * Compare `size` bytes of src and dst (the data enqueued vs the data
 * dequeued). On mismatch, hexdump both buffers and print a diagnostic
 * so failures are debuggable; the memcmp result is propagated to the
 * caller.
 */
272 test_ring_mem_cmp(void *src, void *dst, unsigned int size)
276 ret = memcmp(src, dst, size);
278 rte_hexdump(stdout, "src", src, size);
279 rte_hexdump(stdout, "dst", dst, size);
280 printf("data after dequeue is not the same\n");
/*
 * Print a one-line banner describing the test about to run: API family
 * (legacy vs element APIs, with the element size), then — unless the
 * caller passes TEST_RING_IGNORE_API_TYPE — the threading mode
 * (default/SPSC/MPMC) and operation granularity (single/bulk/burst).
 */
287 test_ring_print_test_string(const char *istr, unsigned int api_type, int esize)
289 printf("\n%s: ", istr);
292 printf("legacy APIs: ");
294 printf("elem APIs: element size %dB ", esize);
296 if (api_type == TEST_RING_IGNORE_API_TYPE)
299 if (api_type & TEST_RING_THREAD_DEF)
300 printf(": default enqueue/dequeue: ");
301 else if (api_type & TEST_RING_THREAD_SPSC)
303 else if (api_type & TEST_RING_THREAD_MPMC)
306 if (api_type & TEST_RING_ELEM_SINGLE)
308 else if (api_type & TEST_RING_ELEM_BULK)
310 else if (api_type & TEST_RING_ELEM_BURST)
315 * Various negative test cases.
/*
 * Exercise ring-creation failure paths: invalid element size (not a
 * multiple of 4), non-power-of-2 ring counts, counts exceeding
 * RTE_RING_SZ_MASK, lookup of a non-existent ring, and creation of a
 * ring whose name is already in use. Each case must fail in the
 * documented way; a successful creation where failure is expected is a
 * test failure.
 */
318 test_ring_negative_tests(void)
320 struct rte_ring *rp = NULL;
321 struct rte_ring *rt = NULL;
324 /* Test with esize not a multiple of 4 */
325 rp = test_ring_create("test_bad_element_size", 23,
326 RING_SIZE + 1, SOCKET_ID_ANY, 0);
328 printf("Test failed to detect invalid element size\n");
333 for (i = 0; i < RTE_DIM(esize); i++) {
334 /* Test if ring size is not power of 2 */
335 rp = test_ring_create("test_bad_ring_size", esize[i],
336 RING_SIZE + 1, SOCKET_ID_ANY, 0);
338 printf("Test failed to detect odd count\n");
342 /* Test if ring size is exceeding the limit */
343 rp = test_ring_create("test_bad_ring_size", esize[i],
344 RTE_RING_SZ_MASK + 1, SOCKET_ID_ANY, 0);
346 printf("Test failed to detect limits\n");
350 /* Tests if lookup returns NULL on non-existing ring */
351 rp = rte_ring_lookup("ring_not_found");
352 if (rp != NULL && rte_errno != ENOENT) {
353 printf("Test failed to detect NULL ring lookup\n");
357 /* Test to if a non-power of 2 count causes the create
358 * function to fail correctly
360 rp = test_ring_create("test_ring_count", esize[i], 4097,
365 rp = test_ring_create("test_ring_negative", esize[i], RING_SIZE,
367 RING_F_SP_ENQ | RING_F_SC_DEQ);
369 printf("test_ring_negative fail to create ring\n");
373 TEST_RING_VERIFY(rte_ring_lookup("test_ring_negative") == rp,
376 TEST_RING_VERIFY(rte_ring_empty(rp) == 1, rp, goto test_fail);
378 /* Tests if it would always fail to create ring with an used
381 rt = test_ring_create("test_ring_negative", esize[i], RING_SIZE,
399 * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
400 * Random number of elements are enqueued and dequeued.
/*
 * For every element size: create a ring with the flags of the selected
 * implementation, run TEST_RING_FULL_EMPTY_ITER iterations of a random
 * enqueue/dequeue shift, then fill the ring to capacity (rsz =
 * RING_SIZE - 1 usable slots) and drain it, checking the
 * full/empty/count/free_count invariants at both extremes. Finally the
 * dequeued data is compared against the source pattern.
 */
403 test_ring_burst_bulk_tests1(unsigned int test_idx)
406 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
408 unsigned int i, j, temp_sz;
410 const unsigned int rsz = RING_SIZE - 1;
412 for (i = 0; i < RTE_DIM(esize); i++) {
413 test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
414 test_enqdeq_impl[test_idx].api_type, esize[i]);
416 /* Create the ring */
417 r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
418 RING_SIZE, SOCKET_ID_ANY,
419 test_enqdeq_impl[test_idx].create_flags);
421 /* alloc dummy object pointers */
422 src = test_ring_calloc(RING_SIZE * 2, esize[i]);
425 test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
428 /* alloc some room for copied objects */
429 dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
434 printf("Random full/empty test\n");
436 for (j = 0; j != TEST_RING_FULL_EMPTY_ITER; j++) {
437 /* random shift in the ring */
438 rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
439 printf("%s: iteration %u, random shift: %u;\n",
441 ret = test_ring_enq_impl(r, cur_src, esize[i], rand,
443 TEST_RING_VERIFY(ret != 0, r, goto fail);
445 ret = test_ring_deq_impl(r, cur_dst, esize[i], rand,
447 TEST_RING_VERIFY(ret == rand, r, goto fail);
450 ret = test_ring_enq_impl(r, cur_src, esize[i], rsz,
452 TEST_RING_VERIFY(ret != 0, r, goto fail);
454 TEST_RING_VERIFY(rte_ring_free_count(r) == 0, r, goto fail);
455 TEST_RING_VERIFY(rsz == rte_ring_count(r), r, goto fail);
456 TEST_RING_VERIFY(rte_ring_full(r), r, goto fail);
457 TEST_RING_VERIFY(rte_ring_empty(r) == 0, r, goto fail);
460 ret = test_ring_deq_impl(r, cur_dst, esize[i], rsz,
462 TEST_RING_VERIFY(ret == (int)rsz, r, goto fail);
464 TEST_RING_VERIFY(rsz == rte_ring_free_count(r), r, goto fail);
465 TEST_RING_VERIFY(rte_ring_count(r) == 0, r, goto fail);
466 TEST_RING_VERIFY(rte_ring_full(r) == 0, r, goto fail);
467 TEST_RING_VERIFY(rte_ring_empty(r), r, goto fail);
470 temp_sz = rsz * sizeof(void *);
472 temp_sz = rsz * esize[i];
473 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
474 temp_sz) == 0, r, goto fail);
477 /* Free memory before test completed */
495 * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
496 * Sequence of simple enqueues/dequeues and validate the enqueued and
/*
 * For every element size: enqueue 1, 2, then MAX_BULK objects and
 * dequeue them back in the same sizes, advancing the source/destination
 * cursors with test_ring_inc_ptr after each step. The dequeued bytes
 * are then compared against the source pattern over the range actually
 * transferred (RTE_PTR_DIFF of the dst cursors).
 */
500 test_ring_burst_bulk_tests2(unsigned int test_idx)
503 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
507 for (i = 0; i < RTE_DIM(esize); i++) {
508 test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
509 test_enqdeq_impl[test_idx].api_type, esize[i]);
511 /* Create the ring */
512 r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
513 RING_SIZE, SOCKET_ID_ANY,
514 test_enqdeq_impl[test_idx].create_flags);
516 /* alloc dummy object pointers */
517 src = test_ring_calloc(RING_SIZE * 2, esize[i]);
520 test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
523 /* alloc some room for copied objects */
524 dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
529 printf("enqueue 1 obj\n");
530 ret = test_ring_enq_impl(r, cur_src, esize[i], 1, test_idx);
531 TEST_RING_VERIFY(ret == 1, r, goto fail);
532 cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
534 printf("enqueue 2 objs\n");
535 ret = test_ring_enq_impl(r, cur_src, esize[i], 2, test_idx);
536 TEST_RING_VERIFY(ret == 2, r, goto fail);
537 cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);
539 printf("enqueue MAX_BULK objs\n");
540 ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
542 TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
543 cur_src = test_ring_inc_ptr(cur_src, esize[i], MAX_BULK);
545 printf("dequeue 1 obj\n");
546 ret = test_ring_deq_impl(r, cur_dst, esize[i], 1, test_idx);
547 TEST_RING_VERIFY(ret == 1, r, goto fail);
548 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 1);
550 printf("dequeue 2 objs\n");
551 ret = test_ring_deq_impl(r, cur_dst, esize[i], 2, test_idx);
552 TEST_RING_VERIFY(ret == 2, r, goto fail);
553 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);
555 printf("dequeue MAX_BULK objs\n");
556 ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
558 TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
559 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], MAX_BULK);
562 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
563 RTE_PTR_DIFF(cur_dst, dst)) == 0,
566 /* Free memory before test completed */
584 * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
585 * Enqueue and dequeue to cover the entire ring length.
/*
 * For every element size: alternate MAX_BULK enqueues and MAX_BULK
 * dequeues RING_SIZE/MAX_BULK times so the head/tail indices wrap the
 * whole ring, then verify the dequeued data matches the source over the
 * transferred range.
 */
588 test_ring_burst_bulk_tests3(unsigned int test_idx)
591 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
595 for (i = 0; i < RTE_DIM(esize); i++) {
596 test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
597 test_enqdeq_impl[test_idx].api_type, esize[i]);
599 /* Create the ring */
600 r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
601 RING_SIZE, SOCKET_ID_ANY,
602 test_enqdeq_impl[test_idx].create_flags);
604 /* alloc dummy object pointers */
605 src = test_ring_calloc(RING_SIZE * 2, esize[i]);
608 test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
611 /* alloc some room for copied objects */
612 dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
617 printf("fill and empty the ring\n");
618 for (j = 0; j < RING_SIZE / MAX_BULK; j++) {
619 ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
621 TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
622 cur_src = test_ring_inc_ptr(cur_src, esize[i],
625 ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
627 TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
628 cur_dst = test_ring_inc_ptr(cur_dst, esize[i],
633 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
634 RTE_PTR_DIFF(cur_dst, dst)) == 0,
637 /* Free memory before test completed */
655 * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
656 * Enqueue till the ring is full and dequeue till the ring becomes empty.
/*
 * For every element size: fill the ring almost to capacity, then push
 * the boundary cases — bulk APIs must enqueue/dequeue the exact count
 * requested while burst APIs may return a partial count (hence the
 * num_elems adjustment), an enqueue on a full ring must return 0, and
 * full/empty predicates must hold at each extreme. Finally the dequeued
 * data is validated against the source pattern.
 */
659 test_ring_burst_bulk_tests4(unsigned int test_idx)
662 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
665 unsigned int api_type, num_elems;
667 api_type = test_enqdeq_impl[test_idx].api_type;
669 for (i = 0; i < RTE_DIM(esize); i++) {
670 test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
671 test_enqdeq_impl[test_idx].api_type, esize[i]);
673 /* Create the ring */
674 r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
675 RING_SIZE, SOCKET_ID_ANY,
676 test_enqdeq_impl[test_idx].create_flags);
678 /* alloc dummy object pointers */
679 src = test_ring_calloc(RING_SIZE * 2, esize[i]);
682 test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
685 /* alloc some room for copied objects */
686 dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
691 printf("Test enqueue without enough memory space\n");
692 for (j = 0; j < (RING_SIZE/MAX_BULK - 1); j++) {
693 ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
695 TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
696 cur_src = test_ring_inc_ptr(cur_src, esize[i],
700 printf("Enqueue 2 objects, free entries = MAX_BULK - 2\n");
701 ret = test_ring_enq_impl(r, cur_src, esize[i], 2, test_idx);
702 TEST_RING_VERIFY(ret == 2, r, goto fail);
703 cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);
705 printf("Enqueue the remaining entries = MAX_BULK - 3\n");
706 /* Bulk APIs enqueue exact number of elements */
707 if ((api_type & TEST_RING_ELEM_BULK) == TEST_RING_ELEM_BULK)
708 num_elems = MAX_BULK - 3;
710 num_elems = MAX_BULK;
711 /* Always one free entry left */
712 ret = test_ring_enq_impl(r, cur_src, esize[i], num_elems,
714 TEST_RING_VERIFY(ret == MAX_BULK - 3, r, goto fail);
715 cur_src = test_ring_inc_ptr(cur_src, esize[i], MAX_BULK - 3);
717 printf("Test if ring is full\n");
718 TEST_RING_VERIFY(rte_ring_full(r) == 1, r, goto fail);
720 printf("Test enqueue for a full entry\n");
721 ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
723 TEST_RING_VERIFY(ret == 0, r, goto fail);
725 printf("Test dequeue without enough objects\n");
726 for (j = 0; j < RING_SIZE / MAX_BULK - 1; j++) {
727 ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
729 TEST_RING_VERIFY(ret == MAX_BULK, r, goto fail);
730 cur_dst = test_ring_inc_ptr(cur_dst, esize[i],
734 /* Available memory space for the exact MAX_BULK entries */
735 ret = test_ring_deq_impl(r, cur_dst, esize[i], 2, test_idx);
736 TEST_RING_VERIFY(ret == 2, r, goto fail);
737 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);
739 /* Bulk APIs enqueue exact number of elements */
740 if ((api_type & TEST_RING_ELEM_BULK) == TEST_RING_ELEM_BULK)
741 num_elems = MAX_BULK - 3;
743 num_elems = MAX_BULK;
744 ret = test_ring_deq_impl(r, cur_dst, esize[i], num_elems,
746 TEST_RING_VERIFY(ret == MAX_BULK - 3, r, goto fail);
747 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], MAX_BULK - 3);
749 printf("Test if ring is empty\n");
750 /* Check if ring is empty */
751 TEST_RING_VERIFY(rte_ring_empty(r) == 1, r, goto fail);
754 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
755 RTE_PTR_DIFF(cur_dst, dst)) == 0,
758 /* Free memory before test completed */
776 * Test default, single element, bulk and burst APIs
/*
 * Basic coverage of the generic test_ring_enqueue/test_ring_dequeue
 * wrappers on an SP/SC ring, for every element size: fill and drain the
 * ring one element at a time (single-element API returns 0 on success),
 * validate the data, then reset the cursors and cover a 2-element burst
 * round trip and a 2-element bulk round trip (both return the element
 * count), validating the data again.
 */
779 test_ring_basic_ex(void)
783 struct rte_ring *rp = NULL;
784 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
786 for (i = 0; i < RTE_DIM(esize); i++) {
787 rp = test_ring_create("test_ring_basic_ex", esize[i], RING_SIZE,
789 RING_F_SP_ENQ | RING_F_SC_DEQ);
791 printf("%s: failed to create ring\n", __func__);
795 /* alloc dummy object pointers */
796 src = test_ring_calloc(RING_SIZE, esize[i]);
798 printf("%s: failed to alloc src memory\n", __func__);
801 test_ring_mem_init(src, RING_SIZE, esize[i]);
804 /* alloc some room for copied objects */
805 dst = test_ring_calloc(RING_SIZE, esize[i]);
807 printf("%s: failed to alloc dst memory\n", __func__);
812 TEST_RING_VERIFY(rte_ring_lookup("test_ring_basic_ex") == rp,
815 TEST_RING_VERIFY(rte_ring_empty(rp) == 1, rp, goto fail_test);
817 printf("%u ring entries are now free\n",
818 rte_ring_free_count(rp));
820 for (j = 0; j < RING_SIZE - 1; j++) {
821 ret = test_ring_enqueue(rp, cur_src, esize[i], 1,
822 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
823 TEST_RING_VERIFY(ret == 0, rp, goto fail_test);
824 cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
827 TEST_RING_VERIFY(rte_ring_full(rp) == 1, rp, goto fail_test);
829 for (j = 0; j < RING_SIZE - 1; j++) {
830 ret = test_ring_dequeue(rp, cur_dst, esize[i], 1,
831 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
832 TEST_RING_VERIFY(ret == 0, rp, goto fail_test);
833 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 1);
836 TEST_RING_VERIFY(rte_ring_empty(rp) == 1, rp, goto fail_test);
839 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
840 RTE_PTR_DIFF(cur_dst, dst)) == 0,
843 /* Following tests use the configured flags to decide
846 /* reset memory of dst */
847 memset(dst, 0, RTE_PTR_DIFF(cur_dst, dst));
849 /* reset cur_src and cur_dst */
853 /* Covering the ring burst operation */
854 ret = test_ring_enqueue(rp, cur_src, esize[i], 2,
855 TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
856 TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
857 cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);
859 ret = test_ring_dequeue(rp, cur_dst, esize[i], 2,
860 TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
861 TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
862 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);
864 /* Covering the ring bulk operation */
865 ret = test_ring_enqueue(rp, cur_src, esize[i], 2,
866 TEST_RING_THREAD_DEF | TEST_RING_ELEM_BULK);
867 TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
868 cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);
870 ret = test_ring_dequeue(rp, cur_dst, esize[i], 2,
871 TEST_RING_THREAD_DEF | TEST_RING_ELEM_BULK);
872 TEST_RING_VERIFY(ret == 2, rp, goto fail_test);
873 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);
876 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
877 RTE_PTR_DIFF(cur_dst, dst)) == 0,
898 * Basic test cases with exact size ring.
/*
 * Compare a standard ring against one created with RING_F_EXACT_SZ at
 * the same requested size (16). The exact-size ring must have capacity
 * equal to the request (16 usable slots) while the standard ring holds
 * one less (15); this is probed by enqueuing until the standard ring
 * returns -ENOBUFS while the exact-size ring still accepts an element.
 * Object buffers are deliberately offset by one byte to also exercise
 * unaligned addresses. Dequeued data is validated at the end.
 */
901 test_ring_with_exact_size(void)
903 struct rte_ring *std_r = NULL, *exact_sz_r = NULL;
904 void **src_orig = NULL, **dst_orig = NULL;
905 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
906 const unsigned int ring_sz = 16;
910 for (i = 0; i < RTE_DIM(esize); i++) {
911 test_ring_print_test_string("Test exact size ring",
912 TEST_RING_IGNORE_API_TYPE,
915 std_r = test_ring_create("std", esize[i], ring_sz,
917 RING_F_SP_ENQ | RING_F_SC_DEQ);
919 printf("%s: error, can't create std ring\n", __func__);
922 exact_sz_r = test_ring_create("exact sz", esize[i], ring_sz,
924 RING_F_SP_ENQ | RING_F_SC_DEQ |
926 if (exact_sz_r == NULL) {
927 printf("%s: error, can't create exact size ring\n",
932 /* alloc object pointers. Allocate one extra object
933 * and create an unaligned address.
935 src_orig = test_ring_calloc(17, esize[i]);
936 if (src_orig == NULL)
938 test_ring_mem_init(src_orig, 17, esize[i]);
939 src = (void **)((uintptr_t)src_orig + 1);
942 dst_orig = test_ring_calloc(17, esize[i]);
943 if (dst_orig == NULL)
945 dst = (void **)((uintptr_t)dst_orig + 1);
949 * Check that the exact size ring is bigger than the
952 TEST_RING_VERIFY(rte_ring_get_size(std_r) <=
953 rte_ring_get_size(exact_sz_r),
954 std_r, goto test_fail);
957 * check that the exact_sz_ring can hold one more element
958 * than the standard ring. (16 vs 15 elements)
960 for (j = 0; j < ring_sz - 1; j++) {
961 ret = test_ring_enqueue(std_r, cur_src, esize[i], 1,
962 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
963 TEST_RING_VERIFY(ret == 0, std_r, goto test_fail);
964 ret = test_ring_enqueue(exact_sz_r, cur_src, esize[i], 1,
965 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
966 TEST_RING_VERIFY(ret == 0, exact_sz_r, goto test_fail);
967 cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
969 ret = test_ring_enqueue(std_r, cur_src, esize[i], 1,
970 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
971 TEST_RING_VERIFY(ret == -ENOBUFS, std_r, goto test_fail);
972 ret = test_ring_enqueue(exact_sz_r, cur_src, esize[i], 1,
973 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
974 TEST_RING_VERIFY(ret != -ENOBUFS, exact_sz_r, goto test_fail);
975 cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
977 /* check that dequeue returns the expected number of elements */
978 ret = test_ring_dequeue(exact_sz_r, cur_dst, esize[i], ring_sz,
979 TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
980 TEST_RING_VERIFY(ret == (int)ring_sz, exact_sz_r, goto test_fail);
981 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], ring_sz);
983 /* check that the capacity function returns expected value */
984 TEST_RING_VERIFY(rte_ring_get_capacity(exact_sz_r) == ring_sz,
985 exact_sz_r, goto test_fail);
988 TEST_RING_VERIFY(test_ring_mem_cmp(src, dst,
989 RTE_PTR_DIFF(cur_dst, dst)) == 0,
990 exact_sz_r, goto test_fail);
994 rte_ring_free(std_r);
995 rte_ring_free(exact_sz_r);
1007 rte_ring_free(std_r);
1008 rte_ring_free(exact_sz_r);
/*
 * Top-level test driver (registered below as `ring_autotest`): runs the
 * negative tests, the basic and exact-size tests, then iterates every
 * entry of test_enqdeq_impl through the four burst/bulk test stages,
 * and finally dumps the ring list. NOTE(review): the function signature
 * and the error-handling lines between the visible calls are not
 * visible in this view of the file.
 */
1018 /* Negative test cases */
1019 if (test_ring_negative_tests() < 0)
1022 /* Some basic operations */
1023 if (test_ring_basic_ex() < 0)
1026 if (test_ring_with_exact_size() < 0)
1029 /* Burst and bulk operations with sp/sc, mp/mc and default.
1030 * The test cases are split into smaller test cases to
1031 * help clang compile faster.
1033 for (i = 0; i != RTE_DIM(test_enqdeq_impl); i++) {
1036 rc = test_ring_burst_bulk_tests1(i);
1040 rc = test_ring_burst_bulk_tests2(i);
1044 rc = test_ring_burst_bulk_tests3(i);
1048 rc = test_ring_burst_bulk_tests4(i);
1053 /* dump the ring status */
1054 rte_ring_list_dump(stdout);
1063 REGISTER_TEST_COMMAND(ring_autotest, test_ring);