1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
12 #include <sys/queue.h>
14 #include <rte_common.h>
16 #include <rte_memory.h>
17 #include <rte_launch.h>
18 #include <rte_cycles.h>
20 #include <rte_per_lcore.h>
21 #include <rte_lcore.h>
22 #include <rte_atomic.h>
23 #include <rte_branch_prediction.h>
24 #include <rte_malloc.h>
26 #include <rte_ring_elem.h>
27 #include <rte_random.h>
28 #include <rte_errno.h>
29 #include <rte_hexdump.h>
32 #include "test_ring.h"
38 * #. Functional tests. Tests single/bulk/burst, default/SPSC/MPMC,
39 * legacy/custom element size (4B, 8B, 16B, 20B) APIs.
40 * Some tests incorporate unaligned addresses for objects.
41 * The enqueued/dequeued data is validated for correctness.
43 * #. Performance tests are in test_ring_perf.c
/* Number of slots in the rings created by the functional tests below. */
46 #define RING_SIZE 4096
/*
 * Fail-fast assertion used throughout the burst/bulk tests: when 'exp' is
 * false it prints the failed condition with file context and dumps the ring
 * named 'r' from the caller's scope.
 * NOTE(review): this listing is truncated — the macro's enclosing
 * do/if/return lines are missing; do not edit without the full source.
 */
49 #define TEST_RING_VERIFY(exp) \
51 printf("error at %s:%d\tcondition " #exp " failed\n", \
52 __func__, __LINE__); \
53 rte_ring_dump(stdout, r); \
57 #define TEST_RING_FULL_EMTPY_ITER 8 /* NOTE(review): "EMTPY" is a long-standing typo for "EMPTY"; renaming requires fixing every use site */
59 static const int esize[] = {-1, 4, 8, 16, 20};
/*
 * Table of enqueue/dequeue implementations under test. Each entry pairs a
 * legacy (void *) function with its custom-element-size (_elem) counterpart
 * for one sync mode (default, SP/SC, MP/MC, RTS, HTS) and one API class
 * (bulk or burst). The driver functions below pick the right member based
 * on esize and index into this table with test_idx.
 * NOTE(review): this listing is truncated — the struct header and the
 * enq/deq sub-struct delimiters are missing from view.
 */
64 uint32_t create_flags;
66 unsigned int (*flegacy)(struct rte_ring *r,
67 void * const *obj_table, unsigned int n,
68 unsigned int *free_space);
69 unsigned int (*felem)(struct rte_ring *r, const void *obj_table,
70 unsigned int esize, unsigned int n,
71 unsigned int *free_space);
74 unsigned int (*flegacy)(struct rte_ring *r,
75 void **obj_table, unsigned int n,
76 unsigned int *available);
77 unsigned int (*felem)(struct rte_ring *r, void *obj_table,
78 unsigned int esize, unsigned int n,
79 unsigned int *available);
81 } test_enqdeq_impl[] = {
/* Bulk APIs: default (MP/MC) sync mode */
83 .desc = "MP/MC sync mode",
84 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
87 .flegacy = rte_ring_enqueue_bulk,
88 .felem = rte_ring_enqueue_bulk_elem,
91 .flegacy = rte_ring_dequeue_bulk,
92 .felem = rte_ring_dequeue_bulk_elem,
/* Bulk APIs: explicit single-producer/single-consumer */
96 .desc = "SP/SC sync mode",
97 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_SPSC,
98 .create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
100 .flegacy = rte_ring_sp_enqueue_bulk,
101 .felem = rte_ring_sp_enqueue_bulk_elem,
104 .flegacy = rte_ring_sc_dequeue_bulk,
105 .felem = rte_ring_sc_dequeue_bulk_elem,
/* Bulk APIs: explicit multi-producer/multi-consumer */
109 .desc = "MP/MC sync mode",
110 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_MPMC,
113 .flegacy = rte_ring_mp_enqueue_bulk,
114 .felem = rte_ring_mp_enqueue_bulk_elem,
117 .flegacy = rte_ring_mc_dequeue_bulk,
118 .felem = rte_ring_mc_dequeue_bulk_elem,
/* Bulk APIs: relaxed tail sync (RTS) mode, selected via create flags */
122 .desc = "MP_RTS/MC_RTS sync mode",
123 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
124 .create_flags = RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ,
126 .flegacy = rte_ring_enqueue_bulk,
127 .felem = rte_ring_enqueue_bulk_elem,
130 .flegacy = rte_ring_dequeue_bulk,
131 .felem = rte_ring_dequeue_bulk_elem,
/* Bulk APIs: head/tail sync (HTS) mode, selected via create flags */
135 .desc = "MP_HTS/MC_HTS sync mode",
136 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
137 .create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
139 .flegacy = rte_ring_enqueue_bulk,
140 .felem = rte_ring_enqueue_bulk_elem,
143 .flegacy = rte_ring_dequeue_bulk,
144 .felem = rte_ring_dequeue_bulk_elem,
/* Burst APIs: default (MP/MC) sync mode */
148 .desc = "MP/MC sync mode",
149 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
152 .flegacy = rte_ring_enqueue_burst,
153 .felem = rte_ring_enqueue_burst_elem,
156 .flegacy = rte_ring_dequeue_burst,
157 .felem = rte_ring_dequeue_burst_elem,
/* Burst APIs: explicit single-producer/single-consumer */
161 .desc = "SP/SC sync mode",
162 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_SPSC,
163 .create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
165 .flegacy = rte_ring_sp_enqueue_burst,
166 .felem = rte_ring_sp_enqueue_burst_elem,
169 .flegacy = rte_ring_sc_dequeue_burst,
170 .felem = rte_ring_sc_dequeue_burst_elem,
/* Burst APIs: explicit multi-producer/multi-consumer */
174 .desc = "MP/MC sync mode",
175 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_MPMC,
178 .flegacy = rte_ring_mp_enqueue_burst,
179 .felem = rte_ring_mp_enqueue_burst_elem,
182 .flegacy = rte_ring_mc_dequeue_burst,
183 .felem = rte_ring_mc_dequeue_burst_elem,
/* Burst APIs: relaxed tail sync (RTS) mode, selected via create flags */
187 .desc = "MP_RTS/MC_RTS sync mode",
188 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
189 .create_flags = RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ,
191 .flegacy = rte_ring_enqueue_burst,
192 .felem = rte_ring_enqueue_burst_elem,
195 .flegacy = rte_ring_dequeue_burst,
196 .felem = rte_ring_dequeue_burst_elem,
/* Burst APIs: head/tail sync (HTS) mode, selected via create flags */
200 .desc = "MP_HTS/MC_HTS sync mode",
201 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
202 .create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
204 .flegacy = rte_ring_enqueue_burst,
205 .felem = rte_ring_enqueue_burst_elem,
208 .flegacy = rte_ring_dequeue_burst,
209 .felem = rte_ring_dequeue_burst_elem,
215 test_ring_enq_impl(struct rte_ring *r, void **obj, int esize, unsigned int n,
216 unsigned int test_idx)
219 return test_enqdeq_impl[test_idx].enq.flegacy(r, obj, n, NULL);
221 return test_enqdeq_impl[test_idx].enq.felem(r, obj, esize, n,
226 test_ring_deq_impl(struct rte_ring *r, void **obj, int esize, unsigned int n,
227 unsigned int test_idx)
230 return test_enqdeq_impl[test_idx].deq.flegacy(r, obj, n, NULL);
232 return test_enqdeq_impl[test_idx].deq.felem(r, obj, esize, n,
/*
 * Advance an object-array cursor by n elements.
 *
 * @param obj   current cursor into the object array
 * @param esize element size in bytes; -1 means legacy API (void * elements)
 * @param n     number of elements to skip
 * @return cursor advanced by n elements
 *
 * For the elem APIs, esize is a multiple of 4 so the stride is expressed
 * in uint32_t words (n * esize / 4).
 * NOTE(review): reconstructed from a garbled listing (fused line numbers,
 * elided lines); verify against the upstream source before merging.
 */
static void **
test_ring_inc_ptr(void **obj, int esize, unsigned int n)
{
	/* Legacy queue APIs? */
	if (esize == -1)
		return ((void **)obj) + n;
	else
		return (void **)(((uint32_t *)obj) +
					(n * esize / sizeof(uint32_t)));
}
/*
 * Fill an object array with a deterministic pattern so that enqueued and
 * dequeued data can later be compared with memcmp().
 *
 * @param obj   array to initialize
 * @param count number of elements
 * @param esize element size in bytes; -1 means legacy API (void * elements)
 *
 * Legacy mode stores the index as a fake pointer per slot; elem mode fills
 * count * esize bytes with an incrementing uint32_t pattern.
 * NOTE(review): reconstructed from a garbled listing (fused line numbers,
 * elided lines); verify against the upstream source before merging.
 */
static void
test_ring_mem_init(void *obj, unsigned int count, int esize)
{
	unsigned int i;

	/* Legacy queue APIs? */
	if (esize == -1)
		for (i = 0; i < count; i++)
			((void **)obj)[i] = (void *)(unsigned long)i;
	else
		for (i = 0; i < (count * esize / sizeof(uint32_t)); i++)
			((uint32_t *)obj)[i] = i;
}
262 test_ring_print_test_string(const char *istr, unsigned int api_type, int esize)
264 printf("\n%s: ", istr);
267 printf("legacy APIs: ");
269 printf("elem APIs: element size %dB ", esize);
271 if (api_type == TEST_RING_IGNORE_API_TYPE)
274 if (api_type & TEST_RING_THREAD_DEF)
275 printf(": default enqueue/dequeue: ");
276 else if (api_type & TEST_RING_THREAD_SPSC)
278 else if (api_type & TEST_RING_THREAD_MPMC)
281 if (api_type & TEST_RING_ELEM_SINGLE)
283 else if (api_type & TEST_RING_ELEM_BULK)
285 else if (api_type & TEST_RING_ELEM_BURST)
290 * Various negative test cases.
/*
 * Negative tests: verify that ring creation rejects invalid element sizes,
 * non-power-of-2 counts and oversized counts, that lookup of a missing ring
 * fails with ENOENT, and that creating a ring with an already-used name fails.
 * NOTE(review): this listing is truncated — error-path returns, closing
 * braces and the tail of the function are missing from view.
 */
293 test_ring_negative_tests(void)
295 struct rte_ring *rp = NULL;
296 struct rte_ring *rt = NULL;
299 /* Test with esize not a multiple of 4 */
300 rp = test_ring_create("test_bad_element_size", 23,
301 RING_SIZE + 1, SOCKET_ID_ANY, 0);
303 printf("Test failed to detect invalid element size\n");
308 for (i = 0; i < RTE_DIM(esize); i++) {
309 /* Test if ring size is not power of 2 */
310 rp = test_ring_create("test_bad_ring_size", esize[i],
311 RING_SIZE + 1, SOCKET_ID_ANY, 0);
313 printf("Test failed to detect odd count\n");
317 /* Test if ring size is exceeding the limit */
318 rp = test_ring_create("test_bad_ring_size", esize[i],
319 RTE_RING_SZ_MASK + 1, SOCKET_ID_ANY, 0);
321 printf("Test failed to detect limits\n");
325 /* Tests if lookup returns NULL on non-existing ring */
326 rp = rte_ring_lookup("ring_not_found");
327 if (rp != NULL && rte_errno != ENOENT) {
328 printf("Test failed to detect NULL ring lookup\n");
332 /* Test to if a non-power of 2 count causes the create
333 * function to fail correctly
335 rp = test_ring_create("test_ring_count", esize[i], 4097,
340 rp = test_ring_create("test_ring_negative", esize[i], RING_SIZE,
342 RING_F_SP_ENQ | RING_F_SC_DEQ);
344 printf("test_ring_negative fail to create ring\n");
348 if (rte_ring_lookup("test_ring_negative") != rp)
351 if (rte_ring_empty(rp) != 1) {
/* NOTE(review): "nagative" below is a typo for "negative" in a runtime
 * message; fix upstream rather than in this truncated listing. */
352 printf("test_ring_nagative ring is not empty but it should be\n");
356 /* Tests if it would always fail to create ring with an used
359 rt = test_ring_create("test_ring_negative", esize[i], RING_SIZE,
377 * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
378 * Random number of elements are enqueued and dequeued.
/*
 * Random full/empty test: for each element size, repeatedly enqueue and
 * dequeue a random number of objects, then drive the ring to completely
 * full and completely empty, checking the count/free-count/full/empty
 * accessors at each extreme and finally memcmp'ing src against dst.
 * NOTE(review): this listing is truncated — allocation checks, inner
 * cursor updates, frees and the function tail are missing from view.
 */
381 test_ring_burst_bulk_tests1(unsigned int test_idx)
384 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
388 const unsigned int rsz = RING_SIZE - 1;
390 for (i = 0; i < RTE_DIM(esize); i++) {
391 test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
392 test_enqdeq_impl[test_idx].api_type, esize[i]);
394 /* Create the ring */
395 r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
396 RING_SIZE, SOCKET_ID_ANY,
397 test_enqdeq_impl[test_idx].create_flags);
399 /* alloc dummy object pointers */
400 src = test_ring_calloc(RING_SIZE * 2, esize[i]);
403 test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
406 /* alloc some room for copied objects */
407 dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
412 printf("Random full/empty test\n");
414 for (j = 0; j != TEST_RING_FULL_EMTPY_ITER; j++) {
415 /* random shift in the ring */
416 rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
417 printf("%s: iteration %u, random shift: %u;\n",
419 ret = test_ring_enq_impl(r, cur_src, esize[i], rand,
421 TEST_RING_VERIFY(ret != 0);
423 ret = test_ring_deq_impl(r, cur_dst, esize[i], rand,
425 TEST_RING_VERIFY(ret == rand);
/* Fill the ring completely (capacity is RING_SIZE - 1). */
428 ret = test_ring_enq_impl(r, cur_src, esize[i], rsz,
430 TEST_RING_VERIFY(ret != 0);
432 TEST_RING_VERIFY(rte_ring_free_count(r) == 0);
433 TEST_RING_VERIFY(rsz == rte_ring_count(r));
434 TEST_RING_VERIFY(rte_ring_full(r));
435 TEST_RING_VERIFY(rte_ring_empty(r) == 0);
/* Drain the ring completely and re-check the accessors. */
438 ret = test_ring_deq_impl(r, cur_dst, esize[i], rsz,
440 TEST_RING_VERIFY(ret == (int)rsz);
441 TEST_RING_VERIFY(rsz == rte_ring_free_count(r));
442 TEST_RING_VERIFY(rte_ring_count(r) == 0);
443 TEST_RING_VERIFY(rte_ring_full(r) == 0);
444 TEST_RING_VERIFY(rte_ring_empty(r));
/* Verify dequeued data matches what was enqueued. */
447 if (esize[i] == -1) {
448 TEST_RING_VERIFY(memcmp(src, dst,
449 rsz * sizeof(void *)) == 0);
451 TEST_RING_VERIFY(memcmp(src, dst,
452 rsz * esize[i]) == 0);
455 /* Free memory before test completed */
473 * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
474 * Sequence of simple enqueues/dequeues and validate the enqueued and
/*
 * Simple enqueue/dequeue sequence: push 1, 2, then MAX_BULK objects,
 * pop them back in the same sizes, and memcmp src against dst to validate
 * the data round-trip for each element size.
 * NOTE(review): this listing is truncated — return-value checks, frees
 * and the function tail are missing from view.
 */
478 test_ring_burst_bulk_tests2(unsigned int test_idx)
481 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
485 for (i = 0; i < RTE_DIM(esize); i++) {
486 test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
487 test_enqdeq_impl[test_idx].api_type, esize[i]);
489 /* Create the ring */
490 r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
491 RING_SIZE, SOCKET_ID_ANY,
492 test_enqdeq_impl[test_idx].create_flags);
494 /* alloc dummy object pointers */
495 src = test_ring_calloc(RING_SIZE * 2, esize[i]);
498 test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
501 /* alloc some room for copied objects */
502 dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
507 printf("enqueue 1 obj\n");
508 ret = test_ring_enq_impl(r, cur_src, esize[i], 1, test_idx);
511 cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
513 printf("enqueue 2 objs\n");
514 ret = test_ring_enq_impl(r, cur_src, esize[i], 2, test_idx);
517 cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);
519 printf("enqueue MAX_BULK objs\n");
520 ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
524 cur_src = test_ring_inc_ptr(cur_src, esize[i], MAX_BULK);
526 printf("dequeue 1 obj\n");
527 ret = test_ring_deq_impl(r, cur_dst, esize[i], 1, test_idx);
530 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 1);
532 printf("dequeue 2 objs\n");
533 ret = test_ring_deq_impl(r, cur_dst, esize[i], 2, test_idx);
536 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);
538 printf("dequeue MAX_BULK objs\n");
539 ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
543 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], MAX_BULK);
/* Validate the round-tripped data byte-for-byte. */
546 if (memcmp(src, dst, RTE_PTR_DIFF(cur_dst, dst))) {
547 rte_hexdump(stdout, "src", src,
548 RTE_PTR_DIFF(cur_src, src));
549 rte_hexdump(stdout, "dst", dst,
550 RTE_PTR_DIFF(cur_dst, dst));
551 printf("data after dequeue is not the same\n");
555 /* Free memory before test completed */
573 * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
574 * Enqueue and dequeue to cover the entire ring length.
/*
 * Fill-and-empty test: enqueue and dequeue MAX_BULK objects at a time until
 * the whole ring length has been covered, then memcmp src against dst.
 * NOTE(review): this listing is truncated — return-value checks, frees
 * and the function tail are missing from view.
 */
577 test_ring_burst_bulk_tests3(unsigned int test_idx)
580 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
584 for (i = 0; i < RTE_DIM(esize); i++) {
585 test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
586 test_enqdeq_impl[test_idx].api_type, esize[i]);
588 /* Create the ring */
589 r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
590 RING_SIZE, SOCKET_ID_ANY,
591 test_enqdeq_impl[test_idx].create_flags);
593 /* alloc dummy object pointers */
594 src = test_ring_calloc(RING_SIZE * 2, esize[i]);
597 test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
600 /* alloc some room for copied objects */
601 dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
606 printf("fill and empty the ring\n");
607 for (j = 0; j < RING_SIZE / MAX_BULK; j++) {
608 ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
612 cur_src = test_ring_inc_ptr(cur_src, esize[i],
615 ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
619 cur_dst = test_ring_inc_ptr(cur_dst, esize[i],
/* Validate the round-tripped data byte-for-byte. */
624 if (memcmp(src, dst, RTE_PTR_DIFF(cur_dst, dst))) {
625 rte_hexdump(stdout, "src", src,
626 RTE_PTR_DIFF(cur_src, src));
627 rte_hexdump(stdout, "dst", dst,
628 RTE_PTR_DIFF(cur_dst, dst));
629 printf("data after dequeue is not the same\n");
633 /* Free memory before test completed */
651 * Burst and bulk operations with sp/sc, mp/mc and default (during creation)
652 * Enqueue till the ring is full and dequeue till the ring becomes empty.
/*
 * Boundary test: enqueue until the ring is full (exercising the partial
 * final enqueue, where bulk APIs must be asked for the exact remaining
 * count while burst APIs may be asked for more), verify a further enqueue
 * fails, then dequeue until empty the same way and memcmp src against dst.
 * NOTE(review): this listing is truncated — return-value checks, frees
 * and the function tail are missing from view.
 */
655 test_ring_burst_bulk_tests4(unsigned int test_idx)
658 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
661 unsigned int api_type, num_elems;
663 api_type = test_enqdeq_impl[test_idx].api_type;
665 for (i = 0; i < RTE_DIM(esize); i++) {
666 test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
667 test_enqdeq_impl[test_idx].api_type, esize[i]);
669 /* Create the ring */
670 r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
671 RING_SIZE, SOCKET_ID_ANY,
672 test_enqdeq_impl[test_idx].create_flags);
674 /* alloc dummy object pointers */
675 src = test_ring_calloc(RING_SIZE * 2, esize[i]);
678 test_ring_mem_init(src, RING_SIZE * 2, esize[i]);
681 /* alloc some room for copied objects */
682 dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
687 printf("Test enqueue without enough memory space\n");
688 for (j = 0; j < (RING_SIZE/MAX_BULK - 1); j++) {
689 ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
693 cur_src = test_ring_inc_ptr(cur_src, esize[i],
697 printf("Enqueue 2 objects, free entries = MAX_BULK - 2\n");
698 ret = test_ring_enq_impl(r, cur_src, esize[i], 2, test_idx);
701 cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);
703 printf("Enqueue the remaining entries = MAX_BULK - 3\n");
704 /* Bulk APIs enqueue exact number of elements */
705 if ((api_type & TEST_RING_ELEM_BULK) == TEST_RING_ELEM_BULK)
706 num_elems = MAX_BULK - 3;
708 num_elems = MAX_BULK;
709 /* Always one free entry left */
710 ret = test_ring_enq_impl(r, cur_src, esize[i], num_elems,
712 if (ret != MAX_BULK - 3)
714 cur_src = test_ring_inc_ptr(cur_src, esize[i], MAX_BULK - 3);
716 printf("Test if ring is full\n");
717 if (rte_ring_full(r) != 1)
720 printf("Test enqueue for a full entry\n");
721 ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
726 printf("Test dequeue without enough objects\n");
727 for (j = 0; j < RING_SIZE / MAX_BULK - 1; j++) {
728 ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
732 cur_dst = test_ring_inc_ptr(cur_dst, esize[i],
736 /* Available memory space for the exact MAX_BULK entries */
737 ret = test_ring_deq_impl(r, cur_dst, esize[i], 2, test_idx);
740 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);
742 /* Bulk APIs enqueue exact number of elements */
743 if ((api_type & TEST_RING_ELEM_BULK) == TEST_RING_ELEM_BULK)
744 num_elems = MAX_BULK - 3;
746 num_elems = MAX_BULK;
747 ret = test_ring_deq_impl(r, cur_dst, esize[i], num_elems,
749 if (ret != MAX_BULK - 3)
751 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], MAX_BULK - 3);
753 printf("Test if ring is empty\n");
754 /* Check if ring is empty */
755 if (rte_ring_empty(r) != 1)
/* Validate the round-tripped data byte-for-byte. */
759 if (memcmp(src, dst, RTE_PTR_DIFF(cur_dst, dst))) {
760 rte_hexdump(stdout, "src", src,
761 RTE_PTR_DIFF(cur_src, src));
762 rte_hexdump(stdout, "dst", dst,
763 RTE_PTR_DIFF(cur_dst, dst));
764 printf("data after dequeue is not the same\n");
768 /* Free memory before test completed */
786 * Test default, single element, bulk and burst APIs
/*
 * Basic API coverage on an SP/SC ring: lookup, empty/full accessors,
 * single-element enqueue/dequeue across the whole ring, then burst and
 * bulk enqueue/dequeue of 2 objects, validating data with memcmp after
 * each phase.
 * NOTE(review): this listing is truncated — error-path returns, cursor
 * resets, frees and the function tail are missing from view.
 */
789 test_ring_basic_ex(void)
793 struct rte_ring *rp = NULL;
794 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
796 for (i = 0; i < RTE_DIM(esize); i++) {
797 rp = test_ring_create("test_ring_basic_ex", esize[i], RING_SIZE,
799 RING_F_SP_ENQ | RING_F_SC_DEQ);
801 printf("%s: failed to create ring\n", __func__);
805 /* alloc dummy object pointers */
806 src = test_ring_calloc(RING_SIZE, esize[i]);
808 printf("%s: failed to alloc src memory\n", __func__);
811 test_ring_mem_init(src, RING_SIZE, esize[i]);
814 /* alloc some room for copied objects */
815 dst = test_ring_calloc(RING_SIZE, esize[i]);
817 printf("%s: failed to alloc dst memory\n", __func__);
822 if (rte_ring_lookup("test_ring_basic_ex") != rp) {
823 printf("%s: failed to find ring\n", __func__);
827 if (rte_ring_empty(rp) != 1) {
828 printf("%s: ring is not empty but it should be\n",
833 printf("%u ring entries are now free\n",
834 rte_ring_free_count(rp));
/* Fill the ring one object at a time (capacity is RING_SIZE - 1). */
836 for (j = 0; j < RING_SIZE - 1; j++) {
837 ret = test_ring_enqueue(rp, cur_src, esize[i], 1,
838 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
840 printf("%s: rte_ring_enqueue fails\n",
844 cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
847 if (rte_ring_full(rp) != 1) {
848 printf("%s: ring is not full but it should be\n",
/* Drain the ring one object at a time. */
853 for (j = 0; j < RING_SIZE - 1; j++) {
854 ret = test_ring_dequeue(rp, cur_dst, esize[i], 1,
855 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
857 printf("%s: rte_ring_dequeue fails\n",
861 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 1);
864 if (rte_ring_empty(rp) != 1) {
865 printf("%s: ring is not empty but it should be\n",
/* Validate single-element round-trip data. */
871 if (memcmp(src, dst, RTE_PTR_DIFF(cur_dst, dst))) {
872 rte_hexdump(stdout, "src", src, RTE_PTR_DIFF(cur_src, src));
873 rte_hexdump(stdout, "dst", dst, RTE_PTR_DIFF(cur_dst, dst));
874 printf("data after dequeue is not the same\n");
878 /* Following tests use the configured flags to decide
881 /* reset memory of dst */
882 memset(dst, 0, RTE_PTR_DIFF(cur_dst, dst));
884 /* reset cur_src and cur_dst */
888 /* Covering the ring burst operation */
889 ret = test_ring_enqueue(rp, cur_src, esize[i], 2,
890 TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
892 printf("%s: rte_ring_enqueue_burst fails\n", __func__);
895 cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);
897 ret = test_ring_dequeue(rp, cur_dst, esize[i], 2,
898 TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
900 printf("%s: rte_ring_dequeue_burst fails\n", __func__);
903 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);
905 /* Covering the ring bulk operation */
906 ret = test_ring_enqueue(rp, cur_src, esize[i], 2,
907 TEST_RING_THREAD_DEF | TEST_RING_ELEM_BULK);
909 printf("%s: rte_ring_enqueue_bulk fails\n", __func__);
912 cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);
914 ret = test_ring_dequeue(rp, cur_dst, esize[i], 2,
915 TEST_RING_THREAD_DEF | TEST_RING_ELEM_BULK);
917 printf("%s: rte_ring_dequeue_bulk fails\n", __func__);
920 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);
/* Validate burst/bulk round-trip data. */
923 if (memcmp(src, dst, RTE_PTR_DIFF(cur_dst, dst))) {
924 rte_hexdump(stdout, "src", src, RTE_PTR_DIFF(cur_src, src));
925 rte_hexdump(stdout, "dst", dst, RTE_PTR_DIFF(cur_dst, dst));
926 printf("data after dequeue is not the same\n");
948 * Basic test cases with exact size ring.
/*
 * Exact-size ring test: create a standard ring and a RING_F_EXACT_SZ ring
 * of the same requested size (16), show the exact-size ring holds one more
 * element (16 vs 15), check capacity reporting, and validate round-tripped
 * data. Object buffers are deliberately offset by 1 byte to exercise
 * unaligned accesses.
 * NOTE(review): this listing is truncated — error-path labels, several
 * condition lines and the function tail are missing from view.
 */
951 test_ring_with_exact_size(void)
953 struct rte_ring *std_r = NULL, *exact_sz_r = NULL;
954 void **src_orig = NULL, **dst_orig = NULL;
955 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
956 const unsigned int ring_sz = 16;
960 for (i = 0; i < RTE_DIM(esize); i++) {
961 test_ring_print_test_string("Test exact size ring",
962 TEST_RING_IGNORE_API_TYPE,
965 std_r = test_ring_create("std", esize[i], ring_sz,
967 RING_F_SP_ENQ | RING_F_SC_DEQ);
969 printf("%s: error, can't create std ring\n", __func__);
972 exact_sz_r = test_ring_create("exact sz", esize[i], ring_sz,
974 RING_F_SP_ENQ | RING_F_SC_DEQ |
976 if (exact_sz_r == NULL) {
977 printf("%s: error, can't create exact size ring\n",
982 /* alloc object pointers. Allocate one extra object
983 * and create an unaligned address.
985 src_orig = test_ring_calloc(17, esize[i]);
986 if (src_orig == NULL)
988 test_ring_mem_init(src_orig, 17, esize[i]);
989 src = (void **)((uintptr_t)src_orig + 1);
992 dst_orig = test_ring_calloc(17, esize[i]);
993 if (dst_orig == NULL)
995 dst = (void **)((uintptr_t)dst_orig + 1);
999 * Check that the exact size ring is bigger than the
1002 if (rte_ring_get_size(std_r) >= rte_ring_get_size(exact_sz_r)) {
1003 printf("%s: error, std ring (size: %u) is not smaller than exact size one (size %u)\n",
1005 rte_ring_get_size(std_r),
1006 rte_ring_get_size(exact_sz_r));
1010 * check that the exact_sz_ring can hold one more element
1011 * than the standard ring. (16 vs 15 elements)
1013 for (j = 0; j < ring_sz - 1; j++) {
1014 ret = test_ring_enqueue(std_r, cur_src, esize[i], 1,
1015 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1017 printf("%s: error, enqueue failed\n", __func__);
1020 ret = test_ring_enqueue(exact_sz_r, cur_src, esize[i], 1,
1021 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1023 printf("%s: error, enqueue failed\n", __func__);
1026 cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
/* Slot 16: must fail on the std ring but succeed on the exact-size one. */
1028 ret = test_ring_enqueue(std_r, cur_src, esize[i], 1,
1029 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1030 if (ret != -ENOBUFS) {
1031 printf("%s: error, unexpected successful enqueue\n",
1035 ret = test_ring_enqueue(exact_sz_r, cur_src, esize[i], 1,
1036 TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
1037 if (ret == -ENOBUFS) {
1038 printf("%s: error, enqueue failed\n", __func__);
1041 cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
1043 /* check that dequeue returns the expected number of elements */
1044 ret = test_ring_dequeue(exact_sz_r, cur_dst, esize[i], ring_sz,
1045 TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
1046 if (ret != (int)ring_sz) {
1047 printf("%s: error, failed to dequeue expected nb of elements\n",
1051 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], ring_sz);
1053 /* check that the capacity function returns expected value */
1054 if (rte_ring_get_capacity(exact_sz_r) != ring_sz) {
1055 printf("%s: error, incorrect ring capacity reported\n",
/* Validate the round-tripped data byte-for-byte. */
1061 if (memcmp(src, dst, RTE_PTR_DIFF(cur_dst, dst))) {
1062 rte_hexdump(stdout, "src", src, RTE_PTR_DIFF(cur_src, src));
1063 rte_hexdump(stdout, "dst", dst, RTE_PTR_DIFF(cur_dst, dst));
1064 printf("data after dequeue is not the same\n");
1070 rte_ring_free(std_r);
1071 rte_ring_free(exact_sz_r);
/* NOTE(review): the lines below appear to be the error-path cleanup of the
 * same rings; the intervening label is missing from this listing. */
1083 rte_ring_free(std_r);
1084 rte_ring_free(exact_sz_r);
/*
 * NOTE(review): this is the interior of the top-level test_ring() entry
 * point — its signature and early lines are missing from this listing.
 * It runs the negative tests, the basic tests, the exact-size tests, then
 * all four burst/bulk suites for every entry of test_enqdeq_impl[], and
 * finally dumps the ring list.
 */
1094 /* Negative test cases */
1095 if (test_ring_negative_tests() < 0)
1098 /* Some basic operations */
1099 if (test_ring_basic_ex() < 0)
1102 if (test_ring_with_exact_size() < 0)
1105 /* Burst and bulk operations with sp/sc, mp/mc and default.
1106 * The test cases are split into smaller test cases to
1107 * help clang compile faster.
1109 for (i = 0; i != RTE_DIM(test_enqdeq_impl); i++) {
1112 rc = test_ring_burst_bulk_tests1(i);
1116 rc = test_ring_burst_bulk_tests2(i);
1120 rc = test_ring_burst_bulk_tests3(i);
1124 rc = test_ring_burst_bulk_tests4(i);
1129 /* dump the ring status */
1130 rte_ring_list_dump(stdout);
/* Register this suite as the 'ring_autotest' DPDK test command. */
1139 REGISTER_TEST_COMMAND(ring_autotest, test_ring);