/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <sys/queue.h>

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_malloc.h>
#include <rte_ring_elem.h>
#include <rte_random.h>
#include <rte_errno.h>
#include <rte_hexdump.h>

#include "test_ring.h"
/*
 * #. Functional tests. Tests single/bulk/burst, default/SPSC/MPMC,
 *    legacy/custom element size (4B, 8B, 16B, 20B) APIs.
 *    Some tests incorporate unaligned addresses for objects.
 *    The enqueued/dequeued data is validated for correctness.
 *
 * #. Performance tests are in test_ring_perf.c
 */
#define RING_SIZE 4096

/*
 * Check a condition; on failure, dump the ring pointed to by the local
 * variable 'r' and fail the enclosing test.
 */
#define TEST_RING_VERIFY(exp) \
	if (!(exp)) { \
		printf("error at %s:%d\tcondition " #exp " failed\n", \
			__func__, __LINE__); \
		rte_ring_dump(stdout, r); \
		return -1; \
	}

#define TEST_RING_FULL_EMPTY_ITER 8
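
/*
 * Element sizes to test: -1 selects the legacy pointer-object APIs, the
 * other values exercise the rte_ring_*_elem APIs with 4B, 8B, 16B and
 * 20B elements.
 */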
static const int esize[] = {-1, 4, 8, 16, 20};
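
/*
 * Table of enqueue/dequeue implementations under test. Each entry pairs a
 * legacy pointer-object API (flegacy) with its rte_ring_*_elem counterpart
 * (felem) for one synchronization mode; create_flags selects that mode when
 * the test ring is created.
 */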
static const struct {
	const char *desc;
	uint32_t api_type;
	uint32_t create_flags;
	struct {
		unsigned int (*flegacy)(struct rte_ring *r,
			void * const *obj_table, unsigned int n,
			unsigned int *free_space);
		unsigned int (*felem)(struct rte_ring *r, const void *obj_table,
			unsigned int esize, unsigned int n,
			unsigned int *free_space);
	} enq;
	struct {
		unsigned int (*flegacy)(struct rte_ring *r,
			void **obj_table, unsigned int n,
			unsigned int *available);
		unsigned int (*felem)(struct rte_ring *r, void *obj_table,
			unsigned int esize, unsigned int n,
			unsigned int *available);
	} deq;
} test_enqdeq_impl[] = {
83 .desc = "MP/MC sync mode",
84 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
87 .flegacy = rte_ring_enqueue_bulk,
88 .felem = rte_ring_enqueue_bulk_elem,
91 .flegacy = rte_ring_dequeue_bulk,
92 .felem = rte_ring_dequeue_bulk_elem,
96 .desc = "SP/SC sync mode",
97 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_SPSC,
98 .create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
100 .flegacy = rte_ring_sp_enqueue_bulk,
101 .felem = rte_ring_sp_enqueue_bulk_elem,
104 .flegacy = rte_ring_sc_dequeue_bulk,
105 .felem = rte_ring_sc_dequeue_bulk_elem,
109 .desc = "MP/MC sync mode",
110 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_MPMC,
113 .flegacy = rte_ring_mp_enqueue_bulk,
114 .felem = rte_ring_mp_enqueue_bulk_elem,
117 .flegacy = rte_ring_mc_dequeue_bulk,
118 .felem = rte_ring_mc_dequeue_bulk_elem,
122 .desc = "MP_RTS/MC_RTS sync mode",
123 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
124 .create_flags = RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ,
126 .flegacy = rte_ring_enqueue_bulk,
127 .felem = rte_ring_enqueue_bulk_elem,
130 .flegacy = rte_ring_dequeue_bulk,
131 .felem = rte_ring_dequeue_bulk_elem,
135 .desc = "MP_HTS/MC_HTS sync mode",
136 .api_type = TEST_RING_ELEM_BULK | TEST_RING_THREAD_DEF,
137 .create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
139 .flegacy = rte_ring_enqueue_bulk,
140 .felem = rte_ring_enqueue_bulk_elem,
143 .flegacy = rte_ring_dequeue_bulk,
144 .felem = rte_ring_dequeue_bulk_elem,
148 .desc = "MP/MC sync mode",
149 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
152 .flegacy = rte_ring_enqueue_burst,
153 .felem = rte_ring_enqueue_burst_elem,
156 .flegacy = rte_ring_dequeue_burst,
157 .felem = rte_ring_dequeue_burst_elem,
161 .desc = "SP/SC sync mode",
162 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_SPSC,
163 .create_flags = RING_F_SP_ENQ | RING_F_SC_DEQ,
165 .flegacy = rte_ring_sp_enqueue_burst,
166 .felem = rte_ring_sp_enqueue_burst_elem,
169 .flegacy = rte_ring_sc_dequeue_burst,
170 .felem = rte_ring_sc_dequeue_burst_elem,
174 .desc = "MP/MC sync mode",
175 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_MPMC,
178 .flegacy = rte_ring_mp_enqueue_burst,
179 .felem = rte_ring_mp_enqueue_burst_elem,
182 .flegacy = rte_ring_mc_dequeue_burst,
183 .felem = rte_ring_mc_dequeue_burst_elem,
187 .desc = "MP_RTS/MC_RTS sync mode",
188 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
189 .create_flags = RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ,
191 .flegacy = rte_ring_enqueue_burst,
192 .felem = rte_ring_enqueue_burst_elem,
195 .flegacy = rte_ring_dequeue_burst,
196 .felem = rte_ring_dequeue_burst_elem,
200 .desc = "MP_HTS/MC_HTS sync mode",
201 .api_type = TEST_RING_ELEM_BURST | TEST_RING_THREAD_DEF,
202 .create_flags = RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ,
204 .flegacy = rte_ring_enqueue_burst,
205 .felem = rte_ring_enqueue_burst_elem,
208 .flegacy = rte_ring_dequeue_burst,
209 .felem = rte_ring_dequeue_burst_elem,
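
/*
 * Dispatch helpers for the tests below: forward an enqueue/dequeue request
 * to either the legacy pointer API or the _elem API of the entry selected
 * by test_idx; esize == -1 selects the legacy API.
 */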
static unsigned int
test_ring_enq_impl(struct rte_ring *r, void **obj, int esize, unsigned int n,
	unsigned int test_idx)
{
	if (esize == -1)
		return test_enqdeq_impl[test_idx].enq.flegacy(r, obj, n, NULL);
	else
		return test_enqdeq_impl[test_idx].enq.felem(r, obj, esize, n,
			NULL);
}
static unsigned int
test_ring_deq_impl(struct rte_ring *r, void **obj, int esize, unsigned int n,
	unsigned int test_idx)
{
	if (esize == -1)
		return test_enqdeq_impl[test_idx].deq.flegacy(r, obj, n, NULL);
	else
		return test_enqdeq_impl[test_idx].deq.felem(r, obj, esize, n,
			NULL);
}
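
/*
 * Advance an object pointer by n elements; legacy APIs store void * objects,
 * the _elem APIs store esize-byte elements.
 */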
static void **
test_ring_inc_ptr(void **obj, int esize, unsigned int n)
{
	/* Legacy queue APIs? */
	if (esize == -1)
		return ((void **)obj) + n;
	else
		return (void **)(((uint32_t *)obj) +
			(n * esize / sizeof(uint32_t)));
}
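
/*
 * Fill the source buffer with a known pattern so that the data dequeued
 * later can be validated with memcmp().
 */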
static void
test_ring_mem_init(void *obj, unsigned int count, int esize)
{
	unsigned int i;

	/* Legacy queue APIs? */
	if (esize == -1)
		for (i = 0; i < count; i++)
			((void **)obj)[i] = (void *)(unsigned long)i;
	else
		for (i = 0; i < (count * esize / sizeof(uint32_t)); i++)
			((uint32_t *)obj)[i] = i;
}
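
/* Print a test banner: API flavour, element size and sync/operation mode. */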
static void
test_ring_print_test_string(const char *istr, unsigned int api_type, int esize)
{
	printf("\n%s: ", istr);

	if (esize == -1)
		printf("legacy APIs: ");
	else
		printf("elem APIs: element size %dB ", esize);

	if (api_type == TEST_RING_IGNORE_API_TYPE)
		return;

	if (api_type & TEST_RING_THREAD_DEF)
		printf(": default enqueue/dequeue: ");
	else if (api_type & TEST_RING_THREAD_SPSC)
		printf(": SP/SC: ");
	else if (api_type & TEST_RING_THREAD_MPMC)
		printf(": MP/MC: ");

	if (api_type & TEST_RING_ELEM_SINGLE)
		printf("single\n");
	else if (api_type & TEST_RING_ELEM_BULK)
		printf("bulk\n");
	else if (api_type & TEST_RING_ELEM_BURST)
		printf("burst\n");
}
/*
 * Various negative test cases.
 */
static int
test_ring_negative_tests(void)
{
	struct rte_ring *rp = NULL;
	struct rte_ring *rt = NULL;
	unsigned int i;

	/* Test with esize not a multiple of 4 */
	rp = test_ring_create("test_bad_element_size", 23,
				RING_SIZE + 1, SOCKET_ID_ANY, 0);
			printf("Test failed to detect invalid element size\n");

	for (i = 0; i < RTE_DIM(esize); i++) {
		/* Test if ring size is not power of 2 */
		rp = test_ring_create("test_bad_ring_size", esize[i],
					RING_SIZE + 1, SOCKET_ID_ANY, 0);
				printf("Test failed to detect odd count\n");

		/* Test if ring size is exceeding the limit */
		rp = test_ring_create("test_bad_ring_size", esize[i],
					RTE_RING_SZ_MASK + 1, SOCKET_ID_ANY, 0);
				printf("Test failed to detect limits\n");

		/* Tests if lookup returns NULL on non-existing ring */
		rp = rte_ring_lookup("ring_not_found");
		if (rp != NULL && rte_errno != ENOENT) {
			printf("Test failed to detect NULL ring lookup\n");

		/* Test if a non-power of 2 count causes the create
		 * function to fail correctly
		 */
		rp = test_ring_create("test_ring_count", esize[i], 4097,
					SOCKET_ID_ANY, 0);

		rp = test_ring_create("test_ring_negative", esize[i], RING_SIZE,
					SOCKET_ID_ANY,
					RING_F_SP_ENQ | RING_F_SC_DEQ);
			printf("test_ring_negative failed to create ring\n");

		if (rte_ring_lookup("test_ring_negative") != rp)

		if (rte_ring_empty(rp) != 1) {
			printf("test_ring_negative ring is not empty but it should be\n");

		/* Tests that creating a ring with an already used name
		 * always fails.
		 */
		rt = test_ring_create("test_ring_negative", esize[i], RING_SIZE,
					SOCKET_ID_ANY,
					RING_F_SP_ENQ | RING_F_SC_DEQ);
/*
 * Burst and bulk operations with sp/sc, mp/mc and default (chosen during
 * creation). A random number of elements is enqueued and dequeued.
 */
static int
test_ring_burst_bulk_tests1(unsigned int test_idx)
{
	struct rte_ring *r;
	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
	int ret;
	unsigned int i, j, rand;
	const unsigned int rsz = RING_SIZE - 1;
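
	/*
	 * For each element size: create a ring, enqueue/dequeue a random
	 * number of objects over several iterations, then fill the ring
	 * completely and drain it, checking the ring counters at each step.
	 */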
	for (i = 0; i < RTE_DIM(esize); i++) {
		test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
			test_enqdeq_impl[test_idx].api_type, esize[i]);

		/* Create the ring */
		r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
					RING_SIZE, SOCKET_ID_ANY,
					test_enqdeq_impl[test_idx].create_flags);

		/* alloc dummy object pointers */
		src = test_ring_calloc(RING_SIZE * 2, esize[i]);
		test_ring_mem_init(src, RING_SIZE * 2, esize[i]);

		/* alloc some room for copied objects */
		dst = test_ring_calloc(RING_SIZE * 2, esize[i]);

		printf("Random full/empty test\n");

		for (j = 0; j != TEST_RING_FULL_EMPTY_ITER; j++) {
			/* random shift in the ring */
			rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
			printf("%s: iteration %u, random shift: %u;\n",
				__func__, j, rand);
			ret = test_ring_enq_impl(r, cur_src, esize[i], rand,
				test_idx);
			TEST_RING_VERIFY(ret != 0);

			ret = test_ring_deq_impl(r, cur_dst, esize[i], rand,
				test_idx);
			TEST_RING_VERIFY(ret == rand);

			ret = test_ring_enq_impl(r, cur_src, esize[i], rsz,
				test_idx);
			TEST_RING_VERIFY(ret != 0);

			TEST_RING_VERIFY(rte_ring_free_count(r) == 0);
			TEST_RING_VERIFY(rsz == rte_ring_count(r));
			TEST_RING_VERIFY(rte_ring_full(r));
			TEST_RING_VERIFY(rte_ring_empty(r) == 0);

			ret = test_ring_deq_impl(r, cur_dst, esize[i], rsz,
				test_idx);
			TEST_RING_VERIFY(ret == (int)rsz);
			TEST_RING_VERIFY(rsz == rte_ring_free_count(r));
			TEST_RING_VERIFY(rte_ring_count(r) == 0);
			TEST_RING_VERIFY(rte_ring_full(r) == 0);
			TEST_RING_VERIFY(rte_ring_empty(r));

		TEST_RING_VERIFY(memcmp(src, dst, rsz) == 0);

		/* Free memory before the test completes */
/*
 * Burst and bulk operations with sp/sc, mp/mc and default (chosen during
 * creation). A sequence of simple enqueues/dequeues; the enqueued and
 * dequeued data is validated.
 */
static int
test_ring_burst_bulk_tests2(unsigned int test_idx)
{
	struct rte_ring *r;
	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
	int ret;
	unsigned int i;

	for (i = 0; i < RTE_DIM(esize); i++) {
		test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
			test_enqdeq_impl[test_idx].api_type, esize[i]);

		/* Create the ring */
		r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
					RING_SIZE, SOCKET_ID_ANY,
					test_enqdeq_impl[test_idx].create_flags);

		/* alloc dummy object pointers */
		src = test_ring_calloc(RING_SIZE * 2, esize[i]);
		test_ring_mem_init(src, RING_SIZE * 2, esize[i]);

		/* alloc some room for copied objects */
		dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
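
		/*
		 * Enqueue 1, 2 and MAX_BULK objects, dequeue them back in the
		 * same order, then compare the dequeued data with the source.
		 */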
502 printf("enqueue 1 obj\n");
503 ret = test_ring_enq_impl(r, cur_src, esize[i], 1, test_idx);
506 cur_src = test_ring_inc_ptr(cur_src, esize[i], 1);
508 printf("enqueue 2 objs\n");
509 ret = test_ring_enq_impl(r, cur_src, esize[i], 2, test_idx);
512 cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);
514 printf("enqueue MAX_BULK objs\n");
515 ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
519 cur_src = test_ring_inc_ptr(cur_src, esize[i], MAX_BULK);
521 printf("dequeue 1 obj\n");
522 ret = test_ring_deq_impl(r, cur_dst, esize[i], 1, test_idx);
525 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 1);
527 printf("dequeue 2 objs\n");
528 ret = test_ring_deq_impl(r, cur_dst, esize[i], 2, test_idx);
531 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);
533 printf("dequeue MAX_BULK objs\n");
534 ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
538 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], MAX_BULK);
541 if (memcmp(src, dst, cur_dst - dst)) {
542 rte_hexdump(stdout, "src", src, cur_src - src);
543 rte_hexdump(stdout, "dst", dst, cur_dst - dst);
544 printf("data after dequeue is not the same\n");
548 /* Free memory before test completed */
/*
 * Burst and bulk operations with sp/sc, mp/mc and default (chosen during
 * creation). Enqueue and dequeue to cover the entire ring length.
 */
static int
test_ring_burst_bulk_tests3(unsigned int test_idx)
{
	struct rte_ring *r;
	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
	int ret;
	unsigned int i, j;

	for (i = 0; i < RTE_DIM(esize); i++) {
		test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
			test_enqdeq_impl[test_idx].api_type, esize[i]);

		/* Create the ring */
		r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
					RING_SIZE, SOCKET_ID_ANY,
					test_enqdeq_impl[test_idx].create_flags);

		/* alloc dummy object pointers */
		src = test_ring_calloc(RING_SIZE * 2, esize[i]);
		test_ring_mem_init(src, RING_SIZE * 2, esize[i]);

		/* alloc some room for copied objects */
		dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
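
		/*
		 * Fill and empty the ring RING_SIZE / MAX_BULK times,
		 * MAX_BULK objects at a time, then validate the data.
		 */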
599 printf("fill and empty the ring\n");
600 for (j = 0; j < RING_SIZE / MAX_BULK; j++) {
601 ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
605 cur_src = test_ring_inc_ptr(cur_src, esize[i],
608 ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
612 cur_dst = test_ring_inc_ptr(cur_dst, esize[i],
617 if (memcmp(src, dst, cur_dst - dst)) {
618 rte_hexdump(stdout, "src", src, cur_src - src);
619 rte_hexdump(stdout, "dst", dst, cur_dst - dst);
620 printf("data after dequeue is not the same\n");
624 /* Free memory before test completed */
/*
 * Burst and bulk operations with sp/sc, mp/mc and default (chosen during
 * creation). Enqueue till the ring is full and dequeue till the ring
 * becomes empty.
 */
static int
test_ring_burst_bulk_tests4(unsigned int test_idx)
{
	struct rte_ring *r;
	void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
	int ret;
	unsigned int i, j;
	unsigned int api_type, num_elems;

	api_type = test_enqdeq_impl[test_idx].api_type;

	for (i = 0; i < RTE_DIM(esize); i++) {
		test_ring_print_test_string(test_enqdeq_impl[test_idx].desc,
			test_enqdeq_impl[test_idx].api_type, esize[i]);

		/* Create the ring */
		r = test_ring_create("test_ring_burst_bulk_tests", esize[i],
					RING_SIZE, SOCKET_ID_ANY,
					test_enqdeq_impl[test_idx].create_flags);

		/* alloc dummy object pointers */
		src = test_ring_calloc(RING_SIZE * 2, esize[i]);
		test_ring_mem_init(src, RING_SIZE * 2, esize[i]);

		/* alloc some room for copied objects */
		dst = test_ring_calloc(RING_SIZE * 2, esize[i]);
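
		/*
		 * Fill the ring almost completely, exercise the no-free-space
		 * and not-enough-objects paths, then drain the ring and
		 * validate the dequeued data.
		 */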
678 printf("Test enqueue without enough memory space\n");
679 for (j = 0; j < (RING_SIZE/MAX_BULK - 1); j++) {
680 ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
684 cur_src = test_ring_inc_ptr(cur_src, esize[i],
688 printf("Enqueue 2 objects, free entries = MAX_BULK - 2\n");
689 ret = test_ring_enq_impl(r, cur_src, esize[i], 2, test_idx);
692 cur_src = test_ring_inc_ptr(cur_src, esize[i], 2);
694 printf("Enqueue the remaining entries = MAX_BULK - 3\n");
695 /* Bulk APIs enqueue exact number of elements */
696 if ((api_type & TEST_RING_ELEM_BULK) == TEST_RING_ELEM_BULK)
697 num_elems = MAX_BULK - 3;
699 num_elems = MAX_BULK;
700 /* Always one free entry left */
701 ret = test_ring_enq_impl(r, cur_src, esize[i], num_elems,
703 if (ret != MAX_BULK - 3)
705 cur_src = test_ring_inc_ptr(cur_src, esize[i], MAX_BULK - 3);
707 printf("Test if ring is full\n");
708 if (rte_ring_full(r) != 1)
711 printf("Test enqueue for a full entry\n");
712 ret = test_ring_enq_impl(r, cur_src, esize[i], MAX_BULK,
717 printf("Test dequeue without enough objects\n");
718 for (j = 0; j < RING_SIZE / MAX_BULK - 1; j++) {
719 ret = test_ring_deq_impl(r, cur_dst, esize[i], MAX_BULK,
723 cur_dst = test_ring_inc_ptr(cur_dst, esize[i],
727 /* Available memory space for the exact MAX_BULK entries */
728 ret = test_ring_deq_impl(r, cur_dst, esize[i], 2, test_idx);
731 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], 2);
733 /* Bulk APIs enqueue exact number of elements */
734 if ((api_type & TEST_RING_ELEM_BULK) == TEST_RING_ELEM_BULK)
735 num_elems = MAX_BULK - 3;
737 num_elems = MAX_BULK;
738 ret = test_ring_deq_impl(r, cur_dst, esize[i], num_elems,
740 if (ret != MAX_BULK - 3)
742 cur_dst = test_ring_inc_ptr(cur_dst, esize[i], MAX_BULK - 3);
744 printf("Test if ring is empty\n");
745 /* Check if ring is empty */
746 if (rte_ring_empty(r) != 1)
750 if (memcmp(src, dst, cur_dst - dst)) {
751 rte_hexdump(stdout, "src", src, cur_src - src);
752 rte_hexdump(stdout, "dst", dst, cur_dst - dst);
753 printf("data after dequeue is not the same\n");
757 /* Free memory before test completed */
/*
 * Test default, single element, bulk and burst APIs.
 */
static int
test_ring_basic_ex(void)
{
	int ret;
	unsigned int i, j;
	struct rte_ring *rp = NULL;
	void **obj = NULL;

	for (i = 0; i < RTE_DIM(esize); i++) {
		obj = test_ring_calloc(RING_SIZE, esize[i]);
			printf("%s: failed to alloc memory\n", __func__);

		rp = test_ring_create("test_ring_basic_ex", esize[i], RING_SIZE,
					SOCKET_ID_ANY,
					RING_F_SP_ENQ | RING_F_SC_DEQ);
			printf("%s: failed to create ring\n", __func__);

		if (rte_ring_lookup("test_ring_basic_ex") != rp) {
			printf("%s: failed to find ring\n", __func__);

		if (rte_ring_empty(rp) != 1) {
			printf("%s: ring is not empty but it should be\n",
				__func__);

		printf("%u ring entries are now free\n",
			rte_ring_free_count(rp));
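
		/*
		 * Enqueue one object at a time until the ring is full, then
		 * dequeue them all back, checking the full/empty state.
		 */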
		for (j = 0; j < RING_SIZE; j++) {
			test_ring_enqueue(rp, obj, esize[i], 1,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);

		if (rte_ring_full(rp) != 1) {
			printf("%s: ring is not full but it should be\n",
				__func__);

		for (j = 0; j < RING_SIZE; j++) {
			test_ring_dequeue(rp, obj, esize[i], 1,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);

		if (rte_ring_empty(rp) != 1) {
			printf("%s: ring is not empty but it should be\n",
				__func__);

		/* The following tests use the ring's configured flags to
		 * decide between SP/SC and MP/MC operation.
		 */
		/* Covering the ring burst operation */
		ret = test_ring_enqueue(rp, obj, esize[i], 2,
			TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
			printf("%s: rte_ring_enqueue_burst failed\n", __func__);

		ret = test_ring_dequeue(rp, obj, esize[i], 2,
			TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
			printf("%s: rte_ring_dequeue_burst failed\n", __func__);

		/* Covering the ring bulk operation */
		ret = test_ring_enqueue(rp, obj, esize[i], 2,
			TEST_RING_THREAD_DEF | TEST_RING_ELEM_BULK);
			printf("%s: rte_ring_enqueue_bulk failed\n", __func__);

		ret = test_ring_dequeue(rp, obj, esize[i], 2,
			TEST_RING_THREAD_DEF | TEST_RING_ELEM_BULK);
			printf("%s: rte_ring_dequeue_bulk failed\n", __func__);
/*
 * Basic test cases with exact size ring.
 */
static int
test_ring_with_exact_size(void)
{
	struct rte_ring *std_r = NULL, *exact_sz_r = NULL;
	void *obj_orig, *obj;
	int ret;
	unsigned int i, j;
	const unsigned int ring_sz = 16;

	for (i = 0; i < RTE_DIM(esize); i++) {
		test_ring_print_test_string("Test exact size ring",
			TEST_RING_IGNORE_API_TYPE,
			esize[i]);

		/* alloc object pointers. Allocate one extra object
		 * and create an unaligned address.
		 */
		obj_orig = test_ring_calloc(17, esize[i]);
		if (obj_orig == NULL)
		obj = ((char *)obj_orig) + 1;

		std_r = test_ring_create("std", esize[i], ring_sz,
					SOCKET_ID_ANY,
					RING_F_SP_ENQ | RING_F_SC_DEQ);
			printf("%s: error, can't create std ring\n", __func__);

		exact_sz_r = test_ring_create("exact sz", esize[i], ring_sz,
					SOCKET_ID_ANY,
					RING_F_SP_ENQ | RING_F_SC_DEQ |
					RING_F_EXACT_SZ);
		if (exact_sz_r == NULL) {
			printf("%s: error, can't create exact size ring\n",
				__func__);

		/*
		 * Check that the exact size ring is bigger than the
		 * standard ring.
		 */
		if (rte_ring_get_size(std_r) >= rte_ring_get_size(exact_sz_r)) {
			printf("%s: error, std ring (size: %u) is not smaller than exact size one (size %u)\n",
				__func__,
				rte_ring_get_size(std_r),
				rte_ring_get_size(exact_sz_r));

		/*
		 * check that the exact_sz_ring can hold one more element
		 * than the standard ring. (16 vs 15 elements)
		 */
		for (j = 0; j < ring_sz - 1; j++) {
			test_ring_enqueue(std_r, obj, esize[i], 1,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
			test_ring_enqueue(exact_sz_r, obj, esize[i], 1,
				TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
		}
		ret = test_ring_enqueue(std_r, obj, esize[i], 1,
			TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
		if (ret != -ENOBUFS) {
			printf("%s: error, unexpected successful enqueue\n",
				__func__);

		ret = test_ring_enqueue(exact_sz_r, obj, esize[i], 1,
			TEST_RING_THREAD_DEF | TEST_RING_ELEM_SINGLE);
		if (ret == -ENOBUFS) {
			printf("%s: error, enqueue failed\n", __func__);

		/* check that dequeue returns the expected number of elements */
		ret = test_ring_dequeue(exact_sz_r, obj, esize[i], ring_sz,
			TEST_RING_THREAD_DEF | TEST_RING_ELEM_BURST);
		if (ret != (int)ring_sz) {
			printf("%s: error, failed to dequeue expected nb of elements\n",
				__func__);

		/* check that the capacity function returns expected value */
		if (rte_ring_get_capacity(exact_sz_r) != ring_sz) {
			printf("%s: error, incorrect ring capacity reported\n",
				__func__);

		rte_ring_free(std_r);
		rte_ring_free(exact_sz_r);

	rte_ring_free(std_r);
	rte_ring_free(exact_sz_r);
static int
test_ring(void)
{
	int rc;
	unsigned int i;

	/* Negative test cases */
	if (test_ring_negative_tests() < 0)

	/* Some basic operations */
	if (test_ring_basic_ex() < 0)

	if (test_ring_with_exact_size() < 0)

	/* Burst and bulk operations with sp/sc, mp/mc and default.
	 * The test cases are split into smaller test cases to
	 * help clang compile faster.
	 */
	for (i = 0; i != RTE_DIM(test_enqdeq_impl); i++) {
		rc = test_ring_burst_bulk_tests1(i);

		rc = test_ring_burst_bulk_tests2(i);

		rc = test_ring_burst_bulk_tests3(i);

		rc = test_ring_burst_bulk_tests4(i);

	/* dump the ring status */
	rte_ring_list_dump(stdout);

REGISTER_TEST_COMMAND(ring_autotest, test_ring);