4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/queue.h>
43 #include <rte_common.h>
45 #include <rte_memory.h>
46 #include <rte_memzone.h>
47 #include <rte_launch.h>
48 #include <rte_cycles.h>
50 #include <rte_per_lcore.h>
51 #include <rte_lcore.h>
52 #include <rte_atomic.h>
53 #include <rte_branch_prediction.h>
54 #include <rte_malloc.h>
56 #include <rte_random.h>
57 #include <rte_common.h>
58 #include <rte_errno.h>
59 #include <rte_hexdump.h>
67 * #. Basic tests: done on one core:
69 * - Using single producer/single consumer functions:
71 * - Enqueue one object, two objects, MAX_BULK objects
72 * - Dequeue one object, two objects, MAX_BULK objects
73 * - Check that dequeued pointers are correct
75 * - Using multi producers/multi consumers functions:
77 * - Enqueue one object, two objects, MAX_BULK objects
78 * - Dequeue one object, two objects, MAX_BULK objects
79 * - Check that dequeued pointers are correct
81 * - Test watermark and default bulk enqueue/dequeue:
84 * - Set default bulk value
85 * - Enqueue objects, check that -EDQUOT is returned when
86 * watermark is exceeded
87 * - Check that dequeued pointers are correct
89 * #. Check live watermark change
91 * - Start a loop on another lcore that will enqueue and dequeue
92 * objects in a ring. It will monitor the value of watermark.
93 * - At the same time, change the watermark on the master lcore.
94 * - The slave lcore will check that watermark changes from 16 to 32.
96 * #. Performance tests.
98 * Tests done in test_ring_perf.c
/* Number of slots in the ring used by all tests in this file. */
101 #define RING_SIZE 4096
/* Synchronisation flag shared between lcores. */
106 static rte_atomic32_t synchro;
/* The ring under test; created in the test entry point and used globally. */
108 static struct rte_ring *r;
/*
 * Assert-style check: on failure, print the failing condition with its
 * source location and dump the ring state for debugging.
 * NOTE(review): the macro's opening ("do { if (!(exp)) {") and closing
 * lines are not visible in this chunk, nor is the line separating it from
 * the next #define.  "EMTPY" in the iteration-count macro below is a
 * historical typo; it cannot be renamed here without touching its use site.
 */
110 #define TEST_RING_VERIFY(exp) \
112 printf("error at %s:%d\tcondition " #exp " failed\n", \
113 __func__, __LINE__); \
114 rte_ring_dump(stdout, r); \
118 #define TEST_RING_FULL_EMTPY_ITER 8
/*
 * Worker launched on a second lcore by test_live_watermark_change().
 *
 * For a window of hz * 2 timer cycles it repeatedly enqueues objects until
 * the watermark is hit (expecting -EDQUOT), reads the current watermark
 * from r->prod.watermark, and dequeues the objects again.  The only
 * watermark transition it accepts is 16 -> 32, which the other lcore
 * performs concurrently.
 * NOTE(review): the function's opening brace, loop structure, error
 * returns and final return are not visible in this chunk.
 */
121 check_live_watermark_change(__attribute__((unused)) void *dummy)
123 uint64_t hz = rte_get_timer_hz();
124 void *obj_table[MAX_BULK];
125 unsigned watermark, watermark_old = 16;
126 uint64_t cur_time, end_time;
131 /* init the object table */
132 memset(obj_table, 0, sizeof(obj_table));
/* run for roughly two seconds of timer cycles */
133 end_time = rte_get_timer_cycles() + (hz * 2);
135 /* check that bulk and watermark are 4 and 32 (respectively) */
138 /* add in ring until we reach watermark */
140 for (i = 0; i < 16; i ++) {
143 ret = rte_ring_enqueue_bulk(r, obj_table, count);
/* -EDQUOT means the enqueue succeeded but the watermark was exceeded */
146 if (ret != -EDQUOT) {
147 printf("Cannot enqueue objects, or watermark not "
148 "reached (ret=%d)\n", ret);
152 /* read watermark, the only change allowed is from 16 to 32 */
153 watermark = r->prod.watermark;
154 if (watermark != watermark_old &&
155 (watermark_old != 16 || watermark != 32)) {
156 printf("Bad watermark change %u -> %u\n", watermark_old,
160 watermark_old = watermark;
162 /* dequeue objects from ring */
164 ret = rte_ring_dequeue_bulk(r, obj_table, count);
166 printf("Cannot dequeue (ret=%d)\n", ret);
/* stop looping once the two-second window has elapsed */
171 cur_time = rte_get_timer_cycles();
172 diff = end_time - cur_time;
/* by now the other lcore must have raised the watermark to 32 */
175 if (watermark_old != 32 ) {
176 printf(" watermark was not updated (wm=%u)\n",
/*
 * Test that the watermark can be changed while the ring is in use:
 * launch check_live_watermark_change() on the next available lcore,
 * raise the watermark from 16 to 32 on this lcore while the worker is
 * running, then collect the worker's result.
 * NOTE(review): the braces and the lines between launch and the
 * watermark change (presumably a delay) are not visible in this chunk.
 */
185 test_live_watermark_change(void)
187 unsigned lcore_id = rte_lcore_id();
188 unsigned lcore_id2 = rte_get_next_lcore(lcore_id, 0, 1);
190 printf("Test watermark live modification\n");
191 rte_ring_set_water_mark(r, 16);
193 /* launch a thread that will enqueue and dequeue, checking
194 * watermark and quota */
195 rte_eal_remote_launch(check_live_watermark_change, NULL, lcore_id2);
/* live change: the worker must observe exactly the 16 -> 32 transition */
198 rte_ring_set_water_mark(r, 32);
/* wait for the worker lcore and propagate its return code */
201 if (rte_eal_wait_lcore(lcore_id2) < 0)
207 /* Test for catch on invalid watermark values */
/*
 * Verify that rte_ring_set_water_mark() rejects invalid values:
 * a count larger than the ring size must return -EINVAL, and a
 * zero count must restore the default watermark (== ring size).
 */
209 test_set_watermark( void ){
/* operate on the ring created earlier by test_ring_basic_ex() */
213 struct rte_ring *r = rte_ring_lookup("test_ring_basic_ex");
215 printf( " ring lookup failed\n" );
/* a watermark of twice the ring size must be rejected */
218 count = r->prod.size*2;
219 setwm = rte_ring_set_water_mark(r, count);
220 if (setwm != -EINVAL){
221 printf("Test failed to detect invalid watermark count value\n");
/* NOTE(review): count is presumably reset to 0 on a line not visible
 * here; setting watermark 0 should restore the default (ring size). */
226 rte_ring_set_water_mark(r, count);
227 if (r->prod.watermark != r->prod.size) {
228 printf("Test failed to detect invalid watermark count value\n");
238 * helper routine for test_ring_basic
/*
 * Fill the ring to capacity and empty it again, TEST_RING_FULL_EMTPY_ITER
 * times, starting each iteration from a random offset inside the ring.
 * Verifies the free/used counters and the full/empty predicates at both
 * extremes, and that the dequeued data matches the enqueued data.
 */
241 test_ring_basic_full_empty(void * const src[], void *dst[])
/* usable capacity of the ring is one less than its size */
244 const unsigned rsz = RING_SIZE - 1;
246 printf("Basic full/empty test\n");
248 for (i = 0; TEST_RING_FULL_EMTPY_ITER != i; i++) {
250 /* random shift in the ring */
251 rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
252 printf("%s: iteration %u, random shift: %u;\n",
/* enqueue then dequeue `rand` objects to rotate the head/tail indices */
254 TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
256 TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rand));
/* fill the ring completely and check the "full" invariants */
259 TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
261 TEST_RING_VERIFY(0 == rte_ring_free_count(r));
262 TEST_RING_VERIFY(rsz == rte_ring_count(r));
263 TEST_RING_VERIFY(rte_ring_full(r));
264 TEST_RING_VERIFY(0 == rte_ring_empty(r));
/* drain the ring back to empty and check the "empty" invariants */
267 TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rsz));
268 TEST_RING_VERIFY(rsz == rte_ring_free_count(r));
269 TEST_RING_VERIFY(0 == rte_ring_count(r));
270 TEST_RING_VERIFY(0 == rte_ring_full(r));
271 TEST_RING_VERIFY(rte_ring_empty(r));
/* NOTE(review): as written this compares rsz BYTES of the pointer
 * arrays, not rsz pointers -- confirm against the canonical source. */
274 TEST_RING_VERIFY(0 == memcmp(src, dst, rsz));
275 rte_ring_dump(stdout, r);
/*
 * Core single-core functional test of the ring.
 *
 * Exercises SP/SC bulk enqueue+dequeue, MP/MC bulk enqueue+dequeue,
 * a fill/empty cycle, the full/empty helper, watermark behaviour with
 * the default bulk count, and single-object enqueue/dequeue.  After
 * each phase the dequeued pointers are compared against what was
 * enqueued.  cur_src/cur_dst walk through the src/dst arrays as
 * objects are produced/consumed.
 * NOTE(review): error-handling branches, cleanup and return lines are
 * not visible in this chunk.
 */
281 test_ring_basic(void)
283 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
285 unsigned i, num_elems;
287 /* alloc dummy object pointers */
288 src = malloc(RING_SIZE*2*sizeof(void *));
/* each "object" is just its own index cast to a pointer */
292 for (i = 0; i < RING_SIZE*2 ; i++) {
293 src[i] = (void *)(unsigned long)i;
297 /* alloc some room for copied objects */
298 dst = malloc(RING_SIZE*2*sizeof(void *));
302 memset(dst, 0, RING_SIZE*2*sizeof(void *));
/* --- single-producer / single-consumer bulk operations --- */
305 printf("enqueue 1 obj\n");
306 ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1);
311 printf("enqueue 2 objs\n");
312 ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2);
317 printf("enqueue MAX_BULK objs\n");
318 ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK);
323 printf("dequeue 1 obj\n");
324 ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1);
329 printf("dequeue 2 objs\n");
330 ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2);
335 printf("dequeue MAX_BULK objs\n");
336 ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK);
/* verify SP/SC round-trip preserved the data */
342 if (memcmp(src, dst, cur_dst - dst)) {
343 rte_hexdump(stdout, "src", src, cur_src - src);
344 rte_hexdump(stdout, "dst", dst, cur_dst - dst);
345 printf("data after dequeue is not the same\n");
/* --- multi-producer / multi-consumer bulk operations --- */
351 printf("enqueue 1 obj\n");
352 ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1);
357 printf("enqueue 2 objs\n");
358 ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2);
363 printf("enqueue MAX_BULK objs\n");
364 ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
369 printf("dequeue 1 obj\n");
370 ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1);
375 printf("dequeue 2 objs\n");
376 ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2);
381 printf("dequeue MAX_BULK objs\n");
382 ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
/* verify MP/MC round-trip preserved the data */
388 if (memcmp(src, dst, cur_dst - dst)) {
389 rte_hexdump(stdout, "src", src, cur_src - src);
390 rte_hexdump(stdout, "dst", dst, cur_dst - dst);
391 printf("data after dequeue is not the same\n");
/* --- repeatedly fill and empty the whole ring in MAX_BULK chunks --- */
397 printf("fill and empty the ring\n");
398 for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
399 ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
403 ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
410 if (memcmp(src, dst, cur_dst - dst)) {
411 rte_hexdump(stdout, "src", src, cur_src - src);
412 rte_hexdump(stdout, "dst", dst, cur_dst - dst);
413 printf("data after dequeue is not the same\n");
417 if (test_ring_basic_full_empty(src, dst) != 0)
/* --- watermark + default bulk enqueue/dequeue --- */
423 printf("test watermark and default bulk enqueue / dequeue\n");
424 rte_ring_set_water_mark(r, 20);
/* first enqueue stays below the watermark and must succeed cleanly */
430 ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
431 cur_src += num_elems;
433 printf("Cannot enqueue\n");
/* second enqueue crosses the watermark: expect -EDQUOT (quota signal) */
436 ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
437 cur_src += num_elems;
438 if (ret != -EDQUOT) {
439 printf("Watermark not exceeded\n");
442 ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
443 cur_dst += num_elems;
445 printf("Cannot dequeue\n");
448 ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
449 cur_dst += num_elems;
451 printf("Cannot dequeue2\n");
456 if (memcmp(src, dst, cur_dst - dst)) {
457 rte_hexdump(stdout, "src", src, cur_src - src);
458 rte_hexdump(stdout, "dst", dst, cur_dst - dst);
459 printf("data after dequeue is not the same\n");
/* --- single-object enqueue/dequeue wrappers --- */
466 ret = rte_ring_mp_enqueue(r, cur_src);
470 ret = rte_ring_mc_dequeue(r, cur_dst);
/*
 * Functional test of the burst (best-effort) enqueue/dequeue API.
 *
 * Unlike the bulk API, burst calls may transfer fewer objects than
 * requested; the return value (masked with RTE_RING_SZ_MASK) is the
 * number actually transferred.  The test covers SP/SC and MP/MC burst
 * variants, partial transfers when the ring is nearly full or nearly
 * empty, and the full/empty predicates.
 * NOTE(review): error branches (goto/return) and cleanup lines are not
 * visible in this chunk.
 */
489 test_ring_burst_basic(void)
491 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
495 /* alloc dummy object pointers */
496 src = malloc(RING_SIZE*2*sizeof(void *));
500 for (i = 0; i < RING_SIZE*2 ; i++) {
501 src[i] = (void *)(unsigned long)i;
505 /* alloc some room for copied objects */
506 dst = malloc(RING_SIZE*2*sizeof(void *));
510 memset(dst, 0, RING_SIZE*2*sizeof(void *));
/* --- SP/SC burst: each call must transfer exactly what was asked --- */
513 printf("Test SP & SC basic functions \n");
514 printf("enqueue 1 obj\n");
515 ret = rte_ring_sp_enqueue_burst(r, cur_src, 1);
517 if ((ret & RTE_RING_SZ_MASK) != 1)
520 printf("enqueue 2 objs\n");
521 ret = rte_ring_sp_enqueue_burst(r, cur_src, 2);
523 if ((ret & RTE_RING_SZ_MASK) != 2)
526 printf("enqueue MAX_BULK objs\n");
527 ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK) ;
529 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
532 printf("dequeue 1 obj\n");
533 ret = rte_ring_sc_dequeue_burst(r, cur_dst, 1) ;
535 if ((ret & RTE_RING_SZ_MASK) != 1)
538 printf("dequeue 2 objs\n");
539 ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2);
541 if ((ret & RTE_RING_SZ_MASK) != 2)
544 printf("dequeue MAX_BULK objs\n");
545 ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
547 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
551 if (memcmp(src, dst, cur_dst - dst)) {
552 rte_hexdump(stdout, "src", src, cur_src - src);
553 rte_hexdump(stdout, "dst", dst, cur_dst - dst);
554 printf("data after dequeue is not the same\n");
/* --- SP burst with insufficient free space: expect partial transfer --- */
561 printf("Test enqueue without enough memory space \n");
562 for (i = 0; i< (RING_SIZE/MAX_BULK - 1); i++) {
563 ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
565 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK) {
570 printf("Enqueue 2 objects, free entries = MAX_BULK - 2 \n");
571 ret = rte_ring_sp_enqueue_burst(r, cur_src, 2);
573 if ((ret & RTE_RING_SZ_MASK) != 2)
576 printf("Enqueue the remaining entries = MAX_BULK - 2 \n");
577 /* Always one free entry left */
/* asking for MAX_BULK with only MAX_BULK - 3 slots must yield a partial */
578 ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
579 cur_src += MAX_BULK - 3;
580 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
583 printf("Test if ring is full \n");
584 if (rte_ring_full(r) != 1)
/* a burst enqueue on a full ring must transfer zero objects */
587 printf("Test enqueue for a full entry \n");
588 ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
589 if ((ret & RTE_RING_SZ_MASK) != 0)
/* --- SC burst with insufficient objects: expect partial transfer --- */
592 printf("Test dequeue without enough objects \n");
593 for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
594 ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
596 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
600 /* Available memory space for the exact MAX_BULK entries */
601 ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2);
603 if ((ret & RTE_RING_SZ_MASK) != 2)
606 ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
607 cur_dst += MAX_BULK - 3;
608 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
611 printf("Test if ring is empty \n");
612 /* Check if ring is empty */
613 if (1 != rte_ring_empty(r))
617 if (memcmp(src, dst, cur_dst - dst)) {
618 rte_hexdump(stdout, "src", src, cur_src - src);
619 rte_hexdump(stdout, "dst", dst, cur_dst - dst);
620 printf("data after dequeue is not the same\n");
/* --- MP/MC burst: same sequence as the SP/SC section above --- */
627 printf("Test MP & MC basic functions \n");
629 printf("enqueue 1 obj\n");
630 ret = rte_ring_mp_enqueue_burst(r, cur_src, 1);
632 if ((ret & RTE_RING_SZ_MASK) != 1)
635 printf("enqueue 2 objs\n");
636 ret = rte_ring_mp_enqueue_burst(r, cur_src, 2);
638 if ((ret & RTE_RING_SZ_MASK) != 2)
641 printf("enqueue MAX_BULK objs\n");
642 ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
644 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
647 printf("dequeue 1 obj\n");
648 ret = rte_ring_mc_dequeue_burst(r, cur_dst, 1);
650 if ((ret & RTE_RING_SZ_MASK) != 1)
653 printf("dequeue 2 objs\n");
654 ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2);
656 if ((ret & RTE_RING_SZ_MASK) != 2)
659 printf("dequeue MAX_BULK objs\n");
660 ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
662 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
666 if (memcmp(src, dst, cur_dst - dst)) {
667 rte_hexdump(stdout, "src", src, cur_src - src);
668 rte_hexdump(stdout, "dst", dst, cur_dst - dst);
669 printf("data after dequeue is not the same\n");
/* --- fill and empty the whole ring with MP/MC bursts --- */
676 printf("fill and empty the ring\n");
677 for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
678 ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
680 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
682 ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
684 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
689 if (memcmp(src, dst, cur_dst - dst)) {
690 rte_hexdump(stdout, "src", src, cur_src - src);
691 rte_hexdump(stdout, "dst", dst, cur_dst - dst);
692 printf("data after dequeue is not the same\n");
/* --- MP burst partial-transfer cases near full --- */
699 printf("Test enqueue without enough memory space \n");
700 for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
701 ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
703 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
707 /* Available memory space for the exact MAX_BULK objects */
708 ret = rte_ring_mp_enqueue_burst(r, cur_src, 2);
710 if ((ret & RTE_RING_SZ_MASK) != 2)
713 ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
714 cur_src += MAX_BULK - 3;
715 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
/* --- MC burst partial-transfer cases near empty --- */
719 printf("Test dequeue without enough objects \n");
720 for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
721 ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
723 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
727 /* Available objects - the exact MAX_BULK */
728 ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2);
730 if ((ret & RTE_RING_SZ_MASK) != 2)
733 ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
734 cur_dst += MAX_BULK - 3;
735 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
739 if (memcmp(src, dst, cur_dst - dst)) {
740 rte_hexdump(stdout, "src", src, cur_src - src);
741 rte_hexdump(stdout, "dst", dst, cur_dst - dst);
742 printf("data after dequeue is not the same\n");
/* --- generic (non-SP/SC-specific) burst wrappers --- */
749 printf("Covering rte_ring_enqueue_burst functions \n");
751 ret = rte_ring_enqueue_burst(r, cur_src, 2);
753 if ((ret & RTE_RING_SZ_MASK) != 2)
756 ret = rte_ring_dequeue_burst(r, cur_dst, 2);
761 /* Free memory before test completed */
/*
 * Verify the per-lcore debug statistics of the ring (success/fail/quota
 * counters for enqueue and dequeue, bulk and burst variants).
 *
 * Only compiled to a real test when RTE_LIBRTE_RING_DEBUG is enabled;
 * otherwise it prints a notice and returns early (the early-return line
 * is not visible in this chunk).  The test fills the ring, provokes
 * enqueue failures on a full ring and dequeue failures on an empty
 * ring, then exercises the watermark ("quota") counters, checking the
 * stats in r->stats[lcore_id] after each step.
 */
777 test_ring_stats(void)
780 #ifndef RTE_LIBRTE_RING_DEBUG
781 printf("Enable RTE_LIBRTE_RING_DEBUG to test ring stats.\n");
784 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
787 unsigned num_items = 0;
788 unsigned failed_enqueue_ops = 0;
789 unsigned failed_enqueue_items = 0;
790 unsigned failed_dequeue_ops = 0;
791 unsigned failed_dequeue_items = 0;
792 unsigned last_enqueue_ops = 0;
793 unsigned last_enqueue_items = 0;
794 unsigned last_quota_ops = 0;
795 unsigned last_quota_items = 0;
796 unsigned lcore_id = rte_lcore_id();
/* shortcut to this lcore's stats slot inside the ring */
797 struct rte_ring_debug_stats *ring_stats = &r->stats[lcore_id];
799 printf("Test the ring stats.\n");
801 /* Reset the watermark in case it was set in another test. */
802 rte_ring_set_water_mark(r, 0);
804 /* Reset the ring stats. */
805 memset(&r->stats[lcore_id], 0, sizeof(r->stats[lcore_id]));
807 /* Allocate some dummy object pointers. */
808 src = malloc(RING_SIZE*2*sizeof(void *));
812 for (i = 0; i < RING_SIZE*2 ; i++) {
813 src[i] = (void *)(unsigned long)i;
816 /* Allocate some memory for copied objects. */
817 dst = malloc(RING_SIZE*2*sizeof(void *));
821 memset(dst, 0, RING_SIZE*2*sizeof(void *));
823 /* Set the head and tail pointers. */
827 /* Do Enqueue tests. */
/* NOTE(review): message says "dequeue" but this section tests
 * the ENQUEUE stats -- misleading log text in the original. */
828 printf("Test the dequeue stats.\n");
830 /* Fill the ring up to RING_SIZE -1. */
831 printf("Fill the ring.\n");
832 for (i = 0; i< (RING_SIZE/MAX_BULK); i++) {
833 rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
837 /* Adjust for final enqueue = MAX_BULK -1. */
840 printf("Verify that the ring is full.\n");
841 if (rte_ring_full(r) != 1)
845 printf("Verify the enqueue success stats.\n");
846 /* Stats should match above enqueue operations to fill the ring. */
847 if (ring_stats->enq_success_bulk != (RING_SIZE/MAX_BULK))
850 /* Current max objects is RING_SIZE -1. */
851 if (ring_stats->enq_success_objs != RING_SIZE -1)
854 /* Shouldn't have any failures yet. */
855 if (ring_stats->enq_fail_bulk != 0)
857 if (ring_stats->enq_fail_objs != 0)
/* --- enqueue-failure stats on a full ring, all four variants --- */
861 printf("Test stats for SP burst enqueue to a full ring.\n");
863 ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
864 if ((ret & RTE_RING_SZ_MASK) != 0)
867 failed_enqueue_ops += 1;
868 failed_enqueue_items += num_items;
870 /* The enqueue should have failed. */
871 if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
873 if (ring_stats->enq_fail_objs != failed_enqueue_items)
877 printf("Test stats for SP bulk enqueue to a full ring.\n");
879 ret = rte_ring_sp_enqueue_bulk(r, cur_src, num_items);
883 failed_enqueue_ops += 1;
884 failed_enqueue_items += num_items;
886 /* The enqueue should have failed. */
887 if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
889 if (ring_stats->enq_fail_objs != failed_enqueue_items)
893 printf("Test stats for MP burst enqueue to a full ring.\n");
895 ret = rte_ring_mp_enqueue_burst(r, cur_src, num_items);
896 if ((ret & RTE_RING_SZ_MASK) != 0)
899 failed_enqueue_ops += 1;
900 failed_enqueue_items += num_items;
902 /* The enqueue should have failed. */
903 if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
905 if (ring_stats->enq_fail_objs != failed_enqueue_items)
909 printf("Test stats for MP bulk enqueue to a full ring.\n");
911 ret = rte_ring_mp_enqueue_bulk(r, cur_src, num_items);
915 failed_enqueue_ops += 1;
916 failed_enqueue_items += num_items;
918 /* The enqueue should have failed. */
919 if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
921 if (ring_stats->enq_fail_objs != failed_enqueue_items)
925 /* Do Dequeue tests. */
926 printf("Test the dequeue stats.\n");
928 printf("Empty the ring.\n");
929 for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
930 rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
934 /* There was only RING_SIZE -1 objects to dequeue. */
937 printf("Verify ring is empty.\n");
938 if (1 != rte_ring_empty(r))
941 printf("Verify the dequeue success stats.\n");
942 /* Stats should match above dequeue operations. */
943 if (ring_stats->deq_success_bulk != (RING_SIZE/MAX_BULK))
946 /* Objects dequeued is RING_SIZE -1. */
947 if (ring_stats->deq_success_objs != RING_SIZE -1)
950 /* Shouldn't have any dequeue failure stats yet. */
951 if (ring_stats->deq_fail_bulk != 0)
/* --- dequeue-failure stats on an empty ring, all four variants --- */
954 printf("Test stats for SC burst dequeue with an empty ring.\n");
956 ret = rte_ring_sc_dequeue_burst(r, cur_dst, num_items);
957 if ((ret & RTE_RING_SZ_MASK) != 0)
960 failed_dequeue_ops += 1;
961 failed_dequeue_items += num_items;
963 /* The dequeue should have failed. */
964 if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
966 if (ring_stats->deq_fail_objs != failed_dequeue_items)
970 printf("Test stats for SC bulk dequeue with an empty ring.\n");
972 ret = rte_ring_sc_dequeue_bulk(r, cur_dst, num_items);
976 failed_dequeue_ops += 1;
977 failed_dequeue_items += num_items;
979 /* The dequeue should have failed. */
980 if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
982 if (ring_stats->deq_fail_objs != failed_dequeue_items)
986 printf("Test stats for MC burst dequeue with an empty ring.\n");
988 ret = rte_ring_mc_dequeue_burst(r, cur_dst, num_items);
989 if ((ret & RTE_RING_SZ_MASK) != 0)
991 failed_dequeue_ops += 1;
992 failed_dequeue_items += num_items;
994 /* The dequeue should have failed. */
995 if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
997 if (ring_stats->deq_fail_objs != failed_dequeue_items)
1001 printf("Test stats for MC bulk dequeue with an empty ring.\n");
1003 ret = rte_ring_mc_dequeue_bulk(r, cur_dst, num_items);
1007 failed_dequeue_ops += 1;
1008 failed_dequeue_items += num_items;
1010 /* The dequeue should have failed. */
1011 if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
1013 if (ring_stats->deq_fail_objs != failed_dequeue_items)
1017 printf("Test total enqueue/dequeue stats.\n");
1018 /* At this point the enqueue and dequeue stats should be the same. */
1019 if (ring_stats->enq_success_bulk != ring_stats->deq_success_bulk)
1021 if (ring_stats->enq_success_objs != ring_stats->deq_success_objs)
1023 if (ring_stats->enq_fail_bulk != ring_stats->deq_fail_bulk)
1025 if (ring_stats->enq_fail_objs != ring_stats->deq_fail_objs)
1029 /* Watermark Tests. */
1030 printf("Test the watermark/quota stats.\n");
1032 printf("Verify the initial watermark stats.\n");
1033 /* Watermark stats should be 0 since there is no watermark. */
1034 if (ring_stats->enq_quota_bulk != 0)
1036 if (ring_stats->enq_quota_objs != 0)
1039 /* Set a watermark. */
1040 rte_ring_set_water_mark(r, 16)
1042 /* Reset pointers. */
1046 last_enqueue_ops = ring_stats->enq_success_bulk;
1047 last_enqueue_items = ring_stats->enq_success_objs;
/* enqueues below the watermark count as plain successes, not quota */
1050 printf("Test stats for SP burst enqueue below watermark.\n");
1052 ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
1053 if ((ret & RTE_RING_SZ_MASK) != num_items)
1056 /* Watermark stats should still be 0. */
1057 if (ring_stats->enq_quota_bulk != 0)
1059 if (ring_stats->enq_quota_objs != 0)
1062 /* Success stats should have increased. */
1063 if (ring_stats->enq_success_bulk != last_enqueue_ops + 1)
1065 if (ring_stats->enq_success_objs != last_enqueue_items + num_items)
1068 last_enqueue_ops = ring_stats->enq_success_bulk;
1069 last_enqueue_items = ring_stats->enq_success_objs;
/* crossing the watermark increments the quota counters instead */
1072 printf("Test stats for SP burst enqueue at watermark.\n");
1074 ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
1075 if ((ret & RTE_RING_SZ_MASK) != num_items)
1078 /* Watermark stats should have changed. */
1079 if (ring_stats->enq_quota_bulk != 1)
1081 if (ring_stats->enq_quota_objs != num_items)
1084 last_quota_ops = ring_stats->enq_quota_bulk;
1085 last_quota_items = ring_stats->enq_quota_objs;
1088 printf("Test stats for SP burst enqueue above watermark.\n");
1090 ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
1091 if ((ret & RTE_RING_SZ_MASK) != num_items)
1094 /* Watermark stats should have changed. */
1095 if (ring_stats->enq_quota_bulk != last_quota_ops +1)
1097 if (ring_stats->enq_quota_objs != last_quota_items + num_items)
1100 last_quota_ops = ring_stats->enq_quota_bulk;
1101 last_quota_items = ring_stats->enq_quota_objs;
1104 printf("Test stats for MP burst enqueue above watermark.\n");
1106 ret = rte_ring_mp_enqueue_burst(r, cur_src, num_items);
1107 if ((ret & RTE_RING_SZ_MASK) != num_items)
1110 /* Watermark stats should have changed. */
1111 if (ring_stats->enq_quota_bulk != last_quota_ops +1)
1113 if (ring_stats->enq_quota_objs != last_quota_items + num_items)
1116 last_quota_ops = ring_stats->enq_quota_bulk;
1117 last_quota_items = ring_stats->enq_quota_objs;
1120 printf("Test stats for SP bulk enqueue above watermark.\n");
1122 ret = rte_ring_sp_enqueue_bulk(r, cur_src, num_items);
1126 /* Watermark stats should have changed. */
1127 if (ring_stats->enq_quota_bulk != last_quota_ops +1)
1129 if (ring_stats->enq_quota_objs != last_quota_items + num_items)
1132 last_quota_ops = ring_stats->enq_quota_bulk;
1133 last_quota_items = ring_stats->enq_quota_objs;
1136 printf("Test stats for MP bulk enqueue above watermark.\n");
1138 ret = rte_ring_mp_enqueue_bulk(r, cur_src, num_items);
1142 /* Watermark stats should have changed. */
1143 if (ring_stats->enq_quota_bulk != last_quota_ops +1)
1145 if (ring_stats->enq_quota_objs != last_quota_items + num_items)
1148 printf("Test watermark success stats.\n");
1149 /* Success stats should be same as last non-watermarked enqueue. */
1150 if (ring_stats->enq_success_bulk != last_enqueue_ops)
1152 if (ring_stats->enq_success_objs != last_enqueue_items)
1158 /* Empty the ring. */
1159 for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
1160 rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
1161 cur_dst += MAX_BULK;
1164 /* Reset the watermark. */
1165 rte_ring_set_water_mark(r, 0);
1167 /* Reset the ring stats. */
1168 memset(&r->stats[lcore_id], 0, sizeof(r->stats[lcore_id]));
1170 /* Free memory before test completed */
1187 * it will always fail to create ring with a wrong ring size number in this function
/*
 * rte_ring_create() must fail for a count that is not a power of two
 * and for a count that exceeds RTE_RING_SZ_MASK.  Each create must
 * return NULL (the NULL checks are not visible in this chunk).
 */
1190 test_ring_creation_with_wrong_size(void)
1192 struct rte_ring * rp = NULL;
1194 /* Test if ring size is not power of 2 */
1195 rp = rte_ring_create("test_bad_ring_size", RING_SIZE + 1, SOCKET_ID_ANY, 0);
1200 /* Test if ring size is exceeding the limit */
1201 rp = rte_ring_create("test_bad_ring_size", (RTE_RING_SZ_MASK + 1), SOCKET_ID_ANY, 0);
1209 * it tests if it would always fail to create ring with an used ring name
/*
 * Creating a second ring named "test" (already created by the test
 * entry point) must fail; the expected-NULL check is not visible here.
 */
1212 test_ring_creation_with_an_used_name(void)
1214 struct rte_ring * rp;
1216 rp = rte_ring_create("test", RING_SIZE, SOCKET_ID_ANY, 0);
1224 * Test to if a non-power of 2 count causes the create
1225 * function to fail correctly
/*
 * 4097 is not a power of two, so the create must fail (return NULL);
 * the NULL check and return are not visible in this chunk.
 */
1228 test_create_count_odd(void)
1230 struct rte_ring *r = rte_ring_create("test_ring_count",
1231 4097, SOCKET_ID_ANY, 0 );
1239 test_lookup_null(void)
1241 struct rte_ring *rlp = rte_ring_lookup("ring_not_found");
1243 if (rte_errno != ENOENT){
1244 printf( "test failed to returnn error on null pointer\n");
1251 * it tests some more basic ring operations
/*
 * Extra single-producer/single-consumer coverage on a dedicated ring
 * "test_ring_basic_ex": create + lookup, empty/full predicates around a
 * full enqueue/dequeue cycle, and the generic burst wrappers.
 * NOTE(review): error branches, cleanup (rte_free / return) and several
 * statements are not visible in this chunk.
 */
1254 test_ring_basic_ex(void)
1258 struct rte_ring * rp;
1261 obj = rte_calloc("test_ring_basic_ex_malloc", RING_SIZE, sizeof(void *), 0);
1263 printf("test_ring_basic_ex fail to rte_malloc\n");
/* SP/SC ring: single-producer enqueue, single-consumer dequeue only */
1267 rp = rte_ring_create("test_ring_basic_ex", RING_SIZE, SOCKET_ID_ANY,
1268 RING_F_SP_ENQ | RING_F_SC_DEQ);
1270 printf("test_ring_basic_ex fail to create ring\n");
1274 if (rte_ring_lookup("test_ring_basic_ex") != rp) {
1278 if (rte_ring_empty(rp) != 1) {
1279 printf("test_ring_basic_ex ring is not empty but it should be\n");
1283 printf("%u ring entries are now free\n", rte_ring_free_count(rp));
/* NOTE(review): return values of rte_ring_enqueue() are ignored; with
 * usable capacity RING_SIZE - 1 the last enqueue presumably fails and
 * the ring is then full -- confirm against the canonical source. */
1285 for (i = 0; i < RING_SIZE; i ++) {
1286 rte_ring_enqueue(rp, obj[i]);
1289 if (rte_ring_full(rp) != 1) {
1290 printf("test_ring_basic_ex ring is not full but it should be\n");
/* drain everything back out (return values ignored here as well) */
1294 for (i = 0; i < RING_SIZE; i ++) {
1295 rte_ring_dequeue(rp, &obj[i]);
1298 if (rte_ring_empty(rp) != 1) {
1299 printf("test_ring_basic_ex ring is not empty but it should be\n");
1303 /* Covering the ring burst operation */
1304 ret = rte_ring_enqueue_burst(rp, obj, 2);
1305 if ((ret & RTE_RING_SZ_MASK) != 2) {
1306 printf("test_ring_basic_ex: rte_ring_enqueue_burst fails \n");
1310 ret = rte_ring_dequeue_burst(rp, obj, 2);
1312 printf("test_ring_basic_ex: rte_ring_dequeue_burst fails \n");
/*
 * NOTE(review): the lines below are the interior of the test_ring()
 * entry point; its signature and opening lines are not visible in this
 * chunk.  It creates the shared test ring "test" (global `r`) and runs
 * every sub-test in sequence, failing the autotest on any error.
 */
1327 /* some more basic operations */
1328 if (test_ring_basic_ex() < 0)
1331 rte_atomic32_init(&synchro);
/* create the default MP/MC ring used by most sub-tests */
1334 r = rte_ring_create("test", RING_SIZE, SOCKET_ID_ANY, 0);
1338 /* retrieve the ring from its name */
1339 if (rte_ring_lookup("test") != r) {
1340 printf("Cannot lookup ring from its name\n");
1344 /* burst operations */
1345 if (test_ring_burst_basic() < 0)
1348 /* basic operations */
1349 if (test_ring_basic() < 0)
/* per-lcore debug statistics (no-op unless RTE_LIBRTE_RING_DEBUG) */
1353 if (test_ring_stats() < 0)
1356 /* basic operations */
1357 if (test_live_watermark_change() < 0)
1360 if ( test_set_watermark() < 0){
1361 printf ("Test failed to detect invalid parameter\n");
1365 printf ( "Test detected forced bad watermark values\n");
1367 if ( test_create_count_odd() < 0){
1368 printf ("Test failed to detect odd count\n");
1372 printf ( "Test detected odd count\n");
1374 if ( test_lookup_null() < 0){
1375 printf ("Test failed to detect NULL ring lookup\n");
1379 printf ( "Test detected NULL ring lookup \n");
1381 /* test of creating ring with wrong size */
1382 if (test_ring_creation_with_wrong_size() < 0)
1385 /* test of creation ring with an used name */
1386 if (test_ring_creation_with_an_used_name() < 0)
1389 /* dump the ring status */
1390 rte_ring_list_dump(stdout);
/* Register this file's entry point as the "ring_autotest" command. */
1395 static struct test_command ring_cmd = {
1396 .command = "ring_autotest",
1397 .callback = test_ring,
1399 REGISTER_TEST_COMMAND(ring_cmd);