4 * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 #include <sys/queue.h>
44 #include <rte_common.h>
46 #include <rte_memory.h>
47 #include <rte_memzone.h>
48 #include <rte_launch.h>
49 #include <rte_cycles.h>
50 #include <rte_tailq.h>
52 #include <rte_per_lcore.h>
53 #include <rte_lcore.h>
54 #include <rte_atomic.h>
55 #include <rte_branch_prediction.h>
56 #include <rte_malloc.h>
58 #include <rte_random.h>
59 #include <rte_common.h>
60 #include <rte_errno.h>
61 #include <rte_hexdump.h>
63 #include <cmdline_parse.h>
71 * #. Basic tests: done on one core:
73 * - Using single producer/single consumer functions:
75 * - Enqueue one object, two objects, MAX_BULK objects
76 * - Dequeue one object, two objects, MAX_BULK objects
77 * - Check that dequeued pointers are correct
79 * - Using multi producers/multi consumers functions:
81 * - Enqueue one object, two objects, MAX_BULK objects
82 * - Dequeue one object, two objects, MAX_BULK objects
83 * - Check that dequeued pointers are correct
85 * - Test watermark and default bulk enqueue/dequeue:
88 * - Set default bulk value
89 * - Enqueue objects, check that -EDQUOT is returned when
90 * watermark is exceeded
91 * - Check that dequeued pointers are correct
93 * #. Check live watermark change
95 * - Start a loop on another lcore that will enqueue and dequeue
96 * objects in a ring. It will monitor the value of watermark.
97 * - At the same time, change the watermark on the master lcore.
98 * - The slave lcore will check that watermark changes from 16 to 32.
100 * #. Performance tests.
102 * This test is done on the following configurations:
104 * - One core enqueuing, one core dequeuing
105 * - One core enqueuing, other cores dequeuing
106 * - One core dequeuing, other cores enqueuing
107 * - Half of the cores enqueuing, the other half dequeuing
109 * When only one core enqueues/dequeues, the test is done with the
110 * SP/SC functions in addition to the MP/MC functions.
112 * The test is done with different bulk size.
114 * On each core, the test enqueues or dequeues objects during
115 * TIME_S seconds. The number of successes and failures are stored on
116 * each core, then summed and displayed.
118 * The test checks that the number of enqueues is equal to the
119 * number of dequeues.
/* Ring under test and shared per-lcore statistics.
 * NOTE(review): this chunk is line-elided; the struct test_stats opening line
 * and several of its fields (e.g. enq_quota, enq_fail, deq_fail, referenced
 * later) are missing from this view. */
122 #define RING_SIZE 4096
/* Start flag polled by slave lcores before beginning timed loops. */
127 static rte_atomic32_t synchro;
/* The single global ring exercised by every test in this file. */
129 static struct rte_ring *r;
132 unsigned enq_success ;
136 unsigned deq_success;
/* Cache-aligned so each lcore's counters live on their own cache line
 * (avoids false sharing between concurrently-updating cores). */
138 } __rte_cache_aligned;
140 static struct test_stats test_stats[RTE_MAX_LCORE];
/* Timed enqueue worker: repeatedly enqueues *count* (taken from *arg) dummy
 * pointers into the global ring *r* through the supplied enqueue function
 * for TIME_S seconds, then records success/quota/fail counts in the shared
 * test_stats[] slot for this lcore.
 * bulk_or_burst: 1 = bulk semantics (ret is 0 / -EDQUOT / -ENOBUFS),
 *                0 = burst semantics (ret is the number actually enqueued).
 * NOTE(review): several body lines are elided in this view (declarations of
 * i/ret/fail/quota, branch bodies, closing braces, final return). */
143 ring_enqueue_test(int (que_func)(struct rte_ring*, void * const *, unsigned),
144 void* arg, unsigned bulk_or_burst)
146 unsigned success = 0;
150 unsigned long dummy_obj;
151 void *obj_table[MAX_BULK];
153 unsigned lcore_id = rte_lcore_id();
/* Bulk/burst size is passed by pointer so remote-launched lcores share it. */
154 unsigned count = *((unsigned*)arg);
155 uint64_t start_cycles, end_cycles;
156 uint64_t time_diff = 0, hz = rte_get_hpet_hz();
158 /* init dummy object table */
159 for (i = 0; i< MAX_BULK; i++) {
/* Encode lcore id into the fake pointer so entries are distinguishable. */
160 dummy_obj = lcore_id + 0x1000 + i;
161 obj_table[i] = (void *)dummy_obj;
164 /* wait synchro for slaves */
165 if (lcore_id != rte_get_master_lcore())
/* Busy-wait until the master sets synchro to 1 (test start barrier). */
166 while (rte_atomic32_read(&synchro) == 0);
168 start_cycles = rte_get_hpet_cycles();
170 /* enqueue as many object as possible */
171 while (time_diff/hz < TIME_S) {
/* Inner loop of N iterations between clock reads to amortize HPET cost. */
172 for (i = 0; likely(i < N); i++) {
173 ret = que_func(r, obj_table, count);
176 * 1: for bulk operation
177 * 0: for burst operation
180 /* The *count* objects enqueued, unless fail */
/* -EDQUOT: enqueue succeeded but the watermark was exceeded. */
183 else if (ret == -EDQUOT)
188 /* The actual objects enqueued */
/* Burst path: low bits of ret carry the number of objects enqueued. */
190 success += (ret & RTE_RING_SZ_MASK);
195 end_cycles = rte_get_hpet_cycles();
196 time_diff = end_cycles - start_cycles;
199 /* write statistics in a shared structure */
200 test_stats[lcore_id].enq_success = success;
201 test_stats[lcore_id].enq_quota = quota;
202 test_stats[lcore_id].enq_fail = fail;
/* Timed dequeue worker: mirror of ring_enqueue_test for the consumer side.
 * Repeatedly dequeues *count* objects via the supplied dequeue function for
 * TIME_S seconds and stores success/fail counters in test_stats[lcore_id].
 * bulk_or_burst: 1 = bulk return convention, 0 = burst return convention.
 * NOTE(review): body lines elided here too (i/ret/fail declarations, the
 * success/fail accounting inside the loop, closing braces, return). */
208 ring_dequeue_test(int (que_func)(struct rte_ring*, void **, unsigned),
209 void* arg, unsigned bulk_or_burst)
211 unsigned success = 0;
214 void *obj_table[MAX_BULK];
216 unsigned lcore_id = rte_lcore_id();
217 unsigned count = *((unsigned*)arg);
218 uint64_t start_cycles, end_cycles;
219 uint64_t time_diff = 0, hz = rte_get_hpet_hz();
221 /* wait synchro for slaves */
222 if (lcore_id != rte_get_master_lcore())
/* Spin until the master releases the start barrier. */
223 while (rte_atomic32_read(&synchro) == 0);
225 start_cycles = rte_get_hpet_cycles();
227 /* dequeue as many object as possible */
228 while (time_diff/hz < TIME_S) {
229 for (i = 0; likely(i < N); i++) {
230 ret = que_func(r, obj_table, count);
233 * 1: for bulk operation
234 * 0: for burst operation
248 end_cycles = rte_get_hpet_cycles();
249 time_diff = end_cycles - start_cycles;
252 /* write statistics in a shared structure */
253 test_stats[lcore_id].deq_success = success;
254 test_stats[lcore_id].deq_fail = fail;
/* lcore entry point: single-producer bulk enqueue worker (bulk mode = 1).
 * Signature matches rte_eal_remote_launch's int (*)(void *). */
260 test_ring_per_core_sp_enqueue(void *arg)
262 return ring_enqueue_test(&rte_ring_sp_enqueue_bulk, arg, 1);
/* lcore entry point: multi-producer bulk enqueue worker (bulk mode = 1). */
266 test_ring_per_core_mp_enqueue(void *arg)
268 return ring_enqueue_test(&rte_ring_mp_enqueue_bulk, arg, 1);
/* lcore entry point: multi-consumer bulk dequeue worker (bulk mode = 1). */
272 test_ring_per_core_mc_dequeue(void *arg)
274 return ring_dequeue_test(&rte_ring_mc_dequeue_bulk, arg, 1);
/* lcore entry point: single-consumer bulk dequeue worker (bulk mode = 1). */
278 test_ring_per_core_sc_dequeue(void *arg)
280 return ring_dequeue_test(&rte_ring_sc_dequeue_bulk, arg, 1);
/* lcore entry point: single-producer burst enqueue worker (burst mode = 0). */
284 test_ring_per_core_sp_enqueue_burst(void *arg)
286 return ring_enqueue_test(&rte_ring_sp_enqueue_burst, arg, 0);
/* lcore entry point: multi-producer burst enqueue worker (burst mode = 0). */
290 test_ring_per_core_mp_enqueue_burst(void *arg)
292 return ring_enqueue_test(&rte_ring_mp_enqueue_burst, arg, 0);
/* lcore entry point: multi-consumer burst dequeue worker (burst mode = 0). */
296 test_ring_per_core_mc_dequeue_burst(void *arg)
298 return ring_dequeue_test(&rte_ring_mc_dequeue_burst, arg, 0);
/* lcore entry point: single-consumer burst dequeue worker (burst mode = 0). */
302 test_ring_per_core_sc_dequeue_burst(void *arg)
304 return ring_dequeue_test(&rte_ring_sc_dequeue_burst, arg, 0);
/* Assertion helper used by the full/empty test: prints the failing condition
 * with file location when *exp* is false.
 * NOTE(review): the do{...}while(0) wrapper and the failure return/goto line
 * are elided from this view. */
307 #define TEST_RING_VERIFY(exp) \
309 printf("error at %s:%d\tcondition " #exp " failed\n", \
310 __func__, __LINE__); \
/* Iteration count for test_ring_basic_full_empty (sic: "EMTPY" typo is in
 * the original identifier; also used at its call site, so left as-is). */
315 #define TEST_RING_FULL_EMTPY_ITER 8
/* Core of the performance test: selects bulk-or-burst SP/MP enqueue and
 * SC/MC dequeue workers, launches them on slave lcores (the master runs a
 * dequeue worker itself), waits for completion, sums per-lcore stats,
 * drains leftovers from the ring, and verifies enqueue total == dequeue
 * total before printing the per-second rate.
 * NOTE(review): many lines are elided (lcore_id/ret/obj declarations, the
 * if/else conditions around enq_f/deq_f selection, loop bodies that count
 * launched cores, the drain loop body incrementing deq_remain, returns). */
319 launch_cores(unsigned enq_core_count, unsigned deq_core_count,
320 unsigned n_enq_bulk, unsigned n_deq_bulk,
321 int sp, int sc, int bulk_not_burst)
325 unsigned rate, deq_remain = 0;
326 unsigned enq_total, deq_total;
327 struct test_stats sum;
328 int (*enq_f)(void *);
329 int (*deq_f)(void *);
330 unsigned cores = enq_core_count + deq_core_count;
/* Hold slaves at the start barrier until everything is launched. */
333 rte_atomic32_set(&synchro, 0);
335 printf("ring_autotest e/d_core=%u,%u e/d_bulk=%u,%u ",
336 enq_core_count, deq_core_count, n_enq_bulk, n_deq_bulk);
337 printf("sp=%d sc=%d ", sp, sc);
339 if (bulk_not_burst) {
340 /* set enqueue function to be used */
342 enq_f = test_ring_per_core_sp_enqueue;
344 enq_f = test_ring_per_core_mp_enqueue;
346 /* set dequeue function to be used */
348 deq_f = test_ring_per_core_sc_dequeue;
350 deq_f = test_ring_per_core_mc_dequeue;
353 /* set enqueue function to be used */
355 enq_f = test_ring_per_core_sp_enqueue_burst;
357 enq_f = test_ring_per_core_mp_enqueue_burst;
359 /* set dequeue function to be used */
361 deq_f = test_ring_per_core_sc_dequeue_burst;
363 deq_f = test_ring_per_core_mc_dequeue_burst;
/* Distribute enqueue then dequeue workers over the slave lcores. */
366 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
367 if (enq_core_count != 0) {
369 rte_eal_remote_launch(enq_f, &n_enq_bulk, lcore_id);
/* != 1 because the master lcore itself runs one dequeue worker below. */
371 if (deq_core_count != 1) {
373 rte_eal_remote_launch(deq_f, &n_deq_bulk, lcore_id);
377 memset(test_stats, 0, sizeof(test_stats));
379 /* start synchro and launch test on master */
380 rte_atomic32_set(&synchro, 1);
381 ret = deq_f(&n_deq_bulk);
/* Join all slaves before reading their stats. */
384 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
388 if (rte_eal_wait_lcore(lcore_id) < 0)
392 memset(&sum, 0, sizeof(sum));
393 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
394 sum.enq_success += test_stats[lcore_id].enq_success;
395 sum.enq_quota += test_stats[lcore_id].enq_quota;
396 sum.enq_fail += test_stats[lcore_id].enq_fail;
397 sum.deq_success += test_stats[lcore_id].deq_success;
398 sum.deq_fail += test_stats[lcore_id].deq_fail;
/* Drain objects left in the ring so enq/deq totals can balance. */
402 while (rte_ring_sc_dequeue(r, &obj) == 0)
406 printf("per-lcore test returned -1\n");
/* -EDQUOT enqueues still placed objects, so quota counts as enqueued. */
410 enq_total = sum.enq_success + sum.enq_quota;
411 deq_total = sum.deq_success + deq_remain;
413 rate = deq_total/TIME_S;
415 printf("rate_persec=%u\n", rate);
/* Conservation check: everything enqueued must have been dequeued. */
417 if (enq_total != deq_total) {
418 printf("invalid enq/deq_success counter: %u %u\n",
419 enq_total, deq_total);
/* Runs launch_cores for every applicable SP/SC combination: the sp (resp.
 * sc) loop only reaches 1 when there is exactly one enqueuing (resp.
 * dequeuing) core, since SP/SC variants require a single producer/consumer.
 * NOTE(review): declarations of sp/sc/do_sp/do_sc/ret, the error check on
 * ret, and the return are elided from this view. */
427 do_one_ring_test2(unsigned enq_core_count, unsigned deq_core_count,
428 unsigned n_enq_bulk, unsigned n_deq_bulk, unsigned bulk_or_burst)
434 do_sp = (enq_core_count == 1) ? 1 : 0;
435 do_sc = (deq_core_count == 1) ? 1 : 0;
437 for (sp = 0; sp <= do_sp; sp ++) {
438 for (sc = 0; sc <= do_sc; sc ++) {
439 ret = launch_cores(enq_core_count, deq_core_count,
440 n_enq_bulk, n_deq_bulk, sp, sc, bulk_or_burst);
/* Sweeps every enqueue-bulk x dequeue-bulk size combination (tables are
 * zero-terminated) and runs do_one_ring_test2 for each pair.
 * NOTE(review): the loop termination conditions (presumably
 * *bulk_*_ptr != 0 — TODO confirm), ret declaration, error handling and
 * return are elided from this view. */
449 do_one_ring_test(unsigned enq_core_count, unsigned deq_core_count,
450 unsigned bulk_or_burst)
/* Trailing 0 acts as the sentinel ending each sweep. */
452 unsigned bulk_enqueue_tab[] = { 1, 2, 4, 32, 0 };
453 unsigned bulk_dequeue_tab[] = { 1, 2, 4, 32, 0 };
454 unsigned *bulk_enqueue_ptr;
455 unsigned *bulk_dequeue_ptr;
458 for (bulk_enqueue_ptr = bulk_enqueue_tab;
460 bulk_enqueue_ptr++) {
462 for (bulk_dequeue_ptr = bulk_dequeue_tab;
464 bulk_dequeue_ptr++) {
466 ret = do_one_ring_test2(enq_core_count, deq_core_count,
/* Slave-side half of the live-watermark test: for ~2 seconds, enqueue until
 * -EDQUOT (watermark hit), read r->prod.watermark, and verify the only
 * transition observed is 16 -> 32 (performed concurrently by the master in
 * test_live_watermark_change). Finally checks the update landed (wm == 32).
 * NOTE(review): declarations of ret/i/count/diff, several loop/branch
 * closing braces and the error returns are elided from this view. */
478 check_live_watermark_change(__attribute__((unused)) void *dummy)
480 uint64_t hz = rte_get_hpet_hz();
481 void *obj_table[MAX_BULK];
482 unsigned watermark, watermark_old = 16;
483 uint64_t cur_time, end_time;
488 /* init the object table */
489 memset(obj_table, 0, sizeof(obj_table));
/* Run the enqueue/dequeue/check loop for two seconds of HPET time. */
490 end_time = rte_get_hpet_cycles() + (hz * 2);
492 /* check that bulk and watermark are 4 and 32 (respectively) */
495 /* add in ring until we reach watermark */
497 for (i = 0; i < 16; i ++) {
500 ret = rte_ring_enqueue_bulk(r, obj_table, count);
/* Expect exactly -EDQUOT: enqueued, but over the watermark. */
503 if (ret != -EDQUOT) {
504 printf("Cannot enqueue objects, or watermark not "
505 "reached (ret=%d)\n", ret);
509 /* read watermark, the only change allowed is from 16 to 32 */
510 watermark = r->prod.watermark;
511 if (watermark != watermark_old &&
512 (watermark_old != 16 || watermark != 32)) {
513 printf("Bad watermark change %u -> %u\n", watermark_old,
517 watermark_old = watermark;
519 /* dequeue objects from ring */
521 ret = rte_ring_dequeue_bulk(r, obj_table, count);
523 printf("Cannot dequeue (ret=%d)\n", ret);
528 cur_time = rte_get_hpet_cycles();
529 diff = end_time - cur_time;
/* After the loop the master must have raised the watermark to 32. */
532 if (watermark_old != 32 ) {
533 printf(" watermark was not updated (wm=%u)\n",
/* Master-side half of the live-watermark test: sets watermark to 16,
 * launches check_live_watermark_change on the next lcore, then changes the
 * watermark to 32 while that lcore is running, and waits for its verdict.
 * NOTE(review): an rte_delay between launch and the watermark change, plus
 * the final return, appear to be elided from this view. */
542 test_live_watermark_change(void)
544 unsigned lcore_id = rte_lcore_id();
/* skip_master=0, wrap=1: pick any other enabled lcore. */
545 unsigned lcore_id2 = rte_get_next_lcore(lcore_id, 0, 1);
547 printf("Test watermark live modification\n");
548 rte_ring_set_water_mark(r, 16);
550 /* launch a thread that will enqueue and dequeue, checking
551 * watermark and quota */
552 rte_eal_remote_launch(check_live_watermark_change, NULL, lcore_id2);
/* Concurrent update the slave must observe as the 16 -> 32 transition. */
555 rte_ring_set_water_mark(r, 32);
558 if (rte_eal_wait_lcore(lcore_id2) < 0)
564 /* Test for catch on invalid watermark values */
566 test_set_watermark( void ){
570 struct rte_ring *r = rte_ring_lookup("test_ring_basic_ex");
572 printf( " ring lookup failed\n" );
575 count = r->prod.size*2;
576 setwm = rte_ring_set_water_mark(r, count);
577 if (setwm != -EINVAL){
578 printf("Test failed to detect invalid watermark count value\n");
583 rte_ring_set_water_mark(r, count);
584 if (r->prod.watermark != r->prod.size) {
585 printf("Test failed to detect invalid watermark count value\n");
595 * helper routine for test_ring_basic
/* Fills the ring to capacity (RING_SIZE - 1 usable slots) and empties it
 * TEST_RING_FULL_EMTPY_ITER times, starting each iteration at a random head
 * offset, and verifies full/empty/count/free_count invariants plus data
 * integrity via memcmp.
 * NOTE(review): declarations of i/rand, the second argument of the first
 * enqueue/printf (the random size), and the return are elided from view. */
598 test_ring_basic_full_empty(void * const src[], void *dst[])
/* One slot is always kept free by the ring implementation. */
601 const unsigned rsz = RING_SIZE - 1;
603 printf("Basic full/empty test\n");
605 for (i = 0; TEST_RING_FULL_EMTPY_ITER != i; i++) {
607 /* random shift in the ring */
/* Clamp to >= 1 so the shift enqueue is never zero-sized. */
608 rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
609 printf("%s: iteration %u, random shift: %u;\n",
611 TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
613 TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rand));
/* Ring is now empty at a random offset; fill it completely. */
616 TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
618 TEST_RING_VERIFY(0 == rte_ring_free_count(r));
619 TEST_RING_VERIFY(rsz == rte_ring_count(r));
620 TEST_RING_VERIFY(rte_ring_full(r));
621 TEST_RING_VERIFY(0 == rte_ring_empty(r));
/* Drain it and verify the empty-state invariants. */
624 TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rsz));
625 TEST_RING_VERIFY(rsz == rte_ring_free_count(r));
626 TEST_RING_VERIFY(0 == rte_ring_count(r));
627 TEST_RING_VERIFY(0 == rte_ring_full(r));
628 TEST_RING_VERIFY(rte_ring_empty(r));
/* Dequeued data must match what was enqueued. */
631 TEST_RING_VERIFY(0 == memcmp(src, dst, rsz));
/* Functional test of the bulk enqueue/dequeue API: SP/SC then MP/MC paths
 * for 1, 2 and MAX_BULK objects; fill/empty cycles; the full/empty helper;
 * then watermark behavior with default-size enqueue/dequeue; finally the
 * single-object rte_ring_mp_enqueue / rte_ring_mc_dequeue pair.
 * NOTE(review): heavily elided — ret/i declarations, NULL checks after
 * malloc, cur_src/cur_dst (re)initialization, most per-call error checks,
 * the frees, goto fail targets and returns are missing from this view. */
638 test_ring_basic(void)
640 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
642 unsigned i, num_elems;
644 /* alloc dummy object pointers */
/* 2x ring size so every test sequence fits without wrapping the arrays. */
645 src = malloc(RING_SIZE*2*sizeof(void *));
649 for (i = 0; i < RING_SIZE*2 ; i++) {
/* Fake "objects": the index itself cast to a pointer. */
650 src[i] = (void *)(unsigned long)i;
654 /* alloc some room for copied objects */
655 dst = malloc(RING_SIZE*2*sizeof(void *));
659 memset(dst, 0, RING_SIZE*2*sizeof(void *));
/* --- SP/SC bulk path --- */
662 printf("enqueue 1 obj\n");
663 ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1);
668 printf("enqueue 2 objs\n");
669 ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2);
674 printf("enqueue MAX_BULK objs\n");
675 ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK);
680 printf("dequeue 1 obj\n");
681 ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1);
686 printf("dequeue 2 objs\n");
687 ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2);
692 printf("dequeue MAX_BULK objs\n");
693 ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK);
/* Verify FIFO integrity over everything moved so far. */
699 if (memcmp(src, dst, cur_dst - dst)) {
700 rte_hexdump("src", src, cur_src - src);
701 rte_hexdump("dst", dst, cur_dst - dst);
702 printf("data after dequeue is not the same\n");
/* --- MP/MC bulk path (same sequence) --- */
708 printf("enqueue 1 obj\n");
709 ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1);
714 printf("enqueue 2 objs\n");
715 ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2);
720 printf("enqueue MAX_BULK objs\n");
721 ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
726 printf("dequeue 1 obj\n");
727 ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1);
732 printf("dequeue 2 objs\n");
733 ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2);
738 printf("dequeue MAX_BULK objs\n");
739 ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
745 if (memcmp(src, dst, cur_dst - dst)) {
746 rte_hexdump("src", src, cur_src - src);
747 rte_hexdump("dst", dst, cur_dst - dst);
748 printf("data after dequeue is not the same\n");
754 printf("fill and empty the ring\n");
755 for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
756 ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
760 ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
767 if (memcmp(src, dst, cur_dst - dst)) {
768 rte_hexdump("src", src, cur_src - src);
769 rte_hexdump("dst", dst, cur_dst - dst);
770 printf("data after dequeue is not the same\n");
774 if (test_ring_basic_full_empty(src, dst) != 0)
/* --- Watermark behavior with generic enqueue/dequeue --- */
780 printf("test watermark and default bulk enqueue / dequeue\n");
781 rte_ring_set_water_mark(r, 20);
787 ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
788 cur_src += num_elems;
790 printf("Cannot enqueue\n");
/* Second enqueue pushes past the watermark: expect -EDQUOT, not failure. */
793 ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
794 cur_src += num_elems;
795 if (ret != -EDQUOT) {
796 printf("Watermark not exceeded\n");
799 ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
800 cur_dst += num_elems;
802 printf("Cannot dequeue\n");
805 ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
806 cur_dst += num_elems;
808 printf("Cannot dequeue2\n");
813 if (memcmp(src, dst, cur_dst - dst)) {
814 rte_hexdump("src", src, cur_src - src);
815 rte_hexdump("dst", dst, cur_dst - dst);
816 printf("data after dequeue is not the same\n");
/* Single-object API coverage. */
823 ret = rte_ring_mp_enqueue(r, cur_src);
827 ret = rte_ring_mc_dequeue(r, cur_dst);
/* Functional test of the burst enqueue/dequeue API. Burst calls return the
 * number of objects actually moved in the low bits (masked with
 * RTE_RING_SZ_MASK), so the test checks partial transfers: filling to one
 * slot short of full, draining an almost-empty ring, and the generic
 * rte_ring_enqueue_burst/dequeue_burst wrappers. Covers SP/SC then MP/MC.
 * NOTE(review): heavily elided — ret/i declarations, malloc NULL checks,
 * cur_src/cur_dst setup and most cur_src/cur_dst advancing lines, goto
 * fail targets, frees and returns are missing from this view. */
846 test_ring_burst_basic(void)
848 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
852 /* alloc dummy object pointers */
853 src = malloc(RING_SIZE*2*sizeof(void *));
857 for (i = 0; i < RING_SIZE*2 ; i++) {
858 src[i] = (void *)(unsigned long)i;
862 /* alloc some room for copied objects */
863 dst = malloc(RING_SIZE*2*sizeof(void *));
867 memset(dst, 0, RING_SIZE*2*sizeof(void *));
870 printf("Test SP & SC basic functions \n");
871 printf("enqueue 1 obj\n");
872 ret = rte_ring_sp_enqueue_burst(r, cur_src, 1);
/* Burst return carries the count of objects moved in the masked bits. */
874 if ((ret & RTE_RING_SZ_MASK) != 1)
877 printf("enqueue 2 objs\n");
878 ret = rte_ring_sp_enqueue_burst(r, cur_src, 2);
880 if ((ret & RTE_RING_SZ_MASK) != 2)
883 printf("enqueue MAX_BULK objs\n");
884 ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK) ;
886 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
889 printf("dequeue 1 obj\n");
890 ret = rte_ring_sc_dequeue_burst(r, cur_dst, 1) ;
892 if ((ret & RTE_RING_SZ_MASK) != 1)
895 printf("dequeue 2 objs\n");
896 ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2);
898 if ((ret & RTE_RING_SZ_MASK) != 2)
901 printf("dequeue MAX_BULK objs\n");
902 ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
904 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
908 if (memcmp(src, dst, cur_dst - dst)) {
909 rte_hexdump("src", src, cur_src - src);
910 rte_hexdump("dst", dst, cur_dst - dst);
911 printf("data after dequeue is not the same\n");
/* --- SP partial-transfer behavior near a full ring --- */
918 printf("Test enqueue without enough memory space \n");
919 for (i = 0; i< (RING_SIZE/MAX_BULK - 1); i++) {
920 ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
922 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK) {
927 printf("Enqueue 2 objects, free entries = MAX_BULK - 2 \n");
928 ret = rte_ring_sp_enqueue_burst(r, cur_src, 2);
930 if ((ret & RTE_RING_SZ_MASK) != 2)
933 printf("Enqueue the remaining entries = MAX_BULK - 2 \n");
934 /* Always one free entry left */
/* Ask for MAX_BULK; only MAX_BULK - 3 slots remain, expect a partial. */
935 ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
936 cur_src += MAX_BULK - 3;
937 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
940 printf("Test if ring is full \n");
941 if (rte_ring_full(r) != 1)
944 printf("Test enqueue for a full entry \n");
/* Full ring: burst enqueue must move zero objects. */
945 ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
946 if ((ret & RTE_RING_SZ_MASK) != 0)
949 printf("Test dequeue without enough objects \n");
950 for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
951 ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
953 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
957 /* Available memory space for the exact MAX_BULK entries */
958 ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2);
960 if ((ret & RTE_RING_SZ_MASK) != 2)
/* Only MAX_BULK - 3 objects remain; expect a partial dequeue. */
963 ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
964 cur_dst += MAX_BULK - 3;
965 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
968 printf("Test if ring is empty \n");
969 /* Check if ring is empty */
970 if (1 != rte_ring_empty(r))
974 if (memcmp(src, dst, cur_dst - dst)) {
975 rte_hexdump("src", src, cur_src - src);
976 rte_hexdump("dst", dst, cur_dst - dst);
977 printf("data after dequeue is not the same\n");
/* --- MP/MC path: same sequence as SP/SC above --- */
984 printf("Test MP & MC basic functions \n");
986 printf("enqueue 1 obj\n");
987 ret = rte_ring_mp_enqueue_burst(r, cur_src, 1);
989 if ((ret & RTE_RING_SZ_MASK) != 1)
992 printf("enqueue 2 objs\n");
993 ret = rte_ring_mp_enqueue_burst(r, cur_src, 2);
995 if ((ret & RTE_RING_SZ_MASK) != 2)
998 printf("enqueue MAX_BULK objs\n");
999 ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
1000 cur_src += MAX_BULK;
1001 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
1004 printf("dequeue 1 obj\n");
1005 ret = rte_ring_mc_dequeue_burst(r, cur_dst, 1);
1007 if ((ret & RTE_RING_SZ_MASK) != 1)
1010 printf("dequeue 2 objs\n");
1011 ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2);
1013 if ((ret & RTE_RING_SZ_MASK) != 2)
1016 printf("dequeue MAX_BULK objs\n");
1017 ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
1018 cur_dst += MAX_BULK;
1019 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
1023 if (memcmp(src, dst, cur_dst - dst)) {
1024 rte_hexdump("src", src, cur_src - src);
1025 rte_hexdump("dst", dst, cur_dst - dst);
1026 printf("data after dequeue is not the same\n");
1033 printf("fill and empty the ring\n");
1034 for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
1035 ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
1036 cur_src += MAX_BULK;
1037 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
1039 ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
1040 cur_dst += MAX_BULK;
1041 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
1046 if (memcmp(src, dst, cur_dst - dst)) {
1047 rte_hexdump("src", src, cur_src - src);
1048 rte_hexdump("dst", dst, cur_dst - dst);
1049 printf("data after dequeue is not the same\n");
1056 printf("Test enqueue without enough memory space \n");
1057 for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
1058 ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
1059 cur_src += MAX_BULK;
1060 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
1064 /* Available memory space for the exact MAX_BULK objects */
1065 ret = rte_ring_mp_enqueue_burst(r, cur_src, 2);
1067 if ((ret & RTE_RING_SZ_MASK) != 2)
/* Partial enqueue into the last MAX_BULK - 3 free slots. */
1070 ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
1071 cur_src += MAX_BULK - 3;
1072 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
1076 printf("Test dequeue without enough objects \n");
1077 for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
1078 ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
1079 cur_dst += MAX_BULK;
1080 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
1084 /* Available objects - the exact MAX_BULK */
1085 ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2);
1087 if ((ret & RTE_RING_SZ_MASK) != 2)
1090 ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
1091 cur_dst += MAX_BULK - 3;
1092 if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
1096 if (memcmp(src, dst, cur_dst - dst)) {
1097 rte_hexdump("src", src, cur_src - src);
1098 rte_hexdump("dst", dst, cur_dst - dst);
1099 printf("data after dequeue is not the same\n");
/* Generic wrapper coverage (dispatches to SP/MP, SC/MC internally). */
1106 printf("Covering rte_ring_enqueue_burst functions \n");
1108 ret = rte_ring_enqueue_burst(r, cur_src, 2);
1110 if ((ret & RTE_RING_SZ_MASK) != 2)
1113 ret = rte_ring_dequeue_burst(r, cur_dst, 2);
1118 /* Free memory before test completed */
/* Exercises the per-lcore debug statistics of the ring (enq/deq success,
 * fail, and quota counters). Compiled to a no-op stub unless
 * RTE_LIBRTE_RING_DEBUG is enabled. Sequence: fill the ring and check
 * success stats; force SP/MP burst+bulk enqueue failures on a full ring;
 * empty it and check dequeue stats; force SC/MC burst+bulk dequeue failures
 * on an empty ring; verify enq and deq totals match; then verify quota
 * (watermark) stats around a watermark of 16.
 * NOTE(review): elided lines include ret/i declarations, the #else/#endif
 * around the debug stub, cur_src/cur_dst setup, num_items assignments
 * before several calls, goto fail targets, frees and returns. */
1134 test_ring_stats(void)
1137 #ifndef RTE_LIBRTE_RING_DEBUG
/* Stats are only collected in debug builds; otherwise skip the test. */
1138 printf("Enable RTE_LIBRTE_RING_DEBUG to test ring stats.\n");
1141 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
1144 unsigned num_items = 0;
1145 unsigned failed_enqueue_ops = 0;
1146 unsigned failed_enqueue_items = 0;
1147 unsigned failed_dequeue_ops = 0;
1148 unsigned failed_dequeue_items = 0;
1149 unsigned last_enqueue_ops = 0;
1150 unsigned last_enqueue_items = 0;
1151 unsigned last_quota_ops = 0;
1152 unsigned last_quota_items = 0;
1153 unsigned lcore_id = rte_lcore_id();
/* All calls below run on this lcore, so only its stats slot changes. */
1154 struct rte_ring_debug_stats *ring_stats = &r->stats[lcore_id];
1156 printf("Test the ring stats.\n");
1158 /* Reset the watermark in case it was set in another test. */
1159 rte_ring_set_water_mark(r, 0);
1161 /* Reset the ring stats. */
1162 memset(&r->stats[lcore_id], 0, sizeof(r->stats[lcore_id]));
1164 /* Allocate some dummy object pointers. */
1165 src = malloc(RING_SIZE*2*sizeof(void *));
1169 for (i = 0; i < RING_SIZE*2 ; i++) {
1170 src[i] = (void *)(unsigned long)i;
1173 /* Allocate some memory for copied objects. */
1174 dst = malloc(RING_SIZE*2*sizeof(void *));
1178 memset(dst, 0, RING_SIZE*2*sizeof(void *));
1180 /* Set the head and tail pointers. */
1184 /* Do Enqueue tests. */
1185 printf("Test the dequeue stats.\n")
1187 /* Fill the ring up to RING_SIZE -1. */
1188 printf("Fill the ring.\n");
1189 for (i = 0; i< (RING_SIZE/MAX_BULK); i++) {
1190 rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
1191 cur_src += MAX_BULK;
1194 /* Adjust for final enqueue = MAX_BULK -1. */
1197 printf("Verify that the ring is full.\n");
1198 if (rte_ring_full(r) != 1)
1202 printf("Verify the enqueue success stats.\n");
1203 /* Stats should match above enqueue operations to fill the ring. */
1204 if (ring_stats->enq_success_bulk != (RING_SIZE/MAX_BULK))
1207 /* Current max objects is RING_SIZE -1. */
1208 if (ring_stats->enq_success_objs != RING_SIZE -1)
1211 /* Shouldn't have any failures yet. */
1212 if (ring_stats->enq_fail_bulk != 0)
1214 if (ring_stats->enq_fail_objs != 0)
/* --- Enqueue-failure stats on a full ring --- */
1218 printf("Test stats for SP burst enqueue to a full ring.\n");
1220 ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
1221 if ((ret & RTE_RING_SZ_MASK) != 0)
/* Track expected failure counters locally and compare after each call. */
1224 failed_enqueue_ops += 1;
1225 failed_enqueue_items += num_items;
1227 /* The enqueue should have failed. */
1228 if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
1230 if (ring_stats->enq_fail_objs != failed_enqueue_items)
1234 printf("Test stats for SP bulk enqueue to a full ring.\n");
1236 ret = rte_ring_sp_enqueue_bulk(r, cur_src, num_items);
1237 if (ret != -ENOBUFS)
1240 failed_enqueue_ops += 1;
1241 failed_enqueue_items += num_items;
1243 /* The enqueue should have failed. */
1244 if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
1246 if (ring_stats->enq_fail_objs != failed_enqueue_items)
1250 printf("Test stats for MP burst enqueue to a full ring.\n");
1252 ret = rte_ring_mp_enqueue_burst(r, cur_src, num_items);
1253 if ((ret & RTE_RING_SZ_MASK) != 0)
1256 failed_enqueue_ops += 1;
1257 failed_enqueue_items += num_items;
1259 /* The enqueue should have failed. */
1260 if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
1262 if (ring_stats->enq_fail_objs != failed_enqueue_items)
1266 printf("Test stats for MP bulk enqueue to a full ring.\n");
1268 ret = rte_ring_mp_enqueue_bulk(r, cur_src, num_items);
1269 if (ret != -ENOBUFS)
1272 failed_enqueue_ops += 1;
1273 failed_enqueue_items += num_items;
1275 /* The enqueue should have failed. */
1276 if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
1278 if (ring_stats->enq_fail_objs != failed_enqueue_items)
1282 /* Do Dequeue tests. */
1283 printf("Test the dequeue stats.\n");
1285 printf("Empty the ring.\n");
1286 for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
1287 rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
1288 cur_dst += MAX_BULK;
1291 /* There was only RING_SIZE -1 objects to dequeue. */
1294 printf("Verify ring is empty.\n");
1295 if (1 != rte_ring_empty(r))
1298 printf("Verify the dequeue success stats.\n");
1299 /* Stats should match above dequeue operations. */
1300 if (ring_stats->deq_success_bulk != (RING_SIZE/MAX_BULK))
1303 /* Objects dequeued is RING_SIZE -1. */
1304 if (ring_stats->deq_success_objs != RING_SIZE -1)
1307 /* Shouldn't have any dequeue failure stats yet. */
1308 if (ring_stats->deq_fail_bulk != 0)
/* --- Dequeue-failure stats on an empty ring --- */
1311 printf("Test stats for SC burst dequeue with an empty ring.\n");
1313 ret = rte_ring_sc_dequeue_burst(r, cur_dst, num_items);
1314 if ((ret & RTE_RING_SZ_MASK) != 0)
1317 failed_dequeue_ops += 1;
1318 failed_dequeue_items += num_items;
1320 /* The dequeue should have failed. */
1321 if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
1323 if (ring_stats->deq_fail_objs != failed_dequeue_items)
1327 printf("Test stats for SC bulk dequeue with an empty ring.\n");
1329 ret = rte_ring_sc_dequeue_bulk(r, cur_dst, num_items);
1333 failed_dequeue_ops += 1;
1334 failed_dequeue_items += num_items;
1336 /* The dequeue should have failed. */
1337 if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
1339 if (ring_stats->deq_fail_objs != failed_dequeue_items)
1343 printf("Test stats for MC burst dequeue with an empty ring.\n");
1345 ret = rte_ring_mc_dequeue_burst(r, cur_dst, num_items);
1346 if ((ret & RTE_RING_SZ_MASK) != 0)
1348 failed_dequeue_ops += 1;
1349 failed_dequeue_items += num_items;
1351 /* The dequeue should have failed. */
1352 if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
1354 if (ring_stats->deq_fail_objs != failed_dequeue_items)
1358 printf("Test stats for MC bulk dequeue with an empty ring.\n");
1360 ret = rte_ring_mc_dequeue_bulk(r, cur_dst, num_items);
1364 failed_dequeue_ops += 1;
1365 failed_dequeue_items += num_items;
1367 /* The dequeue should have failed. */
1368 if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
1370 if (ring_stats->deq_fail_objs != failed_dequeue_items)
1374 printf("Test total enqueue/dequeue stats.\n");
1375 /* At this point the enqueue and dequeue stats should be the same. */
1376 if (ring_stats->enq_success_bulk != ring_stats->deq_success_bulk)
1378 if (ring_stats->enq_success_objs != ring_stats->deq_success_objs)
1380 if (ring_stats->enq_fail_bulk != ring_stats->deq_fail_bulk)
1382 if (ring_stats->enq_fail_objs != ring_stats->deq_fail_objs)
1386 /* Watermark Tests. */
1387 printf("Test the watermark/quota stats.\n");
1389 printf("Verify the initial watermark stats.\n");
1390 /* Watermark stats should be 0 since there is no watermark. */
1391 if (ring_stats->enq_quota_bulk != 0)
1393 if (ring_stats->enq_quota_objs != 0)
1396 /* Set a watermark. */
1397 rte_ring_set_water_mark(r, 16);
1399 /* Reset pointers. */
1403 last_enqueue_ops = ring_stats->enq_success_bulk;
1404 last_enqueue_items = ring_stats->enq_success_objs;
/* Below the watermark: success counters move, quota counters do not. */
1407 printf("Test stats for SP burst enqueue below watermark.\n");
1409 ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
1410 if ((ret & RTE_RING_SZ_MASK) != num_items)
1413 /* Watermark stats should still be 0. */
1414 if (ring_stats->enq_quota_bulk != 0)
1416 if (ring_stats->enq_quota_objs != 0)
1419 /* Success stats should have increased. */
1420 if (ring_stats->enq_success_bulk != last_enqueue_ops + 1)
1422 if (ring_stats->enq_success_objs != last_enqueue_items + num_items)
1425 last_enqueue_ops = ring_stats->enq_success_bulk;
1426 last_enqueue_items = ring_stats->enq_success_objs;
/* At/above the watermark: quota counters move instead of success ones. */
1429 printf("Test stats for SP burst enqueue at watermark.\n");
1431 ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
1432 if ((ret & RTE_RING_SZ_MASK) != num_items)
1435 /* Watermark stats should have changed. */
1436 if (ring_stats->enq_quota_bulk != 1)
1438 if (ring_stats->enq_quota_objs != num_items)
1441 last_quota_ops = ring_stats->enq_quota_bulk;
1442 last_quota_items = ring_stats->enq_quota_objs;
1445 printf("Test stats for SP burst enqueue above watermark.\n");
1447 ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
1448 if ((ret & RTE_RING_SZ_MASK) != num_items)
1451 /* Watermark stats should have changed. */
1452 if (ring_stats->enq_quota_bulk != last_quota_ops +1)
1454 if (ring_stats->enq_quota_objs != last_quota_items + num_items)
1457 last_quota_ops = ring_stats->enq_quota_bulk;
1458 last_quota_items = ring_stats->enq_quota_objs;
1461 printf("Test stats for MP burst enqueue above watermark.\n");
1463 ret = rte_ring_mp_enqueue_burst(r, cur_src, num_items);
1464 if ((ret & RTE_RING_SZ_MASK) != num_items)
1467 /* Watermark stats should have changed. */
1468 if (ring_stats->enq_quota_bulk != last_quota_ops +1)
1470 if (ring_stats->enq_quota_objs != last_quota_items + num_items)
1473 last_quota_ops = ring_stats->enq_quota_bulk;
1474 last_quota_items = ring_stats->enq_quota_objs;
1477 printf("Test stats for SP bulk enqueue above watermark.\n");
1479 ret = rte_ring_sp_enqueue_bulk(r, cur_src, num_items);
1483 /* Watermark stats should have changed. */
1484 if (ring_stats->enq_quota_bulk != last_quota_ops +1)
1486 if (ring_stats->enq_quota_objs != last_quota_items + num_items)
1489 last_quota_ops = ring_stats->enq_quota_bulk;
1490 last_quota_items = ring_stats->enq_quota_objs;
1493 printf("Test stats for MP bulk enqueue above watermark.\n");
1495 ret = rte_ring_mp_enqueue_bulk(r, cur_src, num_items);
1499 /* Watermark stats should have changed. */
1500 if (ring_stats->enq_quota_bulk != last_quota_ops +1)
1502 if (ring_stats->enq_quota_objs != last_quota_items + num_items)
1505 printf("Test watermark success stats.\n");
1506 /* Success stats should be same as last non-watermarked enqueue. */
1507 if (ring_stats->enq_success_bulk != last_enqueue_ops)
1509 if (ring_stats->enq_success_objs != last_enqueue_items)
1515 /* Empty the ring. */
1516 for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
1517 rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
1518 cur_dst += MAX_BULK;
1521 /* Reset the watermark. */
1522 rte_ring_set_water_mark(r, 0);
1524 /* Reset the ring stats. */
1525 memset(&r->stats[lcore_id], 0, sizeof(r->stats[lcore_id]));
1527 /* Free memory before test completed */
1544 * it will always fail to create ring with a wrong ring size number in this function
1547 test_ring_creation_with_wrong_size(void)
1549 struct rte_ring * rp = NULL;
1551 /* Test if ring size is not power of 2 */
1552 rp = rte_ring_create("test_bad_ring_size", RING_SIZE + 1, SOCKET_ID_ANY, 0);
1557 /* Test if ring size is exceeding the limit */
1558 rp = rte_ring_create("test_bad_ring_size", (RTE_RING_SZ_MASK + 1), SOCKET_ID_ANY, 0);
1566 * it tests if it would always fail to create ring with an used ring name
1569 test_ring_creation_with_an_used_name(void)
1571 struct rte_ring * rp;
1573 rp = rte_ring_create("test", RING_SIZE, SOCKET_ID_ANY, 0);
1581 * Test to if a non-power of 2 count causes the create
1582 * function to fail correctly
1585 test_create_count_odd(void)
1587 struct rte_ring *r = rte_ring_create("test_ring_count",
1588 4097, SOCKET_ID_ANY, 0 );
1596 test_lookup_null(void)
1598 struct rte_ring *rlp = rte_ring_lookup("ring_not_found");
1600 if (rte_errno != ENOENT){
1601 printf( "test failed to returnn error on null pointer\n");
1608 * it tests some more basic ring operations
1611 test_ring_basic_ex(void)
1615 struct rte_ring * rp;
1618 obj = (void **)rte_zmalloc("test_ring_basic_ex_malloc", (RING_SIZE * sizeof(void *)), 0);
1620 printf("test_ring_basic_ex fail to rte_malloc\n");
1624 rp = rte_ring_create("test_ring_basic_ex", RING_SIZE, SOCKET_ID_ANY,
1625 RING_F_SP_ENQ | RING_F_SC_DEQ);
1627 printf("test_ring_basic_ex fail to create ring\n");
1631 if (rte_ring_lookup("test_ring_basic_ex") != rp) {
1635 if (rte_ring_empty(rp) != 1) {
1636 printf("test_ring_basic_ex ring is not empty but it should be\n");
1640 printf("%u ring entries are now free\n", rte_ring_free_count(rp));
1642 for (i = 0; i < RING_SIZE; i ++) {
1643 rte_ring_enqueue(rp, obj[i]);
1646 if (rte_ring_full(rp) != 1) {
1647 printf("test_ring_basic_ex ring is not full but it should be\n");
1651 for (i = 0; i < RING_SIZE; i ++) {
1652 rte_ring_dequeue(rp, &obj[i]);
1655 if (rte_ring_empty(rp) != 1) {
1656 printf("test_ring_basic_ex ring is not empty but it should be\n");
1660 /* Covering the ring burst operation */
1661 ret = rte_ring_enqueue_burst(rp, obj, 2);
1662 if ((ret & RTE_RING_SZ_MASK) != 2) {
1663 printf("test_ring_basic_ex: rte_ring_enqueue_burst fails \n");
1667 ret = rte_ring_dequeue_burst(rp, obj, 2);
1669 printf("test_ring_basic_ex: rte_ring_dequeue_burst fails \n");
1684 unsigned enq_core_count, deq_core_count;
1686 /* some more basic operations */
1687 if (test_ring_basic_ex() < 0)
1690 rte_atomic32_init(&synchro);
1693 r = rte_ring_create("test", RING_SIZE, SOCKET_ID_ANY, 0);
1697 /* retrieve the ring from its name */
1698 if (rte_ring_lookup("test") != r) {
1699 printf("Cannot lookup ring from its name\n");
1703 /* burst operations */
1704 if (test_ring_burst_basic() < 0)
1707 /* basic operations */
1708 if (test_ring_basic() < 0)
1712 if (test_ring_stats() < 0)
1715 /* basic operations */
1716 if (test_live_watermark_change() < 0)
1719 if ( test_set_watermark() < 0){
1720 printf ("Test failed to detect invalid parameter\n");
1724 printf ( "Test detected forced bad watermark values\n");
1726 if ( test_create_count_odd() < 0){
1727 printf ("Test failed to detect odd count\n");
1731 printf ( "Test detected odd count\n");
1733 if ( test_lookup_null() < 0){
1734 printf ("Test failed to detect NULL ring lookup\n");
1738 printf ( "Test detected NULL ring lookup \n");
1740 printf("start performance tests \n");
1742 /* one lcore for enqueue, one for dequeue */
1745 if (do_one_ring_test(enq_core_count, deq_core_count, 1) < 0)
1748 /* max cores for enqueue, one for dequeue */
1749 enq_core_count = rte_lcore_count() - 1;
1751 if (do_one_ring_test(enq_core_count, deq_core_count, 1) < 0)
1754 /* max cores for dequeue, one for enqueue */
1756 deq_core_count = rte_lcore_count() - 1;
1757 if (do_one_ring_test(enq_core_count, deq_core_count, 1) < 0)
1760 /* half for enqueue and half for dequeue */
1761 enq_core_count = rte_lcore_count() / 2;
1762 deq_core_count = rte_lcore_count() / 2;
1763 if (do_one_ring_test(enq_core_count, deq_core_count, 1) < 0)
1766 printf("start performance tests - burst operations \n");
1768 /* one lcore for enqueue, one for dequeue */
1771 if (do_one_ring_test(enq_core_count, deq_core_count, 0) < 0)
1774 /* max cores for enqueue, one for dequeue */
1775 enq_core_count = rte_lcore_count() - 1;
1777 if (do_one_ring_test(enq_core_count, deq_core_count, 0) < 0)
1780 /* max cores for dequeue, one for enqueue */
1782 deq_core_count = rte_lcore_count() - 1;
1783 if (do_one_ring_test(enq_core_count, deq_core_count, 0) < 0)
1786 /* half for enqueue and half for dequeue */
1787 enq_core_count = rte_lcore_count() / 2;
1788 deq_core_count = rte_lcore_count() / 2;
1789 if (do_one_ring_test(enq_core_count, deq_core_count, 0) < 0)
1792 /* test of creating ring with wrong size */
1793 if (test_ring_creation_with_wrong_size() < 0)
1796 /* test of creation ring with an used name */
1797 if (test_ring_creation_with_an_used_name() < 0)
1800 /* dump the ring status */
1801 rte_ring_list_dump();