4 * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 * version: DPDK.L.1.2.3-3
43 #include <sys/queue.h>
45 #include <rte_common.h>
47 #include <rte_memory.h>
48 #include <rte_memzone.h>
49 #include <rte_launch.h>
50 #include <rte_cycles.h>
51 #include <rte_tailq.h>
53 #include <rte_per_lcore.h>
54 #include <rte_lcore.h>
55 #include <rte_atomic.h>
56 #include <rte_branch_prediction.h>
57 #include <rte_malloc.h>
59 #include <rte_random.h>
60 #include <rte_common.h>
61 #include <rte_errno.h>
63 #include <cmdline_parse.h>
71 * #. Basic tests: done on one core:
73 * - Using single producer/single consumer functions:
75 * - Enqueue one object, two objects, MAX_BULK objects
76 * - Dequeue one object, two objects, MAX_BULK objects
77 * - Check that dequeued pointers are correct
79 * - Using multi producers/multi consumers functions:
81 * - Enqueue one object, two objects, MAX_BULK objects
82 * - Dequeue one object, two objects, MAX_BULK objects
83 * - Check that dequeued pointers are correct
85 * - Test watermark and default bulk enqueue/dequeue:
88 * - Set default bulk value
89 * - Enqueue objects, check that -EDQUOT is returned when
90 * watermark is exceeded
91 * - Check that dequeued pointers are correct
93 * #. Check quota and watermark
95 * - Start a loop on another lcore that will enqueue and dequeue
96 * objects in a ring. It will monitor the value of quota (default
97 * bulk count) and watermark.
98 * - At the same time, change the quota and the watermark on the
100 * - The slave lcore will check that bulk count changes from 4 to
101 * 8, and watermark changes from 16 to 32.
103 * #. Performance tests.
105 * This test is done on the following configurations:
107 * - One core enqueuing, one core dequeuing
108 * - One core enqueuing, other cores dequeuing
109 * - One core dequeuing, other cores enqueuing
110 * - Half of the cores enqueuing, the other half dequeuing
112 * When only one core enqueues/dequeues, the test is done with the
113 * SP/SC functions in addition to the MP/MC functions.
115 * The test is done with different bulk sizes.
117 * On each core, the test enqueues or dequeues objects during
118 * TIME_S seconds. The number of successes and failures are stored on
119 * each core, then summed and displayed.
121 * The test checks that the number of enqueues is equal to the
122 * number of dequeues.
/* Size (number of entries) of the ring under test. */
125 #define RING_SIZE 4096
/* Synchronization flag: the master lcore sets it to 1 to release the
 * slave lcores that spin-wait on it in the worker functions. */
130 static rte_atomic32_t synchro;
/* Per-run bulk sizes read by the enqueue/dequeue worker functions. */
132 static unsigned bulk_enqueue;
133 static unsigned bulk_dequeue;
/* The ring shared by (almost) all test routines in this file. */
134 static struct rte_ring *r;
/* Per-lcore statistics, cache-aligned to avoid false sharing between
 * cores.  NOTE(review): the struct opener and some counter fields
 * (enq_quota, enq_fail, deq_fail -- they are summed in launch_cores)
 * are not visible in this chunk. */
137 unsigned enq_success ;
141 unsigned deq_success;
143 } __rte_cache_aligned;
145 static struct test_stats test_stats[RTE_MAX_LCORE];
/*
 * DEFINE_ENQUEUE_FUNCTION(name, enq_code): generates a per-lcore enqueue
 * worker called 'name'.  The worker fills a table of dummy object
 * pointers (derived from the lcore id), waits on the global 'synchro'
 * flag (slave lcores only), then runs 'enq_code' in a loop for TIME_S
 * seconds and finally stores the success, quota (-EDQUOT) and failure
 * counts into test_stats[lcore_id].
 * NOTE(review): several lines of the macro body (the function header,
 * the 'ret'/'fail'/'i'/'N' declarations and the per-result accounting
 * branches) are not visible in this chunk -- confirm against the
 * upstream file before editing the macro itself.
 */
147 #define DEFINE_ENQUEUE_FUNCTION(name, enq_code) \
149 name(__attribute__((unused)) void *arg) \
151 unsigned success = 0; \
152 unsigned quota = 0; \
155 unsigned long dummy_obj; \
156 void *obj_table[MAX_BULK]; \
158 unsigned lcore_id = rte_lcore_id(); \
159 uint64_t start_cycles, end_cycles; \
160 uint64_t time_diff = 0, hz = rte_get_hpet_hz(); \
162 /* init dummy object table */ \
163 for (i = 0; i< MAX_BULK; i++) { \
164 dummy_obj = lcore_id + 0x1000 + i; \
165 obj_table[i] = (void *)dummy_obj; \
168 /* wait synchro for slaves */ \
169 if (lcore_id != rte_get_master_lcore()) \
170 while (rte_atomic32_read(&synchro) == 0); \
172 start_cycles = rte_get_hpet_cycles(); \
174 /* enqueue as many object as possible */ \
175 while (time_diff/hz < TIME_S) { \
176 for (i = 0; likely(i < N); i++) { \
180 else if (ret == -EDQUOT) \
185 end_cycles = rte_get_hpet_cycles(); \
186 time_diff = end_cycles - start_cycles; \
189 /* write statistics in a shared structure */ \
190 test_stats[lcore_id].enq_success = success; \
191 test_stats[lcore_id].enq_quota = quota; \
192 test_stats[lcore_id].enq_fail = fail; \
/*
 * DEFINE_DEQUEUE_FUNCTION(name, deq_code): generates a per-lcore dequeue
 * worker called 'name'.  Symmetric to DEFINE_ENQUEUE_FUNCTION: waits on
 * the 'synchro' flag (slave lcores only), runs 'deq_code' in a loop for
 * TIME_S seconds and stores the success/failure counts into
 * test_stats[lcore_id].
 * NOTE(review): some macro-body lines (function header, 'ret'/'fail'/
 * 'i'/'N' declarations, per-result accounting) are not visible in this
 * chunk.
 */
197 #define DEFINE_DEQUEUE_FUNCTION(name, deq_code) \
199 name(__attribute__((unused)) void *arg) \
201 unsigned success = 0; \
204 void *obj_table[MAX_BULK]; \
206 unsigned lcore_id = rte_lcore_id(); \
207 uint64_t start_cycles, end_cycles; \
208 uint64_t time_diff = 0, hz = rte_get_hpet_hz(); \
210 /* wait synchro for slaves */ \
211 if (lcore_id != rte_get_master_lcore()) \
212 while (rte_atomic32_read(&synchro) == 0); \
214 start_cycles = rte_get_hpet_cycles(); \
216 /* dequeue as many object as possible */ \
217 while (time_diff/hz < TIME_S) { \
218 for (i = 0; likely(i < N); i++) { \
225 end_cycles = rte_get_hpet_cycles(); \
226 time_diff = end_cycles - start_cycles; \
229 /* write statistics in a shared structure */ \
230 test_stats[lcore_id].deq_success = success; \
231 test_stats[lcore_id].deq_fail = fail; \
/* Instantiate the four per-lcore performance workers: single-producer /
 * single-consumer and multi-producer / multi-consumer bulk variants. */
236 DEFINE_ENQUEUE_FUNCTION(test_ring_per_core_sp_enqueue,
237 rte_ring_sp_enqueue_bulk(r, obj_table, bulk_enqueue))
239 DEFINE_DEQUEUE_FUNCTION(test_ring_per_core_sc_dequeue,
240 rte_ring_sc_dequeue_bulk(r, obj_table, bulk_dequeue))
242 DEFINE_ENQUEUE_FUNCTION(test_ring_per_core_mp_enqueue,
243 rte_ring_mp_enqueue_bulk(r, obj_table, bulk_enqueue))
245 DEFINE_DEQUEUE_FUNCTION(test_ring_per_core_mc_dequeue,
246 rte_ring_mc_dequeue_bulk(r, obj_table, bulk_dequeue))
/* Check used by the full/empty test: on failure, prints the location and
 * the failed condition text.  NOTE(review): the enclosing do/if lines of
 * the macro body are not visible in this chunk. */
248 #define TEST_RING_VERIFY(exp) \
250 printf("error at %s:%d\tcondition " #exp " failed\n", \
251 __func__, __LINE__); \
/* Number of iterations of the randomized full/empty test.  The "EMTPY"
 * misspelling is kept: the identifier is referenced elsewhere in this
 * file. */
256 #define TEST_RING_FULL_EMTPY_ITER 8
/*
 * Run one performance measurement: launch 'enq_core_count' enqueuing and
 * 'deq_core_count' dequeuing worker lcores (SP/SC worker variants when
 * sp/sc are non-zero, MP/MC otherwise), release them via 'synchro', wait
 * for completion, sum the per-lcore statistics, drain the ring, and
 * verify that the total number of objects enqueued equals the number
 * dequeued (plus the drained remainder).
 * NOTE(review): the function header, the declarations of 'lcore_id' and
 * 'obj', and several control-flow/return lines are not visible in this
 * chunk.
 */
260 launch_cores(unsigned enq_core_count, unsigned deq_core_count, int sp, int sc)
264 unsigned rate, deq_remain = 0;
265 unsigned enq_total, deq_total;
266 struct test_stats sum;
267 int (*enq_f)(void *);
268 int (*deq_f)(void *);
269 unsigned cores = enq_core_count + deq_core_count;
272 rte_atomic32_set(&synchro, 0);
274 printf("ring_autotest e/d_core=%u,%u e/d_bulk=%u,%u ",
275 enq_core_count, deq_core_count, bulk_enqueue, bulk_dequeue);
276 printf("sp=%d sc=%d ", sp, sc);
278 /* set enqueue function to be used */
280 enq_f = test_ring_per_core_sp_enqueue;
282 enq_f = test_ring_per_core_mp_enqueue;
284 /* set dequeue function to be used */
286 deq_f = test_ring_per_core_sc_dequeue;
288 deq_f = test_ring_per_core_mc_dequeue;
/* launch the workers on the slave lcores */
290 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
291 if (enq_core_count != 0) {
293 rte_eal_remote_launch(enq_f, NULL, lcore_id);
295 if (deq_core_count != 1) {
297 rte_eal_remote_launch(deq_f, NULL, lcore_id);
301 memset(test_stats, 0, sizeof(test_stats));
303 /* start synchro and launch test on master */
304 rte_atomic32_set(&synchro, 1);
/* wait for all slave lcores to finish */
308 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
312 if (rte_eal_wait_lcore(lcore_id) < 0)
/* aggregate per-lcore statistics */
316 memset(&sum, 0, sizeof(sum));
317 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
318 sum.enq_success += test_stats[lcore_id].enq_success;
319 sum.enq_quota += test_stats[lcore_id].enq_quota;
320 sum.enq_fail += test_stats[lcore_id].enq_fail;
321 sum.deq_success += test_stats[lcore_id].deq_success;
322 sum.deq_fail += test_stats[lcore_id].deq_fail;
/* drain any objects left in the ring (counted via deq_remain,
 * presumably incremented in the loop body -- TODO confirm) */
326 while (rte_ring_sc_dequeue(r, &obj) == 0)
330 printf("per-lcore test returned -1\n");
/* both success and quota (-EDQUOT) enqueues actually stored objects */
334 enq_total = (sum.enq_success * bulk_enqueue) +
335 (sum.enq_quota * bulk_enqueue);
336 deq_total = (sum.deq_success * bulk_dequeue) + deq_remain;
338 rate = deq_total/TIME_S;
340 printf("rate_persec=%u\n", rate);
342 if (enq_total != deq_total) {
343 printf("invalid enq/deq_success counter: %u %u\n",
344 enq_total, deq_total);
352 do_one_ring_test2(unsigned enq_core_count, unsigned deq_core_count,
353 unsigned n_enq_bulk, unsigned n_deq_bulk)
359 bulk_enqueue = n_enq_bulk;
360 bulk_dequeue = n_deq_bulk;
362 do_sp = (enq_core_count == 1) ? 1 : 0;
363 do_sc = (deq_core_count == 1) ? 1 : 0;
365 for (sp = 0; sp <= do_sp; sp ++) {
366 for (sc = 0; sc <= do_sc; sc ++) {
367 ret = launch_cores(enq_core_count,
/*
 * Run the performance test for one core configuration over every
 * combination of enqueue and dequeue bulk sizes (1, 2, 4 and 32).
 * Returns 0 on success or the first failing sub-test status.
 */
static int
do_one_ring_test(unsigned enq_core_count, unsigned deq_core_count)
{
	/* a zero entry terminates each table */
	unsigned bulk_enqueue_tab[] = { 1, 2, 4, 32, 0 };
	unsigned bulk_dequeue_tab[] = { 1, 2, 4, 32, 0 };
	unsigned *bulk_enqueue_ptr;
	unsigned *bulk_dequeue_ptr;
	int ret;

	for (bulk_enqueue_ptr = bulk_enqueue_tab;
	     *bulk_enqueue_ptr != 0;
	     bulk_enqueue_ptr++) {

		for (bulk_dequeue_ptr = bulk_dequeue_tab;
		     *bulk_dequeue_ptr != 0;
		     bulk_dequeue_ptr++) {

			ret = do_one_ring_test2(enq_core_count, deq_core_count,
						*bulk_enqueue_ptr,
						*bulk_dequeue_ptr);
			if (ret != 0)
				return ret;
		}
	}
	return 0;
}
/*
 * Slave-lcore routine for test_quota_and_watermark(): for roughly two
 * seconds (hz * 2 cycles) it repeatedly reads the ring's bulk count
 * (quota), enqueues until the watermark is hit (-EDQUOT), derives the
 * watermark from the iteration count, then dequeues everything back.
 * The only live transitions it accepts are quota 4 -> 8 and watermark
 * 16 -> 32 (performed concurrently by the master lcore).
 * NOTE(review): the function header, the 'i'/'ret'/'diff' declarations,
 * the outer time loop and the error-return lines are not visible in this
 * chunk.
 */
405 check_quota_and_watermark(__attribute__((unused)) void *dummy)
407 uint64_t hz = rte_get_hpet_hz();
408 void *obj_table[MAX_BULK];
409 unsigned watermark, watermark_old = 16;
410 uint64_t cur_time, end_time;
413 unsigned quota, quota_old = 4;
415 /* init the object table */
416 memset(obj_table, 0, sizeof(obj_table));
417 end_time = rte_get_hpet_cycles() + (hz * 2);
419 /* check that bulk and watermark are 4 and 32 (respectively) */
422 /* read quota, the only change allowed is from 4 to 8 */
423 quota = rte_ring_get_bulk_count(r);
424 if (quota != quota_old && (quota_old != 4 || quota != 8)) {
425 printf("Bad quota change %u -> %u\n", quota_old,
431 /* add in ring until we reach watermark */
433 for (i = 0; i < 16; i ++) {
436 ret = rte_ring_enqueue_bulk(r, obj_table, quota);
/* the loop must exit via -EDQUOT (watermark reached) */
439 if (ret != -EDQUOT) {
440 printf("Cannot enqueue objects, or watermark not "
441 "reached (ret=%d)\n", ret);
445 /* read watermark, the only change allowed is from 16 to 32 */
446 watermark = i * quota;
447 if (watermark != watermark_old &&
448 (watermark_old != 16 || watermark != 32)) {
449 printf("Bad watermark change %u -> %u\n", watermark_old,
453 watermark_old = watermark;
455 /* dequeue objects from ring */
457 ret = rte_ring_dequeue_bulk(r, obj_table, quota);
459 printf("Cannot dequeue (ret=%d)\n", ret);
464 cur_time = rte_get_hpet_cycles();
465 diff = end_time - cur_time;
/* at the end, both live modifications must have been observed */
468 if (watermark_old != 32 || quota_old != 8) {
469 printf("quota or watermark was not updated (q=%u wm=%u)\n",
470 quota_old, watermark_old);
/*
 * Master-side half of the quota/watermark live-modification test: set
 * quota=4 and watermark=16, launch check_quota_and_watermark() on the
 * next available lcore, change the values to 8 and 32 while the slave
 * is running, then collect the slave's return status.
 * NOTE(review): the function header, the delay calls around the second
 * set (original lines 491-496) and the return statements are not
 * visible in this chunk.
 */
478 test_quota_and_watermark(void)
480 unsigned lcore_id = rte_lcore_id();
481 unsigned lcore_id2 = rte_get_next_lcore(lcore_id, 0, 1);
483 printf("Test quota and watermark live modification\n");
485 rte_ring_set_bulk_count(r, 4);
486 rte_ring_set_water_mark(r, 16);
488 /* launch a thread that will enqueue and dequeue, checking
489 * watermark and quota */
490 rte_eal_remote_launch(check_quota_and_watermark, NULL, lcore_id2);
/* modify the values live; the slave validates the transitions */
493 rte_ring_set_bulk_count(r, 8);
494 rte_ring_set_water_mark(r, 32);
497 if (rte_eal_wait_lcore(lcore_id2) < 0)
502 /* Test for catch on invalid watermark values */
/*
 * Looks up the "test_ring_basic_ex" ring (created earlier by
 * test_ring_basic_ex) and checks that rte_ring_set_water_mark() rejects
 * a count larger than the ring size with -EINVAL, and that a subsequent
 * call leaves the watermark equal to the ring size (its "disabled"
 * default).
 * NOTE(review): the 'count'/'setwm' declarations, the NULL check after
 * the lookup, the reset of 'count' before the second call (presumably to
 * 0 -- TODO confirm) and the error-return path are not visible in this
 * chunk.
 */
504 test_set_watermark( void ){
508 struct rte_ring *r = rte_ring_lookup("test_ring_basic_ex");
510 printf( " ring lookup failed\n" );
/* a watermark larger than the ring capacity must be rejected */
513 count = r->prod.size*2;
514 setwm = rte_ring_set_water_mark(r, count);
515 if (setwm != -EINVAL){
516 printf("Test failed to detect invalid watermark count value\n");
521 setwm = rte_ring_set_water_mark(r, count);
522 if (r->prod.watermark != r->prod.size) {
523 printf("Test failed to detect invalid watermark count value\n");
533 * helper routine for test_ring_basic
/*
 * Randomized full/empty test: for TEST_RING_FULL_EMTPY_ITER iterations,
 * rotate the ring head/tail by a random amount, fill the ring completely
 * (capacity is RING_SIZE - 1), verify the full/empty/count accessors,
 * drain it, verify again, and check the data round-tripped intact.
 * NOTE(review): the function header, the 'i'/'rand' declarations and the
 * continuation lines of the enqueue calls (bulk counts) are not visible
 * in this chunk.
 */
536 test_ring_basic_full_empty(void * const src[], void *dst[])
539 const unsigned rsz = RING_SIZE - 1;
541 printf("Basic full/empty test\n");
543 for (i = 0; TEST_RING_FULL_EMTPY_ITER != i; i++) {
545 /* random shift in the ring */
546 rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
547 printf("%s: iteration %u, random shift: %u;\n",
549 TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
551 TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rand));
/* fill the ring to capacity and check the "full" accessors */
554 TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
556 TEST_RING_VERIFY(0 == rte_ring_free_count(r));
557 TEST_RING_VERIFY(rsz == rte_ring_count(r));
558 TEST_RING_VERIFY(rte_ring_full(r));
559 TEST_RING_VERIFY(0 == rte_ring_empty(r));
/* drain the ring completely and check the "empty" accessors */
562 TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rsz));
563 TEST_RING_VERIFY(rsz == rte_ring_free_count(r));
564 TEST_RING_VERIFY(0 == rte_ring_count(r));
565 TEST_RING_VERIFY(0 == rte_ring_full(r));
566 TEST_RING_VERIFY(rte_ring_empty(r));
/* verify the dequeued data matches what was enqueued */
569 TEST_RING_VERIFY(0 == memcmp(src, dst, rsz));
/*
 * Basic single-core functional test: enqueue/dequeue 1, 2 and MAX_BULK
 * objects with the SP/SC functions, then with the MP/MC functions,
 * verifying the dequeued pointers each time; fill and empty the whole
 * ring; run the randomized full/empty helper; and finally exercise the
 * watermark and default-bulk enqueue/dequeue paths.
 * NOTE(review): the function header, the 'i'/'ret'/'n' declarations,
 * the cur_src/cur_dst pointer advances, the error-checking/goto lines
 * after each call, and the cleanup/return path are not visible in this
 * chunk.
 */
576 test_ring_basic(void)
578 void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
582 /* alloc dummy object pointers */
583 src = malloc(RING_SIZE*2*sizeof(void *));
587 for (i = 0; i < RING_SIZE*2 ; i++) {
588 src[i] = (void *)(unsigned long)i;
592 /* alloc some room for copied objects */
593 dst = malloc(RING_SIZE*2*sizeof(void *));
597 memset(dst, 0, RING_SIZE*2*sizeof(void *));
/* single-producer enqueue / single-consumer dequeue section */
600 printf("enqueue 1 obj\n");
601 ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1);
606 printf("enqueue 2 objs\n");
607 ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2);
612 printf("enqueue MAX_BULK objs\n");
613 ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK);
618 printf("dequeue 1 obj\n");
619 ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1);
624 printf("dequeue 2 objs\n");
625 ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2);
630 printf("dequeue MAX_BULK objs\n");
631 ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK);
/* the dequeued pointers must equal the enqueued ones */
637 if (memcmp(src, dst, cur_dst - dst)) {
638 test_hexdump("src", src, cur_src - src);
639 test_hexdump("dst", dst, cur_dst - dst);
640 printf("data after dequeue is not the same\n");
/* multi-producer enqueue / multi-consumer dequeue section */
646 printf("enqueue 1 obj\n");
647 ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1);
652 printf("enqueue 2 objs\n");
653 ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2);
658 printf("enqueue MAX_BULK objs\n");
659 ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
664 printf("dequeue 1 obj\n");
665 ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1);
670 printf("dequeue 2 objs\n");
671 ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2);
676 printf("dequeue MAX_BULK objs\n");
677 ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
683 if (memcmp(src, dst, cur_dst - dst)) {
684 test_hexdump("src", src, cur_src - src);
685 test_hexdump("dst", dst, cur_dst - dst);
686 printf("data after dequeue is not the same\n");
/* fill the whole ring in MAX_BULK chunks, then empty it */
692 printf("fill and empty the ring\n");
693 for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
694 ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
698 ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
705 if (memcmp(src, dst, cur_dst - dst)) {
706 test_hexdump("src", src, cur_src - src);
707 test_hexdump("dst", dst, cur_dst - dst);
708 printf("data after dequeue is not the same\n");
712 if (test_ring_basic_full_empty(src, dst) != 0)
/* watermark / default bulk count section: with quota 16 and watermark
 * 20, the second 16-object enqueue must return -EDQUOT */
718 printf("test watermark and default bulk enqueue / dequeue\n");
719 rte_ring_set_bulk_count(r, 16);
720 rte_ring_set_water_mark(r, 20);
721 n = rte_ring_get_bulk_count(r);
723 printf("rte_ring_get_bulk_count() returned %u instead "
730 ret = rte_ring_enqueue_bulk(r, cur_src, n);
733 printf("Cannot enqueue\n");
736 ret = rte_ring_enqueue_bulk(r, cur_src, n);
738 if (ret != -EDQUOT) {
739 printf("Watermark not exceeded\n");
742 ret = rte_ring_dequeue_bulk(r, cur_dst, n);
745 printf("Cannot dequeue\n");
748 ret = rte_ring_dequeue_bulk(r, cur_dst, n);
751 printf("Cannot dequeue2\n");
756 if (memcmp(src, dst, cur_dst - dst)) {
757 test_hexdump("src", src, cur_src - src);
758 test_hexdump("dst", dst, cur_dst - dst);
759 printf("data after dequeue is not the same\n");
780 * creating a ring with an invalid (non-power-of-2) size must always fail in this function
783 test_ring_creation_with_wrong_size(void)
785 struct rte_ring * rp = NULL;
787 rp = rte_ring_create("test_bad_ring_size", RING_SIZE+1, SOCKET_ID_ANY, 0);
796 * it tests that creating a ring with an already-used ring name always fails
799 test_ring_creation_with_an_used_name(void)
801 struct rte_ring * rp;
803 rp = rte_ring_create("test", RING_SIZE, SOCKET_ID_ANY, 0);
811 * Test to if a non-power of 2 count causes the create
812 * function to fail correctly
815 test_create_count_odd(void)
817 struct rte_ring *r = rte_ring_create("test_ring_count",
818 4097, SOCKET_ID_ANY, 0 );
826 test_lookup_null(void)
828 struct rte_ring *rlp = rte_ring_lookup("ring_not_found");
830 if (rte_errno != ENOENT){
831 printf( "test failed to returnn error on null pointer\n");
838 * it tests some more basic ring operations
/*
 * Exercises create/lookup/empty/full/free_count plus single-object
 * enqueue/dequeue over a fresh "test_ring_basic_ex" ring (which is also
 * reused later by test_set_watermark).
 * NOTE(review): the function header, the 'i'/'ret'/'obj' declarations,
 * the error-goto lines after each check and the cleanup/return path are
 * not visible in this chunk.  Also note the ring's usable capacity is
 * RING_SIZE - 1, so the last of the RING_SIZE enqueues below presumably
 * fails -- TODO confirm the full() check still holds for that reason.
 */
841 test_ring_basic_ex(void)
845 struct rte_ring * rp;
848 obj = (void **)rte_zmalloc("test_ring_basic_ex_malloc", (RING_SIZE * sizeof(void *)), 0);
850 printf("test_ring_basic_ex fail to rte_malloc\n");
854 rp = rte_ring_create("test_ring_basic_ex", RING_SIZE, SOCKET_ID_ANY, 0);
856 printf("test_ring_basic_ex fail to create ring\n");
/* the freshly created ring must be findable by name */
860 if (rte_ring_lookup("test_ring_basic_ex") != rp) {
864 if (rte_ring_empty(rp) != 1) {
865 printf("test_ring_basic_ex ring is not empty but it should be\n");
869 printf("%u ring entries are now free\n", rte_ring_free_count(rp));
/* fill the ring one object at a time */
871 for (i = 0; i < RING_SIZE; i ++) {
872 rte_ring_enqueue(rp, obj[i]);
875 if (rte_ring_full(rp) != 1) {
876 printf("test_ring_basic_ex ring is not full but it should be\n");
/* drain the ring back one object at a time */
880 for (i = 0; i < RING_SIZE; i ++) {
881 rte_ring_dequeue(rp, &obj[i]);
884 if (rte_ring_empty(rp) != 1) {
885 printf("test_ring_basic_ex ring is not empty but it should be\n");
900 unsigned enq_core_count, deq_core_count;
902 /* some more basic operations */
903 if (test_ring_basic_ex() < 0)
906 rte_atomic32_init(&synchro);
909 r = rte_ring_create("test", RING_SIZE, SOCKET_ID_ANY, 0);
913 /* retrieve the ring from its name */
914 if (rte_ring_lookup("test") != r) {
915 printf("Cannot lookup ring from its name\n");
919 /* basic operations */
920 if (test_ring_basic() < 0)
923 /* basic operations */
924 if (test_quota_and_watermark() < 0)
927 if ( test_set_watermark() < 0){
928 printf ("Test failed to detect invalid parameter\n");
932 printf ( "Test detected forced bad watermark values\n");
934 if ( test_create_count_odd() < 0){
935 printf ("Test failed to detect odd count\n");
939 printf ( "Test detected odd count\n");
941 if ( test_lookup_null() < 0){
942 printf ("Test failed to detect NULL ring lookup\n");
946 printf ( "Test detected NULL ring lookup \n");
949 printf("start performance tests\n");
951 /* one lcore for enqueue, one for dequeue */
954 if (do_one_ring_test(enq_core_count, deq_core_count) < 0)
957 /* max cores for enqueue, one for dequeue */
958 enq_core_count = rte_lcore_count() - 1;
960 if (do_one_ring_test(enq_core_count, deq_core_count) < 0)
963 /* max cores for dequeue, one for enqueue */
965 deq_core_count = rte_lcore_count() - 1;
966 if (do_one_ring_test(enq_core_count, deq_core_count) < 0)
969 /* half for enqueue and half for dequeue */
970 enq_core_count = rte_lcore_count() / 2;
971 deq_core_count = rte_lcore_count() / 2;
972 if (do_one_ring_test(enq_core_count, deq_core_count) < 0)
975 /* test of creating ring with wrong size */
976 if (test_ring_creation_with_wrong_size() < 0)
979 /* test of creation ring with an used name */
980 if (test_ring_creation_with_an_used_name() < 0)
983 /* dump the ring status */
984 rte_ring_list_dump();