4 * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 #include <sys/queue.h>
41 #include <rte_memory.h>
42 #include <rte_launch.h>
44 #include <rte_per_lcore.h>
45 #include <rte_lcore.h>
46 #include <rte_debug.h>
47 #include <rte_ethdev.h>
48 #include <rte_cycles.h>
49 #include <rte_eventdev.h>
50 #include <rte_pause.h>
51 #include <rte_service.h>
52 #include <rte_service_component.h>
58 #define NUM_PACKETS (1<<18)
63 struct rte_mempool *mbuf_pool;
64 uint8_t port[MAX_PORTS];
65 uint8_t qid[MAX_QIDS];
70 static struct rte_event release_ev;
/*
 * Allocate an mbuf from @mp and fill it with a canned Ethernet/ARP
 * request frame: "who-has 10.0.0.1 tell 10.0.0.2", 46-byte payload.
 *
 * NOTE(review): this fragment is elided — the leading numerals on each
 * line are residue of an external line index, and several statements
 * (the `struct rte_mbuf *m` declaration, the alloc-failure check and
 * the final `return m;`) are missing between the visible lines.
 */
72 static inline struct rte_mbuf *
73 rte_gen_arp(int portid, struct rte_mempool *mp)
77 * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
79 static const uint8_t arp_request[] = {
80 /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
81 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
82 /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
83 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
84 /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
85 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
86 /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
87 0x00, 0x00, 0x00, 0x00
/* payload length excludes the final pad byte of the template */
90 int pkt_len = sizeof(arp_request) - 1;
92 m = rte_pktmbuf_alloc(mp);
/* copy the template directly into the mbuf data area */
96 memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
97 arp_request, pkt_len);
98 rte_pktmbuf_pkt_len(m) = pkt_len;
99 rte_pktmbuf_data_len(m) = pkt_len;
/* portid is accepted for API symmetry but deliberately unused */
101 RTE_SET_USED(portid);
/*
 * Dump all extended statistics (device-, port- and queue-scoped) of
 * `evdev` to stdout: fetch the stat names, then the values, and print
 * each "index : name : value" line.
 *
 * NOTE(review): the enclosing function's signature is not visible in
 * this elided fragment (presumably a static debug-dump helper —
 * confirm against the full file); leading numerals are index residue.
 */
109 const uint32_t XSTATS_MAX = 1024;
111 uint32_t ids[XSTATS_MAX];
112 uint64_t values[XSTATS_MAX];
113 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
115 for (i = 0; i < XSTATS_MAX; i++)
118 /* Device names / values */
119 int ret = rte_event_dev_xstats_names_get(evdev,
120 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
121 xstats_names, ids, XSTATS_MAX);
123 printf("%d: xstats names get() returned error\n",
127 ret = rte_event_dev_xstats_get(evdev,
128 RTE_EVENT_DEV_XSTATS_DEVICE,
129 0, ids, values, ret);
130 if (ret > (signed int)XSTATS_MAX)
131 printf("%s %d: more xstats available than space\n",
133 for (i = 0; (signed int)i < ret; i++) {
134 printf("%d : %s : %"PRIu64"\n",
135 i, xstats_names[i].name, values[i]);
138 /* Port names / values */
139 ret = rte_event_dev_xstats_names_get(evdev,
140 RTE_EVENT_DEV_XSTATS_PORT, 0,
141 xstats_names, ids, XSTATS_MAX);
/* note: values are fetched for port id 1 here, names for port 0 */
142 ret = rte_event_dev_xstats_get(evdev,
143 RTE_EVENT_DEV_XSTATS_PORT, 1,
145 if (ret > (signed int)XSTATS_MAX)
146 printf("%s %d: more xstats available than space\n",
148 for (i = 0; (signed int)i < ret; i++) {
149 printf("%d : %s : %"PRIu64"\n",
150 i, xstats_names[i].name, values[i]);
153 /* Queue names / values */
154 ret = rte_event_dev_xstats_names_get(evdev,
155 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
156 xstats_names, ids, XSTATS_MAX);
157 ret = rte_event_dev_xstats_get(evdev,
158 RTE_EVENT_DEV_XSTATS_QUEUE,
159 1, ids, values, ret);
160 if (ret > (signed int)XSTATS_MAX)
161 printf("%s %d: more xstats available than space\n",
163 for (i = 0; (signed int)i < ret; i++) {
164 printf("%d : %s : %"PRIu64"\n",
165 i, xstats_names[i].name, values[i]);
169 /* initialization and config */
/*
 * Reset the test context and configure `evdev` with the requested
 * number of queues/ports plus fixed flow/credit/depth limits.
 * Preserves t->mbuf_pool across the memset so packet generation
 * still works after re-init. Returns via paths elided from this
 * fragment (leading numerals are external-index residue).
 */
171 init(struct test *t, int nb_queues, int nb_ports)
173 struct rte_event_dev_config config = {
174 .nb_event_queues = nb_queues,
175 .nb_event_ports = nb_ports,
176 .nb_event_queue_flows = 1024,
177 .nb_events_limit = 4096,
178 .nb_event_port_dequeue_depth = 128,
179 .nb_event_port_enqueue_depth = 128,
183 void *temp = t->mbuf_pool; /* save and restore mbuf pool */
185 memset(t, 0, sizeof(*t));
188 ret = rte_event_dev_configure(evdev, &config);
190 printf("%d: Error configuring device\n", __LINE__);
195 create_ports(struct test *t, int num_ports)
198 static const struct rte_event_port_conf conf = {
199 .new_event_threshold = 1024,
203 if (num_ports > MAX_PORTS)
206 for (i = 0; i < num_ports; i++) {
207 if (rte_event_port_setup(evdev, i, &conf) < 0) {
208 printf("Error setting up port %d\n", i);
218 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
223 const struct rte_event_queue_conf conf = {
224 .schedule_type = flags,
225 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
226 .nb_atomic_flows = 1024,
227 .nb_atomic_order_sequences = 1024,
230 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
231 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
232 printf("%d: error creating qid %d\n", __LINE__, i);
237 t->nb_qids += num_qids;
238 if (t->nb_qids > MAX_QIDS)
245 create_atomic_qids(struct test *t, int num_qids)
247 return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
251 create_ordered_qids(struct test *t, int num_qids)
253 return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ORDERED);
258 create_unordered_qids(struct test *t, int num_qids)
260 return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_PARALLEL);
264 create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
269 static const struct rte_event_queue_conf conf = {
270 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
271 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
274 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
275 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
276 printf("%d: error creating qid %d\n", __LINE__, i);
281 if (rte_event_port_link(evdev, ports[i - t->nb_qids],
282 &t->qid[i], NULL, 1) != 1) {
283 printf("%d: error creating link for qid %d\n",
288 t->nb_qids += num_qids;
289 if (t->nb_qids > MAX_QIDS)
/* Per-test teardown: stop then close the event device under test. */
297 cleanup(struct test *t __rte_unused)
299 rte_event_dev_stop(evdev);
300 rte_event_dev_close(evdev);
/*
 * Snapshot of the sw eventdev's statistics, filled from xstats by
 * test_event_dev_stats_get(): device totals plus per-port and
 * per-queue counters. (Closing brace elided in this fragment.)
 */
304 struct test_event_dev_stats {
305 uint64_t rx_pkts; /**< Total packets received */
306 uint64_t rx_dropped; /**< Total packets dropped (Eg Invalid QID) */
307 uint64_t tx_pkts; /**< Total packets transmitted */
309 /** Packets received on this port */
310 uint64_t port_rx_pkts[MAX_PORTS];
311 /** Packets dropped on this port */
312 uint64_t port_rx_dropped[MAX_PORTS];
313 /** Packets inflight on this port */
314 uint64_t port_inflight[MAX_PORTS];
315 /** Packets transmitted on this port */
316 uint64_t port_tx_pkts[MAX_PORTS];
317 /** Packets received on this qid */
318 uint64_t qid_rx_pkts[MAX_QIDS];
319 /** Packets dropped on this qid */
320 uint64_t qid_rx_dropped[MAX_QIDS];
321 /** Packets transmitted on this qid */
322 uint64_t qid_tx_pkts[MAX_QIDS];
/*
 * Populate @stats by querying the sw PMD's named xstats
 * ("dev_rx", "port_N_rx", "qid_N_tx", ...) one by one via
 * rte_event_dev_xstats_by_name_get(). The static id arrays cache the
 * xstat ids returned by the lookups. (Elided fragment: the return
 * and some loop braces are missing; leading numerals are residue.)
 */
326 test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
329 static uint32_t total_ids[3]; /* rx, tx and drop */
330 static uint32_t port_rx_pkts_ids[MAX_PORTS];
331 static uint32_t port_rx_dropped_ids[MAX_PORTS];
332 static uint32_t port_inflight_ids[MAX_PORTS];
333 static uint32_t port_tx_pkts_ids[MAX_PORTS];
334 static uint32_t qid_rx_pkts_ids[MAX_QIDS];
335 static uint32_t qid_rx_dropped_ids[MAX_QIDS];
336 static uint32_t qid_tx_pkts_ids[MAX_QIDS];
/* device-wide totals */
339 stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
340 "dev_rx", &total_ids[0]);
341 stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
342 "dev_drop", &total_ids[1]);
343 stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
344 "dev_tx", &total_ids[2]);
/* per-port counters, looked up by generated name */
345 for (i = 0; i < MAX_PORTS; i++) {
347 snprintf(name, sizeof(name), "port_%u_rx", i);
348 stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
349 dev_id, name, &port_rx_pkts_ids[i]);
350 snprintf(name, sizeof(name), "port_%u_drop", i);
351 stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
352 dev_id, name, &port_rx_dropped_ids[i]);
353 snprintf(name, sizeof(name), "port_%u_inflight", i);
354 stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
355 dev_id, name, &port_inflight_ids[i]);
356 snprintf(name, sizeof(name), "port_%u_tx", i);
357 stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
358 dev_id, name, &port_tx_pkts_ids[i]);
/* per-queue counters, looked up by generated name */
360 for (i = 0; i < MAX_QIDS; i++) {
362 snprintf(name, sizeof(name), "qid_%u_rx", i);
363 stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
364 dev_id, name, &qid_rx_pkts_ids[i]);
365 snprintf(name, sizeof(name), "qid_%u_drop", i);
366 stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
367 dev_id, name, &qid_rx_dropped_ids[i]);
368 snprintf(name, sizeof(name), "qid_%u_tx", i);
369 stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
370 dev_id, name, &qid_tx_pkts_ids[i]);
376 /* run_prio_packet_test
377 * This performs a basic packet priority check on the test instance passed in.
378 * It is factored out of the main priority tests as the same tests must be
379 * performed to ensure prioritization of each type of QID.
382 * - An initialized test structure, including mempool
383 * - t->port[0] is initialized for both Enq / Deq of packets to the QID
384 * - t->qid[0] is the QID to be tested
385 * - if LB QID, the CQ must be mapped to the QID.
388 run_prio_packet_test(struct test *t)
/* Two packets: NORMAL priority enqueued first, HIGHEST second.
 * The scheduler must deliver the HIGHEST-priority one first. */
391 const uint32_t MAGIC_SEQN[] = {4711, 1234};
392 const uint32_t PRIORITY[] = {
393 RTE_EVENT_DEV_PRIORITY_NORMAL,
394 RTE_EVENT_DEV_PRIORITY_HIGHEST
397 for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
398 /* generate pkt and enqueue */
400 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
402 printf("%d: gen of pkt failed\n", __LINE__);
/* tag each mbuf so ordering can be verified after dequeue */
405 arp->seqn = MAGIC_SEQN[i];
407 ev = (struct rte_event){
408 .priority = PRIORITY[i],
409 .op = RTE_EVENT_OP_NEW,
410 .queue_id = t->qid[0],
413 err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
415 printf("%d: error failed to enqueue\n", __LINE__);
/* run the sw PMD scheduler once so the events reach the CQ */
420 rte_service_run_iter_on_app_lcore(t->service_id, 1);
422 struct test_event_dev_stats stats;
423 err = test_event_dev_stats_get(evdev, &stats);
425 printf("%d: error failed to get stats\n", __LINE__);
429 if (stats.port_rx_pkts[t->port[0]] != 2) {
430 printf("%d: error stats incorrect for directed port\n",
432 rte_event_dev_dump(evdev, stdout);
436 struct rte_event ev, ev2;
438 deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
440 printf("%d: error failed to deq\n", __LINE__);
441 rte_event_dev_dump(evdev, stdout);
/* highest-priority packet (seqn 1234) must come out first */
444 if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
445 printf("%d: first packet out not highest priority\n",
447 rte_event_dev_dump(evdev, stdout);
450 rte_pktmbuf_free(ev.mbuf);
452 deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
454 printf("%d: error failed to deq\n", __LINE__);
455 rte_event_dev_dump(evdev, stdout);
/* normal-priority packet (seqn 4711) must come out second */
458 if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
459 printf("%d: second packet out not lower priority\n",
461 rte_event_dev_dump(evdev, stdout);
464 rte_pktmbuf_free(ev2.mbuf);
471 test_single_directed_packet(struct test *t)
473 const int rx_enq = 0;
474 const int wrk_enq = 2;
477 /* Create instance with 3 directed QIDs going to 3 ports */
478 if (init(t, 3, 3) < 0 ||
479 create_ports(t, 3) < 0 ||
480 create_directed_qids(t, 3, t->port) < 0)
483 if (rte_event_dev_start(evdev) < 0) {
484 printf("%d: Error with start call\n", __LINE__);
488 /************** FORWARD ****************/
489 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
490 struct rte_event ev = {
491 .op = RTE_EVENT_OP_NEW,
497 printf("%d: gen of pkt failed\n", __LINE__);
501 const uint32_t MAGIC_SEQN = 4711;
502 arp->seqn = MAGIC_SEQN;
504 /* generate pkt and enqueue */
505 err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
507 printf("%d: error failed to enqueue\n", __LINE__);
511 /* Run schedule() as dir packets may need to be re-ordered */
512 rte_service_run_iter_on_app_lcore(t->service_id, 1);
514 struct test_event_dev_stats stats;
515 err = test_event_dev_stats_get(evdev, &stats);
517 printf("%d: error failed to get stats\n", __LINE__);
521 if (stats.port_rx_pkts[rx_enq] != 1) {
522 printf("%d: error stats incorrect for directed port\n",
528 deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
530 printf("%d: error failed to deq\n", __LINE__);
534 err = test_event_dev_stats_get(evdev, &stats);
535 if (stats.port_rx_pkts[wrk_enq] != 0 &&
536 stats.port_rx_pkts[wrk_enq] != 1) {
537 printf("%d: error directed stats post-dequeue\n", __LINE__);
541 if (ev.mbuf->seqn != MAGIC_SEQN) {
542 printf("%d: error magic sequence number not dequeued\n",
547 rte_pktmbuf_free(ev.mbuf);
553 test_directed_forward_credits(struct test *t)
558 if (init(t, 1, 1) < 0 ||
559 create_ports(t, 1) < 0 ||
560 create_directed_qids(t, 1, t->port) < 0)
563 if (rte_event_dev_start(evdev) < 0) {
564 printf("%d: Error with start call\n", __LINE__);
568 struct rte_event ev = {
569 .op = RTE_EVENT_OP_NEW,
573 for (i = 0; i < 1000; i++) {
574 err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
576 printf("%d: error failed to enqueue\n", __LINE__);
579 rte_service_run_iter_on_app_lcore(t->service_id, 1);
582 deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
584 printf("%d: error failed to deq\n", __LINE__);
588 /* re-write event to be a forward, and continue looping it */
589 ev.op = RTE_EVENT_OP_FORWARD;
598 test_priority_directed(struct test *t)
600 if (init(t, 1, 1) < 0 ||
601 create_ports(t, 1) < 0 ||
602 create_directed_qids(t, 1, t->port) < 0) {
603 printf("%d: Error initializing device\n", __LINE__);
607 if (rte_event_dev_start(evdev) < 0) {
608 printf("%d: Error with start call\n", __LINE__);
612 return run_prio_packet_test(t);
616 test_priority_atomic(struct test *t)
618 if (init(t, 1, 1) < 0 ||
619 create_ports(t, 1) < 0 ||
620 create_atomic_qids(t, 1) < 0) {
621 printf("%d: Error initializing device\n", __LINE__);
626 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
627 printf("%d: error mapping qid to port\n", __LINE__);
630 if (rte_event_dev_start(evdev) < 0) {
631 printf("%d: Error with start call\n", __LINE__);
635 return run_prio_packet_test(t);
639 test_priority_ordered(struct test *t)
641 if (init(t, 1, 1) < 0 ||
642 create_ports(t, 1) < 0 ||
643 create_ordered_qids(t, 1) < 0) {
644 printf("%d: Error initializing device\n", __LINE__);
649 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
650 printf("%d: error mapping qid to port\n", __LINE__);
653 if (rte_event_dev_start(evdev) < 0) {
654 printf("%d: Error with start call\n", __LINE__);
658 return run_prio_packet_test(t);
662 test_priority_unordered(struct test *t)
664 if (init(t, 1, 1) < 0 ||
665 create_ports(t, 1) < 0 ||
666 create_unordered_qids(t, 1) < 0) {
667 printf("%d: Error initializing device\n", __LINE__);
672 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
673 printf("%d: error mapping qid to port\n", __LINE__);
676 if (rte_event_dev_start(evdev) < 0) {
677 printf("%d: Error with start call\n", __LINE__);
681 return run_prio_packet_test(t);
685 burst_packets(struct test *t)
687 /************** CONFIG ****************/
692 /* Create instance with 2 ports and 2 queues */
693 if (init(t, 2, 2) < 0 ||
694 create_ports(t, 2) < 0 ||
695 create_atomic_qids(t, 2) < 0) {
696 printf("%d: Error initializing device\n", __LINE__);
700 /* CQ mapping to QID */
701 ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
703 printf("%d: error mapping lb qid0\n", __LINE__);
706 ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
708 printf("%d: error mapping lb qid1\n", __LINE__);
712 if (rte_event_dev_start(evdev) < 0) {
713 printf("%d: Error with start call\n", __LINE__);
717 /************** FORWARD ****************/
718 const uint32_t rx_port = 0;
719 const uint32_t NUM_PKTS = 2;
721 for (i = 0; i < NUM_PKTS; i++) {
722 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
724 printf("%d: error generating pkt\n", __LINE__);
728 struct rte_event ev = {
729 .op = RTE_EVENT_OP_NEW,
734 /* generate pkt and enqueue */
735 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
737 printf("%d: Failed to enqueue\n", __LINE__);
741 rte_service_run_iter_on_app_lcore(t->service_id, 1);
743 /* Check stats for all NUM_PKTS arrived to sched core */
744 struct test_event_dev_stats stats;
746 err = test_event_dev_stats_get(evdev, &stats);
748 printf("%d: failed to get stats\n", __LINE__);
751 if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
752 printf("%d: Sched core didn't receive all %d pkts\n",
754 rte_event_dev_dump(evdev, stdout);
762 /******** DEQ QID 1 *******/
765 p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
767 rte_pktmbuf_free(ev.mbuf);
770 if (deq_pkts != NUM_PKTS/2) {
771 printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
776 /******** DEQ QID 2 *******/
780 p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
782 rte_pktmbuf_free(ev.mbuf);
784 if (deq_pkts != NUM_PKTS/2) {
785 printf("%d: Half of NUM_PKTS didn't arrive at port 2\n",
795 abuse_inflights(struct test *t)
797 const int rx_enq = 0;
798 const int wrk_enq = 2;
801 /* Create instance with 4 ports */
802 if (init(t, 1, 4) < 0 ||
803 create_ports(t, 4) < 0 ||
804 create_atomic_qids(t, 1) < 0) {
805 printf("%d: Error initializing device\n", __LINE__);
809 /* CQ mapping to QID */
810 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
812 printf("%d: error mapping lb qid\n", __LINE__);
817 if (rte_event_dev_start(evdev) < 0) {
818 printf("%d: Error with start call\n", __LINE__);
822 /* Enqueue op only */
823 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
825 printf("%d: Failed to enqueue\n", __LINE__);
830 rte_service_run_iter_on_app_lcore(t->service_id, 1);
832 struct test_event_dev_stats stats;
834 err = test_event_dev_stats_get(evdev, &stats);
836 printf("%d: failed to get stats\n", __LINE__);
840 if (stats.rx_pkts != 0 ||
841 stats.tx_pkts != 0 ||
842 stats.port_inflight[wrk_enq] != 0) {
843 printf("%d: Sched core didn't handle pkt as expected\n",
853 xstats_tests(struct test *t)
855 const int wrk_enq = 2;
858 /* Create instance with 4 ports */
859 if (init(t, 1, 4) < 0 ||
860 create_ports(t, 4) < 0 ||
861 create_atomic_qids(t, 1) < 0) {
862 printf("%d: Error initializing device\n", __LINE__);
866 /* CQ mapping to QID */
867 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
869 printf("%d: error mapping lb qid\n", __LINE__);
874 if (rte_event_dev_start(evdev) < 0) {
875 printf("%d: Error with start call\n", __LINE__);
879 const uint32_t XSTATS_MAX = 1024;
882 uint32_t ids[XSTATS_MAX];
883 uint64_t values[XSTATS_MAX];
884 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
886 for (i = 0; i < XSTATS_MAX; i++)
889 /* Device names / values */
890 int ret = rte_event_dev_xstats_names_get(evdev,
891 RTE_EVENT_DEV_XSTATS_DEVICE,
892 0, xstats_names, ids, XSTATS_MAX);
894 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
897 ret = rte_event_dev_xstats_get(evdev,
898 RTE_EVENT_DEV_XSTATS_DEVICE,
899 0, ids, values, ret);
901 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
905 /* Port names / values */
906 ret = rte_event_dev_xstats_names_get(evdev,
907 RTE_EVENT_DEV_XSTATS_PORT, 0,
908 xstats_names, ids, XSTATS_MAX);
910 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
913 ret = rte_event_dev_xstats_get(evdev,
914 RTE_EVENT_DEV_XSTATS_PORT, 0,
917 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
921 /* Queue names / values */
922 ret = rte_event_dev_xstats_names_get(evdev,
923 RTE_EVENT_DEV_XSTATS_QUEUE,
924 0, xstats_names, ids, XSTATS_MAX);
926 printf("%d: expected 17 stats, got return %d\n", __LINE__, ret);
930 /* NEGATIVE TEST: with wrong queue passed, 0 stats should be returned */
931 ret = rte_event_dev_xstats_get(evdev,
932 RTE_EVENT_DEV_XSTATS_QUEUE,
933 1, ids, values, ret);
934 if (ret != -EINVAL) {
935 printf("%d: expected 0 stats, got return %d\n", __LINE__, ret);
939 ret = rte_event_dev_xstats_get(evdev,
940 RTE_EVENT_DEV_XSTATS_QUEUE,
941 0, ids, values, ret);
943 printf("%d: expected 17 stats, got return %d\n", __LINE__, ret);
947 /* enqueue packets to check values */
948 for (i = 0; i < 3; i++) {
950 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
952 printf("%d: gen of pkt failed\n", __LINE__);
955 ev.queue_id = t->qid[i];
956 ev.op = RTE_EVENT_OP_NEW;
961 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
963 printf("%d: Failed to enqueue\n", __LINE__);
968 rte_service_run_iter_on_app_lcore(t->service_id, 1);
970 /* Device names / values */
971 int num_stats = rte_event_dev_xstats_names_get(evdev,
972 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
973 xstats_names, ids, XSTATS_MAX);
976 ret = rte_event_dev_xstats_get(evdev,
977 RTE_EVENT_DEV_XSTATS_DEVICE,
978 0, ids, values, num_stats);
979 static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
980 for (i = 0; (signed int)i < ret; i++) {
981 if (expected[i] != values[i]) {
983 "%d Error xstat %d (id %d) %s : %"PRIu64
984 ", expect %"PRIu64"\n",
985 __LINE__, i, ids[i], xstats_names[i].name,
986 values[i], expected[i]);
991 ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
994 /* ensure reset statistics are zero-ed */
995 static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
996 ret = rte_event_dev_xstats_get(evdev,
997 RTE_EVENT_DEV_XSTATS_DEVICE,
998 0, ids, values, num_stats);
999 for (i = 0; (signed int)i < ret; i++) {
1000 if (expected_zero[i] != values[i]) {
1002 "%d Error, xstat %d (id %d) %s : %"PRIu64
1003 ", expect %"PRIu64"\n",
1004 __LINE__, i, ids[i], xstats_names[i].name,
1005 values[i], expected_zero[i]);
1010 /* port reset checks */
1011 num_stats = rte_event_dev_xstats_names_get(evdev,
1012 RTE_EVENT_DEV_XSTATS_PORT, 0,
1013 xstats_names, ids, XSTATS_MAX);
1016 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1017 0, ids, values, num_stats);
1019 static const uint64_t port_expected[] = {
1024 0 /* avg pkt cycles */,
1026 0 /* rx ring used */,
1027 4096 /* rx ring free */,
1028 0 /* cq ring used */,
1029 32 /* cq ring free */,
1030 0 /* dequeue calls */,
1031 /* 10 dequeue burst buckets */
1035 if (ret != RTE_DIM(port_expected)) {
1037 "%s %d: wrong number of port stats (%d), expected %zu\n",
1038 __func__, __LINE__, ret, RTE_DIM(port_expected));
1041 for (i = 0; (signed int)i < ret; i++) {
1042 if (port_expected[i] != values[i]) {
1044 "%s : %d: Error stat %s is %"PRIu64
1045 ", expected %"PRIu64"\n",
1046 __func__, __LINE__, xstats_names[i].name,
1047 values[i], port_expected[i]);
1052 ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1055 /* ensure reset statistics are zero-ed */
1056 static const uint64_t port_expected_zero[] = {
1061 0 /* avg pkt cycles */,
1063 0 /* rx ring used */,
1064 4096 /* rx ring free */,
1065 0 /* cq ring used */,
1066 32 /* cq ring free */,
1067 0 /* dequeue calls */,
1068 /* 10 dequeue burst buckets */
1072 ret = rte_event_dev_xstats_get(evdev,
1073 RTE_EVENT_DEV_XSTATS_PORT,
1074 0, ids, values, num_stats);
1075 for (i = 0; (signed int)i < ret; i++) {
1076 if (port_expected_zero[i] != values[i]) {
1078 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1079 ", expect %"PRIu64"\n",
1080 __LINE__, i, ids[i], xstats_names[i].name,
1081 values[i], port_expected_zero[i]);
1086 /* QUEUE STATS TESTS */
1087 num_stats = rte_event_dev_xstats_names_get(evdev,
1088 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1089 xstats_names, ids, XSTATS_MAX);
1090 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1091 0, ids, values, num_stats);
1093 printf("xstats get returned %d\n", ret);
1096 if ((unsigned int)ret > XSTATS_MAX)
1097 printf("%s %d: more xstats available than space\n",
1098 __func__, __LINE__);
1100 static const uint64_t queue_expected[] = {
1106 0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
1107 /* QID-to-Port: pinned_flows, packets */
1113 for (i = 0; (signed int)i < ret; i++) {
1114 if (queue_expected[i] != values[i]) {
1116 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1117 ", expect %"PRIu64"\n",
1118 __LINE__, i, ids[i], xstats_names[i].name,
1119 values[i], queue_expected[i]);
1124 /* Reset the queue stats here */
1125 ret = rte_event_dev_xstats_reset(evdev,
1126 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1130 /* Verify that the resetable stats are reset, and others are not */
1131 static const uint64_t queue_expected_zero[] = {
1137 0, 0, 0, 0, /* 4 iq used */
1138 /* QID-to-Port: pinned_flows, packets */
1145 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1146 ids, values, num_stats);
1148 for (i = 0; (signed int)i < ret; i++) {
1149 if (queue_expected_zero[i] != values[i]) {
1151 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1152 ", expect %"PRIu64"\n",
1153 __LINE__, i, ids[i], xstats_names[i].name,
1154 values[i], queue_expected_zero[i]);
1159 printf("%d : %d of values were not as expected above\n",
1168 rte_event_dev_dump(0, stdout);
1175 xstats_id_abuse_tests(struct test *t)
1178 const uint32_t XSTATS_MAX = 1024;
1179 const uint32_t link_port = 2;
1181 uint32_t ids[XSTATS_MAX];
1182 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1184 /* Create instance with 4 ports */
1185 if (init(t, 1, 4) < 0 ||
1186 create_ports(t, 4) < 0 ||
1187 create_atomic_qids(t, 1) < 0) {
1188 printf("%d: Error initializing device\n", __LINE__);
1192 err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0);
1194 printf("%d: error mapping lb qid\n", __LINE__);
1198 if (rte_event_dev_start(evdev) < 0) {
1199 printf("%d: Error with start call\n", __LINE__);
1203 /* no test for device, as it ignores the port/q number */
1204 int num_stats = rte_event_dev_xstats_names_get(evdev,
1205 RTE_EVENT_DEV_XSTATS_PORT,
1206 UINT8_MAX-1, xstats_names, ids,
1208 if (num_stats != 0) {
1209 printf("%d: expected %d stats, got return %d\n", __LINE__,
1214 num_stats = rte_event_dev_xstats_names_get(evdev,
1215 RTE_EVENT_DEV_XSTATS_QUEUE,
1216 UINT8_MAX-1, xstats_names, ids,
1218 if (num_stats != 0) {
1219 printf("%d: expected %d stats, got return %d\n", __LINE__,
1232 port_reconfig_credits(struct test *t)
1234 if (init(t, 1, 1) < 0) {
1235 printf("%d: Error initializing device\n", __LINE__);
1240 const uint32_t NUM_ITERS = 32;
1241 for (i = 0; i < NUM_ITERS; i++) {
1242 const struct rte_event_queue_conf conf = {
1243 .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1244 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1245 .nb_atomic_flows = 1024,
1246 .nb_atomic_order_sequences = 1024,
1248 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1249 printf("%d: error creating qid\n", __LINE__);
1254 static const struct rte_event_port_conf port_conf = {
1255 .new_event_threshold = 128,
1256 .dequeue_depth = 32,
1257 .enqueue_depth = 64,
1259 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1260 printf("%d Error setting up port\n", __LINE__);
1264 int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
1266 printf("%d: error mapping lb qid\n", __LINE__);
1270 if (rte_event_dev_start(evdev) < 0) {
1271 printf("%d: Error with start call\n", __LINE__);
1275 const uint32_t NPKTS = 1;
1277 for (j = 0; j < NPKTS; j++) {
1278 struct rte_event ev;
1279 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1281 printf("%d: gen of pkt failed\n", __LINE__);
1284 ev.queue_id = t->qid[0];
1285 ev.op = RTE_EVENT_OP_NEW;
1287 int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
1289 printf("%d: Failed to enqueue\n", __LINE__);
1290 rte_event_dev_dump(0, stdout);
1295 rte_service_run_iter_on_app_lcore(t->service_id, 1);
1297 struct rte_event ev[NPKTS];
1298 int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
1301 printf("%d error; no packet dequeued\n", __LINE__);
1303 /* let cleanup below stop the device on last iter */
1304 if (i != NUM_ITERS-1)
1305 rte_event_dev_stop(evdev);
1316 port_single_lb_reconfig(struct test *t)
1318 if (init(t, 2, 2) < 0) {
1319 printf("%d: Error initializing device\n", __LINE__);
1323 static const struct rte_event_queue_conf conf_lb_atomic = {
1324 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1325 .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1326 .nb_atomic_flows = 1024,
1327 .nb_atomic_order_sequences = 1024,
1329 if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
1330 printf("%d: error creating qid\n", __LINE__);
1334 static const struct rte_event_queue_conf conf_single_link = {
1335 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1336 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
1338 if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
1339 printf("%d: error creating qid\n", __LINE__);
1343 struct rte_event_port_conf port_conf = {
1344 .new_event_threshold = 128,
1345 .dequeue_depth = 32,
1346 .enqueue_depth = 64,
1348 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1349 printf("%d Error setting up port\n", __LINE__);
1352 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
1353 printf("%d Error setting up port\n", __LINE__);
1357 /* link port to lb queue */
1358 uint8_t queue_id = 0;
1359 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1360 printf("%d: error creating link for qid\n", __LINE__);
1364 int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
1366 printf("%d: Error unlinking lb port\n", __LINE__);
1371 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1372 printf("%d: error creating link for qid\n", __LINE__);
1377 int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
1379 printf("%d: error mapping lb qid\n", __LINE__);
1383 if (rte_event_dev_start(evdev) < 0) {
1384 printf("%d: Error with start call\n", __LINE__);
1396 xstats_brute_force(struct test *t)
1399 const uint32_t XSTATS_MAX = 1024;
1400 uint32_t ids[XSTATS_MAX];
1401 uint64_t values[XSTATS_MAX];
1402 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1405 /* Create instance with 4 ports */
1406 if (init(t, 1, 4) < 0 ||
1407 create_ports(t, 4) < 0 ||
1408 create_atomic_qids(t, 1) < 0) {
1409 printf("%d: Error initializing device\n", __LINE__);
1413 int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1415 printf("%d: error mapping lb qid\n", __LINE__);
1419 if (rte_event_dev_start(evdev) < 0) {
1420 printf("%d: Error with start call\n", __LINE__);
1424 for (i = 0; i < XSTATS_MAX; i++)
1427 for (i = 0; i < 3; i++) {
1428 uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i;
1430 for (j = 0; j < UINT8_MAX; j++) {
1431 rte_event_dev_xstats_names_get(evdev, mode,
1432 j, xstats_names, ids, XSTATS_MAX);
1434 rte_event_dev_xstats_get(evdev, mode, j, ids,
1435 values, XSTATS_MAX);
/*
 * Verify xstats id enumeration and per-id reset for the device, port
 * and queue stat groups. Enqueues NPKTS packets, checks each stat's id
 * and value against hard-coded expected tables, resets the group, then
 * re-checks that resettable stats read back as zero. The expected stat
 * counts and id offsets are hard-coded and will need updating whenever
 * a statistic is added to the sw eventdev driver.
 */
1447 xstats_id_reset_tests(struct test *t)
1449 const int wrk_enq = 2;
1452 /* Create instance with 4 ports */
1453 if (init(t, 1, 4) < 0 ||
1454 create_ports(t, 4) < 0 ||
1455 create_atomic_qids(t, 1) < 0) {
1456 printf("%d: Error initializing device\n", __LINE__);
1460 /* CQ mapping to QID */
1461 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
1463 printf("%d: error mapping lb qid\n", __LINE__);
1467 if (rte_event_dev_start(evdev) < 0) {
1468 printf("%d: Error with start call\n", __LINE__);
1472 #define XSTATS_MAX 1024
1475 uint32_t ids[XSTATS_MAX];
1476 uint64_t values[XSTATS_MAX];
1477 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1479 for (i = 0; i < XSTATS_MAX; i++)
1482 #define NUM_DEV_STATS 6
1483 /* Device names / values */
1484 int num_stats = rte_event_dev_xstats_names_get(evdev,
1485 RTE_EVENT_DEV_XSTATS_DEVICE,
1486 0, xstats_names, ids, XSTATS_MAX);
1487 if (num_stats != NUM_DEV_STATS) {
1488 printf("%d: expected %d stats, got return %d\n", __LINE__,
1489 NUM_DEV_STATS, num_stats);
1492 ret = rte_event_dev_xstats_get(evdev,
1493 RTE_EVENT_DEV_XSTATS_DEVICE,
1494 0, ids, values, num_stats);
1495 if (ret != NUM_DEV_STATS) {
1496 printf("%d: expected %d stats, got return %d\n", __LINE__,
1497 NUM_DEV_STATS, ret);
/* inject NPKTS new events so the device counters become non-zero */
1502 for (i = 0; i < NPKTS; i++) {
1503 struct rte_event ev;
1504 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1506 printf("%d: gen of pkt failed\n", __LINE__);
1509 ev.queue_id = t->qid[i];
1510 ev.op = RTE_EVENT_OP_NEW;
1514 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1516 printf("%d: Failed to enqueue\n", __LINE__);
/* run one scheduler iteration so the events are processed */
1521 rte_service_run_iter_on_app_lcore(t->service_id, 1);
1523 static const char * const dev_names[] = {
1524 "dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
1525 "dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
1527 uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
1528 for (i = 0; (int)i < ret; i++) {
1530 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1534 printf("%d: %s id incorrect, expected %d got %d\n",
1535 __LINE__, dev_names[i], i, id);
1538 if (val != dev_expected[i]) {
/* NOTE(review): this error path prints `id` with %d but the
 * mismatching quantity is the uint64_t `val` — looks like it
 * should be val with %PRIu64; confirm and fix.
 */
1539 printf("%d: %s value incorrect, expected %"
1540 PRIu64" got %d\n", __LINE__, dev_names[i],
1541 dev_expected[i], id);
1545 int reset_ret = rte_event_dev_xstats_reset(evdev,
1546 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
1550 printf("%d: failed to reset successfully\n", __LINE__);
1553 dev_expected[i] = 0;
1554 /* check value again */
1555 val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], 0);
1556 if (val != dev_expected[i]) {
1557 printf("%d: %s value incorrect, expected %"PRIu64
1558 " got %"PRIu64"\n", __LINE__, dev_names[i],
1559 dev_expected[i], val);
1564 /* 48 is stat offset from start of the devices whole xstats.
1565 * This WILL break every time we add a statistic to a port
1566 * or the device, but there is no other way to test
1569 /* num stats for the tested port. CQ size adds more stats to a port */
1570 #define NUM_PORT_STATS 21
1571 /* the port to test. */
1573 num_stats = rte_event_dev_xstats_names_get(evdev,
1574 RTE_EVENT_DEV_XSTATS_PORT, PORT,
1575 xstats_names, ids, XSTATS_MAX);
1576 if (num_stats != NUM_PORT_STATS) {
1577 printf("%d: expected %d stats, got return %d\n",
1578 __LINE__, NUM_PORT_STATS, num_stats);
1581 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT,
1582 ids, values, num_stats);
1584 if (ret != NUM_PORT_STATS) {
1585 printf("%d: expected %d stats, got return %d\n",
1586 __LINE__, NUM_PORT_STATS, ret);
1589 static const char * const port_names[] = {
1594 "port_2_avg_pkt_cycles",
1596 "port_2_rx_ring_used",
1597 "port_2_rx_ring_free",
1598 "port_2_cq_ring_used",
1599 "port_2_cq_ring_free",
1600 "port_2_dequeue_calls",
1601 "port_2_dequeues_returning_0",
1602 "port_2_dequeues_returning_1-4",
1603 "port_2_dequeues_returning_5-8",
1604 "port_2_dequeues_returning_9-12",
1605 "port_2_dequeues_returning_13-16",
1606 "port_2_dequeues_returning_17-20",
1607 "port_2_dequeues_returning_21-24",
1608 "port_2_dequeues_returning_25-28",
1609 "port_2_dequeues_returning_29-32",
1610 "port_2_dequeues_returning_33-36",
1612 uint64_t port_expected[] = {
1616 NPKTS, /* inflight */
1617 0, /* avg pkt cycles */
1619 0, /* rx ring used */
1620 4096, /* rx ring free */
1621 NPKTS, /* cq ring used */
1622 25, /* cq ring free */
1623 0, /* dequeue zero calls */
1624 0, 0, 0, 0, 0, /* 10 dequeue buckets */
/* after-reset expectations: ring occupancy stats are snapshots of
 * current state, so they keep their values rather than zeroing
 */
1627 uint64_t port_expected_zero[] = {
1631 NPKTS, /* inflight */
1632 0, /* avg pkt cycles */
1634 0, /* rx ring used */
1635 4096, /* rx ring free */
1636 NPKTS, /* cq ring used */
1637 25, /* cq ring free */
1638 0, /* dequeue zero calls */
1639 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1642 if (RTE_DIM(port_expected) != NUM_PORT_STATS ||
1643 RTE_DIM(port_names) != NUM_PORT_STATS) {
1644 printf("%d: port array of wrong size\n", __LINE__);
1649 for (i = 0; (int)i < ret; i++) {
1651 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1654 if (id != i + PORT_OFF) {
1655 printf("%d: %s id incorrect, expected %d got %d\n",
1656 __LINE__, port_names[i], i+PORT_OFF,
1660 if (val != port_expected[i]) {
/* NOTE(review): same issue as the device loop above — prints
 * `id` with %d where the mismatching uint64_t `val` was most
 * likely intended; confirm and fix.
 */
1661 printf("%d: %s value incorrect, expected %"PRIu64
1662 " got %d\n", __LINE__, port_names[i],
1663 port_expected[i], id);
1667 int reset_ret = rte_event_dev_xstats_reset(evdev,
1668 RTE_EVENT_DEV_XSTATS_PORT, PORT,
1672 printf("%d: failed to reset successfully\n", __LINE__);
1675 /* check value again */
1676 val = rte_event_dev_xstats_by_name_get(evdev, port_names[i], 0);
1677 if (val != port_expected_zero[i]) {
1678 printf("%d: %s value incorrect, expected %"PRIu64
1679 " got %"PRIu64"\n", __LINE__, port_names[i],
1680 port_expected_zero[i], val);
1687 /* num queue stats */
1688 #define NUM_Q_STATS 17
1689 /* queue offset from start of the devices whole xstats.
1690 * This will break every time we add a statistic to a device/port/queue
1692 #define QUEUE_OFF 90
1693 const uint32_t queue = 0;
1694 num_stats = rte_event_dev_xstats_names_get(evdev,
1695 RTE_EVENT_DEV_XSTATS_QUEUE, queue,
1696 xstats_names, ids, XSTATS_MAX);
1697 if (num_stats != NUM_Q_STATS) {
1698 printf("%d: expected %d stats, got return %d\n",
1699 __LINE__, NUM_Q_STATS, num_stats);
1702 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1703 queue, ids, values, num_stats);
1704 if (ret != NUM_Q_STATS) {
/* NOTE(review): message says "expected 21" but the check is against
 * NUM_Q_STATS (17) — message text is stale; confirm and update.
 */
1705 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
1708 static const char * const queue_names[] = {
1718 "qid_0_port_0_pinned_flows",
1719 "qid_0_port_0_packets",
1720 "qid_0_port_1_pinned_flows",
1721 "qid_0_port_1_packets",
1722 "qid_0_port_2_pinned_flows",
1723 "qid_0_port_2_packets",
1724 "qid_0_port_3_pinned_flows",
1725 "qid_0_port_3_packets",
1727 uint64_t queue_expected[] = {
1737 /* QID-to-Port: pinned_flows, packets */
1743 uint64_t queue_expected_zero[] = {
1753 /* QID-to-Port: pinned_flows, packets */
1759 if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
1760 RTE_DIM(queue_expected_zero) != NUM_Q_STATS ||
1761 RTE_DIM(queue_names) != NUM_Q_STATS) {
1762 printf("%d : queue array of wrong size\n", __LINE__);
1767 for (i = 0; (int)i < ret; i++) {
1769 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1772 if (id != i + QUEUE_OFF) {
1773 printf("%d: %s id incorrect, expected %d got %d\n",
1774 __LINE__, queue_names[i], i+QUEUE_OFF,
1778 if (val != queue_expected[i]) {
/* NOTE(review): format prints "%d: %d:" with (i, __LINE__) —
 * argument order differs from every other message in this file
 * (__LINE__ first), and "value ," is missing the word
 * "incorrect"; confirm intended wording/order.
 */
1779 printf("%d: %d: %s value , expected %"PRIu64
1780 " got %"PRIu64"\n", i, __LINE__,
1781 queue_names[i], queue_expected[i], val);
1785 int reset_ret = rte_event_dev_xstats_reset(evdev,
1786 RTE_EVENT_DEV_XSTATS_QUEUE,
1789 printf("%d: failed to reset successfully\n", __LINE__);
1792 /* check value again */
1793 val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i],
1795 if (val != queue_expected_zero[i]) {
1796 printf("%d: %s value incorrect, expected %"PRIu64
1797 " got %"PRIu64"\n", __LINE__, queue_names[i],
1798 queue_expected_zero[i], val);
/*
 * Check that an ordered queue can be set up twice on the same queue id
 * (i.e. reconfigured) without error, then that the device links and
 * starts cleanly afterwards.
 */
1814 ordered_reconfigure(struct test *t)
1816 if (init(t, 1, 1) < 0 ||
1817 create_ports(t, 1) < 0) {
1818 printf("%d: Error initializing device\n", __LINE__);
1822 const struct rte_event_queue_conf conf = {
1823 .schedule_type = RTE_SCHED_TYPE_ORDERED,
1824 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1825 .nb_atomic_flows = 1024,
1826 .nb_atomic_order_sequences = 1024,
/* first setup of queue 0 */
1829 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1830 printf("%d: error creating qid\n", __LINE__);
/* second setup of the same queue — the reconfigure under test */
1834 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1835 printf("%d: error creating qid, for 2nd time\n", __LINE__);
1839 rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1840 if (rte_event_dev_start(evdev) < 0) {
1841 printf("%d: Error with start call\n", __LINE__);
/*
 * Verify QID priority scheduling: enqueue one packet to each of three
 * QIDs of increasing priority and check they are dequeued in priority
 * order (highest first) rather than in ingress order.
 */
1853 qid_priorities(struct test *t)
1855 /* Test works by having a CQ with enough empty space for all packets,
1856 * and enqueueing 3 packets to 3 QIDs. They must return based on the
1857 * priority of the QID, not the ingress order, to pass the test
1860 /* Create instance with 1 ports, and 3 qids */
1861 if (init(t, 3, 1) < 0 ||
1862 create_ports(t, 1) < 0) {
1863 printf("%d: Error initializing device\n", __LINE__);
1867 for (i = 0; i < 3; i++) {
1869 const struct rte_event_queue_conf conf = {
1870 .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1871 /* increase priority (0 == highest), as we go */
1872 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
1873 .nb_atomic_flows = 1024,
1874 .nb_atomic_order_sequences = 1024,
1877 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
1878 printf("%d: error creating qid %d\n", __LINE__, i);
1884 /* map all QIDs to port */
1885 rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1887 if (rte_event_dev_start(evdev) < 0) {
1888 printf("%d: Error with start call\n", __LINE__);
1892 /* enqueue 3 packets, setting seqn and QID to check priority */
1893 for (i = 0; i < 3; i++) {
1894 struct rte_event ev;
1895 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1897 printf("%d: gen of pkt failed\n", __LINE__);
1900 ev.queue_id = t->qid[i];
1901 ev.op = RTE_EVENT_OP_NEW;
1905 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1907 printf("%d: Failed to enqueue\n", __LINE__);
/* single scheduler iteration moves all events to the CQ */
1912 rte_service_run_iter_on_app_lcore(t->service_id, 1);
1914 /* dequeue packets, verify priority was upheld */
1915 struct rte_event ev[32];
1917 rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
1918 if (deq_pkts != 3) {
1919 printf("%d: failed to deq packets\n", __LINE__);
1920 rte_event_dev_dump(evdev, stdout);
/* highest-priority QID was enqueued last, so seqn order must be 2,1,0 */
1923 for (i = 0; i < 3; i++) {
1924 if (ev[i].mbuf->seqn != 2-i) {
1926 "%d: qid priority test: seqn %d incorrectly prioritized\n",
/*
 * Exercise atomic load-balancing across three worker CQs: enqueue a
 * mix of three flows and verify the per-port inflight counts show the
 * expected flow-to-CQ distribution.
 */
1936 load_balancing(struct test *t)
1938 const int rx_enq = 0;
1942 if (init(t, 1, 4) < 0 ||
1943 create_ports(t, 4) < 0 ||
1944 create_atomic_qids(t, 1) < 0) {
1945 printf("%d: Error initializing device\n", __LINE__);
1949 for (i = 0; i < 3; i++) {
1950 /* map port 1 - 3 inclusive */
1951 if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
1953 printf("%d: error mapping qid to port %d\n",
1959 if (rte_event_dev_start(evdev) < 0) {
1960 printf("%d: Error with start call\n", __LINE__);
1964 /************** FORWARD ****************/
1966 * Create a set of flows that test the load-balancing operation of the
1967 * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
1968 * with a new flow, which should be sent to the 3rd mapped CQ
1970 static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
1972 for (i = 0; i < RTE_DIM(flows); i++) {
1973 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1975 printf("%d: gen of pkt failed\n", __LINE__);
1979 struct rte_event ev = {
1980 .op = RTE_EVENT_OP_NEW,
1981 .queue_id = t->qid[0],
1982 .flow_id = flows[i],
1985 /* generate pkt and enqueue */
1986 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1988 printf("%d: Failed to enqueue\n", __LINE__);
1993 rte_service_run_iter_on_app_lcore(t->service_id, 1);
1995 struct test_event_dev_stats stats;
1996 err = test_event_dev_stats_get(evdev, &stats);
1998 printf("%d: failed to get stats\n", __LINE__);
/* expected split of the 9 events: flow 0 -> port 1 (4 pkts),
 * flow 1 -> port 2 (2 pkts), flow 2 -> port 3 (3 pkts)
 */
2002 if (stats.port_inflight[1] != 4) {
2003 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2007 if (stats.port_inflight[2] != 2) {
2008 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2012 if (stats.port_inflight[3] != 3) {
2013 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
/*
 * Verify that atomic flow-to-CQ pinning is released once a flow has no
 * inflight packets: drain flow 0 from CQ0, then enqueue new flows and
 * confirm flow 0's next packet migrates to the least-loaded CQ instead
 * of returning to its old one.
 */
2023 load_balancing_history(struct test *t)
2025 struct test_event_dev_stats stats = {0};
2026 const int rx_enq = 0;
2030 /* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
2031 if (init(t, 1, 4) < 0 ||
2032 create_ports(t, 4) < 0 ||
2033 create_atomic_qids(t, 1) < 0)
2036 /* CQ mapping to QID */
2037 if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
2038 printf("%d: error mapping port 1 qid\n", __LINE__);
2041 if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
2042 printf("%d: error mapping port 2 qid\n", __LINE__);
2045 if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
2046 printf("%d: error mapping port 3 qid\n", __LINE__);
2049 if (rte_event_dev_start(evdev) < 0) {
2050 printf("%d: Error with start call\n", __LINE__);
2055 * Create a set of flows that test the load-balancing operation of the
2056 * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
2057 * the packet from CQ 0, send in a new set of flows. Ensure that:
2058 * 1. The new flow 3 gets into the empty CQ0
2059 * 2. packets for existing flow gets added into CQ1
2060 * 3. Next flow 0 pkt is now onto CQ2, since CQ0 and CQ1 now contain
2061 * more outstanding pkts
2063 * This test makes sure that when a flow ends (i.e. all packets
2064 * have been completed for that flow), that the flow can be moved
2065 * to a different CQ when new packets come in for that flow.
2067 static uint32_t flows1[] = {0, 1, 1, 2};
2069 for (i = 0; i < RTE_DIM(flows1); i++) {
2070 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2071 struct rte_event ev = {
2072 .flow_id = flows1[i],
2073 .op = RTE_EVENT_OP_NEW,
2074 .queue_id = t->qid[0],
2075 .event_type = RTE_EVENT_TYPE_CPU,
2076 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2081 printf("%d: gen of pkt failed\n", __LINE__);
/* stash the flow id in the mbuf rss field so it can be checked
 * on dequeue
 */
2084 arp->hash.rss = flows1[i];
2085 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2087 printf("%d: Failed to enqueue\n", __LINE__);
2092 /* call the scheduler */
2093 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2095 /* Dequeue the flow 0 packet from port 1, so that we can then drop */
2096 struct rte_event ev;
2097 if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
2098 printf("%d: failed to dequeue\n", __LINE__);
2101 if (ev.mbuf->hash.rss != flows1[0]) {
2102 printf("%d: unexpected flow received\n", __LINE__);
2106 /* drop the flow 0 packet from port 1 */
2107 rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
2109 /* call the scheduler */
2110 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2113 * Set up the next set of flows, first a new flow to fill up
2114 * CQ 0, so that the next flow 0 packet should go to CQ2
2116 static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
2118 for (i = 0; i < RTE_DIM(flows2); i++) {
2119 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2120 struct rte_event ev = {
2121 .flow_id = flows2[i],
2122 .op = RTE_EVENT_OP_NEW,
2123 .queue_id = t->qid[0],
2124 .event_type = RTE_EVENT_TYPE_CPU,
2125 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2130 printf("%d: gen of pkt failed\n", __LINE__);
2133 arp->hash.rss = flows2[i];
2135 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2137 printf("%d: Failed to enqueue\n", __LINE__);
2143 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2145 err = test_event_dev_stats_get(evdev, &stats);
2147 printf("%d:failed to get stats\n", __LINE__);
2152 * Now check the resulting inflights on each port.
2154 if (stats.port_inflight[1] != 3) {
2155 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2157 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2158 (unsigned int)stats.port_inflight[1],
2159 (unsigned int)stats.port_inflight[2],
2160 (unsigned int)stats.port_inflight[3]);
2163 if (stats.port_inflight[2] != 4) {
2164 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2166 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2167 (unsigned int)stats.port_inflight[1],
2168 (unsigned int)stats.port_inflight[2],
2169 (unsigned int)stats.port_inflight[3]);
2172 if (stats.port_inflight[3] != 2) {
2173 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2175 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2176 (unsigned int)stats.port_inflight[1],
2177 (unsigned int)stats.port_inflight[2],
2178 (unsigned int)stats.port_inflight[3]);
/* drain all worker ports, releasing each event so inflights drop */
2182 for (i = 1; i <= 3; i++) {
2183 struct rte_event ev;
2184 while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
2185 rte_event_enqueue_burst(evdev, i, &release_ev, 1);
2187 rte_service_run_iter_on_app_lcore(t->service_id, 1);
/*
 * Enqueue an event with an out-of-range queue_id and verify it is
 * accepted at enqueue time but counted as a per-port rx drop: port
 * inflight stays 0, port_rx_dropped becomes 1, and the device-level
 * rx_dropped counter is NOT also incremented (no double counting).
 */
2194 invalid_qid(struct test *t)
2196 struct test_event_dev_stats stats;
2197 const int rx_enq = 0;
2201 if (init(t, 1, 4) < 0 ||
2202 create_ports(t, 4) < 0 ||
2203 create_atomic_qids(t, 1) < 0) {
2204 printf("%d: Error initializing device\n", __LINE__);
2208 /* CQ mapping to QID */
2209 for (i = 0; i < 4; i++) {
2210 err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
2213 printf("%d: error mapping port 1 qid\n", __LINE__);
2218 if (rte_event_dev_start(evdev) < 0) {
2219 printf("%d: Error with start call\n", __LINE__);
2224 * Send in a packet with an invalid qid to the scheduler.
2225 * We should see the packed enqueued OK, but the inflights for
2226 * that packet should not be incremented, and the rx_dropped
2227 * should be incremented.
2229 static uint32_t flows1[] = {20};
2231 for (i = 0; i < RTE_DIM(flows1); i++) {
2232 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2234 printf("%d: gen of pkt failed\n", __LINE__);
2238 struct rte_event ev = {
2239 .op = RTE_EVENT_OP_NEW,
/* offsetting the valid qid by the flow value makes it invalid */
2240 .queue_id = t->qid[0] + flows1[i],
2244 /* generate pkt and enqueue */
2245 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2247 printf("%d: Failed to enqueue\n", __LINE__);
2252 /* call the scheduler */
2253 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2255 err = test_event_dev_stats_get(evdev, &stats);
2257 printf("%d: failed to get stats\n", __LINE__);
2262 * Now check the resulting inflights on the port, and the rx_dropped.
2264 if (stats.port_inflight[0] != 0) {
2265 printf("%d:%s: port 1 inflight count not correct\n", __LINE__,
2267 rte_event_dev_dump(evdev, stdout);
2270 if (stats.port_rx_dropped[0] != 1) {
2271 printf("%d:%s: port 1 drops\n", __LINE__, __func__);
2272 rte_event_dev_dump(evdev, stdout);
2275 /* each packet drop should only be counted in one place - port or dev */
2276 if (stats.rx_dropped != 0) {
2277 printf("%d:%s: port 1 dropped count not correct\n", __LINE__,
2279 rte_event_dev_dump(evdev, stdout);
/*
 * Push a single tagged packet through the device: enqueue on the rx
 * port, schedule, dequeue on the worker port, check the magic sequence
 * number survived, release the event, and verify the inflight count
 * returns to zero.
 */
2288 single_packet(struct test *t)
2290 const uint32_t MAGIC_SEQN = 7321;
2291 struct rte_event ev;
2292 struct test_event_dev_stats stats;
2293 const int rx_enq = 0;
2294 const int wrk_enq = 2;
2297 /* Create instance with 4 ports */
2298 if (init(t, 1, 4) < 0 ||
2299 create_ports(t, 4) < 0 ||
2300 create_atomic_qids(t, 1) < 0) {
2301 printf("%d: Error initializing device\n", __LINE__);
2305 /* CQ mapping to QID */
2306 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
2308 printf("%d: error mapping lb qid\n", __LINE__);
2313 if (rte_event_dev_start(evdev) < 0) {
2314 printf("%d: Error with start call\n", __LINE__);
2318 /************** Gen pkt and enqueue ****************/
2319 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2321 printf("%d: gen of pkt failed\n", __LINE__);
2325 ev.op = RTE_EVENT_OP_NEW;
2326 ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
2330 arp->seqn = MAGIC_SEQN;
2332 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2334 printf("%d: Failed to enqueue\n", __LINE__);
2338 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2340 err = test_event_dev_stats_get(evdev, &stats);
2342 printf("%d: failed to get stats\n", __LINE__);
2346 if (stats.rx_pkts != 1 ||
2347 stats.tx_pkts != 1 ||
2348 stats.port_inflight[wrk_enq] != 1) {
2349 printf("%d: Sched core didn't handle pkt as expected\n",
2351 rte_event_dev_dump(evdev, stdout);
2357 deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
2359 printf("%d: Failed to deq\n", __LINE__);
2363 err = test_event_dev_stats_get(evdev, &stats);
2365 printf("%d: failed to get stats\n", __LINE__);
/* NOTE(review): stats_get is called again here immediately after the
 * call above — the second call looks redundant; confirm and remove.
 */
2369 err = test_event_dev_stats_get(evdev, &stats);
2370 if (ev.mbuf->seqn != MAGIC_SEQN) {
2371 printf("%d: magic sequence number not dequeued\n", __LINE__);
2375 rte_pktmbuf_free(ev.mbuf);
2376 err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
2378 printf("%d: Failed to enqueue\n", __LINE__);
2381 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2383 err = test_event_dev_stats_get(evdev, &stats);
2384 if (stats.port_inflight[wrk_enq] != 0) {
2385 printf("%d: port inflight not correct\n", __LINE__);
/*
 * Verify per-port inflight accounting across two QIDs/ports: inflights
 * must stay constant across dequeue (events are still owned by the
 * port) and only drop to zero after RELEASE events are enqueued and
 * the scheduler has run.
 */
2394 inflight_counts(struct test *t)
2396 struct rte_event ev;
2397 struct test_event_dev_stats stats;
2398 const int rx_enq = 0;
2404 /* Create instance with 4 ports */
2405 if (init(t, 2, 3) < 0 ||
2406 create_ports(t, 3) < 0 ||
2407 create_atomic_qids(t, 2) < 0) {
2408 printf("%d: Error initializing device\n", __LINE__);
2412 /* CQ mapping to QID */
2413 err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
2415 printf("%d: error mapping lb qid\n", __LINE__);
2419 err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
2421 printf("%d: error mapping lb qid\n", __LINE__);
2426 if (rte_event_dev_start(evdev) < 0) {
2427 printf("%d: Error with start call\n", __LINE__);
2431 /************** FORWARD ****************/
/* enqueue QID1_NUM events to qid 0 and QID2_NUM events to qid 1 */
2433 for (i = 0; i < QID1_NUM; i++) {
2434 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2437 printf("%d: gen of pkt failed\n", __LINE__);
2441 ev.queue_id = t->qid[0];
2442 ev.op = RTE_EVENT_OP_NEW;
2444 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2446 printf("%d: Failed to enqueue\n", __LINE__);
2451 for (i = 0; i < QID2_NUM; i++) {
2452 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2455 printf("%d: gen of pkt failed\n", __LINE__);
2458 ev.queue_id = t->qid[1];
2459 ev.op = RTE_EVENT_OP_NEW;
2461 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2463 printf("%d: Failed to enqueue\n", __LINE__);
2469 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2471 err = test_event_dev_stats_get(evdev, &stats);
2473 printf("%d: failed to get stats\n", __LINE__);
2477 if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
2478 stats.tx_pkts != QID1_NUM + QID2_NUM) {
2479 printf("%d: Sched core didn't handle pkt as expected\n",
2484 if (stats.port_inflight[p1] != QID1_NUM) {
2485 printf("%d: %s port 1 inflight not correct\n", __LINE__,
2489 if (stats.port_inflight[p2] != QID2_NUM) {
2490 printf("%d: %s port 2 inflight not correct\n", __LINE__,
2495 /************** DEQUEUE INFLIGHT COUNT CHECKS ****************/
2497 struct rte_event events[QID1_NUM + QID2_NUM];
2498 uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
2499 RTE_DIM(events), 0);
2501 if (deq_pkts != QID1_NUM) {
2502 printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
/* dequeue alone must NOT decrement inflights — they are released
 * only by RELEASE events below
 */
2505 err = test_event_dev_stats_get(evdev, &stats);
2506 if (stats.port_inflight[p1] != QID1_NUM) {
2507 printf("%d: port 1 inflight decrement after DEQ != 0\n",
2511 for (i = 0; i < QID1_NUM; i++) {
2512 err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
2515 printf("%d: %s rte enqueue of inf release failed\n",
2516 __LINE__, __func__);
2522 * As the scheduler core decrements inflights, it needs to run to
2523 * process packets to act on the drop messages
2525 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2527 err = test_event_dev_stats_get(evdev, &stats);
2528 if (stats.port_inflight[p1] != 0) {
2529 printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
/* repeat the same dequeue/release cycle for the second port */
2534 deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
2535 RTE_DIM(events), 0);
2536 if (deq_pkts != QID2_NUM) {
2537 printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
2540 err = test_event_dev_stats_get(evdev, &stats);
2541 if (stats.port_inflight[p2] != QID2_NUM) {
/* NOTE(review): message says "port 1" but this checks port p2 —
 * stale copy/paste text; confirm and fix to "port 2".
 */
2542 printf("%d: port 1 inflight decrement after DEQ != 0\n",
2546 for (i = 0; i < QID2_NUM; i++) {
2547 err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
2550 printf("%d: %s rte enqueue of inf release failed\n",
2551 __LINE__, __func__);
2557 * As the scheduler core decrements inflights, it needs to run to
2558 * process packets to act on the drop messages
2560 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2562 err = test_event_dev_stats_get(evdev, &stats);
2563 if (stats.port_inflight[p2] != 0) {
2564 printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
2571 rte_event_dev_dump(evdev, stdout);
/*
 * Shared body for the ordered/unordered basic tests: push 3 packets
 * through 3 worker ports all linked to qid0, forward them to qid1 in
 * REVERSE order, then dequeue from the directed tx port. When
 * check_order is set (ordered qid) the sequence numbers must come out
 * restored to original order; when clear (unordered qid) order is not
 * verified.
 */
2577 parallel_basic(struct test *t, int check_order)
2579 const uint8_t rx_port = 0;
2580 const uint8_t w1_port = 1;
2581 const uint8_t w3_port = 3;
2582 const uint8_t tx_port = 4;
2585 uint32_t deq_pkts, j;
2586 struct rte_mbuf *mbufs[3];
2587 struct rte_mbuf *mbufs_out[3] = { 0 };
2588 const uint32_t MAGIC_SEQN = 1234;
2590 /* Create instance with 4 ports */
2591 if (init(t, 2, tx_port + 1) < 0 ||
2592 create_ports(t, tx_port + 1) < 0 ||
2593 (check_order ? create_ordered_qids(t, 1) :
2594 create_unordered_qids(t, 1)) < 0 ||
2595 create_directed_qids(t, 1, &tx_port)) {
2596 printf("%d: Error initializing device\n", __LINE__);
2602 * We need three ports, all mapped to the same ordered qid0. Then we'll
2603 * take a packet out to each port, re-enqueue in reverse order,
2604 * then make sure the reordering has taken place properly when we
2605 * dequeue from the tx_port.
2607 * Simplified test setup diagram:
2611 * qid0 - w2_port - qid1
2615 /* CQ mapping to QID for LB ports (directed mapped on create) */
2616 for (i = w1_port; i <= w3_port; i++) {
2617 err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
2620 printf("%d: error mapping lb qid\n", __LINE__);
2626 if (rte_event_dev_start(evdev) < 0) {
2627 printf("%d: Error with start call\n", __LINE__);
2631 /* Enqueue 3 packets to the rx port */
2632 for (i = 0; i < 3; i++) {
2633 struct rte_event ev;
2634 mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
2636 printf("%d: gen of pkt failed\n", __LINE__);
2640 ev.queue_id = t->qid[0];
2641 ev.op = RTE_EVENT_OP_NEW;
2643 mbufs[i]->seqn = MAGIC_SEQN + i;
2645 /* generate pkt and enqueue */
2646 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
2648 printf("%d: Failed to enqueue pkt %u, retval = %u\n",
2654 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2656 /* use extra slot to make logic in loops easier */
2657 struct rte_event deq_ev[w3_port + 1];
2659 /* Dequeue the 3 packets, one from each worker port */
2660 for (i = w1_port; i <= w3_port; i++) {
2661 deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
2663 if (deq_pkts != 1) {
2664 printf("%d: Failed to deq\n", __LINE__);
2665 rte_event_dev_dump(evdev, stdout);
2670 /* Enqueue each packet in reverse order, flushing after each one */
2671 for (i = w3_port; i >= w1_port; i--) {
2673 deq_ev[i].op = RTE_EVENT_OP_FORWARD;
2674 deq_ev[i].queue_id = t->qid[1];
2675 err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
2677 printf("%d: Failed to enqueue\n", __LINE__);
2681 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2683 /* dequeue from the tx ports, we should get 3 packets */
2684 deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
2687 /* Check to see if we've got all 3 packets */
2688 if (deq_pkts != 3) {
2689 printf("%d: expected 3 pkts at tx port got %d from port %d\n",
2690 __LINE__, deq_pkts, tx_port);
2691 rte_event_dev_dump(evdev, stdout);
2695 /* Check to see if the sequence numbers are in expected order */
2697 for (j = 0 ; j < deq_pkts ; j++) {
2698 if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
2700 "%d: Incorrect sequence number(%d) from port %d\n",
2701 __LINE__, mbufs_out[j]->seqn, tx_port);
2707 /* Destroy the instance */
/* Run the parallel basic test with an ordered qid (order is verified). */
2713 ordered_basic(struct test *t)
2715 return parallel_basic(t, 1);
/* Run the parallel basic test with an unordered qid (order not checked). */
2719 unordered_basic(struct test *t)
2721 return parallel_basic(t, 0);
2725 holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
2727 const struct rte_event new_ev = {
2728 .op = RTE_EVENT_OP_NEW
2729 /* all other fields zero */
2731 struct rte_event ev = new_ev;
2732 unsigned int rx_port = 0; /* port we get the first flow on */
2733 char rx_port_used_stat[64];
2734 char rx_port_free_stat[64];
2735 char other_port_used_stat[64];
2737 if (init(t, 1, 2) < 0 ||
2738 create_ports(t, 2) < 0 ||
2739 create_atomic_qids(t, 1) < 0) {
2740 printf("%d: Error initializing device\n", __LINE__);
2743 int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2744 if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 ||
2746 printf("%d: Error links queue to ports\n", __LINE__);
2749 if (rte_event_dev_start(evdev) < 0) {
2750 printf("%d: Error with start call\n", __LINE__);
2754 /* send one packet and see where it goes, port 0 or 1 */
2755 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2756 printf("%d: Error doing first enqueue\n", __LINE__);
2759 rte_service_run_iter_on_app_lcore(t->service_id, 1);
/* discover which port's CQ received the first event, then build
 * xstat names for that port ("rx" port) and the other port
 */
2761 if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
2765 snprintf(rx_port_used_stat, sizeof(rx_port_used_stat),
2766 "port_%u_cq_ring_used", rx_port);
2767 snprintf(rx_port_free_stat, sizeof(rx_port_free_stat),
2768 "port_%u_cq_ring_free", rx_port);
2769 snprintf(other_port_used_stat, sizeof(other_port_used_stat),
2770 "port_%u_cq_ring_used", rx_port ^ 1);
2771 if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL)
2773 printf("%d: Error, first event not scheduled\n", __LINE__);
2777 /* now fill up the rx port's queue with one flow to cause HOLB */
2780 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2781 printf("%d: Error with enqueue\n", __LINE__);
2784 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2785 } while (rte_event_dev_xstats_by_name_get(evdev,
2786 rx_port_free_stat, NULL) != 0);
2788 /* one more packet, which needs to stay in IQ - i.e. HOLB */
2790 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2791 printf("%d: Error with enqueue\n", __LINE__);
2794 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2796 /* check that the other port still has an empty CQ */
2797 if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2799 printf("%d: Error, second port CQ is not empty\n", __LINE__);
2802 /* check IQ now has one packet */
2803 if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2805 printf("%d: Error, QID does not have exactly 1 packet\n",
2810 /* send another flow, which should pass the other IQ entry */
2813 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2814 printf("%d: Error with enqueue\n", __LINE__);
2817 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2819 if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2821 printf("%d: Error, second flow did not pass out first\n",
2826 if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2828 printf("%d: Error, QID does not have exactly 1 packet\n",
2835 rte_event_dev_dump(evdev, stdout);
/*
 * Worker-lcore body for the loopback stress test: dequeues bursts from
 * the worker port and forwards each event onward. Events walk QIDs
 * 0..8, then udata64 counts full laps; after 16 laps the mbuf is freed
 * and the event released. Runs until NUM_PACKETS events complete.
 */
2841 worker_loopback_worker_fn(void *arg)
2843 struct test *t = arg;
2844 uint8_t port = t->port[1];
2849 * Takes packets from the input port and then loops them back through
2850 * the Eventdev. Each packet gets looped through QIDs 0-8, 16 times
2851 * so each packet goes through 8*16 = 128 times.
2853 printf("%d: \tWorker function started\n", __LINE__);
2854 while (count < NUM_PACKETS) {
2855 #define BURST_SIZE 32
2856 struct rte_event ev[BURST_SIZE];
2857 uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev,
2864 for (i = 0; i < nb_rx; i++) {
/* not yet at the last QID: forward to the next queue */
2866 if (ev[i].queue_id != 8) {
2867 ev[i].op = RTE_EVENT_OP_FORWARD;
2868 enqd = rte_event_enqueue_burst(evdev, port,
2871 printf("%d: Can't enqueue FWD!!\n",
/* at QID 8: one full lap done, count it in udata64 */
2879 ev[i].mbuf->udata64++;
2880 if (ev[i].mbuf->udata64 != 16) {
2881 ev[i].op = RTE_EVENT_OP_FORWARD;
2882 enqd = rte_event_enqueue_burst(evdev, port,
2885 printf("%d: Can't enqueue FWD!!\n",
2891 /* we have hit 16 iterations through system - drop */
2892 rte_pktmbuf_free(ev[i].mbuf);
2894 ev[i].op = RTE_EVENT_OP_RELEASE;
2895 enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1);
2897 printf("%d drop enqueue failed\n", __LINE__);
/*
 * Producer-lcore body for the loopback stress test: allocates mbufs
 * (spinning until the pool has one available) and enqueues NUM_PACKETS
 * NEW events to qid 0, retrying each enqueue until it is accepted.
 */
2907 worker_loopback_producer_fn(void *arg)
2909 struct test *t = arg;
2910 uint8_t port = t->port[0];
2913 printf("%d: \tProducer function started\n", __LINE__);
2914 while (count < NUM_PACKETS) {
2915 struct rte_mbuf *m = 0;
2917 m = rte_pktmbuf_alloc(t->mbuf_pool);
2918 } while (m == NULL);
2922 struct rte_event ev = {
2923 .op = RTE_EVENT_OP_NEW,
2924 .queue_id = t->qid[0],
/* derive a pseudo-random flow id from the mbuf address */
2925 .flow_id = (uintptr_t)m & 0xFFFF,
/* busy-retry until the new-event threshold admits the event */
2929 if (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1) {
2930 while (rte_event_enqueue_burst(evdev, port, &ev, 1) !=
/*
 * Stress test: one producer lcore injects NEW events and one worker
 * lcore loops each packet through all atomic QIDs 16 times, while this
 * (application) lcore manually drives the sw eventdev's scheduling
 * service and watches the Tx counter for deadlock. Needs >= 3 lcores.
 * NOTE(review): this excerpt is elided -- return statements, several
 * closing braces and final cleanup fall in the missing lines.
 */
2942 worker_loopback(struct test *t)
2944 /* use a single producer core, and a worker core to see what happens
2945 * if the worker loops packets back multiple times
2947 struct test_event_dev_stats stats;
2948 uint64_t print_cycles = 0, cycles = 0;
2949 uint64_t tx_pkts = 0;
2951 int w_lcore, p_lcore;
/* presumably init(t, 8, 2) creates 8 queues and 2 ports -- TODO confirm arg order */
2953 if (init(t, 8, 2) < 0 ||
2954 create_atomic_qids(t, 8) < 0) {
2955 printf("%d: Error initializing device\n", __LINE__);
2959 /* RX with low max events */
2960 static struct rte_event_port_conf conf = {
2961 .dequeue_depth = 32,
2962 .enqueue_depth = 64,
2964 /* beware: this cannot be initialized in the static above as it would
2965 * only be initialized once - and this needs to be set for multiple runs
2967 conf.new_event_threshold = 512;
2969 if (rte_event_port_setup(evdev, 0, &conf) < 0) {
2970 printf("Error setting up RX port\n");
2974 /* TX with higher max events */
2975 conf.new_event_threshold = 4096;
2976 if (rte_event_port_setup(evdev, 1, &conf) < 0) {
2977 printf("Error setting up TX port\n");
2982 /* CQ mapping to QID */
/* NULL queue list links the worker port to every configured queue */
2983 err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2984 if (err != 8) { /* should have mapped all queues*/
2985 printf("%d: error mapping port 2 to all qids\n", __LINE__);
2989 if (rte_event_dev_start(evdev) < 0) {
2990 printf("%d: Error with start call\n", __LINE__);
/* pick two distinct slave lcores for producer and worker */
2994 p_lcore = rte_get_next_lcore(
2995 /* start core */ -1,
2996 /* skip master */ 1,
2998 w_lcore = rte_get_next_lcore(p_lcore, 1, 0);
3000 rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);
3001 rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);
3003 print_cycles = cycles = rte_get_timer_cycles();
/* drive the scheduler here until both launched lcores finish */
3004 while (rte_eal_get_lcore_state(p_lcore) != FINISHED ||
3005 rte_eal_get_lcore_state(w_lcore) != FINISHED) {
3007 rte_service_run_iter_on_app_lcore(t->service_id, 1);
3009 uint64_t new_cycles = rte_get_timer_cycles();
/* once-a-second progress report */
3011 if (new_cycles - print_cycles > rte_get_timer_hz()) {
3012 test_event_dev_stats_get(evdev, &stats);
3014 "%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n",
3015 __LINE__, stats.rx_pkts, stats.tx_pkts);
3017 print_cycles = new_cycles;
/* if Tx hasn't advanced in ~3 seconds, declare deadlock and dump state */
3019 if (new_cycles - cycles > rte_get_timer_hz() * 3) {
3020 test_event_dev_stats_get(evdev, &stats);
3021 if (stats.tx_pkts == tx_pkts) {
3022 rte_event_dev_dump(evdev, stdout);
3023 printf("Dumping xstats:\n");
3026 "%d: No schedules for seconds, deadlock\n",
3030 tx_pkts = stats.tx_pkts;
3031 cycles = new_cycles;
/* one final scheduler pass, then join the launched lcores */
3034 rte_service_run_iter_on_app_lcore(t->service_id, 1);
3035 /* ensure all completions are flushed */
3037 rte_eal_mp_wait_lcore();
/* mbuf pool is created once and cached here so re-runs can reuse it */
3043 static struct rte_mempool *eventdev_func_mempool;
/*
 * Autotest entry point: locates (or vdev-creates) the "event_sw0"
 * device, maps its scheduling service for manual invocation, then runs
 * every functional test in sequence.
 * NOTE(review): this excerpt is elided -- the "if (ret != 0)" guards
 * after each test call, goto/return statements and the final cleanup
 * fall in the missing lines; comments describe only what is visible.
 * NOTE(review): the malloc() result is used without a NULL check here
 * -- TODO confirm the elided lines do not guard it, and add a check.
 */
3046 test_sw_eventdev(void)
3048 struct test *t = malloc(sizeof(struct test));
3051 /* manually initialize the op, older gcc's complain on static
3052 * initialization of struct elements that are a bitfield.
3054 release_ev.op = RTE_EVENT_OP_RELEASE;
3056 const char *eventdev_name = "event_sw0";
3057 evdev = rte_event_dev_get_dev_id(eventdev_name);
/* device not present: create the sw eventdev vdev on the fly */
3059 printf("%d: Eventdev %s not found - creating.\n",
3060 __LINE__, eventdev_name);
3061 if (rte_vdev_init(eventdev_name, NULL) < 0) {
3062 printf("Error creating eventdev\n");
3065 evdev = rte_event_dev_get_dev_id(eventdev_name);
3067 printf("Error finding newly created eventdev\n");
/* the sw PMD schedules via a service core; grab its service id */
3072 if (rte_event_dev_service_id_get(evdev, &t->service_id) < 0) {
3073 printf("Failed to get service ID for software event dev\n");
/* run the service manually from the test, not from a service lcore */
3077 rte_service_runstate_set(t->service_id, 1);
3078 rte_service_set_runstate_mapped_check(t->service_id, 0);
3080 /* Only create mbuf pool once, reuse for each test run */
3081 if (!eventdev_func_mempool) {
3082 eventdev_func_mempool = rte_pktmbuf_pool_create(
3083 "EVENTDEV_SW_SA_MBUF_POOL",
3084 (1<<12), /* 4k buffers */
3085 32 /*MBUF_CACHE_SIZE*/,
3087 512, /* use very small mbufs */
3089 if (!eventdev_func_mempool) {
3090 printf("ERROR creating mempool\n");
3094 t->mbuf_pool = eventdev_func_mempool;
/* --- functional tests run in sequence from here on --- */
3095 printf("*** Running Single Directed Packet test...\n");
3096 ret = test_single_directed_packet(t);
3098 printf("ERROR - Single Directed Packet test FAILED.\n");
3101 printf("*** Running Directed Forward Credit test...\n");
3102 ret = test_directed_forward_credits(t);
3104 printf("ERROR - Directed Forward Credit test FAILED.\n");
3107 printf("*** Running Single Load Balanced Packet test...\n");
3108 ret = single_packet(t);
3110 printf("ERROR - Single Packet test FAILED.\n");
3113 printf("*** Running Unordered Basic test...\n");
3114 ret = unordered_basic(t);
3116 printf("ERROR - Unordered Basic test FAILED.\n");
3119 printf("*** Running Ordered Basic test...\n");
3120 ret = ordered_basic(t);
3122 printf("ERROR - Ordered Basic test FAILED.\n");
3125 printf("*** Running Burst Packets test...\n");
3126 ret = burst_packets(t);
3128 printf("ERROR - Burst Packets test FAILED.\n");
3131 printf("*** Running Load Balancing test...\n");
3132 ret = load_balancing(t);
3134 printf("ERROR - Load Balancing test FAILED.\n");
3137 printf("*** Running Prioritized Directed test...\n");
3138 ret = test_priority_directed(t);
3140 printf("ERROR - Prioritized Directed test FAILED.\n");
3143 printf("*** Running Prioritized Atomic test...\n");
3144 ret = test_priority_atomic(t);
3146 printf("ERROR - Prioritized Atomic test FAILED.\n");
3150 printf("*** Running Prioritized Ordered test...\n");
3151 ret = test_priority_ordered(t);
3153 printf("ERROR - Prioritized Ordered test FAILED.\n");
3156 printf("*** Running Prioritized Unordered test...\n");
3157 ret = test_priority_unordered(t);
3159 printf("ERROR - Prioritized Unordered test FAILED.\n");
3162 printf("*** Running Invalid QID test...\n");
3163 ret = invalid_qid(t);
3165 printf("ERROR - Invalid QID test FAILED.\n");
3168 printf("*** Running Load Balancing History test...\n");
3169 ret = load_balancing_history(t);
3171 printf("ERROR - Load Balancing History test FAILED.\n");
3174 printf("*** Running Inflight Count test...\n");
3175 ret = inflight_counts(t);
3177 printf("ERROR - Inflight Count test FAILED.\n");
3180 printf("*** Running Abuse Inflights test...\n");
3181 ret = abuse_inflights(t);
3183 printf("ERROR - Abuse Inflights test FAILED.\n");
3186 printf("*** Running XStats test...\n");
3187 ret = xstats_tests(t);
3189 printf("ERROR - XStats test FAILED.\n");
3192 printf("*** Running XStats ID Reset test...\n");
3193 ret = xstats_id_reset_tests(t);
3195 printf("ERROR - XStats ID Reset test FAILED.\n");
3198 printf("*** Running XStats Brute Force test...\n");
3199 ret = xstats_brute_force(t);
3201 printf("ERROR - XStats Brute Force test FAILED.\n");
3204 printf("*** Running XStats ID Abuse test...\n");
3205 ret = xstats_id_abuse_tests(t);
3207 printf("ERROR - XStats ID Abuse test FAILED.\n");
3210 printf("*** Running QID Priority test...\n");
3211 ret = qid_priorities(t);
3213 printf("ERROR - QID Priority test FAILED.\n");
3216 printf("*** Running Ordered Reconfigure test...\n");
3217 ret = ordered_reconfigure(t);
3219 printf("ERROR - Ordered Reconfigure test FAILED.\n");
3222 printf("*** Running Port LB Single Reconfig test...\n");
3223 ret = port_single_lb_reconfig(t);
3225 printf("ERROR - Port LB Single Reconfig test FAILED.\n");
3228 printf("*** Running Port Reconfig Credits test...\n");
3229 ret = port_reconfig_credits(t);
3231 printf("ERROR - Port Reconfig Credits Reset test FAILED.\n");
3234 printf("*** Running Head-of-line-blocking test...\n");
3237 printf("ERROR - Head-of-line-blocking test FAILED.\n");
/* the loopback stress test needs master + producer + worker lcores */
3240 if (rte_lcore_count() >= 3) {
3241 printf("*** Running Worker loopback test...\n");
3242 ret = worker_loopback(t);
3244 printf("ERROR - Worker loopback test FAILED.\n");
3248 printf("### Not enough cores for worker loopback test.\n");
3249 printf("### Need at least 3 cores for test.\n");
3252 * Free test instance, leaving mempool initialized, and a pointer to it
3253 * in static eventdev_func_mempool, as it is re-used on re-runs
/* register with the DPDK test framework as "eventdev_sw_autotest" */
3260 REGISTER_TEST_COMMAND(eventdev_sw_autotest, test_sw_eventdev);