4 * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 #include <sys/queue.h>
41 #include <rte_memory.h>
42 #include <rte_memzone.h>
43 #include <rte_launch.h>
45 #include <rte_per_lcore.h>
46 #include <rte_lcore.h>
47 #include <rte_debug.h>
48 #include <rte_ethdev.h>
49 #include <rte_cycles.h>
51 #include <rte_eventdev.h>
56 #define NUM_PACKETS (1<<18)
61 struct rte_mempool *mbuf_pool;
62 uint8_t port[MAX_PORTS];
63 uint8_t qid[MAX_QIDS];
67 static struct rte_event release_ev;
/* Build a canned ARP request frame ("who-has 10.0.0.1 tell 10.0.0.2") in an
 * mbuf allocated from mp.
 * NOTE(review): this view of the source is elided (line-number gaps); the
 * NULL-check after rte_pktmbuf_alloc() and the `return m;` are not visible
 * here -- confirm against the full file.  `portid` is unused in the visible
 * code.
 */
69 static inline struct rte_mbuf *
70 rte_gen_arp(int portid, struct rte_mempool *mp)
74 * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
/* Raw Ethernet + ARP bytes: broadcast dst MAC, EtherType 0x0806 (ARP),
 * opcode 1 (request), sender IP 10.0.0.2, target IP 10.0.0.1.
 */
76 static const uint8_t arp_request[] = {
77 /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
78 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
79 /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
80 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
81 /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
82 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
83 /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00
/* NOTE(review): length deliberately excludes one trailing byte
 * (sizeof - 1) in the original source -- confirm intent upstream.
 */
87 int pkt_len = sizeof(arp_request) - 1;
89 m = rte_pktmbuf_alloc(mp);
/* Copy the template into the mbuf's data area and set both lengths. */
93 memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
94 arp_request, pkt_len);
95 rte_pktmbuf_pkt_len(m) = pkt_len;
96 rte_pktmbuf_data_len(m) = pkt_len;
/* Interior of an xstats debug-dump routine (the function signature is elided
 * from this view).  Fetches and prints name/value pairs for device-, port-
 * and queue-level extended statistics of the device under test (evdev).
 */
106 const uint32_t XSTATS_MAX = 1024;
108 uint32_t ids[XSTATS_MAX];
109 uint64_t values[XSTATS_MAX];
110 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
112 for (i = 0; i < XSTATS_MAX; i++)
115 /* Device names / values */
116 int ret = rte_event_dev_xstats_names_get(evdev,
117 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
118 xstats_names, ids, XSTATS_MAX);
120 printf("%d: xstats names get() returned error\n",
124 ret = rte_event_dev_xstats_get(evdev,
125 RTE_EVENT_DEV_XSTATS_DEVICE,
126 0, ids, values, ret);
127 if (ret > (signed int)XSTATS_MAX)
128 printf("%s %d: more xstats available than space\n",
130 for (i = 0; (signed int)i < ret; i++) {
131 printf("%d : %s : %"PRIu64"\n",
132 i, xstats_names[i].name, values[i]);
135 /* Port names / values */
136 ret = rte_event_dev_xstats_names_get(evdev,
137 RTE_EVENT_DEV_XSTATS_PORT, 0,
138 xstats_names, ids, XSTATS_MAX);
/* NOTE(review): names are fetched for port 0 but values for port 1 --
 * looks intentional for a dump (per-port names are uniform), but verify
 * against the full source.
 */
139 ret = rte_event_dev_xstats_get(evdev,
140 RTE_EVENT_DEV_XSTATS_PORT, 1,
142 if (ret > (signed int)XSTATS_MAX)
143 printf("%s %d: more xstats available than space\n",
145 for (i = 0; (signed int)i < ret; i++) {
146 printf("%d : %s : %"PRIu64"\n",
147 i, xstats_names[i].name, values[i]);
150 /* Queue names / values */
151 ret = rte_event_dev_xstats_names_get(evdev,
152 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
153 xstats_names, ids, XSTATS_MAX);
/* NOTE(review): same pattern -- names for queue 0, values for queue 1. */
154 ret = rte_event_dev_xstats_get(evdev,
155 RTE_EVENT_DEV_XSTATS_QUEUE,
156 1, ids, values, ret);
157 if (ret > (signed int)XSTATS_MAX)
158 printf("%s %d: more xstats available than space\n",
160 for (i = 0; (signed int)i < ret; i++) {
161 printf("%d : %s : %"PRIu64"\n",
162 i, xstats_names[i].name, values[i]);
166 /* initialization and config */
/* Reset the test harness state and configure the event device with the
 * requested number of queues and ports.  Preserves t->mbuf_pool across the
 * memset of *t.
 */
168 init(struct test *t, int nb_queues, int nb_ports)
170 struct rte_event_dev_config config = {
171 .nb_event_queues = nb_queues,
172 .nb_event_ports = nb_ports,
173 .nb_event_queue_flows = 1024,
174 .nb_events_limit = 4096,
175 .nb_event_port_dequeue_depth = 128,
176 .nb_event_port_enqueue_depth = 128,
180 void *temp = t->mbuf_pool; /* save and restore mbuf pool */
182 memset(t, 0, sizeof(*t));
185 ret = rte_event_dev_configure(evdev, &config);
187 printf("%d: Error configuring device\n", __LINE__);
/* Set up num_ports event ports (0..num_ports-1) on evdev with a shared
 * default port configuration; bails out if num_ports exceeds MAX_PORTS.
 */
192 create_ports(struct test *t, int num_ports)
195 static const struct rte_event_port_conf conf = {
196 .new_event_threshold = 1024,
200 if (num_ports > MAX_PORTS)
203 for (i = 0; i < num_ports; i++) {
204 if (rte_event_port_setup(evdev, i, &conf) < 0) {
205 printf("Error setting up port %d\n", i);
/* Create num_qids load-balanced queues of the scheduling type given by
 * `flags` (atomic / ordered / parallel), starting at the harness's current
 * queue count, then advance t->nb_qids (checked against MAX_QIDS).
 */
215 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
220 const struct rte_event_queue_conf conf = {
221 .event_queue_cfg = flags,
222 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
223 .nb_atomic_flows = 1024,
224 .nb_atomic_order_sequences = 1024,
/* Queues are appended after any previously-created ones. */
227 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
228 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
229 printf("%d: error creating qid %d\n", __LINE__, i);
234 t->nb_qids += num_qids;
235 if (t->nb_qids > MAX_QIDS)
242 create_atomic_qids(struct test *t, int num_qids)
244 return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY);
248 create_ordered_qids(struct test *t, int num_qids)
250 return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ORDERED_ONLY);
255 create_unordered_qids(struct test *t, int num_qids)
257 return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY);
/* Create num_qids SINGLE_LINK (directed) queues and link each new queue to
 * the corresponding entry of `ports` (queue i links to ports[i - old
 * nb_qids]).  Advances t->nb_qids (checked against MAX_QIDS).
 */
261 create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
266 static const struct rte_event_queue_conf conf = {
267 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
268 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
269 .nb_atomic_flows = 1024,
270 .nb_atomic_order_sequences = 1024,
273 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
274 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
275 printf("%d: error creating qid %d\n", __LINE__, i);
/* A directed queue must link to exactly one port: expect return 1. */
280 if (rte_event_port_link(evdev, ports[i - t->nb_qids],
281 &t->qid[i], NULL, 1) != 1) {
282 printf("%d: error creating link for qid %d\n",
287 t->nb_qids += num_qids;
288 if (t->nb_qids > MAX_QIDS)
296 cleanup(struct test *t __rte_unused)
298 rte_event_dev_stop(evdev);
299 rte_event_dev_close(evdev);
/* Snapshot of the SW eventdev's extended statistics, filled in by
 * test_event_dev_stats_get() from the driver's named xstats.
 */
303 struct test_event_dev_stats {
304 uint64_t rx_pkts; /**< Total packets received */
305 uint64_t rx_dropped; /**< Total packets dropped (Eg Invalid QID) */
306 uint64_t tx_pkts; /**< Total packets transmitted */
308 /** Packets received on this port */
309 uint64_t port_rx_pkts[MAX_PORTS];
310 /** Packets dropped on this port */
311 uint64_t port_rx_dropped[MAX_PORTS];
312 /** Packets inflight on this port */
313 uint64_t port_inflight[MAX_PORTS];
314 /** Packets transmitted on this port */
315 uint64_t port_tx_pkts[MAX_PORTS];
316 /** Packets received on this qid */
317 uint64_t qid_rx_pkts[MAX_QIDS];
318 /** Packets dropped on this qid */
319 uint64_t qid_rx_dropped[MAX_QIDS];
320 /** Packets transmitted on this qid */
321 uint64_t qid_tx_pkts[MAX_QIDS];
/* Populate *stats by querying the device's xstats by name: the three
 * device-level totals, then four per-port and three per-queue counters for
 * every possible port/queue index.  The static *_ids arrays cache the
 * xstat id returned for each name lookup.
 */
325 test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
328 static uint32_t total_ids[3]; /* rx, tx and drop */
329 static uint32_t port_rx_pkts_ids[MAX_PORTS];
330 static uint32_t port_rx_dropped_ids[MAX_PORTS];
331 static uint32_t port_inflight_ids[MAX_PORTS];
332 static uint32_t port_tx_pkts_ids[MAX_PORTS];
333 static uint32_t qid_rx_pkts_ids[MAX_QIDS];
334 static uint32_t qid_rx_dropped_ids[MAX_QIDS];
335 static uint32_t qid_tx_pkts_ids[MAX_QIDS];
/* Device-wide totals, fetched by their SW-PMD xstat names. */
338 stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
339 "dev_rx", &total_ids[0]);
340 stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
341 "dev_drop", &total_ids[1]);
342 stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
343 "dev_tx", &total_ids[2]);
344 for (i = 0; i < MAX_PORTS; i++) {
346 snprintf(name, sizeof(name), "port_%u_rx", i);
347 stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
348 dev_id, name, &port_rx_pkts_ids[i]);
349 snprintf(name, sizeof(name), "port_%u_drop", i);
350 stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
351 dev_id, name, &port_rx_dropped_ids[i]);
352 snprintf(name, sizeof(name), "port_%u_inflight", i);
353 stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
354 dev_id, name, &port_inflight_ids[i]);
355 snprintf(name, sizeof(name), "port_%u_tx", i);
356 stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
357 dev_id, name, &port_tx_pkts_ids[i]);
359 for (i = 0; i < MAX_QIDS; i++) {
361 snprintf(name, sizeof(name), "qid_%u_rx", i);
362 stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
363 dev_id, name, &qid_rx_pkts_ids[i]);
364 snprintf(name, sizeof(name), "qid_%u_drop", i);
365 stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
366 dev_id, name, &qid_rx_dropped_ids[i]);
367 snprintf(name, sizeof(name), "qid_%u_tx", i);
368 stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
369 dev_id, name, &qid_tx_pkts_ids[i]);
375 /* run_prio_packet_test
376 * This performs a basic packet priority check on the test instance passed in.
377 * It is factored out of the main priority tests as the same tests must be
378 * performed to ensure prioritization of each type of QID.
381 * - An initialized test structure, including mempool
382 * - t->port[0] is initialized for both Enq / Deq of packets to the QID
383 * - t->qid[0] is the QID to be tested
384 * - if LB QID, the CQ must be mapped to the QID.
/* Enqueue two tagged packets -- NORMAL priority first, then HIGHEST --
 * through qid[0]/port[0], schedule, and verify they dequeue in priority
 * order: HIGHEST (MAGIC_SEQN[1]) first, NORMAL (MAGIC_SEQN[0]) second.
 */
387 run_prio_packet_test(struct test *t)
390 const uint32_t MAGIC_SEQN[] = {4711, 1234};
391 const uint32_t PRIORITY[] = {
392 RTE_EVENT_DEV_PRIORITY_NORMAL,
393 RTE_EVENT_DEV_PRIORITY_HIGHEST
396 for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
397 /* generate pkt and enqueue */
399 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
401 printf("%d: gen of pkt failed\n", __LINE__);
/* seqn marks each mbuf so ordering can be checked after dequeue. */
404 arp->seqn = MAGIC_SEQN[i];
406 ev = (struct rte_event){
407 .priority = PRIORITY[i],
408 .op = RTE_EVENT_OP_NEW,
409 .queue_id = t->qid[0],
412 err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
414 printf("%d: error failed to enqueue\n", __LINE__);
419 rte_event_schedule(evdev);
421 struct test_event_dev_stats stats;
422 err = test_event_dev_stats_get(evdev, &stats);
424 printf("%d: error failed to get stats\n", __LINE__);
/* Both enqueues should be counted on the rx side of port[0]. */
428 if (stats.port_rx_pkts[t->port[0]] != 2) {
429 printf("%d: error stats incorrect for directed port\n",
431 rte_event_dev_dump(evdev, stdout);
435 struct rte_event ev, ev2;
437 deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
439 printf("%d: error failed to deq\n", __LINE__);
440 rte_event_dev_dump(evdev, stdout);
/* Highest-priority packet (enqueued second) must come out first. */
443 if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
444 printf("%d: first packet out not highest priority\n",
446 rte_event_dev_dump(evdev, stdout);
449 rte_pktmbuf_free(ev.mbuf);
451 deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
453 printf("%d: error failed to deq\n", __LINE__);
454 rte_event_dev_dump(evdev, stdout);
457 if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
458 printf("%d: second packet out not lower priority\n",
460 rte_event_dev_dump(evdev, stdout);
463 rte_pktmbuf_free(ev2.mbuf);
/* Enqueue one packet on directed port 0, schedule, and verify it arrives
 * on worker port 2 (wrk_enq) with the magic sequence number intact, and
 * that the per-port rx stats reflect the single packet.
 */
470 test_single_directed_packet(struct test *t)
472 const int rx_enq = 0;
473 const int wrk_enq = 2;
476 /* Create instance with 3 directed QIDs going to 3 ports */
477 if (init(t, 3, 3) < 0 ||
478 create_ports(t, 3) < 0 ||
479 create_directed_qids(t, 3, t->port) < 0)
482 if (rte_event_dev_start(evdev) < 0) {
483 printf("%d: Error with start call\n", __LINE__);
487 /************** FORWARD ****************/
488 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
489 struct rte_event ev = {
490 .op = RTE_EVENT_OP_NEW,
496 printf("%d: gen of pkt failed\n", __LINE__);
500 const uint32_t MAGIC_SEQN = 4711;
501 arp->seqn = MAGIC_SEQN;
503 /* generate pkt and enqueue */
504 err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
506 printf("%d: error failed to enqueue\n", __LINE__);
510 /* Run schedule() as dir packets may need to be re-ordered */
511 rte_event_schedule(evdev);
513 struct test_event_dev_stats stats;
514 err = test_event_dev_stats_get(evdev, &stats);
516 printf("%d: error failed to get stats\n", __LINE__);
520 if (stats.port_rx_pkts[rx_enq] != 1) {
521 printf("%d: error stats incorrect for directed port\n",
527 deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
529 printf("%d: error failed to deq\n", __LINE__);
533 err = test_event_dev_stats_get(evdev, &stats);
/* NOTE(review): rx count of 0 or 1 is accepted here in the original. */
534 if (stats.port_rx_pkts[wrk_enq] != 0 &&
535 stats.port_rx_pkts[wrk_enq] != 1) {
536 printf("%d: error directed stats post-dequeue\n", __LINE__);
540 if (ev.mbuf->seqn != MAGIC_SEQN) {
541 printf("%d: error magic sequence number not dequeued\n",
546 rte_pktmbuf_free(ev.mbuf);
553 test_priority_directed(struct test *t)
555 if (init(t, 1, 1) < 0 ||
556 create_ports(t, 1) < 0 ||
557 create_directed_qids(t, 1, t->port) < 0) {
558 printf("%d: Error initializing device\n", __LINE__);
562 if (rte_event_dev_start(evdev) < 0) {
563 printf("%d: Error with start call\n", __LINE__);
567 return run_prio_packet_test(t);
571 test_priority_atomic(struct test *t)
573 if (init(t, 1, 1) < 0 ||
574 create_ports(t, 1) < 0 ||
575 create_atomic_qids(t, 1) < 0) {
576 printf("%d: Error initializing device\n", __LINE__);
581 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
582 printf("%d: error mapping qid to port\n", __LINE__);
585 if (rte_event_dev_start(evdev) < 0) {
586 printf("%d: Error with start call\n", __LINE__);
590 return run_prio_packet_test(t);
594 test_priority_ordered(struct test *t)
596 if (init(t, 1, 1) < 0 ||
597 create_ports(t, 1) < 0 ||
598 create_ordered_qids(t, 1) < 0) {
599 printf("%d: Error initializing device\n", __LINE__);
604 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
605 printf("%d: error mapping qid to port\n", __LINE__);
608 if (rte_event_dev_start(evdev) < 0) {
609 printf("%d: Error with start call\n", __LINE__);
613 return run_prio_packet_test(t);
617 test_priority_unordered(struct test *t)
619 if (init(t, 1, 1) < 0 ||
620 create_ports(t, 1) < 0 ||
621 create_unordered_qids(t, 1) < 0) {
622 printf("%d: Error initializing device\n", __LINE__);
627 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
628 printf("%d: error mapping qid to port\n", __LINE__);
631 if (rte_event_dev_start(evdev) < 0) {
632 printf("%d: Error with start call\n", __LINE__);
636 return run_prio_packet_test(t);
/* Enqueue NUM_PKTS packets from port 0 into two atomic queues, schedule,
 * then verify the scheduler counted them all and that each of the two
 * linked ports dequeues half of them.
 * NOTE(review): per-packet queue selection (presumably i % 2) is in elided
 * lines -- confirm against the full file.
 */
640 burst_packets(struct test *t)
642 /************** CONFIG ****************/
647 /* Create instance with 2 ports and 2 queues */
648 if (init(t, 2, 2) < 0 ||
649 create_ports(t, 2) < 0 ||
650 create_atomic_qids(t, 2) < 0) {
651 printf("%d: Error initializing device\n", __LINE__);
655 /* CQ mapping to QID */
656 ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
658 printf("%d: error mapping lb qid0\n", __LINE__);
661 ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
663 printf("%d: error mapping lb qid1\n", __LINE__);
667 if (rte_event_dev_start(evdev) < 0) {
668 printf("%d: Error with start call\n", __LINE__);
672 /************** FORWARD ****************/
673 const uint32_t rx_port = 0;
674 const uint32_t NUM_PKTS = 2;
676 for (i = 0; i < NUM_PKTS; i++) {
677 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
679 printf("%d: error generating pkt\n", __LINE__);
683 struct rte_event ev = {
684 .op = RTE_EVENT_OP_NEW,
689 /* generate pkt and enqueue */
690 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
692 printf("%d: Failed to enqueue\n", __LINE__);
696 rte_event_schedule(evdev);
698 /* Check stats for all NUM_PKTS arrived to sched core */
699 struct test_event_dev_stats stats;
701 err = test_event_dev_stats_get(evdev, &stats);
703 printf("%d: failed to get stats\n", __LINE__);
706 if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
707 printf("%d: Sched core didn't receive all %d pkts\n",
709 rte_event_dev_dump(evdev, stdout);
717 /******** DEQ QID 1 *******/
720 p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
722 rte_pktmbuf_free(ev.mbuf);
725 if (deq_pkts != NUM_PKTS/2) {
726 printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
731 /******** DEQ QID 2 *******/
735 p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
737 rte_pktmbuf_free(ev.mbuf);
739 if (deq_pkts != NUM_PKTS/2) {
740 printf("%d: Half of NUM_PKTS didn't arrive at port 2\n",
/* Enqueue a RELEASE-op event (release_ev) on a port that has no event to
 * release, schedule, and verify the scheduler neither counts it as rx/tx
 * nor leaves anything inflight on the worker port.
 */
750 abuse_inflights(struct test *t)
752 const int rx_enq = 0;
753 const int wrk_enq = 2;
756 /* Create instance with 4 ports */
757 if (init(t, 1, 4) < 0 ||
758 create_ports(t, 4) < 0 ||
759 create_atomic_qids(t, 1) < 0) {
760 printf("%d: Error initializing device\n", __LINE__);
764 /* CQ mapping to QID */
/* nb_links == 0: link the worker port to all configured queues. */
765 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
767 printf("%d: error mapping lb qid\n", __LINE__);
772 if (rte_event_dev_start(evdev) < 0) {
773 printf("%d: Error with start call\n", __LINE__);
777 /* Enqueue op only */
778 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
780 printf("%d: Failed to enqueue\n", __LINE__);
785 rte_event_schedule(evdev);
787 struct test_event_dev_stats stats;
789 err = test_event_dev_stats_get(evdev, &stats);
791 printf("%d: failed to get stats\n", __LINE__);
/* A stray release must not perturb any counters. */
795 if (stats.rx_pkts != 0 ||
796 stats.tx_pkts != 0 ||
797 stats.port_inflight[wrk_enq] != 0) {
798 printf("%d: Sched core didn't handle pkt as expected\n",
/* Exhaustive xstats checks against the SW PMD: verify the expected counts
 * of device (6), port (21) and queue (13) stats; check a negative lookup
 * (-EINVAL for a non-existent queue); enqueue 3 packets and compare every
 * stat value against hard-coded expectations; then reset device, port and
 * queue stats in turn and re-verify the post-reset values.
 */
808 xstats_tests(struct test *t)
810 const int wrk_enq = 2;
813 /* Create instance with 4 ports */
814 if (init(t, 1, 4) < 0 ||
815 create_ports(t, 4) < 0 ||
816 create_atomic_qids(t, 1) < 0) {
817 printf("%d: Error initializing device\n", __LINE__);
821 /* CQ mapping to QID */
822 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
824 printf("%d: error mapping lb qid\n", __LINE__);
829 if (rte_event_dev_start(evdev) < 0) {
830 printf("%d: Error with start call\n", __LINE__);
834 const uint32_t XSTATS_MAX = 1024;
837 uint32_t ids[XSTATS_MAX];
838 uint64_t values[XSTATS_MAX];
839 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
841 for (i = 0; i < XSTATS_MAX; i++)
844 /* Device names / values */
845 int ret = rte_event_dev_xstats_names_get(evdev,
846 RTE_EVENT_DEV_XSTATS_DEVICE,
847 0, xstats_names, ids, XSTATS_MAX);
849 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
852 ret = rte_event_dev_xstats_get(evdev,
853 RTE_EVENT_DEV_XSTATS_DEVICE,
854 0, ids, values, ret);
856 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
860 /* Port names / values */
861 ret = rte_event_dev_xstats_names_get(evdev,
862 RTE_EVENT_DEV_XSTATS_PORT, 0,
863 xstats_names, ids, XSTATS_MAX);
865 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
868 ret = rte_event_dev_xstats_get(evdev,
869 RTE_EVENT_DEV_XSTATS_PORT, 0,
872 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
876 /* Queue names / values */
877 ret = rte_event_dev_xstats_names_get(evdev,
878 RTE_EVENT_DEV_XSTATS_QUEUE,
879 0, xstats_names, ids, XSTATS_MAX);
881 printf("%d: expected 13 stats, got return %d\n", __LINE__, ret);
885 /* NEGATIVE TEST: with wrong queue passed, 0 stats should be returned */
/* Only queue 0 exists; querying queue 1 must fail with -EINVAL. */
886 ret = rte_event_dev_xstats_get(evdev,
887 RTE_EVENT_DEV_XSTATS_QUEUE,
888 1, ids, values, ret);
889 if (ret != -EINVAL) {
890 printf("%d: expected 0 stats, got return %d\n", __LINE__, ret);
894 ret = rte_event_dev_xstats_get(evdev,
895 RTE_EVENT_DEV_XSTATS_QUEUE,
896 0, ids, values, ret);
898 printf("%d: expected 13 stats, got return %d\n", __LINE__, ret);
902 /* enqueue packets to check values */
903 for (i = 0; i < 3; i++) {
905 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
907 printf("%d: gen of pkt failed\n", __LINE__);
910 ev.queue_id = t->qid[i];
911 ev.op = RTE_EVENT_OP_NEW;
916 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
918 printf("%d: Failed to enqueue\n", __LINE__);
923 rte_event_schedule(evdev);
925 /* Device names / values */
926 int num_stats = rte_event_dev_xstats_names_get(evdev,
927 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
928 xstats_names, ids, XSTATS_MAX);
931 ret = rte_event_dev_xstats_get(evdev,
932 RTE_EVENT_DEV_XSTATS_DEVICE,
933 0, ids, values, num_stats);
/* Post-traffic device stats: 3 rx, 3 tx, 0 drop, 1 sched call, ... */
934 static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
935 for (i = 0; (signed int)i < ret; i++) {
936 if (expected[i] != values[i]) {
938 "%d Error xstat %d (id %d) %s : %"PRIu64
939 ", expect %"PRIu64"\n",
940 __LINE__, i, ids[i], xstats_names[i].name,
941 values[i], expected[i]);
946 ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
949 /* ensure reset statistics are zero-ed */
950 static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
951 ret = rte_event_dev_xstats_get(evdev,
952 RTE_EVENT_DEV_XSTATS_DEVICE,
953 0, ids, values, num_stats);
954 for (i = 0; (signed int)i < ret; i++) {
955 if (expected_zero[i] != values[i]) {
957 "%d Error, xstat %d (id %d) %s : %"PRIu64
958 ", expect %"PRIu64"\n",
959 __LINE__, i, ids[i], xstats_names[i].name,
960 values[i], expected_zero[i]);
965 /* port reset checks */
966 num_stats = rte_event_dev_xstats_names_get(evdev,
967 RTE_EVENT_DEV_XSTATS_PORT, 0,
968 xstats_names, ids, XSTATS_MAX);
971 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT,
972 0, ids, values, num_stats);
/* Expected per-port values; some table rows are elided in this view. */
974 static const uint64_t port_expected[] = {
979 0 /* avg pkt cycles */,
981 0 /* rx ring used */,
982 4096 /* rx ring free */,
983 0 /* cq ring used */,
984 32 /* cq ring free */,
985 0 /* dequeue calls */,
986 /* 10 dequeue burst buckets */
990 if (ret != RTE_DIM(port_expected)) {
992 "%s %d: wrong number of port stats (%d), expected %zu\n",
993 __func__, __LINE__, ret, RTE_DIM(port_expected));
996 for (i = 0; (signed int)i < ret; i++) {
997 if (port_expected[i] != values[i]) {
999 "%s : %d: Error stat %s is %"PRIu64
1000 ", expected %"PRIu64"\n",
1001 __func__, __LINE__, xstats_names[i].name,
1002 values[i], port_expected[i]);
1007 ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1010 /* ensure reset statistics are zero-ed */
1011 static const uint64_t port_expected_zero[] = {
1016 0 /* avg pkt cycles */,
1018 0 /* rx ring used */,
1019 4096 /* rx ring free */,
1020 0 /* cq ring used */,
1021 32 /* cq ring free */,
1022 0 /* dequeue calls */,
1023 /* 10 dequeue burst buckets */
1027 ret = rte_event_dev_xstats_get(evdev,
1028 RTE_EVENT_DEV_XSTATS_PORT,
1029 0, ids, values, num_stats);
1030 for (i = 0; (signed int)i < ret; i++) {
1031 if (port_expected_zero[i] != values[i]) {
1033 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1034 ", expect %"PRIu64"\n",
1035 __LINE__, i, ids[i], xstats_names[i].name,
1036 values[i], port_expected_zero[i]);
1041 /* QUEUE STATS TESTS */
1042 num_stats = rte_event_dev_xstats_names_get(evdev,
1043 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1044 xstats_names, ids, XSTATS_MAX);
1045 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1046 0, ids, values, num_stats);
1048 printf("xstats get returned %d\n", ret);
1051 if ((unsigned int)ret > XSTATS_MAX)
1052 printf("%s %d: more xstats available than space\n",
1053 __func__, __LINE__);
1055 static const uint64_t queue_expected[] = {
1061 0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
1062 0, 0, 1, 0, /* qid_0_port_X_pinned_flows */
1064 for (i = 0; (signed int)i < ret; i++) {
1065 if (queue_expected[i] != values[i]) {
1067 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1068 ", expect %"PRIu64"\n",
1069 __LINE__, i, ids[i], xstats_names[i].name,
1070 values[i], queue_expected[i]);
1075 /* Reset the queue stats here */
1076 ret = rte_event_dev_xstats_reset(evdev,
1077 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1081 /* Verify that the resetable stats are reset, and others are not */
/* Pinned-flow counts survive a reset; traffic counters do not. */
1082 static const uint64_t queue_expected_zero[] = {
1088 0, 0, 0, 0, /* 4 iq used */
1089 0, 0, 1, 0, /* qid to port pinned flows */
1092 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1093 ids, values, num_stats);
1095 for (i = 0; (signed int)i < ret; i++) {
1096 if (queue_expected_zero[i] != values[i]) {
1098 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1099 ", expect %"PRIu64"\n",
1100 __LINE__, i, ids[i], xstats_names[i].name,
1101 values[i], queue_expected_zero[i]);
1106 printf("%d : %d of values were not as expected above\n",
1115 rte_event_dev_dump(0, stdout);
/* Negative tests: querying xstats names with an out-of-range port or queue
 * id (UINT8_MAX-1) must return zero stats.  Device-level queries are not
 * tested since they ignore the port/queue number.
 */
1122 xstats_id_abuse_tests(struct test *t)
1125 const uint32_t XSTATS_MAX = 1024;
1126 const uint32_t link_port = 2;
1128 uint32_t ids[XSTATS_MAX];
1129 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1131 /* Create instance with 4 ports */
1132 if (init(t, 1, 4) < 0 ||
1133 create_ports(t, 4) < 0 ||
1134 create_atomic_qids(t, 1) < 0) {
1135 printf("%d: Error initializing device\n", __LINE__);
1139 err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0);
1141 printf("%d: error mapping lb qid\n", __LINE__);
1145 if (rte_event_dev_start(evdev) < 0) {
1146 printf("%d: Error with start call\n", __LINE__);
1150 /* no test for device, as it ignores the port/q number */
1151 int num_stats = rte_event_dev_xstats_names_get(evdev,
1152 RTE_EVENT_DEV_XSTATS_PORT,
1153 UINT8_MAX-1, xstats_names, ids,
1155 if (num_stats != 0) {
1156 printf("%d: expected %d stats, got return %d\n", __LINE__,
1161 num_stats = rte_event_dev_xstats_names_get(evdev,
1162 RTE_EVENT_DEV_XSTATS_QUEUE,
1163 UINT8_MAX-1, xstats_names, ids,
1165 if (num_stats != 0) {
1166 printf("%d: expected %d stats, got return %d\n", __LINE__,
/* Stress the credit handling across repeated reconfiguration: for
 * NUM_ITERS iterations, (re)create queue 0 and port 0, link, start the
 * device, push NPKTS packets through end-to-end, then stop the device so
 * the next iteration reconfigures from scratch (last iteration is left
 * running for cleanup() to stop).
 */
1179 port_reconfig_credits(struct test *t)
1181 if (init(t, 1, 1) < 0) {
1182 printf("%d: Error initializing device\n", __LINE__);
1187 const uint32_t NUM_ITERS = 32;
1188 for (i = 0; i < NUM_ITERS; i++) {
1189 const struct rte_event_queue_conf conf = {
1190 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
1191 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1192 .nb_atomic_flows = 1024,
1193 .nb_atomic_order_sequences = 1024,
1195 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1196 printf("%d: error creating qid\n", __LINE__);
1201 static const struct rte_event_port_conf port_conf = {
1202 .new_event_threshold = 128,
1203 .dequeue_depth = 32,
1204 .enqueue_depth = 64,
1206 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1207 printf("%d Error setting up port\n", __LINE__);
/* nb_links == 0: link port 0 to all available queues. */
1211 int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
1213 printf("%d: error mapping lb qid\n", __LINE__);
1217 if (rte_event_dev_start(evdev) < 0) {
1218 printf("%d: Error with start call\n", __LINE__);
1222 const uint32_t NPKTS = 1;
1224 for (j = 0; j < NPKTS; j++) {
1225 struct rte_event ev;
1226 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1228 printf("%d: gen of pkt failed\n", __LINE__);
1231 ev.queue_id = t->qid[0];
1232 ev.op = RTE_EVENT_OP_NEW;
1234 int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
1236 printf("%d: Failed to enqueue\n", __LINE__);
1237 rte_event_dev_dump(0, stdout);
1242 rte_event_schedule(evdev);
1244 struct rte_event ev[NPKTS];
1245 int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
1248 printf("%d error; no packet dequeued\n", __LINE__);
1250 /* let cleanup below stop the device on last iter */
1251 if (i != NUM_ITERS-1)
1252 rte_event_dev_stop(evdev);
/* Reconfiguration test mixing queue types: create one atomic LB queue and
 * one single-link queue with two ports, then exercise link -> unlink ->
 * re-link on the LB queue (port 0) plus a link from port 1, and finally
 * start the device.
 */
1263 port_single_lb_reconfig(struct test *t)
1265 if (init(t, 2, 2) < 0) {
1266 printf("%d: Error initializing device\n", __LINE__);
1270 static const struct rte_event_queue_conf conf_lb_atomic = {
1271 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1272 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
1273 .nb_atomic_flows = 1024,
1274 .nb_atomic_order_sequences = 1024,
1276 if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
1277 printf("%d: error creating qid\n", __LINE__);
1281 static const struct rte_event_queue_conf conf_single_link = {
1282 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1283 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
1284 .nb_atomic_flows = 1024,
1285 .nb_atomic_order_sequences = 1024,
1287 if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
1288 printf("%d: error creating qid\n", __LINE__);
1292 struct rte_event_port_conf port_conf = {
1293 .new_event_threshold = 128,
1294 .dequeue_depth = 32,
1295 .enqueue_depth = 64,
1297 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1298 printf("%d Error setting up port\n", __LINE__);
1301 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
1302 printf("%d Error setting up port\n", __LINE__);
1306 /* link port to lb queue */
1307 uint8_t queue_id = 0;
1308 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1309 printf("%d: error creating link for qid\n", __LINE__);
1313 int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
1315 printf("%d: Error unlinking lb port\n", __LINE__);
/* Re-link after the unlink to verify the link can be re-established. */
1320 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1321 printf("%d: error creating link for qid\n", __LINE__);
1326 int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
1328 printf("%d: error mapping lb qid\n", __LINE__);
1332 if (rte_event_dev_start(evdev) < 0) {
1333 printf("%d: Error with start call\n", __LINE__);
/* Brute-force robustness test: call the xstats names/get APIs for every
 * mode (device, port, queue) and every possible id 0..UINT8_MAX-1 without
 * checking results -- the pass condition is simply not crashing.
 */
1345 xstats_brute_force(struct test *t)
1348 const uint32_t XSTATS_MAX = 1024;
1349 uint32_t ids[XSTATS_MAX];
1350 uint64_t values[XSTATS_MAX];
1351 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1354 /* Create instance with 4 ports */
1355 if (init(t, 1, 4) < 0 ||
1356 create_ports(t, 4) < 0 ||
1357 create_atomic_qids(t, 1) < 0) {
1358 printf("%d: Error initializing device\n", __LINE__);
1362 int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1364 printf("%d: error mapping lb qid\n", __LINE__);
1368 if (rte_event_dev_start(evdev) < 0) {
1369 printf("%d: Error with start call\n", __LINE__);
1373 for (i = 0; i < XSTATS_MAX; i++)
/* Iterate the three xstats modes (DEVICE, PORT, QUEUE are contiguous). */
1376 for (i = 0; i < 3; i++) {
1377 uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i;
1379 for (j = 0; j < UINT8_MAX; j++) {
1380 rte_event_dev_xstats_names_get(evdev, mode,
1381 j, xstats_names, ids, XSTATS_MAX);
1383 rte_event_dev_xstats_get(evdev, mode, j, ids,
1384 values, XSTATS_MAX);
/* Verify xstats ids/values for device, port and queue scopes, and that
 * rte_event_dev_xstats_reset() zeroes the resettable counters in each scope.
 * Fixed here: three error printfs passed the uint32_t 'id' where the
 * uint64_t 'val' was intended (with a mismatched %d after a PRIu64 format,
 * which is undefined behavior in varargs), the queue-scope count message
 * hard-coded "21" instead of NUM_Q_STATS, and two "port 4" comments that
 * describe the port-3 array slot.
 */
1396 xstats_id_reset_tests(struct test *t)
1398 const int wrk_enq = 2;
1401 /* Create instance with 4 ports */
1402 if (init(t, 1, 4) < 0 ||
1403 create_ports(t, 4) < 0 ||
1404 create_atomic_qids(t, 1) < 0) {
1405 printf("%d: Error initializing device\n", __LINE__);
1409 /* CQ mapping to QID */
1410 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
1412 printf("%d: error mapping lb qid\n", __LINE__);
1416 if (rte_event_dev_start(evdev) < 0) {
1417 printf("%d: Error with start call\n", __LINE__);
1421 #define XSTATS_MAX 1024
1424 uint32_t ids[XSTATS_MAX];
1425 uint64_t values[XSTATS_MAX];
1426 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1428 for (i = 0; i < XSTATS_MAX; i++)
1431 #define NUM_DEV_STATS 6
1432 /* Device names / values */
1433 int num_stats = rte_event_dev_xstats_names_get(evdev,
1434 RTE_EVENT_DEV_XSTATS_DEVICE,
1435 0, xstats_names, ids, XSTATS_MAX);
1436 if (num_stats != NUM_DEV_STATS) {
1437 printf("%d: expected %d stats, got return %d\n", __LINE__,
1438 NUM_DEV_STATS, num_stats);
1441 ret = rte_event_dev_xstats_get(evdev,
1442 RTE_EVENT_DEV_XSTATS_DEVICE,
1443 0, ids, values, num_stats);
1444 if (ret != NUM_DEV_STATS) {
1445 printf("%d: expected %d stats, got return %d\n", __LINE__,
1446 NUM_DEV_STATS, ret);
1451 for (i = 0; i < NPKTS; i++) {
1452 struct rte_event ev;
1453 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1455 printf("%d: gen of pkt failed\n", __LINE__);
1458 ev.queue_id = t->qid[i];
1459 ev.op = RTE_EVENT_OP_NEW;
1463 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1465 printf("%d: Failed to enqueue\n", __LINE__);
1470 rte_event_schedule(evdev);
1472 static const char * const dev_names[] = {
1473 "dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
1474 "dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
1476 uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
1477 for (i = 0; (int)i < ret; i++) {
1479 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1483 printf("%d: %s id incorrect, expected %d got %d\n",
1484 __LINE__, dev_names[i], i, id);
1487 if (val != dev_expected[i]) {
/* fix: print the compared value 'val', not 'id', and match the format */
1488 printf("%d: %s value incorrect, expected %"
1489 PRIu64" got %"PRIu64"\n", __LINE__, dev_names[i],
1490 dev_expected[i], val);
1494 int reset_ret = rte_event_dev_xstats_reset(evdev,
1495 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
1499 printf("%d: failed to reset successfully\n", __LINE__);
1502 dev_expected[i] = 0;
1503 /* check value again */
1504 val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], 0);
1505 if (val != dev_expected[i]) {
1506 printf("%d: %s value incorrect, expected %"PRIu64
1507 " got %"PRIu64"\n", __LINE__, dev_names[i],
1508 dev_expected[i], val);
1513 /* 48 is stat offset from start of the devices whole xstats.
1514 * This WILL break every time we add a statistic to a port
1515 * or the device, but there is no other way to test
1518 /* num stats for the tested port. CQ size adds more stats to a port */
1519 #define NUM_PORT_STATS 21
1520 /* the port to test. */
1522 num_stats = rte_event_dev_xstats_names_get(evdev,
1523 RTE_EVENT_DEV_XSTATS_PORT, PORT,
1524 xstats_names, ids, XSTATS_MAX);
1525 if (num_stats != NUM_PORT_STATS) {
1526 printf("%d: expected %d stats, got return %d\n",
1527 __LINE__, NUM_PORT_STATS, num_stats);
1530 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT,
1531 ids, values, num_stats);
1533 if (ret != NUM_PORT_STATS) {
1534 printf("%d: expected %d stats, got return %d\n",
1535 __LINE__, NUM_PORT_STATS, ret);
1538 static const char * const port_names[] = {
1543 "port_2_avg_pkt_cycles",
1545 "port_2_rx_ring_used",
1546 "port_2_rx_ring_free",
1547 "port_2_cq_ring_used",
1548 "port_2_cq_ring_free",
1549 "port_2_dequeue_calls",
1550 "port_2_dequeues_returning_0",
1551 "port_2_dequeues_returning_1-4",
1552 "port_2_dequeues_returning_5-8",
1553 "port_2_dequeues_returning_9-12",
1554 "port_2_dequeues_returning_13-16",
1555 "port_2_dequeues_returning_17-20",
1556 "port_2_dequeues_returning_21-24",
1557 "port_2_dequeues_returning_25-28",
1558 "port_2_dequeues_returning_29-32",
1559 "port_2_dequeues_returning_33-36",
1561 uint64_t port_expected[] = {
1565 NPKTS, /* inflight */
1566 0, /* avg pkt cycles */
1568 0, /* rx ring used */
1569 4096, /* rx ring free */
1570 NPKTS, /* cq ring used */
1571 25, /* cq ring free */
1572 0, /* dequeue zero calls */
1573 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1576 uint64_t port_expected_zero[] = {
1580 NPKTS, /* inflight */
1581 0, /* avg pkt cycles */
1583 0, /* rx ring used */
1584 4096, /* rx ring free */
1585 NPKTS, /* cq ring used */
1586 25, /* cq ring free */
1587 0, /* dequeue zero calls */
1588 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1591 if (RTE_DIM(port_expected) != NUM_PORT_STATS ||
1592 RTE_DIM(port_names) != NUM_PORT_STATS) {
1593 printf("%d: port array of wrong size\n", __LINE__);
1598 for (i = 0; (int)i < ret; i++) {
1600 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1603 if (id != i + PORT_OFF) {
1604 printf("%d: %s id incorrect, expected %d got %d\n",
1605 __LINE__, port_names[i], i+PORT_OFF,
1609 if (val != port_expected[i]) {
/* fix: print the compared value 'val', not 'id', and match the format */
1610 printf("%d: %s value incorrect, expected %"PRIu64
1611 " got %"PRIu64"\n", __LINE__, port_names[i],
1612 port_expected[i], val);
1616 int reset_ret = rte_event_dev_xstats_reset(evdev,
1617 RTE_EVENT_DEV_XSTATS_PORT, PORT,
1621 printf("%d: failed to reset successfully\n", __LINE__);
1624 /* check value again */
1625 val = rte_event_dev_xstats_by_name_get(evdev, port_names[i], 0);
1626 if (val != port_expected_zero[i]) {
1627 printf("%d: %s value incorrect, expected %"PRIu64
1628 " got %"PRIu64"\n", __LINE__, port_names[i],
1629 port_expected_zero[i], val);
1636 /* num queue stats */
1637 #define NUM_Q_STATS 13
1638 /* queue offset from start of the devices whole xstats.
1639 * This will break every time we add a statistic to a device/port/queue
1641 #define QUEUE_OFF 90
1642 const uint32_t queue = 0;
1643 num_stats = rte_event_dev_xstats_names_get(evdev,
1644 RTE_EVENT_DEV_XSTATS_QUEUE, queue,
1645 xstats_names, ids, XSTATS_MAX);
1646 if (num_stats != NUM_Q_STATS) {
1647 printf("%d: expected %d stats, got return %d\n",
1648 __LINE__, NUM_Q_STATS, num_stats);
1651 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1652 queue, ids, values, num_stats);
1653 if (ret != NUM_Q_STATS) {
1654 printf("%d: expected %d stats, got return %d\n", __LINE__, NUM_Q_STATS, ret);
1657 static const char * const queue_names[] = {
1667 "qid_0_port_0_pinned_flows",
1668 "qid_0_port_1_pinned_flows",
1669 "qid_0_port_2_pinned_flows",
1670 "qid_0_port_3_pinned_flows",
1672 uint64_t queue_expected[] = {
1682 0, /* qid 0 port 0 pinned flows */
1683 0, /* qid 0 port 1 pinned flows */
1684 1, /* qid 0 port 2 pinned flows */
1685 0, /* qid 0 port 3 pinned flows */
1687 uint64_t queue_expected_zero[] = {
1697 0, /* qid 0 port 0 pinned flows */
1698 0, /* qid 0 port 1 pinned flows */
1699 1, /* qid 0 port 2 pinned flows */
1700 0, /* qid 0 port 3 pinned flows */
1702 if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
1703 RTE_DIM(queue_names) != NUM_Q_STATS) {
1704 printf("%d : queue array of wrong size\n", __LINE__);
1709 for (i = 0; (int)i < ret; i++) {
1711 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1714 if (id != i + QUEUE_OFF) {
1715 printf("%d: %s id incorrect, expected %d got %d\n",
1716 __LINE__, queue_names[i], i+QUEUE_OFF,
1720 if (val != queue_expected[i]) {
/* fix: print the compared value 'val', not 'id', and match the format */
1721 printf("%d: %s value incorrect, expected %"PRIu64
1722 " got %"PRIu64"\n", __LINE__, queue_names[i],
1723 queue_expected[i], val);
1727 int reset_ret = rte_event_dev_xstats_reset(evdev,
1728 RTE_EVENT_DEV_XSTATS_QUEUE,
1731 printf("%d: failed to reset successfully\n", __LINE__);
1734 /* check value again */
1735 val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i],
1737 if (val != queue_expected_zero[i]) {
1738 printf("%d: %s value incorrect, expected %"PRIu64
1739 " got %"PRIu64"\n", __LINE__, queue_names[i],
1740 queue_expected_zero[i], val);
/* Check that an ORDERED queue can be set up twice on the same queue id
 * (reconfiguration) without error, then linked and the device started.
 */
1756 ordered_reconfigure(struct test *t)
1758 if (init(t, 1, 1) < 0 ||
1759 create_ports(t, 1) < 0) {
1760 printf("%d: Error initializing device\n", __LINE__);
1764 const struct rte_event_queue_conf conf = {
1765 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY,
1766 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1767 .nb_atomic_flows = 1024,
1768 .nb_atomic_order_sequences = 1024,
1771 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1772 printf("%d: error creating qid\n", __LINE__);
/* second setup of the same qid exercises the reconfigure path */
1776 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1777 printf("%d: error creating qid, for 2nd time\n", __LINE__);
1781 rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1782 if (rte_event_dev_start(evdev) < 0) {
1783 printf("%d: Error with start call\n", __LINE__);
/* Enqueue one packet to each of 3 QIDs of descending priority and verify
 * dequeue order follows QID priority (highest first), not ingress order.
 */
1795 qid_priorities(struct test *t)
1797 /* Test works by having a CQ with enough empty space for all packets,
1798 * and enqueueing 3 packets to 3 QIDs. They must return based on the
1799 * priority of the QID, not the ingress order, to pass the test
1802 /* Create instance with 1 ports, and 3 qids */
1803 if (init(t, 3, 1) < 0 ||
1804 create_ports(t, 1) < 0) {
1805 printf("%d: Error initializing device\n", __LINE__);
1809 for (i = 0; i < 3; i++) {
1811 const struct rte_event_queue_conf conf = {
1812 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
1813 /* increase priority (0 == highest), as we go */
1814 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
1815 .nb_atomic_flows = 1024,
1816 .nb_atomic_order_sequences = 1024,
1819 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
1820 printf("%d: error creating qid %d\n", __LINE__, i);
1826 /* map all QIDs to port */
1827 rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1829 if (rte_event_dev_start(evdev) < 0) {
1830 printf("%d: Error with start call\n", __LINE__);
1834 /* enqueue 3 packets, setting seqn and QID to check priority */
1835 for (i = 0; i < 3; i++) {
1836 struct rte_event ev;
1837 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1839 printf("%d: gen of pkt failed\n", __LINE__);
1842 ev.queue_id = t->qid[i];
1843 ev.op = RTE_EVENT_OP_NEW;
1847 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1849 printf("%d: Failed to enqueue\n", __LINE__);
1854 rte_event_schedule(evdev);
1856 /* dequeue packets, verify priority was upheld */
1857 struct rte_event ev[32];
1859 rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
1860 if (deq_pkts != 3) {
1861 printf("%d: failed to deq packets\n", __LINE__);
1862 rte_event_dev_dump(evdev, stdout);
/* highest-priority QID got seqn 2, so expect seqn 2, 1, 0 in that order */
1865 for (i = 0; i < 3; i++) {
1866 if (ev[i].mbuf->seqn != 2-i) {
1868 "%d: qid priority test: seqn %d incorrectly prioritized\n",
/* Enqueue a fixed flow pattern into one atomic QID mapped to ports 1-3 and
 * verify per-port inflight counts show the expected flow-to-CQ distribution.
 */
1878 load_balancing(struct test *t)
1880 const int rx_enq = 0;
1884 if (init(t, 1, 4) < 0 ||
1885 create_ports(t, 4) < 0 ||
1886 create_atomic_qids(t, 1) < 0) {
1887 printf("%d: Error initializing device\n", __LINE__);
1891 for (i = 0; i < 3; i++) {
1892 /* map port 1 - 3 inclusive */
1893 if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
1895 printf("%d: error mapping qid to port %d\n",
1901 if (rte_event_dev_start(evdev) < 0) {
1902 printf("%d: Error with start call\n", __LINE__);
1906 /************** FORWARD ****************/
1908 * Create a set of flows that test the load-balancing operation of the
1909 * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
1910 * with a new flow, which should be sent to the 3rd mapped CQ
1912 static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
1914 for (i = 0; i < RTE_DIM(flows); i++) {
1915 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1917 printf("%d: gen of pkt failed\n", __LINE__);
1921 struct rte_event ev = {
1922 .op = RTE_EVENT_OP_NEW,
1923 .queue_id = t->qid[0],
1924 .flow_id = flows[i],
1927 /* generate pkt and enqueue */
1928 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1930 printf("%d: Failed to enqueue\n", __LINE__);
1935 rte_event_schedule(evdev);
1937 struct test_event_dev_stats stats;
1938 err = test_event_dev_stats_get(evdev, &stats);
1940 printf("%d: failed to get stats\n", __LINE__);
/* flows {0,1,1,0,0,2,2,0,2}: flow 0 -> port 1 (4 pkts),
 * flow 1 -> port 2 (2 pkts), flow 2 -> port 3 (3 pkts)
 */
1944 if (stats.port_inflight[1] != 4) {
1945 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
1949 if (stats.port_inflight[2] != 2) {
1950 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
1954 if (stats.port_inflight[3] != 3) {
1955 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
/* Verify that once a flow has drained (all its packets released), new
 * packets for that flow can migrate to a different, less-loaded CQ.
 */
1965 load_balancing_history(struct test *t)
1967 struct test_event_dev_stats stats = {0};
1968 const int rx_enq = 0;
1972 /* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
1973 if (init(t, 1, 4) < 0 ||
1974 create_ports(t, 4) < 0 ||
1975 create_atomic_qids(t, 1) < 0)
1978 /* CQ mapping to QID */
1979 if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
1980 printf("%d: error mapping port 1 qid\n", __LINE__);
1983 if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
1984 printf("%d: error mapping port 2 qid\n", __LINE__);
1987 if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
1988 printf("%d: error mapping port 3 qid\n", __LINE__);
1991 if (rte_event_dev_start(evdev) < 0) {
1992 printf("%d: Error with start call\n", __LINE__);
1997 * Create a set of flows that test the load-balancing operation of the
1998 * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
1999 * the packet from CQ 0, send in a new set of flows. Ensure that:
2000 * 1. The new flow 3 gets into the empty CQ0
2001 * 2. packets for existing flow gets added into CQ1
2002 * 3. Next flow 0 pkt is now onto CQ2, since CQ0 and CQ1 now contain
2003 * more outstanding pkts
2005 * This test makes sure that when a flow ends (i.e. all packets
2006 * have been completed for that flow), that the flow can be moved
2007 * to a different CQ when new packets come in for that flow.
2009 static uint32_t flows1[] = {0, 1, 1, 2};
2011 for (i = 0; i < RTE_DIM(flows1); i++) {
2012 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2013 struct rte_event ev = {
2014 .flow_id = flows1[i],
2015 .op = RTE_EVENT_OP_NEW,
2016 .queue_id = t->qid[0],
2017 .event_type = RTE_EVENT_TYPE_CPU,
2018 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2023 printf("%d: gen of pkt failed\n", __LINE__);
2026 arp->hash.rss = flows1[i];
2027 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2029 printf("%d: Failed to enqueue\n", __LINE__);
2034 /* call the scheduler */
2035 rte_event_schedule(evdev);
2037 /* Dequeue the flow 0 packet from port 1, so that we can then drop */
2038 struct rte_event ev;
2039 if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
2040 printf("%d: failed to dequeue\n", __LINE__);
2043 if (ev.mbuf->hash.rss != flows1[0]) {
2044 printf("%d: unexpected flow received\n", __LINE__);
2048 /* drop the flow 0 packet from port 1 */
2049 rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
2051 /* call the scheduler */
2052 rte_event_schedule(evdev);
2055 * Set up the next set of flows, first a new flow to fill up
2056 * CQ 0, so that the next flow 0 packet should go to CQ2
2058 static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
2060 for (i = 0; i < RTE_DIM(flows2); i++) {
2061 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2062 struct rte_event ev = {
2063 .flow_id = flows2[i],
2064 .op = RTE_EVENT_OP_NEW,
2065 .queue_id = t->qid[0],
2066 .event_type = RTE_EVENT_TYPE_CPU,
2067 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2072 printf("%d: gen of pkt failed\n", __LINE__);
2075 arp->hash.rss = flows2[i];
2077 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2079 printf("%d: Failed to enqueue\n", __LINE__);
2085 rte_event_schedule(evdev);
2087 err = test_event_dev_stats_get(evdev, &stats);
2089 printf("%d:failed to get stats\n", __LINE__);
2094 * Now check the resulting inflights on each port.
2096 if (stats.port_inflight[1] != 3) {
2097 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2099 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2100 (unsigned int)stats.port_inflight[1],
2101 (unsigned int)stats.port_inflight[2],
2102 (unsigned int)stats.port_inflight[3]);
2105 if (stats.port_inflight[2] != 4) {
2106 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2108 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2109 (unsigned int)stats.port_inflight[1],
2110 (unsigned int)stats.port_inflight[2],
2111 (unsigned int)stats.port_inflight[3]);
2114 if (stats.port_inflight[3] != 2) {
2115 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2117 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2118 (unsigned int)stats.port_inflight[1],
2119 (unsigned int)stats.port_inflight[2],
2120 (unsigned int)stats.port_inflight[3]);
/* drain and release everything so the device can be cleaned up */
2124 for (i = 1; i <= 3; i++) {
2125 struct rte_event ev;
2126 while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
2127 rte_event_enqueue_burst(evdev, i, &release_ev, 1);
2129 rte_event_schedule(evdev);
/* Enqueue an event with an out-of-range queue_id and verify it is dropped:
 * no inflight increment on the port, and the drop is counted exactly once
 * (in port_rx_dropped, not in the device-level rx_dropped).
 */
2136 invalid_qid(struct test *t)
2138 struct test_event_dev_stats stats;
2139 const int rx_enq = 0;
2143 if (init(t, 1, 4) < 0 ||
2144 create_ports(t, 4) < 0 ||
2145 create_atomic_qids(t, 1) < 0) {
2146 printf("%d: Error initializing device\n", __LINE__);
2150 /* CQ mapping to QID */
2151 for (i = 0; i < 4; i++) {
2152 err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
2155 printf("%d: error mapping port 1 qid\n", __LINE__);
2160 if (rte_event_dev_start(evdev) < 0) {
2161 printf("%d: Error with start call\n", __LINE__);
2166 * Send in a packet with an invalid qid to the scheduler.
2167 * We should see the packed enqueued OK, but the inflights for
2168 * that packet should not be incremented, and the rx_dropped
2169 * should be incremented.
2171 static uint32_t flows1[] = {20};
2173 for (i = 0; i < RTE_DIM(flows1); i++) {
2174 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2176 printf("%d: gen of pkt failed\n", __LINE__);
2180 struct rte_event ev = {
2181 .op = RTE_EVENT_OP_NEW,
2182 .queue_id = t->qid[0] + flows1[i],
2186 /* generate pkt and enqueue */
2187 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2189 printf("%d: Failed to enqueue\n", __LINE__);
2194 /* call the scheduler */
2195 rte_event_schedule(evdev);
2197 err = test_event_dev_stats_get(evdev, &stats);
2199 printf("%d: failed to get stats\n", __LINE__);
2204 * Now check the resulting inflights on the port, and the rx_dropped.
/* NOTE(review): the messages below say "port 1" while index 0 is checked;
 * likely they mean the first port - confirm before relying on the text.
 */
2206 if (stats.port_inflight[0] != 0) {
2207 printf("%d:%s: port 1 inflight count not correct\n", __LINE__,
2209 rte_event_dev_dump(evdev, stdout);
2212 if (stats.port_rx_dropped[0] != 1) {
2213 printf("%d:%s: port 1 drops\n", __LINE__, __func__);
2214 rte_event_dev_dump(evdev, stdout);
2217 /* each packet drop should only be counted in one place - port or dev */
2218 if (stats.rx_dropped != 0) {
2219 printf("%d:%s: port 1 dropped count not correct\n", __LINE__,
2221 rte_event_dev_dump(evdev, stdout);
/* Push a single tagged packet through enqueue -> schedule -> dequeue ->
 * release, verifying stats (rx/tx/inflight) and the magic seqn at each step.
 */
2230 single_packet(struct test *t)
2232 const uint32_t MAGIC_SEQN = 7321;
2233 struct rte_event ev;
2234 struct test_event_dev_stats stats;
2235 const int rx_enq = 0;
2236 const int wrk_enq = 2;
2239 /* Create instance with 4 ports */
2240 if (init(t, 1, 4) < 0 ||
2241 create_ports(t, 4) < 0 ||
2242 create_atomic_qids(t, 1) < 0) {
2243 printf("%d: Error initializing device\n", __LINE__);
2247 /* CQ mapping to QID */
2248 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
2250 printf("%d: error mapping lb qid\n", __LINE__);
2255 if (rte_event_dev_start(evdev) < 0) {
2256 printf("%d: Error with start call\n", __LINE__);
2260 /************** Gen pkt and enqueue ****************/
2261 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2263 printf("%d: gen of pkt failed\n", __LINE__);
2267 ev.op = RTE_EVENT_OP_NEW;
2268 ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
2272 arp->seqn = MAGIC_SEQN;
2274 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2276 printf("%d: Failed to enqueue\n", __LINE__);
2280 rte_event_schedule(evdev);
2282 err = test_event_dev_stats_get(evdev, &stats);
2284 printf("%d: failed to get stats\n", __LINE__);
2288 if (stats.rx_pkts != 1 ||
2289 stats.tx_pkts != 1 ||
2290 stats.port_inflight[wrk_enq] != 1) {
2291 printf("%d: Sched core didn't handle pkt as expected\n",
2293 rte_event_dev_dump(evdev, stdout);
2299 deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
2301 printf("%d: Failed to deq\n", __LINE__);
2305 err = test_event_dev_stats_get(evdev, &stats);
2307 printf("%d: failed to get stats\n", __LINE__);
/* NOTE(review): this stats_get duplicates the one just above and its
 * return value is not visibly checked - looks redundant; confirm intent.
 */
2311 err = test_event_dev_stats_get(evdev, &stats);
2312 if (ev.mbuf->seqn != MAGIC_SEQN) {
2313 printf("%d: magic sequence number not dequeued\n", __LINE__);
2317 rte_pktmbuf_free(ev.mbuf);
2318 err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
2320 printf("%d: Failed to enqueue\n", __LINE__);
2323 rte_event_schedule(evdev);
2325 err = test_event_dev_stats_get(evdev, &stats);
2326 if (stats.port_inflight[wrk_enq] != 0) {
2327 printf("%d: port inflight not correct\n", __LINE__);
/* Verify per-port inflight accounting: inflights stay constant across
 * dequeue and only drop to zero after RELEASE events are scheduled.
 * Fixed here: the p2 check's error message said "port 1" (copy-paste of
 * the p1 check) while it tests stats.port_inflight[p2].
 */
2336 inflight_counts(struct test *t)
2338 struct rte_event ev;
2339 struct test_event_dev_stats stats;
2340 const int rx_enq = 0;
2346 /* Create instance with 4 ports */
2347 if (init(t, 2, 3) < 0 ||
2348 create_ports(t, 3) < 0 ||
2349 create_atomic_qids(t, 2) < 0) {
2350 printf("%d: Error initializing device\n", __LINE__);
2354 /* CQ mapping to QID */
2355 err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
2357 printf("%d: error mapping lb qid\n", __LINE__);
2361 err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
2363 printf("%d: error mapping lb qid\n", __LINE__);
2368 if (rte_event_dev_start(evdev) < 0) {
2369 printf("%d: Error with start call\n", __LINE__);
2373 /************** FORWARD ****************/
2375 for (i = 0; i < QID1_NUM; i++) {
2376 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2379 printf("%d: gen of pkt failed\n", __LINE__);
2383 ev.queue_id = t->qid[0];
2384 ev.op = RTE_EVENT_OP_NEW;
2386 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2388 printf("%d: Failed to enqueue\n", __LINE__);
2393 for (i = 0; i < QID2_NUM; i++) {
2394 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2397 printf("%d: gen of pkt failed\n", __LINE__);
2400 ev.queue_id = t->qid[1];
2401 ev.op = RTE_EVENT_OP_NEW;
2403 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2405 printf("%d: Failed to enqueue\n", __LINE__);
2411 rte_event_schedule(evdev);
2413 err = test_event_dev_stats_get(evdev, &stats);
2415 printf("%d: failed to get stats\n", __LINE__);
2419 if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
2420 stats.tx_pkts != QID1_NUM + QID2_NUM) {
2421 printf("%d: Sched core didn't handle pkt as expected\n",
2426 if (stats.port_inflight[p1] != QID1_NUM) {
2427 printf("%d: %s port 1 inflight not correct\n", __LINE__,
2431 if (stats.port_inflight[p2] != QID2_NUM) {
2432 printf("%d: %s port 2 inflight not correct\n", __LINE__,
2437 /************** DEQUEUE INFLIGHT COUNT CHECKS ****************/
2439 struct rte_event events[QID1_NUM + QID2_NUM];
2440 uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
2441 RTE_DIM(events), 0);
2443 if (deq_pkts != QID1_NUM) {
2444 printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
2447 err = test_event_dev_stats_get(evdev, &stats);
2448 if (stats.port_inflight[p1] != QID1_NUM) {
2449 printf("%d: port 1 inflight decrement after DEQ != 0\n",
2453 for (i = 0; i < QID1_NUM; i++) {
2454 err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
2457 printf("%d: %s rte enqueue of inf release failed\n",
2458 __LINE__, __func__);
2464 * As the scheduler core decrements inflights, it needs to run to
2465 * process packets to act on the drop messages
2467 rte_event_schedule(evdev);
2469 err = test_event_dev_stats_get(evdev, &stats);
2470 if (stats.port_inflight[p1] != 0) {
2471 printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
2476 deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
2477 RTE_DIM(events), 0);
2478 if (deq_pkts != QID2_NUM) {
2479 printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
2482 err = test_event_dev_stats_get(evdev, &stats);
2483 if (stats.port_inflight[p2] != QID2_NUM) {
2484 printf("%d: port 2 inflight decrement after DEQ != 0\n",
2488 for (i = 0; i < QID2_NUM; i++) {
2489 err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
2492 printf("%d: %s rte enqueue of inf release failed\n",
2493 __LINE__, __func__);
2499 * As the scheduler core decrements inflights, it needs to run to
2500 * process packets to act on the drop messages
2502 rte_event_schedule(evdev);
2504 err = test_event_dev_stats_get(evdev, &stats);
2505 if (stats.port_inflight[p2] != 0) {
2506 printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
2513 rte_event_dev_dump(evdev, stdout);
/* Shared body for ordered_basic/unordered_basic: push 3 tagged packets
 * through three worker ports, forward them in reverse order, and (when
 * check_order is set) verify the tx port restores the original sequence.
 * Fixed here: the order-failure printf dereferenced mbufs_out[j], which is
 * zero-initialized and never assigned in this function - a NULL dereference
 * on the error path. Print the value actually compared, deq_ev[j].mbuf->seqn.
 */
2519 parallel_basic(struct test *t, int check_order)
2521 const uint8_t rx_port = 0;
2522 const uint8_t w1_port = 1;
2523 const uint8_t w3_port = 3;
2524 const uint8_t tx_port = 4;
2527 uint32_t deq_pkts, j;
2528 struct rte_mbuf *mbufs[3];
2529 struct rte_mbuf *mbufs_out[3] = { 0 };
2530 const uint32_t MAGIC_SEQN = 1234;
2532 /* Create instance with 4 ports */
2533 if (init(t, 2, tx_port + 1) < 0 ||
2534 create_ports(t, tx_port + 1) < 0 ||
2535 (check_order ? create_ordered_qids(t, 1) :
2536 create_unordered_qids(t, 1)) < 0 ||
2537 create_directed_qids(t, 1, &tx_port)) {
2538 printf("%d: Error initializing device\n", __LINE__);
2544 * We need three ports, all mapped to the same ordered qid0. Then we'll
2545 * take a packet out to each port, re-enqueue in reverse order,
2546 * then make sure the reordering has taken place properly when we
2547 * dequeue from the tx_port.
2549 * Simplified test setup diagram:
2553 * qid0 - w2_port - qid1
2557 /* CQ mapping to QID for LB ports (directed mapped on create) */
2558 for (i = w1_port; i <= w3_port; i++) {
2559 err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
2562 printf("%d: error mapping lb qid\n", __LINE__);
2568 if (rte_event_dev_start(evdev) < 0) {
2569 printf("%d: Error with start call\n", __LINE__);
2573 /* Enqueue 3 packets to the rx port */
2574 for (i = 0; i < 3; i++) {
2575 struct rte_event ev;
2576 mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
2578 printf("%d: gen of pkt failed\n", __LINE__);
2582 ev.queue_id = t->qid[0];
2583 ev.op = RTE_EVENT_OP_NEW;
2585 mbufs[i]->seqn = MAGIC_SEQN + i;
2587 /* generate pkt and enqueue */
2588 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
2590 printf("%d: Failed to enqueue pkt %u, retval = %u\n",
2596 rte_event_schedule(evdev);
2598 /* use extra slot to make logic in loops easier */
2599 struct rte_event deq_ev[w3_port + 1];
2601 /* Dequeue the 3 packets, one from each worker port */
2602 for (i = w1_port; i <= w3_port; i++) {
2603 deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
2605 if (deq_pkts != 1) {
2606 printf("%d: Failed to deq\n", __LINE__);
2607 rte_event_dev_dump(evdev, stdout);
2612 /* Enqueue each packet in reverse order, flushing after each one */
2613 for (i = w3_port; i >= w1_port; i--) {
2615 deq_ev[i].op = RTE_EVENT_OP_FORWARD;
2616 deq_ev[i].queue_id = t->qid[1];
2617 err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
2619 printf("%d: Failed to enqueue\n", __LINE__);
2623 rte_event_schedule(evdev);
2625 /* dequeue from the tx ports, we should get 3 packets */
2626 deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
2629 /* Check to see if we've got all 3 packets */
2630 if (deq_pkts != 3) {
2631 printf("%d: expected 3 pkts at tx port got %d from port %d\n",
2632 __LINE__, deq_pkts, tx_port);
2633 rte_event_dev_dump(evdev, stdout);
2637 /* Check to see if the sequence numbers are in expected order */
2639 for (j = 0 ; j < deq_pkts ; j++) {
2640 if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
2642 "%d: Incorrect sequence number(%d) from port %d\n",
2643 __LINE__, deq_ev[j].mbuf->seqn, tx_port);
2649 /* Destroy the instance */
/* Run parallel_basic with ordering checks enabled (ORDERED qids). */
2655 ordered_basic(struct test *t)
2657 return parallel_basic(t, 1);
/* Run parallel_basic without ordering checks (unordered/parallel qids). */
2661 unordered_basic(struct test *t)
2663 return parallel_basic(t, 0);
/* Head-of-line-blocking check: saturate one port's CQ with a single flow,
 * park one more packet of that flow in the IQ, then verify a packet of a
 * different flow bypasses it to the other port. Uses xstats ring counters
 * to observe CQ/IQ occupancy.
 */
2667 holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
2669 const struct rte_event new_ev = {
2670 .op = RTE_EVENT_OP_NEW
2671 /* all other fields zero */
2673 struct rte_event ev = new_ev;
2674 unsigned int rx_port = 0; /* port we get the first flow on */
2675 char rx_port_used_stat[64];
2676 char rx_port_free_stat[64];
2677 char other_port_used_stat[64];
2679 if (init(t, 1, 2) < 0 ||
2680 create_ports(t, 2) < 0 ||
2681 create_atomic_qids(t, 1) < 0) {
2682 printf("%d: Error initializing device\n", __LINE__);
2685 int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2686 if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 ||
2688 printf("%d: Error links queue to ports\n", __LINE__);
2691 if (rte_event_dev_start(evdev) < 0) {
2692 printf("%d: Error with start call\n", __LINE__);
2696 /* send one packet and see where it goes, port 0 or 1 */
2697 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2698 printf("%d: Error doing first enqueue\n", __LINE__);
2701 rte_event_schedule(evdev);
2703 if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
/* build stat names for whichever port received the first flow */
2707 snprintf(rx_port_used_stat, sizeof(rx_port_used_stat),
2708 "port_%u_cq_ring_used", rx_port);
2709 snprintf(rx_port_free_stat, sizeof(rx_port_free_stat),
2710 "port_%u_cq_ring_free", rx_port);
2711 snprintf(other_port_used_stat, sizeof(other_port_used_stat),
2712 "port_%u_cq_ring_used", rx_port ^ 1);
2713 if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL)
2715 printf("%d: Error, first event not scheduled\n", __LINE__);
2719 /* now fill up the rx port's queue with one flow to cause HOLB */
2722 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2723 printf("%d: Error with enqueue\n", __LINE__);
2726 rte_event_schedule(evdev);
2727 } while (rte_event_dev_xstats_by_name_get(evdev,
2728 rx_port_free_stat, NULL) != 0);
2730 /* one more packet, which needs to stay in IQ - i.e. HOLB */
2732 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2733 printf("%d: Error with enqueue\n", __LINE__);
2736 rte_event_schedule(evdev);
2738 /* check that the other port still has an empty CQ */
2739 if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2741 printf("%d: Error, second port CQ is not empty\n", __LINE__);
2744 /* check IQ now has one packet */
2745 if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2747 printf("%d: Error, QID does not have exactly 1 packet\n",
2752 /* send another flow, which should pass the other IQ entry */
2755 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2756 printf("%d: Error with enqueue\n", __LINE__);
2759 rte_event_schedule(evdev);
2761 if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2763 printf("%d: Error, second flow did not pass out first\n",
2768 if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2770 printf("%d: Error, QID does not have exactly 1 packet\n",
2777 rte_event_dev_dump(evdev, stdout);
/* Worker lcore body for worker_loopback(): dequeues bursts on port[1] and
 * either forwards each event (FORWARD op) or, once an event has completed
 * its loops, frees its mbuf and releases the event (RELEASE op).
 */
2783 worker_loopback_worker_fn(void *arg)
2785 struct test *t = arg;
2786 uint8_t port = t->port[1];
2791 * Takes packets from the input port and then loops them back through
2792 * the Eventdev. Each packet gets looped through QIDs 0-8, 16 times
2793 * so each packet goes through 8*16 = 128 times.
2795 printf("%d: \tWorker function started\n", __LINE__);
2796 while (count < NUM_PACKETS) {
2797 #define BURST_SIZE 32
2798 struct rte_event ev[BURST_SIZE];
2799 uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev,
2806 for (i = 0; i < nb_rx; i++) {
/* not yet at the final queue: keep forwarding through the qids */
2808 if (ev[i].queue_id != 8) {
2809 ev[i].op = RTE_EVENT_OP_FORWARD;
2810 enqd = rte_event_enqueue_burst(evdev, port,
2813 printf("%d: Can't enqueue FWD!!\n",
/* udata64 counts completed passes through the full qid chain */
2821 ev[i].mbuf->udata64++;
2822 if (ev[i].mbuf->udata64 != 16) {
2823 ev[i].op = RTE_EVENT_OP_FORWARD;
2824 enqd = rte_event_enqueue_burst(evdev, port,
2827 printf("%d: Can't enqueue FWD!!\n",
2833 /* we have hit 16 iterations through system - drop */
2834 rte_pktmbuf_free(ev[i].mbuf);
2836 ev[i].op = RTE_EVENT_OP_RELEASE;
2837 enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1);
2839 printf("%d drop enqueue failed\n", __LINE__);
/* Producer lcore body for worker_loopback(): allocates mbufs (retrying
 * until the pool has space) and injects them as NEW events on port[0],
 * using the mbuf address to derive a 16-bit flow id.
 */
2849 worker_loopback_producer_fn(void *arg)
2851 struct test *t = arg;
2852 uint8_t port = t->port[0];
2855 printf("%d: \tProducer function started\n", __LINE__);
2856 while (count < NUM_PACKETS) {
2857 struct rte_mbuf *m = 0;
2859 m = rte_pktmbuf_alloc(t->mbuf_pool);
2860 } while (m == NULL);
2864 struct rte_event ev = {
2865 .op = RTE_EVENT_OP_NEW,
2866 .queue_id = t->qid[0],
2867 .flow_id = (uintptr_t)m & 0xFFFF,
/* retry the enqueue until the port accepts the event */
2871 if (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1) {
2872 while (rte_event_enqueue_burst(evdev, port, &ev, 1) !=
/* Stress test: one producer lcore injects NUM_PACKETS events, one worker
 * lcore loops each through the qids repeatedly, while this (scheduler)
 * core calls rte_event_schedule() and watches tx progress to detect
 * deadlock (no tx progress for ~3 seconds).
 */
2884 worker_loopback(struct test *t)
2886 /* use a single producer core, and a worker core to see what happens
2887 * if the worker loops packets back multiple times
2889 struct test_event_dev_stats stats;
2890 uint64_t print_cycles = 0, cycles = 0;
2891 uint64_t tx_pkts = 0;
2893 int w_lcore, p_lcore;
2895 if (init(t, 8, 2) < 0 ||
2896 create_atomic_qids(t, 8) < 0) {
2897 printf("%d: Error initializing device\n", __LINE__);
2901 /* RX with low max events */
2902 static struct rte_event_port_conf conf = {
2903 .dequeue_depth = 32,
2904 .enqueue_depth = 64,
2906 /* beware: this cannot be initialized in the static above as it would
2907 * only be initialized once - and this needs to be set for multiple runs
2909 conf.new_event_threshold = 512;
2911 if (rte_event_port_setup(evdev, 0, &conf) < 0) {
2912 printf("Error setting up RX port\n");
2916 /* TX with higher max events */
2917 conf.new_event_threshold = 4096;
2918 if (rte_event_port_setup(evdev, 1, &conf) < 0) {
2919 printf("Error setting up TX port\n");
2924 /* CQ mapping to QID */
2925 err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2926 if (err != 8) { /* should have mapped all queues*/
2927 printf("%d: error mapping port 2 to all qids\n", __LINE__);
2931 if (rte_event_dev_start(evdev) < 0) {
2932 printf("%d: Error with start call\n", __LINE__);
2936 p_lcore = rte_get_next_lcore(
2937 /* start core */ -1,
2938 /* skip master */ 1,
2940 w_lcore = rte_get_next_lcore(p_lcore, 1, 0);
2942 rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);
2943 rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);
2945 print_cycles = cycles = rte_get_timer_cycles();
2946 while (rte_eal_get_lcore_state(p_lcore) != FINISHED ||
2947 rte_eal_get_lcore_state(w_lcore) != FINISHED) {
2949 rte_event_schedule(evdev);
2951 uint64_t new_cycles = rte_get_timer_cycles();
/* once per second, print progress */
2953 if (new_cycles - print_cycles > rte_get_timer_hz()) {
2954 test_event_dev_stats_get(evdev, &stats);
2956 "%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n",
2957 __LINE__, stats.rx_pkts, stats.tx_pkts);
2959 print_cycles = new_cycles;
/* no tx progress for ~3s is treated as a deadlock */
2961 if (new_cycles - cycles > rte_get_timer_hz() * 3) {
2962 test_event_dev_stats_get(evdev, &stats);
2963 if (stats.tx_pkts == tx_pkts) {
2964 rte_event_dev_dump(evdev, stdout);
2965 printf("Dumping xstats:\n");
2968 "%d: No schedules for seconds, deadlock\n",
2972 tx_pkts = stats.tx_pkts;
2973 cycles = new_cycles;
2976 rte_event_schedule(evdev); /* ensure all completions are flushed */
2978 rte_eal_mp_wait_lcore();
/* mbuf pool shared across repeated test runs; created lazily on the first
 * invocation of test_sw_eventdev() and deliberately never freed so re-runs
 * can reuse it */
2984 static struct rte_mempool *eventdev_func_mempool;
/*
 * Top-level test driver for the software eventdev PMD.
 *
 * Locates (or creates via vdev) the "event_sw0" device, lazily creates the
 * shared mbuf pool, then runs each functional test in sequence, printing a
 * banner before each and an error message on failure.
 * NOTE(review): the failure-handling lines after each test call (goto/return
 * on ret != 0, presumably) are elided from this view; confirm in full source.
 */
2987 test_sw_eventdev(void)
2989 struct test *t = malloc(sizeof(struct test));
2992 /* manually initialize the op, older gcc's complain on static
2993 * initialization of struct elements that are a bitfield.
2995 release_ev.op = RTE_EVENT_OP_RELEASE;
2997 const char *eventdev_name = "event_sw0";
2998 evdev = rte_event_dev_get_dev_id(eventdev_name);
/* device not present: create the software eventdev vdev on the fly */
3000 printf("%d: Eventdev %s not found - creating.\n",
3001 __LINE__, eventdev_name);
3002 if (rte_vdev_init(eventdev_name, NULL) < 0) {
3003 printf("Error creating eventdev\n");
3006 evdev = rte_event_dev_get_dev_id(eventdev_name);
3008 printf("Error finding newly created eventdev\n");
3013 /* Only create mbuf pool once, reuse for each test run */
3014 if (!eventdev_func_mempool) {
3015 eventdev_func_mempool = rte_pktmbuf_pool_create(
3016 "EVENTDEV_SW_SA_MBUF_POOL",
3017 (1<<12), /* 4k buffers */
3018 32 /*MBUF_CACHE_SIZE*/,
3020 512, /* use very small mbufs */
3022 if (!eventdev_func_mempool) {
3023 printf("ERROR creating mempool\n");
3027 t->mbuf_pool = eventdev_func_mempool;
/* run the functional test suite in order; each test gets a fresh device
 * configuration via its own init/setup calls */
3029 printf("*** Running Single Directed Packet test...\n");
3030 ret = test_single_directed_packet(t);
3032 printf("ERROR - Single Directed Packet test FAILED.\n");
3035 printf("*** Running Single Load Balanced Packet test...\n");
3036 ret = single_packet(t);
3038 printf("ERROR - Single Packet test FAILED.\n");
3041 printf("*** Running Unordered Basic test...\n");
3042 ret = unordered_basic(t);
3044 printf("ERROR - Unordered Basic test FAILED.\n");
3047 printf("*** Running Ordered Basic test...\n");
3048 ret = ordered_basic(t);
3050 printf("ERROR - Ordered Basic test FAILED.\n");
3053 printf("*** Running Burst Packets test...\n");
3054 ret = burst_packets(t);
3056 printf("ERROR - Burst Packets test FAILED.\n");
3059 printf("*** Running Load Balancing test...\n");
3060 ret = load_balancing(t);
3062 printf("ERROR - Load Balancing test FAILED.\n");
3065 printf("*** Running Prioritized Directed test...\n");
3066 ret = test_priority_directed(t);
3068 printf("ERROR - Prioritized Directed test FAILED.\n");
3071 printf("*** Running Prioritized Atomic test...\n");
3072 ret = test_priority_atomic(t);
3074 printf("ERROR - Prioritized Atomic test FAILED.\n");
3078 printf("*** Running Prioritized Ordered test...\n");
3079 ret = test_priority_ordered(t);
3081 printf("ERROR - Prioritized Ordered test FAILED.\n");
3084 printf("*** Running Prioritized Unordered test...\n");
3085 ret = test_priority_unordered(t);
3087 printf("ERROR - Prioritized Unordered test FAILED.\n");
3090 printf("*** Running Invalid QID test...\n");
3091 ret = invalid_qid(t);
3093 printf("ERROR - Invalid QID test FAILED.\n");
3096 printf("*** Running Load Balancing History test...\n");
3097 ret = load_balancing_history(t);
3099 printf("ERROR - Load Balancing History test FAILED.\n");
3102 printf("*** Running Inflight Count test...\n");
3103 ret = inflight_counts(t);
3105 printf("ERROR - Inflight Count test FAILED.\n");
3108 printf("*** Running Abuse Inflights test...\n");
3109 ret = abuse_inflights(t);
3111 printf("ERROR - Abuse Inflights test FAILED.\n");
3114 printf("*** Running XStats test...\n");
3115 ret = xstats_tests(t);
3117 printf("ERROR - XStats test FAILED.\n");
3120 printf("*** Running XStats ID Reset test...\n");
3121 ret = xstats_id_reset_tests(t);
3123 printf("ERROR - XStats ID Reset test FAILED.\n");
3126 printf("*** Running XStats Brute Force test...\n");
3127 ret = xstats_brute_force(t);
3129 printf("ERROR - XStats Brute Force test FAILED.\n");
3132 printf("*** Running XStats ID Abuse test...\n");
3133 ret = xstats_id_abuse_tests(t);
3135 printf("ERROR - XStats ID Abuse test FAILED.\n");
3138 printf("*** Running QID Priority test...\n");
3139 ret = qid_priorities(t);
3141 printf("ERROR - QID Priority test FAILED.\n");
3144 printf("*** Running Ordered Reconfigure test...\n");
3145 ret = ordered_reconfigure(t);
3147 printf("ERROR - Ordered Reconfigure test FAILED.\n");
3150 printf("*** Running Port LB Single Reconfig test...\n");
3151 ret = port_single_lb_reconfig(t);
3153 printf("ERROR - Port LB Single Reconfig test FAILED.\n");
3156 printf("*** Running Port Reconfig Credits test...\n");
3157 ret = port_reconfig_credits(t);
3159 printf("ERROR - Port Reconfig Credits Reset test FAILED.\n");
3162 printf("*** Running Head-of-line-blocking test...\n");
3165 printf("ERROR - Head-of-line-blocking test FAILED.\n");
/* loopback test needs master + producer + worker = 3 lcores minimum */
3168 if (rte_lcore_count() >= 3) {
3169 printf("*** Running Worker loopback test...\n");
3170 ret = worker_loopback(t);
3172 printf("ERROR - Worker loopback test FAILED.\n");
3176 printf("### Not enough cores for worker loopback test.\n");
3177 printf("### Need at least 3 cores for test.\n");
3180 * Free test instance, leaving mempool initialized, and a pointer to it
3181 * in static eventdev_func_mempool, as it is re-used on re-runs
/* register the suite with the DPDK test framework under the
 * "eventdev_sw_autotest" command name */
3188 REGISTER_TEST_COMMAND(eventdev_sw_autotest, test_sw_eventdev);