4 * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 #include <sys/queue.h>
41 #include <rte_memory.h>
42 #include <rte_launch.h>
44 #include <rte_per_lcore.h>
45 #include <rte_lcore.h>
46 #include <rte_debug.h>
47 #include <rte_ethdev.h>
48 #include <rte_cycles.h>
49 #include <rte_eventdev.h>
50 #include <rte_pause.h>
51 #include <rte_service.h>
52 #include <rte_service_component.h>
53 #include <rte_bus_vdev.h>
59 #define NUM_PACKETS (1<<18)
64 struct rte_mempool *mbuf_pool;
65 uint8_t port[MAX_PORTS];
66 uint8_t qid[MAX_QIDS];
71 static struct rte_event release_ev;
/* Build a single ARP-request mbuf from a static byte template, for use as
 * test traffic. portid is unused (see RTE_SET_USED below).
 * NOTE(review): this extract is elided — the mbuf declaration, the
 * allocation-failure check and the return statement are not visible here.
 */
73 static inline struct rte_mbuf *
74 rte_gen_arp(int portid, struct rte_mempool *mp)
78 * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
80 static const uint8_t arp_request[] = {
81 /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
82 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
83 /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
84 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
85 /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
86 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
87 /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
88 0x00, 0x00, 0x00, 0x00
/* NOTE(review): "- 1" drops the last template byte — looks like an
 * off-by-one; confirm against the intended 46-byte frame length. */
91 int pkt_len = sizeof(arp_request) - 1;
93 m = rte_pktmbuf_alloc(mp);
/* Copy the template directly into the mbuf data area and set both the
 * packet and segment lengths to match. */
97 memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
98 arp_request, pkt_len);
99 rte_pktmbuf_pkt_len(m) = pkt_len;
100 rte_pktmbuf_data_len(m) = pkt_len;
102 RTE_SET_USED(portid);
/* NOTE(review): function signature elided in this extract — this body
 * fetches and prints all device-, port- and queue-level xstats; presumably
 * a debug dump helper. Confirm the enclosing function name upstream. */
110 const uint32_t XSTATS_MAX = 1024;
112 uint32_t ids[XSTATS_MAX];
113 uint64_t values[XSTATS_MAX];
114 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
116 for (i = 0; i < XSTATS_MAX; i++)
119 /* Device names / values */
120 int ret = rte_event_dev_xstats_names_get(evdev,
121 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
122 xstats_names, ids, XSTATS_MAX);
124 printf("%d: xstats names get() returned error\n",
128 ret = rte_event_dev_xstats_get(evdev,
129 RTE_EVENT_DEV_XSTATS_DEVICE,
130 0, ids, values, ret);
131 if (ret > (signed int)XSTATS_MAX)
132 printf("%s %d: more xstats available than space\n",
134 for (i = 0; (signed int)i < ret; i++) {
135 printf("%d : %s : %"PRIu64"\n",
136 i, xstats_names[i].name, values[i]);
139 /* Port names / values */
140 ret = rte_event_dev_xstats_names_get(evdev,
141 RTE_EVENT_DEV_XSTATS_PORT, 0,
142 xstats_names, ids, XSTATS_MAX);
/* NOTE(review): names are fetched for port 0 but values for port 1 —
 * verify this id mismatch is intentional and not a copy-paste slip. */
143 ret = rte_event_dev_xstats_get(evdev,
144 RTE_EVENT_DEV_XSTATS_PORT, 1,
146 if (ret > (signed int)XSTATS_MAX)
147 printf("%s %d: more xstats available than space\n",
149 for (i = 0; (signed int)i < ret; i++) {
150 printf("%d : %s : %"PRIu64"\n",
151 i, xstats_names[i].name, values[i]);
154 /* Queue names / values */
155 ret = rte_event_dev_xstats_names_get(evdev,
156 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
157 xstats_names, ids, XSTATS_MAX);
/* NOTE(review): same pattern here — names for queue 0, values for
 * queue 1. Confirm against the original file. */
158 ret = rte_event_dev_xstats_get(evdev,
159 RTE_EVENT_DEV_XSTATS_QUEUE,
160 1, ids, values, ret);
161 if (ret > (signed int)XSTATS_MAX)
162 printf("%s %d: more xstats available than space\n",
164 for (i = 0; (signed int)i < ret; i++) {
165 printf("%d : %s : %"PRIu64"\n",
166 i, xstats_names[i].name, values[i]);
170 /* initialization and config */
/* Configure the event device under test with the requested number of
 * queues and ports, zeroing the test struct but preserving its mbuf pool.
 * NOTE(review): extract is elided — return paths are not visible here. */
172 init(struct test *t, int nb_queues, int nb_ports)
174 struct rte_event_dev_config config = {
175 .nb_event_queues = nb_queues,
176 .nb_event_ports = nb_ports,
177 .nb_event_queue_flows = 1024,
178 .nb_events_limit = 4096,
179 .nb_event_port_dequeue_depth = 128,
180 .nb_event_port_enqueue_depth = 128,
184 void *temp = t->mbuf_pool; /* save and restore mbuf pool */
186 memset(t, 0, sizeof(*t));
189 ret = rte_event_dev_configure(evdev, &config);
191 printf("%d: Error configuring device\n", __LINE__);
/* Set up num_ports event ports (up to MAX_PORTS) on the device, all with
 * the same static port configuration. Elided extract: error returns and
 * the bookkeeping of t->port[] are not visible here. */
196 create_ports(struct test *t, int num_ports)
199 static const struct rte_event_port_conf conf = {
200 .new_event_threshold = 1024,
204 if (num_ports > MAX_PORTS)
207 for (i = 0; i < num_ports; i++) {
208 if (rte_event_port_setup(evdev, i, &conf) < 0) {
209 printf("Error setting up port %d\n", i);
/* Create num_qids load-balanced queues of the given schedule type
 * (flags = RTE_SCHED_TYPE_*), appending after any queues already created
 * and bumping t->nb_qids. Overflow past MAX_QIDS is checked after the
 * increment. */
219 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
224 const struct rte_event_queue_conf conf = {
225 .schedule_type = flags,
226 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
227 .nb_atomic_flows = 1024,
228 .nb_atomic_order_sequences = 1024,
231 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
232 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
233 printf("%d: error creating qid %d\n", __LINE__, i);
238 t->nb_qids += num_qids;
239 if (t->nb_qids > MAX_QIDS)
/* Convenience wrapper: create num_qids atomic load-balanced queues. */
246 create_atomic_qids(struct test *t, int num_qids)
248 return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
/* Convenience wrapper: create num_qids ordered load-balanced queues. */
252 create_ordered_qids(struct test *t, int num_qids)
254 return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ORDERED);
/* Convenience wrapper: "unordered" here maps to the PARALLEL schedule
 * type (no ordering guarantees). */
259 create_unordered_qids(struct test *t, int num_qids)
261 return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_PARALLEL);
/* Create num_qids single-link (directed) queues and link each to the
 * corresponding entry in ports[]: queue i goes to ports[i - t->nb_qids].
 * t->nb_qids is advanced afterwards and checked against MAX_QIDS. */
265 create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
270 static const struct rte_event_queue_conf conf = {
271 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
272 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
275 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
276 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
277 printf("%d: error creating qid %d\n", __LINE__, i);
/* A single-link queue must map to exactly one port; link() must
 * report exactly 1 established link. */
282 if (rte_event_port_link(evdev, ports[i - t->nb_qids],
283 &t->qid[i], NULL, 1) != 1) {
284 printf("%d: error creating link for qid %d\n",
289 t->nb_qids += num_qids;
290 if (t->nb_qids > MAX_QIDS)
/* Per-test teardown: stop and close the event device. The test struct is
 * unused (reset happens in init()). */
298 cleanup(struct test *t __rte_unused)
300 rte_event_dev_stop(evdev);
301 rte_event_dev_close(evdev);
/* Snapshot of the device's xstats counters, filled by
 * test_event_dev_stats_get(). Device-wide totals first, then per-port and
 * per-queue arrays indexed by port/queue id. */
305 struct test_event_dev_stats {
306 uint64_t rx_pkts; /**< Total packets received */
307 uint64_t rx_dropped; /**< Total packets dropped (Eg Invalid QID) */
308 uint64_t tx_pkts; /**< Total packets transmitted */
310 /** Packets received on this port */
311 uint64_t port_rx_pkts[MAX_PORTS];
312 /** Packets dropped on this port */
313 uint64_t port_rx_dropped[MAX_PORTS];
314 /** Packets inflight on this port */
315 uint64_t port_inflight[MAX_PORTS];
316 /** Packets transmitted on this port */
317 uint64_t port_tx_pkts[MAX_PORTS];
318 /** Packets received on this qid */
319 uint64_t qid_rx_pkts[MAX_QIDS];
320 /** Packets dropped on this qid */
321 uint64_t qid_rx_dropped[MAX_QIDS];
322 /** Packets transmitted on this qid */
323 uint64_t qid_tx_pkts[MAX_QIDS];
/* Populate *stats by looking up each counter by its xstat name
 * ("dev_rx", "port_N_rx", "qid_N_tx", ...). The id of each stat is cached
 * in file-static arrays on the side.
 * NOTE(review): the static id caches are not thread-safe and persist
 * across tests — fine for this single-threaded test harness. */
327 test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
330 static uint32_t total_ids[3]; /* rx, tx and drop */
331 static uint32_t port_rx_pkts_ids[MAX_PORTS];
332 static uint32_t port_rx_dropped_ids[MAX_PORTS];
333 static uint32_t port_inflight_ids[MAX_PORTS];
334 static uint32_t port_tx_pkts_ids[MAX_PORTS];
335 static uint32_t qid_rx_pkts_ids[MAX_QIDS];
336 static uint32_t qid_rx_dropped_ids[MAX_QIDS];
337 static uint32_t qid_tx_pkts_ids[MAX_QIDS];
/* Device-wide totals. */
340 stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
341 "dev_rx", &total_ids[0]);
342 stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
343 "dev_drop", &total_ids[1]);
344 stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
345 "dev_tx", &total_ids[2]);
/* Per-port counters, names built as "port_<i>_<stat>". */
346 for (i = 0; i < MAX_PORTS; i++) {
348 snprintf(name, sizeof(name), "port_%u_rx", i);
349 stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
350 dev_id, name, &port_rx_pkts_ids[i]);
351 snprintf(name, sizeof(name), "port_%u_drop", i);
352 stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
353 dev_id, name, &port_rx_dropped_ids[i]);
354 snprintf(name, sizeof(name), "port_%u_inflight", i);
355 stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
356 dev_id, name, &port_inflight_ids[i]);
357 snprintf(name, sizeof(name), "port_%u_tx", i);
358 stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
359 dev_id, name, &port_tx_pkts_ids[i]);
/* Per-queue counters, names built as "qid_<i>_<stat>". */
361 for (i = 0; i < MAX_QIDS; i++) {
363 snprintf(name, sizeof(name), "qid_%u_rx", i);
364 stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
365 dev_id, name, &qid_rx_pkts_ids[i]);
366 snprintf(name, sizeof(name), "qid_%u_drop", i);
367 stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
368 dev_id, name, &qid_rx_dropped_ids[i]);
369 snprintf(name, sizeof(name), "qid_%u_tx", i);
370 stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
371 dev_id, name, &qid_tx_pkts_ids[i]);
377 /* run_prio_packet_test
378 * This performs a basic packet priority check on the test instance passed in.
379 * It is factored out of the main priority tests as the same tests must be
380 * performed to ensure prioritization of each type of QID.
383 * - An initialized test structure, including mempool
384 * - t->port[0] is initialized for both Enq / Deq of packets to the QID
385 * - t->qid[0] is the QID to be tested
386 * - if LB QID, the CQ must be mapped to the QID.
/* Enqueue two packets — NORMAL priority first, HIGHEST second — then run
 * one scheduler iteration and verify the HIGHEST-priority packet (seqn
 * MAGIC_SEQN[1]) is dequeued first. See the header comment above for the
 * preconditions on *t. */
389 run_prio_packet_test(struct test *t)
392 const uint32_t MAGIC_SEQN[] = {4711, 1234};
393 const uint32_t PRIORITY[] = {
394 RTE_EVENT_DEV_PRIORITY_NORMAL,
395 RTE_EVENT_DEV_PRIORITY_HIGHEST
398 for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
399 /* generate pkt and enqueue */
401 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
403 printf("%d: gen of pkt failed\n", __LINE__);
/* Tag each mbuf so dequeue order can be verified below. */
406 arp->seqn = MAGIC_SEQN[i];
408 ev = (struct rte_event){
409 .priority = PRIORITY[i],
410 .op = RTE_EVENT_OP_NEW,
411 .queue_id = t->qid[0],
414 err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
416 printf("%d: error failed to enqueue\n", __LINE__);
/* Run the SW scheduler once so events reach the output queue. */
421 rte_service_run_iter_on_app_lcore(t->service_id, 1);
423 struct test_event_dev_stats stats;
424 err = test_event_dev_stats_get(evdev, &stats);
426 printf("%d: error failed to get stats\n", __LINE__);
430 if (stats.port_rx_pkts[t->port[0]] != 2) {
431 printf("%d: error stats incorrect for directed port\n",
433 rte_event_dev_dump(evdev, stdout);
437 struct rte_event ev, ev2;
439 deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
441 printf("%d: error failed to deq\n", __LINE__);
442 rte_event_dev_dump(evdev, stdout);
/* The second-enqueued (HIGHEST priority) packet must come out first. */
445 if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
446 printf("%d: first packet out not highest priority\n",
448 rte_event_dev_dump(evdev, stdout);
451 rte_pktmbuf_free(ev.mbuf);
453 deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
455 printf("%d: error failed to deq\n", __LINE__);
456 rte_event_dev_dump(evdev, stdout);
459 if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
460 printf("%d: second packet out not lower priority\n",
462 rte_event_dev_dump(evdev, stdout);
465 rte_pktmbuf_free(ev2.mbuf);
/* Enqueue one tagged packet on directed port 0, run the scheduler, and
 * verify it arrives (with its magic seqn intact) at the directed worker
 * port. Uses 3 directed QIDs each linked 1:1 to a port. */
472 test_single_directed_packet(struct test *t)
474 const int rx_enq = 0;
475 const int wrk_enq = 2;
478 /* Create instance with 3 directed QIDs going to 3 ports */
479 if (init(t, 3, 3) < 0 ||
480 create_ports(t, 3) < 0 ||
481 create_directed_qids(t, 3, t->port) < 0)
484 if (rte_event_dev_start(evdev) < 0) {
485 printf("%d: Error with start call\n", __LINE__);
489 /************** FORWARD ****************/
490 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
491 struct rte_event ev = {
492 .op = RTE_EVENT_OP_NEW,
498 printf("%d: gen of pkt failed\n", __LINE__);
502 const uint32_t MAGIC_SEQN = 4711;
503 arp->seqn = MAGIC_SEQN;
505 /* generate pkt and enqueue */
506 err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
508 printf("%d: error failed to enqueue\n", __LINE__);
512 /* Run schedule() as dir packets may need to be re-ordered */
513 rte_service_run_iter_on_app_lcore(t->service_id, 1);
515 struct test_event_dev_stats stats;
516 err = test_event_dev_stats_get(evdev, &stats);
518 printf("%d: error failed to get stats\n", __LINE__);
522 if (stats.port_rx_pkts[rx_enq] != 1) {
523 printf("%d: error stats incorrect for directed port\n",
529 deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
531 printf("%d: error failed to deq\n", __LINE__);
535 err = test_event_dev_stats_get(evdev, &stats);
/* NOTE(review): '&&' means this only fails when the counter is neither
 * 0 nor 1 — the tolerant check appears intentional. */
536 if (stats.port_rx_pkts[wrk_enq] != 0 &&
537 stats.port_rx_pkts[wrk_enq] != 1) {
538 printf("%d: error directed stats post-dequeue\n", __LINE__);
542 if (ev.mbuf->seqn != MAGIC_SEQN) {
543 printf("%d: error magic sequence number not dequeued\n",
548 rte_pktmbuf_free(ev.mbuf);
/* Loop a single event 1000 times through a directed queue using the
 * FORWARD op, checking credits never run out: enqueue, schedule, dequeue,
 * re-mark as FORWARD, repeat. */
554 test_directed_forward_credits(struct test *t)
559 if (init(t, 1, 1) < 0 ||
560 create_ports(t, 1) < 0 ||
561 create_directed_qids(t, 1, t->port) < 0)
564 if (rte_event_dev_start(evdev) < 0) {
565 printf("%d: Error with start call\n", __LINE__);
569 struct rte_event ev = {
570 .op = RTE_EVENT_OP_NEW,
574 for (i = 0; i < 1000; i++) {
575 err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
577 printf("%d: error failed to enqueue\n", __LINE__);
580 rte_service_run_iter_on_app_lcore(t->service_id, 1);
583 deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
585 printf("%d: error failed to deq\n", __LINE__);
589 /* re-write event to be a forward, and continue looping it */
590 ev.op = RTE_EVENT_OP_FORWARD;
/* Priority ordering test on a directed (single-link) queue; the shared
 * checks live in run_prio_packet_test(). No explicit link step — directed
 * queues are linked inside create_directed_qids(). */
599 test_priority_directed(struct test *t)
601 if (init(t, 1, 1) < 0 ||
602 create_ports(t, 1) < 0 ||
603 create_directed_qids(t, 1, t->port) < 0) {
604 printf("%d: Error initializing device\n", __LINE__);
608 if (rte_event_dev_start(evdev) < 0) {
609 printf("%d: Error with start call\n", __LINE__);
613 return run_prio_packet_test(t);
/* Priority ordering test on an atomic queue: link port 0 to qid 0, then
 * run the shared priority checks. */
617 test_priority_atomic(struct test *t)
619 if (init(t, 1, 1) < 0 ||
620 create_ports(t, 1) < 0 ||
621 create_atomic_qids(t, 1) < 0) {
622 printf("%d: Error initializing device\n", __LINE__);
627 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
628 printf("%d: error mapping qid to port\n", __LINE__);
631 if (rte_event_dev_start(evdev) < 0) {
632 printf("%d: Error with start call\n", __LINE__);
636 return run_prio_packet_test(t);
/* Priority ordering test on an ordered queue; same shape as the atomic
 * variant above. */
640 test_priority_ordered(struct test *t)
642 if (init(t, 1, 1) < 0 ||
643 create_ports(t, 1) < 0 ||
644 create_ordered_qids(t, 1) < 0) {
645 printf("%d: Error initializing device\n", __LINE__);
650 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
651 printf("%d: error mapping qid to port\n", __LINE__);
654 if (rte_event_dev_start(evdev) < 0) {
655 printf("%d: Error with start call\n", __LINE__);
659 return run_prio_packet_test(t);
/* Priority ordering test on an unordered (parallel) queue; same shape as
 * the atomic variant above. */
663 test_priority_unordered(struct test *t)
665 if (init(t, 1, 1) < 0 ||
666 create_ports(t, 1) < 0 ||
667 create_unordered_qids(t, 1) < 0) {
668 printf("%d: Error initializing device\n", __LINE__);
673 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
674 printf("%d: error mapping qid to port\n", __LINE__);
677 if (rte_event_dev_start(evdev) < 0) {
678 printf("%d: Error with start call\n", __LINE__);
682 return run_prio_packet_test(t);
/* Enqueue NUM_PKTS packets on port 0, run the scheduler, and check that
 * the packets split evenly across the two queues/ports (half dequeued on
 * each). Elided extract: the per-iteration queue_id assignment that
 * alternates packets between the two QIDs is not visible here. */
686 burst_packets(struct test *t)
688 /************** CONFIG ****************/
693 /* Create instance with 2 ports and 2 queues */
694 if (init(t, 2, 2) < 0 ||
695 create_ports(t, 2) < 0 ||
696 create_atomic_qids(t, 2) < 0) {
697 printf("%d: Error initializing device\n", __LINE__);
701 /* CQ mapping to QID */
702 ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
704 printf("%d: error mapping lb qid0\n", __LINE__);
707 ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
709 printf("%d: error mapping lb qid1\n", __LINE__);
713 if (rte_event_dev_start(evdev) < 0) {
714 printf("%d: Error with start call\n", __LINE__);
718 /************** FORWARD ****************/
719 const uint32_t rx_port = 0;
720 const uint32_t NUM_PKTS = 2;
722 for (i = 0; i < NUM_PKTS; i++) {
723 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
725 printf("%d: error generating pkt\n", __LINE__);
729 struct rte_event ev = {
730 .op = RTE_EVENT_OP_NEW,
735 /* generate pkt and enqueue */
736 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
738 printf("%d: Failed to enqueue\n", __LINE__);
742 rte_service_run_iter_on_app_lcore(t->service_id, 1);
744 /* Check stats for all NUM_PKTS arrived to sched core */
745 struct test_event_dev_stats stats;
747 err = test_event_dev_stats_get(evdev, &stats);
749 printf("%d: failed to get stats\n", __LINE__);
752 if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
753 printf("%d: Sched core didn't receive all %d pkts\n",
755 rte_event_dev_dump(evdev, stdout);
763 /******** DEQ QID 1 *******/
766 p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
768 rte_pktmbuf_free(ev.mbuf);
771 if (deq_pkts != NUM_PKTS/2) {
772 printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
777 /******** DEQ QID 2 *******/
781 p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
783 rte_pktmbuf_free(ev.mbuf);
785 if (deq_pkts != NUM_PKTS/2) {
786 printf("%d: Half of NUM_PKTS didn't arrive at port 2\n",
/* Enqueue a RELEASE op with no prior NEW event and verify the scheduler
 * ignores it: all counters (rx, tx, inflight) must stay at zero. */
796 abuse_inflights(struct test *t)
798 const int rx_enq = 0;
799 const int wrk_enq = 2;
802 /* Create instance with 4 ports */
803 if (init(t, 1, 4) < 0 ||
804 create_ports(t, 4) < 0 ||
805 create_atomic_qids(t, 1) < 0) {
806 printf("%d: Error initializing device\n", __LINE__);
810 /* CQ mapping to QID */
/* NULL/0 links the port to all configured queues. */
811 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
813 printf("%d: error mapping lb qid\n", __LINE__);
818 if (rte_event_dev_start(evdev) < 0) {
819 printf("%d: Error with start call\n", __LINE__);
823 /* Enqueue op only */
824 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
826 printf("%d: Failed to enqueue\n", __LINE__);
831 rte_service_run_iter_on_app_lcore(t->service_id, 1);
833 struct test_event_dev_stats stats;
835 err = test_event_dev_stats_get(evdev, &stats);
837 printf("%d: failed to get stats\n", __LINE__);
841 if (stats.rx_pkts != 0 ||
842 stats.tx_pkts != 0 ||
843 stats.port_inflight[wrk_enq] != 0) {
844 printf("%d: Sched core didn't handle pkt as expected\n",
/* Exhaustive xstats test: checks the expected number of device/port/queue
 * stats, enqueues 3 packets, validates the counter values, then verifies
 * each xstats_reset() zeroes the resettable counters while leaving the
 * ring-occupancy style gauges (ring free/used) at their steady values.
 * NOTE(review): extract is elided — several error-return branches and the
 * 'fails' accounting near the end are not fully visible. */
854 xstats_tests(struct test *t)
856 const int wrk_enq = 2;
859 /* Create instance with 4 ports */
860 if (init(t, 1, 4) < 0 ||
861 create_ports(t, 4) < 0 ||
862 create_atomic_qids(t, 1) < 0) {
863 printf("%d: Error initializing device\n", __LINE__);
867 /* CQ mapping to QID */
868 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
870 printf("%d: error mapping lb qid\n", __LINE__);
875 if (rte_event_dev_start(evdev) < 0) {
876 printf("%d: Error with start call\n", __LINE__);
880 const uint32_t XSTATS_MAX = 1024;
883 uint32_t ids[XSTATS_MAX];
884 uint64_t values[XSTATS_MAX];
885 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
887 for (i = 0; i < XSTATS_MAX; i++)
890 /* Device names / values */
891 int ret = rte_event_dev_xstats_names_get(evdev,
892 RTE_EVENT_DEV_XSTATS_DEVICE,
893 0, xstats_names, ids, XSTATS_MAX);
895 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
898 ret = rte_event_dev_xstats_get(evdev,
899 RTE_EVENT_DEV_XSTATS_DEVICE,
900 0, ids, values, ret);
902 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
906 /* Port names / values */
907 ret = rte_event_dev_xstats_names_get(evdev,
908 RTE_EVENT_DEV_XSTATS_PORT, 0,
909 xstats_names, ids, XSTATS_MAX);
911 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
914 ret = rte_event_dev_xstats_get(evdev,
915 RTE_EVENT_DEV_XSTATS_PORT, 0,
918 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
922 /* Queue names / values */
923 ret = rte_event_dev_xstats_names_get(evdev,
924 RTE_EVENT_DEV_XSTATS_QUEUE,
925 0, xstats_names, ids, XSTATS_MAX);
927 printf("%d: expected 17 stats, got return %d\n", __LINE__, ret);
931 /* NEGATIVE TEST: with wrong queue passed, 0 stats should be returned */
/* Queue 1 does not exist (only 1 queue configured) — must give -EINVAL. */
932 ret = rte_event_dev_xstats_get(evdev,
933 RTE_EVENT_DEV_XSTATS_QUEUE,
934 1, ids, values, ret);
935 if (ret != -EINVAL) {
936 printf("%d: expected 0 stats, got return %d\n", __LINE__, ret);
940 ret = rte_event_dev_xstats_get(evdev,
941 RTE_EVENT_DEV_XSTATS_QUEUE,
942 0, ids, values, ret);
944 printf("%d: expected 17 stats, got return %d\n", __LINE__, ret);
948 /* enqueue packets to check values */
949 for (i = 0; i < 3; i++) {
951 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
953 printf("%d: gen of pkt failed\n", __LINE__);
/* NOTE(review): qid[i] with only one queue configured — elided lines
 * may clamp this; verify against the full source. */
956 ev.queue_id = t->qid[i];
957 ev.op = RTE_EVENT_OP_NEW;
962 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
964 printf("%d: Failed to enqueue\n", __LINE__);
969 rte_service_run_iter_on_app_lcore(t->service_id, 1);
971 /* Device names / values */
972 int num_stats = rte_event_dev_xstats_names_get(evdev,
973 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
974 xstats_names, ids, XSTATS_MAX);
977 ret = rte_event_dev_xstats_get(evdev,
978 RTE_EVENT_DEV_XSTATS_DEVICE,
979 0, ids, values, num_stats);
/* Expected device counters after 3 NEW events + 1 schedule call. */
980 static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
981 for (i = 0; (signed int)i < ret; i++) {
982 if (expected[i] != values[i]) {
984 "%d Error xstat %d (id %d) %s : %"PRIu64
985 ", expect %"PRIu64"\n",
986 __LINE__, i, ids[i], xstats_names[i].name,
987 values[i], expected[i]);
992 ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
995 /* ensure reset statistics are zero-ed */
996 static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
997 ret = rte_event_dev_xstats_get(evdev,
998 RTE_EVENT_DEV_XSTATS_DEVICE,
999 0, ids, values, num_stats);
1000 for (i = 0; (signed int)i < ret; i++) {
1001 if (expected_zero[i] != values[i]) {
1003 "%d Error, xstat %d (id %d) %s : %"PRIu64
1004 ", expect %"PRIu64"\n",
1005 __LINE__, i, ids[i], xstats_names[i].name,
1006 values[i], expected_zero[i]);
1011 /* port reset checks */
1012 num_stats = rte_event_dev_xstats_names_get(evdev,
1013 RTE_EVENT_DEV_XSTATS_PORT, 0,
1014 xstats_names, ids, XSTATS_MAX);
1017 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1018 0, ids, values, num_stats);
1020 static const uint64_t port_expected[] = {
1025 0 /* avg pkt cycles */,
1027 0 /* rx ring used */,
1028 4096 /* rx ring free */,
1029 0 /* cq ring used */,
1030 32 /* cq ring free */,
1031 0 /* dequeue calls */,
1032 /* 10 dequeue burst buckets */
1036 if (ret != RTE_DIM(port_expected)) {
1038 "%s %d: wrong number of port stats (%d), expected %zu\n",
1039 __func__, __LINE__, ret, RTE_DIM(port_expected));
1042 for (i = 0; (signed int)i < ret; i++) {
1043 if (port_expected[i] != values[i]) {
1045 "%s : %d: Error stat %s is %"PRIu64
1046 ", expected %"PRIu64"\n",
1047 __func__, __LINE__, xstats_names[i].name,
1048 values[i], port_expected[i]);
1053 ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1056 /* ensure reset statistics are zero-ed */
/* Ring free/used are gauges, not counters — reset leaves them intact. */
1057 static const uint64_t port_expected_zero[] = {
1062 0 /* avg pkt cycles */,
1064 0 /* rx ring used */,
1065 4096 /* rx ring free */,
1066 0 /* cq ring used */,
1067 32 /* cq ring free */,
1068 0 /* dequeue calls */,
1069 /* 10 dequeue burst buckets */
1073 ret = rte_event_dev_xstats_get(evdev,
1074 RTE_EVENT_DEV_XSTATS_PORT,
1075 0, ids, values, num_stats);
1076 for (i = 0; (signed int)i < ret; i++) {
1077 if (port_expected_zero[i] != values[i]) {
1079 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1080 ", expect %"PRIu64"\n",
1081 __LINE__, i, ids[i], xstats_names[i].name,
1082 values[i], port_expected_zero[i]);
1087 /* QUEUE STATS TESTS */
1088 num_stats = rte_event_dev_xstats_names_get(evdev,
1089 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1090 xstats_names, ids, XSTATS_MAX);
1091 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1092 0, ids, values, num_stats);
1094 printf("xstats get returned %d\n", ret);
1097 if ((unsigned int)ret > XSTATS_MAX)
1098 printf("%s %d: more xstats available than space\n",
1099 __func__, __LINE__);
1101 static const uint64_t queue_expected[] = {
1107 0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
1108 /* QID-to-Port: pinned_flows, packets */
1114 for (i = 0; (signed int)i < ret; i++) {
1115 if (queue_expected[i] != values[i]) {
1117 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1118 ", expect %"PRIu64"\n",
1119 __LINE__, i, ids[i], xstats_names[i].name,
1120 values[i], queue_expected[i]);
1125 /* Reset the queue stats here */
1126 ret = rte_event_dev_xstats_reset(evdev,
1127 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1131 /* Verify that the resetable stats are reset, and others are not */
1132 static const uint64_t queue_expected_zero[] = {
1138 0, 0, 0, 0, /* 4 iq used */
1139 /* QID-to-Port: pinned_flows, packets */
1146 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1147 ids, values, num_stats);
1149 for (i = 0; (signed int)i < ret; i++) {
1150 if (queue_expected_zero[i] != values[i]) {
1152 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1153 ", expect %"PRIu64"\n",
1154 __LINE__, i, ids[i], xstats_names[i].name,
1155 values[i], queue_expected_zero[i]);
1160 printf("%d : %d of values were not as expected above\n",
1169 rte_event_dev_dump(0, stdout);
/* Negative tests: querying xstats names for an out-of-range port or queue
 * id (UINT8_MAX-1) must return 0 stats rather than crash or over-read. */
1176 xstats_id_abuse_tests(struct test *t)
1179 const uint32_t XSTATS_MAX = 1024;
1180 const uint32_t link_port = 2;
1182 uint32_t ids[XSTATS_MAX];
1183 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1185 /* Create instance with 4 ports */
1186 if (init(t, 1, 4) < 0 ||
1187 create_ports(t, 4) < 0 ||
1188 create_atomic_qids(t, 1) < 0) {
1189 printf("%d: Error initializing device\n", __LINE__);
1193 err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0);
1195 printf("%d: error mapping lb qid\n", __LINE__);
1199 if (rte_event_dev_start(evdev) < 0) {
1200 printf("%d: Error with start call\n", __LINE__);
1204 /* no test for device, as it ignores the port/q number */
1205 int num_stats = rte_event_dev_xstats_names_get(evdev,
1206 RTE_EVENT_DEV_XSTATS_PORT,
1207 UINT8_MAX-1, xstats_names, ids,
1209 if (num_stats != 0) {
1210 printf("%d: expected %d stats, got return %d\n", __LINE__,
1215 num_stats = rte_event_dev_xstats_names_get(evdev,
1216 RTE_EVENT_DEV_XSTATS_QUEUE,
1217 UINT8_MAX-1, xstats_names, ids,
1219 if (num_stats != 0) {
1220 printf("%d: expected %d stats, got return %d\n", __LINE__,
/* Stress port reconfiguration: 32 iterations of queue setup, port setup,
 * link, start, enqueue/schedule/dequeue one packet, then stop — verifying
 * credits survive repeated reconfiguration. The device is left running on
 * the final iteration so the shared cleanup can stop it. */
1233 port_reconfig_credits(struct test *t)
1235 if (init(t, 1, 1) < 0) {
1236 printf("%d: Error initializing device\n", __LINE__);
1241 const uint32_t NUM_ITERS = 32;
1242 for (i = 0; i < NUM_ITERS; i++) {
1243 const struct rte_event_queue_conf conf = {
1244 .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1245 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1246 .nb_atomic_flows = 1024,
1247 .nb_atomic_order_sequences = 1024,
1249 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1250 printf("%d: error creating qid\n", __LINE__);
1255 static const struct rte_event_port_conf port_conf = {
1256 .new_event_threshold = 128,
1257 .dequeue_depth = 32,
1258 .enqueue_depth = 64,
1260 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1261 printf("%d Error setting up port\n", __LINE__);
1265 int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
1267 printf("%d: error mapping lb qid\n", __LINE__);
1271 if (rte_event_dev_start(evdev) < 0) {
1272 printf("%d: Error with start call\n", __LINE__);
1276 const uint32_t NPKTS = 1;
1278 for (j = 0; j < NPKTS; j++) {
1279 struct rte_event ev;
1280 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1282 printf("%d: gen of pkt failed\n", __LINE__);
1285 ev.queue_id = t->qid[0];
1286 ev.op = RTE_EVENT_OP_NEW;
1288 int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
1290 printf("%d: Failed to enqueue\n", __LINE__);
1291 rte_event_dev_dump(0, stdout);
1296 rte_service_run_iter_on_app_lcore(t->service_id, 1);
1298 struct rte_event ev[NPKTS];
1299 int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
1302 printf("%d error; no packet dequeued\n", __LINE__);
1304 /* let cleanup below stop the device on last iter */
1305 if (i != NUM_ITERS-1)
1306 rte_event_dev_stop(evdev);
/* Reconfigure a port's linkage: link port 0 to the LB queue, unlink it,
 * re-link it, and additionally link port 1 to the same queue; the device
 * must still start cleanly afterwards. */
1317 port_single_lb_reconfig(struct test *t)
1319 if (init(t, 2, 2) < 0) {
1320 printf("%d: Error initializing device\n", __LINE__);
1324 static const struct rte_event_queue_conf conf_lb_atomic = {
1325 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1326 .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1327 .nb_atomic_flows = 1024,
1328 .nb_atomic_order_sequences = 1024,
1330 if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
1331 printf("%d: error creating qid\n", __LINE__);
1335 static const struct rte_event_queue_conf conf_single_link = {
1336 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1337 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
1339 if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
1340 printf("%d: error creating qid\n", __LINE__);
1344 struct rte_event_port_conf port_conf = {
1345 .new_event_threshold = 128,
1346 .dequeue_depth = 32,
1347 .enqueue_depth = 64,
1349 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1350 printf("%d Error setting up port\n", __LINE__);
1353 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
1354 printf("%d Error setting up port\n", __LINE__);
1358 /* link port to lb queue */
1359 uint8_t queue_id = 0;
1360 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1361 printf("%d: error creating link for qid\n", __LINE__);
1365 int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
1367 printf("%d: Error unlinking lb port\n", __LINE__);
1372 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1373 printf("%d: error creating link for qid\n", __LINE__);
1378 int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
1380 printf("%d: error mapping lb qid\n", __LINE__);
1384 if (rte_event_dev_start(evdev) < 0) {
1385 printf("%d: Error with start call\n", __LINE__);
/* Fuzz-style robustness test: iterate every xstats mode (DEVICE, PORT,
 * QUEUE) crossed with every possible uint8_t id and call both the names
 * and values getters — return values are deliberately ignored; the test
 * passes if nothing crashes. */
1397 xstats_brute_force(struct test *t)
1400 const uint32_t XSTATS_MAX = 1024;
1401 uint32_t ids[XSTATS_MAX];
1402 uint64_t values[XSTATS_MAX];
1403 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1406 /* Create instance with 4 ports */
1407 if (init(t, 1, 4) < 0 ||
1408 create_ports(t, 4) < 0 ||
1409 create_atomic_qids(t, 1) < 0) {
1410 printf("%d: Error initializing device\n", __LINE__);
1414 int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1416 printf("%d: error mapping lb qid\n", __LINE__);
1420 if (rte_event_dev_start(evdev) < 0) {
1421 printf("%d: Error with start call\n", __LINE__);
1425 for (i = 0; i < XSTATS_MAX; i++)
/* Modes are contiguous enum values starting at DEVICE. */
1428 for (i = 0; i < 3; i++) {
1429 uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i;
1431 for (j = 0; j < UINT8_MAX; j++) {
1432 rte_event_dev_xstats_names_get(evdev, mode,
1433 j, xstats_names, ids, XSTATS_MAX);
1435 rte_event_dev_xstats_get(evdev, mode, j, ids,
1436 values, XSTATS_MAX);
/*
 * Verify xstats ids and values at device, port and queue scope, then
 * verify rte_event_dev_xstats_reset() zeroes the resettable counters.
 *
 * The expected stat counts and id offsets (NUM_DEV_STATS, NUM_PORT_STATS,
 * PORT_OFF, NUM_Q_STATS, QUEUE_OFF) are hard-coded against the sw PMD's
 * current stat layout, so this test must be updated whenever a stat is
 * added (the in-code comments below say as much).
 *
 * Fixes vs. previous revision:
 *  - the device- and port-scope "value incorrect" printfs passed the
 *    stat *id* (int) for a %d where the mismatching 64-bit *value* was
 *    intended — a printf format/argument mismatch; they now print val
 *    with %"PRIu64".
 *  - the queue-scope count error message hard-coded "expected 21 stats"
 *    although the check compares against NUM_Q_STATS (17); it now
 *    prints NUM_Q_STATS.
 */
1448 xstats_id_reset_tests(struct test *t)
1450 const int wrk_enq = 2;
1453 /* Create instance with 4 ports */
1454 if (init(t, 1, 4) < 0 ||
1455 create_ports(t, 4) < 0 ||
1456 create_atomic_qids(t, 1) < 0) {
1457 printf("%d: Error initializing device\n", __LINE__);
1461 /* CQ mapping to QID */
1462 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
1464 printf("%d: error mapping lb qid\n", __LINE__);
1468 if (rte_event_dev_start(evdev) < 0) {
1469 printf("%d: Error with start call\n", __LINE__);
1473 #define XSTATS_MAX 1024
1476 uint32_t ids[XSTATS_MAX];
1477 uint64_t values[XSTATS_MAX];
1478 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1480 for (i = 0; i < XSTATS_MAX; i++)
1483 #define NUM_DEV_STATS 6
1484 /* Device names / values */
1485 int num_stats = rte_event_dev_xstats_names_get(evdev,
1486 RTE_EVENT_DEV_XSTATS_DEVICE,
1487 0, xstats_names, ids, XSTATS_MAX);
1488 if (num_stats != NUM_DEV_STATS) {
1489 printf("%d: expected %d stats, got return %d\n", __LINE__,
1490 NUM_DEV_STATS, num_stats);
1493 ret = rte_event_dev_xstats_get(evdev,
1494 RTE_EVENT_DEV_XSTATS_DEVICE,
1495 0, ids, values, num_stats);
1496 if (ret != NUM_DEV_STATS) {
1497 printf("%d: expected %d stats, got return %d\n", __LINE__,
1498 NUM_DEV_STATS, ret);
/* enqueue NPKTS new events so the device counters become non-zero */
1503 for (i = 0; i < NPKTS; i++) {
1504 struct rte_event ev;
1505 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1507 printf("%d: gen of pkt failed\n", __LINE__);
1510 ev.queue_id = t->qid[i];
1511 ev.op = RTE_EVENT_OP_NEW;
1515 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1517 printf("%d: Failed to enqueue\n", __LINE__);
1522 rte_service_run_iter_on_app_lcore(t->service_id, 1);
1524 static const char * const dev_names[] = {
1525 "dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
1526 "dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
1528 uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
1529 for (i = 0; (int)i < ret; i++) {
1531 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1535 printf("%d: %s id incorrect, expected %d got %d\n",
1536 __LINE__, dev_names[i], i, id);
1539 if (val != dev_expected[i]) {
/* FIX: print the mismatching value, not the stat id */
1540 printf("%d: %s value incorrect, expected %"
1541 PRIu64" got %"PRIu64"\n", __LINE__, dev_names[i],
1542 dev_expected[i], val);
1546 int reset_ret = rte_event_dev_xstats_reset(evdev,
1547 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
1551 printf("%d: failed to reset successfully\n", __LINE__);
1554 dev_expected[i] = 0;
1555 /* check value again */
1556 val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], 0);
1557 if (val != dev_expected[i]) {
1558 printf("%d: %s value incorrect, expected %"PRIu64
1559 " got %"PRIu64"\n", __LINE__, dev_names[i],
1560 dev_expected[i], val);
1565 /* 48 is stat offset from start of the devices whole xstats.
1566 * This WILL break every time we add a statistic to a port
1567 * or the device, but there is no other way to test
1570 /* num stats for the tested port. CQ size adds more stats to a port */
1571 #define NUM_PORT_STATS 21
1572 /* the port to test. */
1574 num_stats = rte_event_dev_xstats_names_get(evdev,
1575 RTE_EVENT_DEV_XSTATS_PORT, PORT,
1576 xstats_names, ids, XSTATS_MAX);
1577 if (num_stats != NUM_PORT_STATS) {
1578 printf("%d: expected %d stats, got return %d\n",
1579 __LINE__, NUM_PORT_STATS, num_stats);
1582 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT,
1583 ids, values, num_stats);
1585 if (ret != NUM_PORT_STATS) {
1586 printf("%d: expected %d stats, got return %d\n",
1587 __LINE__, NUM_PORT_STATS, ret);
1590 static const char * const port_names[] = {
1595 "port_2_avg_pkt_cycles",
1597 "port_2_rx_ring_used",
1598 "port_2_rx_ring_free",
1599 "port_2_cq_ring_used",
1600 "port_2_cq_ring_free",
1601 "port_2_dequeue_calls",
1602 "port_2_dequeues_returning_0",
1603 "port_2_dequeues_returning_1-4",
1604 "port_2_dequeues_returning_5-8",
1605 "port_2_dequeues_returning_9-12",
1606 "port_2_dequeues_returning_13-16",
1607 "port_2_dequeues_returning_17-20",
1608 "port_2_dequeues_returning_21-24",
1609 "port_2_dequeues_returning_25-28",
1610 "port_2_dequeues_returning_29-32",
1611 "port_2_dequeues_returning_33-36",
1613 uint64_t port_expected[] = {
1617 NPKTS, /* inflight */
1618 0, /* avg pkt cycles */
1620 0, /* rx ring used */
1621 4096, /* rx ring free */
1622 NPKTS, /* cq ring used */
1623 25, /* cq ring free */
1624 0, /* dequeue zero calls */
1625 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1628 uint64_t port_expected_zero[] = {
1632 NPKTS, /* inflight */
1633 0, /* avg pkt cycles */
1635 0, /* rx ring used */
1636 4096, /* rx ring free */
1637 NPKTS, /* cq ring used */
1638 25, /* cq ring free */
1639 0, /* dequeue zero calls */
1640 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1643 if (RTE_DIM(port_expected) != NUM_PORT_STATS ||
1644 RTE_DIM(port_names) != NUM_PORT_STATS) {
1645 printf("%d: port array of wrong size\n", __LINE__);
1650 for (i = 0; (int)i < ret; i++) {
1652 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1655 if (id != i + PORT_OFF) {
1656 printf("%d: %s id incorrect, expected %d got %d\n",
1657 __LINE__, port_names[i], i+PORT_OFF,
1661 if (val != port_expected[i]) {
/* FIX: print the mismatching value, not the stat id */
1662 printf("%d: %s value incorrect, expected %"PRIu64
1663 " got %"PRIu64"\n", __LINE__, port_names[i],
1664 port_expected[i], val);
1668 int reset_ret = rte_event_dev_xstats_reset(evdev,
1669 RTE_EVENT_DEV_XSTATS_PORT, PORT,
1673 printf("%d: failed to reset successfully\n", __LINE__);
1676 /* check value again */
1677 val = rte_event_dev_xstats_by_name_get(evdev, port_names[i], 0);
1678 if (val != port_expected_zero[i]) {
1679 printf("%d: %s value incorrect, expected %"PRIu64
1680 " got %"PRIu64"\n", __LINE__, port_names[i],
1681 port_expected_zero[i], val);
1688 /* num queue stats */
1689 #define NUM_Q_STATS 17
1690 /* queue offset from start of the devices whole xstats.
1691 * This will break every time we add a statistic to a device/port/queue
1693 #define QUEUE_OFF 90
1694 const uint32_t queue = 0;
1695 num_stats = rte_event_dev_xstats_names_get(evdev,
1696 RTE_EVENT_DEV_XSTATS_QUEUE, queue,
1697 xstats_names, ids, XSTATS_MAX);
1698 if (num_stats != NUM_Q_STATS) {
1699 printf("%d: expected %d stats, got return %d\n",
1700 __LINE__, NUM_Q_STATS, num_stats);
1703 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1704 queue, ids, values, num_stats);
1705 if (ret != NUM_Q_STATS) {
/* FIX: report the real expected count, not a stale hard-coded "21" */
1706 printf("%d: expected %d stats, got return %d\n", __LINE__,
NUM_Q_STATS, ret);
1709 static const char * const queue_names[] = {
1719 "qid_0_port_0_pinned_flows",
1720 "qid_0_port_0_packets",
1721 "qid_0_port_1_pinned_flows",
1722 "qid_0_port_1_packets",
1723 "qid_0_port_2_pinned_flows",
1724 "qid_0_port_2_packets",
1725 "qid_0_port_3_pinned_flows",
1726 "qid_0_port_3_packets",
1728 uint64_t queue_expected[] = {
1738 /* QID-to-Port: pinned_flows, packets */
1744 uint64_t queue_expected_zero[] = {
1754 /* QID-to-Port: pinned_flows, packets */
1760 if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
1761 RTE_DIM(queue_expected_zero) != NUM_Q_STATS ||
1762 RTE_DIM(queue_names) != NUM_Q_STATS) {
1763 printf("%d : queue array of wrong size\n", __LINE__);
1768 for (i = 0; (int)i < ret; i++) {
1770 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1773 if (id != i + QUEUE_OFF) {
1774 printf("%d: %s id incorrect, expected %d got %d\n",
1775 __LINE__, queue_names[i], i+QUEUE_OFF,
1779 if (val != queue_expected[i]) {
1780 printf("%d: %d: %s value , expected %"PRIu64
1781 " got %"PRIu64"\n", i, __LINE__,
1782 queue_names[i], queue_expected[i], val);
1786 int reset_ret = rte_event_dev_xstats_reset(evdev,
1787 RTE_EVENT_DEV_XSTATS_QUEUE,
1790 printf("%d: failed to reset successfully\n", __LINE__);
1793 /* check value again */
1794 val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i],
1796 if (val != queue_expected_zero[i]) {
1797 printf("%d: %s value incorrect, expected %"PRIu64
1798 " got %"PRIu64"\n", __LINE__, queue_names[i],
1799 queue_expected_zero[i], val);
/*
 * Regression test: an ORDERED queue must survive being set up twice
 * with the same config (i.e. reconfiguration before device start must
 * not fail). Uses a minimal 1-queue/1-port instance.
 */
1815 ordered_reconfigure(struct test *t)
1817 if (init(t, 1, 1) < 0 ||
1818 create_ports(t, 1) < 0) {
1819 printf("%d: Error initializing device\n", __LINE__);
1823 const struct rte_event_queue_conf conf = {
1824 .schedule_type = RTE_SCHED_TYPE_ORDERED,
1825 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1826 .nb_atomic_flows = 1024,
1827 .nb_atomic_order_sequences = 1024,
/* first setup of queue 0 */
1830 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1831 printf("%d: error creating qid\n", __LINE__);
/* deliberate second setup of the same queue — the actual test */
1835 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1836 printf("%d: error creating qid, for 2nd time\n", __LINE__);
/* NOTE(review): link return value is not checked here — confirm
 * intentional, other tests in this file do check it. */
1840 rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1841 if (rte_event_dev_start(evdev) < 0) {
1842 printf("%d: Error with start call\n", __LINE__);
/*
 * Verify QID-priority scheduling: enqueue one packet to each of 3 QIDs
 * whose priority increases with index (0 == highest), then check that
 * dequeue order follows QID priority rather than enqueue order.
 */
1854 qid_priorities(struct test *t)
1856 /* Test works by having a CQ with enough empty space for all packets,
1857 * and enqueueing 3 packets to 3 QIDs. They must return based on the
1858 * priority of the QID, not the ingress order, to pass the test
1861 /* Create instance with 1 ports, and 3 qids */
1862 if (init(t, 3, 1) < 0 ||
1863 create_ports(t, 1) < 0) {
1864 printf("%d: Error initializing device\n", __LINE__);
1868 for (i = 0; i < 3; i++) {
1870 const struct rte_event_queue_conf conf = {
1871 .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1872 /* increase priority (0 == highest), as we go */
1873 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
1874 .nb_atomic_flows = 1024,
1875 .nb_atomic_order_sequences = 1024,
1878 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
1879 printf("%d: error creating qid %d\n", __LINE__, i);
1885 /* map all QIDs to port */
1886 rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1888 if (rte_event_dev_start(evdev) < 0) {
1889 printf("%d: Error with start call\n", __LINE__);
1893 /* enqueue 3 packets, setting seqn and QID to check priority */
1894 for (i = 0; i < 3; i++) {
1895 struct rte_event ev;
1896 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1898 printf("%d: gen of pkt failed\n", __LINE__);
1901 ev.queue_id = t->qid[i];
1902 ev.op = RTE_EVENT_OP_NEW;
1906 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1908 printf("%d: Failed to enqueue\n", __LINE__);
/* run one iteration of the sw PMD's scheduler service */
1913 rte_service_run_iter_on_app_lcore(t->service_id, 1);
1915 /* dequeue packets, verify priority was upheld */
1916 struct rte_event ev[32];
1918 rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
1919 if (deq_pkts != 3) {
1920 printf("%d: failed to deq packets\n", __LINE__);
1921 rte_event_dev_dump(evdev, stdout);
/* seqn was set to i on enqueue; highest-priority QID (last enqueued)
 * must come out first, so expected order is 2, 1, 0 */
1924 for (i = 0; i < 3; i++) {
1925 if (ev[i].mbuf->seqn != 2-i) {
1927 "%d: qid priority test: seqn %d incorrectly prioritized\n",
/*
 * Verify atomic-flow load balancing: with one atomic QID mapped to
 * ports 1-3, enqueue a flow pattern from port 0 and check that each
 * flow is pinned to one CQ, giving the expected per-port inflight
 * counts (port1=4, port2=2, port3=3 for the pattern below).
 */
1937 load_balancing(struct test *t)
1939 const int rx_enq = 0;
1943 if (init(t, 1, 4) < 0 ||
1944 create_ports(t, 4) < 0 ||
1945 create_atomic_qids(t, 1) < 0) {
1946 printf("%d: Error initializing device\n", __LINE__);
1950 for (i = 0; i < 3; i++) {
1951 /* map port 1 - 3 inclusive */
1952 if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
1954 printf("%d: error mapping qid to port %d\n",
1960 if (rte_event_dev_start(evdev) < 0) {
1961 printf("%d: Error with start call\n", __LINE__);
1965 /************** FORWARD ****************/
1967 * Create a set of flows that test the load-balancing operation of the
1968 * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
1969 * with a new flow, which should be sent to the 3rd mapped CQ
/* flow ids per packet: flows 0/1 arrive first, flow 2 arrives later */
1971 static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
1973 for (i = 0; i < RTE_DIM(flows); i++) {
1974 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1976 printf("%d: gen of pkt failed\n", __LINE__);
1980 struct rte_event ev = {
1981 .op = RTE_EVENT_OP_NEW,
1982 .queue_id = t->qid[0],
1983 .flow_id = flows[i],
1986 /* generate pkt and enqueue */
1987 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1989 printf("%d: Failed to enqueue\n", __LINE__);
1994 rte_service_run_iter_on_app_lcore(t->service_id, 1);
1996 struct test_event_dev_stats stats;
1997 err = test_event_dev_stats_get(evdev, &stats);
1999 printf("%d: failed to get stats\n", __LINE__);
/* pattern above has 4 pkts of flow 0, 2 of flow 1, 3 of flow 2 */
2003 if (stats.port_inflight[1] != 4) {
2004 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2008 if (stats.port_inflight[2] != 2) {
2009 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2013 if (stats.port_inflight[3] != 3) {
2014 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
/*
 * Verify flow-migration ("history") behaviour of atomic load balancing:
 * once all packets of a flow complete, the flow becomes unpinned and
 * new packets for it may be balanced to a different, emptier CQ.
 */
2024 load_balancing_history(struct test *t)
2026 struct test_event_dev_stats stats = {0};
2027 const int rx_enq = 0;
2031 /* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
2032 if (init(t, 1, 4) < 0 ||
2033 create_ports(t, 4) < 0 ||
2034 create_atomic_qids(t, 1) < 0)
2037 /* CQ mapping to QID */
2038 if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
2039 printf("%d: error mapping port 1 qid\n", __LINE__);
2042 if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
2043 printf("%d: error mapping port 2 qid\n", __LINE__);
2046 if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
2047 printf("%d: error mapping port 3 qid\n", __LINE__);
2050 if (rte_event_dev_start(evdev) < 0) {
2051 printf("%d: Error with start call\n", __LINE__);
2056 * Create a set of flows that test the load-balancing operation of the
2057 * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
2058 * the packet from CQ 0, send in a new set of flows. Ensure that:
2059 * 1. The new flow 3 gets into the empty CQ0
2060 * 2. packets for existing flow gets added into CQ1
2061 * 3. Next flow 0 pkt is now onto CQ2, since CQ0 and CQ1 now contain
2062 * more outstanding pkts
2064 * This test makes sure that when a flow ends (i.e. all packets
2065 * have been completed for that flow), that the flow can be moved
2066 * to a different CQ when new packets come in for that flow.
/* phase 1: seed CQs with flows 0, 1, 1, 2 */
2068 static uint32_t flows1[] = {0, 1, 1, 2};
2070 for (i = 0; i < RTE_DIM(flows1); i++) {
2071 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2072 struct rte_event ev = {
2073 .flow_id = flows1[i],
2074 .op = RTE_EVENT_OP_NEW,
2075 .queue_id = t->qid[0],
2076 .event_type = RTE_EVENT_TYPE_CPU,
2077 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2082 printf("%d: gen of pkt failed\n", __LINE__);
/* mirror flow id into the mbuf RSS hash for later verification */
2085 arp->hash.rss = flows1[i];
2086 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2088 printf("%d: Failed to enqueue\n", __LINE__);
2093 /* call the scheduler */
2094 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2096 /* Dequeue the flow 0 packet from port 1, so that we can then drop */
2097 struct rte_event ev;
2098 if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
2099 printf("%d: failed to dequeue\n", __LINE__);
2102 if (ev.mbuf->hash.rss != flows1[0]) {
2103 printf("%d: unexpected flow received\n", __LINE__);
2107 /* drop the flow 0 packet from port 1 */
2108 rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
2110 /* call the scheduler */
2111 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2114 * Set up the next set of flows, first a new flow to fill up
2115 * CQ 0, so that the next flow 0 packet should go to CQ2
2117 static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
2119 for (i = 0; i < RTE_DIM(flows2); i++) {
2120 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2121 struct rte_event ev = {
2122 .flow_id = flows2[i],
2123 .op = RTE_EVENT_OP_NEW,
2124 .queue_id = t->qid[0],
2125 .event_type = RTE_EVENT_TYPE_CPU,
2126 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2131 printf("%d: gen of pkt failed\n", __LINE__);
2134 arp->hash.rss = flows2[i];
2136 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2138 printf("%d: Failed to enqueue\n", __LINE__);
2144 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2146 err = test_event_dev_stats_get(evdev, &stats);
2148 printf("%d:failed to get stats\n", __LINE__);
2153 * Now check the resulting inflights on each port.
2155 if (stats.port_inflight[1] != 3) {
2156 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2158 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2159 (unsigned int)stats.port_inflight[1],
2160 (unsigned int)stats.port_inflight[2],
2161 (unsigned int)stats.port_inflight[3]);
2164 if (stats.port_inflight[2] != 4) {
2165 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2167 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2168 (unsigned int)stats.port_inflight[1],
2169 (unsigned int)stats.port_inflight[2],
2170 (unsigned int)stats.port_inflight[3]);
2173 if (stats.port_inflight[3] != 2) {
2174 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2176 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2177 (unsigned int)stats.port_inflight[1],
2178 (unsigned int)stats.port_inflight[2],
2179 (unsigned int)stats.port_inflight[3]);
/* drain and release all remaining events so teardown is clean */
2183 for (i = 1; i <= 3; i++) {
2184 struct rte_event ev;
2185 while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
2186 rte_event_enqueue_burst(evdev, i, &release_ev, 1);
2188 rte_service_run_iter_on_app_lcore(t->service_id, 1);
/*
 * Verify handling of an event enqueued with an out-of-range queue_id:
 * the enqueue itself succeeds, but the scheduler must drop the event,
 * counting it in port_rx_dropped[0] (exactly once, not also in the
 * device-level rx_dropped) and leaving port inflight at 0.
 */
2195 invalid_qid(struct test *t)
2197 struct test_event_dev_stats stats;
2198 const int rx_enq = 0;
2202 if (init(t, 1, 4) < 0 ||
2203 create_ports(t, 4) < 0 ||
2204 create_atomic_qids(t, 1) < 0) {
2205 printf("%d: Error initializing device\n", __LINE__);
2209 /* CQ mapping to QID */
2210 for (i = 0; i < 4; i++) {
2211 err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
2214 printf("%d: error mapping port 1 qid\n", __LINE__);
2219 if (rte_event_dev_start(evdev) < 0) {
2220 printf("%d: Error with start call\n", __LINE__);
2225 * Send in a packet with an invalid qid to the scheduler.
2226 * We should see the packed enqueued OK, but the inflights for
2227 * that packet should not be incremented, and the rx_dropped
2228 * should be incremented.
/* offset of 20 pushes queue_id past the single configured QID */
2230 static uint32_t flows1[] = {20};
2232 for (i = 0; i < RTE_DIM(flows1); i++) {
2233 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2235 printf("%d: gen of pkt failed\n", __LINE__);
2239 struct rte_event ev = {
2240 .op = RTE_EVENT_OP_NEW,
2241 .queue_id = t->qid[0] + flows1[i],
2245 /* generate pkt and enqueue */
2246 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2248 printf("%d: Failed to enqueue\n", __LINE__);
2253 /* call the scheduler */
2254 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2256 err = test_event_dev_stats_get(evdev, &stats);
2258 printf("%d: failed to get stats\n", __LINE__);
2263 * Now check the resulting inflights on the port, and the rx_dropped.
/* NOTE(review): these messages say "port 1" but the checks index
 * port 0 — likely copy/paste; confirm and fix the strings upstream. */
2265 if (stats.port_inflight[0] != 0) {
2266 printf("%d:%s: port 1 inflight count not correct\n", __LINE__,
2268 rte_event_dev_dump(evdev, stdout);
2271 if (stats.port_rx_dropped[0] != 1) {
2272 printf("%d:%s: port 1 drops\n", __LINE__, __func__);
2273 rte_event_dev_dump(evdev, stdout);
2276 /* each packet drop should only be counted in one place - port or dev */
2277 if (stats.rx_dropped != 0) {
2278 printf("%d:%s: port 1 dropped count not correct\n", __LINE__,
2280 rte_event_dev_dump(evdev, stdout);
/*
 * End-to-end smoke test for one packet: enqueue a single NEW event
 * carrying a magic sequence number, schedule it, verify the device
 * stats (1 rx, 1 tx, 1 inflight on the worker port), dequeue it,
 * release it, and verify the inflight count returns to zero.
 */
2289 single_packet(struct test *t)
2291 const uint32_t MAGIC_SEQN = 7321;
2292 struct rte_event ev;
2293 struct test_event_dev_stats stats;
2294 const int rx_enq = 0;
2295 const int wrk_enq = 2;
2298 /* Create instance with 4 ports */
2299 if (init(t, 1, 4) < 0 ||
2300 create_ports(t, 4) < 0 ||
2301 create_atomic_qids(t, 1) < 0) {
2302 printf("%d: Error initializing device\n", __LINE__);
2306 /* CQ mapping to QID */
2307 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
2309 printf("%d: error mapping lb qid\n", __LINE__);
2314 if (rte_event_dev_start(evdev) < 0) {
2315 printf("%d: Error with start call\n", __LINE__);
2319 /************** Gen pkt and enqueue ****************/
2320 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2322 printf("%d: gen of pkt failed\n", __LINE__);
2326 ev.op = RTE_EVENT_OP_NEW;
2327 ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
/* tag the mbuf so we can recognise it after the round trip */
2331 arp->seqn = MAGIC_SEQN;
2333 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2335 printf("%d: Failed to enqueue\n", __LINE__);
2339 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2341 err = test_event_dev_stats_get(evdev, &stats);
2343 printf("%d: failed to get stats\n", __LINE__);
2347 if (stats.rx_pkts != 1 ||
2348 stats.tx_pkts != 1 ||
2349 stats.port_inflight[wrk_enq] != 1) {
2350 printf("%d: Sched core didn't handle pkt as expected\n",
2352 rte_event_dev_dump(evdev, stdout);
2358 deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
2360 printf("%d: Failed to deq\n", __LINE__);
2364 err = test_event_dev_stats_get(evdev, &stats);
2366 printf("%d: failed to get stats\n", __LINE__);
/* NOTE(review): stats_get is called again here with err unchecked,
 * right after the checked call above — looks redundant; confirm. */
2370 err = test_event_dev_stats_get(evdev, &stats);
2371 if (ev.mbuf->seqn != MAGIC_SEQN) {
2372 printf("%d: magic sequence number not dequeued\n", __LINE__);
/* drop the packet and tell the eventdev it is done with it */
2376 rte_pktmbuf_free(ev.mbuf);
2377 err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
2379 printf("%d: Failed to enqueue\n", __LINE__);
2382 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2384 err = test_event_dev_stats_get(evdev, &stats);
2385 if (stats.port_inflight[wrk_enq] != 0) {
2386 printf("%d: port inflight not correct\n", __LINE__);
/*
 * Verify per-port inflight accounting across two atomic QIDs: enqueue
 * QID1_NUM packets to qid[0] (serviced by port p1) and QID2_NUM to
 * qid[1] (port p2); check inflight counts after scheduling, confirm
 * dequeue alone does NOT decrement them, then release every event and
 * confirm both counters drop to zero.
 *
 * Fix vs. previous revision: the p2 post-dequeue error message said
 * "port 1" (copy/paste from the p1 branch); it now says "port 2".
 */
2395 inflight_counts(struct test *t)
2397 struct rte_event ev;
2398 struct test_event_dev_stats stats;
2399 const int rx_enq = 0;
2405 /* Create instance with 4 ports */
2406 if (init(t, 2, 3) < 0 ||
2407 create_ports(t, 3) < 0 ||
2408 create_atomic_qids(t, 2) < 0) {
2409 printf("%d: Error initializing device\n", __LINE__);
2413 /* CQ mapping to QID */
2414 err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
2416 printf("%d: error mapping lb qid\n", __LINE__);
2420 err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
2422 printf("%d: error mapping lb qid\n", __LINE__);
2427 if (rte_event_dev_start(evdev) < 0) {
2428 printf("%d: Error with start call\n", __LINE__);
2432 /************** FORWARD ****************/
2434 for (i = 0; i < QID1_NUM; i++) {
2435 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2438 printf("%d: gen of pkt failed\n", __LINE__);
2442 ev.queue_id = t->qid[0];
2443 ev.op = RTE_EVENT_OP_NEW;
2445 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2447 printf("%d: Failed to enqueue\n", __LINE__);
2452 for (i = 0; i < QID2_NUM; i++) {
2453 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2456 printf("%d: gen of pkt failed\n", __LINE__);
2459 ev.queue_id = t->qid[1];
2460 ev.op = RTE_EVENT_OP_NEW;
2462 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2464 printf("%d: Failed to enqueue\n", __LINE__);
2470 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2472 err = test_event_dev_stats_get(evdev, &stats);
2474 printf("%d: failed to get stats\n", __LINE__);
2478 if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
2479 stats.tx_pkts != QID1_NUM + QID2_NUM) {
2480 printf("%d: Sched core didn't handle pkt as expected\n",
2485 if (stats.port_inflight[p1] != QID1_NUM) {
2486 printf("%d: %s port 1 inflight not correct\n", __LINE__,
2490 if (stats.port_inflight[p2] != QID2_NUM) {
2491 printf("%d: %s port 2 inflight not correct\n", __LINE__,
2496 /************** DEQUEUE INFLIGHT COUNT CHECKS ****************/
2498 struct rte_event events[QID1_NUM + QID2_NUM];
2499 uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
2500 RTE_DIM(events), 0);
2502 if (deq_pkts != QID1_NUM) {
2503 printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
/* dequeue must NOT decrement inflight; only RELEASE does */
2506 err = test_event_dev_stats_get(evdev, &stats);
2507 if (stats.port_inflight[p1] != QID1_NUM) {
2508 printf("%d: port 1 inflight decrement after DEQ != 0\n",
2512 for (i = 0; i < QID1_NUM; i++) {
2513 err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
2516 printf("%d: %s rte enqueue of inf release failed\n",
2517 __LINE__, __func__);
2523 * As the scheduler core decrements inflights, it needs to run to
2524 * process packets to act on the drop messages
2526 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2528 err = test_event_dev_stats_get(evdev, &stats);
2529 if (stats.port_inflight[p1] != 0) {
2530 printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
2535 deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
2536 RTE_DIM(events), 0);
2537 if (deq_pkts != QID2_NUM) {
2538 printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
2541 err = test_event_dev_stats_get(evdev, &stats);
2542 if (stats.port_inflight[p2] != QID2_NUM) {
/* FIX: this branch checks p2, not port 1 */
2543 printf("%d: port 2 inflight decrement after DEQ != 0\n",
2547 for (i = 0; i < QID2_NUM; i++) {
2548 err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
2551 printf("%d: %s rte enqueue of inf release failed\n",
2552 __LINE__, __func__);
2558 * As the scheduler core decrements inflights, it needs to run to
2559 * process packets to act on the drop messages
2561 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2563 err = test_event_dev_stats_get(evdev, &stats);
2564 if (stats.port_inflight[p2] != 0) {
2565 printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
2572 rte_event_dev_dump(evdev, stdout);
/*
 * Shared body for the ordered/unordered basic tests.
 *
 * Three worker ports all map to qid0 (ordered when check_order is
 * non-zero, otherwise unordered); a directed qid1 feeds tx_port.
 * Three packets are enqueued, dequeued one per worker, re-enqueued in
 * REVERSE order, and then dequeued from tx_port. When check_order is
 * set, the mbuf seqn values must come out in original (MAGIC_SEQN+j)
 * order, proving the ordered queue restored sequence.
 */
2578 parallel_basic(struct test *t, int check_order)
2580 const uint8_t rx_port = 0;
2581 const uint8_t w1_port = 1;
2582 const uint8_t w3_port = 3;
2583 const uint8_t tx_port = 4;
2586 uint32_t deq_pkts, j;
2587 struct rte_mbuf *mbufs[3];
2588 struct rte_mbuf *mbufs_out[3] = { 0 };
2589 const uint32_t MAGIC_SEQN = 1234;
2591 /* Create instance with 4 ports */
2592 if (init(t, 2, tx_port + 1) < 0 ||
2593 create_ports(t, tx_port + 1) < 0 ||
2594 (check_order ? create_ordered_qids(t, 1) :
2595 create_unordered_qids(t, 1)) < 0 ||
2596 create_directed_qids(t, 1, &tx_port)) {
2597 printf("%d: Error initializing device\n", __LINE__);
2603 * We need three ports, all mapped to the same ordered qid0. Then we'll
2604 * take a packet out to each port, re-enqueue in reverse order,
2605 * then make sure the reordering has taken place properly when we
2606 * dequeue from the tx_port.
2608 * Simplified test setup diagram:
2612 * qid0 - w2_port - qid1
2616 /* CQ mapping to QID for LB ports (directed mapped on create) */
2617 for (i = w1_port; i <= w3_port; i++) {
2618 err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
2621 printf("%d: error mapping lb qid\n", __LINE__);
2627 if (rte_event_dev_start(evdev) < 0) {
2628 printf("%d: Error with start call\n", __LINE__);
2632 /* Enqueue 3 packets to the rx port */
2633 for (i = 0; i < 3; i++) {
2634 struct rte_event ev;
2635 mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
2637 printf("%d: gen of pkt failed\n", __LINE__);
2641 ev.queue_id = t->qid[0];
2642 ev.op = RTE_EVENT_OP_NEW;
/* seqn records the original order for the final check */
2644 mbufs[i]->seqn = MAGIC_SEQN + i;
2646 /* generate pkt and enqueue */
2647 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
2649 printf("%d: Failed to enqueue pkt %u, retval = %u\n",
2655 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2657 /* use extra slot to make logic in loops easier */
2658 struct rte_event deq_ev[w3_port + 1];
2660 /* Dequeue the 3 packets, one from each worker port */
2661 for (i = w1_port; i <= w3_port; i++) {
2662 deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
2664 if (deq_pkts != 1) {
2665 printf("%d: Failed to deq\n", __LINE__);
2666 rte_event_dev_dump(evdev, stdout);
2671 /* Enqueue each packet in reverse order, flushing after each one */
2672 for (i = w3_port; i >= w1_port; i--) {
2674 deq_ev[i].op = RTE_EVENT_OP_FORWARD;
2675 deq_ev[i].queue_id = t->qid[1];
2676 err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
2678 printf("%d: Failed to enqueue\n", __LINE__);
2682 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2684 /* dequeue from the tx ports, we should get 3 packets */
2685 deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
2688 /* Check to see if we've got all 3 packets */
2689 if (deq_pkts != 3) {
2690 printf("%d: expected 3 pkts at tx port got %d from port %d\n",
2691 __LINE__, deq_pkts, tx_port);
2692 rte_event_dev_dump(evdev, stdout);
2696 /* Check to see if the sequence numbers are in expected order */
2698 for (j = 0 ; j < deq_pkts ; j++) {
2699 if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
2701 "%d: Incorrect sequence number(%d) from port %d\n",
2702 __LINE__, mbufs_out[j]->seqn, tx_port);
2708 /* Destroy the instance */
/* Ordered-queue variant of the basic 3-worker reorder test. */
2714 ordered_basic(struct test *t)
2716 return parallel_basic(t, 1);
/* Unordered-queue variant: same flow, no sequence check enforced. */
2720 unordered_basic(struct test *t)
2722 return parallel_basic(t, 0);
2726 holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
2728 const struct rte_event new_ev = {
2729 .op = RTE_EVENT_OP_NEW
2730 /* all other fields zero */
2732 struct rte_event ev = new_ev;
2733 unsigned int rx_port = 0; /* port we get the first flow on */
/* xstat name buffers, filled in once we know which port got flow 0 */
2734 char rx_port_used_stat[64];
2735 char rx_port_free_stat[64];
2736 char other_port_used_stat[64];
2738 if (init(t, 1, 2) < 0 ||
2739 create_ports(t, 2) < 0 ||
2740 create_atomic_qids(t, 1) < 0) {
2741 printf("%d: Error initializing device\n", __LINE__);
2744 int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2745 if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 ||
2747 printf("%d: Error links queue to ports\n", __LINE__);
2750 if (rte_event_dev_start(evdev) < 0) {
2751 printf("%d: Error with start call\n", __LINE__);
2755 /* send one packet and see where it goes, port 0 or 1 */
2756 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2757 printf("%d: Error doing first enqueue\n", __LINE__);
2760 rte_service_run_iter_on_app_lcore(t->service_id, 1);
/* determine which port the first flow landed on via its CQ xstat */
2762 if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
2766 snprintf(rx_port_used_stat, sizeof(rx_port_used_stat),
2767 "port_%u_cq_ring_used", rx_port);
2768 snprintf(rx_port_free_stat, sizeof(rx_port_free_stat),
2769 "port_%u_cq_ring_free", rx_port);
/* rx_port ^ 1 selects whichever of port 0/1 is NOT the rx port */
2770 snprintf(other_port_used_stat, sizeof(other_port_used_stat),
2771 "port_%u_cq_ring_used", rx_port ^ 1);
2772 if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL)
2774 printf("%d: Error, first event not scheduled\n", __LINE__);
2778 /* now fill up the rx port's queue with one flow to cause HOLB */
2781 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2782 printf("%d: Error with enqueue\n", __LINE__);
2785 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2786 } while (rte_event_dev_xstats_by_name_get(evdev,
2787 rx_port_free_stat, NULL) != 0);
2789 /* one more packet, which needs to stay in IQ - i.e. HOLB */
2791 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2792 printf("%d: Error with enqueue\n", __LINE__);
2795 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2797 /* check that the other port still has an empty CQ */
2798 if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2800 printf("%d: Error, second port CQ is not empty\n", __LINE__);
2803 /* check IQ now has one packet */
2804 if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2806 printf("%d: Error, QID does not have exactly 1 packet\n",
2811 /* send another flow, which should pass the other IQ entry */
2814 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2815 printf("%d: Error with enqueue\n", __LINE__);
2818 rte_service_run_iter_on_app_lcore(t->service_id, 1);
/* the new flow must have been scheduled to the other (empty) port,
 * overtaking the blocked packet still waiting in the IQ */
2820 if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2822 printf("%d: Error, second flow did not pass out first\n",
2827 if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2829 printf("%d: Error, QID does not have exactly 1 packet\n",
2836 rte_event_dev_dump(evdev, stdout);
/*
 * Worker lcore body for the loopback stress test (launched via
 * rte_eal_remote_launch). Dequeues bursts from t->port[1] and forwards
 * each event onward; an event that reaches queue 8 has its mbuf udata64
 * pass-counter bumped and is either forwarded for another lap or, after
 * the 16th lap, freed and RELEASEd.
 */
2842 worker_loopback_worker_fn(void *arg)
2844 struct test *t = arg;
2845 uint8_t port = t->port[1];
2850 * Takes packets from the input port and then loops them back through
2851 * the Eventdev. Each packet gets looped through QIDs 0-8, 16 times
2852 * so each packet goes through 8*16 = 128 times.
2854 printf("%d: \tWorker function started\n", __LINE__);
2855 while (count < NUM_PACKETS) {
2856 #define BURST_SIZE 32
2857 struct rte_event ev[BURST_SIZE];
2858 uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev,
2865 for (i = 0; i < nb_rx; i++) {
/* not yet at the last queue: forward to continue this lap */
2867 if (ev[i].queue_id != 8) {
2868 ev[i].op = RTE_EVENT_OP_FORWARD;
2869 enqd = rte_event_enqueue_burst(evdev, port,
2872 printf("%d: Can't enqueue FWD!!\n",
/* completed a lap through the queues: count it in udata64 */
2880 ev[i].mbuf->udata64++;
2881 if (ev[i].mbuf->udata64 != 16) {
2882 ev[i].op = RTE_EVENT_OP_FORWARD;
2883 enqd = rte_event_enqueue_burst(evdev, port,
2886 printf("%d: Can't enqueue FWD!!\n",
2892 /* we have hit 16 iterations through system - drop */
2893 rte_pktmbuf_free(ev[i].mbuf);
2895 ev[i].op = RTE_EVENT_OP_RELEASE;
2896 enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1);
2898 printf("%d drop enqueue failed\n", __LINE__);
/*
 * Producer lcore body for the loopback stress test: allocates mbufs
 * (busy-waiting on pool exhaustion) and injects NUM_PACKETS NEW events
 * into qid[0] through t->port[0], retrying each enqueue until accepted.
 */
2908 worker_loopback_producer_fn(void *arg)
2910 struct test *t = arg;
2911 uint8_t port = t->port[0];
2914 printf("%d: \tProducer function started\n", __LINE__);
2915 while (count < NUM_PACKETS) {
2916 struct rte_mbuf *m = 0;
/* spin until the mempool can supply an mbuf */
2918 m = rte_pktmbuf_alloc(t->mbuf_pool);
2919 } while (m == NULL);
2923 struct rte_event ev = {
2924 .op = RTE_EVENT_OP_NEW,
2925 .queue_id = t->qid[0],
/* derive a pseudo-random flow id from the mbuf address */
2926 .flow_id = (uintptr_t)m & 0xFFFF,
/* retry until the event is accepted (new_event_threshold may stall) */
2930 if (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1) {
2931 while (rte_event_enqueue_burst(evdev, port, &ev, 1) !=
/*
 * Loopback stress test driver.  Configures the device with 8 atomic QIDs
 * and two ports (producer and worker), launches one producer lcore and one
 * worker lcore, then drives the software scheduler from the current lcore
 * until both remote lcores finish.  Prints throughput once per second and
 * declares a deadlock if no packets are transmitted for ~3 seconds.
 *
 * Requires at least 3 lcores (caller test_sw_eventdev() checks this).
 *
 * NOTE(review): truncated extract - error `return`s, the fprintf heads for
 * the stats/deadlock messages, the xstats dump after the deadlock dump,
 * cleanup and the final return are in lines not visible here.
 */
2943 worker_loopback(struct test *t)
2945 /* use a single producer core, and a worker core to see what happens
2946 * if the worker loops packets back multiple times
2948 struct test_event_dev_stats stats;
2949 uint64_t print_cycles = 0, cycles = 0;
2950 uint64_t tx_pkts = 0;
2952 int w_lcore, p_lcore;
/* 8 ports requested at init, but only ports 0/1 are reconfigured below;
 * all 8 QIDs are atomic. */
2954 if (init(t, 8, 2) < 0 ||
2955 create_atomic_qids(t, 8) < 0) {
2956 printf("%d: Error initializing device\n", __LINE__);
2960 /* RX with low max events */
2961 static struct rte_event_port_conf conf = {
2962 .dequeue_depth = 32,
2963 .enqueue_depth = 64,
2965 /* beware: this cannot be initialized in the static above as it would
2966 * only be initialized once - and this needs to be set for multiple runs
2968 conf.new_event_threshold = 512;
/* Port 0 = producer port: small new-event threshold throttles injection */
2970 if (rte_event_port_setup(evdev, 0, &conf) < 0) {
2971 printf("Error setting up RX port\n");
2975 /* TX with higher max events */
2976 conf.new_event_threshold = 4096;
/* Port 1 = worker port: higher threshold so forwards are never starved */
2977 if (rte_event_port_setup(evdev, 1, &conf) < 0) {
2978 printf("Error setting up TX port\n");
2983 /* CQ mapping to QID */
/* NULL queue list links the worker port to every configured queue */
2984 err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2985 if (err != 8) { /* should have mapped all queues*/
2986 printf("%d: error mapping port 2 to all qids\n", __LINE__);
2990 if (rte_event_dev_start(evdev) < 0) {
2991 printf("%d: Error with start call\n", __LINE__);
/* Pick the first two slave lcores for the producer and worker threads */
2995 p_lcore = rte_get_next_lcore(
2996 /* start core */ -1,
2997 /* skip master */ 1,
2999 w_lcore = rte_get_next_lcore(p_lcore, 1, 0);
3001 rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);
3002 rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);
3004 print_cycles = cycles = rte_get_timer_cycles();
/* This (app) lcore runs the scheduler service until both remotes exit */
3005 while (rte_eal_get_lcore_state(p_lcore) != FINISHED ||
3006 rte_eal_get_lcore_state(w_lcore) != FINISHED) {
3008 rte_service_run_iter_on_app_lcore(t->service_id, 1);
3010 uint64_t new_cycles = rte_get_timer_cycles();
/* Once per second: print scheduler Rx/Tx progress */
3012 if (new_cycles - print_cycles > rte_get_timer_hz()) {
3013 test_event_dev_stats_get(evdev, &stats);
3015 "%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n",
3016 __LINE__, stats.rx_pkts, stats.tx_pkts);
3018 print_cycles = new_cycles;
/* If tx_pkts has not advanced in ~3s, assume deadlock and dump state */
3020 if (new_cycles - cycles > rte_get_timer_hz() * 3) {
3021 test_event_dev_stats_get(evdev, &stats);
3022 if (stats.tx_pkts == tx_pkts) {
3023 rte_event_dev_dump(evdev, stdout);
3024 printf("Dumping xstats:\n");
3027 "%d: No schedules for seconds, deadlock\n",
3031 tx_pkts = stats.tx_pkts;
3032 cycles = new_cycles;
/* One extra service iteration after the workers exit */
3035 rte_service_run_iter_on_app_lcore(t->service_id, 1);
3036 /* ensure all completions are flushed */
3038 rte_eal_mp_wait_lcore();
/* Shared mbuf pool, created lazily on the first run of test_sw_eventdev()
 * and deliberately kept alive across re-runs (see the comment near the end
 * of that function). */
3044 static struct rte_mempool *eventdev_func_mempool;
/*
 * Top-level autotest entry point for the software eventdev PMD.  Creates
 * (or finds) the "event_sw0" vdev, binds its scheduler service so the test
 * can drive it manually, lazily creates the shared mbuf pool, then runs
 * every sub-test in sequence, printing a banner before each and an ERROR
 * line on failure.  The worker-loopback test is skipped when fewer than 3
 * lcores are available.
 *
 * NOTE(review): truncated extract - the `ret < 0` checks and `return ret;`
 * statements between test invocations, several closing braces, and the
 * final free(t)/return are in lines not visible here.  Also, the result of
 * malloc() below is presumably checked in a missing line - confirm.
 */
3047 test_sw_eventdev(void)
3049 struct test *t = malloc(sizeof(struct test));
3052 /* manually initialize the op, older gcc's complain on static
3053 * initialization of struct elements that are a bitfield.
3055 release_ev.op = RTE_EVENT_OP_RELEASE;
3057 const char *eventdev_name = "event_sw0";
3058 evdev = rte_event_dev_get_dev_id(eventdev_name);
/* Device not present (e.g. not passed on the EAL command line): create it */
3060 printf("%d: Eventdev %s not found - creating.\n",
3061 __LINE__, eventdev_name);
3062 if (rte_vdev_init(eventdev_name, NULL) < 0) {
3063 printf("Error creating eventdev\n");
3066 evdev = rte_event_dev_get_dev_id(eventdev_name);
3068 printf("Error finding newly created eventdev\n");
/* The sw PMD schedules via a service core; fetch its service id so the
 * tests can run scheduler iterations on the app lcore themselves. */
3073 if (rte_event_dev_service_id_get(evdev, &t->service_id) < 0) {
3074 printf("Failed to get service ID for software event dev\n");
3078 rte_service_runstate_set(t->service_id, 1);
/* Allow rte_service_run_iter_on_app_lcore() without a mapped lcore */
3079 rte_service_set_runstate_mapped_check(t->service_id, 0);
3081 /* Only create mbuf pool once, reuse for each test run */
3082 if (!eventdev_func_mempool) {
3083 eventdev_func_mempool = rte_pktmbuf_pool_create(
3084 "EVENTDEV_SW_SA_MBUF_POOL",
3085 (1<<12), /* 4k buffers */
3086 32 /*MBUF_CACHE_SIZE*/,
3088 512, /* use very small mbufs */
3090 if (!eventdev_func_mempool) {
3091 printf("ERROR creating mempool\n");
3095 t->mbuf_pool = eventdev_func_mempool;
/* ---- sub-tests run in sequence below; each prints a banner, then an
 * ERROR line if it fails (the ret checks are in lines missing here) ---- */
3096 printf("*** Running Single Directed Packet test...\n");
3097 ret = test_single_directed_packet(t);
3099 printf("ERROR - Single Directed Packet test FAILED.\n");
3102 printf("*** Running Directed Forward Credit test...\n");
3103 ret = test_directed_forward_credits(t);
3105 printf("ERROR - Directed Forward Credit test FAILED.\n");
3108 printf("*** Running Single Load Balanced Packet test...\n");
3109 ret = single_packet(t);
3111 printf("ERROR - Single Packet test FAILED.\n");
3114 printf("*** Running Unordered Basic test...\n");
3115 ret = unordered_basic(t);
3117 printf("ERROR - Unordered Basic test FAILED.\n");
3120 printf("*** Running Ordered Basic test...\n");
3121 ret = ordered_basic(t);
3123 printf("ERROR - Ordered Basic test FAILED.\n");
3126 printf("*** Running Burst Packets test...\n");
3127 ret = burst_packets(t);
3129 printf("ERROR - Burst Packets test FAILED.\n");
3132 printf("*** Running Load Balancing test...\n");
3133 ret = load_balancing(t);
3135 printf("ERROR - Load Balancing test FAILED.\n");
3138 printf("*** Running Prioritized Directed test...\n");
3139 ret = test_priority_directed(t);
3141 printf("ERROR - Prioritized Directed test FAILED.\n");
3144 printf("*** Running Prioritized Atomic test...\n");
3145 ret = test_priority_atomic(t);
3147 printf("ERROR - Prioritized Atomic test FAILED.\n");
3151 printf("*** Running Prioritized Ordered test...\n");
3152 ret = test_priority_ordered(t);
3154 printf("ERROR - Prioritized Ordered test FAILED.\n");
3157 printf("*** Running Prioritized Unordered test...\n");
3158 ret = test_priority_unordered(t);
3160 printf("ERROR - Prioritized Unordered test FAILED.\n");
3163 printf("*** Running Invalid QID test...\n");
3164 ret = invalid_qid(t);
3166 printf("ERROR - Invalid QID test FAILED.\n");
3169 printf("*** Running Load Balancing History test...\n");
3170 ret = load_balancing_history(t);
3172 printf("ERROR - Load Balancing History test FAILED.\n");
3175 printf("*** Running Inflight Count test...\n");
3176 ret = inflight_counts(t);
3178 printf("ERROR - Inflight Count test FAILED.\n");
3181 printf("*** Running Abuse Inflights test...\n");
3182 ret = abuse_inflights(t);
3184 printf("ERROR - Abuse Inflights test FAILED.\n");
3187 printf("*** Running XStats test...\n");
3188 ret = xstats_tests(t);
3190 printf("ERROR - XStats test FAILED.\n");
3193 printf("*** Running XStats ID Reset test...\n");
3194 ret = xstats_id_reset_tests(t);
3196 printf("ERROR - XStats ID Reset test FAILED.\n");
3199 printf("*** Running XStats Brute Force test...\n");
3200 ret = xstats_brute_force(t);
3202 printf("ERROR - XStats Brute Force test FAILED.\n");
3205 printf("*** Running XStats ID Abuse test...\n");
3206 ret = xstats_id_abuse_tests(t);
3208 printf("ERROR - XStats ID Abuse test FAILED.\n");
3211 printf("*** Running QID Priority test...\n");
3212 ret = qid_priorities(t);
3214 printf("ERROR - QID Priority test FAILED.\n");
3217 printf("*** Running Ordered Reconfigure test...\n");
3218 ret = ordered_reconfigure(t);
3220 printf("ERROR - Ordered Reconfigure test FAILED.\n");
3223 printf("*** Running Port LB Single Reconfig test...\n");
3224 ret = port_single_lb_reconfig(t);
3226 printf("ERROR - Port LB Single Reconfig test FAILED.\n");
3229 printf("*** Running Port Reconfig Credits test...\n");
3230 ret = port_reconfig_credits(t);
3232 printf("ERROR - Port Reconfig Credits Reset test FAILED.\n");
3235 printf("*** Running Head-of-line-blocking test...\n");
3238 printf("ERROR - Head-of-line-blocking test FAILED.\n");
/* worker_loopback() needs the app lcore plus 2 remote lcores */
3241 if (rte_lcore_count() >= 3) {
3242 printf("*** Running Worker loopback test...\n");
3243 ret = worker_loopback(t);
3245 printf("ERROR - Worker loopback test FAILED.\n");
3249 printf("### Not enough cores for worker loopback test.\n");
3250 printf("### Need at least 3 cores for test.\n");
3253 * Free test instance, leaving mempool initialized, and a pointer to it
3254 * in static eventdev_func_mempool, as it is re-used on re-runs
/* Register the suite with the DPDK test framework so it can be run as the
 * "eventdev_sw_autotest" command. */
3261 REGISTER_TEST_COMMAND(eventdev_sw_autotest, test_sw_eventdev);