1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2017 Intel Corporation
10 #include <sys/queue.h>
12 #include <rte_memory.h>
13 #include <rte_launch.h>
15 #include <rte_per_lcore.h>
16 #include <rte_lcore.h>
17 #include <rte_debug.h>
18 #include <rte_ethdev.h>
19 #include <rte_cycles.h>
20 #include <rte_eventdev.h>
21 #include <rte_pause.h>
22 #include <rte_service.h>
23 #include <rte_service_component.h>
24 #include <rte_bus_vdev.h>
30 #define NUM_PACKETS (1<<18)
31 #define DEQUEUE_DEPTH 128
36 struct rte_mempool *mbuf_pool;
37 uint8_t port[MAX_PORTS];
38 uint8_t qid[MAX_QIDS];
43 static struct rte_event release_ev;
/* Build a test packet: allocate an mbuf from mempool `mp` and copy a canned
 * ARP-request frame into its data area.  `portid` is unused in the visible
 * lines -- TODO confirm against the full source.
 * NOTE(review): interior lines (alloc NULL-check, return) are elided in this
 * view of the file. */
45 static inline struct rte_mbuf *
46 rte_gen_arp(int portid, struct rte_mempool *mp)
50  * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
/* Raw frame bytes: Ethernet header, ARP payload, then zero padding. */
52 static const uint8_t arp_request[] = {
53 /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
54 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
55 /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
56 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
57 /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
58 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
59 /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
60 0x00, 0x00, 0x00, 0x00
/* NOTE(review): the "- 1" drops the template's final padding byte --
 * presumably intentional to hit the 46-byte length above; confirm. */
63 int pkt_len = sizeof(arp_request) - 1;
65 m = rte_pktmbuf_alloc(mp);
/* Copy straight to buf_addr + data_off; assumes data room >= pkt_len. */
69 memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
70 arp_request, pkt_len);
71 rte_pktmbuf_pkt_len(m) = pkt_len;
72 rte_pktmbuf_data_len(m) = pkt_len;
/* Debug helper body: fetch and print every device-, port- and queue-level
 * xstat of `evdev`.  The enclosing function's signature is not visible in
 * this chunk. */
82 const uint32_t XSTATS_MAX = 1024;
84 uint32_t ids[XSTATS_MAX];
85 uint64_t values[XSTATS_MAX];
86 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
88 for (i = 0; i < XSTATS_MAX; i++)
91 /* Device names / values */
92 int ret = rte_event_dev_xstats_names_get(evdev,
93 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
94 xstats_names, ids, XSTATS_MAX);
96 printf("%d: xstats names get() returned error\n",
100 ret = rte_event_dev_xstats_get(evdev,
101 RTE_EVENT_DEV_XSTATS_DEVICE,
102 0, ids, values, ret);
103 if (ret > (signed int)XSTATS_MAX)
104 printf("%s %d: more xstats available than space\n",
106 for (i = 0; (signed int)i < ret; i++) {
107 printf("%d : %s : %"PRIu64"\n",
108 i, xstats_names[i].name, values[i]);
111 /* Port names / values */
112 ret = rte_event_dev_xstats_names_get(evdev,
113 RTE_EVENT_DEV_XSTATS_PORT, 0,
114 xstats_names, ids, XSTATS_MAX);
/* NOTE(review): names fetched for port 0 but values for port 1 --
 * printed names may not match printed values; confirm intent. */
115 ret = rte_event_dev_xstats_get(evdev,
116 RTE_EVENT_DEV_XSTATS_PORT, 1,
118 if (ret > (signed int)XSTATS_MAX)
119 printf("%s %d: more xstats available than space\n",
121 for (i = 0; (signed int)i < ret; i++) {
122 printf("%d : %s : %"PRIu64"\n",
123 i, xstats_names[i].name, values[i]);
126 /* Queue names / values */
127 ret = rte_event_dev_xstats_names_get(evdev,
128 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
129 xstats_names, ids, XSTATS_MAX);
/* NOTE(review): same id mismatch here -- names for queue 0, values for
 * queue 1. */
130 ret = rte_event_dev_xstats_get(evdev,
131 RTE_EVENT_DEV_XSTATS_QUEUE,
132 1, ids, values, ret);
133 if (ret > (signed int)XSTATS_MAX)
134 printf("%s %d: more xstats available than space\n",
136 for (i = 0; (signed int)i < ret; i++) {
137 printf("%d : %s : %"PRIu64"\n",
138 i, xstats_names[i].name, values[i]);
142 /* initialization and config */
/* init(): zero the test state (preserving the mbuf pool pointer saved
 * in `temp`) and configure `evdev` with the requested queue/port counts.
 * Returns via rte_event_dev_configure(); error path prints and
 * (presumably) returns non-zero -- tail lines elided in this view. */
144 init(struct test *t, int nb_queues, int nb_ports)
146 struct rte_event_dev_config config = {
147 .nb_event_queues = nb_queues,
148 .nb_event_ports = nb_ports,
149 .nb_event_queue_flows = 1024,
150 .nb_events_limit = 4096,
151 .nb_event_port_dequeue_depth = DEQUEUE_DEPTH,
152 .nb_event_port_enqueue_depth = 128,
/* memset below wipes *t, so the pool pointer must be stashed first. */
156 void *temp = t->mbuf_pool; /* save and restore mbuf pool */
158 memset(t, 0, sizeof(*t));
161 ret = rte_event_dev_configure(evdev, &config);
163 printf("%d: Error configuring device\n", __LINE__);
/* create_ports(): set up `num_ports` event ports on evdev with a shared
 * static config; fails if num_ports exceeds MAX_PORTS.  Port-id bookkeeping
 * lines are elided in this view. */
168 create_ports(struct test *t, int num_ports)
171 static const struct rte_event_port_conf conf = {
172 .new_event_threshold = 1024,
176 if (num_ports > MAX_PORTS)
179 for (i = 0; i < num_ports; i++) {
180 if (rte_event_port_setup(evdev, i, &conf) < 0) {
181 printf("Error setting up port %d\n", i);
/* create_lb_qids(): create `num_qids` load-balanced queues starting at
 * t->nb_qids, all with schedule type `flags` (atomic/ordered/parallel),
 * then advance t->nb_qids.  Overflow past MAX_QIDS is checked after the
 * increment. */
191 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
196 const struct rte_event_queue_conf conf = {
197 .schedule_type = flags,
198 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
199 .nb_atomic_flows = 1024,
200 .nb_atomic_order_sequences = 1024,
203 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
204 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
205 printf("%d: error creating qid %d\n", __LINE__, i);
210 t->nb_qids += num_qids;
211 if (t->nb_qids > MAX_QIDS)
/* Convenience wrapper: create `num_qids` ATOMIC load-balanced queues. */
218 create_atomic_qids(struct test *t, int num_qids)
220 return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
/* Convenience wrapper: create `num_qids` ORDERED load-balanced queues. */
224 create_ordered_qids(struct test *t, int num_qids)
226 return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ORDERED);
/* Convenience wrapper: "unordered" maps to PARALLEL scheduling. */
231 create_unordered_qids(struct test *t, int num_qids)
233 return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_PARALLEL);
/* create_directed_qids(): create `num_qids` SINGLE_LINK (directed) queues
 * and link each one 1:1 to the corresponding entry of `ports[]`.
 * t->nb_qids is advanced and bounds-checked afterwards. */
237 create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
242 static const struct rte_event_queue_conf conf = {
243 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
244 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
247 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
248 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
249 printf("%d: error creating qid %d\n", __LINE__, i);
/* Directed queue i is served exclusively by ports[i - t->nb_qids];
 * link must report exactly 1 link established. */
254 if (rte_event_port_link(evdev, ports[i - t->nb_qids],
255 &t->qid[i], NULL, 1) != 1) {
256 printf("%d: error creating link for qid %d\n",
261 t->nb_qids += num_qids;
262 if (t->nb_qids > MAX_QIDS)
/* cleanup(): common test teardown -- stop then close the event device.
 * The `t` argument is unused (kept for a uniform test-fixture signature). */
270 cleanup(struct test *t __rte_unused)
272 rte_event_dev_stop(evdev);
273 rte_event_dev_close(evdev);
/* Snapshot of the SW PMD's xstats, decoded into fixed-size arrays so the
 * tests can assert on per-port / per-queue counters by index.  Filled by
 * test_event_dev_stats_get() below. */
277 struct test_event_dev_stats {
278 uint64_t rx_pkts; /**< Total packets received */
279 uint64_t rx_dropped; /**< Total packets dropped (Eg Invalid QID) */
280 uint64_t tx_pkts; /**< Total packets transmitted */
282 /** Packets received on this port */
283 uint64_t port_rx_pkts[MAX_PORTS];
284 /** Packets dropped on this port */
285 uint64_t port_rx_dropped[MAX_PORTS];
286 /** Packets inflight on this port */
287 uint64_t port_inflight[MAX_PORTS];
288 /** Packets transmitted on this port */
289 uint64_t port_tx_pkts[MAX_PORTS];
290 /** Packets received on this qid */
291 uint64_t qid_rx_pkts[MAX_QIDS];
292 /** Packets dropped on this qid */
293 uint64_t qid_rx_dropped[MAX_QIDS];
294 /** Packets transmitted on this qid */
295 uint64_t qid_tx_pkts[MAX_QIDS];
/* test_event_dev_stats_get(): populate *stats by looking up each SW-PMD
 * xstat by its well-known name ("dev_rx", "port_N_rx", "qid_N_tx", ...).
 * The static id arrays cache the ids returned by the by-name lookups.
 * NOTE(review): static caches make this non-reentrant -- fine for a
 * single-threaded test harness. */
299 test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
302 static uint32_t total_ids[3]; /* rx, tx and drop */
303 static uint32_t port_rx_pkts_ids[MAX_PORTS];
304 static uint32_t port_rx_dropped_ids[MAX_PORTS];
305 static uint32_t port_inflight_ids[MAX_PORTS];
306 static uint32_t port_tx_pkts_ids[MAX_PORTS];
307 static uint32_t qid_rx_pkts_ids[MAX_QIDS];
308 static uint32_t qid_rx_dropped_ids[MAX_QIDS];
309 static uint32_t qid_tx_pkts_ids[MAX_QIDS];
/* Device-wide totals. */
312 stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
313 "dev_rx", &total_ids[0]);
314 stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
315 "dev_drop", &total_ids[1]);
316 stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
317 "dev_tx", &total_ids[2]);
/* Per-port counters, keyed by formatted stat name. */
318 for (i = 0; i < MAX_PORTS; i++) {
320 snprintf(name, sizeof(name), "port_%u_rx", i);
321 stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
322 dev_id, name, &port_rx_pkts_ids[i]);
323 snprintf(name, sizeof(name), "port_%u_drop", i);
324 stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
325 dev_id, name, &port_rx_dropped_ids[i]);
326 snprintf(name, sizeof(name), "port_%u_inflight", i);
327 stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
328 dev_id, name, &port_inflight_ids[i]);
329 snprintf(name, sizeof(name), "port_%u_tx", i);
330 stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
331 dev_id, name, &port_tx_pkts_ids[i]);
/* Per-queue counters. */
333 for (i = 0; i < MAX_QIDS; i++) {
335 snprintf(name, sizeof(name), "qid_%u_rx", i);
336 stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
337 dev_id, name, &qid_rx_pkts_ids[i]);
338 snprintf(name, sizeof(name), "qid_%u_drop", i);
339 stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
340 dev_id, name, &qid_rx_dropped_ids[i]);
341 snprintf(name, sizeof(name), "qid_%u_tx", i);
342 stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
343 dev_id, name, &qid_tx_pkts_ids[i]);
349 /* run_prio_packet_test
350 * This performs a basic packet priority check on the test instance passed in.
351 * It is factored out of the main priority tests as the same tests must be
352 * performed to ensure prioritization of each type of QID.
355 * - An initialized test structure, including mempool
356 * - t->port[0] is initialized for both Enq / Deq of packets to the QID
357 * - t->qid[0] is the QID to be tested
358 * - if LB QID, the CQ must be mapped to the QID.
/* Enqueues two marked packets (NORMAL prio first, HIGHEST second), runs the
 * scheduler, then asserts the HIGHEST-priority packet (seqn 1234) is
 * dequeued before the NORMAL one (seqn 4711). */
361 run_prio_packet_test(struct test *t)
364 const uint32_t MAGIC_SEQN[] = {4711, 1234};
365 const uint32_t PRIORITY[] = {
366 RTE_EVENT_DEV_PRIORITY_NORMAL,
367 RTE_EVENT_DEV_PRIORITY_HIGHEST
370 for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
371 /* generate pkt and enqueue */
373 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
375 printf("%d: gen of pkt failed\n", __LINE__);
/* Tag each mbuf with a magic sequence number to identify it on deq. */
378 arp->seqn = MAGIC_SEQN[i];
380 ev = (struct rte_event){
381 .priority = PRIORITY[i],
382 .op = RTE_EVENT_OP_NEW,
383 .queue_id = t->qid[0],
386 err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
388 printf("%d: error failed to enqueue\n", __LINE__);
/* Drive the SW PMD scheduler one iteration. */
393 rte_service_run_iter_on_app_lcore(t->service_id, 1);
395 struct test_event_dev_stats stats;
396 err = test_event_dev_stats_get(evdev, &stats);
398 printf("%d: error failed to get stats\n", __LINE__);
402 if (stats.port_rx_pkts[t->port[0]] != 2) {
403 printf("%d: error stats incorrect for directed port\n",
405 rte_event_dev_dump(evdev, stdout);
409 struct rte_event ev, ev2;
411 deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
413 printf("%d: error failed to deq\n", __LINE__);
414 rte_event_dev_dump(evdev, stdout);
/* Highest-priority packet (MAGIC_SEQN[1]) must come out first. */
417 if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
418 printf("%d: first packet out not highest priority\n",
420 rte_event_dev_dump(evdev, stdout);
423 rte_pktmbuf_free(ev.mbuf);
425 deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
427 printf("%d: error failed to deq\n", __LINE__);
428 rte_event_dev_dump(evdev, stdout);
431 if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
432 printf("%d: second packet out not lower priority\n",
434 rte_event_dev_dump(evdev, stdout);
437 rte_pktmbuf_free(ev2.mbuf);
/* test_single_directed_packet(): 3 directed QIDs -> 3 ports; enqueue one
 * tagged packet on port 0 (queue routing it to the worker port), run the
 * scheduler, and verify the packet arrives at `wrk_enq` with its magic
 * sequence number intact. */
444 test_single_directed_packet(struct test *t)
446 const int rx_enq = 0;
447 const int wrk_enq = 2;
450 /* Create instance with 3 directed QIDs going to 3 ports */
451 if (init(t, 3, 3) < 0 ||
452 create_ports(t, 3) < 0 ||
453 create_directed_qids(t, 3, t->port) < 0)
456 if (rte_event_dev_start(evdev) < 0) {
457 printf("%d: Error with start call\n", __LINE__);
461 /************** FORWARD ****************/
462 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
463 struct rte_event ev = {
464 .op = RTE_EVENT_OP_NEW,
470 printf("%d: gen of pkt failed\n", __LINE__);
474 const uint32_t MAGIC_SEQN = 4711;
475 arp->seqn = MAGIC_SEQN;
477 /* generate pkt and enqueue */
478 err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
480 printf("%d: error failed to enqueue\n", __LINE__);
484 /* Run schedule() as dir packets may need to be re-ordered */
485 rte_service_run_iter_on_app_lcore(t->service_id, 1);
487 struct test_event_dev_stats stats;
488 err = test_event_dev_stats_get(evdev, &stats);
490 printf("%d: error failed to get stats\n", __LINE__);
494 if (stats.port_rx_pkts[rx_enq] != 1) {
495 printf("%d: error stats incorrect for directed port\n",
501 deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
503 printf("%d: error failed to deq\n", __LINE__);
507 err = test_event_dev_stats_get(evdev, &stats);
/* Worker-port rx may be 0 or 1 depending on PMD accounting -- either
 * is accepted here. */
508 if (stats.port_rx_pkts[wrk_enq] != 0 &&
509 stats.port_rx_pkts[wrk_enq] != 1) {
510 printf("%d: error directed stats post-dequeue\n", __LINE__);
514 if (ev.mbuf->seqn != MAGIC_SEQN) {
515 printf("%d: error magic sequence number not dequeued\n",
520 rte_pktmbuf_free(ev.mbuf);
/* test_directed_forward_credits(): loop one event through a single directed
 * queue/port 1000 times using OP_FORWARD, checking that forwarding on a
 * directed queue never exhausts credits. */
526 test_directed_forward_credits(struct test *t)
531 if (init(t, 1, 1) < 0 ||
532 create_ports(t, 1) < 0 ||
533 create_directed_qids(t, 1, t->port) < 0)
536 if (rte_event_dev_start(evdev) < 0) {
537 printf("%d: Error with start call\n", __LINE__);
541 struct rte_event ev = {
542 .op = RTE_EVENT_OP_NEW,
546 for (i = 0; i < 1000; i++) {
547 err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
549 printf("%d: error failed to enqueue\n", __LINE__);
552 rte_service_run_iter_on_app_lcore(t->service_id, 1);
555 deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
557 printf("%d: error failed to deq\n", __LINE__);
561 /* re-write event to be a forward, and continue looping it */
562 ev.op = RTE_EVENT_OP_FORWARD;
/* Priority test on a directed queue: setup 1 port / 1 single-link queue,
 * then delegate to run_prio_packet_test() (no explicit link needed --
 * create_directed_qids() links internally). */
571 test_priority_directed(struct test *t)
573 if (init(t, 1, 1) < 0 ||
574 create_ports(t, 1) < 0 ||
575 create_directed_qids(t, 1, t->port) < 0) {
576 printf("%d: Error initializing device\n", __LINE__);
580 if (rte_event_dev_start(evdev) < 0) {
581 printf("%d: Error with start call\n", __LINE__);
585 return run_prio_packet_test(t);
/* Priority test on an ATOMIC queue: setup, link port 0 to qid 0 explicitly
 * (LB queues require a CQ mapping), start, then run the shared check. */
589 test_priority_atomic(struct test *t)
591 if (init(t, 1, 1) < 0 ||
592 create_ports(t, 1) < 0 ||
593 create_atomic_qids(t, 1) < 0) {
594 printf("%d: Error initializing device\n", __LINE__);
599 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
600 printf("%d: error mapping qid to port\n", __LINE__);
603 if (rte_event_dev_start(evdev) < 0) {
604 printf("%d: Error with start call\n", __LINE__);
608 return run_prio_packet_test(t);
/* Priority test on an ORDERED queue -- same shape as the atomic variant. */
612 test_priority_ordered(struct test *t)
614 if (init(t, 1, 1) < 0 ||
615 create_ports(t, 1) < 0 ||
616 create_ordered_qids(t, 1) < 0) {
617 printf("%d: Error initializing device\n", __LINE__);
622 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
623 printf("%d: error mapping qid to port\n", __LINE__);
626 if (rte_event_dev_start(evdev) < 0) {
627 printf("%d: Error with start call\n", __LINE__);
631 return run_prio_packet_test(t);
/* Priority test on an unordered (PARALLEL) queue -- same shape as above. */
635 test_priority_unordered(struct test *t)
637 if (init(t, 1, 1) < 0 ||
638 create_ports(t, 1) < 0 ||
639 create_unordered_qids(t, 1) < 0) {
640 printf("%d: Error initializing device\n", __LINE__);
645 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
646 printf("%d: error mapping qid to port\n", __LINE__);
649 if (rte_event_dev_start(evdev) < 0) {
650 printf("%d: Error with start call\n", __LINE__);
654 return run_prio_packet_test(t);
/* burst_packets(): 2 ports, 2 atomic queues, each port linked to one queue.
 * Enqueue NUM_PKTS packets (queue selection on elided lines -- presumably
 * alternating i % 2), schedule, and verify each port dequeues exactly half. */
658 burst_packets(struct test *t)
660 /************** CONFIG ****************/
665 /* Create instance with 2 ports and 2 queues */
666 if (init(t, 2, 2) < 0 ||
667 create_ports(t, 2) < 0 ||
668 create_atomic_qids(t, 2) < 0) {
669 printf("%d: Error initializing device\n", __LINE__);
673 /* CQ mapping to QID */
674 ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
676 printf("%d: error mapping lb qid0\n", __LINE__);
679 ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
681 printf("%d: error mapping lb qid1\n", __LINE__);
685 if (rte_event_dev_start(evdev) < 0) {
686 printf("%d: Error with start call\n", __LINE__);
690 /************** FORWARD ****************/
691 const uint32_t rx_port = 0;
692 const uint32_t NUM_PKTS = 2;
694 for (i = 0; i < NUM_PKTS; i++) {
695 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
697 printf("%d: error generating pkt\n", __LINE__);
701 struct rte_event ev = {
702 .op = RTE_EVENT_OP_NEW,
707 /* generate pkt and enqueue */
708 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
710 printf("%d: Failed to enqueue\n", __LINE__);
714 rte_service_run_iter_on_app_lcore(t->service_id, 1);
716 /* Check stats for all NUM_PKTS arrived to sched core */
717 struct test_event_dev_stats stats;
719 err = test_event_dev_stats_get(evdev, &stats);
721 printf("%d: failed to get stats\n", __LINE__);
724 if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
725 printf("%d: Sched core didn't receive all %d pkts\n",
727 rte_event_dev_dump(evdev, stdout);
735 /******** DEQ QID 1 *******/
738 p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
740 rte_pktmbuf_free(ev.mbuf);
743 if (deq_pkts != NUM_PKTS/2) {
744 printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
749 /******** DEQ QID 2 *******/
753 p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
755 rte_pktmbuf_free(ev.mbuf);
757 if (deq_pkts != NUM_PKTS/2) {
758 printf("%d: Half of NUM_PKTS didn't arrive at port 2\n",
/* abuse_inflights(): enqueue a RELEASE op with no prior NEW event and check
 * the scheduler neither counts rx/tx packets nor leaves anything inflight --
 * i.e. a spurious release is handled gracefully. */
768 abuse_inflights(struct test *t)
770 const int rx_enq = 0;
771 const int wrk_enq = 2;
774 /* Create instance with 4 ports */
775 if (init(t, 1, 4) < 0 ||
776 create_ports(t, 4) < 0 ||
777 create_atomic_qids(t, 1) < 0) {
778 printf("%d: Error initializing device\n", __LINE__);
782 /* CQ mapping to QID */
/* nb_links == 0 links the port to all configured queues. */
783 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
785 printf("%d: error mapping lb qid\n", __LINE__);
790 if (rte_event_dev_start(evdev) < 0) {
791 printf("%d: Error with start call\n", __LINE__);
795 /* Enqueue op only */
796 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
798 printf("%d: Failed to enqueue\n", __LINE__);
803 rte_service_run_iter_on_app_lcore(t->service_id, 1);
805 struct test_event_dev_stats stats;
807 err = test_event_dev_stats_get(evdev, &stats);
809 printf("%d: failed to get stats\n", __LINE__);
813 if (stats.rx_pkts != 0 ||
814 stats.tx_pkts != 0 ||
815 stats.port_inflight[wrk_enq] != 0) {
816 printf("%d: Sched core didn't handle pkt as expected\n",
/* xstats_tests(): exhaustive check of the SW PMD's extended statistics API:
 * expected stat counts per mode (device/port/queue), a negative lookup,
 * expected values after enqueuing 3 packets, and that xstats_reset() zeroes
 * the resettable counters while leaving gauges (ring free/used) intact.
 * NOTE(review): the hard-coded expected arrays are tightly coupled to the
 * SW PMD's stat layout and will break when stats are added/reordered. */
826 xstats_tests(struct test *t)
828 const int wrk_enq = 2;
831 /* Create instance with 4 ports */
832 if (init(t, 1, 4) < 0 ||
833 create_ports(t, 4) < 0 ||
834 create_atomic_qids(t, 1) < 0) {
835 printf("%d: Error initializing device\n", __LINE__);
839 /* CQ mapping to QID */
840 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
842 printf("%d: error mapping lb qid\n", __LINE__);
847 if (rte_event_dev_start(evdev) < 0) {
848 printf("%d: Error with start call\n", __LINE__);
852 const uint32_t XSTATS_MAX = 1024;
855 uint32_t ids[XSTATS_MAX];
856 uint64_t values[XSTATS_MAX];
857 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
859 for (i = 0; i < XSTATS_MAX; i++)
862 /* Device names / values */
863 int ret = rte_event_dev_xstats_names_get(evdev,
864 RTE_EVENT_DEV_XSTATS_DEVICE,
865 0, xstats_names, ids, XSTATS_MAX);
867 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
870 ret = rte_event_dev_xstats_get(evdev,
871 RTE_EVENT_DEV_XSTATS_DEVICE,
872 0, ids, values, ret);
874 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
878 /* Port names / values */
879 ret = rte_event_dev_xstats_names_get(evdev,
880 RTE_EVENT_DEV_XSTATS_PORT, 0,
881 xstats_names, ids, XSTATS_MAX);
883 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
886 ret = rte_event_dev_xstats_get(evdev,
887 RTE_EVENT_DEV_XSTATS_PORT, 0,
890 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
894 /* Queue names / values */
895 ret = rte_event_dev_xstats_names_get(evdev,
896 RTE_EVENT_DEV_XSTATS_QUEUE,
897 0, xstats_names, ids, XSTATS_MAX);
899 printf("%d: expected 16 stats, got return %d\n", __LINE__, ret);
903 /* NEGATIVE TEST: with wrong queue passed, 0 stats should be returned */
/* Only queue 0 exists; querying queue 1 must yield -EINVAL. */
904 ret = rte_event_dev_xstats_get(evdev,
905 RTE_EVENT_DEV_XSTATS_QUEUE,
906 1, ids, values, ret);
907 if (ret != -EINVAL) {
908 printf("%d: expected 0 stats, got return %d\n", __LINE__, ret);
912 ret = rte_event_dev_xstats_get(evdev,
913 RTE_EVENT_DEV_XSTATS_QUEUE,
914 0, ids, values, ret);
916 printf("%d: expected 16 stats, got return %d\n", __LINE__, ret);
920 /* enqueue packets to check values */
921 for (i = 0; i < 3; i++) {
923 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
925 printf("%d: gen of pkt failed\n", __LINE__);
928 ev.queue_id = t->qid[i];
929 ev.op = RTE_EVENT_OP_NEW;
934 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
936 printf("%d: Failed to enqueue\n", __LINE__);
941 rte_service_run_iter_on_app_lcore(t->service_id, 1);
943 /* Device names / values */
944 int num_stats = rte_event_dev_xstats_names_get(evdev,
945 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
946 xstats_names, ids, XSTATS_MAX);
949 ret = rte_event_dev_xstats_get(evdev,
950 RTE_EVENT_DEV_XSTATS_DEVICE,
951 0, ids, values, num_stats);
/* After 3 enqueues + 1 schedule: rx=3, tx=3, drop=0, sched_calls=1. */
952 static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
953 for (i = 0; (signed int)i < ret; i++) {
954 if (expected[i] != values[i]) {
956 "%d Error xstat %d (id %d) %s : %"PRIu64
957 ", expect %"PRIu64"\n",
958 __LINE__, i, ids[i], xstats_names[i].name,
959 values[i], expected[i]);
964 ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
967 /* ensure reset statistics are zero-ed */
968 static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
969 ret = rte_event_dev_xstats_get(evdev,
970 RTE_EVENT_DEV_XSTATS_DEVICE,
971 0, ids, values, num_stats);
972 for (i = 0; (signed int)i < ret; i++) {
973 if (expected_zero[i] != values[i]) {
975 "%d Error, xstat %d (id %d) %s : %"PRIu64
976 ", expect %"PRIu64"\n",
977 __LINE__, i, ids[i], xstats_names[i].name,
978 values[i], expected_zero[i]);
983 /* port reset checks */
984 num_stats = rte_event_dev_xstats_names_get(evdev,
985 RTE_EVENT_DEV_XSTATS_PORT, 0,
986 xstats_names, ids, XSTATS_MAX);
989 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT,
990 0, ids, values, num_stats);
992 static const uint64_t port_expected[] = {
997 0 /* avg pkt cycles */,
999 0 /* rx ring used */,
1000 4096 /* rx ring free */,
1001 0 /* cq ring used */,
1002 32 /* cq ring free */,
1003 0 /* dequeue calls */,
1004 /* 10 dequeue burst buckets */
1008 if (ret != RTE_DIM(port_expected)) {
1010 "%s %d: wrong number of port stats (%d), expected %zu\n",
1011 __func__, __LINE__, ret, RTE_DIM(port_expected));
1014 for (i = 0; (signed int)i < ret; i++) {
1015 if (port_expected[i] != values[i]) {
1017 "%s : %d: Error stat %s is %"PRIu64
1018 ", expected %"PRIu64"\n",
1019 __func__, __LINE__, xstats_names[i].name,
1020 values[i], port_expected[i]);
1025 ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1028 /* ensure reset statistics are zero-ed */
/* Ring free/used entries are gauges and keep their values after reset. */
1029 static const uint64_t port_expected_zero[] = {
1034 0 /* avg pkt cycles */,
1036 0 /* rx ring used */,
1037 4096 /* rx ring free */,
1038 0 /* cq ring used */,
1039 32 /* cq ring free */,
1040 0 /* dequeue calls */,
1041 /* 10 dequeue burst buckets */
1045 ret = rte_event_dev_xstats_get(evdev,
1046 RTE_EVENT_DEV_XSTATS_PORT,
1047 0, ids, values, num_stats);
1048 for (i = 0; (signed int)i < ret; i++) {
1049 if (port_expected_zero[i] != values[i]) {
1051 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1052 ", expect %"PRIu64"\n",
1053 __LINE__, i, ids[i], xstats_names[i].name,
1054 values[i], port_expected_zero[i]);
1059 /* QUEUE STATS TESTS */
1060 num_stats = rte_event_dev_xstats_names_get(evdev,
1061 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1062 xstats_names, ids, XSTATS_MAX);
1063 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1064 0, ids, values, num_stats);
1066 printf("xstats get returned %d\n", ret);
1069 if ((unsigned int)ret > XSTATS_MAX)
1070 printf("%s %d: more xstats available than space\n",
1071 __func__, __LINE__);
1073 static const uint64_t queue_expected[] = {
1078 0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
1079 /* QID-to-Port: pinned_flows, packets */
1085 for (i = 0; (signed int)i < ret; i++) {
1086 if (queue_expected[i] != values[i]) {
1088 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1089 ", expect %"PRIu64"\n",
1090 __LINE__, i, ids[i], xstats_names[i].name,
1091 values[i], queue_expected[i]);
1096 /* Reset the queue stats here */
1097 ret = rte_event_dev_xstats_reset(evdev,
1098 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1102 /* Verify that the resetable stats are reset, and others are not */
1103 static const uint64_t queue_expected_zero[] = {
1108 0, 0, 0, 0, /* 4 iq used */
1109 /* QID-to-Port: pinned_flows, packets */
1116 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1117 ids, values, num_stats);
1119 for (i = 0; (signed int)i < ret; i++) {
1120 if (queue_expected_zero[i] != values[i]) {
1122 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1123 ", expect %"PRIu64"\n",
1124 __LINE__, i, ids[i], xstats_names[i].name,
1125 values[i], queue_expected_zero[i]);
1130 printf("%d : %d of values were not as expected above\n",
1139 rte_event_dev_dump(0, stdout);
/* xstats_id_abuse_tests(): pass out-of-range port/queue ids (UINT8_MAX-1)
 * to the xstats-names API and verify it reports zero stats instead of
 * crashing or returning garbage.  Device mode is skipped because it ignores
 * the id argument. */
1146 xstats_id_abuse_tests(struct test *t)
1149 const uint32_t XSTATS_MAX = 1024;
1150 const uint32_t link_port = 2;
1152 uint32_t ids[XSTATS_MAX];
1153 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1155 /* Create instance with 4 ports */
1156 if (init(t, 1, 4) < 0 ||
1157 create_ports(t, 4) < 0 ||
1158 create_atomic_qids(t, 1) < 0) {
1159 printf("%d: Error initializing device\n", __LINE__);
1163 err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0);
1165 printf("%d: error mapping lb qid\n", __LINE__);
1169 if (rte_event_dev_start(evdev) < 0) {
1170 printf("%d: Error with start call\n", __LINE__);
1174 /* no test for device, as it ignores the port/q number */
1175 int num_stats = rte_event_dev_xstats_names_get(evdev,
1176 RTE_EVENT_DEV_XSTATS_PORT,
1177 UINT8_MAX-1, xstats_names, ids,
1179 if (num_stats != 0) {
1180 printf("%d: expected %d stats, got return %d\n", __LINE__,
1185 num_stats = rte_event_dev_xstats_names_get(evdev,
1186 RTE_EVENT_DEV_XSTATS_QUEUE,
1187 UINT8_MAX-1, xstats_names, ids,
1189 if (num_stats != 0) {
1190 printf("%d: expected %d stats, got return %d\n", __LINE__,
/* port_reconfig_credits(): repeatedly (32x) reconfigure queue 0 and port 0,
 * restart the device, push NPKTS packets through, and dequeue them --
 * verifying that credits are not leaked across stop/reconfigure/start
 * cycles. */
1203 port_reconfig_credits(struct test *t)
1205 if (init(t, 1, 1) < 0) {
1206 printf("%d: Error initializing device\n", __LINE__);
1211 const uint32_t NUM_ITERS = 32;
1212 for (i = 0; i < NUM_ITERS; i++) {
1213 const struct rte_event_queue_conf conf = {
1214 .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1215 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1216 .nb_atomic_flows = 1024,
1217 .nb_atomic_order_sequences = 1024,
1219 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1220 printf("%d: error creating qid\n", __LINE__);
1225 static const struct rte_event_port_conf port_conf = {
1226 .new_event_threshold = 128,
1227 .dequeue_depth = 32,
1228 .enqueue_depth = 64,
1230 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1231 printf("%d Error setting up port\n", __LINE__);
/* nb_links == 0 => link port 0 to every configured queue. */
1235 int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
1237 printf("%d: error mapping lb qid\n", __LINE__);
1241 if (rte_event_dev_start(evdev) < 0) {
1242 printf("%d: Error with start call\n", __LINE__);
1246 const uint32_t NPKTS = 1;
1248 for (j = 0; j < NPKTS; j++) {
1249 struct rte_event ev;
1250 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1252 printf("%d: gen of pkt failed\n", __LINE__);
1255 ev.queue_id = t->qid[0];
1256 ev.op = RTE_EVENT_OP_NEW;
1258 int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
1260 printf("%d: Failed to enqueue\n", __LINE__);
1261 rte_event_dev_dump(0, stdout);
1266 rte_service_run_iter_on_app_lcore(t->service_id, 1);
1268 struct rte_event ev[NPKTS];
1269 int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
1272 printf("%d error; no packet dequeued\n", __LINE__);
1274 /* let cleanup below stop the device on last iter */
1275 if (i != NUM_ITERS-1)
1276 rte_event_dev_stop(evdev);
/* port_single_lb_reconfig(): mixed-queue reconfiguration check -- one
 * load-balanced atomic queue and one single-link queue; link, unlink and
 * re-link port 0 to the LB queue, then also link port 1 and start.
 * Exercises link bookkeeping when LB and directed queues coexist. */
1287 port_single_lb_reconfig(struct test *t)
1289 if (init(t, 2, 2) < 0) {
1290 printf("%d: Error initializing device\n", __LINE__);
1294 static const struct rte_event_queue_conf conf_lb_atomic = {
1295 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1296 .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1297 .nb_atomic_flows = 1024,
1298 .nb_atomic_order_sequences = 1024,
1300 if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
1301 printf("%d: error creating qid\n", __LINE__);
1305 static const struct rte_event_queue_conf conf_single_link = {
1306 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1307 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
1309 if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
1310 printf("%d: error creating qid\n", __LINE__);
1314 struct rte_event_port_conf port_conf = {
1315 .new_event_threshold = 128,
1316 .dequeue_depth = 32,
1317 .enqueue_depth = 64,
1319 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1320 printf("%d Error setting up port\n", __LINE__);
1323 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
1324 printf("%d Error setting up port\n", __LINE__);
1328 /* link port to lb queue */
1329 uint8_t queue_id = 0;
1330 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1331 printf("%d: error creating link for qid\n", __LINE__);
1335 int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
1337 printf("%d: Error unlinking lb port\n", __LINE__);
/* Re-link after unlink: must succeed again with exactly 1 link. */
1342 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1343 printf("%d: error creating link for qid\n", __LINE__);
1348 int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
1350 printf("%d: error mapping lb qid\n", __LINE__);
1354 if (rte_event_dev_start(evdev) < 0) {
1355 printf("%d: Error with start call\n", __LINE__);
/* xstats_brute_force(): fuzz the xstats API -- for every mode
 * (DEVICE/PORT/QUEUE) and every id 0..UINT8_MAX-1, call names_get and get,
 * ignoring return values.  Passes if nothing crashes. */
1367 xstats_brute_force(struct test *t)
1370 const uint32_t XSTATS_MAX = 1024;
1371 uint32_t ids[XSTATS_MAX];
1372 uint64_t values[XSTATS_MAX];
1373 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1376 /* Create instance with 4 ports */
1377 if (init(t, 1, 4) < 0 ||
1378 create_ports(t, 4) < 0 ||
1379 create_atomic_qids(t, 1) < 0) {
1380 printf("%d: Error initializing device\n", __LINE__);
1384 int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1386 printf("%d: error mapping lb qid\n", __LINE__);
1390 if (rte_event_dev_start(evdev) < 0) {
1391 printf("%d: Error with start call\n", __LINE__);
1395 for (i = 0; i < XSTATS_MAX; i++)
/* Modes are contiguous enum values starting at DEVICE. */
1398 for (i = 0; i < 3; i++) {
1399 uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i;
1401 for (j = 0; j < UINT8_MAX; j++) {
1402 rte_event_dev_xstats_names_get(evdev, mode,
1403 j, xstats_names, ids, XSTATS_MAX);
1405 rte_event_dev_xstats_get(evdev, mode, j, ids,
1406 values, XSTATS_MAX);
1418 xstats_id_reset_tests(struct test *t)
1420 const int wrk_enq = 2;
1423 /* Create instance with 4 ports */
1424 if (init(t, 1, 4) < 0 ||
1425 create_ports(t, 4) < 0 ||
1426 create_atomic_qids(t, 1) < 0) {
1427 printf("%d: Error initializing device\n", __LINE__);
1431 /* CQ mapping to QID */
1432 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
1434 printf("%d: error mapping lb qid\n", __LINE__);
1438 if (rte_event_dev_start(evdev) < 0) {
1439 printf("%d: Error with start call\n", __LINE__);
1443 #define XSTATS_MAX 1024
1446 uint32_t ids[XSTATS_MAX];
1447 uint64_t values[XSTATS_MAX];
1448 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1450 for (i = 0; i < XSTATS_MAX; i++)
1453 #define NUM_DEV_STATS 6
1454 /* Device names / values */
1455 int num_stats = rte_event_dev_xstats_names_get(evdev,
1456 RTE_EVENT_DEV_XSTATS_DEVICE,
1457 0, xstats_names, ids, XSTATS_MAX);
1458 if (num_stats != NUM_DEV_STATS) {
1459 printf("%d: expected %d stats, got return %d\n", __LINE__,
1460 NUM_DEV_STATS, num_stats);
1463 ret = rte_event_dev_xstats_get(evdev,
1464 RTE_EVENT_DEV_XSTATS_DEVICE,
1465 0, ids, values, num_stats);
1466 if (ret != NUM_DEV_STATS) {
1467 printf("%d: expected %d stats, got return %d\n", __LINE__,
1468 NUM_DEV_STATS, ret);
1473 for (i = 0; i < NPKTS; i++) {
1474 struct rte_event ev;
1475 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1477 printf("%d: gen of pkt failed\n", __LINE__);
1480 ev.queue_id = t->qid[i];
1481 ev.op = RTE_EVENT_OP_NEW;
1485 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1487 printf("%d: Failed to enqueue\n", __LINE__);
1492 rte_service_run_iter_on_app_lcore(t->service_id, 1);
1494 static const char * const dev_names[] = {
1495 "dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
1496 "dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
1498 uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
1499 for (i = 0; (int)i < ret; i++) {
1501 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1505 printf("%d: %s id incorrect, expected %d got %d\n",
1506 __LINE__, dev_names[i], i, id);
1509 if (val != dev_expected[i]) {
1510 printf("%d: %s value incorrect, expected %"
1511 PRIu64" got %d\n", __LINE__, dev_names[i],
1512 dev_expected[i], id);
1516 int reset_ret = rte_event_dev_xstats_reset(evdev,
1517 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
1521 printf("%d: failed to reset successfully\n", __LINE__);
1524 dev_expected[i] = 0;
1525 /* check value again */
1526 val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], 0);
1527 if (val != dev_expected[i]) {
1528 printf("%d: %s value incorrect, expected %"PRIu64
1529 " got %"PRIu64"\n", __LINE__, dev_names[i],
1530 dev_expected[i], val);
1535 /* 48 is stat offset from start of the devices whole xstats.
1536 * This WILL break every time we add a statistic to a port
1537 * or the device, but there is no other way to test
1540 /* num stats for the tested port. CQ size adds more stats to a port */
1541 #define NUM_PORT_STATS 21
1542 /* the port to test. */
1544 num_stats = rte_event_dev_xstats_names_get(evdev,
1545 RTE_EVENT_DEV_XSTATS_PORT, PORT,
1546 xstats_names, ids, XSTATS_MAX);
1547 if (num_stats != NUM_PORT_STATS) {
1548 printf("%d: expected %d stats, got return %d\n",
1549 __LINE__, NUM_PORT_STATS, num_stats);
1552 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT,
1553 ids, values, num_stats);
1555 if (ret != NUM_PORT_STATS) {
1556 printf("%d: expected %d stats, got return %d\n",
1557 __LINE__, NUM_PORT_STATS, ret);
1560 static const char * const port_names[] = {
1565 "port_2_avg_pkt_cycles",
1567 "port_2_rx_ring_used",
1568 "port_2_rx_ring_free",
1569 "port_2_cq_ring_used",
1570 "port_2_cq_ring_free",
1571 "port_2_dequeue_calls",
1572 "port_2_dequeues_returning_0",
1573 "port_2_dequeues_returning_1-4",
1574 "port_2_dequeues_returning_5-8",
1575 "port_2_dequeues_returning_9-12",
1576 "port_2_dequeues_returning_13-16",
1577 "port_2_dequeues_returning_17-20",
1578 "port_2_dequeues_returning_21-24",
1579 "port_2_dequeues_returning_25-28",
1580 "port_2_dequeues_returning_29-32",
1581 "port_2_dequeues_returning_33-36",
1583 uint64_t port_expected[] = {
1587 NPKTS, /* inflight */
1588 0, /* avg pkt cycles */
1590 0, /* rx ring used */
1591 4096, /* rx ring free */
1592 NPKTS, /* cq ring used */
1593 25, /* cq ring free */
1594 0, /* dequeue zero calls */
1595 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1598 uint64_t port_expected_zero[] = {
1602 NPKTS, /* inflight */
1603 0, /* avg pkt cycles */
1605 0, /* rx ring used */
1606 4096, /* rx ring free */
1607 NPKTS, /* cq ring used */
1608 25, /* cq ring free */
1609 0, /* dequeue zero calls */
1610 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1613 if (RTE_DIM(port_expected) != NUM_PORT_STATS ||
1614 RTE_DIM(port_names) != NUM_PORT_STATS) {
1615 printf("%d: port array of wrong size\n", __LINE__);
1620 for (i = 0; (int)i < ret; i++) {
1622 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1625 if (id != i + PORT_OFF) {
1626 printf("%d: %s id incorrect, expected %d got %d\n",
1627 __LINE__, port_names[i], i+PORT_OFF,
1631 if (val != port_expected[i]) {
1632 printf("%d: %s value incorrect, expected %"PRIu64
1633 " got %d\n", __LINE__, port_names[i],
1634 port_expected[i], id);
1638 int reset_ret = rte_event_dev_xstats_reset(evdev,
1639 RTE_EVENT_DEV_XSTATS_PORT, PORT,
1643 printf("%d: failed to reset successfully\n", __LINE__);
1646 /* check value again */
1647 val = rte_event_dev_xstats_by_name_get(evdev, port_names[i], 0);
1648 if (val != port_expected_zero[i]) {
1649 printf("%d: %s value incorrect, expected %"PRIu64
1650 " got %"PRIu64"\n", __LINE__, port_names[i],
1651 port_expected_zero[i], val);
1658 /* num queue stats */
1659 #define NUM_Q_STATS 16
1660 /* queue offset from start of the devices whole xstats.
1661 * This will break every time we add a statistic to a device/port/queue
1663 #define QUEUE_OFF 90
1664 const uint32_t queue = 0;
1665 num_stats = rte_event_dev_xstats_names_get(evdev,
1666 RTE_EVENT_DEV_XSTATS_QUEUE, queue,
1667 xstats_names, ids, XSTATS_MAX);
1668 if (num_stats != NUM_Q_STATS) {
1669 printf("%d: expected %d stats, got return %d\n",
1670 __LINE__, NUM_Q_STATS, num_stats);
1673 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1674 queue, ids, values, num_stats);
1675 if (ret != NUM_Q_STATS) {
1676 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
1679 static const char * const queue_names[] = {
1688 "qid_0_port_0_pinned_flows",
1689 "qid_0_port_0_packets",
1690 "qid_0_port_1_pinned_flows",
1691 "qid_0_port_1_packets",
1692 "qid_0_port_2_pinned_flows",
1693 "qid_0_port_2_packets",
1694 "qid_0_port_3_pinned_flows",
1695 "qid_0_port_3_packets",
1697 uint64_t queue_expected[] = {
1706 /* QID-to-Port: pinned_flows, packets */
1712 uint64_t queue_expected_zero[] = {
1721 /* QID-to-Port: pinned_flows, packets */
1727 if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
1728 RTE_DIM(queue_expected_zero) != NUM_Q_STATS ||
1729 RTE_DIM(queue_names) != NUM_Q_STATS) {
1730 printf("%d : queue array of wrong size\n", __LINE__);
1735 for (i = 0; (int)i < ret; i++) {
1737 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1740 if (id != i + QUEUE_OFF) {
1741 printf("%d: %s id incorrect, expected %d got %d\n",
1742 __LINE__, queue_names[i], i+QUEUE_OFF,
1746 if (val != queue_expected[i]) {
1747 printf("%d: %d: %s value , expected %"PRIu64
1748 " got %"PRIu64"\n", i, __LINE__,
1749 queue_names[i], queue_expected[i], val);
1753 int reset_ret = rte_event_dev_xstats_reset(evdev,
1754 RTE_EVENT_DEV_XSTATS_QUEUE,
1757 printf("%d: failed to reset successfully\n", __LINE__);
1760 /* check value again */
1761 val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i],
1763 if (val != queue_expected_zero[i]) {
1764 printf("%d: %s value incorrect, expected %"PRIu64
1765 " got %"PRIu64"\n", __LINE__, queue_names[i],
1766 queue_expected_zero[i], val);
/* Verify that an ORDERED queue can be set up twice on the same queue id
 * (i.e. reconfigured) without error, and that the device then starts.
 * NOTE(review): error-path cleanup/return lines are not visible in this
 * fragment — confirm against full source.
 */
1782 ordered_reconfigure(struct test *t)
1784 	if (init(t, 1, 1) < 0 ||
1785 			create_ports(t, 1) < 0) {
1786 		printf("%d: Error initializing device\n", __LINE__);
/* Queue config: ordered scheduling with 1024 flows/order sequences. */
1790 	const struct rte_event_queue_conf conf = {
1791 			.schedule_type = RTE_SCHED_TYPE_ORDERED,
1792 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1793 			.nb_atomic_flows = 1024,
1794 			.nb_atomic_order_sequences = 1024,
/* First setup of queue 0 must succeed. */
1797 	if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1798 		printf("%d: error creating qid\n", __LINE__);
/* Second setup of the SAME queue id — the reconfigure under test. */
1802 	if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1803 		printf("%d: error creating qid, for 2nd time\n", __LINE__);
/* Link all queues to port 0 so start() has a valid configuration. */
1807 	rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1808 	if (rte_event_dev_start(evdev) < 0) {
1809 		printf("%d: Error with start call\n", __LINE__);
/* Check that QID priority (not ingress order) determines dequeue order:
 * three atomic queues of decreasing priority value (0 == highest) feed one
 * port; the three enqueued packets must come back highest-priority first.
 */
1821 qid_priorities(struct test *t)
1823 	/* Test works by having a CQ with enough empty space for all packets,
1824 	 * and enqueueing 3 packets to 3 QIDs. They must return based on the
1825 	 * priority of the QID, not the ingress order, to pass the test
1828 	/* Create instance with 1 ports, and 3 qids */
1829 	if (init(t, 3, 1) < 0 ||
1830 			create_ports(t, 1) < 0) {
1831 		printf("%d: Error initializing device\n", __LINE__);
1835 	for (i = 0; i < 3; i++) {
1837 		const struct rte_event_queue_conf conf = {
1838 			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
1839 			/* increase priority (0 == highest), as we go */
1840 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
1841 			.nb_atomic_flows = 1024,
1842 			.nb_atomic_order_sequences = 1024,
1845 		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
1846 			printf("%d: error creating qid %d\n", __LINE__, i);
1852 	/* map all QIDs to port */
1853 	rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1855 	if (rte_event_dev_start(evdev) < 0) {
1856 		printf("%d: Error with start call\n", __LINE__);
1860 	/* enqueue 3 packets, setting seqn and QID to check priority */
1861 	for (i = 0; i < 3; i++) {
1862 		struct rte_event ev;
1863 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1865 			printf("%d: gen of pkt failed\n", __LINE__);
1868 		ev.queue_id = t->qid[i];
1869 		ev.op = RTE_EVENT_OP_NEW;
1873 		int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1875 			printf("%d: Failed to enqueue\n", __LINE__);
/* Run one scheduler iteration so the events are distributed to the CQ. */
1880 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
1882 	/* dequeue packets, verify priority was upheld */
1883 	struct rte_event ev[32];
1885 		rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
1886 	if (deq_pkts != 3) {
1887 		printf("%d: failed to deq packets\n", __LINE__);
1888 		rte_event_dev_dump(evdev, stdout);
/* Packet enqueued last (qid 2, highest prio) must come out first, hence
 * expected seqn is 2-i at position i. */
1891 	for (i = 0; i < 3; i++) {
1892 		if (ev[i].mbuf->seqn != 2-i) {
1894 				"%d: qid priority test: seqn %d incorrectly prioritized\n",
/* Exercise rte_event_port_unlink()/unlinks_in_progress(): after requesting
 * unlink of all 3 queues the in-progress count must be 3, and must drop to
 * 0 once the scheduler service runs and acknowledges the unlinks.
 */
1904 unlink_in_progress(struct test *t)
1906 	/* Test unlinking API, in particular that when an unlink request has
1907 	 * not yet been seen by the scheduler thread, that the
1908 	 * unlink_in_progress() function returns the number of unlinks.
1911 	/* Create instance with 1 ports, and 3 qids */
1912 	if (init(t, 3, 1) < 0 ||
1913 			create_ports(t, 1) < 0) {
1914 		printf("%d: Error initializing device\n", __LINE__);
1918 	for (i = 0; i < 3; i++) {
1920 		const struct rte_event_queue_conf conf = {
1921 			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
1922 			/* increase priority (0 == highest), as we go */
1923 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
1924 			.nb_atomic_flows = 1024,
1925 			.nb_atomic_order_sequences = 1024,
1928 		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
1929 			printf("%d: error creating qid %d\n", __LINE__, i);
1935 	/* map all QIDs to port */
1936 	rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1938 	if (rte_event_dev_start(evdev) < 0) {
1939 		printf("%d: Error with start call\n", __LINE__);
1943 	/* unlink all ports to have outstanding unlink requests */
1944 	int ret = rte_event_port_unlink(evdev, t->port[0], NULL, 0);
1946 		printf("%d: Failed to unlink queues\n", __LINE__);
1950 	/* get active unlinks here, expect 3 */
1951 	int unlinks_in_progress =
1952 		rte_event_port_unlinks_in_progress(evdev, t->port[0]);
1953 	if (unlinks_in_progress != 3) {
1954 		printf("%d: Expected num unlinks in progress == 3, got %d\n",
1955 			__LINE__, unlinks_in_progress);
1959 	/* run scheduler service on this thread to ack the unlinks */
1960 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
1962 	/* active unlinks expected as 0 as scheduler thread has acked */
1963 	unlinks_in_progress =
1964 		rte_event_port_unlinks_in_progress(evdev, t->port[0]);
1965 	if (unlinks_in_progress != 0) {
1966 		printf("%d: Expected num unlinks in progress == 0, got %d\n",
1967 			__LINE__, unlinks_in_progress);
/* Verify atomic-queue load balancing: one atomic QID linked to three worker
 * ports (1-3); a fixed flow pattern must land with the expected per-port
 * inflight counts (4/2/3 on ports 1/2/3).
 */
1975 load_balancing(struct test *t)
1977 	const int rx_enq = 0;
1981 	if (init(t, 1, 4) < 0 ||
1982 			create_ports(t, 4) < 0 ||
1983 			create_atomic_qids(t, 1) < 0) {
1984 		printf("%d: Error initializing device\n", __LINE__);
1988 	for (i = 0; i < 3; i++) {
1989 		/* map port 1 - 3 inclusive */
1990 		if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
1992 			printf("%d: error mapping qid to port %d\n",
1998 	if (rte_event_dev_start(evdev) < 0) {
1999 		printf("%d: Error with start call\n", __LINE__);
2003 	/************** FORWARD ****************/
2005 	 * Create a set of flows that test the load-balancing operation of the
2006 	 * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
2007 	 * with a new flow, which should be sent to the 3rd mapped CQ
2009 	static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
2011 	for (i = 0; i < RTE_DIM(flows); i++) {
2012 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2014 			printf("%d: gen of pkt failed\n", __LINE__);
2018 		struct rte_event ev = {
2019 				.op = RTE_EVENT_OP_NEW,
2020 				.queue_id = t->qid[0],
2021 				.flow_id = flows[i],
2024 		/* generate pkt and enqueue */
2025 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2027 			printf("%d: Failed to enqueue\n", __LINE__);
/* One scheduler pass distributes the 9 events across the worker CQs. */
2032 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2034 	struct test_event_dev_stats stats;
2035 	err = test_event_dev_stats_get(evdev, &stats);
2037 		printf("%d: failed to get stats\n", __LINE__);
/* Flow 0 (4 pkts) pins to port 1, flow 1 (2 pkts) to port 2,
 * flow 2 (3 pkts) to port 3. */
2041 	if (stats.port_inflight[1] != 4) {
2042 		printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2046 	if (stats.port_inflight[2] != 2) {
2047 		printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2051 	if (stats.port_inflight[3] != 3) {
2052 		printf("%d:%s: port 3 inflight not correct\n", __LINE__,
/* Verify that a completed flow can migrate to a different CQ: fill CQs with
 * flows 0/1/2, release flow 0 from port 1, then enqueue a second flow set
 * and check the resulting per-port inflight counts (3/4/2).
 */
2062 load_balancing_history(struct test *t)
2064 	struct test_event_dev_stats stats = {0};
2065 	const int rx_enq = 0;
2069 	/* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
2070 	if (init(t, 1, 4) < 0 ||
2071 			create_ports(t, 4) < 0 ||
2072 			create_atomic_qids(t, 1) < 0)
2075 	/* CQ mapping to QID */
2076 	if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
2077 		printf("%d: error mapping port 1 qid\n", __LINE__);
2080 	if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
2081 		printf("%d: error mapping port 2 qid\n", __LINE__);
2084 	if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
2085 		printf("%d: error mapping port 3 qid\n", __LINE__);
2088 	if (rte_event_dev_start(evdev) < 0) {
2089 		printf("%d: Error with start call\n", __LINE__);
2094 	 * Create a set of flows that test the load-balancing operation of the
2095 	 * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
2096 	 * the packet from CQ 0, send in a new set of flows. Ensure that:
2097 	 *  1. The new flow 3 gets into the empty CQ0
2098 	 *  2. packets for existing flow gets added into CQ1
2099 	 *  3. Next flow 0 pkt is now onto CQ2, since CQ0 and CQ1 now contain
2100 	 *     more outstanding pkts
2102 	 * This test makes sure that when a flow ends (i.e. all packets
2103 	 * have been completed for that flow), that the flow can be moved
2104 	 * to a different CQ when new packets come in for that flow.
2106 	static uint32_t flows1[] = {0, 1, 1, 2};
2108 	for (i = 0; i < RTE_DIM(flows1); i++) {
2109 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2110 		struct rte_event ev = {
2111 				.flow_id = flows1[i],
2112 				.op = RTE_EVENT_OP_NEW,
2113 				.queue_id = t->qid[0],
2114 				.event_type = RTE_EVENT_TYPE_CPU,
2115 				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2120 			printf("%d: gen of pkt failed\n", __LINE__);
/* Mirror flow id into the mbuf RSS hash so the dequeued packet can be
 * identified below. */
2123 		arp->hash.rss = flows1[i];
2124 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2126 			printf("%d: Failed to enqueue\n", __LINE__);
2131 	/* call the scheduler */
2132 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2134 	/* Dequeue the flow 0 packet from port 1, so that we can then drop */
2135 	struct rte_event ev;
2136 	if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
2137 		printf("%d: failed to dequeue\n", __LINE__);
2140 	if (ev.mbuf->hash.rss != flows1[0]) {
2141 		printf("%d: unexpected flow received\n", __LINE__);
2145 	/* drop the flow 0 packet from port 1 */
2146 	rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
2148 	/* call the scheduler */
2149 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2152 	 * Set up the next set of flows, first a new flow to fill up
2153 	 * CQ 0, so that the next flow 0 packet should go to CQ2
2155 	static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
2157 	for (i = 0; i < RTE_DIM(flows2); i++) {
2158 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2159 		struct rte_event ev = {
2160 				.flow_id = flows2[i],
2161 				.op = RTE_EVENT_OP_NEW,
2162 				.queue_id = t->qid[0],
2163 				.event_type = RTE_EVENT_TYPE_CPU,
2164 				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2169 			printf("%d: gen of pkt failed\n", __LINE__);
2172 		arp->hash.rss = flows2[i];
2174 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2176 			printf("%d: Failed to enqueue\n", __LINE__);
2182 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2184 	err = test_event_dev_stats_get(evdev, &stats);
2186 		printf("%d:failed to get stats\n", __LINE__);
2191 	 * Now check the resulting inflights on each port.
2193 	if (stats.port_inflight[1] != 3) {
2194 		printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2196 		printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2197 				(unsigned int)stats.port_inflight[1],
2198 				(unsigned int)stats.port_inflight[2],
2199 				(unsigned int)stats.port_inflight[3]);
2202 	if (stats.port_inflight[2] != 4) {
2203 		printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2205 		printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2206 				(unsigned int)stats.port_inflight[1],
2207 				(unsigned int)stats.port_inflight[2],
2208 				(unsigned int)stats.port_inflight[3]);
2211 	if (stats.port_inflight[3] != 2) {
2212 		printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2214 		printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2215 				(unsigned int)stats.port_inflight[1],
2216 				(unsigned int)stats.port_inflight[2],
2217 				(unsigned int)stats.port_inflight[3]);
/* Drain all worker ports, releasing each event so inflights return to 0
 * before the test tears down. */
2221 	for (i = 1; i <= 3; i++) {
2222 		struct rte_event ev;
2223 		while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
2224 			rte_event_enqueue_burst(evdev, i, &release_ev, 1);
2226 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
/* Enqueue an event with an out-of-range queue_id and verify it is counted
 * as a per-port rx drop (port_rx_dropped == 1) without incrementing
 * inflights or the device-level rx_dropped counter.
 */
2233 invalid_qid(struct test *t)
2235 	struct test_event_dev_stats stats;
2236 	const int rx_enq = 0;
2240 	if (init(t, 1, 4) < 0 ||
2241 			create_ports(t, 4) < 0 ||
2242 			create_atomic_qids(t, 1) < 0) {
2243 		printf("%d: Error initializing device\n", __LINE__);
2247 	/* CQ mapping to QID */
2248 	for (i = 0; i < 4; i++) {
2249 		err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
2252 			printf("%d: error mapping port 1 qid\n", __LINE__);
2257 	if (rte_event_dev_start(evdev) < 0) {
2258 		printf("%d: Error with start call\n", __LINE__);
2263 	 * Send in a packet with an invalid qid to the scheduler.
2264 	 * We should see the packed enqueued OK, but the inflights for
2265 	 * that packet should not be incremented, and the rx_dropped
2266 	 * should be incremented.
2268 	static uint32_t flows1[] = {20};
2270 	for (i = 0; i < RTE_DIM(flows1); i++) {
2271 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2273 			printf("%d: gen of pkt failed\n", __LINE__);
/* qid[0] + 20 is intentionally an invalid queue id for this device. */
2277 		struct rte_event ev = {
2278 				.op = RTE_EVENT_OP_NEW,
2279 				.queue_id = t->qid[0] + flows1[i],
2283 		/* generate pkt and enqueue */
2284 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2286 			printf("%d: Failed to enqueue\n", __LINE__);
2291 	/* call the scheduler */
2292 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2294 	err = test_event_dev_stats_get(evdev, &stats);
2296 		printf("%d: failed to get stats\n", __LINE__);
2301 	 * Now check the resulting inflights on the port, and the rx_dropped.
2303 	if (stats.port_inflight[0] != 0) {
2304 		printf("%d:%s: port 1 inflight count not correct\n", __LINE__,
2306 		rte_event_dev_dump(evdev, stdout);
2309 	if (stats.port_rx_dropped[0] != 1) {
2310 		printf("%d:%s: port 1 drops\n", __LINE__, __func__);
2311 		rte_event_dev_dump(evdev, stdout);
2314 	/* each packet drop should only be counted in one place - port or dev */
2315 	if (stats.rx_dropped != 0) {
2316 		printf("%d:%s: port 1 dropped count not correct\n", __LINE__,
2318 		rte_event_dev_dump(evdev, stdout);
/* End-to-end single packet test: enqueue one NEW event tagged with a magic
 * sequence number, schedule it to the worker port, dequeue it, verify stats
 * and seqn, then release it and confirm inflight returns to 0.
 */
2327 single_packet(struct test *t)
2329 	const uint32_t MAGIC_SEQN = 7321;
2330 	struct rte_event ev;
2331 	struct test_event_dev_stats stats;
2332 	const int rx_enq = 0;
2333 	const int wrk_enq = 2;
2336 	/* Create instance with 4 ports */
2337 	if (init(t, 1, 4) < 0 ||
2338 			create_ports(t, 4) < 0 ||
2339 			create_atomic_qids(t, 1) < 0) {
2340 		printf("%d: Error initializing device\n", __LINE__);
2344 	/* CQ mapping to QID */
2345 	err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
2347 		printf("%d: error mapping lb qid\n", __LINE__);
2352 	if (rte_event_dev_start(evdev) < 0) {
2353 		printf("%d: Error with start call\n", __LINE__);
2357 	/************** Gen pkt and enqueue ****************/
2358 	struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2360 		printf("%d: gen of pkt failed\n", __LINE__);
2364 	ev.op = RTE_EVENT_OP_NEW;
2365 	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
/* Tag the packet so we can verify the same mbuf comes back on dequeue. */
2369 	arp->seqn = MAGIC_SEQN;
2371 	err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2373 		printf("%d: Failed to enqueue\n", __LINE__);
2377 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2379 	err = test_event_dev_stats_get(evdev, &stats);
2381 		printf("%d: failed to get stats\n", __LINE__);
/* Exactly one rx, one tx, and one inflight on the worker port expected. */
2385 	if (stats.rx_pkts != 1 ||
2386 			stats.tx_pkts != 1 ||
2387 			stats.port_inflight[wrk_enq] != 1) {
2388 		printf("%d: Sched core didn't handle pkt as expected\n",
2390 		rte_event_dev_dump(evdev, stdout);
2396 	deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
2398 		printf("%d: Failed to deq\n", __LINE__);
2402 	err = test_event_dev_stats_get(evdev, &stats);
2404 		printf("%d: failed to get stats\n", __LINE__);
2408 	err = test_event_dev_stats_get(evdev, &stats);
2409 	if (ev.mbuf->seqn != MAGIC_SEQN) {
2410 		printf("%d: magic sequence number not dequeued\n", __LINE__);
/* Free the packet and send a RELEASE so the scheduler can retire it. */
2414 	rte_pktmbuf_free(ev.mbuf);
2415 	err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
2417 		printf("%d: Failed to enqueue\n", __LINE__);
2420 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2422 	err = test_event_dev_stats_get(evdev, &stats);
2423 	if (stats.port_inflight[wrk_enq] != 0) {
2424 		printf("%d: port inflight not correct\n", __LINE__);
/* Verify per-port inflight accounting across two QIDs/ports: inflights must
 * stay at the enqueued count after dequeue, and only drop to zero once the
 * worker enqueues RELEASE events and the scheduler runs.
 */
2433 inflight_counts(struct test *t)
2435 	struct rte_event ev;
2436 	struct test_event_dev_stats stats;
2437 	const int rx_enq = 0;
2443 	/* Create instance with 4 ports */
2444 	if (init(t, 2, 3) < 0 ||
2445 			create_ports(t, 3) < 0 ||
2446 			create_atomic_qids(t, 2) < 0) {
2447 		printf("%d: Error initializing device\n", __LINE__);
2451 	/* CQ mapping to QID */
2452 	err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
2454 		printf("%d: error mapping lb qid\n", __LINE__);
2458 	err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
2460 		printf("%d: error mapping lb qid\n", __LINE__);
2465 	if (rte_event_dev_start(evdev) < 0) {
2466 		printf("%d: Error with start call\n", __LINE__);
2470 	/************** FORWARD ****************/
/* Enqueue QID1_NUM packets to qid 0 (serviced by port p1). */
2472 	for (i = 0; i < QID1_NUM; i++) {
2473 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2476 			printf("%d: gen of pkt failed\n", __LINE__);
2480 		ev.queue_id =  t->qid[0];
2481 		ev.op = RTE_EVENT_OP_NEW;
2483 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2485 			printf("%d: Failed to enqueue\n", __LINE__);
/* Enqueue QID2_NUM packets to qid 1 (serviced by port p2). */
2490 	for (i = 0; i < QID2_NUM; i++) {
2491 		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2494 			printf("%d: gen of pkt failed\n", __LINE__);
2497 		ev.queue_id =  t->qid[1];
2498 		ev.op = RTE_EVENT_OP_NEW;
2500 		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2502 			printf("%d: Failed to enqueue\n", __LINE__);
2508 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2510 	err = test_event_dev_stats_get(evdev, &stats);
2512 		printf("%d: failed to get stats\n", __LINE__);
2516 	if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
2517 			stats.tx_pkts != QID1_NUM + QID2_NUM) {
2518 		printf("%d: Sched core didn't handle pkt as expected\n",
2523 	if (stats.port_inflight[p1] != QID1_NUM) {
2524 		printf("%d: %s port 1 inflight not correct\n", __LINE__,
2528 	if (stats.port_inflight[p2] != QID2_NUM) {
2529 		printf("%d: %s port 2 inflight not correct\n", __LINE__,
2534 	/************** DEQUEUE INFLIGHT COUNT CHECKS ****************/
/* Dequeue does NOT decrement inflight — only RELEASE does. */
2536 	struct rte_event events[QID1_NUM + QID2_NUM];
2537 	uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
2538 			RTE_DIM(events), 0);
2540 	if (deq_pkts != QID1_NUM) {
2541 		printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
2544 	err = test_event_dev_stats_get(evdev, &stats);
2545 	if (stats.port_inflight[p1] != QID1_NUM) {
2546 		printf("%d: port 1 inflight decrement after DEQ != 0\n",
2550 	for (i = 0; i < QID1_NUM; i++) {
2551 		err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
2554 			printf("%d: %s rte enqueue of inf release failed\n",
2555 				__LINE__, __func__);
2561 	 * As the scheduler core decrements inflights, it needs to run to
2562 	 * process packets to act on the drop messages
2564 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2566 	err = test_event_dev_stats_get(evdev, &stats);
2567 	if (stats.port_inflight[p1] != 0) {
2568 		printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
/* Repeat the same dequeue/release/inflight sequence for port p2. */
2573 	deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
2574 			RTE_DIM(events), 0);
2575 	if (deq_pkts != QID2_NUM) {
2576 		printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
2579 	err = test_event_dev_stats_get(evdev, &stats);
2580 	if (stats.port_inflight[p2] != QID2_NUM) {
2581 		printf("%d: port 1 inflight decrement after DEQ != 0\n",
2585 	for (i = 0; i < QID2_NUM; i++) {
2586 		err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
2589 			printf("%d: %s rte enqueue of inf release failed\n",
2590 				__LINE__, __func__);
2596 	 * As the scheduler core decrements inflights, it needs to run to
2597 	 * process packets to act on the drop messages
2599 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2601 	err = test_event_dev_stats_get(evdev, &stats);
2602 	if (stats.port_inflight[p2] != 0) {
2603 		printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
2610 	rte_event_dev_dump(evdev, stdout);
/* Shared body for ordered_basic/unordered_basic: push 3 tagged packets
 * through 3 worker ports, re-enqueue them in REVERSE order, and (when
 * check_order is set) verify the directed tx port restores the original
 * sequence-number order.
 *
 * @param check_order  non-zero: use ordered QIDs and assert seqn order;
 *                     zero: use unordered QIDs (no order assertion).
 */
2616 parallel_basic(struct test *t, int check_order)
2618 	const uint8_t rx_port = 0;
2619 	const uint8_t w1_port = 1;
2620 	const uint8_t w3_port = 3;
2621 	const uint8_t tx_port = 4;
2624 	uint32_t deq_pkts, j;
2625 	struct rte_mbuf *mbufs[3];
2626 	struct rte_mbuf *mbufs_out[3] = { 0 };
2627 	const uint32_t MAGIC_SEQN = 1234;
2629 	/* Create instance with 4 ports */
2630 	if (init(t, 2, tx_port + 1) < 0 ||
2631 			create_ports(t, tx_port + 1) < 0 ||
2632 			(check_order ?  create_ordered_qids(t, 1) :
2633 				create_unordered_qids(t, 1)) < 0 ||
2634 			create_directed_qids(t, 1, &tx_port)) {
2635 		printf("%d: Error initializing device\n", __LINE__);
2641 	 * We need three ports, all mapped to the same ordered qid0. Then we'll
2642 	 * take a packet out to each port, re-enqueue in reverse order,
2643 	 * then make sure the reordering has taken place properly when we
2644 	 * dequeue from the tx_port.
2646 	 * Simplified test setup diagram:
2650 	 * qid0 - w2_port - qid1
2654 	/* CQ mapping to QID for LB ports (directed mapped on create) */
2655 	for (i = w1_port; i <= w3_port; i++) {
2656 		err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
2659 			printf("%d: error mapping lb qid\n", __LINE__);
2665 	if (rte_event_dev_start(evdev) < 0) {
2666 		printf("%d: Error with start call\n", __LINE__);
2670 	/* Enqueue 3 packets to the rx port */
2671 	for (i = 0; i < 3; i++) {
2672 		struct rte_event ev;
2673 		mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
2675 			printf("%d: gen of pkt failed\n", __LINE__);
2679 		ev.queue_id = t->qid[0];
2680 		ev.op = RTE_EVENT_OP_NEW;
/* Consecutive seqn values let us check ordering at the tx port later. */
2682 		mbufs[i]->seqn = MAGIC_SEQN + i;
2684 		/* generate pkt and enqueue */
2685 		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
2687 			printf("%d: Failed to enqueue pkt %u, retval = %u\n",
2693 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2695 	/* use extra slot to make logic in loops easier */
2696 	struct rte_event deq_ev[w3_port + 1];
2698 	/* Dequeue the 3 packets, one from each worker port */
2699 	for (i = w1_port; i <= w3_port; i++) {
2700 		deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
2702 		if (deq_pkts != 1) {
2703 			printf("%d: Failed to deq\n", __LINE__);
2704 			rte_event_dev_dump(evdev, stdout);
2709 	/* Enqueue each packet in reverse order, flushing after each one */
2710 	for (i = w3_port; i >= w1_port; i--) {
2712 		deq_ev[i].op = RTE_EVENT_OP_FORWARD;
2713 		deq_ev[i].queue_id = t->qid[1];
2714 		err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
2716 			printf("%d: Failed to enqueue\n", __LINE__);
2720 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2722 	/* dequeue from the tx ports, we should get 3 packets */
2723 	deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
2726 	/* Check to see if we've got all 3 packets */
2727 	if (deq_pkts != 3) {
2728 		printf("%d: expected 3 pkts at tx port got %d from port %d\n",
2729 			__LINE__, deq_pkts, tx_port);
2730 		rte_event_dev_dump(evdev, stdout);
2734 	/* Check to see if the sequence numbers are in expected order */
2736 		for (j = 0 ; j < deq_pkts ; j++) {
2737 			if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
2739 					"%d: Incorrect sequence number(%d) from port %d\n",
2740 					__LINE__, mbufs_out[j]->seqn, tx_port);
2746 	/* Destroy the instance */
/* Ordered-queue variant: parallel_basic with order checking enabled. */
2752 ordered_basic(struct test *t)
2754 	return parallel_basic(t, 1);
/* Unordered-queue variant: parallel_basic without order checking. */
2758 unordered_basic(struct test *t)
2760 	return parallel_basic(t, 0);
2764 holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
2766 	const struct rte_event new_ev = {
2767 			.op = RTE_EVENT_OP_NEW
2768 			/* all other fields zero */
2770 	struct rte_event ev = new_ev;
2771 	unsigned int rx_port = 0; /* port we get the first flow on */
2772 	char rx_port_used_stat[64];
2773 	char rx_port_free_stat[64];
2774 	char other_port_used_stat[64];
2776 	if (init(t, 1, 2) < 0 ||
2777 			create_ports(t, 2) < 0 ||
2778 			create_atomic_qids(t, 1) < 0) {
2779 		printf("%d: Error initializing device\n", __LINE__);
/* Both ports must link to the single atomic qid. */
2782 	int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2783 	if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 ||
2785 		printf("%d: Error links queue to ports\n", __LINE__);
2788 	if (rte_event_dev_start(evdev) < 0) {
2789 		printf("%d: Error with start call\n", __LINE__);
2793 	/* send one packet and see where it goes, port 0 or 1 */
2794 	if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2795 		printf("%d: Error doing first enqueue\n", __LINE__);
2798 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
/* Determine which port received the first flow by checking port 0's CQ
 * occupancy, then build the xstat names for rx/other port accordingly. */
2800 	if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
2804 	snprintf(rx_port_used_stat, sizeof(rx_port_used_stat),
2805 			"port_%u_cq_ring_used", rx_port);
2806 	snprintf(rx_port_free_stat, sizeof(rx_port_free_stat),
2807 			"port_%u_cq_ring_free", rx_port);
2808 	snprintf(other_port_used_stat, sizeof(other_port_used_stat),
2809 			"port_%u_cq_ring_used", rx_port ^ 1);
2810 	if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL)
2812 		printf("%d: Error, first event not scheduled\n", __LINE__);
2816 	/* now fill up the rx port's queue with one flow to cause HOLB */
2819 		if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2820 			printf("%d: Error with enqueue\n", __LINE__);
2823 		rte_service_run_iter_on_app_lcore(t->service_id, 1);
2824 	} while (rte_event_dev_xstats_by_name_get(evdev,
2825 				rx_port_free_stat, NULL) != 0);
2827 	/* one more packet, which needs to stay in IQ - i.e. HOLB */
2829 	if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2830 		printf("%d: Error with enqueue\n", __LINE__);
2833 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2835 	/* check that the other port still has an empty CQ */
2836 	if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2838 		printf("%d: Error, second port CQ is not empty\n", __LINE__);
2841 	/* check IQ now has one packet */
2842 	if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2844 		printf("%d: Error, QID does not have exactly 1 packet\n",
2849 	/* send another flow, which should pass the other IQ entry */
2852 	if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2853 		printf("%d: Error with enqueue\n", __LINE__);
2856 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
/* The new flow must have been scheduled to the other (empty) port,
 * overtaking the blocked packet still held in the IQ. */
2858 	if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2860 		printf("%d: Error, second flow did not pass out first\n",
2865 	if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2867 		printf("%d: Error, QID does not have exactly 1 packet\n",
2874 	rte_event_dev_dump(evdev, stdout);
/* Stop-flush callback: counts (via the uint8_t pointed to by arg) how many
 * flushed events carry the sentinel payload 0xCA11BACC.
 */
2880 flush(uint8_t dev_id __rte_unused, struct rte_event event, void *arg)
2882 	*((uint8_t *) arg) += (event.u64 == 0xCA11BACC) ? 1 : 0;
2886 dev_stop_flush(struct test *t) /* test to check we can properly flush events */
2888 	const struct rte_event new_ev = {
2889 			.op = RTE_EVENT_OP_NEW,
/* NOTE(review): the sentinel .u64 payload matched by the flush() callback
 * is set on a line not visible here — confirm against full source. */
2893 	struct rte_event ev = new_ev;
2897 	if (init(t, 1, 1) < 0 ||
2898 	    create_ports(t, 1) < 0 ||
2899 	    create_atomic_qids(t, 1) < 0) {
2900 		printf("%d: Error initializing device\n", __LINE__);
2904 	/* Link the queue so *_start() doesn't error out */
2905 	if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1) {
2906 		printf("%d: Error linking queue to port\n", __LINE__);
2910 	if (rte_event_dev_start(evdev) < 0) {
2911 		printf("%d: Error with start call\n", __LINE__);
/* Enqueue one more event than the dequeue depth so at least one remains
 * queued when the device is stopped. */
2915 	for (i = 0; i < DEQUEUE_DEPTH + 1; i++) {
2916 		if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2917 			printf("%d: Error enqueuing events\n", __LINE__);
2922 	/* Schedule the events from the port to the IQ. At least one event
2923 	 * should be remaining in the queue.
2925 	rte_service_run_iter_on_app_lcore(t->service_id, 1);
2927 	if (rte_event_dev_stop_flush_callback_register(evdev, flush, &count)) {
2928 		printf("%d: Error installing the flush callback\n", __LINE__);
2935 		printf("%d: Error executing the flush callback\n", __LINE__);
2939 	if (rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL)) {
2940 		printf("%d: Error uninstalling the flush callback\n", __LINE__);
2946 	rte_event_dev_dump(evdev, stdout);
/*
 * Worker lcore for the loopback stress test: dequeues bursts from port 1 and
 * re-enqueues (FORWARD) each event so it traverses QIDs 0-8 repeatedly, using
 * mbuf->udata64 as a per-packet lap counter; after 16 laps the mbuf is freed
 * and the event RELEASEd.  Runs until 'count' reaches NUM_PACKETS.
 * NOTE(review): the captured listing is elided -- the 'static int' line,
 * braces, the dequeue-depth argument, the queue_id increment on forward and
 * the error 'return -1;' paths are missing between the numbered lines.
 */
2952 worker_loopback_worker_fn(void *arg)
2954 struct test *t = arg;
/* Worker owns port 1; the producer owns port 0. */
2955 uint8_t port = t->port[1];
2960 * Takes packets from the input port and then loops them back through
2961 * the Eventdev. Each packet gets looped through QIDs 0-8, 16 times
2962 * so each packet goes through 8*16 = 128 times.
2964 printf("%d: \tWorker function started\n", __LINE__);
2965 while (count < NUM_PACKETS) {
2966 #define BURST_SIZE 32
2967 struct rte_event ev[BURST_SIZE];
2968 uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev,
2975 for (i = 0; i < nb_rx; i++) {
/* Not yet at the last queue: forward to the next QID. */
2977 if (ev[i].queue_id != 8) {
2978 ev[i].op = RTE_EVENT_OP_FORWARD;
2979 enqd = rte_event_enqueue_burst(evdev, port,
2982 printf("%d: Can't enqueue FWD!!\n",
/* Completed one pass through all queues: count a lap. */
2990 ev[i].mbuf->udata64++;
2991 if (ev[i].mbuf->udata64 != 16) {
2992 ev[i].op = RTE_EVENT_OP_FORWARD;
2993 enqd = rte_event_enqueue_burst(evdev, port,
2996 printf("%d: Can't enqueue FWD!!\n",
3002 /* we have hit 16 iterations through system - drop */
3003 rte_pktmbuf_free(ev[i].mbuf);
/* Explicit RELEASE returns the atomic-flow credit to the device. */
3005 ev[i].op = RTE_EVENT_OP_RELEASE;
3006 enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1);
3008 printf("%d drop enqueue failed\n", __LINE__);
/*
 * Producer lcore for the loopback stress test: allocates an mbuf (busy-waiting
 * until the pool has one free), wraps it in a NEW event on qid[0] with a flow
 * id derived from the mbuf address, and enqueues through port 0, retrying
 * until the enqueue is accepted.  Runs until NUM_PACKETS have been injected.
 * NOTE(review): elided listing -- the 'static int' line, braces, the do/while
 * opening, the udata64 reset and the loop counter update are missing.
 */
3018 worker_loopback_producer_fn(void *arg)
3020 struct test *t = arg;
3021 uint8_t port = t->port[0];
3024 printf("%d: \tProducer function started\n", __LINE__);
3025 while (count < NUM_PACKETS) {
3026 struct rte_mbuf *m = 0;
/* Spin until the (deliberately small) pool yields a buffer. */
3028 m = rte_pktmbuf_alloc(t->mbuf_pool);
3029 } while (m == NULL);
3033 struct rte_event ev = {
3034 .op = RTE_EVENT_OP_NEW,
3035 .queue_id = t->qid[0],
/* Low 16 bits of the mbuf address spread packets across flows. */
3036 .flow_id = (uintptr_t)m & 0xFFFF,
/* Back-pressure handling: retry the enqueue until it succeeds. */
3040 if (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1) {
3041 while (rte_event_enqueue_burst(evdev, port, &ev, 1) !=
/*
 * End-to-end stress test: one producer lcore injects NUM_PACKETS events and
 * one worker lcore loops each through 8 atomic QIDs 16 times, while this
 * (application) lcore drives the sw eventdev's scheduler service.  Progress is
 * reported once per second; if the Tx count stalls for ~3 seconds the test is
 * declared deadlocked.  'disable_implicit_release' selects the
 * RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL port mode for both ports.
 * NOTE(review): elided listing -- the return type, braces, 'return -1;'
 * paths, the stats/xstats dump details and the final teardown are missing
 * between the numbered lines below.
 */
3053 worker_loopback(struct test *t, uint8_t disable_implicit_release)
3055 /* use a single producer core, and a worker core to see what happens
3056 * if the worker loops packets back multiple times
3058 struct test_event_dev_stats stats;
3059 uint64_t print_cycles = 0, cycles = 0;
3060 uint64_t tx_pkts = 0;
3062 int w_lcore, p_lcore;
3064 if (init(t, 8, 2) < 0 ||
3065 create_atomic_qids(t, 8) < 0) {
3066 printf("%d: Error initializing device\n", __LINE__);
3070 /* RX with low max events */
3071 static struct rte_event_port_conf conf = {
3072 .dequeue_depth = 32,
3073 .enqueue_depth = 64,
3075 /* beware: this cannot be initialized in the static above as it would
3076 * only be initialized once - and this needs to be set for multiple runs
3078 conf.new_event_threshold = 512;
3079 conf.event_port_cfg = disable_implicit_release ?
3080 RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL : 0;
3082 if (rte_event_port_setup(evdev, 0, &conf) < 0) {
3083 printf("Error setting up RX port\n");
3087 /* TX with higher max events */
3088 conf.new_event_threshold = 4096;
3089 if (rte_event_port_setup(evdev, 1, &conf) < 0) {
3090 printf("Error setting up TX port\n");
3095 /* CQ mapping to QID */
/* NULL queue list links the worker port to every configured queue. */
3096 err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
3097 if (err != 8) { /* should have mapped all queues*/
3098 printf("%d: error mapping port 2 to all qids\n", __LINE__);
3102 if (rte_event_dev_start(evdev) < 0) {
3103 printf("%d: Error with start call\n", __LINE__);
/* Pick two distinct slave lcores for producer and worker. */
3107 p_lcore = rte_get_next_lcore(
3108 /* start core */ -1,
3111 w_lcore = rte_get_next_lcore(p_lcore, 1, 0);
3113 rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);
3114 rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);
3116 print_cycles = cycles = rte_get_timer_cycles();
/* Drive the scheduler until both launched lcores finish. */
3117 while (rte_eal_get_lcore_state(p_lcore) != FINISHED ||
3118 rte_eal_get_lcore_state(w_lcore) != FINISHED) {
3120 rte_service_run_iter_on_app_lcore(t->service_id, 1);
3122 uint64_t new_cycles = rte_get_timer_cycles();
/* Once-per-second progress report. */
3124 if (new_cycles - print_cycles > rte_get_timer_hz()) {
3125 test_event_dev_stats_get(evdev, &stats);
3127 "%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n",
3128 __LINE__, stats.rx_pkts, stats.tx_pkts);
3130 print_cycles = new_cycles;
/* Deadlock watchdog: no Tx progress for ~3 seconds is fatal. */
3132 if (new_cycles - cycles > rte_get_timer_hz() * 3) {
3133 test_event_dev_stats_get(evdev, &stats);
3134 if (stats.tx_pkts == tx_pkts) {
3135 rte_event_dev_dump(evdev, stdout);
3136 printf("Dumping xstats:\n");
3139 "%d: No schedules for seconds, deadlock\n",
3143 tx_pkts = stats.tx_pkts;
3144 cycles = new_cycles;
3147 rte_service_run_iter_on_app_lcore(t->service_id, 1);
3148 /* ensure all completions are flushed */
3150 rte_eal_mp_wait_lcore();
/* Mbuf pool shared by all selftests; created once in test_sw_eventdev() and
 * deliberately kept alive across re-runs (mempools cannot be re-created with
 * the same name).
 */
3156 static struct rte_mempool *eventdev_func_mempool;
/*
 * Selftest entry point for the software eventdev PMD.  Locates (or creates
 * via vdev) the "event_sw" device, attaches to its scheduler service, lazily
 * creates the shared mbuf pool, then runs every selftest in sequence, printing
 * a banner before each and bailing to the failure path on the first error.
 * NOTE(review): elided listing -- the return type, braces, NULL checks,
 * 'goto test_fail' statements, mempool size/socket arguments and the final
 * free(t)/return statements are missing between the numbered lines below.
 */
3159 test_sw_eventdev(void)
3164 t = malloc(sizeof(struct test));
3167 /* manually initialize the op, older gcc's complain on static
3168 * initialization of struct elements that are a bitfield.
3170 release_ev.op = RTE_EVENT_OP_RELEASE;
/* Find the sw eventdev; create it as a vdev if not already present. */
3172 const char *eventdev_name = "event_sw";
3173 evdev = rte_event_dev_get_dev_id(eventdev_name);
3175 printf("%d: Eventdev %s not found - creating.\n",
3176 __LINE__, eventdev_name);
3177 if (rte_vdev_init(eventdev_name, NULL) < 0) {
3178 printf("Error creating eventdev\n");
3181 evdev = rte_event_dev_get_dev_id(eventdev_name);
3183 printf("Error finding newly created eventdev\n");
/* The sw PMD schedules via a service core; grab and enable its service so
 * tests can drive it with rte_service_run_iter_on_app_lcore().
 */
3188 if (rte_event_dev_service_id_get(evdev, &t->service_id) < 0) {
3189 printf("Failed to get service ID for software event dev\n");
3193 rte_service_runstate_set(t->service_id, 1);
/* Allow running the service from this (unmapped) app lcore. */
3194 rte_service_set_runstate_mapped_check(t->service_id, 0);
3196 /* Only create mbuf pool once, reuse for each test run */
3197 if (!eventdev_func_mempool) {
3198 eventdev_func_mempool = rte_pktmbuf_pool_create(
3199 "EVENTDEV_SW_SA_MBUF_POOL",
3200 (1<<12), /* 4k buffers */
3201 32 /*MBUF_CACHE_SIZE*/,
3203 512, /* use very small mbufs */
3205 if (!eventdev_func_mempool) {
3206 printf("ERROR creating mempool\n");
3210 t->mbuf_pool = eventdev_func_mempool;
/* Test sequence: each block prints a banner, runs the test, and falls
 * through to the failure path when ret indicates an error.
 */
3211 printf("*** Running Single Directed Packet test...\n");
3212 ret = test_single_directed_packet(t);
3214 printf("ERROR - Single Directed Packet test FAILED.\n");
3217 printf("*** Running Directed Forward Credit test...\n");
3218 ret = test_directed_forward_credits(t);
3220 printf("ERROR - Directed Forward Credit test FAILED.\n");
3223 printf("*** Running Single Load Balanced Packet test...\n");
3224 ret = single_packet(t);
3226 printf("ERROR - Single Packet test FAILED.\n");
3229 printf("*** Running Unordered Basic test...\n");
3230 ret = unordered_basic(t);
3232 printf("ERROR - Unordered Basic test FAILED.\n");
3235 printf("*** Running Ordered Basic test...\n");
3236 ret = ordered_basic(t);
3238 printf("ERROR - Ordered Basic test FAILED.\n");
3241 printf("*** Running Burst Packets test...\n");
3242 ret = burst_packets(t);
3244 printf("ERROR - Burst Packets test FAILED.\n");
3247 printf("*** Running Load Balancing test...\n");
3248 ret = load_balancing(t);
3250 printf("ERROR - Load Balancing test FAILED.\n");
3253 printf("*** Running Prioritized Directed test...\n");
3254 ret = test_priority_directed(t);
3256 printf("ERROR - Prioritized Directed test FAILED.\n");
3259 printf("*** Running Prioritized Atomic test...\n");
3260 ret = test_priority_atomic(t);
3262 printf("ERROR - Prioritized Atomic test FAILED.\n");
3266 printf("*** Running Prioritized Ordered test...\n");
3267 ret = test_priority_ordered(t);
3269 printf("ERROR - Prioritized Ordered test FAILED.\n");
3272 printf("*** Running Prioritized Unordered test...\n");
3273 ret = test_priority_unordered(t);
3275 printf("ERROR - Prioritized Unordered test FAILED.\n");
3278 printf("*** Running Invalid QID test...\n");
3279 ret = invalid_qid(t);
3281 printf("ERROR - Invalid QID test FAILED.\n");
3284 printf("*** Running Load Balancing History test...\n");
3285 ret = load_balancing_history(t);
3287 printf("ERROR - Load Balancing History test FAILED.\n");
3290 printf("*** Running Inflight Count test...\n");
3291 ret = inflight_counts(t);
3293 printf("ERROR - Inflight Count test FAILED.\n");
3296 printf("*** Running Abuse Inflights test...\n");
3297 ret = abuse_inflights(t);
3299 printf("ERROR - Abuse Inflights test FAILED.\n");
3302 printf("*** Running XStats test...\n");
3303 ret = xstats_tests(t);
3305 printf("ERROR - XStats test FAILED.\n");
3308 printf("*** Running XStats ID Reset test...\n");
3309 ret = xstats_id_reset_tests(t);
3311 printf("ERROR - XStats ID Reset test FAILED.\n");
3314 printf("*** Running XStats Brute Force test...\n");
3315 ret = xstats_brute_force(t);
3317 printf("ERROR - XStats Brute Force test FAILED.\n");
3320 printf("*** Running XStats ID Abuse test...\n");
3321 ret = xstats_id_abuse_tests(t);
3323 printf("ERROR - XStats ID Abuse test FAILED.\n");
3326 printf("*** Running QID Priority test...\n");
3327 ret = qid_priorities(t);
3329 printf("ERROR - QID Priority test FAILED.\n");
3332 printf("*** Running Unlink-in-progress test...\n");
3333 ret = unlink_in_progress(t);
3335 printf("ERROR - Unlink in progress test FAILED.\n");
3338 printf("*** Running Ordered Reconfigure test...\n");
3339 ret = ordered_reconfigure(t);
3341 printf("ERROR - Ordered Reconfigure test FAILED.\n");
3344 printf("*** Running Port LB Single Reconfig test...\n");
3345 ret = port_single_lb_reconfig(t);
3347 printf("ERROR - Port LB Single Reconfig test FAILED.\n");
3350 printf("*** Running Port Reconfig Credits test...\n");
3351 ret = port_reconfig_credits(t);
3353 printf("ERROR - Port Reconfig Credits Reset test FAILED.\n");
3356 printf("*** Running Head-of-line-blocking test...\n");
3359 printf("ERROR - Head-of-line-blocking test FAILED.\n");
3362 printf("*** Running Stop Flush test...\n");
3363 ret = dev_stop_flush(t);
3365 printf("ERROR - Stop Flush test FAILED.\n");
/* Loopback tests need a producer lcore, a worker lcore and this one. */
3368 if (rte_lcore_count() >= 3) {
3369 printf("*** Running Worker loopback test...\n");
3370 ret = worker_loopback(t, 0);
3372 printf("ERROR - Worker loopback test FAILED.\n");
3376 printf("*** Running Worker loopback test (implicit release disabled)...\n");
3377 ret = worker_loopback(t, 1);
3379 printf("ERROR - Worker loopback test FAILED.\n");
3383 printf("### Not enough cores for worker loopback tests.\n");
3384 printf("### Need at least 3 cores for the tests.\n");
3388 * Free test instance, leaving mempool initialized, and a pointer to it
3389 * in static eventdev_func_mempool, as it is re-used on re-runs
3393 printf("SW Eventdev Selftest Successful.\n");
3397 printf("SW Eventdev Selftest Failed.\n");