/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <errno.h>
#include <unistd.h>
#include <sys/queue.h>

#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_eventdev.h>
#include <rte_pause.h>
#include <rte_service.h>
#include <rte_service_component.h>
#include <rte_bus_vdev.h>
#define MAX_PORTS 16
#define MAX_QIDS 16
#define NUM_PACKETS (1<<18)
#define DEQUEUE_DEPTH 128

static int evdev;

struct test {
	struct rte_mempool *mbuf_pool;
	uint8_t port[MAX_PORTS];
	uint8_t qid[MAX_QIDS];
	int nb_qids;
	uint32_t service_id;
};

static struct rte_event release_ev;
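
/* release_ev is enqueued by tests that only need to drop an inflight
 * credit: for a release, only the op field of the event is meaningful.
 * A minimal sketch of the one-time setup this relies on (the real
 * initialization lives in the test harness setup, outside this section):
 *
 *	release_ev.op = RTE_EVENT_OP_RELEASE;
 */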
static inline struct rte_mbuf *
rte_gen_arp(int portid, struct rte_mempool *mp)
{
	/*
	 * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
	 */
	static const uint8_t arp_request[] = {
		/*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
		0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
		/*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
		0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
		/*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
		0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		/*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00
	};
	struct rte_mbuf *m;
	int pkt_len = sizeof(arp_request) - 1;

	m = rte_pktmbuf_alloc(mp);
	if (!m)
		return NULL;

	memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
		arp_request, pkt_len);
	rte_pktmbuf_pkt_len(m) = pkt_len;
	rte_pktmbuf_data_len(m) = pkt_len;

	RTE_SET_USED(portid);

	return m;
}
static void
xstats_print(void)
{
	const uint32_t XSTATS_MAX = 1024;
	uint32_t i;
	uint32_t ids[XSTATS_MAX];
	uint64_t values[XSTATS_MAX];
	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];

	for (i = 0; i < XSTATS_MAX; i++)
		ids[i] = i;

	/* Device names / values */
	int ret = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_DEVICE, 0,
					xstats_names, ids, XSTATS_MAX);
	if (ret < 0) {
		printf("%d: xstats names get() returned error\n",
			__LINE__);
		return;
	}
	ret = rte_event_dev_xstats_get(evdev,
					RTE_EVENT_DEV_XSTATS_DEVICE,
					0, ids, values, ret);
	if (ret > (signed int)XSTATS_MAX)
		printf("%s %d: more xstats available than space\n",
				__func__, __LINE__);
	for (i = 0; (signed int)i < ret; i++) {
		printf("%d : %s : %"PRIu64"\n",
				i, xstats_names[i].name, values[i]);
	}

	/* Port names / values */
	ret = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_PORT, 0,
					xstats_names, ids, XSTATS_MAX);
	ret = rte_event_dev_xstats_get(evdev,
					RTE_EVENT_DEV_XSTATS_PORT, 1,
					ids, values, ret);
	if (ret > (signed int)XSTATS_MAX)
		printf("%s %d: more xstats available than space\n",
				__func__, __LINE__);
	for (i = 0; (signed int)i < ret; i++) {
		printf("%d : %s : %"PRIu64"\n",
				i, xstats_names[i].name, values[i]);
	}

	/* Queue names / values */
	ret = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_QUEUE, 0,
					xstats_names, ids, XSTATS_MAX);
	ret = rte_event_dev_xstats_get(evdev,
					RTE_EVENT_DEV_XSTATS_QUEUE,
					1, ids, values, ret);
	if (ret > (signed int)XSTATS_MAX)
		printf("%s %d: more xstats available than space\n",
				__func__, __LINE__);
	for (i = 0; (signed int)i < ret; i++) {
		printf("%d : %s : %"PRIu64"\n",
				i, xstats_names[i].name, values[i]);
	}
}
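
/* Hypothetical sample of the output format produced above, assuming an
 * idle device (stat names from the sw PMD; values illustrative only):
 *
 *	0 : dev_rx : 0
 *	1 : dev_tx : 0
 *	2 : dev_drop : 0
 */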
/* initialization and config */
static int
init(struct test *t, int nb_queues, int nb_ports)
{
	struct rte_event_dev_config config = {
			.nb_event_queues = nb_queues,
			.nb_event_ports = nb_ports,
			.nb_event_queue_flows = 1024,
			.nb_events_limit = 4096,
			.nb_event_port_dequeue_depth = DEQUEUE_DEPTH,
			.nb_event_port_enqueue_depth = 128,
	};
	int ret;

	void *temp = t->mbuf_pool; /* save and restore mbuf pool */

	memset(t, 0, sizeof(*t));
	t->mbuf_pool = temp; /* restore mbuf pool */

	ret = rte_event_dev_configure(evdev, &config);
	if (ret < 0)
		printf("%d: Error configuring device\n", __LINE__);
	return ret;
}
static int
create_ports(struct test *t, int num_ports)
{
	int i;
	static const struct rte_event_port_conf conf = {
			.new_event_threshold = 1024,
			.dequeue_depth = 32,
			.enqueue_depth = 64,
			.disable_implicit_release = 0,
	};
	if (num_ports > MAX_PORTS)
		return -1;

	for (i = 0; i < num_ports; i++) {
		if (rte_event_port_setup(evdev, i, &conf) < 0) {
			printf("Error setting up port %d\n", i);
			return -1;
		}
		t->port[i] = i;
	}

	return 0;
}
static inline int
create_lb_qids(struct test *t, int num_qids, uint32_t flags)
{
	int i;

	/* Q creation */
	const struct rte_event_queue_conf conf = {
			.schedule_type = flags,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = 1024,
			.nb_atomic_order_sequences = 1024,
	};

	for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
			printf("%d: error creating qid %d\n", __LINE__, i);
			return -1;
		}
		t->qid[i] = i;
	}
	t->nb_qids += num_qids;
	if (t->nb_qids > MAX_QIDS)
		return -1;

	return 0;
}

static inline int
create_atomic_qids(struct test *t, int num_qids)
{
	return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
}

static inline int
create_ordered_qids(struct test *t, int num_qids)
{
	return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ORDERED);
}

static inline int
create_unordered_qids(struct test *t, int num_qids)
{
	return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_PARALLEL);
}
static int
create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
{
	int i;

	/* Q creation */
	static const struct rte_event_queue_conf conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
	};

	for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
			printf("%d: error creating qid %d\n", __LINE__, i);
			return -1;
		}
		t->qid[i] = i;

		if (rte_event_port_link(evdev, ports[i - t->nb_qids],
				&t->qid[i], NULL, 1) != 1) {
			printf("%d: error creating link for qid %d\n",
					__LINE__, i);
			return -1;
		}
	}
	t->nb_qids += num_qids;
	if (t->nb_qids > MAX_QIDS)
		return -1;

	return 0;
}
/* destruction */
static int
cleanup(struct test *t __rte_unused)
{
	rte_event_dev_stop(evdev);
	rte_event_dev_close(evdev);
	return 0;
}
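
/* The helpers above are combined by every test below in the same basic
 * shape. A minimal sketch of the canonical bring-up/teardown sequence,
 * assuming a mempool already created by the harness (error handling
 * elided):
 *
 *	struct test t = { .mbuf_pool = mbuf_pool };
 *	init(&t, 1, 4);			// 1 queue, 4 ports
 *	create_ports(&t, 4);
 *	create_atomic_qids(&t, 1);
 *	rte_event_port_link(evdev, t.port[2], NULL, NULL, 0); // all qids
 *	rte_event_dev_start(evdev);
 *	... enqueue / run scheduler / dequeue / check stats ...
 *	cleanup(&t);			// stops and closes the device
 */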
struct test_event_dev_stats {
	uint64_t rx_pkts;       /**< Total packets received */
	uint64_t rx_dropped;    /**< Total packets dropped (Eg Invalid QID) */
	uint64_t tx_pkts;       /**< Total packets transmitted */

	/** Packets received on this port */
	uint64_t port_rx_pkts[MAX_PORTS];
	/** Packets dropped on this port */
	uint64_t port_rx_dropped[MAX_PORTS];
	/** Packets inflight on this port */
	uint64_t port_inflight[MAX_PORTS];
	/** Packets transmitted on this port */
	uint64_t port_tx_pkts[MAX_PORTS];
	/** Packets received on this qid */
	uint64_t qid_rx_pkts[MAX_QIDS];
	/** Packets dropped on this qid */
	uint64_t qid_rx_dropped[MAX_QIDS];
	/** Packets transmitted on this qid */
	uint64_t qid_tx_pkts[MAX_QIDS];
};
static inline int
test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
{
	static uint32_t i;
	static uint32_t total_ids[3]; /* rx, tx and drop */
	static uint32_t port_rx_pkts_ids[MAX_PORTS];
	static uint32_t port_rx_dropped_ids[MAX_PORTS];
	static uint32_t port_inflight_ids[MAX_PORTS];
	static uint32_t port_tx_pkts_ids[MAX_PORTS];
	static uint32_t qid_rx_pkts_ids[MAX_QIDS];
	static uint32_t qid_rx_dropped_ids[MAX_QIDS];
	static uint32_t qid_tx_pkts_ids[MAX_QIDS];

	stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
			"dev_rx", &total_ids[0]);
	stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
			"dev_drop", &total_ids[1]);
	stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
			"dev_tx", &total_ids[2]);
	for (i = 0; i < MAX_PORTS; i++) {
		char name[32];
		snprintf(name, sizeof(name), "port_%u_rx", i);
		stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
				dev_id, name, &port_rx_pkts_ids[i]);
		snprintf(name, sizeof(name), "port_%u_drop", i);
		stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
				dev_id, name, &port_rx_dropped_ids[i]);
		snprintf(name, sizeof(name), "port_%u_inflight", i);
		stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
				dev_id, name, &port_inflight_ids[i]);
		snprintf(name, sizeof(name), "port_%u_tx", i);
		stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
				dev_id, name, &port_tx_pkts_ids[i]);
	}
	for (i = 0; i < MAX_QIDS; i++) {
		char name[32];
		snprintf(name, sizeof(name), "qid_%u_rx", i);
		stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
				dev_id, name, &qid_rx_pkts_ids[i]);
		snprintf(name, sizeof(name), "qid_%u_drop", i);
		stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
				dev_id, name, &qid_rx_dropped_ids[i]);
		snprintf(name, sizeof(name), "qid_%u_tx", i);
		stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
				dev_id, name, &qid_tx_pkts_ids[i]);
	}

	return 0;
}
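
/* A sketch of the typical assertion flow built on the helper above
 * (this is the pattern used by the tests below, not additional test
 * code):
 *
 *	rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
 *	rte_service_run_iter_on_app_lcore(t->service_id, 1);
 *	struct test_event_dev_stats stats;
 *	test_event_dev_stats_get(evdev, &stats);
 *	if (stats.rx_pkts != 1 || stats.port_inflight[2] != 1)
 *		... report a test failure ...
 */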
/* run_prio_packet_test
 * This performs a basic packet priority check on the test instance passed in.
 * It is factored out of the main priority tests as the same tests must be
 * performed to ensure prioritization of each type of QID.
 *
 * Requirements:
 *  - An initialized test structure, including mempool
 *  - t->port[0] is initialized for both Enq / Deq of packets to the QID
 *  - t->qid[0] is the QID to be tested
 *  - if LB QID, the CQ must be mapped to the QID.
 */
static int
run_prio_packet_test(struct test *t)
{
	int err;
	const uint32_t MAGIC_SEQN[] = {4711, 1234};
	const uint32_t PRIORITY[] = {
		RTE_EVENT_DEV_PRIORITY_NORMAL,
		RTE_EVENT_DEV_PRIORITY_HIGHEST
	};
	unsigned int i;
	for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
		/* generate pkt and enqueue */
		struct rte_event ev;
		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
		if (!arp) {
			printf("%d: gen of pkt failed\n", __LINE__);
			return -1;
		}
		arp->seqn = MAGIC_SEQN[i];

		ev = (struct rte_event){
			.priority = PRIORITY[i],
			.op = RTE_EVENT_OP_NEW,
			.queue_id = t->qid[0],
			.mbuf = arp
		};
		err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
		if (err != 1) {
			printf("%d: error failed to enqueue\n", __LINE__);
			return -1;
		}
	}

	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	struct test_event_dev_stats stats;
	err = test_event_dev_stats_get(evdev, &stats);
	if (err) {
		printf("%d: error failed to get stats\n", __LINE__);
		return -1;
	}

	if (stats.port_rx_pkts[t->port[0]] != 2) {
		printf("%d: error stats incorrect for directed port\n",
				__LINE__);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}

	struct rte_event ev, ev2;
	uint32_t deq_pkts;
	deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
	if (deq_pkts != 1) {
		printf("%d: error failed to deq\n", __LINE__);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}
	if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
		printf("%d: first packet out not highest priority\n",
				__LINE__);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}
	rte_pktmbuf_free(ev.mbuf);

	deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
	if (deq_pkts != 1) {
		printf("%d: error failed to deq\n", __LINE__);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}
	if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
		printf("%d: second packet out not lower priority\n",
				__LINE__);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}
	rte_pktmbuf_free(ev2.mbuf);

	cleanup(t);
	return 0;
}
static int
test_single_directed_packet(struct test *t)
{
	const int rx_enq = 0;
	const int wrk_enq = 2;
	int err;

	/* Create instance with 3 directed QIDs going to 3 ports */
	if (init(t, 3, 3) < 0 ||
			create_ports(t, 3) < 0 ||
			create_directed_qids(t, 3, t->port) < 0)
		return -1;

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	/************** FORWARD ****************/
	struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
	struct rte_event ev = {
			.op = RTE_EVENT_OP_NEW,
			.queue_id = wrk_enq,
			.mbuf = arp,
	};

	if (!arp) {
		printf("%d: gen of pkt failed\n", __LINE__);
		return -1;
	}

	const uint32_t MAGIC_SEQN = 4711;
	arp->seqn = MAGIC_SEQN;

	/* generate pkt and enqueue */
	err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
	if (err != 1) {
		printf("%d: error failed to enqueue\n", __LINE__);
		return -1;
	}

	/* Run schedule() as dir packets may need to be re-ordered */
	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	struct test_event_dev_stats stats;
	err = test_event_dev_stats_get(evdev, &stats);
	if (err) {
		printf("%d: error failed to get stats\n", __LINE__);
		return -1;
	}

	if (stats.port_rx_pkts[rx_enq] != 1) {
		printf("%d: error stats incorrect for directed port\n",
				__LINE__);
		return -1;
	}

	uint32_t deq_pkts;
	deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
	if (deq_pkts != 1) {
		printf("%d: error failed to deq\n", __LINE__);
		return -1;
	}

	err = test_event_dev_stats_get(evdev, &stats);
	if (stats.port_rx_pkts[wrk_enq] != 0 &&
			stats.port_rx_pkts[wrk_enq] != 1) {
		printf("%d: error directed stats post-dequeue\n", __LINE__);
		return -1;
	}

	if (ev.mbuf->seqn != MAGIC_SEQN) {
		printf("%d: error magic sequence number not dequeued\n",
				__LINE__);
		return -1;
	}

	rte_pktmbuf_free(ev.mbuf);
	cleanup(t);
	return 0;
}
static int
test_directed_forward_credits(struct test *t)
{
	uint32_t i;
	int err;

	if (init(t, 1, 1) < 0 ||
			create_ports(t, 1) < 0 ||
			create_directed_qids(t, 1, t->port) < 0)
		return -1;

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	struct rte_event ev = {
			.op = RTE_EVENT_OP_NEW,
			.queue_id = 0,
	};

	for (i = 0; i < 1000; i++) {
		err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
		if (err != 1) {
			printf("%d: error failed to enqueue\n", __LINE__);
			return -1;
		}
		rte_service_run_iter_on_app_lcore(t->service_id, 1);

		uint32_t deq_pkts;
		deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
		if (deq_pkts != 1) {
			printf("%d: error failed to deq\n", __LINE__);
			return -1;
		}

		/* re-write event to be a forward, and continue looping it */
		ev.op = RTE_EVENT_OP_FORWARD;
	}

	cleanup(t);
	return 0;
}
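
/* Note on credits: a NEW event consumes an inflight credit against the
 * port's new_event_threshold (1024 in create_ports()), while a FORWARD
 * re-uses the credit of the event just dequeued. The loop above would
 * therefore run the directed port out of credits if the event were left
 * as OP_NEW on every iteration; rewriting it to OP_FORWARD is what lets
 * 1000 iterations run with a single outstanding event.
 */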
static int
test_priority_directed(struct test *t)
{
	if (init(t, 1, 1) < 0 ||
			create_ports(t, 1) < 0 ||
			create_directed_qids(t, 1, t->port) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	return run_prio_packet_test(t);
}

static int
test_priority_atomic(struct test *t)
{
	if (init(t, 1, 1) < 0 ||
			create_ports(t, 1) < 0 ||
			create_atomic_qids(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	/* map the QID */
	if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
		printf("%d: error mapping qid to port\n", __LINE__);
		return -1;
	}
	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	return run_prio_packet_test(t);
}

static int
test_priority_ordered(struct test *t)
{
	if (init(t, 1, 1) < 0 ||
			create_ports(t, 1) < 0 ||
			create_ordered_qids(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	/* map the QID */
	if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
		printf("%d: error mapping qid to port\n", __LINE__);
		return -1;
	}
	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	return run_prio_packet_test(t);
}

static int
test_priority_unordered(struct test *t)
{
	if (init(t, 1, 1) < 0 ||
			create_ports(t, 1) < 0 ||
			create_unordered_qids(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	/* map the QID */
	if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
		printf("%d: error mapping qid to port\n", __LINE__);
		return -1;
	}
	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	return run_prio_packet_test(t);
}
static int
burst_packets(struct test *t)
{
	/************** CONFIG ****************/
	uint32_t i;
	int err;
	int ret;

	/* Create instance with 2 ports and 2 queues */
	if (init(t, 2, 2) < 0 ||
			create_ports(t, 2) < 0 ||
			create_atomic_qids(t, 2) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	/* CQ mapping to QID */
	ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
	if (ret != 1) {
		printf("%d: error mapping lb qid0\n", __LINE__);
		return -1;
	}
	ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
	if (ret != 1) {
		printf("%d: error mapping lb qid1\n", __LINE__);
		return -1;
	}

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	/************** FORWARD ****************/
	const uint32_t rx_port = 0;
	const uint32_t NUM_PKTS = 2;

	for (i = 0; i < NUM_PKTS; i++) {
		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
		if (!arp) {
			printf("%d: error generating pkt\n", __LINE__);
			return -1;
		}

		struct rte_event ev = {
				.op = RTE_EVENT_OP_NEW,
				.queue_id = i % 2,
				.flow_id = i % 3,
				.mbuf = arp,
		};
		/* generate pkt and enqueue */
		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
		if (err != 1) {
			printf("%d: Failed to enqueue\n", __LINE__);
			return -1;
		}
	}
	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	/* Check stats for all NUM_PKTS arrived to sched core */
	struct test_event_dev_stats stats;

	err = test_event_dev_stats_get(evdev, &stats);
	if (err) {
		printf("%d: failed to get stats\n", __LINE__);
		return -1;
	}
	if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
		printf("%d: Sched core didn't receive all %d pkts\n",
				__LINE__, NUM_PKTS);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}

	uint32_t deq_pkts;
	int p;

	deq_pkts = 0;
	/******** DEQ QID 1 *******/
	do {
		struct rte_event ev;
		p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
		deq_pkts += p;
		if (p)
			rte_pktmbuf_free(ev.mbuf);
	} while (p);

	if (deq_pkts != NUM_PKTS/2) {
		printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
				__LINE__);
		return -1;
	}

	/******** DEQ QID 2 *******/
	deq_pkts = 0;
	do {
		struct rte_event ev;
		p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
		deq_pkts += p;
		if (p)
			rte_pktmbuf_free(ev.mbuf);
	} while (p);
	if (deq_pkts != NUM_PKTS/2) {
		printf("%d: Half of NUM_PKTS didn't arrive at port 2\n",
				__LINE__);
		return -1;
	}

	cleanup(t);
	return 0;
}
static int
abuse_inflights(struct test *t)
{
	const int rx_enq = 0;
	const int wrk_enq = 2;
	int err;

	/* Create instance with 4 ports */
	if (init(t, 1, 4) < 0 ||
			create_ports(t, 4) < 0 ||
			create_atomic_qids(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	/* CQ mapping to QID */
	err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
	if (err != 1) {
		printf("%d: error mapping lb qid\n", __LINE__);
		cleanup(t);
		return -1;
	}

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	/* Enqueue op only */
	err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
	if (err != 1) {
		printf("%d: Failed to enqueue\n", __LINE__);
		return -1;
	}

	/* schedule */
	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	struct test_event_dev_stats stats;

	err = test_event_dev_stats_get(evdev, &stats);
	if (err) {
		printf("%d: failed to get stats\n", __LINE__);
		return -1;
	}

	if (stats.rx_pkts != 0 ||
			stats.tx_pkts != 0 ||
			stats.port_inflight[wrk_enq] != 0) {
		printf("%d: Sched core didn't handle pkt as expected\n",
				__LINE__);
		return -1;
	}

	cleanup(t);
	return 0;
}
static int
xstats_tests(struct test *t)
{
	const int wrk_enq = 2;
	int err;

	/* Create instance with 4 ports */
	if (init(t, 1, 4) < 0 ||
			create_ports(t, 4) < 0 ||
			create_atomic_qids(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	/* CQ mapping to QID */
	err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
	if (err != 1) {
		printf("%d: error mapping lb qid\n", __LINE__);
		cleanup(t);
		return -1;
	}

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	const uint32_t XSTATS_MAX = 1024;

	uint32_t i;
	uint32_t ids[XSTATS_MAX];
	uint64_t values[XSTATS_MAX];
	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
	int fails = 0;

	for (i = 0; i < XSTATS_MAX; i++)
		ids[i] = i;

	/* Device names / values */
	int ret = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_DEVICE,
					0, xstats_names, ids, XSTATS_MAX);
	if (ret != 6) {
		printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
		goto fail;
	}
	ret = rte_event_dev_xstats_get(evdev,
					RTE_EVENT_DEV_XSTATS_DEVICE,
					0, ids, values, ret);
	if (ret != 6) {
		printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
		goto fail;
	}

	/* Port names / values */
	ret = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_PORT, 0,
					xstats_names, ids, XSTATS_MAX);
	if (ret != 21) {
		printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
		goto fail;
	}
	ret = rte_event_dev_xstats_get(evdev,
					RTE_EVENT_DEV_XSTATS_PORT, 0,
					ids, values, ret);
	if (ret != 21) {
		printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
		goto fail;
	}

	/* Queue names / values */
	ret = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_QUEUE,
					0, xstats_names, ids, XSTATS_MAX);
	if (ret != 16) {
		printf("%d: expected 16 stats, got return %d\n", __LINE__, ret);
		goto fail;
	}

	/* NEGATIVE TEST: with wrong queue passed, 0 stats should be returned */
	ret = rte_event_dev_xstats_get(evdev,
					RTE_EVENT_DEV_XSTATS_QUEUE,
					1, ids, values, ret);
	if (ret != -EINVAL) {
		printf("%d: expected 0 stats, got return %d\n", __LINE__, ret);
		goto fail;
	}

	ret = rte_event_dev_xstats_get(evdev,
					RTE_EVENT_DEV_XSTATS_QUEUE,
					0, ids, values, XSTATS_MAX);
	if (ret != 16) {
		printf("%d: expected 16 stats, got return %d\n", __LINE__, ret);
		goto fail;
	}

	/* enqueue packets to check values */
	for (i = 0; i < 3; i++) {
		struct rte_event ev;
		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
		if (!arp) {
			printf("%d: gen of pkt failed\n", __LINE__);
			goto fail;
		}
		ev.queue_id = t->qid[i];
		ev.op = RTE_EVENT_OP_NEW;
		ev.mbuf = arp;
		ev.flow_id = 7;
		arp->seqn = i;

		int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
		if (err != 1) {
			printf("%d: Failed to enqueue\n", __LINE__);
			goto fail;
		}
	}

	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	/* Device names / values */
	int num_stats = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_DEVICE, 0,
					xstats_names, ids, XSTATS_MAX);
	if (num_stats < 0)
		goto fail;
	ret = rte_event_dev_xstats_get(evdev,
					RTE_EVENT_DEV_XSTATS_DEVICE,
					0, ids, values, num_stats);
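	/* The six device stats correspond, in id order, to dev_rx, dev_tx,
	 * dev_drop, dev_sched_calls, dev_sched_no_iq_enq and
	 * dev_sched_no_cq_enq (see dev_names[] in xstats_id_reset_tests
	 * below): 3 pkts in, 3 scheduled out, 0 dropped, 1 schedule call.
	 */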
	static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
	for (i = 0; (signed int)i < ret; i++) {
		if (expected[i] != values[i]) {
			printf(
				"%d Error xstat %d (id %d) %s : %"PRIu64
				", expect %"PRIu64"\n",
				__LINE__, i, ids[i], xstats_names[i].name,
				values[i], expected[i]);
			fails++;
		}
	}

	ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
					0, NULL, 0);

	/* ensure reset statistics are zero-ed */
	static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
	ret = rte_event_dev_xstats_get(evdev,
					RTE_EVENT_DEV_XSTATS_DEVICE,
					0, ids, values, num_stats);
	for (i = 0; (signed int)i < ret; i++) {
		if (expected_zero[i] != values[i]) {
			printf(
				"%d Error, xstat %d (id %d) %s : %"PRIu64
				", expect %"PRIu64"\n",
				__LINE__, i, ids[i], xstats_names[i].name,
				values[i], expected_zero[i]);
			fails++;
		}
	}

	/* port reset checks */
	num_stats = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_PORT, 0,
					xstats_names, ids, XSTATS_MAX);
	if (num_stats < 0)
		goto fail;
	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT,
					0, ids, values, num_stats);

	static const uint64_t port_expected[] = {
		3 /* rx */,
		0 /* tx */,
		0 /* drop */,
		0 /* inflight */,
		0 /* avg pkt cycles */,
		29 /* credits */,
		0 /* rx ring used */,
		4096 /* rx ring free */,
		0 /* cq ring used */,
		32 /* cq ring free */,
		0 /* dequeue calls */,
		/* 10 dequeue burst buckets */
		0, 0, 0, 0, 0,
		0, 0, 0, 0, 0,
	};
	if (ret != RTE_DIM(port_expected)) {
		printf(
			"%s %d: wrong number of port stats (%d), expected %zu\n",
			__func__, __LINE__, ret, RTE_DIM(port_expected));
	}

	for (i = 0; (signed int)i < ret; i++) {
		if (port_expected[i] != values[i]) {
			printf(
				"%s : %d: Error stat %s is %"PRIu64
				", expected %"PRIu64"\n",
				__func__, __LINE__, xstats_names[i].name,
				values[i], port_expected[i]);
			fails++;
		}
	}

	ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT,
					0, NULL, 0);

	/* ensure reset statistics are zero-ed */
	static const uint64_t port_expected_zero[] = {
		0 /* rx */,
		0 /* tx */,
		0 /* drop */,
		0 /* inflight */,
		0 /* avg pkt cycles */,
		29 /* credits */,
		0 /* rx ring used */,
		4096 /* rx ring free */,
		0 /* cq ring used */,
		32 /* cq ring free */,
		0 /* dequeue calls */,
		/* 10 dequeue burst buckets */
		0, 0, 0, 0, 0,
		0, 0, 0, 0, 0,
	};
	ret = rte_event_dev_xstats_get(evdev,
					RTE_EVENT_DEV_XSTATS_PORT,
					0, ids, values, num_stats);
	for (i = 0; (signed int)i < ret; i++) {
		if (port_expected_zero[i] != values[i]) {
			printf(
				"%d, Error, xstat %d (id %d) %s : %"PRIu64
				", expect %"PRIu64"\n",
				__LINE__, i, ids[i], xstats_names[i].name,
				values[i], port_expected_zero[i]);
			fails++;
		}
	}

	/* QUEUE STATS TESTS */
	num_stats = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_QUEUE, 0,
					xstats_names, ids, XSTATS_MAX);
	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
					0, ids, values, num_stats);
	if (ret < 0) {
		printf("xstats get returned %d\n", ret);
		goto fail;
	}
	if ((unsigned int)ret > XSTATS_MAX)
		printf("%s %d: more xstats available than space\n",
				__func__, __LINE__);

	static const uint64_t queue_expected[] = {
		3 /* rx */,
		3 /* tx */,
		0 /* drop */,
		3 /* inflight */,
		0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
		/* QID-to-Port: pinned_flows, packets */
		0, 0,
		0, 0,
		1, 3,
		0, 0,
	};
	for (i = 0; (signed int)i < ret; i++) {
		if (queue_expected[i] != values[i]) {
			printf(
				"%d, Error, xstat %d (id %d) %s : %"PRIu64
				", expect %"PRIu64"\n",
				__LINE__, i, ids[i], xstats_names[i].name,
				values[i], queue_expected[i]);
			fails++;
		}
	}

	/* Reset the queue stats here */
	ret = rte_event_dev_xstats_reset(evdev,
					RTE_EVENT_DEV_XSTATS_QUEUE, 0,
					NULL, 0);

	/* Verify that the resetable stats are reset, and others are not */
	static const uint64_t queue_expected_zero[] = {
		0 /* rx */,
		0 /* tx */,
		0 /* drop */,
		3 /* inflight */,
		0, 0, 0, 0, /* 4 iq used */
		/* QID-to-Port: pinned_flows, packets */
		0, 0,
		0, 0,
		1, 0,
		0, 0,
	};

	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
					ids, values, num_stats);

	for (i = 0; (signed int)i < ret; i++) {
		if (queue_expected_zero[i] != values[i]) {
			printf(
				"%d, Error, xstat %d (id %d) %s : %"PRIu64
				", expect %"PRIu64"\n",
				__LINE__, i, ids[i], xstats_names[i].name,
				values[i], queue_expected_zero[i]);
			fails++;
		}
	}

	if (fails) {
		printf("%d : %d of values were not as expected above\n",
				__LINE__, fails);
		goto fail;
	}

	cleanup(t);
	return 0;

fail:
	rte_event_dev_dump(0, stdout);
	cleanup(t);
	return -1;
}
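
/* For reference, a minimal sketch of resetting one statistic by id
 * rather than a whole mode, the variant exercised by
 * xstats_id_reset_tests() below:
 *
 *	unsigned int id;
 *	rte_event_dev_xstats_by_name_get(evdev, "dev_rx", &id);
 *	uint32_t id32 = id;
 *	rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
 *				   0, &id32, 1);
 */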
static int
xstats_id_abuse_tests(struct test *t)
{
	int err;
	const uint32_t XSTATS_MAX = 1024;
	const uint32_t link_port = 2;

	uint32_t ids[XSTATS_MAX];
	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];

	/* Create instance with 4 ports */
	if (init(t, 1, 4) < 0 ||
			create_ports(t, 4) < 0 ||
			create_atomic_qids(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		goto fail;
	}

	err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0);
	if (err != 1) {
		printf("%d: error mapping lb qid\n", __LINE__);
		goto fail;
	}

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		goto fail;
	}

	/* no test for device, as it ignores the port/q number */
	int num_stats = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_PORT,
					UINT8_MAX-1, xstats_names, ids,
					XSTATS_MAX);
	if (num_stats != 0) {
		printf("%d: expected %d stats, got return %d\n", __LINE__,
				0, num_stats);
		goto fail;
	}

	num_stats = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_QUEUE,
					UINT8_MAX-1, xstats_names, ids,
					XSTATS_MAX);
	if (num_stats != 0) {
		printf("%d: expected %d stats, got return %d\n", __LINE__,
				0, num_stats);
		goto fail;
	}

	cleanup(t);
	return 0;
fail:
	cleanup(t);
	return -1;
}
static int
port_reconfig_credits(struct test *t)
{
	if (init(t, 1, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	uint32_t i;
	const uint32_t NUM_ITERS = 32;
	for (i = 0; i < NUM_ITERS; i++) {
		const struct rte_event_queue_conf conf = {
			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = 1024,
			.nb_atomic_order_sequences = 1024,
		};
		if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
			printf("%d: error creating qid\n", __LINE__);
			return -1;
		}
		t->qid[0] = 0;

		static const struct rte_event_port_conf port_conf = {
				.new_event_threshold = 128,
				.dequeue_depth = 32,
				.enqueue_depth = 64,
				.disable_implicit_release = 0,
		};
		if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
			printf("%d Error setting up port\n", __LINE__);
			return -1;
		}

		int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
		if (links != 1) {
			printf("%d: error mapping lb qid\n", __LINE__);
			goto fail;
		}

		if (rte_event_dev_start(evdev) < 0) {
			printf("%d: Error with start call\n", __LINE__);
			goto fail;
		}

		const uint32_t NPKTS = 1;
		uint32_t j;
		for (j = 0; j < NPKTS; j++) {
			struct rte_event ev;
			struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
			if (!arp) {
				printf("%d: gen of pkt failed\n", __LINE__);
				goto fail;
			}
			ev.queue_id = t->qid[0];
			ev.op = RTE_EVENT_OP_NEW;
			ev.mbuf = arp;
			int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
			if (err != 1) {
				printf("%d: Failed to enqueue\n", __LINE__);
				rte_event_dev_dump(0, stdout);
				goto fail;
			}
		}

		rte_service_run_iter_on_app_lcore(t->service_id, 1);

		struct rte_event ev[NPKTS];
		int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
							NPKTS, 0);
		if (deq != 1)
			printf("%d error; no packet dequeued\n", __LINE__);

		/* let cleanup below stop the device on last iter */
		if (i != NUM_ITERS-1)
			rte_event_dev_stop(evdev);
	}

	cleanup(t);
	return 0;
fail:
	cleanup(t);
	return -1;
}
static int
port_single_lb_reconfig(struct test *t)
{
	if (init(t, 2, 2) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		goto fail;
	}

	static const struct rte_event_queue_conf conf_lb_atomic = {
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1024,
	};
	if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
		printf("%d: error creating qid\n", __LINE__);
		goto fail;
	}

	static const struct rte_event_queue_conf conf_single_link = {
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
	};
	if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
		printf("%d: error creating qid\n", __LINE__);
		goto fail;
	}

	struct rte_event_port_conf port_conf = {
		.new_event_threshold = 128,
		.dequeue_depth = 32,
		.enqueue_depth = 64,
		.disable_implicit_release = 0,
	};
	if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
		printf("%d Error setting up port\n", __LINE__);
		goto fail;
	}
	if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
		printf("%d Error setting up port\n", __LINE__);
		goto fail;
	}

	/* link port to lb queue */
	uint8_t queue_id = 0;
	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
		printf("%d: error creating link for qid\n", __LINE__);
		goto fail;
	}

	int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
	if (ret != 1) {
		printf("%d: Error unlinking lb port\n", __LINE__);
		goto fail;
	}

	/* re-link the port to the single-link queue */
	queue_id = 1;
	if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
		printf("%d: error creating link for qid\n", __LINE__);
		goto fail;
	}

	/* port 1 takes over the lb queue */
	queue_id = 0;
	int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
	if (err != 1) {
		printf("%d: error mapping lb qid\n", __LINE__);
		goto fail;
	}

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		goto fail;
	}

	cleanup(t);
	return 0;
fail:
	cleanup(t);
	return -1;
}
static int
xstats_brute_force(struct test *t)
{
	uint32_t i;
	const uint32_t XSTATS_MAX = 1024;
	uint32_t ids[XSTATS_MAX];
	uint64_t values[XSTATS_MAX];
	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];

	/* Create instance with 4 ports */
	if (init(t, 1, 4) < 0 ||
			create_ports(t, 4) < 0 ||
			create_atomic_qids(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
	if (err != 1) {
		printf("%d: error mapping lb qid\n", __LINE__);
		goto fail;
	}

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		goto fail;
	}

	for (i = 0; i < XSTATS_MAX; i++)
		ids[i] = i;

	for (i = 0; i < 3; i++) {
		uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i;
		uint32_t j;
		for (j = 0; j < UINT8_MAX; j++) {
			rte_event_dev_xstats_names_get(evdev, mode,
				j, xstats_names, ids, XSTATS_MAX);

			rte_event_dev_xstats_get(evdev, mode, j, ids,
						 values, XSTATS_MAX);
		}
	}

	cleanup(t);
	return 0;
fail:
	cleanup(t);
	return -1;
}
static int
xstats_id_reset_tests(struct test *t)
{
	const int wrk_enq = 2;
	int err;

	/* Create instance with 4 ports */
	if (init(t, 1, 4) < 0 ||
			create_ports(t, 4) < 0 ||
			create_atomic_qids(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	/* CQ mapping to QID */
	err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
	if (err != 1) {
		printf("%d: error mapping lb qid\n", __LINE__);
		goto fail;
	}

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		goto fail;
	}

#define XSTATS_MAX 1024
	int ret;
	uint32_t i;
	uint32_t ids[XSTATS_MAX];
	uint64_t values[XSTATS_MAX];
	struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];

	for (i = 0; i < XSTATS_MAX; i++)
		ids[i] = i;

#define NUM_DEV_STATS 6
	/* Device names / values */
	int num_stats = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_DEVICE,
					0, xstats_names, ids, XSTATS_MAX);
	if (num_stats != NUM_DEV_STATS) {
		printf("%d: expected %d stats, got return %d\n", __LINE__,
				NUM_DEV_STATS, num_stats);
		goto fail;
	}
	ret = rte_event_dev_xstats_get(evdev,
					RTE_EVENT_DEV_XSTATS_DEVICE,
					0, ids, values, num_stats);
	if (ret != NUM_DEV_STATS) {
		printf("%d: expected %d stats, got return %d\n", __LINE__,
				NUM_DEV_STATS, ret);
		goto fail;
	}

#define NPKTS 7
	for (i = 0; i < NPKTS; i++) {
		struct rte_event ev;
		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
		if (!arp) {
			printf("%d: gen of pkt failed\n", __LINE__);
			goto fail;
		}
		ev.queue_id = t->qid[i];
		ev.op = RTE_EVENT_OP_NEW;
		ev.mbuf = arp;
		arp->seqn = i;

		int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
		if (err != 1) {
			printf("%d: Failed to enqueue\n", __LINE__);
			goto fail;
		}
	}

	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	static const char * const dev_names[] = {
		"dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
		"dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
	};
	uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
	for (i = 0; (int)i < ret; i++) {
		unsigned int id;
		uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
								dev_names[i],
								&id);
		if (id != i) {
			printf("%d: %s id incorrect, expected %d got %d\n",
					__LINE__, dev_names[i], i, id);
			goto fail;
		}
		if (val != dev_expected[i]) {
			printf("%d: %s value incorrect, expected %"
				PRIu64" got %d\n", __LINE__, dev_names[i],
				dev_expected[i], id);
			goto fail;
		}
		/* reset to zero */
		int reset_ret = rte_event_dev_xstats_reset(evdev,
						RTE_EVENT_DEV_XSTATS_DEVICE, 0,
						&id, 1);
		if (reset_ret) {
			printf("%d: failed to reset successfully\n", __LINE__);
			goto fail;
		}
		dev_expected[i] = 0;
		/* check value again */
		val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], 0);
		if (val != dev_expected[i]) {
			printf("%d: %s value incorrect, expected %"PRIu64
				" got %"PRIu64"\n", __LINE__, dev_names[i],
				dev_expected[i], val);
			goto fail;
		}
	}

/* 48 is stat offset from start of the devices whole xstats.
 * This WILL break every time we add a statistic to a port
 * or the device, but there is no other way to test
 */
#define PORT_OFF 48
/* num stats for the tested port. CQ size adds more stats to a port */
#define NUM_PORT_STATS 21
/* the port to test. */
#define PORT 2
	num_stats = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_PORT, PORT,
					xstats_names, ids, XSTATS_MAX);
	if (num_stats != NUM_PORT_STATS) {
		printf("%d: expected %d stats, got return %d\n",
			__LINE__, NUM_PORT_STATS, num_stats);
		goto fail;
	}
	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT,
					ids, values, num_stats);

	if (ret != NUM_PORT_STATS) {
		printf("%d: expected %d stats, got return %d\n",
				__LINE__, NUM_PORT_STATS, ret);
		goto fail;
	}
	static const char * const port_names[] = {
		"port_2_rx",
		"port_2_tx",
		"port_2_drop",
		"port_2_inflight",
		"port_2_avg_pkt_cycles",
		"port_2_credits",
		"port_2_rx_ring_used",
		"port_2_rx_ring_free",
		"port_2_cq_ring_used",
		"port_2_cq_ring_free",
		"port_2_dequeue_calls",
		"port_2_dequeues_returning_0",
		"port_2_dequeues_returning_1-4",
		"port_2_dequeues_returning_5-8",
		"port_2_dequeues_returning_9-12",
		"port_2_dequeues_returning_13-16",
		"port_2_dequeues_returning_17-20",
		"port_2_dequeues_returning_21-24",
		"port_2_dequeues_returning_25-28",
		"port_2_dequeues_returning_29-32",
		"port_2_dequeues_returning_33-36",
	};
	uint64_t port_expected[] = {
		0, /* rx */
		0, /* tx */
		0, /* drop */
		NPKTS, /* inflight */
		0, /* avg pkt cycles */
		0, /* credits */
		0, /* rx ring used */
		4096, /* rx ring free */
		NPKTS, /* cq ring used */
		25, /* cq ring free */
		0, /* dequeue calls */
		/* 10 dequeue burst buckets */
		0, 0, 0, 0, 0,
		0, 0, 0, 0, 0,
	};
	uint64_t port_expected_zero[] = {
		0, /* rx */
		0, /* tx */
		0, /* drop */
		NPKTS, /* inflight */
		0, /* avg pkt cycles */
		0, /* credits */
		0, /* rx ring used */
		4096, /* rx ring free */
		NPKTS, /* cq ring used */
		25, /* cq ring free */
		0, /* dequeue calls */
		/* 10 dequeue burst buckets */
		0, 0, 0, 0, 0,
		0, 0, 0, 0, 0,
	};
	if (RTE_DIM(port_expected) != NUM_PORT_STATS ||
			RTE_DIM(port_names) != NUM_PORT_STATS) {
		printf("%d: port array of wrong size\n", __LINE__);
		goto fail;
	}

	for (i = 0; (int)i < ret; i++) {
		unsigned int id;
		uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
								port_names[i],
								&id);
		if (id != i + PORT_OFF) {
			printf("%d: %s id incorrect, expected %d got %d\n",
					__LINE__, port_names[i], i+PORT_OFF,
					id);
			goto fail;
		}
		if (val != port_expected[i]) {
			printf("%d: %s value incorrect, expected %"PRIu64
				" got %d\n", __LINE__, port_names[i],
				port_expected[i], id);
			goto fail;
		}
		/* reset to zero */
		int reset_ret = rte_event_dev_xstats_reset(evdev,
						RTE_EVENT_DEV_XSTATS_PORT, PORT,
						&id, 1);
		if (reset_ret) {
			printf("%d: failed to reset successfully\n", __LINE__);
			goto fail;
		}
		/* check value again */
		val = rte_event_dev_xstats_by_name_get(evdev, port_names[i], 0);
		if (val != port_expected_zero[i]) {
			printf("%d: %s value incorrect, expected %"PRIu64
				" got %"PRIu64"\n", __LINE__, port_names[i],
				port_expected_zero[i], val);
			goto fail;
		}
	}

/* num queue stats */
#define NUM_Q_STATS 16
/* queue offset from start of the devices whole xstats.
 * This will break every time we add a statistic to a device/port/queue
 */
#define QUEUE_OFF 90
	const uint32_t queue = 0;
	num_stats = rte_event_dev_xstats_names_get(evdev,
					RTE_EVENT_DEV_XSTATS_QUEUE, queue,
					xstats_names, ids, XSTATS_MAX);
	if (num_stats != NUM_Q_STATS) {
		printf("%d: expected %d stats, got return %d\n",
			__LINE__, NUM_Q_STATS, num_stats);
		goto fail;
	}
	ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
					queue, ids, values, num_stats);
	if (ret != NUM_Q_STATS) {
		printf("%d: expected %d stats, got return %d\n",
				__LINE__, NUM_Q_STATS, ret);
		goto fail;
	}
	static const char * const queue_names[] = {
		"qid_0_rx",
		"qid_0_tx",
		"qid_0_drop",
		"qid_0_inflight",
		"qid_0_iq_0_used",
		"qid_0_iq_1_used",
		"qid_0_iq_2_used",
		"qid_0_iq_3_used",
		"qid_0_port_0_pinned_flows",
		"qid_0_port_0_packets",
		"qid_0_port_1_pinned_flows",
		"qid_0_port_1_packets",
		"qid_0_port_2_pinned_flows",
		"qid_0_port_2_packets",
		"qid_0_port_3_pinned_flows",
		"qid_0_port_3_packets",
	};
	uint64_t queue_expected[] = {
		NPKTS, /* rx */
		NPKTS, /* tx */
		0, /* drop */
		NPKTS, /* inflight */
		0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
		/* QID-to-Port: pinned_flows, packets */
		0, 0,
		0, 0,
		1, NPKTS,
		0, 0,
	};
	uint64_t queue_expected_zero[] = {
		0, /* rx */
		0, /* tx */
		0, /* drop */
		NPKTS, /* inflight */
		0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
		/* QID-to-Port: pinned_flows, packets */
		0, 0,
		0, 0,
		1, 0,
		0, 0,
	};
	if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
			RTE_DIM(queue_expected_zero) != NUM_Q_STATS ||
			RTE_DIM(queue_names) != NUM_Q_STATS) {
		printf("%d : queue array of wrong size\n", __LINE__);
		goto fail;
	}

	for (i = 0; (int)i < ret; i++) {
		unsigned int id;
		uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
								queue_names[i],
								&id);
		if (id != i + QUEUE_OFF) {
			printf("%d: %s id incorrect, expected %d got %d\n",
					__LINE__, queue_names[i], i+QUEUE_OFF,
					id);
			goto fail;
		}
		if (val != queue_expected[i]) {
			printf("%d: %d: %s value , expected %"PRIu64
				" got %"PRIu64"\n", i, __LINE__,
				queue_names[i], queue_expected[i], val);
			goto fail;
		}
		/* reset to zero */
		int reset_ret = rte_event_dev_xstats_reset(evdev,
						RTE_EVENT_DEV_XSTATS_QUEUE,
						queue, &id, 1);
		if (reset_ret) {
			printf("%d: failed to reset successfully\n", __LINE__);
			goto fail;
		}
		/* check value again */
		val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i],
				0);
		if (val != queue_expected_zero[i]) {
			printf("%d: %s value incorrect, expected %"PRIu64
				" got %"PRIu64"\n", __LINE__, queue_names[i],
				queue_expected_zero[i], val);
			goto fail;
		}
	}

	cleanup(t);
	return 0;
fail:
	cleanup(t);
	return -1;
}
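
/* Sanity check on the offsets used above: the device exposes
 * NUM_DEV_STATS (6) stats and each of the 4 ports exposes NUM_PORT_STATS
 * (21). Port 2's stats therefore start at id 6 + 2*21 = 48 (PORT_OFF),
 * and queue stats follow all ports at id 6 + 4*21 = 90 (QUEUE_OFF).
 */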
static int
ordered_reconfigure(struct test *t)
{
	if (init(t, 1, 1) < 0 ||
			create_ports(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	const struct rte_event_queue_conf conf = {
			.schedule_type = RTE_SCHED_TYPE_ORDERED,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = 1024,
			.nb_atomic_order_sequences = 1024,
	};

	if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
		printf("%d: error creating qid\n", __LINE__);
		goto failed;
	}

	if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
		printf("%d: error creating qid, for 2nd time\n", __LINE__);
		goto failed;
	}

	rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	cleanup(t);
	return 0;
failed:
	cleanup(t);
	return -1;
}
static int
qid_priorities(struct test *t)
{
	/* Test works by having a CQ with enough empty space for all packets,
	 * and enqueueing 3 packets to 3 QIDs. They must return based on the
	 * priority of the QID, not the ingress order, to pass the test
	 */
	unsigned int i;
	/* Create instance with 1 ports, and 3 qids */
	if (init(t, 3, 1) < 0 ||
			create_ports(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	for (i = 0; i < 3; i++) {
		/* Create QID */
		const struct rte_event_queue_conf conf = {
			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
			/* increase priority (0 == highest), as we go */
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
			.nb_atomic_flows = 1024,
			.nb_atomic_order_sequences = 1024,
		};

		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
			printf("%d: error creating qid %d\n", __LINE__, i);
			return -1;
		}
		t->qid[i] = i;
	}

	/* map all QIDs to port */
	rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	/* enqueue 3 packets, setting seqn and QID to check priority */
	for (i = 0; i < 3; i++) {
		struct rte_event ev;
		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
		if (!arp) {
			printf("%d: gen of pkt failed\n", __LINE__);
			return -1;
		}
		ev.queue_id = t->qid[i];
		ev.op = RTE_EVENT_OP_NEW;
		ev.mbuf = arp;
		arp->seqn = i;

		int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
		if (err != 1) {
			printf("%d: Failed to enqueue\n", __LINE__);
			return -1;
		}
	}

	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	/* dequeue packets, verify priority was upheld */
	struct rte_event ev[32];
	uint32_t deq_pkts =
		rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
	if (deq_pkts != 3) {
		printf("%d: failed to deq packets\n", __LINE__);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}
	for (i = 0; i < 3; i++) {
		if (ev[i].mbuf->seqn != 2-i) {
			printf(
				"%d: qid priority test: seqn %d incorrectly prioritized\n",
					__LINE__, i);
			return -1;
		}
	}

	cleanup(t);
	return 0;
}
static int
unlink_in_progress(struct test *t)
{
	/* Test unlinking API, in particular that when an unlink request has
	 * not yet been seen by the scheduler thread, that the
	 * unlink_in_progress() function returns the number of unlinks.
	 */
	unsigned int i;
	/* Create instance with 1 ports, and 3 qids */
	if (init(t, 3, 1) < 0 ||
			create_ports(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	for (i = 0; i < 3; i++) {
		/* Create QID */
		const struct rte_event_queue_conf conf = {
			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
			/* increase priority (0 == highest), as we go */
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
			.nb_atomic_flows = 1024,
			.nb_atomic_order_sequences = 1024,
		};

		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
			printf("%d: error creating qid %d\n", __LINE__, i);
			return -1;
		}
		t->qid[i] = i;
	}

	/* map all QIDs to port */
	rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	/* unlink all ports to have outstanding unlink requests */
	int ret = rte_event_port_unlink(evdev, t->port[0], NULL, 0);
	if (ret < 0) {
		printf("%d: Failed to unlink queues\n", __LINE__);
		return -1;
	}

	/* get active unlinks here, expect 3 */
	int unlinks_in_progress =
		rte_event_port_unlinks_in_progress(evdev, t->port[0]);
	if (unlinks_in_progress != 3) {
		printf("%d: Expected num unlinks in progress == 3, got %d\n",
				__LINE__, unlinks_in_progress);
		return -1;
	}

	/* run scheduler service on this thread to ack the unlinks */
	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	/* active unlinks expected as 0 as scheduler thread has acked */
	unlinks_in_progress =
		rte_event_port_unlinks_in_progress(evdev, t->port[0]);
	if (unlinks_in_progress != 0) {
		printf("%d: Expected num unlinks in progress == 0, got %d\n",
				__LINE__, unlinks_in_progress);
		return -1;
	}

	cleanup(t);
	return 0;
}
static int
load_balancing(struct test *t)
{
	const int rx_enq = 0;
	int err;
	uint32_t i;

	if (init(t, 1, 4) < 0 ||
			create_ports(t, 4) < 0 ||
			create_atomic_qids(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	for (i = 0; i < 3; i++) {
		/* map port 1 - 3 inclusive */
		if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
				NULL, 1) != 1) {
			printf("%d: error mapping qid to port %d\n",
					__LINE__, i);
			return -1;
		}
	}

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	/************** FORWARD ****************/
	/*
	 * Create a set of flows that test the load-balancing operation of the
	 * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
	 * with a new flow, which should be sent to the 3rd mapped CQ
	 */
	static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};

	for (i = 0; i < RTE_DIM(flows); i++) {
		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
		if (!arp) {
			printf("%d: gen of pkt failed\n", __LINE__);
			return -1;
		}

		struct rte_event ev = {
				.op = RTE_EVENT_OP_NEW,
				.queue_id = t->qid[0],
				.flow_id = flows[i],
				.mbuf = arp,
		};
		/* generate pkt and enqueue */
		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
		if (err != 1) {
			printf("%d: Failed to enqueue\n", __LINE__);
			return -1;
		}
	}

	rte_service_run_iter_on_app_lcore(t->service_id, 1);
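
	/* flows[] above contains flow 0 four times, flow 1 twice and flow 2
	 * three times. Flow 0 pins to the first mapped CQ, flow 1 to the
	 * second and flow 2 to the third, so ports 1, 2 and 3 should show
	 * 4, 2 and 3 inflight events respectively, as checked below.
	 */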
	struct test_event_dev_stats stats;
	err = test_event_dev_stats_get(evdev, &stats);
	if (err) {
		printf("%d: failed to get stats\n", __LINE__);
		return -1;
	}

	if (stats.port_inflight[1] != 4) {
		printf("%d:%s: port 1 inflight not correct\n", __LINE__,
				__func__);
		return -1;
	}
	if (stats.port_inflight[2] != 2) {
		printf("%d:%s: port 2 inflight not correct\n", __LINE__,
				__func__);
		return -1;
	}
	if (stats.port_inflight[3] != 3) {
		printf("%d:%s: port 3 inflight not correct\n", __LINE__,
				__func__);
		return -1;
	}

	cleanup(t);
	return 0;
}
static int
load_balancing_history(struct test *t)
{
	struct test_event_dev_stats stats = {0};
	const int rx_enq = 0;
	int err;
	uint32_t i;

	/* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
	if (init(t, 1, 4) < 0 ||
			create_ports(t, 4) < 0 ||
			create_atomic_qids(t, 1) < 0)
		return -1;

	/* CQ mapping to QID */
	if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
		printf("%d: error mapping port 1 qid\n", __LINE__);
		return -1;
	}
	if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
		printf("%d: error mapping port 2 qid\n", __LINE__);
		return -1;
	}
	if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
		printf("%d: error mapping port 3 qid\n", __LINE__);
		return -1;
	}
	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	/*
	 * Create a set of flows that test the load-balancing operation of the
	 * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
	 * the packet from CQ 0, send in a new set of flows. Ensure that:
	 *  1. The new flow 3 gets into the empty CQ0
	 *  2. packets for existing flow gets added into CQ1
	 *  3. Next flow 0 pkt is now onto CQ2, since CQ0 and CQ1 now contain
	 *     more outstanding pkts
	 *
	 * This test makes sure that when a flow ends (i.e. all packets
	 * have been completed for that flow), that the flow can be moved
	 * to a different CQ when new packets come in for that flow.
	 */
	static uint32_t flows1[] = {0, 1, 1, 2};

	for (i = 0; i < RTE_DIM(flows1); i++) {
		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
		struct rte_event ev = {
				.flow_id = flows1[i],
				.op = RTE_EVENT_OP_NEW,
				.queue_id = t->qid[0],
				.event_type = RTE_EVENT_TYPE_CPU,
				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
				.mbuf = arp
		};
		if (!arp) {
			printf("%d: gen of pkt failed\n", __LINE__);
			return -1;
		}
		arp->hash.rss = flows1[i];
		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
		if (err != 1) {
			printf("%d: Failed to enqueue\n", __LINE__);
			return -1;
		}
	}

	/* call the scheduler */
	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	/* Dequeue the flow 0 packet from port 1, so that we can then drop */
	struct rte_event ev;
	if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
		printf("%d: failed to dequeue\n", __LINE__);
		return -1;
	}
	if (ev.mbuf->hash.rss != flows1[0]) {
		printf("%d: unexpected flow received\n", __LINE__);
		return -1;
	}

	/* drop the flow 0 packet from port 1 */
	rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);

	/* call the scheduler */
	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	/*
	 * Set up the next set of flows, first a new flow to fill up
	 * CQ 0, so that the next flow 0 packet should go to CQ2
	 */
	static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };

	for (i = 0; i < RTE_DIM(flows2); i++) {
		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
		struct rte_event ev = {
				.flow_id = flows2[i],
				.op = RTE_EVENT_OP_NEW,
				.queue_id = t->qid[0],
				.event_type = RTE_EVENT_TYPE_CPU,
				.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
				.mbuf = arp
		};
		if (!arp) {
			printf("%d: gen of pkt failed\n", __LINE__);
			return -1;
		}
		arp->hash.rss = flows2[i];

		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
		if (err != 1) {
			printf("%d: Failed to enqueue\n", __LINE__);
			return -1;
		}
	}

	/* schedule */
	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	err = test_event_dev_stats_get(evdev, &stats);
	if (err) {
		printf("%d:failed to get stats\n", __LINE__);
		return -1;
	}

	/*
	 * Now check the resulting inflights on each port.
	 */
	if (stats.port_inflight[1] != 3) {
		printf("%d:%s: port 1 inflight not correct\n", __LINE__,
				__func__);
		printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
				(unsigned int)stats.port_inflight[1],
				(unsigned int)stats.port_inflight[2],
				(unsigned int)stats.port_inflight[3]);
		return -1;
	}
	if (stats.port_inflight[2] != 4) {
		printf("%d:%s: port 2 inflight not correct\n", __LINE__,
				__func__);
		printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
				(unsigned int)stats.port_inflight[1],
				(unsigned int)stats.port_inflight[2],
				(unsigned int)stats.port_inflight[3]);
		return -1;
	}
	if (stats.port_inflight[3] != 2) {
		printf("%d:%s: port 3 inflight not correct\n", __LINE__,
				__func__);
		printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
				(unsigned int)stats.port_inflight[1],
				(unsigned int)stats.port_inflight[2],
				(unsigned int)stats.port_inflight[3]);
		return -1;
	}

	for (i = 1; i <= 3; i++) {
		struct rte_event ev;
		while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
			rte_event_enqueue_burst(evdev, i, &release_ev, 1);
	}
	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	cleanup(t);
	return 0;
}
static int
invalid_qid(struct test *t)
{
	struct test_event_dev_stats stats;
	const int rx_enq = 0;
	int err;
	uint32_t i;

	if (init(t, 1, 4) < 0 ||
			create_ports(t, 4) < 0 ||
			create_atomic_qids(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	/* CQ mapping to QID */
	for (i = 0; i < 4; i++) {
		err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
				NULL, 1);
		if (err != 1) {
			printf("%d: error mapping port 1 qid\n", __LINE__);
			return -1;
		}
	}

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	/*
	 * Send in a packet with an invalid qid to the scheduler.
	 * We should see the packet enqueued OK, but the inflights for
	 * that packet should not be incremented, and the rx_dropped
	 * should be incremented.
	 */
	static uint32_t flows1[] = {20};

	for (i = 0; i < RTE_DIM(flows1); i++) {
		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
		if (!arp) {
			printf("%d: gen of pkt failed\n", __LINE__);
			return -1;
		}

		struct rte_event ev = {
				.op = RTE_EVENT_OP_NEW,
				.queue_id = t->qid[0] + flows1[i],
				.flow_id = i,
				.mbuf = arp,
		};
		/* generate pkt and enqueue */
		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
		if (err != 1) {
			printf("%d: Failed to enqueue\n", __LINE__);
			return -1;
		}
	}

	/* call the scheduler */
	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	err = test_event_dev_stats_get(evdev, &stats);
	if (err) {
		printf("%d: failed to get stats\n", __LINE__);
		return -1;
	}

	/*
	 * Now check the resulting inflights on the port, and the rx_dropped.
	 */
	if (stats.port_inflight[0] != 0) {
		printf("%d:%s: port 1 inflight count not correct\n", __LINE__,
				__func__);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}
	if (stats.port_rx_dropped[0] != 1) {
		printf("%d:%s: port 1 drops\n", __LINE__, __func__);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}
	/* each packet drop should only be counted in one place - port or dev */
	if (stats.rx_dropped != 0) {
		printf("%d:%s: port 1 dropped count not correct\n", __LINE__,
				__func__);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}

	cleanup(t);
	return 0;
}
static int
single_packet(struct test *t)
{
	const uint32_t MAGIC_SEQN = 7321;
	struct rte_event ev;
	struct test_event_dev_stats stats;
	const int rx_enq = 0;
	const int wrk_enq = 2;
	int err;

	/* Create instance with 4 ports */
	if (init(t, 1, 4) < 0 ||
			create_ports(t, 4) < 0 ||
			create_atomic_qids(t, 1) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	/* CQ mapping to QID */
	err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
	if (err != 1) {
		printf("%d: error mapping lb qid\n", __LINE__);
		cleanup(t);
		return -1;
	}

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	/************** Gen pkt and enqueue ****************/
	struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
	if (!arp) {
		printf("%d: gen of pkt failed\n", __LINE__);
		return -1;
	}

	ev.op = RTE_EVENT_OP_NEW;
	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev.mbuf = arp;
	ev.queue_id = 0;
	ev.flow_id = 3;
	arp->seqn = MAGIC_SEQN;

	err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
	if (err != 1) {
		printf("%d: Failed to enqueue\n", __LINE__);
		return -1;
	}

	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	err = test_event_dev_stats_get(evdev, &stats);
	if (err) {
		printf("%d: failed to get stats\n", __LINE__);
		return -1;
	}

	if (stats.rx_pkts != 1 ||
			stats.tx_pkts != 1 ||
			stats.port_inflight[wrk_enq] != 1) {
		printf("%d: Sched core didn't handle pkt as expected\n",
				__LINE__);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}

	uint32_t deq_pkts;

	deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
	if (deq_pkts != 1) {
		printf("%d: Failed to deq\n", __LINE__);
		return -1;
	}

	err = test_event_dev_stats_get(evdev, &stats);
	if (err) {
		printf("%d: failed to get stats\n", __LINE__);
		return -1;
	}

	if (ev.mbuf->seqn != MAGIC_SEQN) {
		printf("%d: magic sequence number not dequeued\n", __LINE__);
		return -1;
	}

	rte_pktmbuf_free(ev.mbuf);
	err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
	if (err != 1) {
		printf("%d: Failed to enqueue\n", __LINE__);
		return -1;
	}
	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	err = test_event_dev_stats_get(evdev, &stats);
	if (stats.port_inflight[wrk_enq] != 0) {
		printf("%d: port inflight not correct\n", __LINE__);
		return -1;
	}

	cleanup(t);
	return 0;
}
static int
inflight_counts(struct test *t)
{
	struct rte_event ev;
	struct test_event_dev_stats stats;
	const int rx_enq = 0;
	const int p1 = 1;
	const int p2 = 2;
	int err;
	int i;

	/* Create instance with 3 ports and 2 queues */
	if (init(t, 2, 3) < 0 ||
			create_ports(t, 3) < 0 ||
			create_atomic_qids(t, 2) < 0) {
		printf("%d: Error initializing device\n", __LINE__);
		return -1;
	}

	/* CQ mapping to QID */
	err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
	if (err != 1) {
		printf("%d: error mapping lb qid\n", __LINE__);
		cleanup(t);
		return -1;
	}
	err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
	if (err != 1) {
		printf("%d: error mapping lb qid\n", __LINE__);
		cleanup(t);
		return -1;
	}

	if (rte_event_dev_start(evdev) < 0) {
		printf("%d: Error with start call\n", __LINE__);
		return -1;
	}

	/************** FORWARD ****************/
#define QID1_NUM 5
	for (i = 0; i < QID1_NUM; i++) {
		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);

		if (!arp) {
			printf("%d: gen of pkt failed\n", __LINE__);
			goto err;
		}

		ev.queue_id = t->qid[0];
		ev.op = RTE_EVENT_OP_NEW;
		ev.mbuf = arp;
		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
		if (err != 1) {
			printf("%d: Failed to enqueue\n", __LINE__);
			goto err;
		}
	}
#define QID2_NUM 3
	for (i = 0; i < QID2_NUM; i++) {
		struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);

		if (!arp) {
			printf("%d: gen of pkt failed\n", __LINE__);
			goto err;
		}
		ev.queue_id = t->qid[1];
		ev.op = RTE_EVENT_OP_NEW;
		ev.mbuf = arp;
		err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
		if (err != 1) {
			printf("%d: Failed to enqueue\n", __LINE__);
			goto err;
		}
	}

	/* schedule */
	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	err = test_event_dev_stats_get(evdev, &stats);
	if (err) {
		printf("%d: failed to get stats\n", __LINE__);
		goto err;
	}

	if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
			stats.tx_pkts != QID1_NUM + QID2_NUM) {
		printf("%d: Sched core didn't handle pkt as expected\n",
				__LINE__);
		goto err;
	}

	if (stats.port_inflight[p1] != QID1_NUM) {
		printf("%d: %s port 1 inflight not correct\n", __LINE__,
				__func__);
		goto err;
	}
	if (stats.port_inflight[p2] != QID2_NUM) {
		printf("%d: %s port 2 inflight not correct\n", __LINE__,
				__func__);
		goto err;
	}

	/************** DEQUEUE INFLIGHT COUNT CHECKS ****************/
	/* port 1 */
	struct rte_event events[QID1_NUM + QID2_NUM];
	uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
			RTE_DIM(events), 0);

	if (deq_pkts != QID1_NUM) {
		printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
		goto err;
	}
	err = test_event_dev_stats_get(evdev, &stats);
	if (stats.port_inflight[p1] != QID1_NUM) {
		printf("%d: port 1 inflight decrement after DEQ != 0\n",
				__LINE__);
		goto err;
	}
	for (i = 0; i < QID1_NUM; i++) {
		err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
				1);
		if (err != 1) {
			printf("%d: %s rte enqueue of inf release failed\n",
				__LINE__, __func__);
			goto err;
		}
	}

	/*
	 * As the scheduler core decrements inflights, it needs to run to
	 * process packets to act on the drop messages
	 */
	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	err = test_event_dev_stats_get(evdev, &stats);
	if (stats.port_inflight[p1] != 0) {
		printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
		goto err;
	}

	/* port 2 */
	deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
			RTE_DIM(events), 0);
	if (deq_pkts != QID2_NUM) {
		printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
		goto err;
	}
	err = test_event_dev_stats_get(evdev, &stats);
	if (stats.port_inflight[p2] != QID2_NUM) {
		printf("%d: port 2 inflight decrement after DEQ != 0\n",
				__LINE__);
		goto err;
	}
	for (i = 0; i < QID2_NUM; i++) {
		err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
				1);
		if (err != 1) {
			printf("%d: %s rte enqueue of inf release failed\n",
				__LINE__, __func__);
			goto err;
		}
	}

	/*
	 * As the scheduler core decrements inflights, it needs to run to
	 * process packets to act on the drop messages
	 */
	rte_service_run_iter_on_app_lcore(t->service_id, 1);

	err = test_event_dev_stats_get(evdev, &stats);
	if (stats.port_inflight[p2] != 0) {
		printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
		goto err;
	}
	cleanup(t);
	return 0;

err:
	rte_event_dev_dump(evdev, stdout);
	cleanup(t);
	return -1;
}
2619 parallel_basic(struct test *t, int check_order)
2621 const uint8_t rx_port = 0;
2622 const uint8_t w1_port = 1;
2623 const uint8_t w3_port = 3;
2624 const uint8_t tx_port = 4;
2627 uint32_t deq_pkts, j;
2628 struct rte_mbuf *mbufs[3];
2629 struct rte_mbuf *mbufs_out[3] = { 0 };
2630 const uint32_t MAGIC_SEQN = 1234;
2632 /* Create instance with 4 ports */
2633 if (init(t, 2, tx_port + 1) < 0 ||
2634 create_ports(t, tx_port + 1) < 0 ||
2635 (check_order ? create_ordered_qids(t, 1) :
2636 create_unordered_qids(t, 1)) < 0 ||
2637 create_directed_qids(t, 1, &tx_port) < 0) {
2638 printf("%d: Error initializing device\n", __LINE__);
2644 * We need three ports, all mapped to the same qid0 (ordered or
2645 * unordered, depending on check_order). We take one packet out on
2646 * each port, re-enqueue them in reverse order, and then verify at
2647 * the tx_port that reordering took place (in the ordered case).
2649 * Simplified test setup diagram:
2650 *
2651 * rx_port        w1_port
2652 *        \     /        \
2653 *         qid0 - w2_port - qid1
2654 *        /     \        /
2655 *               w3_port        tx_port
2656 */
2657 /* CQ mapping to QID for LB ports (directed mapped on create) */
2658 for (i = w1_port; i <= w3_port; i++) {
2659 err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
2662 printf("%d: error mapping lb qid\n", __LINE__);
2668 if (rte_event_dev_start(evdev) < 0) {
2669 printf("%d: Error with start call\n", __LINE__);
2673 /* Enqueue 3 packets to the rx port */
2674 for (i = 0; i < 3; i++) {
2675 struct rte_event ev;
2676 mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
2678 printf("%d: gen of pkt failed\n", __LINE__);
2682 ev.queue_id = t->qid[0];
2683 ev.op = RTE_EVENT_OP_NEW;
2685 mbufs[i]->seqn = MAGIC_SEQN + i;
2687 /* enqueue the generated pkt */
2688 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
2690 printf("%d: Failed to enqueue pkt %u, retval = %u\n",
2696 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2698 /* use extra slot to make logic in loops easier */
2699 struct rte_event deq_ev[w3_port + 1];
2701 /* Dequeue the 3 packets, one from each worker port */
2702 for (i = w1_port; i <= w3_port; i++) {
2703 deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
2705 if (deq_pkts != 1) {
2706 printf("%d: Failed to deq\n", __LINE__);
2707 rte_event_dev_dump(evdev, stdout);
2712 /* Enqueue each packet in reverse order, then run the scheduler */
2713 for (i = w3_port; i >= w1_port; i--) {
2715 deq_ev[i].op = RTE_EVENT_OP_FORWARD;
2716 deq_ev[i].queue_id = t->qid[1];
2717 err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
2719 printf("%d: Failed to enqueue\n", __LINE__);
2723 rte_service_run_iter_on_app_lcore(t->service_id, 1);
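/*
 * Explanatory note (not in the original): for an ordered qid0, the sw
 * PMD assigns each event a reorder slot at dequeue time and buffers
 * the FORWARDs until the original order is restored, so even though
 * the workers re-enqueued in reverse, qid1 should receive the events
 * back in MAGIC_SEQN order.
 */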
2725 /* dequeue from the tx port, we should get 3 packets */
2726 deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
2729 /* Check to see if we've got all 3 packets */
2730 if (deq_pkts != 3) {
2731 printf("%d: expected 3 pkts at tx port got %u from port %d\n",
2732 __LINE__, deq_pkts, tx_port);
2733 rte_event_dev_dump(evdev, stdout);
2737 /* Check to see if the sequence numbers are in expected order */
2739 for (j = 0 ; j < deq_pkts ; j++) {
2740 if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
2742 "%d: Incorrect sequence number(%d) from port %d\n",
2743 __LINE__, mbufs_out[j]->seqn, tx_port);
2749 /* Destroy the instance */
2755 ordered_basic(struct test *t)
2757 return parallel_basic(t, 1);
2761 unordered_basic(struct test *t)
2763 return parallel_basic(t, 0);
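/*
 * Minimal sketch (hypothetical helper, not in the original file): how
 * a test can pin traffic to one flow on an atomic qid. All events with
 * the same flow_id on an atomic queue are scheduled to a single port,
 * which is what the head-of-line-blocking test below relies on.
 */
static inline int
enqueue_new_on_flow(struct test *t, uint8_t port_idx, uint32_t flow_id)
{
	struct rte_event ev = {
		.op = RTE_EVENT_OP_NEW,
		.queue_id = t->qid[0],
		.flow_id = flow_id,
	};

	if (rte_event_enqueue_burst(evdev, t->port[port_idx], &ev, 1) != 1)
		return -1;
	return 0;
}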
2767 holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
2769 const struct rte_event new_ev = {
2770 .op = RTE_EVENT_OP_NEW
2771 /* all other fields zero */
2773 struct rte_event ev = new_ev;
2774 unsigned int rx_port = 0; /* port we get the first flow on */
2775 char rx_port_used_stat[64];
2776 char rx_port_free_stat[64];
2777 char other_port_used_stat[64];
2779 if (init(t, 1, 2) < 0 ||
2780 create_ports(t, 2) < 0 ||
2781 create_atomic_qids(t, 1) < 0) {
2782 printf("%d: Error initializing device\n", __LINE__);
2785 int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2786 if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 ||
2788 printf("%d: Error linking queue to ports\n", __LINE__);
2791 if (rte_event_dev_start(evdev) < 0) {
2792 printf("%d: Error with start call\n", __LINE__);
2796 /* send one packet and see where it goes, port 0 or 1 */
2797 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2798 printf("%d: Error doing first enqueue\n", __LINE__);
2801 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2803 if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
2807 snprintf(rx_port_used_stat, sizeof(rx_port_used_stat),
2808 "port_%u_cq_ring_used", rx_port);
2809 snprintf(rx_port_free_stat, sizeof(rx_port_free_stat),
2810 "port_%u_cq_ring_free", rx_port);
2811 snprintf(other_port_used_stat, sizeof(other_port_used_stat),
2812 "port_%u_cq_ring_used", rx_port ^ 1);
2813 if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL)
2815 printf("%d: Error, first event not scheduled\n", __LINE__);
2819 /* now fill up the rx port's queue with one flow to cause HOLB */
2822 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2823 printf("%d: Error with enqueue\n", __LINE__);
2826 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2827 } while (rte_event_dev_xstats_by_name_get(evdev,
2828 rx_port_free_stat, NULL) != 0);
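	/*
	 * At this point the rx port's CQ is full, so further events of the
	 * same atomic flow cannot be scheduled to it and must wait in the
	 * qid's IQ - the head-of-line-blocking situation under test.
	 */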
2830 /* one more packet, which needs to stay in IQ - i.e. HOLB */
2832 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2833 printf("%d: Error with enqueue\n", __LINE__);
2836 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2838 /* check that the other port still has an empty CQ */
2839 if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2841 printf("%d: Error, second port CQ is not empty\n", __LINE__);
2844 /* check IQ now has one packet */
2845 if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2847 printf("%d: Error, QID does not have exactly 1 packet\n",
2852 /* send another flow, which should overtake the waiting IQ entry */
2855 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2856 printf("%d: Error with enqueue\n", __LINE__);
2859 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2861 if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2863 printf("%d: Error, second flow did not overtake the first\n",
2868 if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2870 printf("%d: Error, QID does not have exactly 1 packet\n",
2877 rte_event_dev_dump(evdev, stdout);
2883 flush(uint8_t dev_id __rte_unused, struct rte_event event, void *arg)
2885 *((uint8_t *) arg) += (event.u64 == 0xCA11BACC) ? 1 : 0;
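/*
 * Explanatory note (not in the original): rte_event_dev_stop() invokes
 * the registered stop-flush callback once for each event still buffered
 * inside the device; the counter above is bumped only for events
 * carrying the magic 0xCA11BACC payload enqueued by the test below.
 */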
2889 dev_stop_flush(struct test *t) /* test to check we can properly flush events */
2891 const struct rte_event new_ev = {
2892 .op = RTE_EVENT_OP_NEW,
2896 struct rte_event ev = new_ev;
2900 if (init(t, 1, 1) < 0 ||
2901 create_ports(t, 1) < 0 ||
2902 create_atomic_qids(t, 1) < 0) {
2903 printf("%d: Error initializing device\n", __LINE__);
2907 /* Link the queue so *_start() doesn't error out */
2908 if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1) {
2909 printf("%d: Error linking queue to port\n", __LINE__);
2913 if (rte_event_dev_start(evdev) < 0) {
2914 printf("%d: Error with start call\n", __LINE__);
2918 for (i = 0; i < DEQUEUE_DEPTH + 1; i++) {
2919 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2920 printf("%d: Error enqueuing events\n", __LINE__);
2925 /* Schedule the events from the port to the IQ. At least one event
2926 * should be remaining in the queue.
2928 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2930 if (rte_event_dev_stop_flush_callback_register(evdev, flush, &count)) {
2931 printf("%d: Error installing the flush callback\n", __LINE__);
2938 printf("%d: Error executing the flush callback\n", __LINE__);
2942 if (rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL)) {
2943 printf("%d: Error uninstalling the flush callback\n", __LINE__);
2949 rte_event_dev_dump(evdev, stdout);
2955 worker_loopback_worker_fn(void *arg)
2957 struct test *t = arg;
2958 uint8_t port = t->port[1];
2963 * Takes packets from the input port and then loops them back through
2964 * the Eventdev. Each packet is looped through QIDs 0-7, 16 times,
2965 * so each packet is scheduled 8*16 = 128 times in total.
2967 printf("%d: \tWorker function started\n", __LINE__);
2968 while (count < NUM_PACKETS) {
2969 #define BURST_SIZE 32
2970 struct rte_event ev[BURST_SIZE];
2971 uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev,
2978 for (i = 0; i < nb_rx; i++) {
2980 if (ev[i].queue_id != 8) {
2981 ev[i].op = RTE_EVENT_OP_FORWARD;
2982 enqd = rte_event_enqueue_burst(evdev, port,
2985 printf("%d: Can't enqueue FWD!!\n",
2993 ev[i].mbuf->udata64++;
2994 if (ev[i].mbuf->udata64 != 16) {
2995 ev[i].op = RTE_EVENT_OP_FORWARD;
2996 enqd = rte_event_enqueue_burst(evdev, port,
2999 printf("%d: Can't enqueue FWD!!\n",
3005 /* we have hit 16 iterations through the system - drop */
3006 rte_pktmbuf_free(ev[i].mbuf);
3008 ev[i].op = RTE_EVENT_OP_RELEASE;
3009 enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1);
3011 printf("%d drop enqueue failed\n", __LINE__);
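/*
 * Summary of the worker's per-event decision (explanatory note, not in
 * the original): FORWARD to the next qid until the last qid (7) has
 * been visited, then wrap back to qid 0 and bump the per-mbuf lap
 * counter in udata64; after 16 laps the mbuf is freed and a RELEASE is
 * enqueued so the port's inflight credit is returned.
 */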
3021 worker_loopback_producer_fn(void *arg)
3023 struct test *t = arg;
3024 uint8_t port = t->port[0];
3027 printf("%d: \tProducer function started\n", __LINE__);
3028 while (count < NUM_PACKETS) {
3029 struct rte_mbuf *m = NULL;
3031 m = rte_pktmbuf_alloc(t->mbuf_pool);
3032 } while (m == NULL);
3036 struct rte_event ev = {
3037 .op = RTE_EVENT_OP_NEW,
3038 .queue_id = t->qid[0],
3039 .flow_id = (uintptr_t)m & 0xFFFF,
3043 if (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1) {
3044 while (rte_event_enqueue_burst(evdev, port, &ev, 1) !=
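/*
 * Sketch (hypothetical helper, not in the original file): NEW-event
 * enqueues can fail once the port's new_event_threshold of inflight
 * events is reached, so producers must spin until the scheduler frees
 * credits - the same back-pressure loop the producer above implements
 * inline.
 */
static inline void
enqueue_new_retry(uint8_t dev_id, uint8_t port_id,
		const struct rte_event *ev)
{
	while (rte_event_enqueue_burst(dev_id, port_id, ev, 1) != 1)
		rte_pause();
}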
3056 worker_loopback(struct test *t, uint8_t disable_implicit_release)
3058 /* use a single producer core and a worker core to see what happens
3059 * if the worker loops packets back multiple times
3061 struct test_event_dev_stats stats;
3062 uint64_t print_cycles = 0, cycles = 0;
3063 uint64_t tx_pkts = 0;
3065 int w_lcore, p_lcore;
3067 if (init(t, 8, 2) < 0 ||
3068 create_atomic_qids(t, 8) < 0) {
3069 printf("%d: Error initializing device\n", __LINE__);
3073 /* RX with low max events */
3074 static struct rte_event_port_conf conf = {
3075 .dequeue_depth = 32,
3076 .enqueue_depth = 64,
3078 /* beware: this cannot be initialized in the static above as it would
3079 * only be initialized once - and this needs to be set for multiple runs
3081 conf.new_event_threshold = 512;
3082 conf.disable_implicit_release = disable_implicit_release;
3084 if (rte_event_port_setup(evdev, 0, &conf) < 0) {
3085 printf("Error setting up RX port\n");
3089 /* TX with higher max events */
3090 conf.new_event_threshold = 4096;
3091 if (rte_event_port_setup(evdev, 1, &conf) < 0) {
3092 printf("Error setting up TX port\n");
3097 /* CQ mapping to QID */
3098 err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
3099 if (err != 8) { /* should have mapped all queues */
3100 printf("%d: error mapping TX port to all qids\n", __LINE__);
3104 if (rte_event_dev_start(evdev) < 0) {
3105 printf("%d: Error with start call\n", __LINE__);
3109 p_lcore = rte_get_next_lcore(
3110 /* start core */ -1,
3111 /* skip master */ 1,
3113 w_lcore = rte_get_next_lcore(p_lcore, 1, 0);
3115 rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);
3116 rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);
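	/*
	 * The app lcore drives the sw PMD's scheduler service in the loop
	 * below, while the producer and worker run on their own lcores -
	 * three cores in total, matching the core-count check in
	 * test_sw_eventdev().
	 */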
3118 print_cycles = cycles = rte_get_timer_cycles();
3119 while (rte_eal_get_lcore_state(p_lcore) != FINISHED ||
3120 rte_eal_get_lcore_state(w_lcore) != FINISHED) {
3122 rte_service_run_iter_on_app_lcore(t->service_id, 1);
3124 uint64_t new_cycles = rte_get_timer_cycles();
3126 if (new_cycles - print_cycles > rte_get_timer_hz()) {
3127 test_event_dev_stats_get(evdev, &stats);
3129 "%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n",
3130 __LINE__, stats.rx_pkts, stats.tx_pkts);
3132 print_cycles = new_cycles;
3134 if (new_cycles - cycles > rte_get_timer_hz() * 3) {
3135 test_event_dev_stats_get(evdev, &stats);
3136 if (stats.tx_pkts == tx_pkts) {
3137 rte_event_dev_dump(evdev, stdout);
3138 printf("Dumping xstats:\n");
3141 "%d: No schedules for seconds, deadlock\n",
3145 tx_pkts = stats.tx_pkts;
3146 cycles = new_cycles;
3149 rte_service_run_iter_on_app_lcore(t->service_id, 1);
3150 /* ensure all completions are flushed */
3152 rte_eal_mp_wait_lcore();
3158 static struct rte_mempool *eventdev_func_mempool;
3161 test_sw_eventdev(void)
3166 t = malloc(sizeof(struct test));
3169 /* manually initialize the op; older gcc versions complain about
3170 * static initialization of struct members that are bitfields.
3172 release_ev.op = RTE_EVENT_OP_RELEASE;
3174 const char *eventdev_name = "event_sw";
3175 evdev = rte_event_dev_get_dev_id(eventdev_name);
3177 printf("%d: Eventdev %s not found - creating.\n",
3178 __LINE__, eventdev_name);
3179 if (rte_vdev_init(eventdev_name, NULL) < 0) {
3180 printf("Error creating eventdev\n");
3183 evdev = rte_event_dev_get_dev_id(eventdev_name);
3185 printf("Error finding newly created eventdev\n");
3190 if (rte_event_dev_service_id_get(evdev, &t->service_id) < 0) {
3191 printf("Failed to get service ID for software event dev\n");
3195 rte_service_runstate_set(t->service_id, 1);
3196 rte_service_set_runstate_mapped_check(t->service_id, 0);
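	/*
	 * Disabling the mapped-lcore check lets the test drive the service
	 * directly via rte_service_run_iter_on_app_lcore() instead of
	 * mapping a dedicated service lcore.
	 */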
3198 /* Only create mbuf pool once, reuse for each test run */
3199 if (!eventdev_func_mempool) {
3200 eventdev_func_mempool = rte_pktmbuf_pool_create(
3201 "EVENTDEV_SW_SA_MBUF_POOL",
3202 (1<<12), /* 4k buffers */
3203 32 /*MBUF_CACHE_SIZE*/,
3205 512, /* use very small mbufs */
3207 if (!eventdev_func_mempool) {
3208 printf("ERROR creating mempool\n");
3212 t->mbuf_pool = eventdev_func_mempool;
3213 printf("*** Running Single Directed Packet test...\n");
3214 ret = test_single_directed_packet(t);
3216 printf("ERROR - Single Directed Packet test FAILED.\n");
3219 printf("*** Running Directed Forward Credit test...\n");
3220 ret = test_directed_forward_credits(t);
3222 printf("ERROR - Directed Forward Credit test FAILED.\n");
3225 printf("*** Running Single Load Balanced Packet test...\n");
3226 ret = single_packet(t);
3228 printf("ERROR - Single Load Balanced Packet test FAILED.\n");
3231 printf("*** Running Unordered Basic test...\n");
3232 ret = unordered_basic(t);
3234 printf("ERROR - Unordered Basic test FAILED.\n");
3237 printf("*** Running Ordered Basic test...\n");
3238 ret = ordered_basic(t);
3240 printf("ERROR - Ordered Basic test FAILED.\n");
3243 printf("*** Running Burst Packets test...\n");
3244 ret = burst_packets(t);
3246 printf("ERROR - Burst Packets test FAILED.\n");
3249 printf("*** Running Load Balancing test...\n");
3250 ret = load_balancing(t);
3252 printf("ERROR - Load Balancing test FAILED.\n");
3255 printf("*** Running Prioritized Directed test...\n");
3256 ret = test_priority_directed(t);
3258 printf("ERROR - Prioritized Directed test FAILED.\n");
3261 printf("*** Running Prioritized Atomic test...\n");
3262 ret = test_priority_atomic(t);
3264 printf("ERROR - Prioritized Atomic test FAILED.\n");
3268 printf("*** Running Prioritized Ordered test...\n");
3269 ret = test_priority_ordered(t);
3271 printf("ERROR - Prioritized Ordered test FAILED.\n");
3274 printf("*** Running Prioritized Unordered test...\n");
3275 ret = test_priority_unordered(t);
3277 printf("ERROR - Prioritized Unordered test FAILED.\n");
3280 printf("*** Running Invalid QID test...\n");
3281 ret = invalid_qid(t);
3283 printf("ERROR - Invalid QID test FAILED.\n");
3286 printf("*** Running Load Balancing History test...\n");
3287 ret = load_balancing_history(t);
3289 printf("ERROR - Load Balancing History test FAILED.\n");
3292 printf("*** Running Inflight Count test...\n");
3293 ret = inflight_counts(t);
3295 printf("ERROR - Inflight Count test FAILED.\n");
3298 printf("*** Running Abuse Inflights test...\n");
3299 ret = abuse_inflights(t);
3301 printf("ERROR - Abuse Inflights test FAILED.\n");
3304 printf("*** Running XStats test...\n");
3305 ret = xstats_tests(t);
3307 printf("ERROR - XStats test FAILED.\n");
3310 printf("*** Running XStats ID Reset test...\n");
3311 ret = xstats_id_reset_tests(t);
3313 printf("ERROR - XStats ID Reset test FAILED.\n");
3316 printf("*** Running XStats Brute Force test...\n");
3317 ret = xstats_brute_force(t);
3319 printf("ERROR - XStats Brute Force test FAILED.\n");
3322 printf("*** Running XStats ID Abuse test...\n");
3323 ret = xstats_id_abuse_tests(t);
3325 printf("ERROR - XStats ID Abuse test FAILED.\n");
3328 printf("*** Running QID Priority test...\n");
3329 ret = qid_priorities(t);
3331 printf("ERROR - QID Priority test FAILED.\n");
3334 printf("*** Running Unlink-in-progress test...\n");
3335 ret = unlink_in_progress(t);
3337 printf("ERROR - Unlink-in-progress test FAILED.\n");
3340 printf("*** Running Ordered Reconfigure test...\n");
3341 ret = ordered_reconfigure(t);
3343 printf("ERROR - Ordered Reconfigure test FAILED.\n");
3346 printf("*** Running Port LB Single Reconfig test...\n");
3347 ret = port_single_lb_reconfig(t);
3349 printf("ERROR - Port LB Single Reconfig test FAILED.\n");
3352 printf("*** Running Port Reconfig Credits test...\n");
3353 ret = port_reconfig_credits(t);
3355 printf("ERROR - Port Reconfig Credits test FAILED.\n");
3358 printf("*** Running Head-of-line-blocking test...\n");
3361 printf("ERROR - Head-of-line-blocking test FAILED.\n");
3364 printf("*** Running Stop Flush test...\n");
3365 ret = dev_stop_flush(t);
3367 printf("ERROR - Stop Flush test FAILED.\n");
3370 if (rte_lcore_count() >= 3) {
3371 printf("*** Running Worker loopback test...\n");
3372 ret = worker_loopback(t, 0);
3374 printf("ERROR - Worker loopback test FAILED.\n");
3378 printf("*** Running Worker loopback test (implicit release disabled)...\n");
3379 ret = worker_loopback(t, 1);
3381 printf("ERROR - Worker loopback test FAILED.\n");
3385 printf("### Not enough cores for worker loopback tests.\n");
3386 printf("### Need at least 3 cores for the tests.\n");
3390 * Free the test instance, but leave the mempool initialized, with a
3391 * pointer to it in static eventdev_func_mempool, as it is re-used on re-runs
3395 printf("SW Eventdev Selftest Successful.\n");
3399 printf("SW Eventdev Selftest Failed.\n");