1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2017 Intel Corporation
10 #include <sys/queue.h>
12 #include <rte_memory.h>
13 #include <rte_launch.h>
15 #include <rte_per_lcore.h>
16 #include <rte_lcore.h>
17 #include <rte_debug.h>
18 #include <rte_ethdev.h>
19 #include <rte_cycles.h>
20 #include <rte_eventdev.h>
21 #include <rte_pause.h>
22 #include <rte_service.h>
23 #include <rte_service_component.h>
24 #include <rte_bus_vdev.h>
/* Total packets used by the soak/load tests below. */
30 #define NUM_PACKETS (1<<18)
/* NOTE(review): the three fields below appear to be members of a test-context
 * struct whose opening line is not visible in this chunk (they are referenced
 * as t->mbuf_pool / t->port / t->qid throughout) -- confirm against the full
 * file before treating them as file-scope globals.
 */
35 struct rte_mempool *mbuf_pool;
36 uint8_t port[MAX_PORTS];
37 uint8_t qid[MAX_QIDS];
/* Shared event used to enqueue credit releases (see abuse_inflights());
 * presumably initialized elsewhere with op = RTE_EVENT_OP_RELEASE -- the
 * initializer is not visible in this chunk.
 */
42 static struct rte_event release_ev;
/* Allocate one mbuf from @mp and fill it with a canned ARP-request frame.
 * @portid is unused by the visible code. Returns the mbuf (NULL-check and
 * return statement are in lines missing from this view).
 */
44 static inline struct rte_mbuf *
45 rte_gen_arp(int portid, struct rte_mempool *mp)
49 * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
51 static const uint8_t arp_request[] = {
52 /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
53 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
54 /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
55 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
56 /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
57 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
58 /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
59 0x00, 0x00, 0x00, 0x00
/* NOTE(review): "- 1" deliberately drops the final template byte; this
 * matches the original test source, but verify it is intentional.
 */
62 int pkt_len = sizeof(arp_request) - 1;
64 m = rte_pktmbuf_alloc(mp);
/* Copy the template directly into the data area; assumes the mempool's
 * data room is large enough for pkt_len bytes -- TODO confirm.
 */
68 memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
69 arp_request, pkt_len);
70 rte_pktmbuf_pkt_len(m) = pkt_len;
71 rte_pktmbuf_data_len(m) = pkt_len;
/* NOTE(review): the enclosing function's signature is not visible in this
 * chunk. The body below is a diagnostic dump: it fetches and prints every
 * device-, port- and queue-level xstat name/value pair from 'evdev'.
 */
81 const uint32_t XSTATS_MAX = 1024;
83 uint32_t ids[XSTATS_MAX];
84 uint64_t values[XSTATS_MAX];
85 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
87 for (i = 0; i < XSTATS_MAX; i++)
90 /* Device names / values */
91 int ret = rte_event_dev_xstats_names_get(evdev,
92 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
93 xstats_names, ids, XSTATS_MAX);
95 printf("%d: xstats names get() returned error\n",
/* Fetch only as many values as names_get reported (ret). */
99 ret = rte_event_dev_xstats_get(evdev,
100 RTE_EVENT_DEV_XSTATS_DEVICE,
101 0, ids, values, ret);
102 if (ret > (signed int)XSTATS_MAX)
103 printf("%s %d: more xstats available than space\n",
105 for (i = 0; (signed int)i < ret; i++) {
106 printf("%d : %s : %"PRIu64"\n",
107 i, xstats_names[i].name, values[i]);
110 /* Port names / values */
111 ret = rte_event_dev_xstats_names_get(evdev,
112 RTE_EVENT_DEV_XSTATS_PORT, 0,
113 xstats_names, ids, XSTATS_MAX);
/* NOTE(review): names are fetched for port 0 but values for port 1 --
 * looks intentional in the original, but worth confirming.
 */
114 ret = rte_event_dev_xstats_get(evdev,
115 RTE_EVENT_DEV_XSTATS_PORT, 1,
117 if (ret > (signed int)XSTATS_MAX)
118 printf("%s %d: more xstats available than space\n",
120 for (i = 0; (signed int)i < ret; i++) {
121 printf("%d : %s : %"PRIu64"\n",
122 i, xstats_names[i].name, values[i]);
125 /* Queue names / values */
126 ret = rte_event_dev_xstats_names_get(evdev,
127 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
128 xstats_names, ids, XSTATS_MAX);
129 ret = rte_event_dev_xstats_get(evdev,
130 RTE_EVENT_DEV_XSTATS_QUEUE,
131 1, ids, values, ret);
132 if (ret > (signed int)XSTATS_MAX)
133 printf("%s %d: more xstats available than space\n",
135 for (i = 0; (signed int)i < ret; i++) {
136 printf("%d : %s : %"PRIu64"\n",
137 i, xstats_names[i].name, values[i]);
141 /* initialization and config */
/* (Re)configure the event device with @nb_queues queues and @nb_ports
 * ports, and zero the test context (preserving its mbuf pool pointer).
 * Interior lines (restore of 'temp', return) are missing from this view.
 */
143 init(struct test *t, int nb_queues, int nb_ports)
145 struct rte_event_dev_config config = {
146 .nb_event_queues = nb_queues,
147 .nb_event_ports = nb_ports,
148 .nb_event_queue_flows = 1024,
149 .nb_events_limit = 4096,
150 .nb_event_port_dequeue_depth = 128,
151 .nb_event_port_enqueue_depth = 128,
155 void *temp = t->mbuf_pool; /* save and restore mbuf pool */
157 memset(t, 0, sizeof(*t));
160 ret = rte_event_dev_configure(evdev, &config);
162 printf("%d: Error configuring device\n", __LINE__);
/* Set up @num_ports event ports with a fixed config; fails (in lines not
 * visible here) when num_ports exceeds MAX_PORTS or port setup errors.
 */
167 create_ports(struct test *t, int num_ports)
170 static const struct rte_event_port_conf conf = {
171 .new_event_threshold = 1024,
175 if (num_ports > MAX_PORTS)
178 for (i = 0; i < num_ports; i++) {
179 if (rte_event_port_setup(evdev, i, &conf) < 0) {
180 printf("Error setting up port %d\n", i);
/* Create @num_qids load-balanced queues of scheduling type @flags,
 * appending them after any queues the test already created
 * (t->nb_qids is the running total).
 */
190 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
195 const struct rte_event_queue_conf conf = {
196 .schedule_type = flags,
197 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
198 .nb_atomic_flows = 1024,
199 .nb_atomic_order_sequences = 1024,
202 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
203 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
204 printf("%d: error creating qid %d\n", __LINE__, i);
/* Guard after the fact: error out if we exceeded MAX_QIDS
 * (handling of the overflow case is in lines not visible here).
 */
209 t->nb_qids += num_qids;
210 if (t->nb_qids > MAX_QIDS)
/* Convenience wrapper: create @num_qids atomic-scheduled queues. */
217 create_atomic_qids(struct test *t, int num_qids)
219 return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
/* Convenience wrapper: create @num_qids ordered-scheduled queues. */
223 create_ordered_qids(struct test *t, int num_qids)
225 return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ORDERED);
/* Convenience wrapper: "unordered" queues map to PARALLEL scheduling. */
230 create_unordered_qids(struct test *t, int num_qids)
232 return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_PARALLEL);
/* Create @num_qids single-link (directed) queues and link each one to the
 * corresponding entry of @ports (ports[0] -> first new qid, etc.).
 */
236 create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
241 static const struct rte_event_queue_conf conf = {
242 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
243 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
246 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
247 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
248 printf("%d: error creating qid %d\n", __LINE__, i);
/* A directed queue must have exactly one port linked to it. */
253 if (rte_event_port_link(evdev, ports[i - t->nb_qids],
254 &t->qid[i], NULL, 1) != 1) {
255 printf("%d: error creating link for qid %d\n",
260 t->nb_qids += num_qids;
261 if (t->nb_qids > MAX_QIDS)
/* Common teardown run after each test: stop then close the device. */
269 cleanup(struct test *t __rte_unused)
271 rte_event_dev_stop(evdev);
272 rte_event_dev_close(evdev);
/* Snapshot of the sw eventdev's xstats, decoded into named fields by
 * test_event_dev_stats_get() below. (Closing "};" is outside this view.)
 */
276 struct test_event_dev_stats {
277 uint64_t rx_pkts; /**< Total packets received */
278 uint64_t rx_dropped; /**< Total packets dropped (Eg Invalid QID) */
279 uint64_t tx_pkts; /**< Total packets transmitted */
281 /** Packets received on this port */
282 uint64_t port_rx_pkts[MAX_PORTS];
283 /** Packets dropped on this port */
284 uint64_t port_rx_dropped[MAX_PORTS];
285 /** Packets inflight on this port */
286 uint64_t port_inflight[MAX_PORTS];
287 /** Packets transmitted on this port */
288 uint64_t port_tx_pkts[MAX_PORTS];
289 /** Packets received on this qid */
290 uint64_t qid_rx_pkts[MAX_QIDS];
291 /** Packets dropped on this qid */
292 uint64_t qid_rx_dropped[MAX_QIDS];
293 /** Packets transmitted on this qid */
294 uint64_t qid_tx_pkts[MAX_QIDS];
/* Populate @stats by looking up each known sw-eventdev xstat by name
 * ("dev_rx", "port_N_rx", "qid_N_tx", ...). The id caches are static so
 * repeated calls can reuse the resolved xstat ids.
 */
298 test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
301 static uint32_t total_ids[3]; /* rx, tx and drop */
302 static uint32_t port_rx_pkts_ids[MAX_PORTS];
303 static uint32_t port_rx_dropped_ids[MAX_PORTS];
304 static uint32_t port_inflight_ids[MAX_PORTS];
305 static uint32_t port_tx_pkts_ids[MAX_PORTS];
306 static uint32_t qid_rx_pkts_ids[MAX_QIDS];
307 static uint32_t qid_rx_dropped_ids[MAX_QIDS];
308 static uint32_t qid_tx_pkts_ids[MAX_QIDS];
/* Device-wide totals. */
311 stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
312 "dev_rx", &total_ids[0]);
313 stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
314 "dev_drop", &total_ids[1]);
315 stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
316 "dev_tx", &total_ids[2]);
/* Per-port counters, looked up by formatted stat name. */
317 for (i = 0; i < MAX_PORTS; i++) {
319 snprintf(name, sizeof(name), "port_%u_rx", i);
320 stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
321 dev_id, name, &port_rx_pkts_ids[i]);
322 snprintf(name, sizeof(name), "port_%u_drop", i);
323 stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
324 dev_id, name, &port_rx_dropped_ids[i]);
325 snprintf(name, sizeof(name), "port_%u_inflight", i);
326 stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
327 dev_id, name, &port_inflight_ids[i]);
328 snprintf(name, sizeof(name), "port_%u_tx", i);
329 stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
330 dev_id, name, &port_tx_pkts_ids[i]);
/* Per-queue counters. */
332 for (i = 0; i < MAX_QIDS; i++) {
334 snprintf(name, sizeof(name), "qid_%u_rx", i);
335 stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
336 dev_id, name, &qid_rx_pkts_ids[i]);
337 snprintf(name, sizeof(name), "qid_%u_drop", i);
338 stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
339 dev_id, name, &qid_rx_dropped_ids[i]);
340 snprintf(name, sizeof(name), "qid_%u_tx", i);
341 stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
342 dev_id, name, &qid_tx_pkts_ids[i]);
348 /* run_prio_packet_test
349 * This performs a basic packet priority check on the test instance passed in.
350 * It is factored out of the main priority tests as the same tests must be
351 * performed to ensure prioritization of each type of QID.
354 * - An initialized test structure, including mempool
355 * - t->port[0] is initialized for both Enq / Deq of packets to the QID
356 * - t->qid[0] is the QID to be tested
357 * - if LB QID, the CQ must be mapped to the QID.
/* Enqueue two packets (NORMAL then HIGHEST priority) to t->qid[0], run the
 * scheduler, and verify the HIGHEST-priority packet is dequeued first.
 * Packet identity is tracked via mbuf->seqn magic values.
 */
360 run_prio_packet_test(struct test *t)
363 const uint32_t MAGIC_SEQN[] = {4711, 1234};
364 const uint32_t PRIORITY[] = {
365 RTE_EVENT_DEV_PRIORITY_NORMAL,
366 RTE_EVENT_DEV_PRIORITY_HIGHEST
369 for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
370 /* generate pkt and enqueue */
372 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
374 printf("%d: gen of pkt failed\n", __LINE__);
377 arp->seqn = MAGIC_SEQN[i];
379 ev = (struct rte_event){
380 .priority = PRIORITY[i],
381 .op = RTE_EVENT_OP_NEW,
382 .queue_id = t->qid[0],
385 err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
387 printf("%d: error failed to enqueue\n", __LINE__);
/* Drive one iteration of the sw scheduler service. */
392 rte_service_run_iter_on_app_lcore(t->service_id, 1);
394 struct test_event_dev_stats stats;
395 err = test_event_dev_stats_get(evdev, &stats);
397 printf("%d: error failed to get stats\n", __LINE__);
/* Both packets must have been received on port 0. */
401 if (stats.port_rx_pkts[t->port[0]] != 2) {
402 printf("%d: error stats incorrect for directed port\n",
404 rte_event_dev_dump(evdev, stdout);
408 struct rte_event ev, ev2;
410 deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
412 printf("%d: error failed to deq\n", __LINE__);
413 rte_event_dev_dump(evdev, stdout);
/* First dequeue must be MAGIC_SEQN[1] -- the HIGHEST-priority pkt. */
416 if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
417 printf("%d: first packet out not highest priority\n",
419 rte_event_dev_dump(evdev, stdout);
422 rte_pktmbuf_free(ev.mbuf);
424 deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
426 printf("%d: error failed to deq\n", __LINE__);
427 rte_event_dev_dump(evdev, stdout);
/* Second dequeue must be the remaining NORMAL-priority packet. */
430 if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
431 printf("%d: second packet out not lower priority\n",
433 rte_event_dev_dump(evdev, stdout);
436 rte_pktmbuf_free(ev2.mbuf);
/* Enqueue one packet on a directed (single-link) queue via port 0 and
 * verify it arrives at the worker port with its seqn intact.
 */
443 test_single_directed_packet(struct test *t)
445 const int rx_enq = 0;
446 const int wrk_enq = 2;
449 /* Create instance with 3 directed QIDs going to 3 ports */
450 if (init(t, 3, 3) < 0 ||
451 create_ports(t, 3) < 0 ||
452 create_directed_qids(t, 3, t->port) < 0)
455 if (rte_event_dev_start(evdev) < 0) {
456 printf("%d: Error with start call\n", __LINE__);
460 /************** FORWARD ****************/
461 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
462 struct rte_event ev = {
463 .op = RTE_EVENT_OP_NEW,
469 printf("%d: gen of pkt failed\n", __LINE__);
/* Tag the packet so we can recognize it after the trip through the dev. */
473 const uint32_t MAGIC_SEQN = 4711;
474 arp->seqn = MAGIC_SEQN;
476 /* generate pkt and enqueue */
477 err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
479 printf("%d: error failed to enqueue\n", __LINE__);
483 /* Run schedule() as dir packets may need to be re-ordered */
484 rte_service_run_iter_on_app_lcore(t->service_id, 1);
486 struct test_event_dev_stats stats;
487 err = test_event_dev_stats_get(evdev, &stats);
489 printf("%d: error failed to get stats\n", __LINE__);
493 if (stats.port_rx_pkts[rx_enq] != 1) {
494 printf("%d: error stats incorrect for directed port\n",
500 deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
502 printf("%d: error failed to deq\n", __LINE__);
/* rx count on the worker port may be 0 or 1 depending on stat
 * update timing; anything else is an error.
 */
506 err = test_event_dev_stats_get(evdev, &stats);
507 if (stats.port_rx_pkts[wrk_enq] != 0 &&
508 stats.port_rx_pkts[wrk_enq] != 1) {
509 printf("%d: error directed stats post-dequeue\n", __LINE__);
513 if (ev.mbuf->seqn != MAGIC_SEQN) {
514 printf("%d: error magic sequence number not dequeued\n",
519 rte_pktmbuf_free(ev.mbuf);
/* Loop a single event through a directed queue 1000 times using
 * OP_FORWARD, checking that credits are not leaked (each iteration must
 * still be able to enqueue and dequeue).
 */
525 test_directed_forward_credits(struct test *t)
530 if (init(t, 1, 1) < 0 ||
531 create_ports(t, 1) < 0 ||
532 create_directed_qids(t, 1, t->port) < 0)
535 if (rte_event_dev_start(evdev) < 0) {
536 printf("%d: Error with start call\n", __LINE__);
540 struct rte_event ev = {
541 .op = RTE_EVENT_OP_NEW,
545 for (i = 0; i < 1000; i++) {
546 err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
548 printf("%d: error failed to enqueue\n", __LINE__);
551 rte_service_run_iter_on_app_lcore(t->service_id, 1);
554 deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
556 printf("%d: error failed to deq\n", __LINE__);
560 /* re-write event to be a forward, and continue looping it */
561 ev.op = RTE_EVENT_OP_FORWARD;
/* Priority ordering test on a directed (single-link) queue; the shared
 * checks live in run_prio_packet_test().
 */
570 test_priority_directed(struct test *t)
572 if (init(t, 1, 1) < 0 ||
573 create_ports(t, 1) < 0 ||
574 create_directed_qids(t, 1, t->port) < 0) {
575 printf("%d: Error initializing device\n", __LINE__);
579 if (rte_event_dev_start(evdev) < 0) {
580 printf("%d: Error with start call\n", __LINE__);
584 return run_prio_packet_test(t);
/* Priority ordering test on an atomic queue; LB queues need an explicit
 * port link before starting the device.
 */
588 test_priority_atomic(struct test *t)
590 if (init(t, 1, 1) < 0 ||
591 create_ports(t, 1) < 0 ||
592 create_atomic_qids(t, 1) < 0) {
593 printf("%d: Error initializing device\n", __LINE__);
598 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
599 printf("%d: error mapping qid to port\n", __LINE__);
602 if (rte_event_dev_start(evdev) < 0) {
603 printf("%d: Error with start call\n", __LINE__);
607 return run_prio_packet_test(t);
/* Priority ordering test on an ordered queue. */
611 test_priority_ordered(struct test *t)
613 if (init(t, 1, 1) < 0 ||
614 create_ports(t, 1) < 0 ||
615 create_ordered_qids(t, 1) < 0) {
616 printf("%d: Error initializing device\n", __LINE__);
621 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
622 printf("%d: error mapping qid to port\n", __LINE__);
625 if (rte_event_dev_start(evdev) < 0) {
626 printf("%d: Error with start call\n", __LINE__);
630 return run_prio_packet_test(t);
/* Priority ordering test on an unordered (parallel) queue. */
634 test_priority_unordered(struct test *t)
636 if (init(t, 1, 1) < 0 ||
637 create_ports(t, 1) < 0 ||
638 create_unordered_qids(t, 1) < 0) {
639 printf("%d: Error initializing device\n", __LINE__);
644 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
645 printf("%d: error mapping qid to port\n", __LINE__);
648 if (rte_event_dev_start(evdev) < 0) {
649 printf("%d: Error with start call\n", __LINE__);
653 return run_prio_packet_test(t);
/* Enqueue NUM_PKTS packets split across two atomic queues (each linked to
 * its own port) and verify half the packets drain from each port.
 * NOTE(review): queue_id assignment per packet is in lines not visible in
 * this view -- the split across qid 0/1 is inferred from the checks below.
 */
657 burst_packets(struct test *t)
659 /************** CONFIG ****************/
664 /* Create instance with 2 ports and 2 queues */
665 if (init(t, 2, 2) < 0 ||
666 create_ports(t, 2) < 0 ||
667 create_atomic_qids(t, 2) < 0) {
668 printf("%d: Error initializing device\n", __LINE__);
672 /* CQ mapping to QID */
673 ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
675 printf("%d: error mapping lb qid0\n", __LINE__);
678 ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
680 printf("%d: error mapping lb qid1\n", __LINE__);
684 if (rte_event_dev_start(evdev) < 0) {
685 printf("%d: Error with start call\n", __LINE__);
689 /************** FORWARD ****************/
690 const uint32_t rx_port = 0;
691 const uint32_t NUM_PKTS = 2;
693 for (i = 0; i < NUM_PKTS; i++) {
694 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
696 printf("%d: error generating pkt\n", __LINE__);
700 struct rte_event ev = {
701 .op = RTE_EVENT_OP_NEW,
706 /* generate pkt and enqueue */
707 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
709 printf("%d: Failed to enqueue\n", __LINE__);
713 rte_service_run_iter_on_app_lcore(t->service_id, 1);
715 /* Check stats for all NUM_PKTS arrived to sched core */
716 struct test_event_dev_stats stats;
718 err = test_event_dev_stats_get(evdev, &stats);
720 printf("%d: failed to get stats\n", __LINE__);
723 if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
724 printf("%d: Sched core didn't receive all %d pkts\n",
726 rte_event_dev_dump(evdev, stdout);
734 /******** DEQ QID 1 *******/
737 p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
739 rte_pktmbuf_free(ev.mbuf);
742 if (deq_pkts != NUM_PKTS/2) {
743 printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
748 /******** DEQ QID 2 *******/
752 p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
754 rte_pktmbuf_free(ev.mbuf);
756 if (deq_pkts != NUM_PKTS/2) {
757 printf("%d: Half of NUM_PKTS didn't arrive at port 2\n",
/* Enqueue a RELEASE op with no prior dequeue ("abuse") and verify the
 * scheduler neither counts traffic nor tracks any inflight credits.
 */
767 abuse_inflights(struct test *t)
769 const int rx_enq = 0;
770 const int wrk_enq = 2;
773 /* Create instance with 4 ports */
774 if (init(t, 1, 4) < 0 ||
775 create_ports(t, 4) < 0 ||
776 create_atomic_qids(t, 1) < 0) {
777 printf("%d: Error initializing device\n", __LINE__);
781 /* CQ mapping to QID */
/* Link with NULL queue list => link all configured queues. */
782 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
784 printf("%d: error mapping lb qid\n", __LINE__);
789 if (rte_event_dev_start(evdev) < 0) {
790 printf("%d: Error with start call\n", __LINE__);
794 /* Enqueue op only */
795 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
797 printf("%d: Failed to enqueue\n", __LINE__);
802 rte_service_run_iter_on_app_lcore(t->service_id, 1);
804 struct test_event_dev_stats stats;
806 err = test_event_dev_stats_get(evdev, &stats);
808 printf("%d: failed to get stats\n", __LINE__);
/* A lone RELEASE must not register as rx/tx traffic nor inflights. */
812 if (stats.rx_pkts != 0 ||
813 stats.tx_pkts != 0 ||
814 stats.port_inflight[wrk_enq] != 0) {
815 printf("%d: Sched core didn't handle pkt as expected\n",
/* Exhaustive xstats verification: checks expected stat counts per mode
 * (device/port/queue), a negative lookup, then enqueues 3 packets and
 * verifies stat values before and after per-mode resets. The expected[]
 * tables are tightly coupled to the sw PMD's stat layout and WILL need
 * updating whenever stats are added.
 */
825 xstats_tests(struct test *t)
827 const int wrk_enq = 2;
830 /* Create instance with 4 ports */
831 if (init(t, 1, 4) < 0 ||
832 create_ports(t, 4) < 0 ||
833 create_atomic_qids(t, 1) < 0) {
834 printf("%d: Error initializing device\n", __LINE__);
838 /* CQ mapping to QID */
839 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
841 printf("%d: error mapping lb qid\n", __LINE__);
846 if (rte_event_dev_start(evdev) < 0) {
847 printf("%d: Error with start call\n", __LINE__);
851 const uint32_t XSTATS_MAX = 1024;
854 uint32_t ids[XSTATS_MAX];
855 uint64_t values[XSTATS_MAX];
856 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
858 for (i = 0; i < XSTATS_MAX; i++)
861 /* Device names / values */
862 int ret = rte_event_dev_xstats_names_get(evdev,
863 RTE_EVENT_DEV_XSTATS_DEVICE,
864 0, xstats_names, ids, XSTATS_MAX);
866 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
869 ret = rte_event_dev_xstats_get(evdev,
870 RTE_EVENT_DEV_XSTATS_DEVICE,
871 0, ids, values, ret);
873 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
877 /* Port names / values */
878 ret = rte_event_dev_xstats_names_get(evdev,
879 RTE_EVENT_DEV_XSTATS_PORT, 0,
880 xstats_names, ids, XSTATS_MAX);
882 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
885 ret = rte_event_dev_xstats_get(evdev,
886 RTE_EVENT_DEV_XSTATS_PORT, 0,
889 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
893 /* Queue names / values */
894 ret = rte_event_dev_xstats_names_get(evdev,
895 RTE_EVENT_DEV_XSTATS_QUEUE,
896 0, xstats_names, ids, XSTATS_MAX);
898 printf("%d: expected 17 stats, got return %d\n", __LINE__, ret);
902 /* NEGATIVE TEST: with wrong queue passed, 0 stats should be returned */
/* Only queue 0 exists; queue 1 must yield -EINVAL. */
903 ret = rte_event_dev_xstats_get(evdev,
904 RTE_EVENT_DEV_XSTATS_QUEUE,
905 1, ids, values, ret);
906 if (ret != -EINVAL) {
907 printf("%d: expected 0 stats, got return %d\n", __LINE__, ret);
911 ret = rte_event_dev_xstats_get(evdev,
912 RTE_EVENT_DEV_XSTATS_QUEUE,
913 0, ids, values, ret);
915 printf("%d: expected 17 stats, got return %d\n", __LINE__, ret);
919 /* enqueue packets to check values */
920 for (i = 0; i < 3; i++) {
922 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
924 printf("%d: gen of pkt failed\n", __LINE__);
927 ev.queue_id = t->qid[i];
928 ev.op = RTE_EVENT_OP_NEW;
933 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
935 printf("%d: Failed to enqueue\n", __LINE__);
940 rte_service_run_iter_on_app_lcore(t->service_id, 1);
942 /* Device names / values */
943 int num_stats = rte_event_dev_xstats_names_get(evdev,
944 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
945 xstats_names, ids, XSTATS_MAX);
948 ret = rte_event_dev_xstats_get(evdev,
949 RTE_EVENT_DEV_XSTATS_DEVICE,
950 0, ids, values, num_stats);
/* 3 rx, 3 tx, 0 drop, 1 sched call -- matches the enqueue loop above. */
951 static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
952 for (i = 0; (signed int)i < ret; i++) {
953 if (expected[i] != values[i]) {
955 "%d Error xstat %d (id %d) %s : %"PRIu64
956 ", expect %"PRIu64"\n",
957 __LINE__, i, ids[i], xstats_names[i].name,
958 values[i], expected[i]);
963 ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
966 /* ensure reset statistics are zero-ed */
967 static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
968 ret = rte_event_dev_xstats_get(evdev,
969 RTE_EVENT_DEV_XSTATS_DEVICE,
970 0, ids, values, num_stats);
971 for (i = 0; (signed int)i < ret; i++) {
972 if (expected_zero[i] != values[i]) {
974 "%d Error, xstat %d (id %d) %s : %"PRIu64
975 ", expect %"PRIu64"\n",
976 __LINE__, i, ids[i], xstats_names[i].name,
977 values[i], expected_zero[i]);
982 /* port reset checks */
983 num_stats = rte_event_dev_xstats_names_get(evdev,
984 RTE_EVENT_DEV_XSTATS_PORT, 0,
985 xstats_names, ids, XSTATS_MAX);
988 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT,
989 0, ids, values, num_stats);
/* Expected layout mirrors the sw PMD's per-port stat ordering. */
991 static const uint64_t port_expected[] = {
996 0 /* avg pkt cycles */,
998 0 /* rx ring used */,
999 4096 /* rx ring free */,
1000 0 /* cq ring used */,
1001 32 /* cq ring free */,
1002 0 /* dequeue calls */,
1003 /* 10 dequeue burst buckets */
1007 if (ret != RTE_DIM(port_expected)) {
1009 "%s %d: wrong number of port stats (%d), expected %zu\n",
1010 __func__, __LINE__, ret, RTE_DIM(port_expected));
1013 for (i = 0; (signed int)i < ret; i++) {
1014 if (port_expected[i] != values[i]) {
1016 "%s : %d: Error stat %s is %"PRIu64
1017 ", expected %"PRIu64"\n",
1018 __func__, __LINE__, xstats_names[i].name,
1019 values[i], port_expected[i]);
1024 ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1027 /* ensure reset statistics are zero-ed */
1028 static const uint64_t port_expected_zero[] = {
1033 0 /* avg pkt cycles */,
1035 0 /* rx ring used */,
1036 4096 /* rx ring free */,
1037 0 /* cq ring used */,
1038 32 /* cq ring free */,
1039 0 /* dequeue calls */,
1040 /* 10 dequeue burst buckets */
1044 ret = rte_event_dev_xstats_get(evdev,
1045 RTE_EVENT_DEV_XSTATS_PORT,
1046 0, ids, values, num_stats);
1047 for (i = 0; (signed int)i < ret; i++) {
1048 if (port_expected_zero[i] != values[i]) {
1050 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1051 ", expect %"PRIu64"\n",
1052 __LINE__, i, ids[i], xstats_names[i].name,
1053 values[i], port_expected_zero[i]);
1058 /* QUEUE STATS TESTS */
1059 num_stats = rte_event_dev_xstats_names_get(evdev,
1060 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1061 xstats_names, ids, XSTATS_MAX);
1062 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1063 0, ids, values, num_stats);
1065 printf("xstats get returned %d\n", ret);
1068 if ((unsigned int)ret > XSTATS_MAX)
1069 printf("%s %d: more xstats available than space\n",
1070 __func__, __LINE__);
1072 static const uint64_t queue_expected[] = {
1078 0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
1079 /* QID-to-Port: pinned_flows, packets */
1085 for (i = 0; (signed int)i < ret; i++) {
1086 if (queue_expected[i] != values[i]) {
1088 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1089 ", expect %"PRIu64"\n",
1090 __LINE__, i, ids[i], xstats_names[i].name,
1091 values[i], queue_expected[i]);
1096 /* Reset the queue stats here */
1097 ret = rte_event_dev_xstats_reset(evdev,
1098 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1102 /* Verify that the resetable stats are reset, and others are not */
1103 static const uint64_t queue_expected_zero[] = {
1109 0, 0, 0, 0, /* 4 iq used */
1110 /* QID-to-Port: pinned_flows, packets */
1117 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1118 ids, values, num_stats);
1120 for (i = 0; (signed int)i < ret; i++) {
1121 if (queue_expected_zero[i] != values[i]) {
1123 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1124 ", expect %"PRIu64"\n",
1125 __LINE__, i, ids[i], xstats_names[i].name,
1126 values[i], queue_expected_zero[i]);
1131 printf("%d : %d of values were not as expected above\n",
1140 rte_event_dev_dump(0, stdout);
/* Negative xstats test: asking for names of a non-existent port/queue id
 * (UINT8_MAX-1) must return zero stats rather than crash or overrun.
 */
1147 xstats_id_abuse_tests(struct test *t)
1150 const uint32_t XSTATS_MAX = 1024;
1151 const uint32_t link_port = 2;
1153 uint32_t ids[XSTATS_MAX];
1154 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1156 /* Create instance with 4 ports */
1157 if (init(t, 1, 4) < 0 ||
1158 create_ports(t, 4) < 0 ||
1159 create_atomic_qids(t, 1) < 0) {
1160 printf("%d: Error initializing device\n", __LINE__);
1164 err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0);
1166 printf("%d: error mapping lb qid\n", __LINE__);
1170 if (rte_event_dev_start(evdev) < 0) {
1171 printf("%d: Error with start call\n", __LINE__);
1175 /* no test for device, as it ignores the port/q number */
1176 int num_stats = rte_event_dev_xstats_names_get(evdev,
1177 RTE_EVENT_DEV_XSTATS_PORT,
1178 UINT8_MAX-1, xstats_names, ids,
1180 if (num_stats != 0) {
1181 printf("%d: expected %d stats, got return %d\n", __LINE__,
1186 num_stats = rte_event_dev_xstats_names_get(evdev,
1187 RTE_EVENT_DEV_XSTATS_QUEUE,
1188 UINT8_MAX-1, xstats_names, ids,
1190 if (num_stats != 0) {
1191 printf("%d: expected %d stats, got return %d\n", __LINE__,
/* Repeatedly reconfigure queue+port, run one packet through, then stop the
 * device -- 32 iterations prove port credits survive reconfiguration.
 */
1204 port_reconfig_credits(struct test *t)
1206 if (init(t, 1, 1) < 0) {
1207 printf("%d: Error initializing device\n", __LINE__);
1212 const uint32_t NUM_ITERS = 32;
1213 for (i = 0; i < NUM_ITERS; i++) {
1214 const struct rte_event_queue_conf conf = {
1215 .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1216 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1217 .nb_atomic_flows = 1024,
1218 .nb_atomic_order_sequences = 1024,
1220 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1221 printf("%d: error creating qid\n", __LINE__);
1226 static const struct rte_event_port_conf port_conf = {
1227 .new_event_threshold = 128,
1228 .dequeue_depth = 32,
1229 .enqueue_depth = 64,
1231 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1232 printf("%d Error setting up port\n", __LINE__);
1236 int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
1238 printf("%d: error mapping lb qid\n", __LINE__);
1242 if (rte_event_dev_start(evdev) < 0) {
1243 printf("%d: Error with start call\n", __LINE__);
1247 const uint32_t NPKTS = 1;
1249 for (j = 0; j < NPKTS; j++) {
1250 struct rte_event ev;
1251 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1253 printf("%d: gen of pkt failed\n", __LINE__);
1256 ev.queue_id = t->qid[0];
1257 ev.op = RTE_EVENT_OP_NEW;
1259 int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
1261 printf("%d: Failed to enqueue\n", __LINE__);
1262 rte_event_dev_dump(0, stdout);
1267 rte_service_run_iter_on_app_lcore(t->service_id, 1);
1269 struct rte_event ev[NPKTS];
1270 int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
1273 printf("%d error; no packet dequeued\n", __LINE__);
1275 /* let cleanup below stop the device on last iter */
1276 if (i != NUM_ITERS-1)
1277 rte_event_dev_stop(evdev);
/* Exercise link/unlink/relink of a port between a load-balanced and a
 * single-link queue, then start the device -- the sequence must succeed.
 */
1288 port_single_lb_reconfig(struct test *t)
1290 if (init(t, 2, 2) < 0) {
1291 printf("%d: Error initializing device\n", __LINE__);
1295 static const struct rte_event_queue_conf conf_lb_atomic = {
1296 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1297 .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1298 .nb_atomic_flows = 1024,
1299 .nb_atomic_order_sequences = 1024,
1301 if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
1302 printf("%d: error creating qid\n", __LINE__);
1306 static const struct rte_event_queue_conf conf_single_link = {
1307 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1308 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
1310 if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
1311 printf("%d: error creating qid\n", __LINE__);
1315 struct rte_event_port_conf port_conf = {
1316 .new_event_threshold = 128,
1317 .dequeue_depth = 32,
1318 .enqueue_depth = 64,
1320 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1321 printf("%d Error setting up port\n", __LINE__);
1324 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
1325 printf("%d Error setting up port\n", __LINE__);
1329 /* link port to lb queue */
1330 uint8_t queue_id = 0;
1331 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1332 printf("%d: error creating link for qid\n", __LINE__);
1336 int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
1338 printf("%d: Error unlinking lb port\n", __LINE__);
/* re-link the same port/queue pair after the unlink above */
1343 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1344 printf("%d: error creating link for qid\n", __LINE__);
/* NOTE(review): queue_id mutation between here and the previous link is
 * in lines not visible in this view -- confirm which queue port 1 links.
 */
1349 int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
1351 printf("%d: error mapping lb qid\n", __LINE__);
1355 if (rte_event_dev_start(evdev) < 0) {
1356 printf("%d: Error with start call\n", __LINE__);
/* Fuzz-style robustness test: call the xstats name/value APIs for every
 * mode and every possible id (0..UINT8_MAX-1); nothing should crash and
 * return values are intentionally ignored.
 */
1368 xstats_brute_force(struct test *t)
1371 const uint32_t XSTATS_MAX = 1024;
1372 uint32_t ids[XSTATS_MAX];
1373 uint64_t values[XSTATS_MAX];
1374 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1377 /* Create instance with 4 ports */
1378 if (init(t, 1, 4) < 0 ||
1379 create_ports(t, 4) < 0 ||
1380 create_atomic_qids(t, 1) < 0) {
1381 printf("%d: Error initializing device\n", __LINE__);
1385 int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1387 printf("%d: error mapping lb qid\n", __LINE__);
1391 if (rte_event_dev_start(evdev) < 0) {
1392 printf("%d: Error with start call\n", __LINE__);
1396 for (i = 0; i < XSTATS_MAX; i++)
/* Iterate DEVICE, PORT, QUEUE modes (contiguous enum values). */
1399 for (i = 0; i < 3; i++) {
1400 uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i;
1402 for (j = 0; j < UINT8_MAX; j++) {
1403 rte_event_dev_xstats_names_get(evdev, mode,
1404 j, xstats_names, ids, XSTATS_MAX);
1406 rte_event_dev_xstats_get(evdev, mode, j, ids,
1407 values, XSTATS_MAX);
1419 xstats_id_reset_tests(struct test *t)
1421 const int wrk_enq = 2;
1424 /* Create instance with 4 ports */
1425 if (init(t, 1, 4) < 0 ||
1426 create_ports(t, 4) < 0 ||
1427 create_atomic_qids(t, 1) < 0) {
1428 printf("%d: Error initializing device\n", __LINE__);
1432 /* CQ mapping to QID */
1433 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
1435 printf("%d: error mapping lb qid\n", __LINE__);
1439 if (rte_event_dev_start(evdev) < 0) {
1440 printf("%d: Error with start call\n", __LINE__);
1444 #define XSTATS_MAX 1024
1447 uint32_t ids[XSTATS_MAX];
1448 uint64_t values[XSTATS_MAX];
1449 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1451 for (i = 0; i < XSTATS_MAX; i++)
1454 #define NUM_DEV_STATS 6
1455 /* Device names / values */
1456 int num_stats = rte_event_dev_xstats_names_get(evdev,
1457 RTE_EVENT_DEV_XSTATS_DEVICE,
1458 0, xstats_names, ids, XSTATS_MAX);
1459 if (num_stats != NUM_DEV_STATS) {
1460 printf("%d: expected %d stats, got return %d\n", __LINE__,
1461 NUM_DEV_STATS, num_stats);
1464 ret = rte_event_dev_xstats_get(evdev,
1465 RTE_EVENT_DEV_XSTATS_DEVICE,
1466 0, ids, values, num_stats);
1467 if (ret != NUM_DEV_STATS) {
1468 printf("%d: expected %d stats, got return %d\n", __LINE__,
1469 NUM_DEV_STATS, ret);
1474 for (i = 0; i < NPKTS; i++) {
1475 struct rte_event ev;
1476 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1478 printf("%d: gen of pkt failed\n", __LINE__);
1481 ev.queue_id = t->qid[i];
1482 ev.op = RTE_EVENT_OP_NEW;
1486 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1488 printf("%d: Failed to enqueue\n", __LINE__);
1493 rte_service_run_iter_on_app_lcore(t->service_id, 1);
1495 static const char * const dev_names[] = {
1496 "dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
1497 "dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
1499 uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
1500 for (i = 0; (int)i < ret; i++) {
1502 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1506 printf("%d: %s id incorrect, expected %d got %d\n",
1507 __LINE__, dev_names[i], i, id);
1510 if (val != dev_expected[i]) {
1511 printf("%d: %s value incorrect, expected %"
1512 PRIu64" got %d\n", __LINE__, dev_names[i],
1513 dev_expected[i], id);
1517 int reset_ret = rte_event_dev_xstats_reset(evdev,
1518 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
1522 printf("%d: failed to reset successfully\n", __LINE__);
1525 dev_expected[i] = 0;
1526 /* check value again */
1527 val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], 0);
1528 if (val != dev_expected[i]) {
1529 printf("%d: %s value incorrect, expected %"PRIu64
1530 " got %"PRIu64"\n", __LINE__, dev_names[i],
1531 dev_expected[i], val);
1536 /* 48 is stat offset from start of the devices whole xstats.
1537 * This WILL break every time we add a statistic to a port
1538 * or the device, but there is no other way to test
1541 /* num stats for the tested port. CQ size adds more stats to a port */
1542 #define NUM_PORT_STATS 21
1543 /* the port to test. */
1545 num_stats = rte_event_dev_xstats_names_get(evdev,
1546 RTE_EVENT_DEV_XSTATS_PORT, PORT,
1547 xstats_names, ids, XSTATS_MAX);
1548 if (num_stats != NUM_PORT_STATS) {
1549 printf("%d: expected %d stats, got return %d\n",
1550 __LINE__, NUM_PORT_STATS, num_stats);
1553 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT,
1554 ids, values, num_stats);
1556 if (ret != NUM_PORT_STATS) {
1557 printf("%d: expected %d stats, got return %d\n",
1558 __LINE__, NUM_PORT_STATS, ret);
1561 static const char * const port_names[] = {
1566 "port_2_avg_pkt_cycles",
1568 "port_2_rx_ring_used",
1569 "port_2_rx_ring_free",
1570 "port_2_cq_ring_used",
1571 "port_2_cq_ring_free",
1572 "port_2_dequeue_calls",
1573 "port_2_dequeues_returning_0",
1574 "port_2_dequeues_returning_1-4",
1575 "port_2_dequeues_returning_5-8",
1576 "port_2_dequeues_returning_9-12",
1577 "port_2_dequeues_returning_13-16",
1578 "port_2_dequeues_returning_17-20",
1579 "port_2_dequeues_returning_21-24",
1580 "port_2_dequeues_returning_25-28",
1581 "port_2_dequeues_returning_29-32",
1582 "port_2_dequeues_returning_33-36",
1584 uint64_t port_expected[] = {
1588 NPKTS, /* inflight */
1589 0, /* avg pkt cycles */
1591 0, /* rx ring used */
1592 4096, /* rx ring free */
1593 NPKTS, /* cq ring used */
1594 25, /* cq ring free */
1595 0, /* dequeue zero calls */
1596 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1599 uint64_t port_expected_zero[] = {
1603 NPKTS, /* inflight */
1604 0, /* avg pkt cycles */
1606 0, /* rx ring used */
1607 4096, /* rx ring free */
1608 NPKTS, /* cq ring used */
1609 25, /* cq ring free */
1610 0, /* dequeue zero calls */
1611 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1614 if (RTE_DIM(port_expected) != NUM_PORT_STATS ||
1615 RTE_DIM(port_names) != NUM_PORT_STATS) {
1616 printf("%d: port array of wrong size\n", __LINE__);
1621 for (i = 0; (int)i < ret; i++) {
1623 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1626 if (id != i + PORT_OFF) {
1627 printf("%d: %s id incorrect, expected %d got %d\n",
1628 __LINE__, port_names[i], i+PORT_OFF,
1632 if (val != port_expected[i]) {
1633 printf("%d: %s value incorrect, expected %"PRIu64
1634 " got %d\n", __LINE__, port_names[i],
1635 port_expected[i], id);
1639 int reset_ret = rte_event_dev_xstats_reset(evdev,
1640 RTE_EVENT_DEV_XSTATS_PORT, PORT,
1644 printf("%d: failed to reset successfully\n", __LINE__);
1647 /* check value again */
1648 val = rte_event_dev_xstats_by_name_get(evdev, port_names[i], 0);
1649 if (val != port_expected_zero[i]) {
1650 printf("%d: %s value incorrect, expected %"PRIu64
1651 " got %"PRIu64"\n", __LINE__, port_names[i],
1652 port_expected_zero[i], val);
1659 /* num queue stats */
1660 #define NUM_Q_STATS 17
1661 /* queue offset from start of the devices whole xstats.
1662 * This will break every time we add a statistic to a device/port/queue
1664 #define QUEUE_OFF 90
1665 const uint32_t queue = 0;
1666 num_stats = rte_event_dev_xstats_names_get(evdev,
1667 RTE_EVENT_DEV_XSTATS_QUEUE, queue,
1668 xstats_names, ids, XSTATS_MAX);
1669 if (num_stats != NUM_Q_STATS) {
1670 printf("%d: expected %d stats, got return %d\n",
1671 __LINE__, NUM_Q_STATS, num_stats);
1674 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1675 queue, ids, values, num_stats);
1676 if (ret != NUM_Q_STATS) {
1677 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
1680 static const char * const queue_names[] = {
1690 "qid_0_port_0_pinned_flows",
1691 "qid_0_port_0_packets",
1692 "qid_0_port_1_pinned_flows",
1693 "qid_0_port_1_packets",
1694 "qid_0_port_2_pinned_flows",
1695 "qid_0_port_2_packets",
1696 "qid_0_port_3_pinned_flows",
1697 "qid_0_port_3_packets",
1699 uint64_t queue_expected[] = {
1709 /* QID-to-Port: pinned_flows, packets */
1715 uint64_t queue_expected_zero[] = {
1725 /* QID-to-Port: pinned_flows, packets */
1731 if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
1732 RTE_DIM(queue_expected_zero) != NUM_Q_STATS ||
1733 RTE_DIM(queue_names) != NUM_Q_STATS) {
1734 printf("%d : queue array of wrong size\n", __LINE__);
1739 for (i = 0; (int)i < ret; i++) {
1741 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1744 if (id != i + QUEUE_OFF) {
1745 printf("%d: %s id incorrect, expected %d got %d\n",
1746 __LINE__, queue_names[i], i+QUEUE_OFF,
1750 if (val != queue_expected[i]) {
1751 printf("%d: %d: %s value , expected %"PRIu64
1752 " got %"PRIu64"\n", i, __LINE__,
1753 queue_names[i], queue_expected[i], val);
1757 int reset_ret = rte_event_dev_xstats_reset(evdev,
1758 RTE_EVENT_DEV_XSTATS_QUEUE,
1761 printf("%d: failed to reset successfully\n", __LINE__);
1764 /* check value again */
1765 val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i],
1767 if (val != queue_expected_zero[i]) {
1768 printf("%d: %s value incorrect, expected %"PRIu64
1769 " got %"PRIu64"\n", __LINE__, queue_names[i],
1770 queue_expected_zero[i], val);
/* Verify that an ORDERED queue can be set up twice on the same queue id
 * (a reconfigure) without error, then link and start the device.
 * NOTE(review): extraction gaps — the return type line, braces and
 * `return -1;` error paths between the visible lines are missing here.
 */
1786 ordered_reconfigure(struct test *t)
1788 if (init(t, 1, 1) < 0 ||
1789 create_ports(t, 1) < 0) {
1790 printf("%d: Error initializing device\n", __LINE__);
/* Ordered queue configuration; atomic flow/order-sequence sizing set to 1024 */
1794 const struct rte_event_queue_conf conf = {
1795 .schedule_type = RTE_SCHED_TYPE_ORDERED,
1796 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1797 .nb_atomic_flows = 1024,
1798 .nb_atomic_order_sequences = 1024,
/* first setup of qid 0 */
1801 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1802 printf("%d: error creating qid\n", __LINE__);
/* second setup of the SAME qid — the reconfigure under test */
1806 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1807 printf("%d: error creating qid, for 2nd time\n", __LINE__);
/* NULL/NULL/0 links port 0 to all configured queues */
1811 rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1812 if (rte_event_dev_start(evdev) < 0) {
1813 printf("%d: Error with start call\n", __LINE__);
/* QID priority test: enqueue 3 packets to 3 queues of increasing priority
 * and check they dequeue in priority order (highest first), not ingress
 * order. NOTE(review): extraction gaps — braces, error returns and some
 * event-field assignments between the visible lines are missing.
 */
1825 qid_priorities(struct test *t)
1827 /* Test works by having a CQ with enough empty space for all packets,
1828 * and enqueueing 3 packets to 3 QIDs. They must return based on the
1829 * priority of the QID, not the ingress order, to pass the test
1832 /* Create instance with 1 ports, and 3 qids */
1833 if (init(t, 3, 1) < 0 ||
1834 create_ports(t, 1) < 0) {
1835 printf("%d: Error initializing device\n", __LINE__);
1839 for (i = 0; i < 3; i++) {
1841 const struct rte_event_queue_conf conf = {
1842 .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1843 /* increase priority (0 == highest), as we go */
1844 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
1845 .nb_atomic_flows = 1024,
1846 .nb_atomic_order_sequences = 1024,
1849 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
1850 printf("%d: error creating qid %d\n", __LINE__, i);
1856 /* map all QIDs to port */
1857 rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1859 if (rte_event_dev_start(evdev) < 0) {
1860 printf("%d: Error with start call\n", __LINE__);
1864 /* enqueue 3 packets, setting seqn and QID to check priority */
1865 for (i = 0; i < 3; i++) {
1866 struct rte_event ev;
1867 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1869 printf("%d: gen of pkt failed\n", __LINE__);
1872 ev.queue_id = t->qid[i];
1873 ev.op = RTE_EVENT_OP_NEW;
1877 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1879 printf("%d: Failed to enqueue\n", __LINE__);
/* run one scheduler iteration so events are distributed to the CQ */
1884 rte_service_run_iter_on_app_lcore(t->service_id, 1);
1886 /* dequeue packets, verify priority was upheld */
1887 struct rte_event ev[32];
1889 rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
1890 if (deq_pkts != 3) {
1891 printf("%d: failed to deq packets\n", __LINE__);
1892 rte_event_dev_dump(evdev, stdout);
/* highest-priority QID was enqueued last, so expected seqn runs 2,1,0 */
1895 for (i = 0; i < 3; i++) {
1896 if (ev[i].mbuf->seqn != 2-i) {
1898 "%d: qid priority test: seqn %d incorrectly prioritized\n",
/* Atomic load-balancing test: one QID fanned out to worker ports 1-3;
 * flows 0/1/2 must pin to distinct CQs and the per-port inflight counts
 * must match the flow distribution. NOTE(review): extraction gaps —
 * braces, error returns and loop declarations are missing between lines.
 */
1908 load_balancing(struct test *t)
1910 const int rx_enq = 0;
1914 if (init(t, 1, 4) < 0 ||
1915 create_ports(t, 4) < 0 ||
1916 create_atomic_qids(t, 1) < 0) {
1917 printf("%d: Error initializing device\n", __LINE__);
1921 for (i = 0; i < 3; i++) {
1922 /* map port 1 - 3 inclusive */
1923 if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
1925 printf("%d: error mapping qid to port %d\n",
1931 if (rte_event_dev_start(evdev) < 0) {
1932 printf("%d: Error with start call\n", __LINE__);
1936 /************** FORWARD ****************/
1938 * Create a set of flows that test the load-balancing operation of the
1939 * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
1940 * with a new flow, which should be sent to the 3rd mapped CQ
/* 9 events: four of flow 0, two of flow 1, three of flow 2 */
1942 static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
1944 for (i = 0; i < RTE_DIM(flows); i++) {
1945 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1947 printf("%d: gen of pkt failed\n", __LINE__);
1951 struct rte_event ev = {
1952 .op = RTE_EVENT_OP_NEW,
1953 .queue_id = t->qid[0],
1954 .flow_id = flows[i],
1957 /* generate pkt and enqueue */
1958 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1960 printf("%d: Failed to enqueue\n", __LINE__);
/* schedule, then check the resulting per-port inflight distribution */
1965 rte_service_run_iter_on_app_lcore(t->service_id, 1);
1967 struct test_event_dev_stats stats;
1968 err = test_event_dev_stats_get(evdev, &stats);
1970 printf("%d: failed to get stats\n", __LINE__);
1974 if (stats.port_inflight[1] != 4) {
1975 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
1979 if (stats.port_inflight[2] != 2) {
1980 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
1984 if (stats.port_inflight[3] != 3) {
1985 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
/* Flow-migration test: after all packets of a flow complete (released),
 * the flow must be free to land on a different CQ when it reappears.
 * NOTE(review): extraction gaps — braces, `return -1;` paths and some
 * declarations between the visible lines are missing.
 */
1995 load_balancing_history(struct test *t)
1997 struct test_event_dev_stats stats = {0};
1998 const int rx_enq = 0;
2002 /* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
2003 if (init(t, 1, 4) < 0 ||
2004 create_ports(t, 4) < 0 ||
2005 create_atomic_qids(t, 1) < 0)
2008 /* CQ mapping to QID */
2009 if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
2010 printf("%d: error mapping port 1 qid\n", __LINE__);
2013 if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
2014 printf("%d: error mapping port 2 qid\n", __LINE__);
2017 if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
2018 printf("%d: error mapping port 3 qid\n", __LINE__);
2021 if (rte_event_dev_start(evdev) < 0) {
2022 printf("%d: Error with start call\n", __LINE__);
2027 * Create a set of flows that test the load-balancing operation of the
2028 * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
2029 * the packet from CQ 0, send in a new set of flows. Ensure that:
2030 * 1. The new flow 3 gets into the empty CQ0
2031 * 2. packets for existing flow gets added into CQ1
2032 * 3. Next flow 0 pkt is now onto CQ2, since CQ0 and CQ1 now contain
2033 * more outstanding pkts
2035 * This test makes sure that when a flow ends (i.e. all packets
2036 * have been completed for that flow), that the flow can be moved
2037 * to a different CQ when new packets come in for that flow.
2039 static uint32_t flows1[] = {0, 1, 1, 2};
2041 for (i = 0; i < RTE_DIM(flows1); i++) {
2042 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2043 struct rte_event ev = {
2044 .flow_id = flows1[i],
2045 .op = RTE_EVENT_OP_NEW,
2046 .queue_id = t->qid[0],
2047 .event_type = RTE_EVENT_TYPE_CPU,
2048 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2053 printf("%d: gen of pkt failed\n", __LINE__);
/* mirror the flow id into the mbuf RSS hash so it can be checked on deq */
2056 arp->hash.rss = flows1[i];
2057 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2059 printf("%d: Failed to enqueue\n", __LINE__);
2064 /* call the scheduler */
2065 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2067 /* Dequeue the flow 0 packet from port 1, so that we can then drop */
2068 struct rte_event ev;
2069 if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
2070 printf("%d: failed to dequeue\n", __LINE__);
2073 if (ev.mbuf->hash.rss != flows1[0]) {
2074 printf("%d: unexpected flow received\n", __LINE__);
2078 /* drop the flow 0 packet from port 1 */
2079 rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
2081 /* call the scheduler */
2082 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2085 * Set up the next set of flows, first a new flow to fill up
2086 * CQ 0, so that the next flow 0 packet should go to CQ2
2088 static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
2090 for (i = 0; i < RTE_DIM(flows2); i++) {
2091 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2092 struct rte_event ev = {
2093 .flow_id = flows2[i],
2094 .op = RTE_EVENT_OP_NEW,
2095 .queue_id = t->qid[0],
2096 .event_type = RTE_EVENT_TYPE_CPU,
2097 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2102 printf("%d: gen of pkt failed\n", __LINE__);
2105 arp->hash.rss = flows2[i];
2107 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2109 printf("%d: Failed to enqueue\n", __LINE__);
2115 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2117 err = test_event_dev_stats_get(evdev, &stats);
2119 printf("%d:failed to get stats\n", __LINE__);
2124 * Now check the resulting inflights on each port.
2126 if (stats.port_inflight[1] != 3) {
2127 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2129 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2130 (unsigned int)stats.port_inflight[1],
2131 (unsigned int)stats.port_inflight[2],
2132 (unsigned int)stats.port_inflight[3]);
2135 if (stats.port_inflight[2] != 4) {
2136 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2138 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2139 (unsigned int)stats.port_inflight[1],
2140 (unsigned int)stats.port_inflight[2],
2141 (unsigned int)stats.port_inflight[3]);
2144 if (stats.port_inflight[3] != 2) {
2145 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2147 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2148 (unsigned int)stats.port_inflight[1],
2149 (unsigned int)stats.port_inflight[2],
2150 (unsigned int)stats.port_inflight[3]);
/* drain: dequeue and release every remaining event on ports 1-3 */
2154 for (i = 1; i <= 3; i++) {
2155 struct rte_event ev;
2156 while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
2157 rte_event_enqueue_burst(evdev, i, &release_ev, 1);
2159 rte_service_run_iter_on_app_lcore(t->service_id, 1);
/* Invalid-QID test: enqueue an event whose queue_id is out of range; the
 * enqueue itself succeeds, but the event must be dropped (counted in
 * port_rx_dropped, not in inflights and not in the device rx_dropped).
 * NOTE(review): extraction gaps — braces and error returns are missing.
 */
2166 invalid_qid(struct test *t)
2168 struct test_event_dev_stats stats;
2169 const int rx_enq = 0;
2173 if (init(t, 1, 4) < 0 ||
2174 create_ports(t, 4) < 0 ||
2175 create_atomic_qids(t, 1) < 0) {
2176 printf("%d: Error initializing device\n", __LINE__);
2180 /* CQ mapping to QID */
2181 for (i = 0; i < 4; i++) {
2182 err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
2185 printf("%d: error mapping port 1 qid\n", __LINE__);
2190 if (rte_event_dev_start(evdev) < 0) {
2191 printf("%d: Error with start call\n", __LINE__);
2196 * Send in a packet with an invalid qid to the scheduler.
2197 * We should see the packed enqueued OK, but the inflights for
2198 * that packet should not be incremented, and the rx_dropped
2199 * should be incremented.
2201 static uint32_t flows1[] = {20};
2203 for (i = 0; i < RTE_DIM(flows1); i++) {
2204 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2206 printf("%d: gen of pkt failed\n", __LINE__);
2210 struct rte_event ev = {
2211 .op = RTE_EVENT_OP_NEW,
/* qid[0] + 20 is deliberately beyond the single configured queue */
2212 .queue_id = t->qid[0] + flows1[i],
2216 /* generate pkt and enqueue */
2217 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2219 printf("%d: Failed to enqueue\n", __LINE__);
2224 /* call the scheduler */
2225 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2227 err = test_event_dev_stats_get(evdev, &stats);
2229 printf("%d: failed to get stats\n", __LINE__);
2234 * Now check the resulting inflights on the port, and the rx_dropped.
2236 if (stats.port_inflight[0] != 0) {
2237 printf("%d:%s: port 1 inflight count not correct\n", __LINE__,
2239 rte_event_dev_dump(evdev, stdout);
2242 if (stats.port_rx_dropped[0] != 1) {
2243 printf("%d:%s: port 1 drops\n", __LINE__, __func__);
2244 rte_event_dev_dump(evdev, stdout);
2247 /* each packet drop should only be counted in one place - port or dev */
2248 if (stats.rx_dropped != 0) {
2249 printf("%d:%s: port 1 dropped count not correct\n", __LINE__,
2251 rte_event_dev_dump(evdev, stdout);
/* Single-packet round trip: enqueue one NEW event with a magic seqn,
 * schedule, dequeue it on the worker port, release it, and check the
 * rx/tx/inflight stats at each step. NOTE(review): extraction gaps —
 * braces, `return -1;` paths and some assignments are missing.
 */
2260 single_packet(struct test *t)
2262 const uint32_t MAGIC_SEQN = 7321;
2263 struct rte_event ev;
2264 struct test_event_dev_stats stats;
2265 const int rx_enq = 0;
2266 const int wrk_enq = 2;
2269 /* Create instance with 4 ports */
2270 if (init(t, 1, 4) < 0 ||
2271 create_ports(t, 4) < 0 ||
2272 create_atomic_qids(t, 1) < 0) {
2273 printf("%d: Error initializing device\n", __LINE__);
2277 /* CQ mapping to QID */
2278 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
2280 printf("%d: error mapping lb qid\n", __LINE__);
2285 if (rte_event_dev_start(evdev) < 0) {
2286 printf("%d: Error with start call\n", __LINE__);
2290 /************** Gen pkt and enqueue ****************/
2291 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2293 printf("%d: gen of pkt failed\n", __LINE__);
2297 ev.op = RTE_EVENT_OP_NEW;
2298 ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
/* tag the mbuf so the dequeued event can be identified */
2302 arp->seqn = MAGIC_SEQN;
2304 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2306 printf("%d: Failed to enqueue\n", __LINE__);
2310 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2312 err = test_event_dev_stats_get(evdev, &stats);
2314 printf("%d: failed to get stats\n", __LINE__);
2318 if (stats.rx_pkts != 1 ||
2319 stats.tx_pkts != 1 ||
2320 stats.port_inflight[wrk_enq] != 1) {
2321 printf("%d: Sched core didn't handle pkt as expected\n",
2323 rte_event_dev_dump(evdev, stdout);
2329 deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
2331 printf("%d: Failed to deq\n", __LINE__);
2335 err = test_event_dev_stats_get(evdev, &stats);
2337 printf("%d: failed to get stats\n", __LINE__);
2341 err = test_event_dev_stats_get(evdev, &stats);
2342 if (ev.mbuf->seqn != MAGIC_SEQN) {
2343 printf("%d: magic sequence number not dequeued\n", __LINE__);
/* free the mbuf then release the event slot back to the device */
2347 rte_pktmbuf_free(ev.mbuf);
2348 err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
2350 printf("%d: Failed to enqueue\n", __LINE__);
2353 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2355 err = test_event_dev_stats_get(evdev, &stats);
2356 if (stats.port_inflight[wrk_enq] != 0) {
2357 printf("%d: port inflight not correct\n", __LINE__);
/* Inflight accounting test: two atomic QIDs mapped to two worker ports;
 * verify inflight counts stay constant across dequeue and only drop to
 * zero after RELEASE events are processed by the scheduler.
 * NOTE(review): extraction gaps — QID1_NUM/QID2_NUM/p1/p2 definitions,
 * braces and `return -1;` paths between the visible lines are missing.
 */
2366 inflight_counts(struct test *t)
2368 struct rte_event ev;
2369 struct test_event_dev_stats stats;
2370 const int rx_enq = 0;
2376 /* Create instance with 4 ports */
2377 if (init(t, 2, 3) < 0 ||
2378 create_ports(t, 3) < 0 ||
2379 create_atomic_qids(t, 2) < 0) {
2380 printf("%d: Error initializing device\n", __LINE__);
2384 /* CQ mapping to QID */
2385 err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
2387 printf("%d: error mapping lb qid\n", __LINE__);
2391 err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
2393 printf("%d: error mapping lb qid\n", __LINE__);
2398 if (rte_event_dev_start(evdev) < 0) {
2399 printf("%d: Error with start call\n", __LINE__);
2403 /************** FORWARD ****************/
2405 for (i = 0; i < QID1_NUM; i++) {
2406 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2409 printf("%d: gen of pkt failed\n", __LINE__);
2413 ev.queue_id = t->qid[0];
2414 ev.op = RTE_EVENT_OP_NEW;
2416 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2418 printf("%d: Failed to enqueue\n", __LINE__);
2423 for (i = 0; i < QID2_NUM; i++) {
2424 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2427 printf("%d: gen of pkt failed\n", __LINE__);
2430 ev.queue_id = t->qid[1];
2431 ev.op = RTE_EVENT_OP_NEW;
2433 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2435 printf("%d: Failed to enqueue\n", __LINE__);
2441 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2443 err = test_event_dev_stats_get(evdev, &stats);
2445 printf("%d: failed to get stats\n", __LINE__);
2449 if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
2450 stats.tx_pkts != QID1_NUM + QID2_NUM) {
2451 printf("%d: Sched core didn't handle pkt as expected\n",
2456 if (stats.port_inflight[p1] != QID1_NUM) {
2457 printf("%d: %s port 1 inflight not correct\n", __LINE__,
2461 if (stats.port_inflight[p2] != QID2_NUM) {
2462 printf("%d: %s port 2 inflight not correct\n", __LINE__,
2467 /************** DEQUEUE INFLIGHT COUNT CHECKS ****************/
2469 struct rte_event events[QID1_NUM + QID2_NUM];
2470 uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
2471 RTE_DIM(events), 0);
2473 if (deq_pkts != QID1_NUM) {
2474 printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
/* inflight must NOT drop on dequeue — only on release */
2477 err = test_event_dev_stats_get(evdev, &stats);
2478 if (stats.port_inflight[p1] != QID1_NUM) {
2479 printf("%d: port 1 inflight decrement after DEQ != 0\n",
2483 for (i = 0; i < QID1_NUM; i++) {
2484 err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
2487 printf("%d: %s rte enqueue of inf release failed\n",
2488 __LINE__, __func__);
2494 * As the scheduler core decrements inflights, it needs to run to
2495 * process packets to act on the drop messages
2497 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2499 err = test_event_dev_stats_get(evdev, &stats);
2500 if (stats.port_inflight[p1] != 0) {
2501 printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
/* repeat the dequeue/release cycle for the second port */
2506 deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
2507 RTE_DIM(events), 0);
2508 if (deq_pkts != QID2_NUM) {
2509 printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
2512 err = test_event_dev_stats_get(evdev, &stats);
2513 if (stats.port_inflight[p2] != QID2_NUM) {
2514 printf("%d: port 1 inflight decrement after DEQ != 0\n",
2518 for (i = 0; i < QID2_NUM; i++) {
2519 err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
2522 printf("%d: %s rte enqueue of inf release failed\n",
2523 __LINE__, __func__);
2529 * As the scheduler core decrements inflights, it needs to run to
2530 * process packets to act on the drop messages
2532 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2534 err = test_event_dev_stats_get(evdev, &stats);
2535 if (stats.port_inflight[p2] != 0) {
2536 printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
2543 rte_event_dev_dump(evdev, stdout);
/* Shared body for ordered/unordered basic tests: 3 workers on one
 * (un)ordered QID plus a directed tx QID. Dequeue one packet per worker,
 * re-enqueue in reverse order, then check the tx port sees all 3 and —
 * when check_order is set — in original seqn order.
 * NOTE(review): extraction gaps — the w2_port declaration, braces,
 * error returns and some event-field assignments are missing.
 */
2549 parallel_basic(struct test *t, int check_order)
2551 const uint8_t rx_port = 0;
2552 const uint8_t w1_port = 1;
2553 const uint8_t w3_port = 3;
2554 const uint8_t tx_port = 4;
2557 uint32_t deq_pkts, j;
2558 struct rte_mbuf *mbufs[3];
2559 struct rte_mbuf *mbufs_out[3] = { 0 };
2560 const uint32_t MAGIC_SEQN = 1234;
2562 /* Create instance with 4 ports */
2563 if (init(t, 2, tx_port + 1) < 0 ||
2564 create_ports(t, tx_port + 1) < 0 ||
2565 (check_order ? create_ordered_qids(t, 1) :
2566 create_unordered_qids(t, 1)) < 0 ||
2567 create_directed_qids(t, 1, &tx_port)) {
2568 printf("%d: Error initializing device\n", __LINE__);
2574 * We need three ports, all mapped to the same ordered qid0. Then we'll
2575 * take a packet out to each port, re-enqueue in reverse order,
2576 * then make sure the reordering has taken place properly when we
2577 * dequeue from the tx_port.
2579 * Simplified test setup diagram:
2583 * qid0 - w2_port - qid1
2587 /* CQ mapping to QID for LB ports (directed mapped on create) */
2588 for (i = w1_port; i <= w3_port; i++) {
2589 err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
2592 printf("%d: error mapping lb qid\n", __LINE__);
2598 if (rte_event_dev_start(evdev) < 0) {
2599 printf("%d: Error with start call\n", __LINE__);
2603 /* Enqueue 3 packets to the rx port */
2604 for (i = 0; i < 3; i++) {
2605 struct rte_event ev;
2606 mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
2608 printf("%d: gen of pkt failed\n", __LINE__);
2612 ev.queue_id = t->qid[0];
2613 ev.op = RTE_EVENT_OP_NEW;
2615 mbufs[i]->seqn = MAGIC_SEQN + i;
2617 /* generate pkt and enqueue */
2618 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
2620 printf("%d: Failed to enqueue pkt %u, retval = %u\n",
2626 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2628 /* use extra slot to make logic in loops easier */
2629 struct rte_event deq_ev[w3_port + 1];
2631 /* Dequeue the 3 packets, one from each worker port */
2632 for (i = w1_port; i <= w3_port; i++) {
2633 deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
2635 if (deq_pkts != 1) {
2636 printf("%d: Failed to deq\n", __LINE__);
2637 rte_event_dev_dump(evdev, stdout);
2642 /* Enqueue each packet in reverse order, flushing after each one */
2643 for (i = w3_port; i >= w1_port; i--) {
2645 deq_ev[i].op = RTE_EVENT_OP_FORWARD;
2646 deq_ev[i].queue_id = t->qid[1];
2647 err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
2649 printf("%d: Failed to enqueue\n", __LINE__);
2653 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2655 /* dequeue from the tx ports, we should get 3 packets */
2656 deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
2659 /* Check to see if we've got all 3 packets */
2660 if (deq_pkts != 3) {
2661 printf("%d: expected 3 pkts at tx port got %d from port %d\n",
2662 __LINE__, deq_pkts, tx_port);
2663 rte_event_dev_dump(evdev, stdout);
2667 /* Check to see if the sequence numbers are in expected order */
2669 for (j = 0 ; j < deq_pkts ; j++) {
2670 if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
2672 "%d: Incorrect sequence number(%d) from port %d\n",
2673 __LINE__, mbufs_out[j]->seqn, tx_port);
2679 /* Destroy the instance */
/* Ordered-queue basic test: parallel_basic with ordering checks enabled. */
2685 ordered_basic(struct test *t)
2687 return parallel_basic(t, 1);
/* Unordered-queue basic test: parallel_basic with ordering checks off. */
2691 unordered_basic(struct test *t)
2693 return parallel_basic(t, 0);
/* Head-of-line-blocking avoidance test: fill one port's CQ with a single
 * atomic flow until an event is forced to wait in the IQ, then enqueue a
 * second flow and verify it overtakes the blocked one onto the other
 * port's CQ. Uses xstats (cq_ring_used/free, iq_0_used) as the oracle.
 * NOTE(review): extraction gaps — braces, error returns and the flow_id
 * assignments before the later enqueues are missing between lines.
 */
2697 holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
2699 const struct rte_event new_ev = {
2700 .op = RTE_EVENT_OP_NEW
2701 /* all other fields zero */
2703 struct rte_event ev = new_ev;
2704 unsigned int rx_port = 0; /* port we get the first flow on */
2705 char rx_port_used_stat[64];
2706 char rx_port_free_stat[64];
2707 char other_port_used_stat[64];
2709 if (init(t, 1, 2) < 0 ||
2710 create_ports(t, 2) < 0 ||
2711 create_atomic_qids(t, 1) < 0) {
2712 printf("%d: Error initializing device\n", __LINE__);
2715 int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2716 if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 ||
2718 printf("%d: Error links queue to ports\n", __LINE__);
2721 if (rte_event_dev_start(evdev) < 0) {
2722 printf("%d: Error with start call\n", __LINE__);
2726 /* send one packet and see where it goes, port 0 or 1 */
2727 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2728 printf("%d: Error doing first enqueue\n", __LINE__);
2731 rte_service_run_iter_on_app_lcore(t->service_id, 1);
/* probe port 0's CQ to learn which port the flow was assigned to */
2733 if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
2737 snprintf(rx_port_used_stat, sizeof(rx_port_used_stat),
2738 "port_%u_cq_ring_used", rx_port);
2739 snprintf(rx_port_free_stat, sizeof(rx_port_free_stat),
2740 "port_%u_cq_ring_free", rx_port);
2741 snprintf(other_port_used_stat, sizeof(other_port_used_stat),
2742 "port_%u_cq_ring_used", rx_port ^ 1);
2743 if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL)
2745 printf("%d: Error, first event not scheduled\n", __LINE__);
2749 /* now fill up the rx port's queue with one flow to cause HOLB */
2752 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2753 printf("%d: Error with enqueue\n", __LINE__);
2756 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2757 } while (rte_event_dev_xstats_by_name_get(evdev,
2758 rx_port_free_stat, NULL) != 0);
2760 /* one more packet, which needs to stay in IQ - i.e. HOLB */
2762 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2763 printf("%d: Error with enqueue\n", __LINE__);
2766 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2768 /* check that the other port still has an empty CQ */
2769 if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2771 printf("%d: Error, second port CQ is not empty\n", __LINE__);
2774 /* check IQ now has one packet */
2775 if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2777 printf("%d: Error, QID does not have exactly 1 packet\n",
2782 /* send another flow, which should pass the other IQ entry */
2785 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2786 printf("%d: Error with enqueue\n", __LINE__);
2789 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2791 if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2793 printf("%d: Error, second flow did not pass out first\n",
2798 if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2800 printf("%d: Error, QID does not have exactly 1 packet\n",
2807 rte_event_dev_dump(evdev, stdout);
/* Worker lcore for the loopback stress test: dequeues bursts and forwards
 * each event through QIDs 0-8 for 16 passes (udata64 is the pass counter),
 * then frees the mbuf and releases the event.
 * NOTE(review): extraction gaps — the count variable, braces and some
 * queue_id/retry logic between the visible lines are missing.
 */
2813 worker_loopback_worker_fn(void *arg)
2815 struct test *t = arg;
2816 uint8_t port = t->port[1];
2821 * Takes packets from the input port and then loops them back through
2822 * the Eventdev. Each packet gets looped through QIDs 0-8, 16 times
2823 * so each packet goes through 8*16 = 128 times.
2825 printf("%d: \tWorker function started\n", __LINE__);
2826 while (count < NUM_PACKETS) {
2827 #define BURST_SIZE 32
2828 struct rte_event ev[BURST_SIZE];
2829 uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev,
2836 for (i = 0; i < nb_rx; i++) {
/* not yet at the last QID: forward to the next queue */
2838 if (ev[i].queue_id != 8) {
2839 ev[i].op = RTE_EVENT_OP_FORWARD;
2840 enqd = rte_event_enqueue_burst(evdev, port,
2843 printf("%d: Can't enqueue FWD!!\n",
/* completed one full pass through all QIDs */
2851 ev[i].mbuf->udata64++;
2852 if (ev[i].mbuf->udata64 != 16) {
2853 ev[i].op = RTE_EVENT_OP_FORWARD;
2854 enqd = rte_event_enqueue_burst(evdev, port,
2857 printf("%d: Can't enqueue FWD!!\n",
2863 /* we have hit 16 iterations through system - drop */
2864 rte_pktmbuf_free(ev[i].mbuf);
2866 ev[i].op = RTE_EVENT_OP_RELEASE;
2867 enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1);
2869 printf("%d drop enqueue failed\n", __LINE__);
/* Producer lcore for the loopback stress test: allocates NUM_PACKETS
 * mbufs (busy-waiting on pool exhaustion) and enqueues each as a NEW
 * event with a flow id derived from the mbuf address, retrying on a
 * full enqueue ring. NOTE(review): extraction gaps — the count variable,
 * udata64 reset and braces between the visible lines are missing.
 */
2879 worker_loopback_producer_fn(void *arg)
2881 struct test *t = arg;
2882 uint8_t port = t->port[0];
2885 printf("%d: \tProducer function started\n", __LINE__);
2886 while (count < NUM_PACKETS) {
2887 struct rte_mbuf *m = 0;
/* spin until the mempool has a free mbuf */
2889 m = rte_pktmbuf_alloc(t->mbuf_pool);
2890 } while (m == NULL);
2894 struct rte_event ev = {
2895 .op = RTE_EVENT_OP_NEW,
2896 .queue_id = t->qid[0],
/* low 16 bits of the mbuf pointer serve as a pseudo-random flow id */
2897 .flow_id = (uintptr_t)m & 0xFFFF,
/* retry loop: keep enqueueing until the event is accepted */
2901 if (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1) {
2902 while (rte_event_enqueue_burst(evdev, port, &ev, 1) !=
/* Loopback stress test driver: one producer lcore, one worker lcore,
 * 8 atomic QIDs; the app lcore runs the scheduler service until both
 * lcores finish, printing throughput once per second and declaring a
 * deadlock if tx_pkts stalls for ~3 seconds.
 * NOTE(review): extraction gaps — braces, `return -1;` paths and the
 * port/queue setup between some visible lines are missing.
 */
2914 worker_loopback(struct test *t)
2916 /* use a single producer core, and a worker core to see what happens
2917 * if the worker loops packets back multiple times
2919 struct test_event_dev_stats stats;
2920 uint64_t print_cycles = 0, cycles = 0;
2921 uint64_t tx_pkts = 0;
2923 int w_lcore, p_lcore;
2925 if (init(t, 8, 2) < 0 ||
2926 create_atomic_qids(t, 8) < 0) {
2927 printf("%d: Error initializing device\n", __LINE__);
2931 /* RX with low max events */
2932 static struct rte_event_port_conf conf = {
2933 .dequeue_depth = 32,
2934 .enqueue_depth = 64,
2936 /* beware: this cannot be initialized in the static above as it would
2937 * only be initialized once - and this needs to be set for multiple runs
2939 conf.new_event_threshold = 512;
2941 if (rte_event_port_setup(evdev, 0, &conf) < 0) {
2942 printf("Error setting up RX port\n");
2946 /* TX with higher max events */
2947 conf.new_event_threshold = 4096;
2948 if (rte_event_port_setup(evdev, 1, &conf) < 0) {
2949 printf("Error setting up TX port\n");
2954 /* CQ mapping to QID */
2955 err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2956 if (err != 8) { /* should have mapped all queues*/
2957 printf("%d: error mapping port 2 to all qids\n", __LINE__);
2961 if (rte_event_dev_start(evdev) < 0) {
2962 printf("%d: Error with start call\n", __LINE__);
2966 p_lcore = rte_get_next_lcore(
2967 /* start core */ -1,
2968 /* skip master */ 1,
2970 w_lcore = rte_get_next_lcore(p_lcore, 1, 0);
2972 rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);
2973 rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);
2975 print_cycles = cycles = rte_get_timer_cycles();
2976 while (rte_eal_get_lcore_state(p_lcore) != FINISHED ||
2977 rte_eal_get_lcore_state(w_lcore) != FINISHED) {
2979 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2981 uint64_t new_cycles = rte_get_timer_cycles();
/* once a second: report current rx/tx counters */
2983 if (new_cycles - print_cycles > rte_get_timer_hz()) {
2984 test_event_dev_stats_get(evdev, &stats);
2986 "%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n",
2987 __LINE__, stats.rx_pkts, stats.tx_pkts);
2989 print_cycles = new_cycles;
/* every ~3 seconds: if tx has not advanced, treat as deadlock */
2991 if (new_cycles - cycles > rte_get_timer_hz() * 3) {
2992 test_event_dev_stats_get(evdev, &stats);
2993 if (stats.tx_pkts == tx_pkts) {
2994 rte_event_dev_dump(evdev, stdout);
2995 printf("Dumping xstats:\n");
2998 "%d: No schedules for seconds, deadlock\n",
3002 tx_pkts = stats.tx_pkts;
3003 cycles = new_cycles;
3006 rte_service_run_iter_on_app_lcore(t->service_id, 1);
3007 /* ensure all completions are flushed */
3009 rte_eal_mp_wait_lcore();
/* mbuf pool shared across test re-runs; created once in test_sw_eventdev()
 * and intentionally never freed so repeated autotest invocations reuse it.
 */
3015 static struct rte_mempool *eventdev_func_mempool;
/*
 * Autotest entry point for the software eventdev PMD ("event_sw0").
 *
 * Creates the event_sw0 vdev if it is not already present, retrieves and
 * enables its scheduling service (so the test can drive scheduling from
 * the app lcore), creates the shared mbuf pool on first run, then executes
 * the functional sub-tests in sequence, printing a banner before each and
 * an error message on failure.
 *
 * NOTE(review): this extracted view is missing original lines — the
 * "static int" return type, the "if (ret != 0)" guards between each
 * sub-test call and its FAILED printf, the "return -1;" error paths,
 * and the tail (free(t); printf; return 0; }).  Visible code kept
 * byte-identical; confirm against the full file.
 */
3018 test_sw_eventdev(void)
3020 struct test *t = malloc(sizeof(struct test));
3023 /* manually initialize the op, older gcc's complain on static
3024 * initialization of struct elements that are a bitfield.
3026 release_ev.op = RTE_EVENT_OP_RELEASE;
3028 const char *eventdev_name = "event_sw0";
3029 evdev = rte_event_dev_get_dev_id(eventdev_name);
3031 printf("%d: Eventdev %s not found - creating.\n",
3032 __LINE__, eventdev_name);
3033 if (rte_vdev_init(eventdev_name, NULL) < 0) {
3034 printf("Error creating eventdev\n");
3037 evdev = rte_event_dev_get_dev_id(eventdev_name);
3039 printf("Error finding newly created eventdev\n");
3044 if (rte_event_dev_service_id_get(evdev, &t->service_id) < 0) {
3045 printf("Failed to get service ID for software event dev\n");
/* run the scheduler service manually from the app lcore, bypassing the
 * service-core runstate mapping check
 */
3049 rte_service_runstate_set(t->service_id, 1);
3050 rte_service_set_runstate_mapped_check(t->service_id, 0);
3052 /* Only create mbuf pool once, reuse for each test run */
3053 if (!eventdev_func_mempool) {
3054 eventdev_func_mempool = rte_pktmbuf_pool_create(
3055 "EVENTDEV_SW_SA_MBUF_POOL",
3056 (1<<12), /* 4k buffers */
3057 32 /*MBUF_CACHE_SIZE*/,
3059 512, /* use very small mbufs */
3061 if (!eventdev_func_mempool) {
3062 printf("ERROR creating mempool\n");
3066 t->mbuf_pool = eventdev_func_mempool;
3067 printf("*** Running Single Directed Packet test...\n");
3068 ret = test_single_directed_packet(t);
3070 printf("ERROR - Single Directed Packet test FAILED.\n");
3073 printf("*** Running Directed Forward Credit test...\n");
3074 ret = test_directed_forward_credits(t);
3076 printf("ERROR - Directed Forward Credit test FAILED.\n");
3079 printf("*** Running Single Load Balanced Packet test...\n");
3080 ret = single_packet(t);
3082 printf("ERROR - Single Packet test FAILED.\n");
3085 printf("*** Running Unordered Basic test...\n");
3086 ret = unordered_basic(t);
3088 printf("ERROR - Unordered Basic test FAILED.\n");
3091 printf("*** Running Ordered Basic test...\n");
3092 ret = ordered_basic(t);
3094 printf("ERROR - Ordered Basic test FAILED.\n");
3097 printf("*** Running Burst Packets test...\n");
3098 ret = burst_packets(t);
3100 printf("ERROR - Burst Packets test FAILED.\n");
3103 printf("*** Running Load Balancing test...\n");
3104 ret = load_balancing(t);
3106 printf("ERROR - Load Balancing test FAILED.\n");
3109 printf("*** Running Prioritized Directed test...\n");
3110 ret = test_priority_directed(t);
3112 printf("ERROR - Prioritized Directed test FAILED.\n");
3115 printf("*** Running Prioritized Atomic test...\n");
3116 ret = test_priority_atomic(t);
3118 printf("ERROR - Prioritized Atomic test FAILED.\n");
3122 printf("*** Running Prioritized Ordered test...\n");
3123 ret = test_priority_ordered(t);
3125 printf("ERROR - Prioritized Ordered test FAILED.\n");
3128 printf("*** Running Prioritized Unordered test...\n");
3129 ret = test_priority_unordered(t);
3131 printf("ERROR - Prioritized Unordered test FAILED.\n");
3134 printf("*** Running Invalid QID test...\n");
3135 ret = invalid_qid(t);
3137 printf("ERROR - Invalid QID test FAILED.\n");
3140 printf("*** Running Load Balancing History test...\n");
3141 ret = load_balancing_history(t);
3143 printf("ERROR - Load Balancing History test FAILED.\n");
3146 printf("*** Running Inflight Count test...\n");
3147 ret = inflight_counts(t);
3149 printf("ERROR - Inflight Count test FAILED.\n");
3152 printf("*** Running Abuse Inflights test...\n");
3153 ret = abuse_inflights(t);
3155 printf("ERROR - Abuse Inflights test FAILED.\n");
3158 printf("*** Running XStats test...\n");
3159 ret = xstats_tests(t);
3161 printf("ERROR - XStats test FAILED.\n");
3164 printf("*** Running XStats ID Reset test...\n");
3165 ret = xstats_id_reset_tests(t);
3167 printf("ERROR - XStats ID Reset test FAILED.\n");
3170 printf("*** Running XStats Brute Force test...\n");
3171 ret = xstats_brute_force(t);
3173 printf("ERROR - XStats Brute Force test FAILED.\n");
3176 printf("*** Running XStats ID Abuse test...\n");
3177 ret = xstats_id_abuse_tests(t);
3179 printf("ERROR - XStats ID Abuse test FAILED.\n");
3182 printf("*** Running QID Priority test...\n");
3183 ret = qid_priorities(t);
3185 printf("ERROR - QID Priority test FAILED.\n");
3188 printf("*** Running Ordered Reconfigure test...\n");
3189 ret = ordered_reconfigure(t);
3191 printf("ERROR - Ordered Reconfigure test FAILED.\n");
3194 printf("*** Running Port LB Single Reconfig test...\n");
3195 ret = port_single_lb_reconfig(t);
3197 printf("ERROR - Port LB Single Reconfig test FAILED.\n");
3200 printf("*** Running Port Reconfig Credits test...\n");
3201 ret = port_reconfig_credits(t);
3203 printf("ERROR - Port Reconfig Credits Reset test FAILED.\n");
3206 printf("*** Running Head-of-line-blocking test...\n");
3209 printf("ERROR - Head-of-line-blocking test FAILED.\n");
/* loopback test needs master + producer + worker lcores */
3212 if (rte_lcore_count() >= 3) {
3213 printf("*** Running Worker loopback test...\n");
3214 ret = worker_loopback(t);
3216 printf("ERROR - Worker loopback test FAILED.\n");
3220 printf("### Not enough cores for worker loopback test.\n");
3221 printf("### Need at least 3 cores for test.\n");
3224 * Free test instance, leaving mempool initialized, and a pointer to it
3225 * in static eventdev_func_mempool, as it is re-used on re-runs
/* register the test with the DPDK autotest framework under the
 * "eventdev_sw_autotest" command name
 */
3232 REGISTER_TEST_COMMAND(eventdev_sw_autotest, test_sw_eventdev);