1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2017 Intel Corporation
10 #include <sys/queue.h>
12 #include <rte_memory.h>
13 #include <rte_launch.h>
15 #include <rte_per_lcore.h>
16 #include <rte_lcore.h>
17 #include <rte_debug.h>
18 #include <rte_ethdev.h>
19 #include <rte_cycles.h>
20 #include <rte_eventdev.h>
21 #include <rte_pause.h>
22 #include <rte_service.h>
23 #include <rte_service_component.h>
24 #include <rte_bus_vdev.h>
30 #define NUM_PACKETS (1<<18)
31 #define DEQUEUE_DEPTH 128
36 struct rte_mempool *mbuf_pool;
37 uint8_t port[MAX_PORTS];
38 uint8_t qid[MAX_QIDS];
43 typedef uint8_t counter_dynfield_t;
44 static int counter_dynfield_offset = -1;
/* counter_field(): return a pointer to the per-mbuf counter dynfield,
 * located at counter_dynfield_offset (registered elsewhere in the file).
 * NOTE(review): this listing is elided (braces and some lines missing);
 * comments added only, code untouched.
 */
46 static inline counter_dynfield_t *
47 counter_field(struct rte_mbuf *mbuf)
49 return RTE_MBUF_DYNFIELD(mbuf, \
50 counter_dynfield_offset, counter_dynfield_t *);
53 static struct rte_event release_ev;
/* rte_gen_arp(): allocate an mbuf from 'mp' and fill it with a canned
 * ARP request frame (who-has 10.0.0.1 tell 10.0.0.2); pkt_len/data_len
 * are set to the copied size. 'portid' is unused in the visible lines.
 * NOTE(review): listing is elided — the declaration of 'm', the NULL
 * check after alloc, and the closing return are not visible here.
 */
55 static inline struct rte_mbuf *
56 rte_gen_arp(int portid, struct rte_mempool *mp)
60 * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
62 static const uint8_t arp_request[] = {
63 /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
64 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
65 /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
66 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
67 /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
68 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
69 /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
70 0x00, 0x00, 0x00, 0x00
/* NOTE(review): length is sizeof(arp_request) - 1, i.e. the last byte of
 * the template is NOT copied — matches upstream test code, but confirm
 * this is intentional before relying on the frame contents.
 */
73 int pkt_len = sizeof(arp_request) - 1;
75 m = rte_pktmbuf_alloc(mp);
/* copy template into the mbuf's data area at the headroom offset */
79 memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
80 arp_request, pkt_len);
81 rte_pktmbuf_pkt_len(m) = pkt_len;
82 rte_pktmbuf_data_len(m) = pkt_len;
/* Fragment of an xstats dump helper (function signature elided from this
 * listing): fetches and prints every DEVICE, PORT and QUEUE extended
 * statistic name/value pair for 'evdev'. Used for debug output on test
 * failure, presumably — confirm against the full file.
 */
92 const uint32_t XSTATS_MAX = 1024;
94 uint32_t ids[XSTATS_MAX];
95 uint64_t values[XSTATS_MAX];
96 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
98 for (i = 0; i < XSTATS_MAX; i++)
101 /* Device names / values */
102 int ret = rte_event_dev_xstats_names_get(evdev,
103 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
104 xstats_names, ids, XSTATS_MAX);
106 printf("%d: xstats names get() returned error\n",
110 ret = rte_event_dev_xstats_get(evdev,
111 RTE_EVENT_DEV_XSTATS_DEVICE,
112 0, ids, values, ret);
113 if (ret > (signed int)XSTATS_MAX)
114 printf("%s %d: more xstats available than space\n",
116 for (i = 0; (signed int)i < ret; i++) {
117 printf("%d : %s : %"PRIu64"\n",
118 i, xstats_names[i].name, values[i]);
121 /* Port names / values */
122 ret = rte_event_dev_xstats_names_get(evdev,
123 RTE_EVENT_DEV_XSTATS_PORT, 0,
124 xstats_names, ids, XSTATS_MAX);
/* NOTE(review): names were fetched for port 0 but values for port 1 —
 * looks deliberate in the upstream test, but verify before reuse.
 */
125 ret = rte_event_dev_xstats_get(evdev,
126 RTE_EVENT_DEV_XSTATS_PORT, 1,
128 if (ret > (signed int)XSTATS_MAX)
129 printf("%s %d: more xstats available than space\n",
131 for (i = 0; (signed int)i < ret; i++) {
132 printf("%d : %s : %"PRIu64"\n",
133 i, xstats_names[i].name, values[i]);
136 /* Queue names / values */
137 ret = rte_event_dev_xstats_names_get(evdev,
138 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
139 xstats_names, ids, XSTATS_MAX);
140 ret = rte_event_dev_xstats_get(evdev,
141 RTE_EVENT_DEV_XSTATS_QUEUE,
142 1, ids, values, ret);
143 if (ret > (signed int)XSTATS_MAX)
144 printf("%s %d: more xstats available than space\n",
146 for (i = 0; (signed int)i < ret; i++) {
147 printf("%d : %s : %"PRIu64"\n",
148 i, xstats_names[i].name, values[i]);
152 /* initialization and config */
/* init(): zero the test context (preserving the mbuf pool pointer) and
 * configure 'evdev' with the requested queue/port counts plus fixed
 * flow/limit/depth parameters. Returns non-negative on success,
 * presumably — return paths are elided from this listing.
 */
154 init(struct test *t, int nb_queues, int nb_ports)
156 struct rte_event_dev_config config = {
157 .nb_event_queues = nb_queues,
158 .nb_event_ports = nb_ports,
159 .nb_event_queue_flows = 1024,
160 .nb_events_limit = 4096,
161 .nb_event_port_dequeue_depth = DEQUEUE_DEPTH,
162 .nb_event_port_enqueue_depth = 128,
/* memset() below would wipe the pool pointer, so stash it first */
166 void *temp = t->mbuf_pool; /* save and restore mbuf pool */
168 memset(t, 0, sizeof(*t));
171 ret = rte_event_dev_configure(evdev, &config);
173 printf("%d: Error configuring device\n", __LINE__);
/* create_ports(): set up 'num_ports' event ports on evdev with a shared
 * static config (new-event threshold 1024). Fails if num_ports exceeds
 * MAX_PORTS or any port setup fails. Listing elided; comments only.
 */
178 create_ports(struct test *t, int num_ports)
181 static const struct rte_event_port_conf conf = {
182 .new_event_threshold = 1024,
186 if (num_ports > MAX_PORTS)
189 for (i = 0; i < num_ports; i++) {
190 if (rte_event_port_setup(evdev, i, &conf) < 0) {
191 printf("Error setting up port %d\n", i);
/* create_lb_qids(): create 'num_qids' load-balanced queues of schedule
 * type 'flags' (atomic/ordered/parallel), appending to t->nb_qids.
 * Overflow past MAX_QIDS is checked after the bump. Listing elided.
 */
201 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
206 const struct rte_event_queue_conf conf = {
207 .schedule_type = flags,
208 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
209 .nb_atomic_flows = 1024,
210 .nb_atomic_order_sequences = 1024,
213 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
214 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
215 printf("%d: error creating qid %d\n", __LINE__, i);
220 t->nb_qids += num_qids;
221 if (t->nb_qids > MAX_QIDS)
/* create_atomic_qids(): thin wrapper — LB queues with atomic scheduling. */
228 create_atomic_qids(struct test *t, int num_qids)
230 return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC);
/* create_ordered_qids(): thin wrapper — LB queues with ordered scheduling. */
234 create_ordered_qids(struct test *t, int num_qids)
236 return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ORDERED);
/* create_unordered_qids(): "unordered" maps to PARALLEL scheduling. */
241 create_unordered_qids(struct test *t, int num_qids)
243 return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_PARALLEL);
/* create_directed_qids(): create 'num_qids' single-link (directed)
 * queues and link queue i to ports[i - t->nb_qids] 1:1. Bumps
 * t->nb_qids and checks MAX_QIDS afterwards. Listing elided.
 */
247 create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
252 static const struct rte_event_queue_conf conf = {
253 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
254 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
257 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
258 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
259 printf("%d: error creating qid %d\n", __LINE__, i);
/* rte_event_port_link() returns the number of links made; expect 1 */
264 if (rte_event_port_link(evdev, ports[i - t->nb_qids],
265 &t->qid[i], NULL, 1) != 1) {
266 printf("%d: error creating link for qid %d\n",
271 t->nb_qids += num_qids;
272 if (t->nb_qids > MAX_QIDS)
/* cleanup(): per-test teardown — stop then close the event device. */
280 cleanup(struct test *t __rte_unused)
282 rte_event_dev_stop(evdev);
283 rte_event_dev_close(evdev);
/* Aggregated snapshot of the sw eventdev's xstats, filled in by
 * test_event_dev_stats_get() below via by-name xstat lookups.
 */
287 struct test_event_dev_stats {
288 uint64_t rx_pkts; /**< Total packets received */
289 uint64_t rx_dropped; /**< Total packets dropped (Eg Invalid QID) */
290 uint64_t tx_pkts; /**< Total packets transmitted */
292 /** Packets received on this port */
293 uint64_t port_rx_pkts[MAX_PORTS];
294 /** Packets dropped on this port */
295 uint64_t port_rx_dropped[MAX_PORTS];
296 /** Packets inflight on this port */
297 uint64_t port_inflight[MAX_PORTS];
298 /** Packets transmitted on this port */
299 uint64_t port_tx_pkts[MAX_PORTS];
300 /** Packets received on this qid */
301 uint64_t qid_rx_pkts[MAX_QIDS];
302 /** Packets dropped on this qid */
303 uint64_t qid_rx_dropped[MAX_QIDS];
304 /** Packets transmitted on this qid */
305 uint64_t qid_tx_pkts[MAX_QIDS];
/* test_event_dev_stats_get(): populate 'stats' by looking up each sw
 * eventdev xstat by its well-known name ("dev_rx", "port_N_rx",
 * "qid_N_tx", ...). The static id arrays cache the xstat ids returned
 * by the by-name lookups. Error handling elided from this listing.
 */
309 test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
312 static uint32_t total_ids[3]; /* rx, tx and drop */
313 static uint32_t port_rx_pkts_ids[MAX_PORTS];
314 static uint32_t port_rx_dropped_ids[MAX_PORTS];
315 static uint32_t port_inflight_ids[MAX_PORTS];
316 static uint32_t port_tx_pkts_ids[MAX_PORTS];
317 static uint32_t qid_rx_pkts_ids[MAX_QIDS];
318 static uint32_t qid_rx_dropped_ids[MAX_QIDS];
319 static uint32_t qid_tx_pkts_ids[MAX_QIDS];
/* device-wide totals */
322 stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
323 "dev_rx", &total_ids[0]);
324 stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
325 "dev_drop", &total_ids[1]);
326 stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
327 "dev_tx", &total_ids[2]);
/* per-port counters */
328 for (i = 0; i < MAX_PORTS; i++) {
330 snprintf(name, sizeof(name), "port_%u_rx", i);
331 stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
332 dev_id, name, &port_rx_pkts_ids[i]);
333 snprintf(name, sizeof(name), "port_%u_drop", i);
334 stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
335 dev_id, name, &port_rx_dropped_ids[i]);
336 snprintf(name, sizeof(name), "port_%u_inflight", i);
337 stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
338 dev_id, name, &port_inflight_ids[i]);
339 snprintf(name, sizeof(name), "port_%u_tx", i);
340 stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
341 dev_id, name, &port_tx_pkts_ids[i]);
/* per-queue counters */
343 for (i = 0; i < MAX_QIDS; i++) {
345 snprintf(name, sizeof(name), "qid_%u_rx", i);
346 stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
347 dev_id, name, &qid_rx_pkts_ids[i]);
348 snprintf(name, sizeof(name), "qid_%u_drop", i);
349 stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
350 dev_id, name, &qid_rx_dropped_ids[i]);
351 snprintf(name, sizeof(name), "qid_%u_tx", i);
352 stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
353 dev_id, name, &qid_tx_pkts_ids[i]);
359 /* run_prio_packet_test
360 * This performs a basic packet priority check on the test instance passed in.
361 * It is factored out of the main priority tests as the same tests must be
362 * performed to ensure prioritization of each type of QID.
365 * - An initialized test structure, including mempool
366 * - t->port[0] is initialized for both Enq / Deq of packets to the QID
367 * - t->qid[0] is the QID to be tested
368 * - if LB QID, the CQ must be mapped to the QID.
371 run_prio_packet_test(struct test *t)
/* Enqueue two packets with distinct seqn markers: the NORMAL-priority
 * packet first, HIGHEST second. After scheduling, the HIGHEST-priority
 * packet must be dequeued first.
 */
374 const uint32_t MAGIC_SEQN[] = {4711, 1234};
375 const uint32_t PRIORITY[] = {
376 RTE_EVENT_DEV_PRIORITY_NORMAL,
377 RTE_EVENT_DEV_PRIORITY_HIGHEST
380 for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
381 /* generate pkt and enqueue */
383 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
385 printf("%d: gen of pkt failed\n", __LINE__);
/* tag the mbuf so we can identify it on dequeue */
388 arp->seqn = MAGIC_SEQN[i];
390 ev = (struct rte_event){
391 .priority = PRIORITY[i],
392 .op = RTE_EVENT_OP_NEW,
393 .queue_id = t->qid[0],
396 err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
398 printf("%d: error failed to enqueue\n", __LINE__);
/* run one iteration of the sw scheduler service */
403 rte_service_run_iter_on_app_lcore(t->service_id, 1);
405 struct test_event_dev_stats stats;
406 err = test_event_dev_stats_get(evdev, &stats);
408 printf("%d: error failed to get stats\n", __LINE__);
412 if (stats.port_rx_pkts[t->port[0]] != 2) {
413 printf("%d: error stats incorrect for directed port\n",
415 rte_event_dev_dump(evdev, stdout);
419 struct rte_event ev, ev2;
421 deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
423 printf("%d: error failed to deq\n", __LINE__);
424 rte_event_dev_dump(evdev, stdout);
/* first dequeue must be the HIGHEST-priority packet (seqn 1234) */
427 if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
428 printf("%d: first packet out not highest priority\n",
430 rte_event_dev_dump(evdev, stdout);
433 rte_pktmbuf_free(ev.mbuf);
435 deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
437 printf("%d: error failed to deq\n", __LINE__);
438 rte_event_dev_dump(evdev, stdout);
/* second dequeue must be the NORMAL-priority packet (seqn 4711) */
441 if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
442 printf("%d: second packet out not lower priority\n",
444 rte_event_dev_dump(evdev, stdout);
447 rte_pktmbuf_free(ev2.mbuf);
/* test_single_directed_packet(): enqueue one tagged packet to a directed
 * QID on port 0 and verify it emerges on the linked worker port with
 * stats and the seqn marker intact. Listing elided; comments only.
 */
454 test_single_directed_packet(struct test *t)
456 const int rx_enq = 0;
457 const int wrk_enq = 2;
460 /* Create instance with 3 directed QIDs going to 3 ports */
461 if (init(t, 3, 3) < 0 ||
462 create_ports(t, 3) < 0 ||
463 create_directed_qids(t, 3, t->port) < 0)
466 if (rte_event_dev_start(evdev) < 0) {
467 printf("%d: Error with start call\n", __LINE__);
471 /************** FORWARD ****************/
472 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
473 struct rte_event ev = {
474 .op = RTE_EVENT_OP_NEW,
480 printf("%d: gen of pkt failed\n", __LINE__);
484 const uint32_t MAGIC_SEQN = 4711;
485 arp->seqn = MAGIC_SEQN;
487 /* generate pkt and enqueue */
488 err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
490 printf("%d: error failed to enqueue\n", __LINE__);
494 /* Run schedule() as dir packets may need to be re-ordered */
495 rte_service_run_iter_on_app_lcore(t->service_id, 1);
497 struct test_event_dev_stats stats;
498 err = test_event_dev_stats_get(evdev, &stats);
500 printf("%d: error failed to get stats\n", __LINE__);
504 if (stats.port_rx_pkts[rx_enq] != 1) {
505 printf("%d: error stats incorrect for directed port\n",
/* packet should be waiting on the worker port (port 2) */
511 deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
513 printf("%d: error failed to deq\n", __LINE__);
517 err = test_event_dev_stats_get(evdev, &stats);
518 if (stats.port_rx_pkts[wrk_enq] != 0 &&
519 stats.port_rx_pkts[wrk_enq] != 1) {
520 printf("%d: error directed stats post-dequeue\n", __LINE__);
524 if (ev.mbuf->seqn != MAGIC_SEQN) {
525 printf("%d: error magic sequence number not dequeued\n",
530 rte_pktmbuf_free(ev.mbuf);
/* test_directed_forward_credits(): loop a single event through a 1-port,
 * 1-directed-queue device 1000 times using OP_FORWARD, verifying that
 * forwarding does not leak credits. Listing elided; comments only.
 */
536 test_directed_forward_credits(struct test *t)
541 if (init(t, 1, 1) < 0 ||
542 create_ports(t, 1) < 0 ||
543 create_directed_qids(t, 1, t->port) < 0)
546 if (rte_event_dev_start(evdev) < 0) {
547 printf("%d: Error with start call\n", __LINE__);
551 struct rte_event ev = {
552 .op = RTE_EVENT_OP_NEW,
556 for (i = 0; i < 1000; i++) {
557 err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
559 printf("%d: error failed to enqueue\n", __LINE__);
/* schedule, then pull the event back off the same port */
562 rte_service_run_iter_on_app_lcore(t->service_id, 1);
565 deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
567 printf("%d: error failed to deq\n", __LINE__);
571 /* re-write event to be a forward, and continue looping it */
572 ev.op = RTE_EVENT_OP_FORWARD;
/* test_priority_directed(): priority check on a directed QID — setup
 * only; the actual assertions live in run_prio_packet_test().
 */
581 test_priority_directed(struct test *t)
583 if (init(t, 1, 1) < 0 ||
584 create_ports(t, 1) < 0 ||
585 create_directed_qids(t, 1, t->port) < 0) {
586 printf("%d: Error initializing device\n", __LINE__);
590 if (rte_event_dev_start(evdev) < 0) {
591 printf("%d: Error with start call\n", __LINE__);
595 return run_prio_packet_test(t);
/* test_priority_atomic(): priority check on an atomic QID — link the
 * CQ before start, then defer to run_prio_packet_test().
 */
599 test_priority_atomic(struct test *t)
601 if (init(t, 1, 1) < 0 ||
602 create_ports(t, 1) < 0 ||
603 create_atomic_qids(t, 1) < 0) {
604 printf("%d: Error initializing device\n", __LINE__);
609 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
610 printf("%d: error mapping qid to port\n", __LINE__);
613 if (rte_event_dev_start(evdev) < 0) {
614 printf("%d: Error with start call\n", __LINE__);
618 return run_prio_packet_test(t);
/* test_priority_ordered(): same shape as the atomic variant, but the
 * QID uses ordered scheduling.
 */
622 test_priority_ordered(struct test *t)
624 if (init(t, 1, 1) < 0 ||
625 create_ports(t, 1) < 0 ||
626 create_ordered_qids(t, 1) < 0) {
627 printf("%d: Error initializing device\n", __LINE__);
632 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
633 printf("%d: error mapping qid to port\n", __LINE__);
636 if (rte_event_dev_start(evdev) < 0) {
637 printf("%d: Error with start call\n", __LINE__);
641 return run_prio_packet_test(t);
/* test_priority_unordered(): same shape again, with a parallel
 * ("unordered") QID.
 */
645 test_priority_unordered(struct test *t)
647 if (init(t, 1, 1) < 0 ||
648 create_ports(t, 1) < 0 ||
649 create_unordered_qids(t, 1) < 0) {
650 printf("%d: Error initializing device\n", __LINE__);
655 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
656 printf("%d: error mapping qid to port\n", __LINE__);
659 if (rte_event_dev_start(evdev) < 0) {
660 printf("%d: Error with start call\n", __LINE__);
664 return run_prio_packet_test(t);
/* burst_packets(): enqueue NUM_PKTS events spread across two atomic
 * QIDs (one linked to each of two ports) and verify half the packets
 * arrive on each port. Listing elided; comments only.
 */
668 burst_packets(struct test *t)
670 /************** CONFIG ****************/
675 /* Create instance with 2 ports and 2 queues */
676 if (init(t, 2, 2) < 0 ||
677 create_ports(t, 2) < 0 ||
678 create_atomic_qids(t, 2) < 0) {
679 printf("%d: Error initializing device\n", __LINE__);
683 /* CQ mapping to QID */
684 ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
686 printf("%d: error mapping lb qid0\n", __LINE__);
689 ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
691 printf("%d: error mapping lb qid1\n", __LINE__);
695 if (rte_event_dev_start(evdev) < 0) {
696 printf("%d: Error with start call\n", __LINE__);
700 /************** FORWARD ****************/
701 const uint32_t rx_port = 0;
702 const uint32_t NUM_PKTS = 2;
704 for (i = 0; i < NUM_PKTS; i++) {
705 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
707 printf("%d: error generating pkt\n", __LINE__);
711 struct rte_event ev = {
712 .op = RTE_EVENT_OP_NEW,
717 /* generate pkt and enqueue */
718 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
720 printf("%d: Failed to enqueue\n", __LINE__);
724 rte_service_run_iter_on_app_lcore(t->service_id, 1);
726 /* Check stats for all NUM_PKTS arrived to sched core */
727 struct test_event_dev_stats stats;
729 err = test_event_dev_stats_get(evdev, &stats);
731 printf("%d: failed to get stats\n", __LINE__);
734 if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
735 printf("%d: Sched core didn't receive all %d pkts\n",
737 rte_event_dev_dump(evdev, stdout);
745 /******** DEQ QID 1 *******/
748 p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
750 rte_pktmbuf_free(ev.mbuf);
753 if (deq_pkts != NUM_PKTS/2) {
754 printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
759 /******** DEQ QID 2 *******/
763 p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
765 rte_pktmbuf_free(ev.mbuf);
767 if (deq_pkts != NUM_PKTS/2) {
768 printf("%d: Half of NUM_PKTS didn't arrive at port 2\n",
/* abuse_inflights(): enqueue a RELEASE op with no prior event held and
 * verify the scheduler treats it as a no-op (zero rx/tx, zero inflight
 * on the worker port). Listing elided; comments only.
 */
778 abuse_inflights(struct test *t)
780 const int rx_enq = 0;
781 const int wrk_enq = 2;
784 /* Create instance with 4 ports */
785 if (init(t, 1, 4) < 0 ||
786 create_ports(t, 4) < 0 ||
787 create_atomic_qids(t, 1) < 0) {
788 printf("%d: Error initializing device\n", __LINE__);
792 /* CQ mapping to QID */
793 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
795 printf("%d: error mapping lb qid\n", __LINE__);
800 if (rte_event_dev_start(evdev) < 0) {
801 printf("%d: Error with start call\n", __LINE__);
805 /* Enqueue op only */
806 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
808 printf("%d: Failed to enqueue\n", __LINE__);
813 rte_service_run_iter_on_app_lcore(t->service_id, 1);
815 struct test_event_dev_stats stats;
817 err = test_event_dev_stats_get(evdev, &stats);
819 printf("%d: failed to get stats\n", __LINE__);
823 if (stats.rx_pkts != 0 ||
824 stats.tx_pkts != 0 ||
825 stats.port_inflight[wrk_enq] != 0) {
826 printf("%d: Sched core didn't handle pkt as expected\n",
/* xstats_tests(): exhaustive check of the sw eventdev xstats API —
 * expected stat counts per mode (device/port/queue), expected values
 * after enqueuing 3 packets, behaviour of xstats reset, and a negative
 * test for an out-of-range queue id. The expected-value tables below
 * are tightly coupled to the sw PMD's stat layout and WILL need updating
 * whenever a stat is added. Listing elided; comments only.
 */
836 xstats_tests(struct test *t)
838 const int wrk_enq = 2;
841 /* Create instance with 4 ports */
842 if (init(t, 1, 4) < 0 ||
843 create_ports(t, 4) < 0 ||
844 create_atomic_qids(t, 1) < 0) {
845 printf("%d: Error initializing device\n", __LINE__);
849 /* CQ mapping to QID */
850 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
852 printf("%d: error mapping lb qid\n", __LINE__);
857 if (rte_event_dev_start(evdev) < 0) {
858 printf("%d: Error with start call\n", __LINE__);
862 const uint32_t XSTATS_MAX = 1024;
865 uint32_t ids[XSTATS_MAX];
866 uint64_t values[XSTATS_MAX];
867 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
869 for (i = 0; i < XSTATS_MAX; i++)
872 /* Device names / values */
873 int ret = rte_event_dev_xstats_names_get(evdev,
874 RTE_EVENT_DEV_XSTATS_DEVICE,
875 0, xstats_names, ids, XSTATS_MAX);
877 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
880 ret = rte_event_dev_xstats_get(evdev,
881 RTE_EVENT_DEV_XSTATS_DEVICE,
882 0, ids, values, ret);
884 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
888 /* Port names / values */
889 ret = rte_event_dev_xstats_names_get(evdev,
890 RTE_EVENT_DEV_XSTATS_PORT, 0,
891 xstats_names, ids, XSTATS_MAX);
893 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
896 ret = rte_event_dev_xstats_get(evdev,
897 RTE_EVENT_DEV_XSTATS_PORT, 0,
900 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
904 /* Queue names / values */
905 ret = rte_event_dev_xstats_names_get(evdev,
906 RTE_EVENT_DEV_XSTATS_QUEUE,
907 0, xstats_names, ids, XSTATS_MAX);
909 printf("%d: expected 16 stats, got return %d\n", __LINE__, ret);
913 /* NEGATIVE TEST: with wrong queue passed, 0 stats should be returned */
914 ret = rte_event_dev_xstats_get(evdev,
915 RTE_EVENT_DEV_XSTATS_QUEUE,
916 1, ids, values, ret);
917 if (ret != -EINVAL) {
918 printf("%d: expected 0 stats, got return %d\n", __LINE__, ret);
922 ret = rte_event_dev_xstats_get(evdev,
923 RTE_EVENT_DEV_XSTATS_QUEUE,
924 0, ids, values, ret);
926 printf("%d: expected 16 stats, got return %d\n", __LINE__, ret);
930 /* enqueue packets to check values */
931 for (i = 0; i < 3; i++) {
933 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
935 printf("%d: gen of pkt failed\n", __LINE__);
938 ev.queue_id = t->qid[i];
939 ev.op = RTE_EVENT_OP_NEW;
944 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
946 printf("%d: Failed to enqueue\n", __LINE__);
951 rte_service_run_iter_on_app_lcore(t->service_id, 1);
953 /* Device names / values */
954 int num_stats = rte_event_dev_xstats_names_get(evdev,
955 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
956 xstats_names, ids, XSTATS_MAX);
959 ret = rte_event_dev_xstats_get(evdev,
960 RTE_EVENT_DEV_XSTATS_DEVICE,
961 0, ids, values, num_stats);
/* rx=3, tx=3, drop=0, 1 sched call, 0 no-iq/no-cq enqueues */
962 static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
963 for (i = 0; (signed int)i < ret; i++) {
964 if (expected[i] != values[i]) {
966 "%d Error xstat %d (id %d) %s : %"PRIu64
967 ", expect %"PRIu64"\n",
968 __LINE__, i, ids[i], xstats_names[i].name,
969 values[i], expected[i]);
974 ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
977 /* ensure reset statistics are zero-ed */
978 static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
979 ret = rte_event_dev_xstats_get(evdev,
980 RTE_EVENT_DEV_XSTATS_DEVICE,
981 0, ids, values, num_stats);
982 for (i = 0; (signed int)i < ret; i++) {
983 if (expected_zero[i] != values[i]) {
985 "%d Error, xstat %d (id %d) %s : %"PRIu64
986 ", expect %"PRIu64"\n",
987 __LINE__, i, ids[i], xstats_names[i].name,
988 values[i], expected_zero[i]);
993 /* port reset checks */
994 num_stats = rte_event_dev_xstats_names_get(evdev,
995 RTE_EVENT_DEV_XSTATS_PORT, 0,
996 xstats_names, ids, XSTATS_MAX);
999 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1000 0, ids, values, num_stats);
1002 static const uint64_t port_expected[] = {
1007 0 /* avg pkt cycles */,
1009 0 /* rx ring used */,
1010 4096 /* rx ring free */,
1011 0 /* cq ring used */,
1012 32 /* cq ring free */,
1013 0 /* dequeue calls */,
1014 /* 10 dequeue burst buckets */
1018 if (ret != RTE_DIM(port_expected)) {
1020 "%s %d: wrong number of port stats (%d), expected %zu\n",
1021 __func__, __LINE__, ret, RTE_DIM(port_expected));
1024 for (i = 0; (signed int)i < ret; i++) {
1025 if (port_expected[i] != values[i]) {
1027 "%s : %d: Error stat %s is %"PRIu64
1028 ", expected %"PRIu64"\n",
1029 __func__, __LINE__, xstats_names[i].name,
1030 values[i], port_expected[i]);
1035 ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1038 /* ensure reset statistics are zero-ed */
1039 static const uint64_t port_expected_zero[] = {
1044 0 /* avg pkt cycles */,
1046 0 /* rx ring used */,
1047 4096 /* rx ring free */,
1048 0 /* cq ring used */,
1049 32 /* cq ring free */,
1050 0 /* dequeue calls */,
1051 /* 10 dequeue burst buckets */
1055 ret = rte_event_dev_xstats_get(evdev,
1056 RTE_EVENT_DEV_XSTATS_PORT,
1057 0, ids, values, num_stats);
1058 for (i = 0; (signed int)i < ret; i++) {
1059 if (port_expected_zero[i] != values[i]) {
1061 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1062 ", expect %"PRIu64"\n",
1063 __LINE__, i, ids[i], xstats_names[i].name,
1064 values[i], port_expected_zero[i]);
1069 /* QUEUE STATS TESTS */
1070 num_stats = rte_event_dev_xstats_names_get(evdev,
1071 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1072 xstats_names, ids, XSTATS_MAX);
1073 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1074 0, ids, values, num_stats);
1076 printf("xstats get returned %d\n", ret);
1079 if ((unsigned int)ret > XSTATS_MAX)
1080 printf("%s %d: more xstats available than space\n",
1081 __func__, __LINE__);
1083 static const uint64_t queue_expected[] = {
1088 0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
1089 /* QID-to-Port: pinned_flows, packets */
1095 for (i = 0; (signed int)i < ret; i++) {
1096 if (queue_expected[i] != values[i]) {
1098 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1099 ", expect %"PRIu64"\n",
1100 __LINE__, i, ids[i], xstats_names[i].name,
1101 values[i], queue_expected[i]);
1106 /* Reset the queue stats here */
1107 ret = rte_event_dev_xstats_reset(evdev,
1108 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1112 /* Verify that the resetable stats are reset, and others are not */
1113 static const uint64_t queue_expected_zero[] = {
1118 0, 0, 0, 0, /* 4 iq used */
1119 /* QID-to-Port: pinned_flows, packets */
1126 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1127 ids, values, num_stats);
1129 for (i = 0; (signed int)i < ret; i++) {
1130 if (queue_expected_zero[i] != values[i]) {
1132 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1133 ", expect %"PRIu64"\n",
1134 __LINE__, i, ids[i], xstats_names[i].name,
1135 values[i], queue_expected_zero[i]);
1140 printf("%d : %d of values were not as expected above\n",
1149 rte_event_dev_dump(0, stdout);
/* xstats_id_abuse_tests(): pass out-of-range port/queue ids (UINT8_MAX-1)
 * to the xstats names API and check it returns 0 stats instead of
 * crashing or over-reading. Listing elided; comments only.
 */
1156 xstats_id_abuse_tests(struct test *t)
1159 const uint32_t XSTATS_MAX = 1024;
1160 const uint32_t link_port = 2;
1162 uint32_t ids[XSTATS_MAX];
1163 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1165 /* Create instance with 4 ports */
1166 if (init(t, 1, 4) < 0 ||
1167 create_ports(t, 4) < 0 ||
1168 create_atomic_qids(t, 1) < 0) {
1169 printf("%d: Error initializing device\n", __LINE__);
1173 err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0);
1175 printf("%d: error mapping lb qid\n", __LINE__);
1179 if (rte_event_dev_start(evdev) < 0) {
1180 printf("%d: Error with start call\n", __LINE__);
1184 /* no test for device, as it ignores the port/q number */
1185 int num_stats = rte_event_dev_xstats_names_get(evdev,
1186 RTE_EVENT_DEV_XSTATS_PORT,
1187 UINT8_MAX-1, xstats_names, ids,
1189 if (num_stats != 0) {
1190 printf("%d: expected %d stats, got return %d\n", __LINE__,
1195 num_stats = rte_event_dev_xstats_names_get(evdev,
1196 RTE_EVENT_DEV_XSTATS_QUEUE,
1197 UINT8_MAX-1, xstats_names, ids,
1199 if (num_stats != 0) {
1200 printf("%d: expected %d stats, got return %d\n", __LINE__,
/* port_reconfig_credits(): repeatedly (NUM_ITERS times) reconfigure a
 * queue + port, start the device, push one packet through, and stop the
 * device again — verifying credits are not leaked across stop/start
 * reconfiguration cycles. Listing elided; comments only.
 */
1213 port_reconfig_credits(struct test *t)
1215 if (init(t, 1, 1) < 0) {
1216 printf("%d: Error initializing device\n", __LINE__);
1221 const uint32_t NUM_ITERS = 32;
1222 for (i = 0; i < NUM_ITERS; i++) {
1223 const struct rte_event_queue_conf conf = {
1224 .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1225 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1226 .nb_atomic_flows = 1024,
1227 .nb_atomic_order_sequences = 1024,
1229 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1230 printf("%d: error creating qid\n", __LINE__);
1235 static const struct rte_event_port_conf port_conf = {
1236 .new_event_threshold = 128,
1237 .dequeue_depth = 32,
1238 .enqueue_depth = 64,
1240 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1241 printf("%d Error setting up port\n", __LINE__);
/* link with NULL queue list = link all queues */
1245 int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
1247 printf("%d: error mapping lb qid\n", __LINE__);
1251 if (rte_event_dev_start(evdev) < 0) {
1252 printf("%d: Error with start call\n", __LINE__);
1256 const uint32_t NPKTS = 1;
1258 for (j = 0; j < NPKTS; j++) {
1259 struct rte_event ev;
1260 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1262 printf("%d: gen of pkt failed\n", __LINE__);
1265 ev.queue_id = t->qid[0];
1266 ev.op = RTE_EVENT_OP_NEW;
1268 int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
1270 printf("%d: Failed to enqueue\n", __LINE__);
1271 rte_event_dev_dump(0, stdout);
1276 rte_service_run_iter_on_app_lcore(t->service_id, 1);
1278 struct rte_event ev[NPKTS];
1279 int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
1282 printf("%d error; no packet dequeued\n", __LINE__);
1284 /* let cleanup below stop the device on last iter */
1285 if (i != NUM_ITERS-1)
1286 rte_event_dev_stop(evdev);
/* port_single_lb_reconfig(): on a device with one LB (atomic) queue and
 * one single-link queue, exercise link → unlink → relink of the LB
 * queue on port 0, and link the single-link queue to port 1, then
 * start. Checks the link bookkeeping survives reconfiguration.
 * Listing elided; comments only.
 */
1297 port_single_lb_reconfig(struct test *t)
1299 if (init(t, 2, 2) < 0) {
1300 printf("%d: Error initializing device\n", __LINE__);
1304 static const struct rte_event_queue_conf conf_lb_atomic = {
1305 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1306 .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1307 .nb_atomic_flows = 1024,
1308 .nb_atomic_order_sequences = 1024,
1310 if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
1311 printf("%d: error creating qid\n", __LINE__);
1315 static const struct rte_event_queue_conf conf_single_link = {
1316 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1317 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
1319 if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
1320 printf("%d: error creating qid\n", __LINE__);
1324 struct rte_event_port_conf port_conf = {
1325 .new_event_threshold = 128,
1326 .dequeue_depth = 32,
1327 .enqueue_depth = 64,
1329 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1330 printf("%d Error setting up port\n", __LINE__);
1333 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
1334 printf("%d Error setting up port\n", __LINE__);
1338 /* link port to lb queue */
1339 uint8_t queue_id = 0;
1340 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1341 printf("%d: error creating link for qid\n", __LINE__);
1345 int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
1347 printf("%d: Error unlinking lb port\n", __LINE__);
1352 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1353 printf("%d: error creating link for qid\n", __LINE__);
/* NOTE(review): queue_id appears reassigned to 1 in elided lines before
 * this link of port 1 — confirm against the full file.
 */
1358 int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
1360 printf("%d: error mapping lb qid\n", __LINE__);
1364 if (rte_event_dev_start(evdev) < 0) {
1365 printf("%d: Error with start call\n", __LINE__);
/* xstats_brute_force(): fuzz the xstats API — for every mode
 * (device/port/queue) and every id 0..UINT8_MAX-1, call both the names
 * and values getters, checking nothing crashes. Return values are
 * deliberately ignored. Listing elided; comments only.
 */
1377 xstats_brute_force(struct test *t)
1380 const uint32_t XSTATS_MAX = 1024;
1381 uint32_t ids[XSTATS_MAX];
1382 uint64_t values[XSTATS_MAX];
1383 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1386 /* Create instance with 4 ports */
1387 if (init(t, 1, 4) < 0 ||
1388 create_ports(t, 4) < 0 ||
1389 create_atomic_qids(t, 1) < 0) {
1390 printf("%d: Error initializing device\n", __LINE__);
1394 int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1396 printf("%d: error mapping lb qid\n", __LINE__);
1400 if (rte_event_dev_start(evdev) < 0) {
1401 printf("%d: Error with start call\n", __LINE__);
1405 for (i = 0; i < XSTATS_MAX; i++)
/* modes: DEVICE, PORT, QUEUE (consecutive enum values) */
1408 for (i = 0; i < 3; i++) {
1409 uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i;
1411 for (j = 0; j < UINT8_MAX; j++) {
1412 rte_event_dev_xstats_names_get(evdev, mode,
1413 j, xstats_names, ids, XSTATS_MAX);
1415 rte_event_dev_xstats_get(evdev, mode, j, ids,
1416 values, XSTATS_MAX);
/* Verify that xstats can be fetched by id and reset per-level
 * (device / port / queue): enqueue NPKTS events, check each stat name,
 * id and value against hard-coded expectations, reset, and re-check.
 * NOTE(review): interior lines are elided in this view; comments below
 * annotate only what the visible lines establish.
 */
1428 xstats_id_reset_tests(struct test *t)
1430 const int wrk_enq = 2;
1433 /* Create instance with 4 ports */
1434 if (init(t, 1, 4) < 0 ||
1435 create_ports(t, 4) < 0 ||
1436 create_atomic_qids(t, 1) < 0) {
1437 printf("%d: Error initializing device\n", __LINE__);
1441 /* CQ mapping to QID */
1442 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
1444 printf("%d: error mapping lb qid\n", __LINE__);
1448 if (rte_event_dev_start(evdev) < 0) {
1449 printf("%d: Error with start call\n", __LINE__);
/* Scratch arrays sized for the worst-case number of xstats. */
1453 #define XSTATS_MAX 1024
1456 uint32_t ids[XSTATS_MAX];
1457 uint64_t values[XSTATS_MAX];
1458 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1460 for (i = 0; i < XSTATS_MAX; i++)
/* Expected count of device-level stats; must track the PMD. */
1463 #define NUM_DEV_STATS 6
1464 /* Device names / values */
1465 int num_stats = rte_event_dev_xstats_names_get(evdev,
1466 RTE_EVENT_DEV_XSTATS_DEVICE,
1467 0, xstats_names, ids, XSTATS_MAX);
1468 if (num_stats != NUM_DEV_STATS) {
1469 printf("%d: expected %d stats, got return %d\n", __LINE__,
1470 NUM_DEV_STATS, num_stats);
1473 ret = rte_event_dev_xstats_get(evdev,
1474 RTE_EVENT_DEV_XSTATS_DEVICE,
1475 0, ids, values, num_stats);
1476 if (ret != NUM_DEV_STATS) {
1477 printf("%d: expected %d stats, got return %d\n", __LINE__,
1478 NUM_DEV_STATS, ret);
/* Inject NPKTS new events so the device counters become non-zero. */
1483 for (i = 0; i < NPKTS; i++) {
1484 struct rte_event ev;
1485 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1487 printf("%d: gen of pkt failed\n", __LINE__);
1490 ev.queue_id = t->qid[i];
1491 ev.op = RTE_EVENT_OP_NEW;
1495 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1497 printf("%d: Failed to enqueue\n", __LINE__);
/* Run one scheduler iteration so stats reflect the enqueues. */
1502 rte_service_run_iter_on_app_lcore(t->service_id, 1);
1504 static const char * const dev_names[] = {
1505 "dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
1506 "dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
1508 uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
1509 for (i = 0; (int)i < ret; i++) {
1511 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1515 printf("%d: %s id incorrect, expected %d got %d\n",
1516 __LINE__, dev_names[i], i, id);
1519 if (val != dev_expected[i]) {
1520 printf("%d: %s value incorrect, expected %"
/* NOTE(review): this prints 'id' with %d, but the mismatch being
 * reported is the uint64_t 'val' — looks like it should print
 * val with %"PRIu64". TODO confirm against upstream fix.
 */
1521 PRIu64" got %d\n", __LINE__, dev_names[i],
1522 dev_expected[i], id);
1526 int reset_ret = rte_event_dev_xstats_reset(evdev,
1527 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
1531 printf("%d: failed to reset successfully\n", __LINE__);
/* After reset, every device stat is expected to read zero. */
1534 dev_expected[i] = 0;
1535 /* check value again */
1536 val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], 0);
1537 if (val != dev_expected[i]) {
1538 printf("%d: %s value incorrect, expected %"PRIu64
1539 " got %"PRIu64"\n", __LINE__, dev_names[i],
1540 dev_expected[i], val);
1545 /* 48 is stat offset from start of the devices whole xstats.
1546 * This WILL break every time we add a statistic to a port
1547 * or the device, but there is no other way to test
1550 /* num stats for the tested port. CQ size adds more stats to a port */
1551 #define NUM_PORT_STATS 21
1552 /* the port to test. */
1554 num_stats = rte_event_dev_xstats_names_get(evdev,
1555 RTE_EVENT_DEV_XSTATS_PORT, PORT,
1556 xstats_names, ids, XSTATS_MAX);
1557 if (num_stats != NUM_PORT_STATS) {
1558 printf("%d: expected %d stats, got return %d\n",
1559 __LINE__, NUM_PORT_STATS, num_stats);
1562 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT,
1563 ids, values, num_stats);
1565 if (ret != NUM_PORT_STATS) {
1566 printf("%d: expected %d stats, got return %d\n",
1567 __LINE__, NUM_PORT_STATS, ret);
/* Names and expected values for port 2's stats, in PMD order. */
1570 static const char * const port_names[] = {
1575 "port_2_avg_pkt_cycles",
1577 "port_2_rx_ring_used",
1578 "port_2_rx_ring_free",
1579 "port_2_cq_ring_used",
1580 "port_2_cq_ring_free",
1581 "port_2_dequeue_calls",
1582 "port_2_dequeues_returning_0",
1583 "port_2_dequeues_returning_1-4",
1584 "port_2_dequeues_returning_5-8",
1585 "port_2_dequeues_returning_9-12",
1586 "port_2_dequeues_returning_13-16",
1587 "port_2_dequeues_returning_17-20",
1588 "port_2_dequeues_returning_21-24",
1589 "port_2_dequeues_returning_25-28",
1590 "port_2_dequeues_returning_29-32",
1591 "port_2_dequeues_returning_33-36",
1593 uint64_t port_expected[] = {
1597 NPKTS, /* inflight */
1598 0, /* avg pkt cycles */
1600 0, /* rx ring used */
1601 4096, /* rx ring free */
1602 NPKTS, /* cq ring used */
1603 25, /* cq ring free */
1604 0, /* dequeue zero calls */
1605 0, 0, 0, 0, 0, /* 10 dequeue buckets */
/* Expected values after reset: only the ring/inflight gauges persist. */
1608 uint64_t port_expected_zero[] = {
1612 NPKTS, /* inflight */
1613 0, /* avg pkt cycles */
1615 0, /* rx ring used */
1616 4096, /* rx ring free */
1617 NPKTS, /* cq ring used */
1618 25, /* cq ring free */
1619 0, /* dequeue zero calls */
1620 0, 0, 0, 0, 0, /* 10 dequeue buckets */
/* Guard: keep the arrays in sync with NUM_PORT_STATS. */
1623 if (RTE_DIM(port_expected) != NUM_PORT_STATS ||
1624 RTE_DIM(port_names) != NUM_PORT_STATS) {
1625 printf("%d: port array of wrong size\n", __LINE__);
1630 for (i = 0; (int)i < ret; i++) {
1632 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1635 if (id != i + PORT_OFF) {
1636 printf("%d: %s id incorrect, expected %d got %d\n",
1637 __LINE__, port_names[i], i+PORT_OFF,
1641 if (val != port_expected[i]) {
1642 printf("%d: %s value incorrect, expected %"PRIu64
/* NOTE(review): prints 'id' with %d where the mismatching
 * uint64_t 'val' appears intended — TODO confirm.
 */
1643 " got %d\n", __LINE__, port_names[i],
1644 port_expected[i], id);
1648 int reset_ret = rte_event_dev_xstats_reset(evdev,
1649 RTE_EVENT_DEV_XSTATS_PORT, PORT,
1653 printf("%d: failed to reset successfully\n", __LINE__);
1656 /* check value again */
1657 val = rte_event_dev_xstats_by_name_get(evdev, port_names[i], 0);
1658 if (val != port_expected_zero[i]) {
1659 printf("%d: %s value incorrect, expected %"PRIu64
1660 " got %"PRIu64"\n", __LINE__, port_names[i],
1661 port_expected_zero[i], val);
1668 /* num queue stats */
1669 #define NUM_Q_STATS 16
1670 /* queue offset from start of the devices whole xstats.
1671 * This will break every time we add a statistic to a device/port/queue
1673 #define QUEUE_OFF 90
1674 const uint32_t queue = 0;
1675 num_stats = rte_event_dev_xstats_names_get(evdev,
1676 RTE_EVENT_DEV_XSTATS_QUEUE, queue,
1677 xstats_names, ids, XSTATS_MAX);
1678 if (num_stats != NUM_Q_STATS) {
1679 printf("%d: expected %d stats, got return %d\n",
1680 __LINE__, NUM_Q_STATS, num_stats);
1683 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1684 queue, ids, values, num_stats);
1685 if (ret != NUM_Q_STATS) {
/* NOTE(review): message says "expected 21" but NUM_Q_STATS is 16 —
 * stale copy-paste from the port section; TODO confirm.
 */
1686 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
1689 static const char * const queue_names[] = {
1698 "qid_0_port_0_pinned_flows",
1699 "qid_0_port_0_packets",
1700 "qid_0_port_1_pinned_flows",
1701 "qid_0_port_1_packets",
1702 "qid_0_port_2_pinned_flows",
1703 "qid_0_port_2_packets",
1704 "qid_0_port_3_pinned_flows",
1705 "qid_0_port_3_packets",
1707 uint64_t queue_expected[] = {
1716 /* QID-to-Port: pinned_flows, packets */
1722 uint64_t queue_expected_zero[] = {
1731 /* QID-to-Port: pinned_flows, packets */
/* Guard: keep all three queue arrays in sync with NUM_Q_STATS. */
1737 if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
1738 RTE_DIM(queue_expected_zero) != NUM_Q_STATS ||
1739 RTE_DIM(queue_names) != NUM_Q_STATS) {
1740 printf("%d : queue array of wrong size\n", __LINE__);
1745 for (i = 0; (int)i < ret; i++) {
1747 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1750 if (id != i + QUEUE_OFF) {
1751 printf("%d: %s id incorrect, expected %d got %d\n",
1752 __LINE__, queue_names[i], i+QUEUE_OFF,
1756 if (val != queue_expected[i]) {
/* NOTE(review): argument order is (i, __LINE__) while every
 * other message in this file prints __LINE__ first — the "%d: %d:"
 * prefix here appears transposed; TODO confirm intent.
 */
1757 printf("%d: %d: %s value , expected %"PRIu64
1758 " got %"PRIu64"\n", i, __LINE__,
1759 queue_names[i], queue_expected[i], val);
1763 int reset_ret = rte_event_dev_xstats_reset(evdev,
1764 RTE_EVENT_DEV_XSTATS_QUEUE,
1767 printf("%d: failed to reset successfully\n", __LINE__);
1770 /* check value again */
1771 val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i],
1773 if (val != queue_expected_zero[i]) {
1774 printf("%d: %s value incorrect, expected %"PRIu64
1775 " got %"PRIu64"\n", __LINE__, queue_names[i],
1776 queue_expected_zero[i], val);
/* Verify an ordered queue can be set up twice on the same queue id
 * (reconfiguration) and that the device still starts afterwards.
 */
1792 ordered_reconfigure(struct test *t)
1794 if (init(t, 1, 1) < 0 ||
1795 create_ports(t, 1) < 0) {
1796 printf("%d: Error initializing device\n", __LINE__);
1800 const struct rte_event_queue_conf conf = {
1801 .schedule_type = RTE_SCHED_TYPE_ORDERED,
1802 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1803 .nb_atomic_flows = 1024,
1804 .nb_atomic_order_sequences = 1024,
1807 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1808 printf("%d: error creating qid\n", __LINE__);
/* Second setup of queue 0 exercises the reconfigure path. */
1812 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1813 printf("%d: error creating qid, for 2nd time\n", __LINE__);
1817 rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1818 if (rte_event_dev_start(evdev) < 0) {
1819 printf("%d: Error with start call\n", __LINE__);
/* Check that dequeue order follows QID priority, not enqueue order. */
1831 qid_priorities(struct test *t)
1833 /* Test works by having a CQ with enough empty space for all packets,
1834 * and enqueueing 3 packets to 3 QIDs. They must return based on the
1835 * priority of the QID, not the ingress order, to pass the test
1838 /* Create instance with 1 ports, and 3 qids */
1839 if (init(t, 3, 1) < 0 ||
1840 create_ports(t, 1) < 0) {
1841 printf("%d: Error initializing device\n", __LINE__);
1845 for (i = 0; i < 3; i++) {
1847 const struct rte_event_queue_conf conf = {
1848 .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1849 /* increase priority (0 == highest), as we go */
1850 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
1851 .nb_atomic_flows = 1024,
1852 .nb_atomic_order_sequences = 1024,
1855 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
1856 printf("%d: error creating qid %d\n", __LINE__, i);
1862 /* map all QIDs to port */
1863 rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1865 if (rte_event_dev_start(evdev) < 0) {
1866 printf("%d: Error with start call\n", __LINE__);
1870 /* enqueue 3 packets, setting seqn and QID to check priority */
1871 for (i = 0; i < 3; i++) {
1872 struct rte_event ev;
1873 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1875 printf("%d: gen of pkt failed\n", __LINE__);
1878 ev.queue_id = t->qid[i];
1879 ev.op = RTE_EVENT_OP_NEW;
1883 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1885 printf("%d: Failed to enqueue\n", __LINE__);
/* One scheduler pass moves all events to the single CQ. */
1890 rte_service_run_iter_on_app_lcore(t->service_id, 1);
1892 /* dequeue packets, verify priority was upheld */
1893 struct rte_event ev[32];
1895 rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
1896 if (deq_pkts != 3) {
1897 printf("%d: failed to deq packets\n", __LINE__);
1898 rte_event_dev_dump(evdev, stdout);
/* Highest-priority QID was enqueued last, so seqn must be 2,1,0. */
1901 for (i = 0; i < 3; i++) {
1902 if (ev[i].mbuf->seqn != 2-i) {
1904 "%d: qid priority test: seqn %d incorrectly prioritized\n",
/* Verify rte_event_port_unlinks_in_progress(): pending unlinks read 3
 * before the scheduler runs, and 0 once the scheduler has acked them.
 */
1914 unlink_in_progress(struct test *t)
1916 /* Test unlinking API, in particular that when an unlink request has
1917 * not yet been seen by the scheduler thread, that the
1918 * unlink_in_progress() function returns the number of unlinks.
1921 /* Create instance with 1 ports, and 3 qids */
1922 if (init(t, 3, 1) < 0 ||
1923 create_ports(t, 1) < 0) {
1924 printf("%d: Error initializing device\n", __LINE__);
1928 for (i = 0; i < 3; i++) {
1930 const struct rte_event_queue_conf conf = {
1931 .schedule_type = RTE_SCHED_TYPE_ATOMIC,
1932 /* increase priority (0 == highest), as we go */
1933 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
1934 .nb_atomic_flows = 1024,
1935 .nb_atomic_order_sequences = 1024,
1938 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
1939 printf("%d: error creating qid %d\n", __LINE__, i);
1945 /* map all QIDs to port */
1946 rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1948 if (rte_event_dev_start(evdev) < 0) {
1949 printf("%d: Error with start call\n", __LINE__);
1953 /* unlink all ports to have outstanding unlink requests */
1954 int ret = rte_event_port_unlink(evdev, t->port[0], NULL, 0);
1956 printf("%d: Failed to unlink queues\n", __LINE__);
1960 /* get active unlinks here, expect 3 */
1961 int unlinks_in_progress =
1962 rte_event_port_unlinks_in_progress(evdev, t->port[0]);
1963 if (unlinks_in_progress != 3) {
1964 printf("%d: Expected num unlinks in progress == 3, got %d\n",
1965 __LINE__, unlinks_in_progress);
1969 /* run scheduler service on this thread to ack the unlinks */
1970 rte_service_run_iter_on_app_lcore(t->service_id, 1);
1972 /* active unlinks expected as 0 as scheduler thread has acked */
1973 unlinks_in_progress =
1974 rte_event_port_unlinks_in_progress(evdev, t->port[0]);
1975 if (unlinks_in_progress != 0) {
1976 printf("%d: Expected num unlinks in progress == 0, got %d\n",
1977 __LINE__, unlinks_in_progress);
/* Check atomic load-balancing: flows 0/1/2 must be pinned to the three
 * worker CQs and the per-port inflight counts must match expectations.
 */
1985 load_balancing(struct test *t)
1987 const int rx_enq = 0;
1991 if (init(t, 1, 4) < 0 ||
1992 create_ports(t, 4) < 0 ||
1993 create_atomic_qids(t, 1) < 0) {
1994 printf("%d: Error initializing device\n", __LINE__);
1998 for (i = 0; i < 3; i++) {
1999 /* map port 1 - 3 inclusive */
2000 if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
2002 printf("%d: error mapping qid to port %d\n",
2008 if (rte_event_dev_start(evdev) < 0) {
2009 printf("%d: Error with start call\n", __LINE__);
2013 /************** FORWARD ****************/
2015 * Create a set of flows that test the load-balancing operation of the
2016 * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
2017 * with a new flow, which should be sent to the 3rd mapped CQ
2019 static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
2021 for (i = 0; i < RTE_DIM(flows); i++) {
2022 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2024 printf("%d: gen of pkt failed\n", __LINE__);
2028 struct rte_event ev = {
2029 .op = RTE_EVENT_OP_NEW,
2030 .queue_id = t->qid[0],
2031 .flow_id = flows[i],
2034 /* generate pkt and enqueue */
2035 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2037 printf("%d: Failed to enqueue\n", __LINE__);
2042 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2044 struct test_event_dev_stats stats;
2045 err = test_event_dev_stats_get(evdev, &stats);
2047 printf("%d: failed to get stats\n", __LINE__);
/* Per the flows[] pattern: 4 events of flow 0, 2 of flow 1, 3 of
 * flow 2 — each flow pinned to one worker port.
 */
2051 if (stats.port_inflight[1] != 4) {
2052 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2056 if (stats.port_inflight[2] != 2) {
2057 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2061 if (stats.port_inflight[3] != 3) {
2062 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
/* Check that a completed flow can migrate to a different CQ: drain
 * flow 0 from CQ0, release it, then verify a second wave of flows is
 * balanced onto the now-least-loaded CQs.
 */
2072 load_balancing_history(struct test *t)
2074 struct test_event_dev_stats stats = {0};
2075 const int rx_enq = 0;
2079 /* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
2080 if (init(t, 1, 4) < 0 ||
2081 create_ports(t, 4) < 0 ||
2082 create_atomic_qids(t, 1) < 0)
2085 /* CQ mapping to QID */
2086 if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
2087 printf("%d: error mapping port 1 qid\n", __LINE__);
2090 if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
2091 printf("%d: error mapping port 2 qid\n", __LINE__);
2094 if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
2095 printf("%d: error mapping port 3 qid\n", __LINE__);
2098 if (rte_event_dev_start(evdev) < 0) {
2099 printf("%d: Error with start call\n", __LINE__);
2104 * Create a set of flows that test the load-balancing operation of the
2105 * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
2106 * the packet from CQ 0, send in a new set of flows. Ensure that:
2107 * 1. The new flow 3 gets into the empty CQ0
2108 * 2. packets for existing flow gets added into CQ1
2109 * 3. Next flow 0 pkt is now onto CQ2, since CQ0 and CQ1 now contain
2110 * more outstanding pkts
2112 * This test makes sure that when a flow ends (i.e. all packets
2113 * have been completed for that flow), that the flow can be moved
2114 * to a different CQ when new packets come in for that flow.
2116 static uint32_t flows1[] = {0, 1, 1, 2};
2118 for (i = 0; i < RTE_DIM(flows1); i++) {
2119 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2120 struct rte_event ev = {
2121 .flow_id = flows1[i],
2122 .op = RTE_EVENT_OP_NEW,
2123 .queue_id = t->qid[0],
2124 .event_type = RTE_EVENT_TYPE_CPU,
2125 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2130 printf("%d: gen of pkt failed\n", __LINE__);
/* Stash the flow id in the mbuf so it can be checked at dequeue. */
2133 arp->hash.rss = flows1[i];
2134 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2136 printf("%d: Failed to enqueue\n", __LINE__);
2141 /* call the scheduler */
2142 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2144 /* Dequeue the flow 0 packet from port 1, so that we can then drop */
2145 struct rte_event ev;
2146 if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
2147 printf("%d: failed to dequeue\n", __LINE__);
2150 if (ev.mbuf->hash.rss != flows1[0]) {
2151 printf("%d: unexpected flow received\n", __LINE__);
2155 /* drop the flow 0 packet from port 1 */
2156 rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
2158 /* call the scheduler */
2159 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2162 * Set up the next set of flows, first a new flow to fill up
2163 * CQ 0, so that the next flow 0 packet should go to CQ2
2165 static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
2167 for (i = 0; i < RTE_DIM(flows2); i++) {
2168 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2169 struct rte_event ev = {
2170 .flow_id = flows2[i],
2171 .op = RTE_EVENT_OP_NEW,
2172 .queue_id = t->qid[0],
2173 .event_type = RTE_EVENT_TYPE_CPU,
2174 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2179 printf("%d: gen of pkt failed\n", __LINE__);
2182 arp->hash.rss = flows2[i];
2184 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2186 printf("%d: Failed to enqueue\n", __LINE__);
2192 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2194 err = test_event_dev_stats_get(evdev, &stats);
2196 printf("%d:failed to get stats\n", __LINE__);
2201 * Now check the resulting inflights on each port.
2203 if (stats.port_inflight[1] != 3) {
2204 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2206 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2207 (unsigned int)stats.port_inflight[1],
2208 (unsigned int)stats.port_inflight[2],
2209 (unsigned int)stats.port_inflight[3]);
2212 if (stats.port_inflight[2] != 4) {
2213 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2215 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2216 (unsigned int)stats.port_inflight[1],
2217 (unsigned int)stats.port_inflight[2],
2218 (unsigned int)stats.port_inflight[3]);
2221 if (stats.port_inflight[3] != 2) {
2222 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2224 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2225 (unsigned int)stats.port_inflight[1],
2226 (unsigned int)stats.port_inflight[2],
2227 (unsigned int)stats.port_inflight[3]);
/* Drain and release everything so teardown sees zero inflights. */
2231 for (i = 1; i <= 3; i++) {
2232 struct rte_event ev;
2233 while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
2234 rte_event_enqueue_burst(evdev, i, &release_ev, 1);
2236 rte_service_run_iter_on_app_lcore(t->service_id, 1);
/* Enqueue an event with an out-of-range queue_id and verify it is
 * counted once as a port-level rx drop (not inflight, not a dev drop).
 */
2243 invalid_qid(struct test *t)
2245 struct test_event_dev_stats stats;
2246 const int rx_enq = 0;
2250 if (init(t, 1, 4) < 0 ||
2251 create_ports(t, 4) < 0 ||
2252 create_atomic_qids(t, 1) < 0) {
2253 printf("%d: Error initializing device\n", __LINE__);
2257 /* CQ mapping to QID */
2258 for (i = 0; i < 4; i++) {
2259 err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
2262 printf("%d: error mapping port 1 qid\n", __LINE__);
2267 if (rte_event_dev_start(evdev) < 0) {
2268 printf("%d: Error with start call\n", __LINE__);
2273 * Send in a packet with an invalid qid to the scheduler.
2274 * We should see the packed enqueued OK, but the inflights for
2275 * that packet should not be incremented, and the rx_dropped
2276 * should be incremented.
2278 static uint32_t flows1[] = {20};
2280 for (i = 0; i < RTE_DIM(flows1); i++) {
2281 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2283 printf("%d: gen of pkt failed\n", __LINE__);
2287 struct rte_event ev = {
2288 .op = RTE_EVENT_OP_NEW,
/* qid[0] + 20 is deliberately beyond the single configured QID. */
2289 .queue_id = t->qid[0] + flows1[i],
2293 /* generate pkt and enqueue */
2294 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2296 printf("%d: Failed to enqueue\n", __LINE__);
2301 /* call the scheduler */
2302 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2304 err = test_event_dev_stats_get(evdev, &stats);
2306 printf("%d: failed to get stats\n", __LINE__);
2311 * Now check the resulting inflights on the port, and the rx_dropped.
2313 if (stats.port_inflight[0] != 0) {
2314 printf("%d:%s: port 1 inflight count not correct\n", __LINE__,
2316 rte_event_dev_dump(evdev, stdout);
2319 if (stats.port_rx_dropped[0] != 1) {
2320 printf("%d:%s: port 1 drops\n", __LINE__, __func__);
2321 rte_event_dev_dump(evdev, stdout);
2324 /* each packet drop should only be counted in one place - port or dev */
2325 if (stats.rx_dropped != 0) {
2326 printf("%d:%s: port 1 dropped count not correct\n", __LINE__,
2328 rte_event_dev_dump(evdev, stdout);
/* End-to-end single-event path: enqueue one tagged mbuf, schedule,
 * dequeue on the worker port, release, and verify stats at each step.
 */
2337 single_packet(struct test *t)
2339 const uint32_t MAGIC_SEQN = 7321;
2340 struct rte_event ev;
2341 struct test_event_dev_stats stats;
2342 const int rx_enq = 0;
2343 const int wrk_enq = 2;
2346 /* Create instance with 4 ports */
2347 if (init(t, 1, 4) < 0 ||
2348 create_ports(t, 4) < 0 ||
2349 create_atomic_qids(t, 1) < 0) {
2350 printf("%d: Error initializing device\n", __LINE__);
2354 /* CQ mapping to QID */
2355 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
2357 printf("%d: error mapping lb qid\n", __LINE__);
2362 if (rte_event_dev_start(evdev) < 0) {
2363 printf("%d: Error with start call\n", __LINE__);
2367 /************** Gen pkt and enqueue ****************/
2368 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2370 printf("%d: gen of pkt failed\n", __LINE__);
2374 ev.op = RTE_EVENT_OP_NEW;
2375 ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
/* Tag the mbuf so the dequeued event can be identified. */
2379 arp->seqn = MAGIC_SEQN;
2381 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2383 printf("%d: Failed to enqueue\n", __LINE__);
2387 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2389 err = test_event_dev_stats_get(evdev, &stats);
2391 printf("%d: failed to get stats\n", __LINE__);
2395 if (stats.rx_pkts != 1 ||
2396 stats.tx_pkts != 1 ||
2397 stats.port_inflight[wrk_enq] != 1) {
2398 printf("%d: Sched core didn't handle pkt as expected\n",
2400 rte_event_dev_dump(evdev, stdout);
2406 deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
2408 printf("%d: Failed to deq\n", __LINE__);
2412 err = test_event_dev_stats_get(evdev, &stats);
2414 printf("%d: failed to get stats\n", __LINE__);
/* NOTE(review): this second stats_get looks redundant — its result is
 * overwritten at line "2432" below without being checked; verify.
 */
2418 err = test_event_dev_stats_get(evdev, &stats);
2419 if (ev.mbuf->seqn != MAGIC_SEQN) {
2420 printf("%d: magic sequence number not dequeued\n", __LINE__);
2424 rte_pktmbuf_free(ev.mbuf);
2425 err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
2427 printf("%d: Failed to enqueue\n", __LINE__);
2430 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2432 err = test_event_dev_stats_get(evdev, &stats);
2433 if (stats.port_inflight[wrk_enq] != 0) {
2434 printf("%d: port inflight not correct\n", __LINE__);
/* Verify per-port inflight accounting across two QIDs: counts rise on
 * enqueue, persist across dequeue, and drop to zero only on RELEASE.
 */
2443 inflight_counts(struct test *t)
2445 struct rte_event ev;
2446 struct test_event_dev_stats stats;
2447 const int rx_enq = 0;
2453 /* Create instance with 4 ports */
2454 if (init(t, 2, 3) < 0 ||
2455 create_ports(t, 3) < 0 ||
2456 create_atomic_qids(t, 2) < 0) {
2457 printf("%d: Error initializing device\n", __LINE__);
2461 /* CQ mapping to QID */
2462 err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
2464 printf("%d: error mapping lb qid\n", __LINE__);
2468 err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
2470 printf("%d: error mapping lb qid\n", __LINE__);
2475 if (rte_event_dev_start(evdev) < 0) {
2476 printf("%d: Error with start call\n", __LINE__);
2480 /************** FORWARD ****************/
2482 for (i = 0; i < QID1_NUM; i++) {
2483 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2486 printf("%d: gen of pkt failed\n", __LINE__);
2490 ev.queue_id = t->qid[0];
2491 ev.op = RTE_EVENT_OP_NEW;
2493 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2495 printf("%d: Failed to enqueue\n", __LINE__);
2500 for (i = 0; i < QID2_NUM; i++) {
2501 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2504 printf("%d: gen of pkt failed\n", __LINE__);
2507 ev.queue_id = t->qid[1];
2508 ev.op = RTE_EVENT_OP_NEW;
2510 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2512 printf("%d: Failed to enqueue\n", __LINE__);
2518 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2520 err = test_event_dev_stats_get(evdev, &stats);
2522 printf("%d: failed to get stats\n", __LINE__);
2526 if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
2527 stats.tx_pkts != QID1_NUM + QID2_NUM) {
2528 printf("%d: Sched core didn't handle pkt as expected\n",
2533 if (stats.port_inflight[p1] != QID1_NUM) {
2534 printf("%d: %s port 1 inflight not correct\n", __LINE__,
2538 if (stats.port_inflight[p2] != QID2_NUM) {
2539 printf("%d: %s port 2 inflight not correct\n", __LINE__,
2544 /************** DEQUEUE INFLIGHT COUNT CHECKS ****************/
2546 struct rte_event events[QID1_NUM + QID2_NUM];
2547 uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
2548 RTE_DIM(events), 0);
2550 if (deq_pkts != QID1_NUM) {
2551 printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
/* Dequeue alone must NOT decrement inflight — only release does. */
2554 err = test_event_dev_stats_get(evdev, &stats);
2555 if (stats.port_inflight[p1] != QID1_NUM) {
2556 printf("%d: port 1 inflight decrement after DEQ != 0\n",
2560 for (i = 0; i < QID1_NUM; i++) {
2561 err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
2564 printf("%d: %s rte enqueue of inf release failed\n",
2565 __LINE__, __func__);
2571 * As the scheduler core decrements inflights, it needs to run to
2572 * process packets to act on the drop messages
2574 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2576 err = test_event_dev_stats_get(evdev, &stats);
2577 if (stats.port_inflight[p1] != 0) {
2578 printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
2583 deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
2584 RTE_DIM(events), 0);
2585 if (deq_pkts != QID2_NUM) {
2586 printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
2589 err = test_event_dev_stats_get(evdev, &stats);
2590 if (stats.port_inflight[p2] != QID2_NUM) {
/* NOTE(review): message says "port 1" but this check is on p2 —
 * looks like a copy-paste slip in the diagnostic text.
 */
2591 printf("%d: port 1 inflight decrement after DEQ != 0\n",
2595 for (i = 0; i < QID2_NUM; i++) {
2596 err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
2599 printf("%d: %s rte enqueue of inf release failed\n",
2600 __LINE__, __func__);
2606 * As the scheduler core decrements inflights, it needs to run to
2607 * process packets to act on the drop messages
2609 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2611 err = test_event_dev_stats_get(evdev, &stats);
2612 if (stats.port_inflight[p2] != 0) {
2613 printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
2620 rte_event_dev_dump(evdev, stdout);
/* Shared body for ordered/unordered tests: 3 workers on one parallel
 * qid forward events in reverse order to a directed tx port; with
 * check_order set, the tx dequeue must restore the original sequence.
 */
2626 parallel_basic(struct test *t, int check_order)
2628 const uint8_t rx_port = 0;
2629 const uint8_t w1_port = 1;
2630 const uint8_t w3_port = 3;
2631 const uint8_t tx_port = 4;
2634 uint32_t deq_pkts, j;
2635 struct rte_mbuf *mbufs[3];
2636 struct rte_mbuf *mbufs_out[3] = { 0 };
2637 const uint32_t MAGIC_SEQN = 1234;
2639 /* Create instance with 4 ports */
2640 if (init(t, 2, tx_port + 1) < 0 ||
2641 create_ports(t, tx_port + 1) < 0 ||
2642 (check_order ? create_ordered_qids(t, 1) :
2643 create_unordered_qids(t, 1)) < 0 ||
2644 create_directed_qids(t, 1, &tx_port)) {
2645 printf("%d: Error initializing device\n", __LINE__);
2651 * We need three ports, all mapped to the same ordered qid0. Then we'll
2652 * take a packet out to each port, re-enqueue in reverse order,
2653 * then make sure the reordering has taken place properly when we
2654 * dequeue from the tx_port.
2656 * Simplified test setup diagram:
2660 * qid0 - w2_port - qid1
2664 /* CQ mapping to QID for LB ports (directed mapped on create) */
2665 for (i = w1_port; i <= w3_port; i++) {
2666 err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
2669 printf("%d: error mapping lb qid\n", __LINE__);
2675 if (rte_event_dev_start(evdev) < 0) {
2676 printf("%d: Error with start call\n", __LINE__);
2680 /* Enqueue 3 packets to the rx port */
2681 for (i = 0; i < 3; i++) {
2682 struct rte_event ev;
2683 mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
2685 printf("%d: gen of pkt failed\n", __LINE__);
2689 ev.queue_id = t->qid[0];
2690 ev.op = RTE_EVENT_OP_NEW;
/* seqn encodes original order so reordering can be detected. */
2692 mbufs[i]->seqn = MAGIC_SEQN + i;
2694 /* generate pkt and enqueue */
2695 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
2697 printf("%d: Failed to enqueue pkt %u, retval = %u\n",
2703 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2705 /* use extra slot to make logic in loops easier */
2706 struct rte_event deq_ev[w3_port + 1];
2708 /* Dequeue the 3 packets, one from each worker port */
2709 for (i = w1_port; i <= w3_port; i++) {
2710 deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
2712 if (deq_pkts != 1) {
2713 printf("%d: Failed to deq\n", __LINE__);
2714 rte_event_dev_dump(evdev, stdout);
2719 /* Enqueue each packet in reverse order, flushing after each one */
2720 for (i = w3_port; i >= w1_port; i--) {
2722 deq_ev[i].op = RTE_EVENT_OP_FORWARD;
2723 deq_ev[i].queue_id = t->qid[1];
2724 err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
2726 printf("%d: Failed to enqueue\n", __LINE__);
2730 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2732 /* dequeue from the tx ports, we should get 3 packets */
2733 deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
2736 /* Check to see if we've got all 3 packets */
2737 if (deq_pkts != 3) {
2738 printf("%d: expected 3 pkts at tx port got %d from port %d\n",
2739 __LINE__, deq_pkts, tx_port);
2740 rte_event_dev_dump(evdev, stdout);
2744 /* Check to see if the sequence numbers are in expected order */
2746 for (j = 0 ; j < deq_pkts ; j++) {
2747 if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
2749 "%d: Incorrect sequence number(%d) from port %d\n",
/* NOTE(review): the check above reads deq_ev[j].mbuf->seqn but this
 * diagnostic prints mbufs_out[j]->seqn — mbufs_out is zero-initialized
 * in this view, so this may dereference NULL; verify upstream.
 */
2750 __LINE__, mbufs_out[j]->seqn, tx_port);
2756 /* Destroy the instance */
/* Ordered-queue variant of the parallel test (check_order = 1). */
2762 ordered_basic(struct test *t)
2764 return parallel_basic(t, 1);
/* Unordered (parallel) variant of the test (check_order = 0). */
2768 unordered_basic(struct test *t)
2770 return parallel_basic(t, 0);
/* Fill one port's CQ with a single atomic flow, then prove that a
 * different flow can still be scheduled past the blocked one (no
 * head-of-line blocking), using cq_ring/iq xstats as the oracle.
 */
2774 holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
2776 const struct rte_event new_ev = {
2777 .op = RTE_EVENT_OP_NEW
2778 /* all other fields zero */
2780 struct rte_event ev = new_ev;
2781 unsigned int rx_port = 0; /* port we get the first flow on */
2782 char rx_port_used_stat[64];
2783 char rx_port_free_stat[64];
2784 char other_port_used_stat[64];
2786 if (init(t, 1, 2) < 0 ||
2787 create_ports(t, 2) < 0 ||
2788 create_atomic_qids(t, 1) < 0) {
2789 printf("%d: Error initializing device\n", __LINE__);
2792 int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2793 if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 ||
2795 printf("%d: Error links queue to ports\n", __LINE__);
2798 if (rte_event_dev_start(evdev) < 0) {
2799 printf("%d: Error with start call\n", __LINE__);
2803 /* send one packet and see where it goes, port 0 or 1 */
2804 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2805 printf("%d: Error doing first enqueue\n", __LINE__);
2808 rte_service_run_iter_on_app_lcore(t->service_id, 1);
/* Detect which port the scheduler picked for the first flow. */
2810 if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
2814 snprintf(rx_port_used_stat, sizeof(rx_port_used_stat),
2815 "port_%u_cq_ring_used", rx_port);
2816 snprintf(rx_port_free_stat, sizeof(rx_port_free_stat),
2817 "port_%u_cq_ring_free", rx_port);
2818 snprintf(other_port_used_stat, sizeof(other_port_used_stat),
2819 "port_%u_cq_ring_used", rx_port ^ 1);
2820 if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL)
2822 printf("%d: Error, first event not scheduled\n", __LINE__);
2826 /* now fill up the rx port's queue with one flow to cause HOLB */
2829 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2830 printf("%d: Error with enqueue\n", __LINE__);
2833 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2834 } while (rte_event_dev_xstats_by_name_get(evdev,
2835 rx_port_free_stat, NULL) != 0);
2837 /* one more packet, which needs to stay in IQ - i.e. HOLB */
2839 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2840 printf("%d: Error with enqueue\n", __LINE__);
2843 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2845 /* check that the other port still has an empty CQ */
2846 if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2848 printf("%d: Error, second port CQ is not empty\n", __LINE__);
2851 /* check IQ now has one packet */
2852 if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2854 printf("%d: Error, QID does not have exactly 1 packet\n",
2859 /* send another flow, which should pass the other IQ entry */
2862 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2863 printf("%d: Error with enqueue\n", __LINE__);
2866 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2868 if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2870 printf("%d: Error, second flow did not pass out first\n",
2875 if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2877 printf("%d: Error, QID does not have exactly 1 packet\n",
2884 rte_event_dev_dump(evdev, stdout);
/* Stop-flush callback: counts (via *arg) events carrying the marker
 * value 0xCA11BACC that were still queued when the device stopped.
 */
2890 flush(uint8_t dev_id __rte_unused, struct rte_event event, void *arg)
2892 *((uint8_t *) arg) += (event.u64 == 0xCA11BACC) ? 1 : 0;
/* Register a stop-flush callback, stop the device with events still
 * queued, and verify the callback ran (then unregister it).
 */
2896 dev_stop_flush(struct test *t) /* test to check we can properly flush events */
2898 const struct rte_event new_ev = {
2899 .op = RTE_EVENT_OP_NEW,
2903 struct rte_event ev = new_ev;
2907 if (init(t, 1, 1) < 0 ||
2908 create_ports(t, 1) < 0 ||
2909 create_atomic_qids(t, 1) < 0) {
2910 printf("%d: Error initializing device\n", __LINE__);
2914 /* Link the queue so *_start() doesn't error out */
2915 if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1) {
2916 printf("%d: Error linking queue to port\n", __LINE__);
2920 if (rte_event_dev_start(evdev) < 0) {
2921 printf("%d: Error with start call\n", __LINE__);
/* Overfill by one relative to the dequeue depth so at least one
 * event cannot reach a CQ and must be flushed on stop.
 */
2925 for (i = 0; i < DEQUEUE_DEPTH + 1; i++) {
2926 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2927 printf("%d: Error enqueuing events\n", __LINE__);
2932 /* Schedule the events from the port to the IQ. At least one event
2933 * should be remaining in the queue.
2935 rte_service_run_iter_on_app_lcore(t->service_id, 1);
2937 if (rte_event_dev_stop_flush_callback_register(evdev, flush, &count)) {
2938 printf("%d: Error installing the flush callback\n", __LINE__);
2945 printf("%d: Error executing the flush callback\n", __LINE__);
2949 if (rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL)) {
2950 printf("%d: Error uninstalling the flush callback\n", __LINE__);
2956 rte_event_dev_dump(evdev, stdout);
/*
 * Worker lcore for the loopback stress test.  Dequeues bursts on port 1
 * and re-injects each event as FORWARD until its mbuf's dynfield counter
 * reaches 16, then frees the mbuf and RELEASEs the event.  Runs until
 * `count` (a shared completion counter declared in elided lines —
 * presumably atomic, TODO confirm) reaches NUM_PACKETS.
 * NOTE(review): the dequeue arguments, `enqd` declaration, retry loops
 * and closing braces are elided in this view.
 */
2962 worker_loopback_worker_fn(void *arg)
2964 struct test *t = arg;
2965 uint8_t port = t->port[1];
2970 * Takes packets from the input port and then loops them back through
2971 * the Eventdev. Each packet gets looped through QIDs 0-8, 16 times
2972 * so each packet goes through 8*16 = 128 times.
2974 printf("%d: \tWorker function started\n", __LINE__);
2975 while (count < NUM_PACKETS) {
2976 #define BURST_SIZE 32
2977 struct rte_event ev[BURST_SIZE];
2978 uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev,
2985 for (i = 0; i < nb_rx; i++) {
/* Not yet at the last QID: forward onward (to the next queue,
 * per the comment above; the queue_id update is elided). */
2987 if (ev[i].queue_id != 8) {
2988 ev[i].op = RTE_EVENT_OP_FORWARD;
2989 enqd = rte_event_enqueue_burst(evdev, port,
2992 printf("%d: Can't enqueue FWD!!\n",
/* Completed one full pass over the queues: bump the per-mbuf
 * dynfield iteration counter. */
3000 (*counter_field(ev[i].mbuf))++;
3001 if (*counter_field(ev[i].mbuf) != 16) {
3002 ev[i].op = RTE_EVENT_OP_FORWARD;
3003 enqd = rte_event_enqueue_burst(evdev, port,
3006 printf("%d: Can't enqueue FWD!!\n",
3012 /* we have hit 16 iterations through system - drop */
3013 rte_pktmbuf_free(ev[i].mbuf);
/* RELEASE returns the atomic-flow credit for the dropped event. */
3015 ev[i].op = RTE_EVENT_OP_RELEASE;
3016 enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1);
3018 printf("%d drop enqueue failed\n", __LINE__);
/*
 * Producer lcore for the loopback stress test.  Allocates mbufs from the
 * test mempool (busy-retrying until one is free), zeroes the per-mbuf
 * dynfield loop counter, and injects each as a NEW event on QID 0 via
 * port 0 until NUM_PACKETS have been produced.  Flow id is derived from
 * the mbuf address to spread packets across flows.
 * NOTE(review): the do-loop opener, the event initializer tail, the
 * enqueue-retry loop body and the function's return are elided here.
 */
3028 worker_loopback_producer_fn(void *arg)
3030 struct test *t = arg;
3031 uint8_t port = t->port[0];
3034 printf("%d: \tProducer function started\n", __LINE__);
3035 while (count < NUM_PACKETS) {
3036 struct rte_mbuf *m = 0;
/* Spin until the mempool has a free mbuf (workers free them back). */
3038 m = rte_pktmbuf_alloc(t->mbuf_pool);
3039 } while (m == NULL);
/* Fresh packet: reset the dynfield iteration counter to 0. */
3041 *counter_field(m) = 0;
3043 struct rte_event ev = {
3044 .op = RTE_EVENT_OP_NEW,
3045 .queue_id = t->qid[0],
/* Low 16 bits of the mbuf address give a cheap pseudo-random flow. */
3046 .flow_id = (uintptr_t)m & 0xFFFF,
/* If the first enqueue fails (new_event_threshold hit), spin until
 * the device accepts the event. */
3050 if (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1) {
3051 while (rte_event_enqueue_burst(evdev, port, &ev, 1) !=
/*
 * End-to-end stress test: one producer lcore injects NUM_PACKETS mbufs,
 * one worker lcore loops each packet through the 8 atomic QIDs 16 times
 * (128 scheduler passes per packet) while this (app) lcore drives the
 * scheduler service.  Registers the mbuf dynfield used as the per-packet
 * loop counter, configures an RX port with a low new-event threshold and
 * a TX port with a high one, prints throughput once per second, and
 * declares deadlock if no events are scheduled for ~3 seconds.
 *
 * @param t                          test state (evdev ports/queues/pool)
 * @param disable_implicit_release   nonzero: set
 *                                   RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL
 *                                   on both ports
 * Returns 0 on success, nonzero on failure (return paths elided here).
 * NOTE(review): several setup/teardown lines are elided in this view.
 */
3063 worker_loopback(struct test *t, uint8_t disable_implicit_release)
3065 /* use a single producer core, and a worker core to see what happens
3066 * if the worker loops packets back multiple times
3068 struct test_event_dev_stats stats;
3069 uint64_t print_cycles = 0, cycles = 0;
3070 uint64_t tx_pkts = 0;
3072 int w_lcore, p_lcore;
/* Register (or look up) the mbuf dynamic field holding the per-packet
 * loop counter used by producer/worker; idempotent across re-runs. */
3074 static const struct rte_mbuf_dynfield counter_dynfield_desc = {
3075 .name = "rte_event_sw_dynfield_selftest_counter",
3076 .size = sizeof(counter_dynfield_t),
3077 .align = __alignof__(counter_dynfield_t),
3079 counter_dynfield_offset =
3080 rte_mbuf_dynfield_register(&counter_dynfield_desc);
3081 if (counter_dynfield_offset < 0) {
3082 printf("Error registering mbuf field\n");
3086 if (init(t, 8, 2) < 0 ||
3087 create_atomic_qids(t, 8) < 0) {
3088 printf("%d: Error initializing device\n", __LINE__);
3092 /* RX with low max events */
3093 static struct rte_event_port_conf conf = {
3094 .dequeue_depth = 32,
3095 .enqueue_depth = 64,
3097 /* beware: this cannot be initialized in the static above as it would
3098 * only be initialized once - and this needs to be set for multiple runs
3100 conf.new_event_threshold = 512;
3101 conf.event_port_cfg = disable_implicit_release ?
3102 RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL : 0;
3104 if (rte_event_port_setup(evdev, 0, &conf) < 0) {
3105 printf("Error setting up RX port\n");
3109 /* TX with higher max events */
3110 conf.new_event_threshold = 4096;
3111 if (rte_event_port_setup(evdev, 1, &conf) < 0) {
3112 printf("Error setting up TX port\n");
3117 /* CQ mapping to QID */
/* NULL queues/priorities links the worker port to every queue. */
3118 err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
3119 if (err != 8) { /* should have mapped all queues*/
3120 printf("%d: error mapping port 2 to all qids\n", __LINE__);
3124 if (rte_event_dev_start(evdev) < 0) {
3125 printf("%d: Error with start call\n", __LINE__);
/* Pick two distinct lcores (skipping the main lcore is decided by the
 * elided arguments — TODO confirm) for producer and worker. */
3129 p_lcore = rte_get_next_lcore(
3130 /* start core */ -1,
3133 w_lcore = rte_get_next_lcore(p_lcore, 1, 0);
3135 rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);
3136 rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);
3138 print_cycles = cycles = rte_get_timer_cycles();
/* Main lcore runs the sw scheduler service until both helpers finish. */
3139 while (rte_eal_get_lcore_state(p_lcore) != FINISHED ||
3140 rte_eal_get_lcore_state(w_lcore) != FINISHED) {
3142 rte_service_run_iter_on_app_lcore(t->service_id, 1);
3144 uint64_t new_cycles = rte_get_timer_cycles();
/* Once per second: print scheduler rx/tx progress. */
3146 if (new_cycles - print_cycles > rte_get_timer_hz()) {
3147 test_event_dev_stats_get(evdev, &stats);
3149 "%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n",
3150 __LINE__, stats.rx_pkts, stats.tx_pkts);
3152 print_cycles = new_cycles;
/* Deadlock watchdog: no tx progress for > 3 seconds aborts the test
 * (the actual error-return after the dump is elided). */
3154 if (new_cycles - cycles > rte_get_timer_hz() * 3) {
3155 test_event_dev_stats_get(evdev, &stats);
3156 if (stats.tx_pkts == tx_pkts) {
3157 rte_event_dev_dump(evdev, stdout);
3158 printf("Dumping xstats:\n");
3161 "%d: No schedules for seconds, deadlock\n",
3165 tx_pkts = stats.tx_pkts;
3166 cycles = new_cycles;
3169 rte_service_run_iter_on_app_lcore(t->service_id, 1);
3170 /* ensure all completions are flushed */
3172 rte_eal_mp_wait_lcore();
/* mbuf pool shared across selftest invocations: created on first run by
 * test_sw_eventdev() and deliberately kept (never freed) so subsequent
 * re-runs of the selftest reuse the same pool. */
static struct rte_mempool *eventdev_func_mempool;
3181 test_sw_eventdev(void)
3186 t = malloc(sizeof(struct test));
3189 /* manually initialize the op, older gcc's complain on static
3190 * initialization of struct elements that are a bitfield.
3192 release_ev.op = RTE_EVENT_OP_RELEASE;
3194 const char *eventdev_name = "event_sw";
3195 evdev = rte_event_dev_get_dev_id(eventdev_name);
3197 printf("%d: Eventdev %s not found - creating.\n",
3198 __LINE__, eventdev_name);
3199 if (rte_vdev_init(eventdev_name, NULL) < 0) {
3200 printf("Error creating eventdev\n");
3203 evdev = rte_event_dev_get_dev_id(eventdev_name);
3205 printf("Error finding newly created eventdev\n");
3210 if (rte_event_dev_service_id_get(evdev, &t->service_id) < 0) {
3211 printf("Failed to get service ID for software event dev\n");
3215 rte_service_runstate_set(t->service_id, 1);
3216 rte_service_set_runstate_mapped_check(t->service_id, 0);
3218 /* Only create mbuf pool once, reuse for each test run */
3219 if (!eventdev_func_mempool) {
3220 eventdev_func_mempool = rte_pktmbuf_pool_create(
3221 "EVENTDEV_SW_SA_MBUF_POOL",
3222 (1<<12), /* 4k buffers */
3223 32 /*MBUF_CACHE_SIZE*/,
3225 512, /* use very small mbufs */
3227 if (!eventdev_func_mempool) {
3228 printf("ERROR creating mempool\n");
3232 t->mbuf_pool = eventdev_func_mempool;
3233 printf("*** Running Single Directed Packet test...\n");
3234 ret = test_single_directed_packet(t);
3236 printf("ERROR - Single Directed Packet test FAILED.\n");
3239 printf("*** Running Directed Forward Credit test...\n");
3240 ret = test_directed_forward_credits(t);
3242 printf("ERROR - Directed Forward Credit test FAILED.\n");
3245 printf("*** Running Single Load Balanced Packet test...\n");
3246 ret = single_packet(t);
3248 printf("ERROR - Single Packet test FAILED.\n");
3251 printf("*** Running Unordered Basic test...\n");
3252 ret = unordered_basic(t);
3254 printf("ERROR - Unordered Basic test FAILED.\n");
3257 printf("*** Running Ordered Basic test...\n");
3258 ret = ordered_basic(t);
3260 printf("ERROR - Ordered Basic test FAILED.\n");
3263 printf("*** Running Burst Packets test...\n");
3264 ret = burst_packets(t);
3266 printf("ERROR - Burst Packets test FAILED.\n");
3269 printf("*** Running Load Balancing test...\n");
3270 ret = load_balancing(t);
3272 printf("ERROR - Load Balancing test FAILED.\n");
3275 printf("*** Running Prioritized Directed test...\n");
3276 ret = test_priority_directed(t);
3278 printf("ERROR - Prioritized Directed test FAILED.\n");
3281 printf("*** Running Prioritized Atomic test...\n");
3282 ret = test_priority_atomic(t);
3284 printf("ERROR - Prioritized Atomic test FAILED.\n");
3288 printf("*** Running Prioritized Ordered test...\n");
3289 ret = test_priority_ordered(t);
3291 printf("ERROR - Prioritized Ordered test FAILED.\n");
3294 printf("*** Running Prioritized Unordered test...\n");
3295 ret = test_priority_unordered(t);
3297 printf("ERROR - Prioritized Unordered test FAILED.\n");
3300 printf("*** Running Invalid QID test...\n");
3301 ret = invalid_qid(t);
3303 printf("ERROR - Invalid QID test FAILED.\n");
3306 printf("*** Running Load Balancing History test...\n");
3307 ret = load_balancing_history(t);
3309 printf("ERROR - Load Balancing History test FAILED.\n");
3312 printf("*** Running Inflight Count test...\n");
3313 ret = inflight_counts(t);
3315 printf("ERROR - Inflight Count test FAILED.\n");
3318 printf("*** Running Abuse Inflights test...\n");
3319 ret = abuse_inflights(t);
3321 printf("ERROR - Abuse Inflights test FAILED.\n");
3324 printf("*** Running XStats test...\n");
3325 ret = xstats_tests(t);
3327 printf("ERROR - XStats test FAILED.\n");
3330 printf("*** Running XStats ID Reset test...\n");
3331 ret = xstats_id_reset_tests(t);
3333 printf("ERROR - XStats ID Reset test FAILED.\n");
3336 printf("*** Running XStats Brute Force test...\n");
3337 ret = xstats_brute_force(t);
3339 printf("ERROR - XStats Brute Force test FAILED.\n");
3342 printf("*** Running XStats ID Abuse test...\n");
3343 ret = xstats_id_abuse_tests(t);
3345 printf("ERROR - XStats ID Abuse test FAILED.\n");
3348 printf("*** Running QID Priority test...\n");
3349 ret = qid_priorities(t);
3351 printf("ERROR - QID Priority test FAILED.\n");
3354 printf("*** Running Unlink-in-progress test...\n");
3355 ret = unlink_in_progress(t);
3357 printf("ERROR - Unlink in progress test FAILED.\n");
3360 printf("*** Running Ordered Reconfigure test...\n");
3361 ret = ordered_reconfigure(t);
3363 printf("ERROR - Ordered Reconfigure test FAILED.\n");
3366 printf("*** Running Port LB Single Reconfig test...\n");
3367 ret = port_single_lb_reconfig(t);
3369 printf("ERROR - Port LB Single Reconfig test FAILED.\n");
3372 printf("*** Running Port Reconfig Credits test...\n");
3373 ret = port_reconfig_credits(t);
3375 printf("ERROR - Port Reconfig Credits Reset test FAILED.\n");
3378 printf("*** Running Head-of-line-blocking test...\n");
3381 printf("ERROR - Head-of-line-blocking test FAILED.\n");
3384 printf("*** Running Stop Flush test...\n");
3385 ret = dev_stop_flush(t);
3387 printf("ERROR - Stop Flush test FAILED.\n");
3390 if (rte_lcore_count() >= 3) {
3391 printf("*** Running Worker loopback test...\n");
3392 ret = worker_loopback(t, 0);
3394 printf("ERROR - Worker loopback test FAILED.\n");
3398 printf("*** Running Worker loopback test (implicit release disabled)...\n");
3399 ret = worker_loopback(t, 1);
3401 printf("ERROR - Worker loopback test FAILED.\n");
3405 printf("### Not enough cores for worker loopback tests.\n");
3406 printf("### Need at least 3 cores for the tests.\n");
3410 * Free test instance, leaving mempool initialized, and a pointer to it
3411 * in static eventdev_func_mempool, as it is re-used on re-runs
3415 printf("SW Eventdev Selftest Successful.\n");
3419 printf("SW Eventdev Selftest Failed.\n");