4 * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 #include <sys/queue.h>
41 #include <rte_memory.h>
42 #include <rte_memzone.h>
43 #include <rte_launch.h>
45 #include <rte_per_lcore.h>
46 #include <rte_lcore.h>
47 #include <rte_debug.h>
48 #include <rte_ethdev.h>
49 #include <rte_cycles.h>
51 #include <rte_eventdev.h>
56 #define NUM_PACKETS (1<<18)
/* mempool all test packets are allocated from */
61 struct rte_mempool *mbuf_pool;
/* event port ids set up by create_ports() */
62 uint8_t port[MAX_PORTS];
/* event queue ids set up by the create_*_qids() helpers */
63 uint8_t qid[MAX_QIDS];
/* canned event enqueued to drop a held event without forwarding it;
 * presumably initialized elsewhere with op = RTE_EVENT_OP_RELEASE —
 * TODO(review): confirm against the elided initialization code.
 */
67 static struct rte_event release_ev;
/*
 * Allocate an mbuf from @mp and fill it with a canned ARP-request frame
 * (who-has 10.0.0.1 tell 10.0.0.2). @portid is accepted for API symmetry;
 * it is not used in the visible body. Returns the filled mbuf.
 */
69 static inline struct rte_mbuf *
70 rte_gen_arp(int portid, struct rte_mempool *mp)
74 * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
76 static const uint8_t arp_request[] = {
77 /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
78 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
79 /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
80 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
81 /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
82 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
83 /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00
/* NOTE(review): the "- 1" drops the last template byte, giving an odd
 * pkt_len; it matches the historical DPDK test helper but verify it is
 * intentional rather than an off-by-one.
 */
87 int pkt_len = sizeof(arp_request) - 1;
89 m = rte_pktmbuf_alloc(mp);
/* copy the template frame into the mbuf's data area */
93 memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
94 arp_request, pkt_len);
/* packet length and first-segment length are the same single-seg frame */
95 rte_pktmbuf_pkt_len(m) = pkt_len;
96 rte_pktmbuf_data_len(m) = pkt_len;
103 /* initialization and config */
/*
 * Reset the test state and configure the event device under test with
 * @nb_queues event queues and @nb_ports event ports. The mbuf pool pointer
 * is the only field of @t that survives the reset. Prints and (in elided
 * code) reports an error if device configuration fails.
 */
105 init(struct test *t, int nb_queues, int nb_ports)
107 struct rte_event_dev_config config = {
108 .nb_event_queues = nb_queues,
109 .nb_event_ports = nb_ports,
110 .nb_event_queue_flows = 1024,
111 .nb_events_limit = 4096,
112 .nb_event_port_dequeue_depth = 128,
113 .nb_event_port_enqueue_depth = 128,
117 void *temp = t->mbuf_pool; /* save and restore mbuf pool */
119 memset(t, 0, sizeof(*t));
122 ret = rte_event_dev_configure(evdev, &config);
124 printf("%d: Error configuring device\n", __LINE__);
/*
 * Set up @num_ports event ports (ids 0..num_ports-1) on the device with a
 * fixed default port configuration. Rejects requests above MAX_PORTS.
 */
129 create_ports(struct test *t, int num_ports)
132 static const struct rte_event_port_conf conf = {
133 .new_event_threshold = 1024,
137 if (num_ports > MAX_PORTS)
140 for (i = 0; i < num_ports; i++) {
141 if (rte_event_port_setup(evdev, i, &conf) < 0) {
142 printf("Error setting up port %d\n", i);
/*
 * Create @num_qids load-balanced queues of the scheduling type selected by
 * @flags (atomic/ordered/parallel), appending them after any queues already
 * tracked in t->nb_qids. Caps the running total at MAX_QIDS.
 */
152 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
157 const struct rte_event_queue_conf conf = {
158 .event_queue_cfg = flags,
159 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
160 .nb_atomic_flows = 1024,
161 .nb_atomic_order_sequences = 1024,
/* queue ids continue from where the previous create_* call stopped */
164 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
165 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
166 printf("%d: error creating qid %d\n", __LINE__, i);
171 t->nb_qids += num_qids;
172 if (t->nb_qids > MAX_QIDS)
/* Convenience wrapper: create @num_qids atomic load-balanced queues. */
179 create_atomic_qids(struct test *t, int num_qids)
181 return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY);
/* Convenience wrapper: create @num_qids ordered load-balanced queues. */
185 create_ordered_qids(struct test *t, int num_qids)
187 return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ORDERED_ONLY);
/* Convenience wrapper: create @num_qids parallel (unordered) queues. */
192 create_unordered_qids(struct test *t, int num_qids)
194 return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY);
/*
 * Create @num_qids single-link (directed) queues and link each new queue to
 * the corresponding entry of @ports (queue k goes to ports[k]). Queue ids
 * continue from t->nb_qids; the running total is capped at MAX_QIDS.
 */
198 create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
203 static const struct rte_event_queue_conf conf = {
204 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
205 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
206 .nb_atomic_flows = 1024,
207 .nb_atomic_order_sequences = 1024,
210 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
211 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
212 printf("%d: error creating qid %d\n", __LINE__, i);
/* a single-link queue must be linked to exactly one port */
217 if (rte_event_port_link(evdev, ports[i - t->nb_qids],
218 &t->qid[i], NULL, 1) != 1) {
219 printf("%d: error creating link for qid %d\n",
224 t->nb_qids += num_qids;
225 if (t->nb_qids > MAX_QIDS)
/* Tear down after a test case: stop then close the device under test. */
233 cleanup(struct test *t __rte_unused)
235 rte_event_dev_stop(evdev);
236 rte_event_dev_close(evdev);
/*
 * Snapshot of the software eventdev's xstats counters, filled in by
 * test_event_dev_stats_get() for assertion in the test cases below.
 */
240 struct test_event_dev_stats {
241 uint64_t rx_pkts; /**< Total packets received */
242 uint64_t rx_dropped; /**< Total packets dropped (Eg Invalid QID) */
243 uint64_t tx_pkts; /**< Total packets transmitted */
245 /** Packets received on this port */
246 uint64_t port_rx_pkts[MAX_PORTS];
247 /** Packets dropped on this port */
248 uint64_t port_rx_dropped[MAX_PORTS];
249 /** Packets inflight on this port */
250 uint64_t port_inflight[MAX_PORTS];
251 /** Packets transmitted on this port */
252 uint64_t port_tx_pkts[MAX_PORTS];
253 /** Packets received on this qid */
254 uint64_t qid_rx_pkts[MAX_QIDS];
255 /** Packets dropped on this qid */
256 uint64_t qid_rx_dropped[MAX_QIDS];
257 /** Packets transmitted on this qid */
258 uint64_t qid_tx_pkts[MAX_QIDS];
/*
 * Populate @stats by looking up each counter by its xstats name
 * ("dev_rx", "port_N_rx", "qid_N_tx", ...) via
 * rte_event_dev_xstats_by_name_get(). The id output of each lookup is
 * cached in function-static arrays (note: this makes the function
 * non-reentrant, which is fine for this single-threaded test harness).
 */
262 test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
265 static uint32_t total_ids[3]; /* rx, tx and drop */
266 static uint32_t port_rx_pkts_ids[MAX_PORTS];
267 static uint32_t port_rx_dropped_ids[MAX_PORTS];
268 static uint32_t port_inflight_ids[MAX_PORTS];
269 static uint32_t port_tx_pkts_ids[MAX_PORTS];
270 static uint32_t qid_rx_pkts_ids[MAX_QIDS];
271 static uint32_t qid_rx_dropped_ids[MAX_QIDS];
272 static uint32_t qid_tx_pkts_ids[MAX_QIDS];
/* device-wide totals */
275 stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
276 "dev_rx", &total_ids[0]);
277 stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
278 "dev_drop", &total_ids[1]);
279 stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
280 "dev_tx", &total_ids[2]);
/* per-port counters, one name lookup per stat per port */
281 for (i = 0; i < MAX_PORTS; i++) {
283 snprintf(name, sizeof(name), "port_%u_rx", i);
284 stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
285 dev_id, name, &port_rx_pkts_ids[i]);
286 snprintf(name, sizeof(name), "port_%u_drop", i);
287 stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
288 dev_id, name, &port_rx_dropped_ids[i]);
289 snprintf(name, sizeof(name), "port_%u_inflight", i);
290 stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
291 dev_id, name, &port_inflight_ids[i]);
292 snprintf(name, sizeof(name), "port_%u_tx", i);
293 stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
294 dev_id, name, &port_tx_pkts_ids[i]);
/* per-queue counters */
296 for (i = 0; i < MAX_QIDS; i++) {
298 snprintf(name, sizeof(name), "qid_%u_rx", i);
299 stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
300 dev_id, name, &qid_rx_pkts_ids[i]);
301 snprintf(name, sizeof(name), "qid_%u_drop", i);
302 stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
303 dev_id, name, &qid_rx_dropped_ids[i]);
304 snprintf(name, sizeof(name), "qid_%u_tx", i);
305 stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
306 dev_id, name, &qid_tx_pkts_ids[i]);
312 /* run_prio_packet_test
313 * This performs a basic packet priority check on the test instance passed in.
314 * It is factored out of the main priority tests as the same tests must be
315 * performed to ensure prioritization of each type of QID.
 *
 * Preconditions:
318 * - An initialized test structure, including mempool
319 * - t->port[0] is initialized for both Enq / Deq of packets to the QID
320 * - t->qid[0] is the QID to be tested
321 * - if LB QID, the CQ must be mapped to the QID.
 *
 * Enqueues a NORMAL-priority packet first and a HIGHEST-priority packet
 * second, then verifies they dequeue in priority order (HIGHEST first).
 */
324 run_prio_packet_test(struct test *t)
/* sequence numbers used to identify which packet comes out first */
327 const uint32_t MAGIC_SEQN[] = {4711, 1234};
328 const uint32_t PRIORITY[] = {
329 RTE_EVENT_DEV_PRIORITY_NORMAL,
330 RTE_EVENT_DEV_PRIORITY_HIGHEST
333 for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
334 /* generate pkt and enqueue */
336 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
338 printf("%d: gen of pkt failed\n", __LINE__);
/* tag the mbuf so the dequeue order can be checked below */
341 arp->seqn = MAGIC_SEQN[i];
343 ev = (struct rte_event){
344 .priority = PRIORITY[i],
345 .op = RTE_EVENT_OP_NEW,
346 .queue_id = t->qid[0],
349 err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
351 printf("%d: error failed to enqueue\n", __LINE__);
/* run the scheduler so the enqueued events reach the port's CQ */
356 rte_event_schedule(evdev);
358 struct test_event_dev_stats stats;
359 err = test_event_dev_stats_get(evdev, &stats);
361 printf("%d: error failed to get stats\n", __LINE__);
/* both packets must have been received on port 0 */
365 if (stats.port_rx_pkts[t->port[0]] != 2) {
366 printf("%d: error stats incorrect for directed port\n",
368 rte_event_dev_dump(evdev, stdout);
372 struct rte_event ev, ev2;
374 deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
376 printf("%d: error failed to deq\n", __LINE__);
377 rte_event_dev_dump(evdev, stdout);
/* the HIGHEST-priority packet (enqueued second) must come out first */
380 if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
381 printf("%d: first packet out not highest priority\n",
383 rte_event_dev_dump(evdev, stdout);
386 rte_pktmbuf_free(ev.mbuf);
388 deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
390 printf("%d: error failed to deq\n", __LINE__);
391 rte_event_dev_dump(evdev, stdout);
/* the NORMAL-priority packet follows */
394 if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
395 printf("%d: second packet out not lower priority\n",
397 rte_event_dev_dump(evdev, stdout);
400 rte_pktmbuf_free(ev2.mbuf);
/*
 * Send one packet through a directed (single-link) queue: enqueue on port 0
 * (linked to directed qid 2), schedule, and verify the tagged packet is
 * dequeued on port 2 with stats to match.
 */
407 test_single_directed_packet(struct test *t)
409 const int rx_enq = 0;
410 const int wrk_enq = 2;
413 /* Create instance with 3 directed QIDs going to 3 ports */
414 if (init(t, 3, 3) < 0 ||
415 create_ports(t, 3) < 0 ||
416 create_directed_qids(t, 3, t->port) < 0)
419 if (rte_event_dev_start(evdev) < 0) {
420 printf("%d: Error with start call\n", __LINE__);
424 /************** FORWARD ****************/
425 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
426 struct rte_event ev = {
427 .op = RTE_EVENT_OP_NEW,
433 printf("%d: gen of pkt failed\n", __LINE__);
/* tag the mbuf so we can verify the same packet is dequeued */
437 const uint32_t MAGIC_SEQN = 4711;
438 arp->seqn = MAGIC_SEQN;
440 /* generate pkt and enqueue */
441 err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
443 printf("%d: error failed to enqueue\n", __LINE__);
447 /* Run schedule() as dir packets may need to be re-ordered */
448 rte_event_schedule(evdev);
450 struct test_event_dev_stats stats;
451 err = test_event_dev_stats_get(evdev, &stats);
453 printf("%d: error failed to get stats\n", __LINE__);
457 if (stats.port_rx_pkts[rx_enq] != 1) {
458 printf("%d: error stats incorrect for directed port\n",
/* the packet must arrive on the worker port linked to the directed qid */
464 deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
466 printf("%d: error failed to deq\n", __LINE__);
470 err = test_event_dev_stats_get(evdev, &stats);
471 if (stats.port_rx_pkts[wrk_enq] != 0 &&
472 stats.port_rx_pkts[wrk_enq] != 1) {
473 printf("%d: error directed stats post-dequeue\n", __LINE__);
477 if (ev.mbuf->seqn != MAGIC_SEQN) {
478 printf("%d: error magic sequence number not dequeued\n",
483 rte_pktmbuf_free(ev.mbuf);
/* Priority check on a directed (single-link) queue; the single-link setup
 * performs the port->qid link itself, so no explicit link call is needed. */
490 test_priority_directed(struct test *t)
492 if (init(t, 1, 1) < 0 ||
493 create_ports(t, 1) < 0 ||
494 create_directed_qids(t, 1, t->port) < 0) {
495 printf("%d: Error initializing device\n", __LINE__);
499 if (rte_event_dev_start(evdev) < 0) {
500 printf("%d: Error with start call\n", __LINE__);
504 return run_prio_packet_test(t);
/* Priority check on an atomic load-balanced queue: set up one port and one
 * atomic qid, link them, start the device, then run the shared prio test. */
508 test_priority_atomic(struct test *t)
510 if (init(t, 1, 1) < 0 ||
511 create_ports(t, 1) < 0 ||
512 create_atomic_qids(t, 1) < 0) {
513 printf("%d: Error initializing device\n", __LINE__);
/* map the port's CQ to the queue (required for LB queues) */
518 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
519 printf("%d: error mapping qid to port\n", __LINE__);
522 if (rte_event_dev_start(evdev) < 0) {
523 printf("%d: Error with start call\n", __LINE__);
527 return run_prio_packet_test(t);
/* Priority check on an ordered load-balanced queue (same flow as the
 * atomic variant, with an ordered qid). */
531 test_priority_ordered(struct test *t)
533 if (init(t, 1, 1) < 0 ||
534 create_ports(t, 1) < 0 ||
535 create_ordered_qids(t, 1) < 0) {
536 printf("%d: Error initializing device\n", __LINE__);
/* map the port's CQ to the queue (required for LB queues) */
541 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
542 printf("%d: error mapping qid to port\n", __LINE__);
545 if (rte_event_dev_start(evdev) < 0) {
546 printf("%d: Error with start call\n", __LINE__);
550 return run_prio_packet_test(t);
/* Priority check on a parallel (unordered) load-balanced queue (same flow
 * as the atomic variant, with a parallel qid). */
554 test_priority_unordered(struct test *t)
556 if (init(t, 1, 1) < 0 ||
557 create_ports(t, 1) < 0 ||
558 create_unordered_qids(t, 1) < 0) {
559 printf("%d: Error initializing device\n", __LINE__);
/* map the port's CQ to the queue (required for LB queues) */
564 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
565 printf("%d: error mapping qid to port\n", __LINE__);
568 if (rte_event_dev_start(evdev) < 0) {
569 printf("%d: Error with start call\n", __LINE__);
573 return run_prio_packet_test(t);
/*
 * Enqueue NUM_PKTS packets spread across two atomic queues (one mapped to
 * each of two ports), schedule, and verify each port dequeues exactly half
 * of the packets.
 */
577 burst_packets(struct test *t)
579 /************** CONFIG ****************/
584 /* Create instance with 2 ports and 2 queues */
585 if (init(t, 2, 2) < 0 ||
586 create_ports(t, 2) < 0 ||
587 create_atomic_qids(t, 2) < 0) {
588 printf("%d: Error initializing device\n", __LINE__);
592 /* CQ mapping to QID */
593 ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
595 printf("%d: error mapping lb qid0\n", __LINE__);
598 ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
600 printf("%d: error mapping lb qid1\n", __LINE__);
604 if (rte_event_dev_start(evdev) < 0) {
605 printf("%d: Error with start call\n", __LINE__);
609 /************** FORWARD ****************/
610 const uint32_t rx_port = 0;
611 const uint32_t NUM_PKTS = 2;
613 for (i = 0; i < NUM_PKTS; i++) {
614 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
616 printf("%d: error generating pkt\n", __LINE__);
620 struct rte_event ev = {
621 .op = RTE_EVENT_OP_NEW,
626 /* generate pkt and enqueue */
627 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
629 printf("%d: Failed to enqueue\n", __LINE__);
633 rte_event_schedule(evdev);
635 /* Check stats for all NUM_PKTS arrived to sched core */
636 struct test_event_dev_stats stats;
638 err = test_event_dev_stats_get(evdev, &stats);
640 printf("%d: failed to get stats\n", __LINE__);
643 if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
644 printf("%d: Sched core didn't receive all %d pkts\n",
646 rte_event_dev_dump(evdev, stdout);
654 /******** DEQ QID 1 *******/
657 p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
659 rte_pktmbuf_free(ev.mbuf);
/* each port should see exactly half of the packets */
662 if (deq_pkts != NUM_PKTS/2) {
663 printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
668 /******** DEQ QID 2 *******/
672 p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
674 rte_pktmbuf_free(ev.mbuf);
676 if (deq_pkts != NUM_PKTS/2) {
677 printf("%d: Half of NUM_PKTS didn't arrive at port 2\n",
/*
 * Enqueue a release event on a port that holds nothing: the scheduler must
 * treat it as a no-op, leaving rx/tx counters and the worker port's
 * inflight count at zero.
 */
687 abuse_inflights(struct test *t)
689 const int rx_enq = 0;
690 const int wrk_enq = 2;
693 /* Create instance with 4 ports */
694 if (init(t, 1, 4) < 0 ||
695 create_ports(t, 4) < 0 ||
696 create_atomic_qids(t, 1) < 0) {
697 printf("%d: Error initializing device\n", __LINE__);
701 /* CQ mapping to QID */
702 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
704 printf("%d: error mapping lb qid\n", __LINE__);
709 if (rte_event_dev_start(evdev) < 0) {
710 printf("%d: Error with start call\n", __LINE__);
714 /* Enqueue op only */
715 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
717 printf("%d: Failed to enqueue\n", __LINE__);
722 rte_event_schedule(evdev);
724 struct test_event_dev_stats stats;
726 err = test_event_dev_stats_get(evdev, &stats);
728 printf("%d: failed to get stats\n", __LINE__);
/* nothing was a real packet, so every counter must remain zero */
732 if (stats.rx_pkts != 0 ||
733 stats.tx_pkts != 0 ||
734 stats.port_inflight[wrk_enq] != 0) {
735 printf("%d: Sched core didn't handle pkt as expected\n",
/*
 * Exercise the full xstats API against the software eventdev: verify the
 * expected number of device/port/queue stats, check counter values after
 * enqueuing three packets, and verify that xstats_reset() zeroes the
 * resettable counters at each level while leaving the rest intact.
 * NOTE: the expected counts (6 device / 21 port / 13 queue stats) and the
 * expected-value tables below are tied to the sw PMD's stat layout and
 * must be updated whenever a stat is added.
 */
745 xstats_tests(struct test *t)
747 const int wrk_enq = 2;
750 /* Create instance with 4 ports */
751 if (init(t, 1, 4) < 0 ||
752 create_ports(t, 4) < 0 ||
753 create_atomic_qids(t, 1) < 0) {
754 printf("%d: Error initializing device\n", __LINE__);
758 /* CQ mapping to QID */
759 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
761 printf("%d: error mapping lb qid\n", __LINE__);
766 if (rte_event_dev_start(evdev) < 0) {
767 printf("%d: Error with start call\n", __LINE__);
771 const uint32_t XSTATS_MAX = 1024;
774 uint32_t ids[XSTATS_MAX];
775 uint64_t values[XSTATS_MAX];
776 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
778 for (i = 0; i < XSTATS_MAX; i++)
781 /* Device names / values */
782 int ret = rte_event_dev_xstats_names_get(evdev,
783 RTE_EVENT_DEV_XSTATS_DEVICE,
784 0, xstats_names, ids, XSTATS_MAX);
786 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
789 ret = rte_event_dev_xstats_get(evdev,
790 RTE_EVENT_DEV_XSTATS_DEVICE,
791 0, ids, values, ret);
793 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
797 /* Port names / values */
798 ret = rte_event_dev_xstats_names_get(evdev,
799 RTE_EVENT_DEV_XSTATS_PORT, 0,
800 xstats_names, ids, XSTATS_MAX);
802 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
805 ret = rte_event_dev_xstats_get(evdev,
806 RTE_EVENT_DEV_XSTATS_PORT, 0,
809 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
813 /* Queue names / values */
814 ret = rte_event_dev_xstats_names_get(evdev,
815 RTE_EVENT_DEV_XSTATS_QUEUE,
816 0, xstats_names, ids, XSTATS_MAX);
818 printf("%d: expected 13 stats, got return %d\n", __LINE__, ret);
822 /* NEGATIVE TEST: with wrong queue passed, 0 stats should be returned */
823 ret = rte_event_dev_xstats_get(evdev,
824 RTE_EVENT_DEV_XSTATS_QUEUE,
825 1, ids, values, ret);
826 if (ret != -EINVAL) {
827 printf("%d: expected 0 stats, got return %d\n", __LINE__, ret);
/* valid queue id 0 must succeed again after the negative test */
831 ret = rte_event_dev_xstats_get(evdev,
832 RTE_EVENT_DEV_XSTATS_QUEUE,
833 0, ids, values, ret);
835 printf("%d: expected 13 stats, got return %d\n", __LINE__, ret);
839 /* enqueue packets to check values */
840 for (i = 0; i < 3; i++) {
842 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
844 printf("%d: gen of pkt failed\n", __LINE__);
847 ev.queue_id = t->qid[i];
848 ev.op = RTE_EVENT_OP_NEW;
853 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
855 printf("%d: Failed to enqueue\n", __LINE__);
860 rte_event_schedule(evdev);
862 /* Device names / values */
863 int num_stats = rte_event_dev_xstats_names_get(evdev,
864 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
865 xstats_names, ids, XSTATS_MAX);
868 ret = rte_event_dev_xstats_get(evdev,
869 RTE_EVENT_DEV_XSTATS_DEVICE,
870 0, ids, values, num_stats);
/* 3 rx, 3 tx, 0 dropped, 1 schedule call */
871 static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
872 for (i = 0; (signed int)i < ret; i++) {
873 if (expected[i] != values[i]) {
875 "%d Error xstat %d (id %d) %s : %"PRIu64
876 ", expect %"PRIu64"\n",
877 __LINE__, i, ids[i], xstats_names[i].name,
878 values[i], expected[i]);
883 ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
886 /* ensure reset statistics are zero-ed */
887 static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
888 ret = rte_event_dev_xstats_get(evdev,
889 RTE_EVENT_DEV_XSTATS_DEVICE,
890 0, ids, values, num_stats);
891 for (i = 0; (signed int)i < ret; i++) {
892 if (expected_zero[i] != values[i]) {
894 "%d Error, xstat %d (id %d) %s : %"PRIu64
895 ", expect %"PRIu64"\n",
896 __LINE__, i, ids[i], xstats_names[i].name,
897 values[i], expected_zero[i]);
902 /* port reset checks */
903 num_stats = rte_event_dev_xstats_names_get(evdev,
904 RTE_EVENT_DEV_XSTATS_PORT, 0,
905 xstats_names, ids, XSTATS_MAX);
908 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT,
909 0, ids, values, num_stats);
911 static const uint64_t port_expected[] = {
916 0 /* avg pkt cycles */,
918 0 /* rx ring used */,
919 4096 /* rx ring free */,
920 0 /* cq ring used */,
921 32 /* cq ring free */,
922 0 /* dequeue calls */,
923 /* 10 dequeue burst buckets */
927 if (ret != RTE_DIM(port_expected)) {
929 "%s %d: wrong number of port stats (%d), expected %zu\n",
930 __func__, __LINE__, ret, RTE_DIM(port_expected));
933 for (i = 0; (signed int)i < ret; i++) {
934 if (port_expected[i] != values[i]) {
936 "%s : %d: Error stat %s is %"PRIu64
937 ", expected %"PRIu64"\n",
938 __func__, __LINE__, xstats_names[i].name,
939 values[i], port_expected[i]);
944 ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT,
947 /* ensure reset statistics are zero-ed */
948 static const uint64_t port_expected_zero[] = {
953 0 /* avg pkt cycles */,
955 0 /* rx ring used */,
956 4096 /* rx ring free */,
957 0 /* cq ring used */,
958 32 /* cq ring free */,
959 0 /* dequeue calls */,
960 /* 10 dequeue burst buckets */
964 ret = rte_event_dev_xstats_get(evdev,
965 RTE_EVENT_DEV_XSTATS_PORT,
966 0, ids, values, num_stats);
967 for (i = 0; (signed int)i < ret; i++) {
968 if (port_expected_zero[i] != values[i]) {
970 "%d, Error, xstat %d (id %d) %s : %"PRIu64
971 ", expect %"PRIu64"\n",
972 __LINE__, i, ids[i], xstats_names[i].name,
973 values[i], port_expected_zero[i]);
978 /* QUEUE STATS TESTS */
979 num_stats = rte_event_dev_xstats_names_get(evdev,
980 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
981 xstats_names, ids, XSTATS_MAX);
982 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
983 0, ids, values, num_stats);
985 printf("xstats get returned %d\n", ret);
988 if ((unsigned int)ret > XSTATS_MAX)
989 printf("%s %d: more xstats available than space\n",
992 static const uint64_t queue_expected[] = {
998 0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
999 0, 0, 1, 0, /* qid_0_port_X_pinned_flows */
1001 for (i = 0; (signed int)i < ret; i++) {
1002 if (queue_expected[i] != values[i]) {
1004 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1005 ", expect %"PRIu64"\n",
1006 __LINE__, i, ids[i], xstats_names[i].name,
1007 values[i], queue_expected[i]);
1012 /* Reset the queue stats here */
1013 ret = rte_event_dev_xstats_reset(evdev,
1014 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1018 /* Verify that the resetable stats are reset, and others are not */
1019 static const uint64_t queue_expected_zero[] = {
1025 0, 0, 0, 0, /* 4 iq used */
1026 0, 0, 1, 0, /* qid to port pinned flows */
1029 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1030 ids, values, num_stats);
1032 for (i = 0; (signed int)i < ret; i++) {
1033 if (queue_expected_zero[i] != values[i]) {
1035 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1036 ", expect %"PRIu64"\n",
1037 __LINE__, i, ids[i], xstats_names[i].name,
1038 values[i], queue_expected_zero[i]);
1043 printf("%d : %d of values were not as expected above\n",
1052 rte_event_dev_dump(0, stdout);
/*
 * Negative test: requesting port/queue xstats names with an out-of-range
 * id (UINT8_MAX-1) must return 0 stats. Device-level stats are not tested
 * here because that mode ignores the port/queue number.
 */
1059 xstats_id_abuse_tests(struct test *t)
1062 const uint32_t XSTATS_MAX = 1024;
1063 const uint32_t link_port = 2;
1065 uint32_t ids[XSTATS_MAX];
1066 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1068 /* Create instance with 4 ports */
1069 if (init(t, 1, 4) < 0 ||
1070 create_ports(t, 4) < 0 ||
1071 create_atomic_qids(t, 1) < 0) {
1072 printf("%d: Error initializing device\n", __LINE__);
1076 err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0);
1078 printf("%d: error mapping lb qid\n", __LINE__);
1082 if (rte_event_dev_start(evdev) < 0) {
1083 printf("%d: Error with start call\n", __LINE__);
1087 /* no test for device, as it ignores the port/q number */
1088 int num_stats = rte_event_dev_xstats_names_get(evdev,
1089 RTE_EVENT_DEV_XSTATS_PORT,
1090 UINT8_MAX-1, xstats_names, ids,
1092 if (num_stats != 0) {
1093 printf("%d: expected %d stats, got return %d\n", __LINE__,
/* same invalid-id check for the queue stat mode */
1098 num_stats = rte_event_dev_xstats_names_get(evdev,
1099 RTE_EVENT_DEV_XSTATS_QUEUE,
1100 UINT8_MAX-1, xstats_names, ids,
1102 if (num_stats != 0) {
1103 printf("%d: expected %d stats, got return %d\n", __LINE__,
/*
 * Stress the credit handling across repeated stop/reconfigure/start cycles:
 * for NUM_ITERS iterations, (re)create the queue and port, start the
 * device, push NPKTS packets end-to-end, then stop the device for the next
 * iteration. Verifies packets still flow after every reconfiguration.
 */
1116 port_reconfig_credits(struct test *t)
1118 if (init(t, 1, 1) < 0) {
1119 printf("%d: Error initializing device\n", __LINE__);
1124 const uint32_t NUM_ITERS = 32;
1125 for (i = 0; i < NUM_ITERS; i++) {
1126 const struct rte_event_queue_conf conf = {
1127 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
1128 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1129 .nb_atomic_flows = 1024,
1130 .nb_atomic_order_sequences = 1024,
1132 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1133 printf("%d: error creating qid\n", __LINE__);
1138 static const struct rte_event_port_conf port_conf = {
1139 .new_event_threshold = 128,
1140 .dequeue_depth = 32,
1141 .enqueue_depth = 64,
1143 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1144 printf("%d Error setting up port\n", __LINE__);
/* link with NULL queue list links all configured queues */
1148 int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
1150 printf("%d: error mapping lb qid\n", __LINE__);
1154 if (rte_event_dev_start(evdev) < 0) {
1155 printf("%d: Error with start call\n", __LINE__);
1159 const uint32_t NPKTS = 1;
1161 for (j = 0; j < NPKTS; j++) {
1162 struct rte_event ev;
1163 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1165 printf("%d: gen of pkt failed\n", __LINE__);
1168 ev.queue_id = t->qid[0];
1169 ev.op = RTE_EVENT_OP_NEW;
1171 int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
1173 printf("%d: Failed to enqueue\n", __LINE__);
1174 rte_event_dev_dump(0, stdout);
1179 rte_event_schedule(evdev);
1181 struct rte_event ev[NPKTS];
1182 int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
1185 printf("%d error; no packet dequeued\n", __LINE__);
1187 /* let cleanup below stop the device on last iter */
1188 if (i != NUM_ITERS-1)
1189 rte_event_dev_stop(evdev);
/*
 * Reconfiguration test mixing queue types: create one atomic LB queue and
 * one single-link queue, set up two ports, then link / unlink / re-link
 * port 0 to the LB queue and link port 1 before starting the device.
 * Verifies link bookkeeping survives the unlink/relink sequence.
 */
1200 port_single_lb_reconfig(struct test *t)
1202 if (init(t, 2, 2) < 0) {
1203 printf("%d: Error initializing device\n", __LINE__);
1207 static const struct rte_event_queue_conf conf_lb_atomic = {
1208 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1209 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
1210 .nb_atomic_flows = 1024,
1211 .nb_atomic_order_sequences = 1024,
1213 if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
1214 printf("%d: error creating qid\n", __LINE__);
1218 static const struct rte_event_queue_conf conf_single_link = {
1219 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1220 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
1221 .nb_atomic_flows = 1024,
1222 .nb_atomic_order_sequences = 1024,
1224 if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
1225 printf("%d: error creating qid\n", __LINE__);
1229 struct rte_event_port_conf port_conf = {
1230 .new_event_threshold = 128,
1231 .dequeue_depth = 32,
1232 .enqueue_depth = 64,
1234 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1235 printf("%d Error setting up port\n", __LINE__);
1238 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
1239 printf("%d Error setting up port\n", __LINE__);
1243 /* link port to lb queue */
1244 uint8_t queue_id = 0;
1245 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1246 printf("%d: error creating link for qid\n", __LINE__);
/* unlink then relink the same port/queue pair */
1250 int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
1252 printf("%d: Error unlinking lb port\n", __LINE__);
1257 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1258 printf("%d: error creating link for qid\n", __LINE__);
/* second port also links to the lb queue */
1263 int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
1265 printf("%d: error mapping lb qid\n", __LINE__);
1269 if (rte_event_dev_start(evdev) < 0) {
1270 printf("%d: Error with start call\n", __LINE__);
/*
 * Fuzz-style robustness check: call xstats_names_get()/xstats_get() for
 * every mode (device, port, queue) and every id 0..UINT8_MAX-1 and make
 * sure nothing crashes. Return values are deliberately not checked.
 */
1282 xstats_brute_force(struct test *t)
1285 const uint32_t XSTATS_MAX = 1024;
1286 uint32_t ids[XSTATS_MAX];
1287 uint64_t values[XSTATS_MAX];
1288 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1291 /* Create instance with 4 ports */
1292 if (init(t, 1, 4) < 0 ||
1293 create_ports(t, 4) < 0 ||
1294 create_atomic_qids(t, 1) < 0) {
1295 printf("%d: Error initializing device\n", __LINE__);
1299 int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1301 printf("%d: error mapping lb qid\n", __LINE__);
1305 if (rte_event_dev_start(evdev) < 0) {
1306 printf("%d: Error with start call\n", __LINE__);
1310 for (i = 0; i < XSTATS_MAX; i++)
/* modes are consecutive enum values: DEVICE, PORT, QUEUE */
1313 for (i = 0; i < 3; i++) {
1314 uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i;
1316 for (j = 0; j < UINT8_MAX; j++) {
1317 rte_event_dev_xstats_names_get(evdev, mode,
1318 j, xstats_names, ids, XSTATS_MAX);
1320 rte_event_dev_xstats_get(evdev, mode, j, ids,
1321 values, XSTATS_MAX);
1333 xstats_id_reset_tests(struct test *t)
1335 const int wrk_enq = 2;
1338 /* Create instance with 4 ports */
1339 if (init(t, 1, 4) < 0 ||
1340 create_ports(t, 4) < 0 ||
1341 create_atomic_qids(t, 1) < 0) {
1342 printf("%d: Error initializing device\n", __LINE__);
1346 /* CQ mapping to QID */
1347 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
1349 printf("%d: error mapping lb qid\n", __LINE__);
1353 if (rte_event_dev_start(evdev) < 0) {
1354 printf("%d: Error with start call\n", __LINE__);
1358 #define XSTATS_MAX 1024
1361 uint32_t ids[XSTATS_MAX];
1362 uint64_t values[XSTATS_MAX];
1363 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1365 for (i = 0; i < XSTATS_MAX; i++)
1368 #define NUM_DEV_STATS 6
1369 /* Device names / values */
1370 int num_stats = rte_event_dev_xstats_names_get(evdev,
1371 RTE_EVENT_DEV_XSTATS_DEVICE,
1372 0, xstats_names, ids, XSTATS_MAX);
1373 if (num_stats != NUM_DEV_STATS) {
1374 printf("%d: expected %d stats, got return %d\n", __LINE__,
1375 NUM_DEV_STATS, num_stats);
1378 ret = rte_event_dev_xstats_get(evdev,
1379 RTE_EVENT_DEV_XSTATS_DEVICE,
1380 0, ids, values, num_stats);
1381 if (ret != NUM_DEV_STATS) {
1382 printf("%d: expected %d stats, got return %d\n", __LINE__,
1383 NUM_DEV_STATS, ret);
1388 for (i = 0; i < NPKTS; i++) {
1389 struct rte_event ev;
1390 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1392 printf("%d: gen of pkt failed\n", __LINE__);
1395 ev.queue_id = t->qid[i];
1396 ev.op = RTE_EVENT_OP_NEW;
1400 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1402 printf("%d: Failed to enqueue\n", __LINE__);
1407 rte_event_schedule(evdev);
1409 static const char * const dev_names[] = {
1410 "dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
1411 "dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
1413 uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
1414 for (i = 0; (int)i < ret; i++) {
1416 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1420 printf("%d: %s id incorrect, expected %d got %d\n",
1421 __LINE__, dev_names[i], i, id);
1424 if (val != dev_expected[i]) {
1425 printf("%d: %s value incorrect, expected %"
1426 PRIu64" got %d\n", __LINE__, dev_names[i],
1427 dev_expected[i], id);
1431 int reset_ret = rte_event_dev_xstats_reset(evdev,
1432 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
1436 printf("%d: failed to reset successfully\n", __LINE__);
1439 dev_expected[i] = 0;
1440 /* check value again */
1441 val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], 0);
1442 if (val != dev_expected[i]) {
1443 printf("%d: %s value incorrect, expected %"PRIu64
1444 " got %"PRIu64"\n", __LINE__, dev_names[i],
1445 dev_expected[i], val);
1450 /* 48 is stat offset from start of the devices whole xstats.
1451 * This WILL break every time we add a statistic to a port
1452 * or the device, but there is no other way to test
1455 /* num stats for the tested port. CQ size adds more stats to a port */
1456 #define NUM_PORT_STATS 21
1457 /* the port to test. */
1459 num_stats = rte_event_dev_xstats_names_get(evdev,
1460 RTE_EVENT_DEV_XSTATS_PORT, PORT,
1461 xstats_names, ids, XSTATS_MAX);
1462 if (num_stats != NUM_PORT_STATS) {
1463 printf("%d: expected %d stats, got return %d\n",
1464 __LINE__, NUM_PORT_STATS, num_stats);
1467 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT,
1468 ids, values, num_stats);
1470 if (ret != NUM_PORT_STATS) {
1471 printf("%d: expected %d stats, got return %d\n",
1472 __LINE__, NUM_PORT_STATS, ret);
1475 static const char * const port_names[] = {
1480 "port_2_avg_pkt_cycles",
1482 "port_2_rx_ring_used",
1483 "port_2_rx_ring_free",
1484 "port_2_cq_ring_used",
1485 "port_2_cq_ring_free",
1486 "port_2_dequeue_calls",
1487 "port_2_dequeues_returning_0",
1488 "port_2_dequeues_returning_1-4",
1489 "port_2_dequeues_returning_5-8",
1490 "port_2_dequeues_returning_9-12",
1491 "port_2_dequeues_returning_13-16",
1492 "port_2_dequeues_returning_17-20",
1493 "port_2_dequeues_returning_21-24",
1494 "port_2_dequeues_returning_25-28",
1495 "port_2_dequeues_returning_29-32",
1496 "port_2_dequeues_returning_33-36",
1498 uint64_t port_expected[] = {
1502 NPKTS, /* inflight */
1503 0, /* avg pkt cycles */
1505 0, /* rx ring used */
1506 4096, /* rx ring free */
1507 NPKTS, /* cq ring used */
1508 25, /* cq ring free */
1509 0, /* dequeue zero calls */
1510 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1513 uint64_t port_expected_zero[] = {
1517 NPKTS, /* inflight */
1518 0, /* avg pkt cycles */
1520 0, /* rx ring used */
1521 4096, /* rx ring free */
1522 NPKTS, /* cq ring used */
1523 25, /* cq ring free */
1524 0, /* dequeue zero calls */
1525 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1528 if (RTE_DIM(port_expected) != NUM_PORT_STATS ||
1529 RTE_DIM(port_names) != NUM_PORT_STATS) {
1530 printf("%d: port array of wrong size\n", __LINE__);
1535 for (i = 0; (int)i < ret; i++) {
1537 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1540 if (id != i + PORT_OFF) {
1541 printf("%d: %s id incorrect, expected %d got %d\n",
1542 __LINE__, port_names[i], i+PORT_OFF,
1546 if (val != port_expected[i]) {
1547 printf("%d: %s value incorrect, expected %"PRIu64
1548 " got %d\n", __LINE__, port_names[i],
1549 port_expected[i], id);
1553 int reset_ret = rte_event_dev_xstats_reset(evdev,
1554 RTE_EVENT_DEV_XSTATS_PORT, PORT,
1558 printf("%d: failed to reset successfully\n", __LINE__);
1561 /* check value again */
1562 val = rte_event_dev_xstats_by_name_get(evdev, port_names[i], 0);
1563 if (val != port_expected_zero[i]) {
1564 printf("%d: %s value incorrect, expected %"PRIu64
1565 " got %"PRIu64"\n", __LINE__, port_names[i],
1566 port_expected_zero[i], val);
1573 /* num queue stats */
1574 #define NUM_Q_STATS 13
1575 /* queue offset from start of the devices whole xstats.
1576 * This will break every time we add a statistic to a device/port/queue
1578 #define QUEUE_OFF 90
1579 const uint32_t queue = 0;
1580 num_stats = rte_event_dev_xstats_names_get(evdev,
1581 RTE_EVENT_DEV_XSTATS_QUEUE, queue,
1582 xstats_names, ids, XSTATS_MAX);
1583 if (num_stats != NUM_Q_STATS) {
1584 printf("%d: expected %d stats, got return %d\n",
1585 __LINE__, NUM_Q_STATS, num_stats);
1588 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1589 queue, ids, values, num_stats);
1590 if (ret != NUM_Q_STATS) {
1591 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
1594 static const char * const queue_names[] = {
1604 "qid_0_port_0_pinned_flows",
1605 "qid_0_port_1_pinned_flows",
1606 "qid_0_port_2_pinned_flows",
1607 "qid_0_port_3_pinned_flows",
1609 uint64_t queue_expected[] = {
1619 0, /* qid 0 port 0 pinned flows */
1620 0, /* qid 0 port 1 pinned flows */
1621 1, /* qid 0 port 2 pinned flows */
1622 0, /* qid 0 port 4 pinned flows */
1624 uint64_t queue_expected_zero[] = {
1634 0, /* qid 0 port 0 pinned flows */
1635 0, /* qid 0 port 1 pinned flows */
1636 1, /* qid 0 port 2 pinned flows */
1637 0, /* qid 0 port 4 pinned flows */
1639 if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
1640 RTE_DIM(queue_names) != NUM_Q_STATS) {
1641 printf("%d : queue array of wrong size\n", __LINE__);
1646 for (i = 0; (int)i < ret; i++) {
1648 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1651 if (id != i + QUEUE_OFF) {
1652 printf("%d: %s id incorrect, expected %d got %d\n",
1653 __LINE__, queue_names[i], i+QUEUE_OFF,
1657 if (val != queue_expected[i]) {
1658 printf("%d: %s value incorrect, expected %"PRIu64
1659 " got %d\n", __LINE__, queue_names[i],
1660 queue_expected[i], id);
1664 int reset_ret = rte_event_dev_xstats_reset(evdev,
1665 RTE_EVENT_DEV_XSTATS_QUEUE,
1668 printf("%d: failed to reset successfully\n", __LINE__);
1671 /* check value again */
1672 val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i],
1674 if (val != queue_expected_zero[i]) {
1675 printf("%d: %s value incorrect, expected %"PRIu64
1676 " got %"PRIu64"\n", __LINE__, queue_names[i],
1677 queue_expected_zero[i], val);
/* Verify that the same ordered queue id can be set up twice in a row
 * (i.e. a queue may be reconfigured before device start) without error.
 * NOTE(review): this extract omits some lines (error returns, cleanup);
 * comments below describe only the visible code.
 */
1693 ordered_reconfigure(struct test *t)
1695 if (init(t, 1, 1) < 0 ||
1696 create_ports(t, 1) < 0) {
1697 printf("%d: Error initializing device\n", __LINE__);
/* Ordered-only queue config; atomic flow/sequence counts are defaults. */
1701 const struct rte_event_queue_conf conf = {
1702 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY,
1703 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1704 .nb_atomic_flows = 1024,
1705 .nb_atomic_order_sequences = 1024,
/* First setup of queue id 0. */
1708 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1709 printf("%d: error creating qid\n", __LINE__);
/* Second setup of the SAME queue id must also succeed — this is the
 * reconfigure case under test.
 */
1713 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1714 printf("%d: error creating qid, for 2nd time\n", __LINE__);
/* NULL queue list links the port to all configured queues. */
1718 rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1719 if (rte_event_dev_start(evdev) < 0) {
1720 printf("%d: Error with start call\n", __LINE__);
/* Check that QID priority (not ingress order) determines dequeue order.
 * NOTE(review): some lines (error returns, enqueue setup) are missing
 * from this extract.
 */
1732 qid_priorities(struct test *t)
1734 /* Test works by having a CQ with enough empty space for all packets,
1735 * and enqueueing 3 packets to 3 QIDs. They must return based on the
1736 * priority of the QID, not the ingress order, to pass the test
1739 /* Create instance with 1 ports, and 3 qids */
1740 if (init(t, 3, 1) < 0 ||
1741 create_ports(t, 1) < 0) {
1742 printf("%d: Error initializing device\n", __LINE__);
/* Create 3 atomic queues, each with a different priority. */
1746 for (i = 0; i < 3; i++) {
1748 const struct rte_event_queue_conf conf = {
1749 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
1750 /* increase priority (0 == highest), as we go */
1751 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
1752 .nb_atomic_flows = 1024,
1753 .nb_atomic_order_sequences = 1024,
1756 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
1757 printf("%d: error creating qid %d\n", __LINE__, i);
1763 /* map all QIDs to port */
1764 rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1766 if (rte_event_dev_start(evdev) < 0) {
1767 printf("%d: Error with start call\n", __LINE__);
1771 /* enqueue 3 packets, setting seqn and QID to check priority */
1772 for (i = 0; i < 3; i++) {
1773 struct rte_event ev;
1774 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1776 printf("%d: gen of pkt failed\n", __LINE__);
/* Packet i goes to queue i; highest-numbered queue has highest prio. */
1779 ev.queue_id = t->qid[i];
1780 ev.op = RTE_EVENT_OP_NEW;
1784 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1786 printf("%d: Failed to enqueue\n", __LINE__);
/* Run the scheduler so the enqueued events reach the CQ. */
1791 rte_event_schedule(evdev);
1793 /* dequeue packets, verify priority was upheld */
1794 struct rte_event ev[32];
1796 rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
1797 if (deq_pkts != 3) {
1798 printf("%d: failed to deq packets\n", __LINE__);
1799 rte_event_dev_dump(evdev, stdout);
/* Expect reverse enqueue order: seqn 2, 1, 0 (highest priority first). */
1802 for (i = 0; i < 3; i++) {
1803 if (ev[i].mbuf->seqn != 2-i) {
1805 "%d: qid priority test: seqn %d incorrectly prioritized\n",
/* Check that flows are spread across the three worker ports linked to
 * one atomic QID, by verifying per-port inflight counts after enqueue.
 * NOTE(review): error-return lines are missing from this extract.
 */
1815 load_balancing(struct test *t)
1817 const int rx_enq = 0;
1821 if (init(t, 1, 4) < 0 ||
1822 create_ports(t, 4) < 0 ||
1823 create_atomic_qids(t, 1) < 0) {
1824 printf("%d: Error initializing device\n", __LINE__);
1828 for (i = 0; i < 3; i++) {
1829 /* map port 1 - 3 inclusive */
1830 if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
1832 printf("%d: error mapping qid to port %d\n",
1838 if (rte_event_dev_start(evdev) < 0) {
1839 printf("%d: Error with start call\n", __LINE__);
1843 /************** FORWARD ****************/
1845 * Create a set of flows that test the load-balancing operation of the
1846 * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
1847 * with a new flow, which should be sent to the 3rd mapped CQ
1849 static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
1851 for (i = 0; i < RTE_DIM(flows); i++) {
1852 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1854 printf("%d: gen of pkt failed\n", __LINE__);
/* flow_id drives the load-balancing decision in the scheduler. */
1858 struct rte_event ev = {
1859 .op = RTE_EVENT_OP_NEW,
1860 .queue_id = t->qid[0],
1861 .flow_id = flows[i],
1864 /* generate pkt and enqueue */
1865 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1867 printf("%d: Failed to enqueue\n", __LINE__);
/* Run the scheduler so events are distributed to the worker CQs. */
1872 rte_event_schedule(evdev);
1874 struct test_event_dev_stats stats;
1875 err = test_event_dev_stats_get(evdev, &stats);
1877 printf("%d: failed to get stats\n", __LINE__);
/* Expected split for the 9 pkts above: 4 / 2 / 3 on ports 1 / 2 / 3. */
1881 if (stats.port_inflight[1] != 4) {
1882 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
1886 if (stats.port_inflight[2] != 2) {
1887 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
1891 if (stats.port_inflight[3] != 3) {
1892 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
/* Check flow-migration behaviour: once all packets of a flow complete,
 * new packets for that flow may be re-balanced onto a different CQ.
 * NOTE(review): several lines (error returns, declarations) are missing
 * from this extract.
 */
1902 load_balancing_history(struct test *t)
1904 struct test_event_dev_stats stats = {0};
1905 const int rx_enq = 0;
1909 /* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
1910 if (init(t, 1, 4) < 0 ||
1911 create_ports(t, 4) < 0 ||
1912 create_atomic_qids(t, 1) < 0)
1915 /* CQ mapping to QID */
1916 if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
1917 printf("%d: error mapping port 1 qid\n", __LINE__);
1920 if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
1921 printf("%d: error mapping port 2 qid\n", __LINE__);
1924 if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
1925 printf("%d: error mapping port 3 qid\n", __LINE__);
1928 if (rte_event_dev_start(evdev) < 0) {
1929 printf("%d: Error with start call\n", __LINE__);
1934 * Create a set of flows that test the load-balancing operation of the
1935 * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
1936 * the packet from CQ 0, send in a new set of flows. Ensure that:
1937 * 1. The new flow 3 gets into the empty CQ0
1938 * 2. packets for existing flow gets added into CQ1
1939 * 3. Next flow 0 pkt is now onto CQ2, since CQ0 and CQ1 now contain
1940 * more outstanding pkts
1942 * This test makes sure that when a flow ends (i.e. all packets
1943 * have been completed for that flow), that the flow can be moved
1944 * to a different CQ when new packets come in for that flow.
1946 static uint32_t flows1[] = {0, 1, 1, 2};
1948 for (i = 0; i < RTE_DIM(flows1); i++) {
1949 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1950 struct rte_event ev = {
1951 .flow_id = flows1[i],
1952 .op = RTE_EVENT_OP_NEW,
1953 .queue_id = t->qid[0],
1954 .event_type = RTE_EVENT_TYPE_CPU,
1955 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1960 printf("%d: gen of pkt failed\n", __LINE__);
/* Mirror the flow id into the mbuf RSS hash so it can be checked
 * on dequeue below.
 */
1963 arp->hash.rss = flows1[i];
1964 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1966 printf("%d: Failed to enqueue\n", __LINE__);
1971 /* call the scheduler */
1972 rte_event_schedule(evdev);
1974 /* Dequeue the flow 0 packet from port 1, so that we can then drop */
1975 struct rte_event ev;
1976 if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
1977 printf("%d: failed to dequeue\n", __LINE__);
1980 if (ev.mbuf->hash.rss != flows1[0]) {
1981 printf("%d: unexpected flow received\n", __LINE__);
1985 /* drop the flow 0 packet from port 1 */
1986 rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
1988 /* call the scheduler */
1989 rte_event_schedule(evdev);
1992 * Set up the next set of flows, first a new flow to fill up
1993 * CQ 0, so that the next flow 0 packet should go to CQ2
1995 static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
1997 for (i = 0; i < RTE_DIM(flows2); i++) {
1998 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1999 struct rte_event ev = {
2000 .flow_id = flows2[i],
2001 .op = RTE_EVENT_OP_NEW,
2002 .queue_id = t->qid[0],
2003 .event_type = RTE_EVENT_TYPE_CPU,
2004 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2009 printf("%d: gen of pkt failed\n", __LINE__);
2012 arp->hash.rss = flows2[i];
2014 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2016 printf("%d: Failed to enqueue\n", __LINE__);
/* Schedule the second batch before checking inflight distribution. */
2022 rte_event_schedule(evdev);
2024 err = test_event_dev_stats_get(evdev, &stats);
2026 printf("%d:failed to get stats\n", __LINE__);
2031 * Now check the resulting inflights on each port.
/* Expected inflights after the drop + second batch: 3 / 4 / 2. */
2033 if (stats.port_inflight[1] != 3) {
2034 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2036 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2037 (unsigned int)stats.port_inflight[1],
2038 (unsigned int)stats.port_inflight[2],
2039 (unsigned int)stats.port_inflight[3]);
2042 if (stats.port_inflight[2] != 4) {
2043 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2045 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2046 (unsigned int)stats.port_inflight[1],
2047 (unsigned int)stats.port_inflight[2],
2048 (unsigned int)stats.port_inflight[3]);
2051 if (stats.port_inflight[3] != 2) {
2052 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2054 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2055 (unsigned int)stats.port_inflight[1],
2056 (unsigned int)stats.port_inflight[2],
2057 (unsigned int)stats.port_inflight[3]);
/* Drain and release all remaining events from each worker port. */
2061 for (i = 1; i <= 3; i++) {
2062 struct rte_event ev;
2063 while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
2064 rte_event_enqueue_burst(evdev, i, &release_ev, 1);
2066 rte_event_schedule(evdev);
/* Enqueue an event with an out-of-range queue id and verify it is
 * dropped: port inflight stays 0, the port rx_dropped counter becomes 1,
 * and the device-level rx_dropped stays 0 (no double counting).
 * NOTE(review): error-return lines are missing from this extract.
 */
2073 invalid_qid(struct test *t)
2075 struct test_event_dev_stats stats;
2076 const int rx_enq = 0;
2080 if (init(t, 1, 4) < 0 ||
2081 create_ports(t, 4) < 0 ||
2082 create_atomic_qids(t, 1) < 0) {
2083 printf("%d: Error initializing device\n", __LINE__);
2087 /* CQ mapping to QID */
2088 for (i = 0; i < 4; i++) {
2089 err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
/* NOTE(review): message says "port 1" for every i — looks like a
 * copy/paste; left unchanged as it is a runtime string.
 */
2092 printf("%d: error mapping port 1 qid\n", __LINE__);
2097 if (rte_event_dev_start(evdev) < 0) {
2098 printf("%d: Error with start call\n", __LINE__);
2103 * Send in a packet with an invalid qid to the scheduler.
2104 * We should see the packed enqueued OK, but the inflights for
2105 * that packet should not be incremented, and the rx_dropped
2106 * should be incremented.
2108 static uint32_t flows1[] = {20};
2110 for (i = 0; i < RTE_DIM(flows1); i++) {
2111 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2113 printf("%d: gen of pkt failed\n", __LINE__);
/* queue_id = valid qid + 20 → deliberately invalid. */
2117 struct rte_event ev = {
2118 .op = RTE_EVENT_OP_NEW,
2119 .queue_id = t->qid[0] + flows1[i],
2123 /* generate pkt and enqueue */
2124 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2126 printf("%d: Failed to enqueue\n", __LINE__);
2131 /* call the scheduler */
2132 rte_event_schedule(evdev);
2134 err = test_event_dev_stats_get(evdev, &stats);
2136 printf("%d: failed to get stats\n", __LINE__);
2141 * Now check the resulting inflights on the port, and the rx_dropped.
2143 if (stats.port_inflight[0] != 0) {
2144 printf("%d:%s: port 1 inflight count not correct\n", __LINE__,
2146 rte_event_dev_dump(evdev, stdout);
2149 if (stats.port_rx_dropped[0] != 1) {
2150 printf("%d:%s: port 1 drops\n", __LINE__, __func__);
2151 rte_event_dev_dump(evdev, stdout);
2154 /* each packet drop should only be counted in one place - port or dev */
2155 if (stats.rx_dropped != 0) {
2156 printf("%d:%s: port 1 dropped count not correct\n", __LINE__,
2158 rte_event_dev_dump(evdev, stdout);
/* Single load-balanced packet round trip: enqueue one tagged packet,
 * schedule, verify rx/tx/inflight stats, dequeue it on the worker port,
 * check the magic seqn, release it, and verify inflight returns to 0.
 * NOTE(review): error-return lines are missing from this extract.
 */
2167 single_packet(struct test *t)
2169 const uint32_t MAGIC_SEQN = 7321;
2170 struct rte_event ev;
2171 struct test_event_dev_stats stats;
2172 const int rx_enq = 0;
2173 const int wrk_enq = 2;
2176 /* Create instance with 4 ports */
2177 if (init(t, 1, 4) < 0 ||
2178 create_ports(t, 4) < 0 ||
2179 create_atomic_qids(t, 1) < 0) {
2180 printf("%d: Error initializing device\n", __LINE__);
2184 /* CQ mapping to QID */
2185 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
2187 printf("%d: error mapping lb qid\n", __LINE__);
2192 if (rte_event_dev_start(evdev) < 0) {
2193 printf("%d: Error with start call\n", __LINE__);
2197 /************** Gen pkt and enqueue ****************/
2198 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2200 printf("%d: gen of pkt failed\n", __LINE__);
2204 ev.op = RTE_EVENT_OP_NEW;
2205 ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
/* Tag the mbuf so we can recognise it after the round trip. */
2209 arp->seqn = MAGIC_SEQN;
2211 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2213 printf("%d: Failed to enqueue\n", __LINE__);
2217 rte_event_schedule(evdev);
2219 err = test_event_dev_stats_get(evdev, &stats);
2221 printf("%d: failed to get stats\n", __LINE__);
/* One packet in, one scheduled out, one inflight on the worker port. */
2225 if (stats.rx_pkts != 1 ||
2226 stats.tx_pkts != 1 ||
2227 stats.port_inflight[wrk_enq] != 1) {
2228 printf("%d: Sched core didn't handle pkt as expected\n",
2230 rte_event_dev_dump(evdev, stdout);
2236 deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
2238 printf("%d: Failed to deq\n", __LINE__);
2242 err = test_event_dev_stats_get(evdev, &stats);
2244 printf("%d: failed to get stats\n", __LINE__);
/* NOTE(review): stats are fetched again immediately below — appears
 * redundant with the call above; verify against the full source.
 */
2248 err = test_event_dev_stats_get(evdev, &stats);
2249 if (ev.mbuf->seqn != MAGIC_SEQN) {
2250 printf("%d: magic sequence number not dequeued\n", __LINE__);
/* Free the mbuf, then release the event to decrement inflight. */
2254 rte_pktmbuf_free(ev.mbuf);
2255 err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
2257 printf("%d: Failed to enqueue\n", __LINE__);
2260 rte_event_schedule(evdev);
2262 err = test_event_dev_stats_get(evdev, &stats);
2263 if (stats.port_inflight[wrk_enq] != 0) {
2264 printf("%d: port inflight not correct\n", __LINE__);
/* Verify per-port inflight accounting across two QIDs / two worker
 * ports: inflights rise on enqueue+schedule, are NOT decremented by
 * dequeue alone, and only drop to zero after RELEASE events are
 * enqueued and the scheduler runs.
 * NOTE(review): error-return lines and some declarations (p1, p2,
 * QID1_NUM, QID2_NUM) are missing from this extract.
 */
2273 inflight_counts(struct test *t)
2275 struct rte_event ev;
2276 struct test_event_dev_stats stats;
2277 const int rx_enq = 0;
2283 /* Create instance with 4 ports */
2284 if (init(t, 2, 3) < 0 ||
2285 create_ports(t, 3) < 0 ||
2286 create_atomic_qids(t, 2) < 0) {
2287 printf("%d: Error initializing device\n", __LINE__);
2291 /* CQ mapping to QID */
2292 err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
2294 printf("%d: error mapping lb qid\n", __LINE__);
2298 err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
2300 printf("%d: error mapping lb qid\n", __LINE__);
2305 if (rte_event_dev_start(evdev) < 0) {
2306 printf("%d: Error with start call\n", __LINE__);
2310 /************** FORWARD ****************/
/* Enqueue QID1_NUM events to qid[0] (serviced by p1)... */
2312 for (i = 0; i < QID1_NUM; i++) {
2313 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2316 printf("%d: gen of pkt failed\n", __LINE__);
2320 ev.queue_id = t->qid[0];
2321 ev.op = RTE_EVENT_OP_NEW;
2323 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2325 printf("%d: Failed to enqueue\n", __LINE__);
/* ...and QID2_NUM events to qid[1] (serviced by p2). */
2330 for (i = 0; i < QID2_NUM; i++) {
2331 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2334 printf("%d: gen of pkt failed\n", __LINE__);
2337 ev.queue_id = t->qid[1];
2338 ev.op = RTE_EVENT_OP_NEW;
2340 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2342 printf("%d: Failed to enqueue\n", __LINE__);
2348 rte_event_schedule(evdev);
2350 err = test_event_dev_stats_get(evdev, &stats);
2352 printf("%d: failed to get stats\n", __LINE__);
2356 if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
2357 stats.tx_pkts != QID1_NUM + QID2_NUM) {
2358 printf("%d: Sched core didn't handle pkt as expected\n",
2363 if (stats.port_inflight[p1] != QID1_NUM) {
2364 printf("%d: %s port 1 inflight not correct\n", __LINE__,
2368 if (stats.port_inflight[p2] != QID2_NUM) {
2369 printf("%d: %s port 2 inflight not correct\n", __LINE__,
2374 /************** DEQUEUE INFLIGHT COUNT CHECKS ****************/
2376 struct rte_event events[QID1_NUM + QID2_NUM];
2377 uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
2378 RTE_DIM(events), 0);
2380 if (deq_pkts != QID1_NUM) {
2381 printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
/* Dequeue alone must NOT decrement inflight — count stays QID1_NUM. */
2384 err = test_event_dev_stats_get(evdev, &stats);
2385 if (stats.port_inflight[p1] != QID1_NUM) {
2386 printf("%d: port 1 inflight decrement after DEQ != 0\n",
/* Release every event held by p1. */
2390 for (i = 0; i < QID1_NUM; i++) {
2391 err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
2394 printf("%d: %s rte enqueue of inf release failed\n",
2395 __LINE__, __func__);
2401 * As the scheduler core decrements inflights, it needs to run to
2402 * process packets to act on the drop messages
2404 rte_event_schedule(evdev);
2406 err = test_event_dev_stats_get(evdev, &stats);
2407 if (stats.port_inflight[p1] != 0) {
2408 printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
/* Repeat the same dequeue/release sequence for p2. */
2413 deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
2414 RTE_DIM(events), 0);
2415 if (deq_pkts != QID2_NUM) {
2416 printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
2419 err = test_event_dev_stats_get(evdev, &stats);
2420 if (stats.port_inflight[p2] != QID2_NUM) {
/* NOTE(review): message says "port 1" but this check is for p2 —
 * copy/paste in the message text; left unchanged (runtime string).
 */
2421 printf("%d: port 1 inflight decrement after DEQ != 0\n",
2425 for (i = 0; i < QID2_NUM; i++) {
2426 err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
2429 printf("%d: %s rte enqueue of inf release failed\n",
2430 __LINE__, __func__);
2436 * As the scheduler core decrements inflights, it needs to run to
2437 * process packets to act on the drop messages
2439 rte_event_schedule(evdev);
2441 err = test_event_dev_stats_get(evdev, &stats);
2442 if (stats.port_inflight[p2] != 0) {
2443 printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
2450 rte_event_dev_dump(evdev, stdout);
2456 parallel_basic(struct test *t, int check_order)
2458 const uint8_t rx_port = 0;
2459 const uint8_t w1_port = 1;
2460 const uint8_t w3_port = 3;
2461 const uint8_t tx_port = 4;
2464 uint32_t deq_pkts, j;
2465 struct rte_mbuf *mbufs[3];
2466 struct rte_mbuf *mbufs_out[3];
2467 const uint32_t MAGIC_SEQN = 1234;
2469 /* Create instance with 4 ports */
2470 if (init(t, 2, tx_port + 1) < 0 ||
2471 create_ports(t, tx_port + 1) < 0 ||
2472 (check_order ? create_ordered_qids(t, 1) :
2473 create_unordered_qids(t, 1)) < 0 ||
2474 create_directed_qids(t, 1, &tx_port)) {
2475 printf("%d: Error initializing device\n", __LINE__);
2481 * We need three ports, all mapped to the same ordered qid0. Then we'll
2482 * take a packet out to each port, re-enqueue in reverse order,
2483 * then make sure the reordering has taken place properly when we
2484 * dequeue from the tx_port.
2486 * Simplified test setup diagram:
2490 * qid0 - w2_port - qid1
2494 /* CQ mapping to QID for LB ports (directed mapped on create) */
2495 for (i = w1_port; i <= w3_port; i++) {
2496 err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
2499 printf("%d: error mapping lb qid\n", __LINE__);
2505 if (rte_event_dev_start(evdev) < 0) {
2506 printf("%d: Error with start call\n", __LINE__);
2510 /* Enqueue 3 packets to the rx port */
2511 for (i = 0; i < 3; i++) {
2512 struct rte_event ev;
2513 mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
2515 printf("%d: gen of pkt failed\n", __LINE__);
2519 ev.queue_id = t->qid[0];
2520 ev.op = RTE_EVENT_OP_NEW;
2522 mbufs[i]->seqn = MAGIC_SEQN + i;
2524 /* generate pkt and enqueue */
2525 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
2527 printf("%d: Failed to enqueue pkt %u, retval = %u\n",
2533 rte_event_schedule(evdev);
2535 /* use extra slot to make logic in loops easier */
2536 struct rte_event deq_ev[w3_port + 1];
2538 /* Dequeue the 3 packets, one from each worker port */
2539 for (i = w1_port; i <= w3_port; i++) {
2540 deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
2542 if (deq_pkts != 1) {
2543 printf("%d: Failed to deq\n", __LINE__);
2544 rte_event_dev_dump(evdev, stdout);
2549 /* Enqueue each packet in reverse order, flushing after each one */
2550 for (i = w3_port; i >= w1_port; i--) {
2552 deq_ev[i].op = RTE_EVENT_OP_FORWARD;
2553 deq_ev[i].queue_id = t->qid[1];
2554 err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
2556 printf("%d: Failed to enqueue\n", __LINE__);
2560 rte_event_schedule(evdev);
2562 /* dequeue from the tx ports, we should get 3 packets */
2563 deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
2566 /* Check to see if we've got all 3 packets */
2567 if (deq_pkts != 3) {
2568 printf("%d: expected 3 pkts at tx port got %d from port %d\n",
2569 __LINE__, deq_pkts, tx_port);
2570 rte_event_dev_dump(evdev, stdout);
2574 /* Check to see if the sequence numbers are in expected order */
2576 for (j = 0 ; j < deq_pkts ; j++) {
2577 if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
2579 "%d: Incorrect sequence number(%d) from port %d\n",
2580 __LINE__, mbufs_out[j]->seqn, tx_port);
2586 /* Destroy the instance */
/* Ordered-queue variant of the basic 3-packet test (check_order = 1). */
2592 ordered_basic(struct test *t)
2594 return parallel_basic(t, 1);
/* Unordered-queue variant of the basic 3-packet test (check_order = 0). */
2598 unordered_basic(struct test *t)
2600 return parallel_basic(t, 0);
/* mbuf pool shared across test runs; created once in test_sw_eventdev()
 * and deliberately kept alive between invocations.
 */
2603 static struct rte_mempool *eventdev_func_mempool;
/* Top-level test driver: locate (or create via vdev) the "event_sw0"
 * device, set up the shared mbuf pool once, then run each sub-test in
 * sequence, printing a banner before and an error after each failure.
 * NOTE(review): error-handling/goto lines are missing from this extract.
 */
2606 test_sw_eventdev(void)
/* NOTE(review): malloc result is used below; the NULL check, if any,
 * is in the elided lines — confirm against the full source.
 */
2608 struct test *t = malloc(sizeof(struct test));
2611 /* manually initialize the op, older gcc's complain on static
2612 * initialization of struct elements that are a bitfield.
2614 release_ev.op = RTE_EVENT_OP_RELEASE;
2616 const char *eventdev_name = "event_sw0";
2617 evdev = rte_event_dev_get_dev_id(eventdev_name);
2619 printf("%d: Eventdev %s not found - creating.\n",
2620 __LINE__, eventdev_name);
/* Create the software eventdev as a virtual device on demand. */
2621 if (rte_eal_vdev_init(eventdev_name, NULL) < 0) {
2622 printf("Error creating eventdev\n");
2625 evdev = rte_event_dev_get_dev_id(eventdev_name);
2627 printf("Error finding newly created eventdev\n");
2632 /* Only create mbuf pool once, reuse for each test run */
2633 if (!eventdev_func_mempool) {
2634 eventdev_func_mempool = rte_pktmbuf_pool_create(
2635 "EVENTDEV_SW_SA_MBUF_POOL",
2636 (1<<12), /* 4k buffers */
2637 32 /*MBUF_CACHE_SIZE*/,
2639 512, /* use very small mbufs */
2641 if (!eventdev_func_mempool) {
2642 printf("ERROR creating mempool\n");
2646 t->mbuf_pool = eventdev_func_mempool;
/* Run each sub-test; elided lines between each pair of printf()s
 * presumably check ret and bail on failure.
 */
2648 printf("*** Running Single Directed Packet test...\n");
2649 ret = test_single_directed_packet(t);
2651 printf("ERROR - Single Directed Packet test FAILED.\n");
2654 printf("*** Running Single Load Balanced Packet test...\n");
2655 ret = single_packet(t);
2657 printf("ERROR - Single Packet test FAILED.\n");
2660 printf("*** Running Unordered Basic test...\n");
2661 ret = unordered_basic(t);
2663 printf("ERROR - Unordered Basic test FAILED.\n");
2666 printf("*** Running Ordered Basic test...\n");
2667 ret = ordered_basic(t);
2669 printf("ERROR - Ordered Basic test FAILED.\n");
2672 printf("*** Running Burst Packets test...\n");
2673 ret = burst_packets(t);
2675 printf("ERROR - Burst Packets test FAILED.\n");
2678 printf("*** Running Load Balancing test...\n");
2679 ret = load_balancing(t);
2681 printf("ERROR - Load Balancing test FAILED.\n");
2684 printf("*** Running Prioritized Directed test...\n");
2685 ret = test_priority_directed(t);
2687 printf("ERROR - Prioritized Directed test FAILED.\n");
2690 printf("*** Running Prioritized Atomic test...\n");
2691 ret = test_priority_atomic(t);
2693 printf("ERROR - Prioritized Atomic test FAILED.\n");
2697 printf("*** Running Prioritized Ordered test...\n");
2698 ret = test_priority_ordered(t);
2700 printf("ERROR - Prioritized Ordered test FAILED.\n");
2703 printf("*** Running Prioritized Unordered test...\n");
2704 ret = test_priority_unordered(t);
2706 printf("ERROR - Prioritized Unordered test FAILED.\n");
2709 printf("*** Running Invalid QID test...\n");
2710 ret = invalid_qid(t);
2712 printf("ERROR - Invalid QID test FAILED.\n");
2715 printf("*** Running Load Balancing History test...\n");
2716 ret = load_balancing_history(t);
2718 printf("ERROR - Load Balancing History test FAILED.\n");
2721 printf("*** Running Inflight Count test...\n");
2722 ret = inflight_counts(t);
2724 printf("ERROR - Inflight Count test FAILED.\n");
2727 printf("*** Running Abuse Inflights test...\n");
2728 ret = abuse_inflights(t);
2730 printf("ERROR - Abuse Inflights test FAILED.\n");
2733 printf("*** Running XStats test...\n");
2734 ret = xstats_tests(t);
2736 printf("ERROR - XStats test FAILED.\n");
2739 printf("*** Running XStats ID Reset test...\n");
2740 ret = xstats_id_reset_tests(t);
2742 printf("ERROR - XStats ID Reset test FAILED.\n");
2745 printf("*** Running XStats Brute Force test...\n");
2746 ret = xstats_brute_force(t);
2748 printf("ERROR - XStats Brute Force test FAILED.\n");
2751 printf("*** Running XStats ID Abuse test...\n");
2752 ret = xstats_id_abuse_tests(t);
2754 printf("ERROR - XStats ID Abuse test FAILED.\n");
2757 printf("*** Running QID Priority test...\n");
2758 ret = qid_priorities(t);
2760 printf("ERROR - QID Priority test FAILED.\n");
2763 printf("*** Running Ordered Reconfigure test...\n");
2764 ret = ordered_reconfigure(t);
2766 printf("ERROR - Ordered Reconfigure test FAILED.\n");
2769 printf("*** Running Port LB Single Reconfig test...\n");
2770 ret = port_single_lb_reconfig(t);
2772 printf("ERROR - Port LB Single Reconfig test FAILED.\n");
2775 printf("*** Running Port Reconfig Credits test...\n");
2776 ret = port_reconfig_credits(t);
2778 printf("ERROR - Port Reconfig Credits Reset test FAILED.\n");
2782 * Free test instance, leaving mempool initialized, and a pointer to it
2783 * in static eventdev_func_mempool, as it is re-used on re-runs
/* Register the suite with the DPDK test framework as "eventdev_sw_autotest". */
2790 REGISTER_TEST_COMMAND(eventdev_sw_autotest, test_sw_eventdev);