4 * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 #include <sys/queue.h>
41 #include <rte_memory.h>
42 #include <rte_memzone.h>
43 #include <rte_launch.h>
45 #include <rte_per_lcore.h>
46 #include <rte_lcore.h>
47 #include <rte_debug.h>
48 #include <rte_ethdev.h>
49 #include <rte_cycles.h>
51 #include <rte_eventdev.h>
56 #define NUM_PACKETS (1<<18)
/* NOTE(review): mbuf_pool/port[]/qid[] look like members of a larger
 * struct (likely `struct test`) whose opening brace and the MAX_PORTS /
 * MAX_QIDS defines are outside this view -- confirm in the full file. */
61 struct rte_mempool *mbuf_pool;
62 uint8_t port[MAX_PORTS];
63 uint8_t qid[MAX_QIDS];
/* Pre-built event used to release held credits; presumably initialized
 * elsewhere with op = RTE_EVENT_OP_RELEASE -- verify against full file. */
67 static struct rte_event release_ev;
/* Allocate an mbuf from @mp and fill it with a canned broadcast ARP
 * request frame (template below). @portid is unused in the visible code.
 * Returns the filled mbuf; the allocation-failure branch is outside this
 * view. */
69 static inline struct rte_mbuf *
70 rte_gen_arp(int portid, struct rte_mempool *mp)
74 * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
76 static const uint8_t arp_request[] = {
77 /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
78 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
79 /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
80 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
81 /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
82 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
83 /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00
/* NOTE(review): pkt_len drops the final template byte (sizeof - 1);
 * looks deliberate in the original but worth confirming against the
 * 46-byte frame length documented above. */
87 int pkt_len = sizeof(arp_request) - 1;
89 m = rte_pktmbuf_alloc(mp);
/* Copy the template into the mbuf's data area and set both pkt_len
 * and data_len to the single-segment frame length. */
93 memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
94 arp_request, pkt_len);
95 rte_pktmbuf_pkt_len(m) = pkt_len;
96 rte_pktmbuf_data_len(m) = pkt_len;
103 /* initialization and config */
/* Configure evdev with nb_queues queues and nb_ports ports using fixed
 * flow/limit/depth settings. Zeroes *t but preserves its mbuf_pool
 * pointer (saved to temp; the restore line is outside this view). */
105 init(struct test *t, int nb_queues, int nb_ports)
107 struct rte_event_dev_config config = {
108 .nb_event_queues = nb_queues,
109 .nb_event_ports = nb_ports,
110 .nb_event_queue_flows = 1024,
111 .nb_events_limit = 4096,
112 .nb_event_port_dequeue_depth = 128,
113 .nb_event_port_enqueue_depth = 128,
117 void *temp = t->mbuf_pool; /* save and restore mbuf pool */
119 memset(t, 0, sizeof(*t));
122 ret = rte_event_dev_configure(evdev, &config);
124 printf("%d: Error configuring device\n", __LINE__);
/* Set up num_ports event ports on evdev with one shared static config;
 * counts above MAX_PORTS are rejected. Per-port setup failures print
 * the failing port index. */
129 create_ports(struct test *t, int num_ports)
132 static const struct rte_event_port_conf conf = {
133 .new_event_threshold = 1024,
137 if (num_ports > MAX_PORTS)
140 for (i = 0; i < num_ports; i++) {
141 if (rte_event_port_setup(evdev, i, &conf) < 0) {
142 printf("Error setting up port %d\n", i);
/* Create num_qids load-balanced queues whose scheduling type is chosen
 * by @flags (atomic / ordered / parallel, see wrappers below), starting
 * at the current t->nb_qids index and bumping the count afterwards. */
152 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
157 const struct rte_event_queue_conf conf = {
158 .event_queue_cfg = flags,
159 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
160 .nb_atomic_flows = 1024,
161 .nb_atomic_order_sequences = 1024,
164 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
165 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
166 printf("%d: error creating qid %d\n", __LINE__, i);
/* Track total queues created; overflow past MAX_QIDS is an error. */
171 t->nb_qids += num_qids;
172 if (t->nb_qids > MAX_QIDS)
/* Convenience wrapper: create atomic-only load-balanced queues. */
179 create_atomic_qids(struct test *t, int num_qids)
181 return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY);
/* Convenience wrapper: create ordered-only load-balanced queues. */
185 create_ordered_qids(struct test *t, int num_qids)
187 return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ORDERED_ONLY);
/* Convenience wrapper: create parallel (unordered) load-balanced queues. */
192 create_unordered_qids(struct test *t, int num_qids)
194 return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY);
/* Create num_qids single-link (directed) queues and link each new queue
 * to the corresponding entry of @ports (queue i -> ports[i - nb_qids]).
 * Updates t->nb_qids; exceeding MAX_QIDS is an error. */
198 create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
203 static const struct rte_event_queue_conf conf = {
204 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
205 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
206 .nb_atomic_flows = 1024,
207 .nb_atomic_order_sequences = 1024,
210 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
211 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
212 printf("%d: error creating qid %d\n", __LINE__, i);
/* A directed queue must link to exactly one port; expect 1 link made. */
217 if (rte_event_port_link(evdev, ports[i - t->nb_qids],
218 &t->qid[i], NULL, 1) != 1) {
219 printf("%d: error creating link for qid %d\n",
224 t->nb_qids += num_qids;
225 if (t->nb_qids > MAX_QIDS)
/* Per-test teardown: stop then close the event device. The test struct
 * itself is unused here. */
233 cleanup(struct test *t __rte_unused)
235 rte_event_dev_stop(evdev);
236 rte_event_dev_close(evdev);
/* Snapshot of device-, port- and queue-level xstats, populated by
 * test_event_dev_stats_get() below. */
240 struct test_event_dev_stats {
241 uint64_t rx_pkts; /**< Total packets received */
242 uint64_t rx_dropped; /**< Total packets dropped (Eg Invalid QID) */
243 uint64_t tx_pkts; /**< Total packets transmitted */
245 /** Packets received on this port */
246 uint64_t port_rx_pkts[MAX_PORTS];
247 /** Packets dropped on this port */
248 uint64_t port_rx_dropped[MAX_PORTS];
249 /** Packets inflight on this port */
250 uint64_t port_inflight[MAX_PORTS];
251 /** Packets transmitted on this port */
252 uint64_t port_tx_pkts[MAX_PORTS];
253 /** Packets received on this qid */
254 uint64_t qid_rx_pkts[MAX_QIDS];
255 /** Packets dropped on this qid */
256 uint64_t qid_rx_dropped[MAX_QIDS];
257 /** Packets transmitted on this qid */
258 uint64_t qid_tx_pkts[MAX_QIDS];
/* Fill *stats by looking up each named xstat ("dev_rx", "port_N_rx",
 * "qid_N_tx", ...) via rte_event_dev_xstats_by_name_get(). The id of
 * each xstat is cached in the static arrays on first lookup.
 * NOTE(review): the static id caches make this helper non-reentrant
 * across devices -- fine for a single-device test binary. */
262 test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
265 static uint32_t total_ids[3]; /* rx, tx and drop */
266 static uint32_t port_rx_pkts_ids[MAX_PORTS];
267 static uint32_t port_rx_dropped_ids[MAX_PORTS];
268 static uint32_t port_inflight_ids[MAX_PORTS];
269 static uint32_t port_tx_pkts_ids[MAX_PORTS];
270 static uint32_t qid_rx_pkts_ids[MAX_QIDS];
271 static uint32_t qid_rx_dropped_ids[MAX_QIDS];
272 static uint32_t qid_tx_pkts_ids[MAX_QIDS];
/* Device-wide totals. */
275 stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
276 "dev_rx", &total_ids[0]);
277 stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
278 "dev_drop", &total_ids[1]);
279 stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
280 "dev_tx", &total_ids[2]);
/* Per-port counters, one named xstat per field. */
281 for (i = 0; i < MAX_PORTS; i++) {
283 snprintf(name, sizeof(name), "port_%u_rx", i);
284 stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
285 dev_id, name, &port_rx_pkts_ids[i]);
286 snprintf(name, sizeof(name), "port_%u_drop", i);
287 stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
288 dev_id, name, &port_rx_dropped_ids[i]);
289 snprintf(name, sizeof(name), "port_%u_inflight", i);
290 stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
291 dev_id, name, &port_inflight_ids[i]);
292 snprintf(name, sizeof(name), "port_%u_tx", i);
293 stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
294 dev_id, name, &port_tx_pkts_ids[i]);
/* Per-queue counters. */
296 for (i = 0; i < MAX_QIDS; i++) {
298 snprintf(name, sizeof(name), "qid_%u_rx", i);
299 stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
300 dev_id, name, &qid_rx_pkts_ids[i]);
301 snprintf(name, sizeof(name), "qid_%u_drop", i);
302 stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
303 dev_id, name, &qid_rx_dropped_ids[i]);
304 snprintf(name, sizeof(name), "qid_%u_tx", i);
305 stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
306 dev_id, name, &qid_tx_pkts_ids[i]);
312 /* run_prio_packet_test
313 * This performs a basic packet priority check on the test instance passed in.
314 * It is factored out of the main priority tests as the same tests must be
315 * performed to ensure prioritization of each type of QID.
318 * - An initialized test structure, including mempool
319 * - t->port[0] is initialized for both Enq / Deq of packets to the QID
320 * - t->qid[0] is the QID to be tested
321 * - if LB QID, the CQ must be mapped to the QID.
324 run_prio_packet_test(struct test *t)
/* Two events: first NORMAL priority, second HIGHEST. The HIGHEST one
 * must dequeue first even though it was enqueued second. */
327 const uint32_t MAGIC_SEQN[] = {4711, 1234};
328 const uint32_t PRIORITY[] = {
329 RTE_EVENT_DEV_PRIORITY_NORMAL,
330 RTE_EVENT_DEV_PRIORITY_HIGHEST
333 for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
334 /* generate pkt and enqueue */
336 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
338 printf("%d: gen of pkt failed\n", __LINE__);
/* Tag each mbuf so ordering can be verified after dequeue. */
341 arp->seqn = MAGIC_SEQN[i];
343 ev = (struct rte_event){
344 .priority = PRIORITY[i],
345 .op = RTE_EVENT_OP_NEW,
346 .queue_id = t->qid[0],
349 err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
351 printf("%d: error failed to enqueue\n", __LINE__);
/* Run the scheduler so both events reach the port's CQ. */
356 rte_event_schedule(evdev);
358 struct test_event_dev_stats stats;
359 err = test_event_dev_stats_get(evdev, &stats);
361 printf("%d: error failed to get stats\n", __LINE__);
365 if (stats.port_rx_pkts[t->port[0]] != 2) {
366 printf("%d: error stats incorrect for directed port\n",
368 rte_event_dev_dump(evdev, stdout);
372 struct rte_event ev, ev2;
374 deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
376 printf("%d: error failed to deq\n", __LINE__);
377 rte_event_dev_dump(evdev, stdout);
/* First packet out must be the HIGHEST-priority one (seqn 1234). */
380 if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
381 printf("%d: first packet out not highest priority\n",
383 rte_event_dev_dump(evdev, stdout);
386 rte_pktmbuf_free(ev.mbuf);
388 deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
390 printf("%d: error failed to deq\n", __LINE__);
391 rte_event_dev_dump(evdev, stdout);
/* Second packet out must be the NORMAL-priority one (seqn 4711). */
394 if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
395 printf("%d: second packet out not lower priority\n",
397 rte_event_dev_dump(evdev, stdout);
400 rte_pktmbuf_free(ev2.mbuf);
/* Enqueue one tagged packet on a directed (single-link) queue and
 * verify it arrives at the expected worker port with correct stats and
 * its magic sequence number intact. */
407 test_single_directed_packet(struct test *t)
409 const int rx_enq = 0;
410 const int wrk_enq = 2;
413 /* Create instance with 3 directed QIDs going to 3 ports */
414 if (init(t, 3, 3) < 0 ||
415 create_ports(t, 3) < 0 ||
416 create_directed_qids(t, 3, t->port) < 0)
419 if (rte_event_dev_start(evdev) < 0) {
420 printf("%d: Error with start call\n", __LINE__);
424 /************** FORWARD ****************/
425 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
426 struct rte_event ev = {
427 .op = RTE_EVENT_OP_NEW,
433 printf("%d: gen of pkt failed\n", __LINE__);
437 const uint32_t MAGIC_SEQN = 4711;
438 arp->seqn = MAGIC_SEQN;
440 /* generate pkt and enqueue */
441 err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
443 printf("%d: error failed to enqueue\n", __LINE__);
447 /* Run schedule() as dir packets may need to be re-ordered */
448 rte_event_schedule(evdev);
450 struct test_event_dev_stats stats;
451 err = test_event_dev_stats_get(evdev, &stats);
453 printf("%d: error failed to get stats\n", __LINE__);
457 if (stats.port_rx_pkts[rx_enq] != 1) {
458 printf("%d: error stats incorrect for directed port\n",
/* The packet should be waiting on the worker port's directed queue. */
464 deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
466 printf("%d: error failed to deq\n", __LINE__);
470 err = test_event_dev_stats_get(evdev, &stats);
471 if (stats.port_rx_pkts[wrk_enq] != 0 &&
472 stats.port_rx_pkts[wrk_enq] != 1) {
473 printf("%d: error directed stats post-dequeue\n", __LINE__);
477 if (ev.mbuf->seqn != MAGIC_SEQN) {
478 printf("%d: error magic sequence number not dequeued\n",
483 rte_pktmbuf_free(ev.mbuf);
/* Priority test on a directed (single-link) queue: set up 1 port + 1
 * directed qid, start the device, then run the shared priority check. */
490 test_priority_directed(struct test *t)
492 if (init(t, 1, 1) < 0 ||
493 create_ports(t, 1) < 0 ||
494 create_directed_qids(t, 1, t->port) < 0) {
495 printf("%d: Error initializing device\n", __LINE__);
499 if (rte_event_dev_start(evdev) < 0) {
500 printf("%d: Error with start call\n", __LINE__);
504 return run_prio_packet_test(t);
/* Priority test on an atomic LB queue: the CQ must be explicitly linked
 * before starting, then run the shared priority check. */
508 test_priority_atomic(struct test *t)
510 if (init(t, 1, 1) < 0 ||
511 create_ports(t, 1) < 0 ||
512 create_atomic_qids(t, 1) < 0) {
513 printf("%d: Error initializing device\n", __LINE__);
/* map the QID to the port's CQ */
518 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
519 printf("%d: error mapping qid to port\n", __LINE__);
522 if (rte_event_dev_start(evdev) < 0) {
523 printf("%d: Error with start call\n", __LINE__);
527 return run_prio_packet_test(t);
/* Priority test on an ordered LB queue; same shape as the atomic case. */
531 test_priority_ordered(struct test *t)
533 if (init(t, 1, 1) < 0 ||
534 create_ports(t, 1) < 0 ||
535 create_ordered_qids(t, 1) < 0) {
536 printf("%d: Error initializing device\n", __LINE__);
/* map the QID to the port's CQ */
541 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
542 printf("%d: error mapping qid to port\n", __LINE__);
545 if (rte_event_dev_start(evdev) < 0) {
546 printf("%d: Error with start call\n", __LINE__);
550 return run_prio_packet_test(t);
/* Priority test on a parallel (unordered) LB queue; same shape as the
 * atomic case. */
554 test_priority_unordered(struct test *t)
556 if (init(t, 1, 1) < 0 ||
557 create_ports(t, 1) < 0 ||
558 create_unordered_qids(t, 1) < 0) {
559 printf("%d: Error initializing device\n", __LINE__);
/* map the QID to the port's CQ */
564 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
565 printf("%d: error mapping qid to port\n", __LINE__);
568 if (rte_event_dev_start(evdev) < 0) {
569 printf("%d: Error with start call\n", __LINE__);
573 return run_prio_packet_test(t);
/* Enqueue NUM_PKTS events spread over two atomic queues (each linked to
 * its own port) and verify half arrive at each port, with device rx/tx
 * stats matching the total. */
577 burst_packets(struct test *t)
579 /************** CONFIG ****************/
584 /* Create instance with 2 ports and 2 queues */
585 if (init(t, 2, 2) < 0 ||
586 create_ports(t, 2) < 0 ||
587 create_atomic_qids(t, 2) < 0) {
588 printf("%d: Error initializing device\n", __LINE__);
592 /* CQ mapping to QID */
593 ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
595 printf("%d: error mapping lb qid0\n", __LINE__);
598 ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
600 printf("%d: error mapping lb qid1\n", __LINE__);
604 if (rte_event_dev_start(evdev) < 0) {
605 printf("%d: Error with start call\n", __LINE__);
609 /************** FORWARD ****************/
610 const uint32_t rx_port = 0;
611 const uint32_t NUM_PKTS = 2;
613 for (i = 0; i < NUM_PKTS; i++) {
614 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
616 printf("%d: error generating pkt\n", __LINE__);
620 struct rte_event ev = {
621 .op = RTE_EVENT_OP_NEW,
626 /* generate pkt and enqueue */
627 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
629 printf("%d: Failed to enqueue\n", __LINE__);
633 rte_event_schedule(evdev);
635 /* Check stats for all NUM_PKTS arrived to sched core */
636 struct test_event_dev_stats stats;
638 err = test_event_dev_stats_get(evdev, &stats);
640 printf("%d: failed to get stats\n", __LINE__);
643 if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
644 printf("%d: Sched core didn't receive all %d pkts\n",
646 rte_event_dev_dump(evdev, stdout);
654 /******** DEQ QID 1 *******/
657 p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
659 rte_pktmbuf_free(ev.mbuf);
/* Each port should have received exactly half the packets. */
662 if (deq_pkts != NUM_PKTS/2) {
663 printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
668 /******** DEQ QID 2 *******/
672 p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
674 rte_pktmbuf_free(ev.mbuf);
676 if (deq_pkts != NUM_PKTS/2) {
677 printf("%d: Half of NUM_PKTS didn't arrive at port 2\n",
/* Enqueue a RELEASE op with no packet actually inflight, and verify the
 * scheduler handles it gracefully: no rx/tx counted and no inflight
 * credit tracked on the worker port. */
687 abuse_inflights(struct test *t)
689 const int rx_enq = 0;
690 const int wrk_enq = 2;
693 /* Create instance with 4 ports */
694 if (init(t, 1, 4) < 0 ||
695 create_ports(t, 4) < 0 ||
696 create_atomic_qids(t, 1) < 0) {
697 printf("%d: Error initializing device\n", __LINE__);
701 /* CQ mapping to QID */
702 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
704 printf("%d: error mapping lb qid\n", __LINE__);
709 if (rte_event_dev_start(evdev) < 0) {
710 printf("%d: Error with start call\n", __LINE__);
714 /* Enqueue op only */
715 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
717 printf("%d: Failed to enqueue\n", __LINE__);
722 rte_event_schedule(evdev);
724 struct test_event_dev_stats stats;
726 err = test_event_dev_stats_get(evdev, &stats);
728 printf("%d: failed to get stats\n", __LINE__);
/* Nothing should have been scheduled or left inflight. */
732 if (stats.rx_pkts != 0 ||
733 stats.tx_pkts != 0 ||
734 stats.port_inflight[wrk_enq] != 0) {
735 printf("%d: Sched core didn't handle pkt as expected\n",
/* Stress port reconfiguration: repeatedly set up queue+port, start the
 * device, pass one packet through, and stop -- verifying that credits
 * are not leaked across stop/reconfigure/start cycles. */
745 port_reconfig_credits(struct test *t)
747 if (init(t, 1, 1) < 0) {
748 printf("%d: Error initializing device\n", __LINE__);
753 const uint32_t NUM_ITERS = 32;
754 for (i = 0; i < NUM_ITERS; i++) {
755 const struct rte_event_queue_conf conf = {
756 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
757 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
758 .nb_atomic_flows = 1024,
759 .nb_atomic_order_sequences = 1024,
761 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
762 printf("%d: error creating qid\n", __LINE__);
767 static const struct rte_event_port_conf port_conf = {
768 .new_event_threshold = 128,
772 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
773 printf("%d Error setting up port\n", __LINE__);
/* Link with NULL queue list = link all configured queues. */
777 int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
779 printf("%d: error mapping lb qid\n", __LINE__);
783 if (rte_event_dev_start(evdev) < 0) {
784 printf("%d: Error with start call\n", __LINE__);
788 const uint32_t NPKTS = 1;
790 for (j = 0; j < NPKTS; j++) {
792 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
794 printf("%d: gen of pkt failed\n", __LINE__);
797 ev.queue_id = t->qid[0];
798 ev.op = RTE_EVENT_OP_NEW;
800 int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
802 printf("%d: Failed to enqueue\n", __LINE__);
803 rte_event_dev_dump(0, stdout);
808 rte_event_schedule(evdev);
810 struct rte_event ev[NPKTS];
811 int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
814 printf("%d error; no packet dequeued\n", __LINE__);
816 /* let cleanup below stop the device on last iter */
817 if (i != NUM_ITERS-1)
818 rte_event_dev_stop(evdev);
/* Exercise link/unlink/relink of a load-balanced queue on one port,
 * plus linking a second port, then start the device -- checks that
 * reconfiguring links before start works. */
829 port_single_lb_reconfig(struct test *t)
831 if (init(t, 2, 2) < 0) {
832 printf("%d: Error initializing device\n", __LINE__);
836 static const struct rte_event_queue_conf conf_lb_atomic = {
837 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
838 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
839 .nb_atomic_flows = 1024,
840 .nb_atomic_order_sequences = 1024,
842 if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
843 printf("%d: error creating qid\n", __LINE__);
847 static const struct rte_event_queue_conf conf_single_link = {
848 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
849 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
850 .nb_atomic_flows = 1024,
851 .nb_atomic_order_sequences = 1024,
853 if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
854 printf("%d: error creating qid\n", __LINE__);
858 struct rte_event_port_conf port_conf = {
859 .new_event_threshold = 128,
863 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
864 printf("%d Error setting up port\n", __LINE__);
867 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
868 printf("%d Error setting up port\n", __LINE__);
872 /* link port to lb queue */
873 uint8_t queue_id = 0;
874 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
875 printf("%d: error creating link for qid\n", __LINE__);
879 int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
881 printf("%d: Error unlinking lb port\n", __LINE__);
/* re-link the same queue after the unlink above */
886 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
887 printf("%d: error creating link for qid\n", __LINE__);
/* second port also links to the lb queue */
892 int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
894 printf("%d: error mapping lb qid\n", __LINE__);
898 if (rte_event_dev_start(evdev) < 0) {
899 printf("%d: Error with start call\n", __LINE__);
/* Verify an ordered queue can be set up twice on the same queue id
 * (reconfiguration) before the device is started. */
911 ordered_reconfigure(struct test *t)
913 if (init(t, 1, 1) < 0 ||
914 create_ports(t, 1) < 0) {
915 printf("%d: Error initializing device\n", __LINE__);
919 const struct rte_event_queue_conf conf = {
920 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY,
921 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
922 .nb_atomic_flows = 1024,
923 .nb_atomic_order_sequences = 1024,
926 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
927 printf("%d: error creating qid\n", __LINE__);
/* second setup on queue 0 must also succeed (reconfigure path) */
931 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
932 printf("%d: error creating qid, for 2nd time\n", __LINE__);
936 rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
937 if (rte_event_dev_start(evdev) < 0) {
938 printf("%d: Error with start call\n", __LINE__);
/* Verify QID (queue-level) priority: 3 packets sent to 3 queues of
 * increasing priority must dequeue in priority order, not enqueue
 * order. */
950 qid_priorities(struct test *t)
952 /* Test works by having a CQ with enough empty space for all packets,
953 * and enqueueing 3 packets to 3 QIDs. They must return based on the
954 * priority of the QID, not the ingress order, to pass the test
957 /* Create instance with 1 ports, and 3 qids */
958 if (init(t, 3, 1) < 0 ||
959 create_ports(t, 1) < 0) {
960 printf("%d: Error initializing device\n", __LINE__);
964 for (i = 0; i < 3; i++) {
966 const struct rte_event_queue_conf conf = {
967 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
968 /* increase priority (0 == highest), as we go */
969 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
970 .nb_atomic_flows = 1024,
971 .nb_atomic_order_sequences = 1024,
974 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
975 printf("%d: error creating qid %d\n", __LINE__, i);
981 /* map all QIDs to port */
982 rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
984 if (rte_event_dev_start(evdev) < 0) {
985 printf("%d: Error with start call\n", __LINE__);
989 /* enqueue 3 packets, setting seqn and QID to check priority */
990 for (i = 0; i < 3; i++) {
992 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
994 printf("%d: gen of pkt failed\n", __LINE__);
997 ev.queue_id = t->qid[i];
998 ev.op = RTE_EVENT_OP_NEW;
1002 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1004 printf("%d: Failed to enqueue\n", __LINE__);
1009 rte_event_schedule(evdev);
1011 /* dequeue packets, verify priority was upheld */
1012 struct rte_event ev[32];
1014 rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
1015 if (deq_pkts != 3) {
1016 printf("%d: failed to deq packets\n", __LINE__);
1017 rte_event_dev_dump(evdev, stdout);
/* highest-priority qid (last one created) must come out first */
1020 for (i = 0; i < 3; i++) {
1021 if (ev[i].mbuf->seqn != 2-i) {
1023 "%d: qid priority test: seqn %d incorrectly prioritized\n",
/* Verify atomic-flow load balancing: enqueue a fixed flow pattern to
 * one atomic queue mapped to 3 worker CQs, then check each port's
 * inflight count matches the expected per-flow distribution. */
1033 load_balancing(struct test *t)
1035 const int rx_enq = 0;
1039 if (init(t, 1, 4) < 0 ||
1040 create_ports(t, 4) < 0 ||
1041 create_atomic_qids(t, 1) < 0) {
1042 printf("%d: Error initializing device\n", __LINE__);
1046 for (i = 0; i < 3; i++) {
1047 /* map port 1 - 3 inclusive */
1048 if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
1050 printf("%d: error mapping qid to port %d\n",
1056 if (rte_event_dev_start(evdev) < 0) {
1057 printf("%d: Error with start call\n", __LINE__);
1061 /************** FORWARD ****************/
1063 * Create a set of flows that test the load-balancing operation of the
1064 * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
1065 * with a new flow, which should be sent to the 3rd mapped CQ
1067 static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
1069 for (i = 0; i < RTE_DIM(flows); i++) {
1070 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1072 printf("%d: gen of pkt failed\n", __LINE__);
1076 struct rte_event ev = {
1077 .op = RTE_EVENT_OP_NEW,
1078 .queue_id = t->qid[0],
1079 .flow_id = flows[i],
1082 /* generate pkt and enqueue */
1083 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1085 printf("%d: Failed to enqueue\n", __LINE__);
1090 rte_event_schedule(evdev);
1092 struct test_event_dev_stats stats;
1093 err = test_event_dev_stats_get(evdev, &stats);
1095 printf("%d: failed to get stats\n", __LINE__);
/* flow 0 (5 pkts) -> port 1, flow 1 (2) -> port 2, flow 2 (3) -> port 3
 * NOTE(review): port 1 check expects 4, not 5 -- the missing lines of
 * this chunk may adjust counts; confirm against the full file. */
1099 if (stats.port_inflight[1] != 4) {
1100 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
1104 if (stats.port_inflight[2] != 2) {
1105 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
1109 if (stats.port_inflight[3] != 3) {
1110 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
/* Verify flow "history": once all packets of a flow complete, the flow
 * may migrate to a different CQ when new packets for it arrive. Fills
 * CQs with flows, releases one, sends a second wave, and checks the
 * resulting per-port inflight counts. */
1120 load_balancing_history(struct test *t)
1122 struct test_event_dev_stats stats = {0};
1123 const int rx_enq = 0;
1127 /* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
1128 if (init(t, 1, 4) < 0 ||
1129 create_ports(t, 4) < 0 ||
1130 create_atomic_qids(t, 1) < 0)
1133 /* CQ mapping to QID */
1134 if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
1135 printf("%d: error mapping port 1 qid\n", __LINE__);
1138 if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
1139 printf("%d: error mapping port 2 qid\n", __LINE__);
1142 if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
1143 printf("%d: error mapping port 3 qid\n", __LINE__);
1146 if (rte_event_dev_start(evdev) < 0) {
1147 printf("%d: Error with start call\n", __LINE__);
1152 * Create a set of flows that test the load-balancing operation of the
1153 * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
1154 * the packet from CQ 0, send in a new set of flows. Ensure that:
1155 * 1. The new flow 3 gets into the empty CQ0
1156 * 2. packets for existing flow gets added into CQ1
1157 * 3. Next flow 0 pkt is now onto CQ2, since CQ0 and CQ1 now contain
1158 * more outstanding pkts
1160 * This test makes sure that when a flow ends (i.e. all packets
1161 * have been completed for that flow), that the flow can be moved
1162 * to a different CQ when new packets come in for that flow.
1164 static uint32_t flows1[] = {0, 1, 1, 2};
1166 for (i = 0; i < RTE_DIM(flows1); i++) {
1167 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1168 struct rte_event ev = {
1169 .flow_id = flows1[i],
1170 .op = RTE_EVENT_OP_NEW,
1171 .queue_id = t->qid[0],
1172 .event_type = RTE_EVENT_TYPE_CPU,
1173 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1178 printf("%d: gen of pkt failed\n", __LINE__);
/* store flow id in the mbuf too, so dequeues can be checked */
1181 arp->hash.rss = flows1[i];
1182 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1184 printf("%d: Failed to enqueue\n", __LINE__);
1189 /* call the scheduler */
1190 rte_event_schedule(evdev);
1192 /* Dequeue the flow 0 packet from port 1, so that we can then drop */
1193 struct rte_event ev;
1194 if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
1195 printf("%d: failed to dequeue\n", __LINE__);
1198 if (ev.mbuf->hash.rss != flows1[0]) {
1199 printf("%d: unexpected flow received\n", __LINE__);
1203 /* drop the flow 0 packet from port 1 */
1204 rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
1206 /* call the scheduler */
1207 rte_event_schedule(evdev);
1210 * Set up the next set of flows, first a new flow to fill up
1211 * CQ 0, so that the next flow 0 packet should go to CQ2
1213 static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
1215 for (i = 0; i < RTE_DIM(flows2); i++) {
1216 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1217 struct rte_event ev = {
1218 .flow_id = flows2[i],
1219 .op = RTE_EVENT_OP_NEW,
1220 .queue_id = t->qid[0],
1221 .event_type = RTE_EVENT_TYPE_CPU,
1222 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1227 printf("%d: gen of pkt failed\n", __LINE__);
1230 arp->hash.rss = flows2[i];
1232 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1234 printf("%d: Failed to enqueue\n", __LINE__);
1240 rte_event_schedule(evdev);
1242 err = test_event_dev_stats_get(evdev, &stats);
1244 printf("%d:failed to get stats\n", __LINE__);
1249 * Now check the resulting inflights on each port.
1251 if (stats.port_inflight[1] != 3) {
1252 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
1254 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
1255 (unsigned int)stats.port_inflight[1],
1256 (unsigned int)stats.port_inflight[2],
1257 (unsigned int)stats.port_inflight[3]);
1260 if (stats.port_inflight[2] != 4) {
1261 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
1263 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
1264 (unsigned int)stats.port_inflight[1],
1265 (unsigned int)stats.port_inflight[2],
1266 (unsigned int)stats.port_inflight[3]);
1269 if (stats.port_inflight[3] != 2) {
1270 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
1272 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
1273 (unsigned int)stats.port_inflight[1],
1274 (unsigned int)stats.port_inflight[2],
1275 (unsigned int)stats.port_inflight[3]);
/* drain and release everything left on the worker ports */
1279 for (i = 1; i <= 3; i++) {
1280 struct rte_event ev;
1281 while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
1282 rte_event_enqueue_burst(evdev, i, &release_ev, 1);
1284 rte_event_schedule(evdev);
/* Enqueue an event with an out-of-range queue_id and verify it is
 * accepted at enqueue time but counted as a per-port drop (not an
 * inflight, and not double-counted at device level). */
1291 invalid_qid(struct test *t)
1293 struct test_event_dev_stats stats;
1294 const int rx_enq = 0;
1298 if (init(t, 1, 4) < 0 ||
1299 create_ports(t, 4) < 0 ||
1300 create_atomic_qids(t, 1) < 0) {
1301 printf("%d: Error initializing device\n", __LINE__);
1305 /* CQ mapping to QID */
1306 for (i = 0; i < 4; i++) {
1307 err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
1310 printf("%d: error mapping port 1 qid\n", __LINE__);
1315 if (rte_event_dev_start(evdev) < 0) {
1316 printf("%d: Error with start call\n", __LINE__);
1321 * Send in a packet with an invalid qid to the scheduler.
1322 * We should see the packed enqueued OK, but the inflights for
1323 * that packet should not be incremented, and the rx_dropped
1324 * should be incremented.
1326 static uint32_t flows1[] = {20};
1328 for (i = 0; i < RTE_DIM(flows1); i++) {
1329 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1331 printf("%d: gen of pkt failed\n", __LINE__);
1335 struct rte_event ev = {
1336 .op = RTE_EVENT_OP_NEW,
/* qid[0] + 20 is deliberately beyond the 1 configured queue */
1337 .queue_id = t->qid[0] + flows1[i],
1341 /* generate pkt and enqueue */
1342 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1344 printf("%d: Failed to enqueue\n", __LINE__);
1349 /* call the scheduler */
1350 rte_event_schedule(evdev);
1352 err = test_event_dev_stats_get(evdev, &stats);
1354 printf("%d: failed to get stats\n", __LINE__);
1359 * Now check the resulting inflights on the port, and the rx_dropped.
1361 if (stats.port_inflight[0] != 0) {
1362 printf("%d:%s: port 1 inflight count not correct\n", __LINE__,
1364 rte_event_dev_dump(evdev, stdout);
1367 if (stats.port_rx_dropped[0] != 1) {
1368 printf("%d:%s: port 1 drops\n", __LINE__, __func__);
1369 rte_event_dev_dump(evdev, stdout);
1372 /* each packet drop should only be counted in one place - port or dev */
1373 if (stats.rx_dropped != 0) {
1374 printf("%d:%s: port 1 dropped count not correct\n", __LINE__,
1376 rte_event_dev_dump(evdev, stdout);
/* Full single-packet lifecycle on an atomic queue: enqueue NEW, verify
 * it is scheduled to the worker CQ (inflight == 1), dequeue and check
 * the magic seqn, then RELEASE it and verify inflight drops to 0. */
1387 single_packet(struct test *t)
1388 const uint32_t MAGIC_SEQN = 7321;
1389 struct rte_event ev;
1390 struct test_event_dev_stats stats;
1391 const int rx_enq = 0;
1392 const int wrk_enq = 2;
1394 /* Create instance with 4 ports */
1395 if (init(t, 1, 4) < 0 ||
1396 create_ports(t, 4) < 0 ||
1397 create_atomic_qids(t, 1) < 0) {
1398 printf("%d: Error initializing device\n", __LINE__);
1402 /* CQ mapping to QID */
1403 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
1405 printf("%d: error mapping lb qid\n", __LINE__);
1410 if (rte_event_dev_start(evdev) < 0) {
1411 printf("%d: Error with start call\n", __LINE__);
1415 /************** Gen pkt and enqueue ****************/
1416 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1418 printf("%d: gen of pkt failed\n", __LINE__);
1422 ev.op = RTE_EVENT_OP_NEW;
1423 ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
1427 arp->seqn = MAGIC_SEQN;
1429 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1431 printf("%d: Failed to enqueue\n", __LINE__);
1435 rte_event_schedule(evdev);
1437 err = test_event_dev_stats_get(evdev, &stats);
1439 printf("%d: failed to get stats\n", __LINE__);
/* One packet through the scheduler, held inflight on the worker. */
1443 if (stats.rx_pkts != 1 ||
1444 stats.tx_pkts != 1 ||
1445 stats.port_inflight[wrk_enq] != 1) {
1446 printf("%d: Sched core didn't handle pkt as expected\n",
1448 rte_event_dev_dump(evdev, stdout);
1454 deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
1456 printf("%d: Failed to deq\n", __LINE__);
1460 err = test_event_dev_stats_get(evdev, &stats);
1462 printf("%d: failed to get stats\n", __LINE__);
1466 err = test_event_dev_stats_get(evdev, &stats);
1467 if (ev.mbuf->seqn != MAGIC_SEQN) {
1468 printf("%d: magic sequence number not dequeued\n", __LINE__);
/* release the atomic context; inflight must return to zero */
1472 rte_pktmbuf_free(ev.mbuf);
1473 err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
1475 printf("%d: Failed to enqueue\n", __LINE__);
1478 rte_event_schedule(evdev);
1480 err = test_event_dev_stats_get(evdev, &stats);
1481 if (stats.port_inflight[wrk_enq] != 0) {
1482 printf("%d: port inflight not correct\n", __LINE__);
1491 inflight_counts(struct test *t)
1493 struct rte_event ev;
1494 struct test_event_dev_stats stats;
1495 const int rx_enq = 0;
1501 /* Create instance with 4 ports */
1502 if (init(t, 2, 3) < 0 ||
1503 create_ports(t, 3) < 0 ||
1504 create_atomic_qids(t, 2) < 0) {
1505 printf("%d: Error initializing device\n", __LINE__);
1509 /* CQ mapping to QID */
1510 err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
1512 printf("%d: error mapping lb qid\n", __LINE__);
1516 err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
1518 printf("%d: error mapping lb qid\n", __LINE__);
1523 if (rte_event_dev_start(evdev) < 0) {
1524 printf("%d: Error with start call\n", __LINE__);
1528 /************** FORWARD ****************/
1530 for (i = 0; i < QID1_NUM; i++) {
1531 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1534 printf("%d: gen of pkt failed\n", __LINE__);
1538 ev.queue_id = t->qid[0];
1539 ev.op = RTE_EVENT_OP_NEW;
1541 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1543 printf("%d: Failed to enqueue\n", __LINE__);
1548 for (i = 0; i < QID2_NUM; i++) {
1549 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1552 printf("%d: gen of pkt failed\n", __LINE__);
1555 ev.queue_id = t->qid[1];
1556 ev.op = RTE_EVENT_OP_NEW;
1558 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1560 printf("%d: Failed to enqueue\n", __LINE__);
1566 rte_event_schedule(evdev);
1568 err = test_event_dev_stats_get(evdev, &stats);
1570 printf("%d: failed to get stats\n", __LINE__);
1574 if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
1575 stats.tx_pkts != QID1_NUM + QID2_NUM) {
1576 printf("%d: Sched core didn't handle pkt as expected\n",
1581 if (stats.port_inflight[p1] != QID1_NUM) {
1582 printf("%d: %s port 1 inflight not correct\n", __LINE__,
1586 if (stats.port_inflight[p2] != QID2_NUM) {
1587 printf("%d: %s port 2 inflight not correct\n", __LINE__,
1592 /************** DEQUEUE INFLIGHT COUNT CHECKS ****************/
1594 struct rte_event events[QID1_NUM + QID2_NUM];
1595 uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
1596 RTE_DIM(events), 0);
1598 if (deq_pkts != QID1_NUM) {
1599 printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
1602 err = test_event_dev_stats_get(evdev, &stats);
1603 if (stats.port_inflight[p1] != QID1_NUM) {
1604 printf("%d: port 1 inflight decrement after DEQ != 0\n",
1608 for (i = 0; i < QID1_NUM; i++) {
1609 err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
1612 printf("%d: %s rte enqueue of inf release failed\n",
1613 __LINE__, __func__);
1619 * As the scheduler core decrements inflights, it needs to run to
1620 * process packets to act on the drop messages
1622 rte_event_schedule(evdev);
1624 err = test_event_dev_stats_get(evdev, &stats);
1625 if (stats.port_inflight[p1] != 0) {
1626 printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
1631 deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
1632 RTE_DIM(events), 0);
1633 if (deq_pkts != QID2_NUM) {
1634 printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
1637 err = test_event_dev_stats_get(evdev, &stats);
1638 if (stats.port_inflight[p2] != QID2_NUM) {
1639 printf("%d: port 1 inflight decrement after DEQ != 0\n",
1643 for (i = 0; i < QID2_NUM; i++) {
1644 err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
1647 printf("%d: %s rte enqueue of inf release failed\n",
1648 __LINE__, __func__);
1654 * As the scheduler core decrements inflights, it needs to run to
1655 * process packets to act on the drop messages
1657 rte_event_schedule(evdev);
1659 err = test_event_dev_stats_get(evdev, &stats);
1660 if (stats.port_inflight[p2] != 0) {
1661 printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
1668 rte_event_dev_dump(evdev, stdout);
1674 parallel_basic(struct test *t, int check_order)
1676 const uint8_t rx_port = 0;
1677 const uint8_t w1_port = 1;
1678 const uint8_t w3_port = 3;
1679 const uint8_t tx_port = 4;
1682 uint32_t deq_pkts, j;
1683 struct rte_mbuf *mbufs[3];
1684 struct rte_mbuf *mbufs_out[3];
1685 const uint32_t MAGIC_SEQN = 1234;
1687 /* Create instance with 4 ports */
1688 if (init(t, 2, tx_port + 1) < 0 ||
1689 create_ports(t, tx_port + 1) < 0 ||
1690 (check_order ? create_ordered_qids(t, 1) :
1691 create_unordered_qids(t, 1)) < 0 ||
1692 create_directed_qids(t, 1, &tx_port)) {
1693 printf("%d: Error initializing device\n", __LINE__);
1699 * We need three ports, all mapped to the same ordered qid0. Then we'll
1700 * take a packet out to each port, re-enqueue in reverse order,
1701 * then make sure the reordering has taken place properly when we
1702 * dequeue from the tx_port.
1704 * Simplified test setup diagram:
1708 * qid0 - w2_port - qid1
1712 /* CQ mapping to QID for LB ports (directed mapped on create) */
1713 for (i = w1_port; i <= w3_port; i++) {
1714 err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
1717 printf("%d: error mapping lb qid\n", __LINE__);
1723 if (rte_event_dev_start(evdev) < 0) {
1724 printf("%d: Error with start call\n", __LINE__);
1728 /* Enqueue 3 packets to the rx port */
1729 for (i = 0; i < 3; i++) {
1730 struct rte_event ev;
1731 mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
1733 printf("%d: gen of pkt failed\n", __LINE__);
1737 ev.queue_id = t->qid[0];
1738 ev.op = RTE_EVENT_OP_NEW;
1740 mbufs[i]->seqn = MAGIC_SEQN + i;
1742 /* generate pkt and enqueue */
1743 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
1745 printf("%d: Failed to enqueue pkt %u, retval = %u\n",
1751 rte_event_schedule(evdev);
1753 /* use extra slot to make logic in loops easier */
1754 struct rte_event deq_ev[w3_port + 1];
1756 /* Dequeue the 3 packets, one from each worker port */
1757 for (i = w1_port; i <= w3_port; i++) {
1758 deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
1760 if (deq_pkts != 1) {
1761 printf("%d: Failed to deq\n", __LINE__);
1762 rte_event_dev_dump(evdev, stdout);
1767 /* Enqueue each packet in reverse order, flushing after each one */
1768 for (i = w3_port; i >= w1_port; i--) {
1770 deq_ev[i].op = RTE_EVENT_OP_FORWARD;
1771 deq_ev[i].queue_id = t->qid[1];
1772 err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
1774 printf("%d: Failed to enqueue\n", __LINE__);
1778 rte_event_schedule(evdev);
1780 /* dequeue from the tx ports, we should get 3 packets */
1781 deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
1784 /* Check to see if we've got all 3 packets */
1785 if (deq_pkts != 3) {
1786 printf("%d: expected 3 pkts at tx port got %d from port %d\n",
1787 __LINE__, deq_pkts, tx_port);
1788 rte_event_dev_dump(evdev, stdout);
1792 /* Check to see if the sequence numbers are in expected order */
1794 for (j = 0 ; j < deq_pkts ; j++) {
1795 if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
1797 "%d: Incorrect sequence number(%d) from port %d\n",
1798 __LINE__, mbufs_out[j]->seqn, tx_port);
1804 /* Destroy the instance */
/* Run parallel_basic with ordering verification enabled (ordered QID). */
static int
ordered_basic(struct test *t)
{
	return parallel_basic(t, 1 /* check_order */);
}
/* Run parallel_basic with ordering verification disabled (unordered QID). */
static int
unordered_basic(struct test *t)
{
	return parallel_basic(t, 0 /* check_order */);
}
1821 static struct rte_mempool *eventdev_func_mempool;
1824 test_sw_eventdev(void)
1826 struct test *t = malloc(sizeof(struct test));
1829 /* manually initialize the op, older gcc's complain on static
1830 * initialization of struct elements that are a bitfield.
1832 release_ev.op = RTE_EVENT_OP_RELEASE;
1834 const char *eventdev_name = "event_sw0";
1835 evdev = rte_event_dev_get_dev_id(eventdev_name);
1837 printf("%d: Eventdev %s not found - creating.\n",
1838 __LINE__, eventdev_name);
1839 if (rte_eal_vdev_init(eventdev_name, NULL) < 0) {
1840 printf("Error creating eventdev\n");
1843 evdev = rte_event_dev_get_dev_id(eventdev_name);
1845 printf("Error finding newly created eventdev\n");
1850 /* Only create mbuf pool once, reuse for each test run */
1851 if (!eventdev_func_mempool) {
1852 eventdev_func_mempool = rte_pktmbuf_pool_create(
1853 "EVENTDEV_SW_SA_MBUF_POOL",
1854 (1<<12), /* 4k buffers */
1855 32 /*MBUF_CACHE_SIZE*/,
1857 512, /* use very small mbufs */
1859 if (!eventdev_func_mempool) {
1860 printf("ERROR creating mempool\n");
1864 t->mbuf_pool = eventdev_func_mempool;
1866 printf("*** Running Single Directed Packet test...\n");
1867 ret = test_single_directed_packet(t);
1869 printf("ERROR - Single Directed Packet test FAILED.\n");
1872 printf("*** Running Single Load Balanced Packet test...\n");
1873 ret = single_packet(t);
1875 printf("ERROR - Single Packet test FAILED.\n");
1878 printf("*** Running Unordered Basic test...\n");
1879 ret = unordered_basic(t);
1881 printf("ERROR - Unordered Basic test FAILED.\n");
1884 printf("*** Running Ordered Basic test...\n");
1885 ret = ordered_basic(t);
1887 printf("ERROR - Ordered Basic test FAILED.\n");
1890 printf("*** Running Burst Packets test...\n");
1891 ret = burst_packets(t);
1893 printf("ERROR - Burst Packets test FAILED.\n");
1896 printf("*** Running Load Balancing test...\n");
1897 ret = load_balancing(t);
1899 printf("ERROR - Load Balancing test FAILED.\n");
1902 printf("*** Running Prioritized Directed test...\n");
1903 ret = test_priority_directed(t);
1905 printf("ERROR - Prioritized Directed test FAILED.\n");
1908 printf("*** Running Prioritized Atomic test...\n");
1909 ret = test_priority_atomic(t);
1911 printf("ERROR - Prioritized Atomic test FAILED.\n");
1915 printf("*** Running Prioritized Ordered test...\n");
1916 ret = test_priority_ordered(t);
1918 printf("ERROR - Prioritized Ordered test FAILED.\n");
1921 printf("*** Running Prioritized Unordered test...\n");
1922 ret = test_priority_unordered(t);
1924 printf("ERROR - Prioritized Unordered test FAILED.\n");
1927 printf("*** Running Invalid QID test...\n");
1928 ret = invalid_qid(t);
1930 printf("ERROR - Invalid QID test FAILED.\n");
1933 printf("*** Running Load Balancing History test...\n");
1934 ret = load_balancing_history(t);
1936 printf("ERROR - Load Balancing History test FAILED.\n");
1939 printf("*** Running Inflight Count test...\n");
1940 ret = inflight_counts(t);
1942 printf("ERROR - Inflight Count test FAILED.\n");
1945 printf("*** Running Abuse Inflights test...\n");
1946 ret = abuse_inflights(t);
1948 printf("ERROR - Abuse Inflights test FAILED.\n");
1951 printf("*** Running QID Priority test...\n");
1952 ret = qid_priorities(t);
1954 printf("ERROR - QID Priority test FAILED.\n");
1957 printf("*** Running Ordered Reconfigure test...\n");
1958 ret = ordered_reconfigure(t);
1960 printf("ERROR - Ordered Reconfigure test FAILED.\n");
1963 printf("*** Running Port LB Single Reconfig test...\n");
1964 ret = port_single_lb_reconfig(t);
1966 printf("ERROR - Port LB Single Reconfig test FAILED.\n");
1969 printf("*** Running Port Reconfig Credits test...\n");
1970 ret = port_reconfig_credits(t);
1972 printf("ERROR - Port Reconfig Credits Reset test FAILED.\n");
1976 * Free test instance, leaving mempool initialized, and a pointer to it
1977 * in static eventdev_func_mempool, as it is re-used on re-runs
1984 REGISTER_TEST_COMMAND(eventdev_sw_autotest, test_sw_eventdev);