4 * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 #include <sys/queue.h>
41 #include <rte_memory.h>
42 #include <rte_memzone.h>
43 #include <rte_launch.h>
45 #include <rte_per_lcore.h>
46 #include <rte_lcore.h>
47 #include <rte_debug.h>
48 #include <rte_ethdev.h>
49 #include <rte_cycles.h>
51 #include <rte_eventdev.h>
/* Test-harness constants and state.
 * NOTE(review): lines are missing from this extract -- mbuf_pool/port/qid are
 * presumably members of a "struct test { ... }" whose opening/closing lines
 * are not visible here; confirm against the full file.
 */
56 #define NUM_PACKETS (1<<18)
61 struct rte_mempool *mbuf_pool;
62 uint8_t port[MAX_PORTS];
63 uint8_t qid[MAX_QIDS];
/* Shared RELEASE event; its .op field is set at runtime in test_sw_eventdev()
 * because older gcc versions reject static initialization of bitfield members
 * (see the comment in that function).
 */
67 static struct rte_event release_ev;
/* Build a minimal ARP-request packet in an mbuf allocated from @mp, for use
 * as test traffic by the functional tests below.
 * NOTE(review): lines are missing from this extract (opening brace, the
 * declaration/NULL-check of "m", and the final "return m") -- reconcile with
 * the full file before relying on this fragment.
 */
69 static inline struct rte_mbuf *
70 rte_gen_arp(int portid, struct rte_mempool *mp)
74 * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
76 static const uint8_t arp_request[] = {
77 /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
78 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
79 /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
80 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
81 /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
82 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
83 /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
84 0x00, 0x00, 0x00, 0x00
/* NOTE(review): the "- 1" drops the final payload byte; this matches the
 * upstream code but confirm it is intentional. */
87 int pkt_len = sizeof(arp_request) - 1;
89 m = rte_pktmbuf_alloc(mp);
/* Copy the payload directly to the start of the mbuf data area. */
93 memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
94 arp_request, pkt_len);
95 rte_pktmbuf_pkt_len(m) = pkt_len;
96 rte_pktmbuf_data_len(m) = pkt_len;
103 /* initialization and config */
/* Zero the test struct (preserving its mbuf-pool pointer via "temp") and
 * configure the event device with @nb_queues queues and @nb_ports ports.
 * NOTE(review): lines are missing from this extract (function header, the
 * restore of t->mbuf_pool from temp, and the return of ret) -- confirm
 * against the full file.
 */
105 init(struct test *t, int nb_queues, int nb_ports)
107 struct rte_event_dev_config config = {
108 .nb_event_queues = nb_queues,
109 .nb_event_ports = nb_ports,
110 .nb_event_queue_flows = 1024,
111 .nb_events_limit = 4096,
112 .nb_event_port_dequeue_depth = 128,
113 .nb_event_port_enqueue_depth = 128,
117 void *temp = t->mbuf_pool; /* save and restore mbuf pool */
119 memset(t, 0, sizeof(*t));
122 ret = rte_event_dev_configure(evdev, &config);
124 printf("%d: Error configuring device\n", __LINE__);
/* Set up @num_ports event ports with a fixed port configuration.
 * NOTE(review): lines are missing from this extract (function header, the
 * dequeue/enqueue depth fields of "conf", the loop body's t->port[i]
 * assignment and the return statements) -- confirm against the full file.
 */
129 create_ports(struct test *t, int num_ports)
132 static const struct rte_event_port_conf conf = {
133 .new_event_threshold = 1024,
/* Bounds check: the test struct only tracks MAX_PORTS ports. */
137 if (num_ports > MAX_PORTS)
140 for (i = 0; i < num_ports; i++) {
141 if (rte_event_port_setup(evdev, i, &conf) < 0) {
142 printf("Error setting up port %d\n", i);
/* Create @num_qids load-balanced queues of the type given by @flags,
 * starting at index t->nb_qids, and bump the count.
 * NOTE(review): lines are missing from this extract (function header,
 * t->qid[i] assignment, error/success returns); note also that the
 * MAX_QIDS bound is checked only AFTER nb_qids is incremented -- verify
 * that is the intended behavior in the full file.
 */
152 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
157 const struct rte_event_queue_conf conf = {
158 .event_queue_cfg = flags,
159 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
160 .nb_atomic_flows = 1024,
161 .nb_atomic_order_sequences = 1024,
164 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
165 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
166 printf("%d: error creating qid %d\n", __LINE__, i);
171 t->nb_qids += num_qids;
172 if (t->nb_qids > MAX_QIDS)
179 create_atomic_qids(struct test *t, int num_qids)
181 return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY);
185 create_ordered_qids(struct test *t, int num_qids)
187 return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ORDERED_ONLY);
192 create_unordered_qids(struct test *t, int num_qids)
194 return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY);
/* Create @num_qids SINGLE_LINK (directed) queues starting at t->nb_qids and
 * link each new queue to the corresponding entry of @ports.
 * NOTE(review): lines are missing from this extract (function header,
 * t->qid[i] assignment, error returns); as in create_lb_qids, the MAX_QIDS
 * check happens only after the increment -- verify in the full file.
 */
198 create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
203 static const struct rte_event_queue_conf conf = {
204 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
205 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
206 .nb_atomic_flows = 1024,
207 .nb_atomic_order_sequences = 1024,
210 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
211 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
212 printf("%d: error creating qid %d\n", __LINE__, i);
/* One port per directed queue: link ports[i - t->nb_qids] to qid[i]. */
217 if (rte_event_port_link(evdev, ports[i - t->nb_qids],
218 &t->qid[i], NULL, 1) != 1) {
219 printf("%d: error creating link for qid %d\n",
224 t->nb_qids += num_qids;
225 if (t->nb_qids > MAX_QIDS)
233 cleanup(struct test *t __rte_unused)
235 rte_event_dev_stop(evdev);
236 rte_event_dev_close(evdev);
240 struct test_event_dev_stats {
241 uint64_t rx_pkts; /**< Total packets received */
242 uint64_t rx_dropped; /**< Total packets dropped (Eg Invalid QID) */
243 uint64_t tx_pkts; /**< Total packets transmitted */
245 /** Packets received on this port */
246 uint64_t port_rx_pkts[MAX_PORTS];
247 /** Packets dropped on this port */
248 uint64_t port_rx_dropped[MAX_PORTS];
249 /** Packets inflight on this port */
250 uint64_t port_inflight[MAX_PORTS];
251 /** Packets transmitted on this port */
252 uint64_t port_tx_pkts[MAX_PORTS];
253 /** Packets received on this qid */
254 uint64_t qid_rx_pkts[MAX_QIDS];
255 /** Packets dropped on this qid */
256 uint64_t qid_rx_dropped[MAX_QIDS];
257 /** Packets transmitted on this qid */
258 uint64_t qid_tx_pkts[MAX_QIDS];
/* Populate @stats with the current xstats counters of @dev_id, looked up by
 * name via rte_event_dev_xstats_by_name_get(). The static *_ids arrays cache
 * the xstat ids between calls; as a consequence this helper is not
 * re-entrant / thread-safe.
 * NOTE(review): lines are missing from this extract (function header,
 * declarations of i/name, and the return) -- confirm against the full file.
 */
262 test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
265 static uint32_t total_ids[3]; /* rx, tx and drop */
266 static uint32_t port_rx_pkts_ids[MAX_PORTS];
267 static uint32_t port_rx_dropped_ids[MAX_PORTS];
268 static uint32_t port_inflight_ids[MAX_PORTS];
269 static uint32_t port_tx_pkts_ids[MAX_PORTS];
270 static uint32_t qid_rx_pkts_ids[MAX_QIDS];
271 static uint32_t qid_rx_dropped_ids[MAX_QIDS];
272 static uint32_t qid_tx_pkts_ids[MAX_QIDS];
/* Device-wide counters. */
275 stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
276 "dev_rx", &total_ids[0]);
277 stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
278 "dev_drop", &total_ids[1]);
279 stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
280 "dev_tx", &total_ids[2]);
/* Per-port counters, keyed by "port_<i>_<stat>" names. */
281 for (i = 0; i < MAX_PORTS; i++) {
283 snprintf(name, sizeof(name), "port_%u_rx", i);
284 stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
285 dev_id, name, &port_rx_pkts_ids[i]);
286 snprintf(name, sizeof(name), "port_%u_drop", i);
287 stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
288 dev_id, name, &port_rx_dropped_ids[i]);
289 snprintf(name, sizeof(name), "port_%u_inflight", i);
290 stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
291 dev_id, name, &port_inflight_ids[i]);
292 snprintf(name, sizeof(name), "port_%u_tx", i);
293 stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
294 dev_id, name, &port_tx_pkts_ids[i]);
/* Per-queue counters, keyed by "qid_<i>_<stat>" names. */
296 for (i = 0; i < MAX_QIDS; i++) {
298 snprintf(name, sizeof(name), "qid_%u_rx", i);
299 stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
300 dev_id, name, &qid_rx_pkts_ids[i]);
301 snprintf(name, sizeof(name), "qid_%u_drop", i);
302 stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
303 dev_id, name, &qid_rx_dropped_ids[i]);
304 snprintf(name, sizeof(name), "qid_%u_tx", i);
305 stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
306 dev_id, name, &qid_tx_pkts_ids[i]);
/* Test: enqueue one packet on a directed (single-link) queue and verify it
 * arrives at the linked port with its magic sequence number intact, and that
 * the per-port rx stats agree.
 * NOTE(review): lines are missing from this extract (function header, error
 * returns, the .queue_id/.mbuf fields of "ev", and the final cleanup/return)
 * -- confirm against the full file.
 */
313 test_single_directed_packet(struct test *t)
315 const int rx_enq = 0;
316 const int wrk_enq = 2;
319 /* Create instance with 3 directed QIDs going to 3 ports */
320 if (init(t, 3, 3) < 0 ||
321 create_ports(t, 3) < 0 ||
322 create_directed_qids(t, 3, t->port) < 0)
325 if (rte_event_dev_start(evdev) < 0) {
326 printf("%d: Error with start call\n", __LINE__);
330 /************** FORWARD ****************/
331 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
332 struct rte_event ev = {
333 .op = RTE_EVENT_OP_NEW,
339 printf("%d: gen of pkt failed\n", __LINE__);
/* Tag the packet so we can recognize it at dequeue time. */
343 const uint32_t MAGIC_SEQN = 4711;
344 arp->seqn = MAGIC_SEQN;
346 /* generate pkt and enqueue */
347 err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
349 printf("%d: error failed to enqueue\n", __LINE__);
353 /* Run schedule() as dir packets may need to be re-ordered */
354 rte_event_schedule(evdev);
356 struct test_event_dev_stats stats;
357 err = test_event_dev_stats_get(evdev, &stats);
359 printf("%d: error failed to get stats\n", __LINE__);
363 if (stats.port_rx_pkts[rx_enq] != 1) {
364 printf("%d: error stats incorrect for directed port\n",
370 deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
372 printf("%d: error failed to deq\n", __LINE__);
/* Depending on scheduling, the worker port may or may not have counted the
 * packet as rx yet; both 0 and 1 are accepted below. */
376 err = test_event_dev_stats_get(evdev, &stats);
377 if (stats.port_rx_pkts[wrk_enq] != 0 &&
378 stats.port_rx_pkts[wrk_enq] != 1) {
379 printf("%d: error directed stats post-dequeue\n", __LINE__);
383 if (ev.mbuf->seqn != MAGIC_SEQN) {
384 printf("%d: error magic sequence number not dequeued\n",
389 rte_pktmbuf_free(ev.mbuf);
/* Test: enqueue NUM_PKTS packets alternating across two atomic queues, run
 * the scheduler, and verify half arrive at each of the two linked ports.
 * NOTE(review): lines are missing from this extract (function header,
 * declarations of i/err/ret/deq_pkts/p, the ev.queue_id/flow assignment that
 * alternates queues, dequeue loop headers, and returns) -- confirm against
 * the full file.
 */
395 burst_packets(struct test *t)
397 /************** CONFIG ****************/
402 /* Create instance with 2 ports and 2 queues */
403 if (init(t, 2, 2) < 0 ||
404 create_ports(t, 2) < 0 ||
405 create_atomic_qids(t, 2) < 0) {
406 printf("%d: Error initializing device\n", __LINE__);
410 /* CQ mapping to QID */
411 ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
413 printf("%d: error mapping lb qid0\n", __LINE__);
416 ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
418 printf("%d: error mapping lb qid1\n", __LINE__);
422 if (rte_event_dev_start(evdev) < 0) {
423 printf("%d: Error with start call\n", __LINE__);
427 /************** FORWARD ****************/
428 const uint32_t rx_port = 0;
429 const uint32_t NUM_PKTS = 2;
431 for (i = 0; i < NUM_PKTS; i++) {
432 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
434 printf("%d: error generating pkt\n", __LINE__);
438 struct rte_event ev = {
439 .op = RTE_EVENT_OP_NEW,
444 /* generate pkt and enqueue */
445 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
447 printf("%d: Failed to enqueue\n", __LINE__);
451 rte_event_schedule(evdev);
453 /* Check stats for all NUM_PKTS arrived to sched core */
454 struct test_event_dev_stats stats;
456 err = test_event_dev_stats_get(evdev, &stats);
458 printf("%d: failed to get stats\n", __LINE__);
461 if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
462 printf("%d: Sched core didn't receive all %d pkts\n",
464 rte_event_dev_dump(evdev, stdout);
472 /******** DEQ QID 1 *******/
475 p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
477 rte_pktmbuf_free(ev.mbuf);
480 if (deq_pkts != NUM_PKTS/2) {
481 printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
486 /******** DEQ QID 2 *******/
490 p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
492 rte_pktmbuf_free(ev.mbuf);
494 if (deq_pkts != NUM_PKTS/2) {
495 printf("%d: Half of NUM_PKTS didn't arrive at port 2\n",
/* Test: enqueue a RELEASE op without any prior dequeue ("abuse" the inflight
 * tracking) and verify the scheduler neither counts rx/tx packets nor leaves
 * a stale inflight on the worker port.
 * NOTE(review): lines are missing from this extract (function header, error
 * returns, and the final cleanup/return) -- confirm against the full file.
 */
505 abuse_inflights(struct test *t)
507 const int rx_enq = 0;
508 const int wrk_enq = 2;
511 /* Create instance with 4 ports */
512 if (init(t, 1, 4) < 0 ||
513 create_ports(t, 4) < 0 ||
514 create_atomic_qids(t, 1) < 0) {
515 printf("%d: Error initializing device\n", __LINE__);
519 /* CQ mapping to QID */
/* nb_links == 0 requests linking the port to all configured queues. */
520 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
522 printf("%d: error mapping lb qid\n", __LINE__);
527 if (rte_event_dev_start(evdev) < 0) {
528 printf("%d: Error with start call\n", __LINE__);
532 /* Enqueue op only */
533 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
535 printf("%d: Failed to enqueue\n", __LINE__);
540 rte_event_schedule(evdev);
542 struct test_event_dev_stats stats;
544 err = test_event_dev_stats_get(evdev, &stats);
546 printf("%d: failed to get stats\n", __LINE__);
/* A bare RELEASE must be a no-op from the stats' point of view. */
550 if (stats.rx_pkts != 0 ||
551 stats.tx_pkts != 0 ||
552 stats.port_inflight[wrk_enq] != 0) {
553 printf("%d: Sched core didn't handle pkt as expected\n",
563 port_reconfig_credits(struct test *t)
565 if (init(t, 1, 1) < 0) {
566 printf("%d: Error initializing device\n", __LINE__);
571 const uint32_t NUM_ITERS = 32;
572 for (i = 0; i < NUM_ITERS; i++) {
573 const struct rte_event_queue_conf conf = {
574 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
575 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
576 .nb_atomic_flows = 1024,
577 .nb_atomic_order_sequences = 1024,
579 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
580 printf("%d: error creating qid\n", __LINE__);
585 static const struct rte_event_port_conf port_conf = {
586 .new_event_threshold = 128,
590 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
591 printf("%d Error setting up port\n", __LINE__);
595 int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
597 printf("%d: error mapping lb qid\n", __LINE__);
601 if (rte_event_dev_start(evdev) < 0) {
602 printf("%d: Error with start call\n", __LINE__);
606 const uint32_t NPKTS = 1;
608 for (j = 0; j < NPKTS; j++) {
610 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
612 printf("%d: gen of pkt failed\n", __LINE__);
615 ev.queue_id = t->qid[0];
616 ev.op = RTE_EVENT_OP_NEW;
618 int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
620 printf("%d: Failed to enqueue\n", __LINE__);
621 rte_event_dev_dump(0, stdout);
626 rte_event_schedule(evdev);
628 struct rte_event ev[NPKTS];
629 int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
632 printf("%d error; no packet dequeued\n", __LINE__);
634 /* let cleanup below stop the device on last iter */
635 if (i != NUM_ITERS-1)
636 rte_event_dev_stop(evdev);
/* Test: with one load-balanced (atomic) queue and one single-link queue,
 * link port 0 to the lb queue, unlink it, and relink -- verifying link /
 * unlink of a single lb port works before device start.
 * NOTE(review): lines are missing from this extract (function header, error
 * returns/gotos, dequeue-depth fields of port_conf, and the final
 * cleanup/return) -- confirm against the full file.
 */
647 port_single_lb_reconfig(struct test *t)
649 if (init(t, 2, 2) < 0) {
650 printf("%d: Error initializing device\n", __LINE__);
654 static const struct rte_event_queue_conf conf_lb_atomic = {
655 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
656 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
657 .nb_atomic_flows = 1024,
658 .nb_atomic_order_sequences = 1024,
660 if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
661 printf("%d: error creating qid\n", __LINE__);
665 static const struct rte_event_queue_conf conf_single_link = {
666 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
667 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
668 .nb_atomic_flows = 1024,
669 .nb_atomic_order_sequences = 1024,
671 if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
672 printf("%d: error creating qid\n", __LINE__);
676 struct rte_event_port_conf port_conf = {
677 .new_event_threshold = 128,
681 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
682 printf("%d Error setting up port\n", __LINE__);
685 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
686 printf("%d Error setting up port\n", __LINE__);
690 /* link port to lb queue */
691 uint8_t queue_id = 0;
692 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
693 printf("%d: error creating link for qid\n", __LINE__);
697 int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
699 printf("%d: Error unlinking lb port\n", __LINE__);
/* Relink the same port/queue pair after the unlink. */
704 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
705 printf("%d: error creating link for qid\n", __LINE__);
/* NOTE(review): queue_id still holds 0 here, so port 1 is also linked to
 * queue 0 -- verify against the full file whether queue_id should have been
 * set to 1 (the single-link queue) first. */
710 int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
712 printf("%d: error mapping lb qid\n", __LINE__);
716 if (rte_event_dev_start(evdev) < 0) {
717 printf("%d: Error with start call\n", __LINE__);
/* Test: set up an ordered queue twice in a row (reconfigure before start)
 * and verify both setups succeed, then start the device.
 * NOTE(review): lines are missing from this extract (function header, error
 * gotos, and the final cleanup/return) -- confirm against the full file.
 */
729 ordered_reconfigure(struct test *t)
731 if (init(t, 1, 1) < 0 ||
732 create_ports(t, 1) < 0) {
733 printf("%d: Error initializing device\n", __LINE__);
737 const struct rte_event_queue_conf conf = {
738 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY,
739 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
740 .nb_atomic_flows = 1024,
741 .nb_atomic_order_sequences = 1024,
744 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
745 printf("%d: error creating qid\n", __LINE__);
/* Second setup of the same qid exercises the reconfigure path. */
749 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
750 printf("%d: error creating qid, for 2nd time\n", __LINE__);
/* NOTE(review): this link's return value is not checked, unlike every other
 * call site in the file. */
754 rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
755 if (rte_event_dev_start(evdev) < 0) {
756 printf("%d: Error with start call\n", __LINE__);
/* Test: enqueue an event whose queue_id is out of range (qid[0] + 20) and
 * verify it is dropped at the port (port_rx_dropped == 1) without being
 * counted as an inflight or as a device-level drop.
 * NOTE(review): lines are missing from this extract (function header,
 * declarations of i/err, error returns, the ev.mbuf/flow fields, cleanup)
 * -- confirm against the full file.
 */
768 invalid_qid(struct test *t)
770 struct test_event_dev_stats stats;
771 const int rx_enq = 0;
775 if (init(t, 1, 4) < 0 ||
776 create_ports(t, 4) < 0 ||
777 create_atomic_qids(t, 1) < 0) {
778 printf("%d: Error initializing device\n", __LINE__);
782 /* CQ mapping to QID */
783 for (i = 0; i < 4; i++) {
784 err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
787 printf("%d: error mapping port 1 qid\n", __LINE__);
792 if (rte_event_dev_start(evdev) < 0) {
793 printf("%d: Error with start call\n", __LINE__);
798 * Send in a packet with an invalid qid to the scheduler.
799 * We should see the packed enqueued OK, but the inflights for
800 * that packet should not be incremented, and the rx_dropped
801 * should be incremented.
803 static uint32_t flows1[] = {20};
805 for (i = 0; i < RTE_DIM(flows1); i++) {
806 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
808 printf("%d: gen of pkt failed\n", __LINE__);
812 struct rte_event ev = {
813 .op = RTE_EVENT_OP_NEW,
/* qid[0] + 20 is past the single configured queue => invalid. */
814 .queue_id = t->qid[0] + flows1[i],
818 /* generate pkt and enqueue */
819 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
821 printf("%d: Failed to enqueue\n", __LINE__);
826 /* call the scheduler */
827 rte_event_schedule(evdev);
829 err = test_event_dev_stats_get(evdev, &stats);
831 printf("%d: failed to get stats\n", __LINE__);
836 * Now check the resulting inflights on the port, and the rx_dropped.
838 if (stats.port_inflight[0] != 0) {
839 printf("%d:%s: port 1 inflight count not correct\n", __LINE__,
841 rte_event_dev_dump(evdev, stdout);
844 if (stats.port_rx_dropped[0] != 1) {
845 printf("%d:%s: port 1 drops\n", __LINE__, __func__);
846 rte_event_dev_dump(evdev, stdout);
849 /* each packet drop should only be counted in one place - port or dev */
850 if (stats.rx_dropped != 0) {
851 printf("%d:%s: port 1 dropped count not correct\n", __LINE__,
853 rte_event_dev_dump(evdev, stdout);
/* Test: push one load-balanced packet through an atomic queue, verify rx/tx
 * and inflight stats, dequeue it on the worker port, check the magic seqn,
 * then RELEASE it and verify the inflight count drops back to zero.
 * NOTE(review): lines are missing from this extract (function header,
 * declarations of err/deq_pkts/ev, error returns, ev.mbuf/queue_id/flow
 * assignments, and the final cleanup/return) -- confirm against the full
 * file.
 */
862 single_packet(struct test *t)
864 const uint32_t MAGIC_SEQN = 7321;
866 struct test_event_dev_stats stats;
867 const int rx_enq = 0;
868 const int wrk_enq = 2;
871 /* Create instance with 4 ports */
872 if (init(t, 1, 4) < 0 ||
873 create_ports(t, 4) < 0 ||
874 create_atomic_qids(t, 1) < 0) {
875 printf("%d: Error initializing device\n", __LINE__);
879 /* CQ mapping to QID */
880 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
882 printf("%d: error mapping lb qid\n", __LINE__);
887 if (rte_event_dev_start(evdev) < 0) {
888 printf("%d: Error with start call\n", __LINE__);
892 /************** Gen pkt and enqueue ****************/
893 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
895 printf("%d: gen of pkt failed\n", __LINE__);
899 ev.op = RTE_EVENT_OP_NEW;
900 ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
904 arp->seqn = MAGIC_SEQN;
906 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
908 printf("%d: Failed to enqueue\n", __LINE__);
912 rte_event_schedule(evdev);
914 err = test_event_dev_stats_get(evdev, &stats);
916 printf("%d: failed to get stats\n", __LINE__);
920 if (stats.rx_pkts != 1 ||
921 stats.tx_pkts != 1 ||
922 stats.port_inflight[wrk_enq] != 1) {
923 printf("%d: Sched core didn't handle pkt as expected\n",
925 rte_event_dev_dump(evdev, stdout);
931 deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
933 printf("%d: Failed to deq\n", __LINE__);
937 err = test_event_dev_stats_get(evdev, &stats);
939 printf("%d: failed to get stats\n", __LINE__);
/* NOTE(review): this second stats_get call duplicates the one just above and
 * its result is unused before "stats" is read -- likely redundant. */
943 err = test_event_dev_stats_get(evdev, &stats);
944 if (ev.mbuf->seqn != MAGIC_SEQN) {
945 printf("%d: magic sequence number not dequeued\n", __LINE__);
949 rte_pktmbuf_free(ev.mbuf);
/* RELEASE the event so the port's inflight count returns to zero. */
950 err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
952 printf("%d: Failed to enqueue\n", __LINE__);
955 rte_event_schedule(evdev);
957 err = test_event_dev_stats_get(evdev, &stats);
958 if (stats.port_inflight[wrk_enq] != 0) {
959 printf("%d: port inflight not correct\n", __LINE__);
/* Test: enqueue QID1_NUM packets to qid[0] (linked to port p1) and QID2_NUM
 * to qid[1] (linked to port p2); verify per-port inflight counts after
 * scheduling, after dequeue (unchanged), and after RELEASE (zero).
 * NOTE(review): lines are missing from this extract (function header,
 * declarations of i/err/p1/p2 and the QID1_NUM/QID2_NUM constants, error
 * returns/gotos, ev.mbuf/flow assignments, and the "err:" cleanup label
 * whose body is visible at the end) -- confirm against the full file.
 */
968 inflight_counts(struct test *t)
971 struct test_event_dev_stats stats;
972 const int rx_enq = 0;
978 /* Create instance with 4 ports */
979 if (init(t, 2, 3) < 0 ||
980 create_ports(t, 3) < 0 ||
981 create_atomic_qids(t, 2) < 0) {
982 printf("%d: Error initializing device\n", __LINE__);
986 /* CQ mapping to QID */
987 err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
989 printf("%d: error mapping lb qid\n", __LINE__);
993 err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
995 printf("%d: error mapping lb qid\n", __LINE__);
1000 if (rte_event_dev_start(evdev) < 0) {
1001 printf("%d: Error with start call\n", __LINE__);
1005 /************** FORWARD ****************/
1007 for (i = 0; i < QID1_NUM; i++) {
1008 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1011 printf("%d: gen of pkt failed\n", __LINE__);
1015 ev.queue_id = t->qid[0];
1016 ev.op = RTE_EVENT_OP_NEW;
1018 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1020 printf("%d: Failed to enqueue\n", __LINE__);
1025 for (i = 0; i < QID2_NUM; i++) {
1026 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1029 printf("%d: gen of pkt failed\n", __LINE__);
1032 ev.queue_id = t->qid[1];
1033 ev.op = RTE_EVENT_OP_NEW;
1035 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1037 printf("%d: Failed to enqueue\n", __LINE__);
1043 rte_event_schedule(evdev);
1045 err = test_event_dev_stats_get(evdev, &stats);
1047 printf("%d: failed to get stats\n", __LINE__);
1051 if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
1052 stats.tx_pkts != QID1_NUM + QID2_NUM) {
1053 printf("%d: Sched core didn't handle pkt as expected\n",
1058 if (stats.port_inflight[p1] != QID1_NUM) {
1059 printf("%d: %s port 1 inflight not correct\n", __LINE__,
1063 if (stats.port_inflight[p2] != QID2_NUM) {
1064 printf("%d: %s port 2 inflight not correct\n", __LINE__,
1069 /************** DEQUEUE INFLIGHT COUNT CHECKS ****************/
1071 struct rte_event events[QID1_NUM + QID2_NUM];
1072 uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
1073 RTE_DIM(events), 0);
1075 if (deq_pkts != QID1_NUM) {
1076 printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
/* Dequeue alone must NOT decrement inflights; only RELEASE does. */
1079 err = test_event_dev_stats_get(evdev, &stats);
1080 if (stats.port_inflight[p1] != QID1_NUM) {
1081 printf("%d: port 1 inflight decrement after DEQ != 0\n",
1085 for (i = 0; i < QID1_NUM; i++) {
1086 err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
1089 printf("%d: %s rte enqueue of inf release failed\n",
1090 __LINE__, __func__);
1096 * As the scheduler core decrements inflights, it needs to run to
1097 * process packets to act on the drop messages
1099 rte_event_schedule(evdev);
1101 err = test_event_dev_stats_get(evdev, &stats);
1102 if (stats.port_inflight[p1] != 0) {
1103 printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
/* Same sequence for the second port/queue pair. */
1108 deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
1109 RTE_DIM(events), 0);
1110 if (deq_pkts != QID2_NUM) {
1111 printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
1114 err = test_event_dev_stats_get(evdev, &stats);
1115 if (stats.port_inflight[p2] != QID2_NUM) {
1116 printf("%d: port 1 inflight decrement after DEQ != 0\n",
1120 for (i = 0; i < QID2_NUM; i++) {
1121 err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
1124 printf("%d: %s rte enqueue of inf release failed\n",
1125 __LINE__, __func__);
1131 * As the scheduler core decrements inflights, it needs to run to
1132 * process packets to act on the drop messages
1134 rte_event_schedule(evdev);
1136 err = test_event_dev_stats_get(evdev, &stats);
1137 if (stats.port_inflight[p2] != 0) {
1138 printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
/* Error-path dump (presumably under an "err:" label not visible here). */
1145 rte_event_dev_dump(evdev, stdout);
/* Test: push 3 packets through an ordered (check_order=1) or parallel
 * (check_order=0) queue to three worker ports, re-enqueue them in reverse
 * order to a directed tx queue, and -- when check_order is set -- verify the
 * tx port dequeues them back in original sequence-number order.
 * NOTE(review): lines are missing from this extract (function header,
 * declarations of i/err and w2_port, ev.mbuf/queue_id/flow assignments,
 * error returns, the fill of mbufs_out[], and the final cleanup) -- confirm
 * against the full file. In particular the failure printf below reads
 * mbufs_out[j]->seqn although no visible line populates mbufs_out.
 */
1151 parallel_basic(struct test *t, int check_order)
1153 const uint8_t rx_port = 0;
1154 const uint8_t w1_port = 1;
1155 const uint8_t w3_port = 3;
1156 const uint8_t tx_port = 4;
1159 uint32_t deq_pkts, j;
1160 struct rte_mbuf *mbufs[3];
1161 struct rte_mbuf *mbufs_out[3];
1162 const uint32_t MAGIC_SEQN = 1234;
1164 /* Create instance with 4 ports */
1165 if (init(t, 2, tx_port + 1) < 0 ||
1166 create_ports(t, tx_port + 1) < 0 ||
1167 (check_order ? create_ordered_qids(t, 1) :
1168 create_unordered_qids(t, 1)) < 0 ||
1169 create_directed_qids(t, 1, &tx_port)) {
1170 printf("%d: Error initializing device\n", __LINE__);
1176 * We need three ports, all mapped to the same ordered qid0. Then we'll
1177 * take a packet out to each port, re-enqueue in reverse order,
1178 * then make sure the reordering has taken place properly when we
1179 * dequeue from the tx_port.
1181 * Simplified test setup diagram:
1185 * qid0 - w2_port - qid1
1189 /* CQ mapping to QID for LB ports (directed mapped on create) */
1190 for (i = w1_port; i <= w3_port; i++) {
1191 err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
1194 printf("%d: error mapping lb qid\n", __LINE__);
1200 if (rte_event_dev_start(evdev) < 0) {
1201 printf("%d: Error with start call\n", __LINE__);
1205 /* Enqueue 3 packets to the rx port */
1206 for (i = 0; i < 3; i++) {
1207 struct rte_event ev;
1208 mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
1210 printf("%d: gen of pkt failed\n", __LINE__);
1214 ev.queue_id = t->qid[0];
1215 ev.op = RTE_EVENT_OP_NEW;
1217 mbufs[i]->seqn = MAGIC_SEQN + i;
1219 /* generate pkt and enqueue */
1220 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
1222 printf("%d: Failed to enqueue pkt %u, retval = %u\n",
1228 rte_event_schedule(evdev);
1230 /* use extra slot to make logic in loops easier */
1231 struct rte_event deq_ev[w3_port + 1];
1233 /* Dequeue the 3 packets, one from each worker port */
1234 for (i = w1_port; i <= w3_port; i++) {
1235 deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
1237 if (deq_pkts != 1) {
1238 printf("%d: Failed to deq\n", __LINE__);
1239 rte_event_dev_dump(evdev, stdout);
1244 /* Enqueue each packet in reverse order, flushing after each one */
1245 for (i = w3_port; i >= w1_port; i--) {
1247 deq_ev[i].op = RTE_EVENT_OP_FORWARD;
1248 deq_ev[i].queue_id = t->qid[1];
1249 err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
1251 printf("%d: Failed to enqueue\n", __LINE__);
1255 rte_event_schedule(evdev);
1257 /* dequeue from the tx ports, we should get 3 packets */
1258 deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
1261 /* Check to see if we've got all 3 packets */
1262 if (deq_pkts != 3) {
1263 printf("%d: expected 3 pkts at tx port got %d from port %d\n",
1264 __LINE__, deq_pkts, tx_port);
1265 rte_event_dev_dump(evdev, stdout);
1269 /* Check to see if the sequence numbers are in expected order */
1271 for (j = 0 ; j < deq_pkts ; j++) {
1272 if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
1274 "%d: Incorrect sequence number(%d) from port %d\n",
1275 __LINE__, mbufs_out[j]->seqn, tx_port);
1281 /* Destroy the instance */
/* Run the parallel_basic scenario with ordering checks enabled (ORDERED
 * queue). (Reconstructed: the extract had lost the "static int" header
 * line, the braces and the stray line-number prefixes.)
 */
static int
ordered_basic(struct test *t)
{
	return parallel_basic(t, 1);
}
/* Run the parallel_basic scenario without ordering checks (PARALLEL queue).
 * (Reconstructed: the extract had lost the "static int" header line, the
 * braces and the stray line-number prefixes.)
 */
static int
unordered_basic(struct test *t)
{
	return parallel_basic(t, 0);
}
/* Mbuf pool shared across test runs; created once in test_sw_eventdev() and
 * intentionally never freed so re-runs can reuse it. */
1298 static struct rte_mempool *eventdev_func_mempool;
/* Top-level test driver: locate or create the "event_sw0" vdev, set up the
 * shared mbuf pool, then run every functional test in sequence, reporting
 * the first failure.
 * NOTE(review): lines are missing from this extract (function header,
 * declarations of ret, the "evdev < 0" checks around get_dev_id, goto/return
 * statements after each failed test, and the final free(t)/return).
 */
1301 test_sw_eventdev(void)
/* NOTE(review): malloc return value is not checked before t is used. */
1303 struct test *t = malloc(sizeof(struct test));
1306 /* manually initialize the op, older gcc's complain on static
1307 * initialization of struct elements that are a bitfield.
1309 release_ev.op = RTE_EVENT_OP_RELEASE;
1311 const char *eventdev_name = "event_sw0";
1312 evdev = rte_event_dev_get_dev_id(eventdev_name);
1314 printf("%d: Eventdev %s not found - creating.\n",
1315 __LINE__, eventdev_name);
1316 if (rte_eal_vdev_init(eventdev_name, NULL) < 0) {
1317 printf("Error creating eventdev\n");
1320 evdev = rte_event_dev_get_dev_id(eventdev_name);
1322 printf("Error finding newly created eventdev\n");
1327 /* Only create mbuf pool once, reuse for each test run */
1328 if (!eventdev_func_mempool) {
1329 eventdev_func_mempool = rte_pktmbuf_pool_create(
1330 "EVENTDEV_SW_SA_MBUF_POOL",
1331 (1<<12), /* 4k buffers */
1332 32 /*MBUF_CACHE_SIZE*/,
1334 512, /* use very small mbufs */
1336 if (!eventdev_func_mempool) {
1337 printf("ERROR creating mempool\n");
1341 t->mbuf_pool = eventdev_func_mempool;
1343 printf("*** Running Single Directed Packet test...\n");
1344 ret = test_single_directed_packet(t);
1346 printf("ERROR - Single Directed Packet test FAILED.\n");
1349 printf("*** Running Single Load Balanced Packet test...\n");
1350 ret = single_packet(t);
1352 printf("ERROR - Single Packet test FAILED.\n");
1355 printf("*** Running Unordered Basic test...\n");
1356 ret = unordered_basic(t);
1358 printf("ERROR - Unordered Basic test FAILED.\n");
1361 printf("*** Running Ordered Basic test...\n");
1362 ret = ordered_basic(t);
1364 printf("ERROR - Ordered Basic test FAILED.\n");
1367 printf("*** Running Burst Packets test...\n");
1368 ret = burst_packets(t);
1370 printf("ERROR - Burst Packets test FAILED.\n");
1373 printf("*** Running Invalid QID test...\n");
1374 ret = invalid_qid(t);
1376 printf("ERROR - Invalid QID test FAILED.\n");
1379 printf("*** Running Inflight Count test...\n");
1380 ret = inflight_counts(t);
1382 printf("ERROR - Inflight Count test FAILED.\n");
1385 printf("*** Running Abuse Inflights test...\n");
1386 ret = abuse_inflights(t);
1388 printf("ERROR - Abuse Inflights test FAILED.\n");
1391 printf("*** Running Ordered Reconfigure test...\n");
1392 ret = ordered_reconfigure(t);
1394 printf("ERROR - Ordered Reconfigure test FAILED.\n");
1397 printf("*** Running Port LB Single Reconfig test...\n");
1398 ret = port_single_lb_reconfig(t);
1400 printf("ERROR - Port LB Single Reconfig test FAILED.\n");
1403 printf("*** Running Port Reconfig Credits test...\n");
1404 ret = port_reconfig_credits(t);
1406 printf("ERROR - Port Reconfig Credits Reset test FAILED.\n");
1410 * Free test instance, leaving mempool initialized, and a pointer to it
1411 * in static eventdev_func_mempool, as it is re-used on re-runs
/* Hook the driver into the DPDK autotest framework under the
 * "eventdev_sw_autotest" command name. */
1418 REGISTER_TEST_COMMAND(eventdev_sw_autotest, test_sw_eventdev);