4 * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 #include <sys/queue.h>
41 #include <rte_memory.h>
42 #include <rte_memzone.h>
43 #include <rte_launch.h>
45 #include <rte_per_lcore.h>
46 #include <rte_lcore.h>
47 #include <rte_debug.h>
48 #include <rte_ethdev.h>
49 #include <rte_cycles.h>
50 #include <rte_eventdev.h>
51 #include <rte_pause.h>
57 #define NUM_PACKETS (1<<18)
62 struct rte_mempool *mbuf_pool;
63 uint8_t port[MAX_PORTS];
64 uint8_t qid[MAX_QIDS];
68 static struct rte_event release_ev;
/*
 * rte_gen_arp(): allocate an mbuf from @mp and fill it with a canned
 * Ethernet/ARP request frame ("who-has 10.0.0.1 tell 10.0.0.2").
 * @portid is unused by the visible code.
 * NOTE(review): this dump is sampled — the opening brace, the mbuf
 * NULL-check and the trailing "return m;" are not visible here; confirm
 * against the full file before assuming they are missing.
 */
70 static inline struct rte_mbuf *
71 rte_gen_arp(int portid, struct rte_mempool *mp)
75 * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
77 static const uint8_t arp_request[] = {
78 /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
79 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
80 /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
81 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
82 /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
83 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
84 /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
85 0x00, 0x00, 0x00, 0x00
/* NOTE(review): "- 1" drops the last template byte; looks intentional in
 * the upstream test but verify the expected packet length (template is
 * 60 bytes including padding). */
88 int pkt_len = sizeof(arp_request) - 1;
90 m = rte_pktmbuf_alloc(mp);
/* Copy the template into the mbuf data area and set both pkt_len and
 * data_len so the single-segment mbuf is self-consistent. */
94 memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
95 arp_request, pkt_len);
96 rte_pktmbuf_pkt_len(m) = pkt_len;
97 rte_pktmbuf_data_len(m) = pkt_len;
/*
 * Dump all extended statistics (device-, port- and queue-level) of the
 * event device to stdout, name : value per line.
 * NOTE(review): the function signature (original lines ~103-106) is not
 * visible in this sampled dump — presumably a static void/int debug
 * helper; confirm name and return type against the full file.
 */
107 const uint32_t XSTATS_MAX = 1024;
109 uint32_t ids[XSTATS_MAX];
110 uint64_t values[XSTATS_MAX];
111 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
113 for (i = 0; i < XSTATS_MAX; i++)
116 /* Device names / values */
117 int ret = rte_event_dev_xstats_names_get(evdev,
118 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
119 xstats_names, ids, XSTATS_MAX);
121 printf("%d: xstats names get() returned error\n",
125 ret = rte_event_dev_xstats_get(evdev,
126 RTE_EVENT_DEV_XSTATS_DEVICE,
127 0, ids, values, ret);
128 if (ret > (signed int)XSTATS_MAX)
129 printf("%s %d: more xstats available than space\n",
131 for (i = 0; (signed int)i < ret; i++) {
132 printf("%d : %s : %"PRIu64"\n",
133 i, xstats_names[i].name, values[i]);
136 /* Port names / values */
137 ret = rte_event_dev_xstats_names_get(evdev,
138 RTE_EVENT_DEV_XSTATS_PORT, 0,
139 xstats_names, ids, XSTATS_MAX);
/* NOTE(review): port id 1 is queried here while names were fetched for
 * id 0 — mismatch may be deliberate in the test; confirm upstream. */
140 ret = rte_event_dev_xstats_get(evdev,
141 RTE_EVENT_DEV_XSTATS_PORT, 1,
143 if (ret > (signed int)XSTATS_MAX)
144 printf("%s %d: more xstats available than space\n",
146 for (i = 0; (signed int)i < ret; i++) {
147 printf("%d : %s : %"PRIu64"\n",
148 i, xstats_names[i].name, values[i]);
151 /* Queue names / values */
152 ret = rte_event_dev_xstats_names_get(evdev,
153 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
154 xstats_names, ids, XSTATS_MAX);
155 ret = rte_event_dev_xstats_get(evdev,
156 RTE_EVENT_DEV_XSTATS_QUEUE,
157 1, ids, values, ret);
158 if (ret > (signed int)XSTATS_MAX)
159 printf("%s %d: more xstats available than space\n",
161 for (i = 0; (signed int)i < ret; i++) {
162 printf("%d : %s : %"PRIu64"\n",
163 i, xstats_names[i].name, values[i]);
167 /* initialization and config */
/*
 * init(): zero the test state (preserving the shared mbuf pool pointer)
 * and configure the event device with nb_queues/nb_ports and fixed
 * flow/limit/depth parameters. Returns the rte_event_dev_configure()
 * result path (error lines not fully visible in this sampled dump).
 */
169 init(struct test *t, int nb_queues, int nb_ports)
171 struct rte_event_dev_config config = {
172 .nb_event_queues = nb_queues,
173 .nb_event_ports = nb_ports,
174 .nb_event_queue_flows = 1024,
175 .nb_events_limit = 4096,
176 .nb_event_port_dequeue_depth = 128,
177 .nb_event_port_enqueue_depth = 128,
181 void *temp = t->mbuf_pool; /* save and restore mbuf pool */
183 memset(t, 0, sizeof(*t));
186 ret = rte_event_dev_configure(evdev, &config);
188 printf("%d: Error configuring device\n", __LINE__);
/*
 * create_ports(): set up @num_ports event ports (0..num_ports-1) with a
 * shared static port configuration. Fails early when num_ports exceeds
 * MAX_PORTS. Error-return lines are elided in this sampled dump.
 */
193 create_ports(struct test *t, int num_ports)
196 static const struct rte_event_port_conf conf = {
197 .new_event_threshold = 1024,
201 if (num_ports > MAX_PORTS)
204 for (i = 0; i < num_ports; i++) {
205 if (rte_event_port_setup(evdev, i, &conf) < 0) {
206 printf("Error setting up port %d\n", i);
/*
 * create_lb_qids(): create @num_qids load-balanced queues of the type
 * given by @flags (atomic/ordered/parallel), appending to t->nb_qids.
 * The MAX_QIDS overflow check happens after the bump; its error return
 * is elided in this sampled dump.
 */
216 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
221 const struct rte_event_queue_conf conf = {
222 .event_queue_cfg = flags,
223 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
224 .nb_atomic_flows = 1024,
225 .nb_atomic_order_sequences = 1024,
228 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
229 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
230 printf("%d: error creating qid %d\n", __LINE__, i);
235 t->nb_qids += num_qids;
236 if (t->nb_qids > MAX_QIDS)
/* Convenience wrapper: create @num_qids atomic load-balanced queues. */
243 create_atomic_qids(struct test *t, int num_qids)
245 return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY);
/* Convenience wrapper: create @num_qids ordered load-balanced queues. */
249 create_ordered_qids(struct test *t, int num_qids)
251 return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ORDERED_ONLY);
/* Convenience wrapper: create @num_qids parallel (unordered) queues. */
256 create_unordered_qids(struct test *t, int num_qids)
258 return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY);
/*
 * create_directed_qids(): create @num_qids single-link (directed)
 * queues and link each to the corresponding entry of @ports 1:1.
 * NOTE(review): the link call uses &t->qid[i] — t->qid[] is presumably
 * populated in the elided lines between queue setup and linking;
 * confirm against the full file.
 */
262 create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
267 static const struct rte_event_queue_conf conf = {
268 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
269 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
272 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
273 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
274 printf("%d: error creating qid %d\n", __LINE__, i);
279 if (rte_event_port_link(evdev, ports[i - t->nb_qids],
280 &t->qid[i], NULL, 1) != 1) {
281 printf("%d: error creating link for qid %d\n",
286 t->nb_qids += num_qids;
287 if (t->nb_qids > MAX_QIDS)
/* Per-test teardown: stop then close the global event device. */
295 cleanup(struct test *t __rte_unused)
297 rte_event_dev_stop(evdev);
298 rte_event_dev_close(evdev);
/*
 * Snapshot of the SW eventdev counters the tests assert against,
 * aggregated device-wide and broken out per port and per queue.
 * Filled by test_event_dev_stats_get() below.
 */
302 struct test_event_dev_stats {
303 uint64_t rx_pkts; /**< Total packets received */
304 uint64_t rx_dropped; /**< Total packets dropped (Eg Invalid QID) */
305 uint64_t tx_pkts; /**< Total packets transmitted */
307 /** Packets received on this port */
308 uint64_t port_rx_pkts[MAX_PORTS];
309 /** Packets dropped on this port */
310 uint64_t port_rx_dropped[MAX_PORTS];
311 /** Packets inflight on this port */
312 uint64_t port_inflight[MAX_PORTS];
313 /** Packets transmitted on this port */
314 uint64_t port_tx_pkts[MAX_PORTS];
315 /** Packets received on this qid */
316 uint64_t qid_rx_pkts[MAX_QIDS];
317 /** Packets dropped on this qid */
318 uint64_t qid_rx_dropped[MAX_QIDS];
319 /** Packets transmitted on this qid */
320 uint64_t qid_tx_pkts[MAX_QIDS];
/*
 * test_event_dev_stats_get(): populate @stats by looking up each named
 * xstat ("dev_rx", "port_N_rx", "qid_N_tx", ...) via
 * rte_event_dev_xstats_by_name_get(). The static id arrays cache the
 * numeric xstat ids returned through the out-parameter.
 * NOTE(review): the static id caches make this helper non-reentrant
 * across threads — fine for a single-threaded test harness.
 */
324 test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
327 static uint32_t total_ids[3]; /* rx, tx and drop */
328 static uint32_t port_rx_pkts_ids[MAX_PORTS];
329 static uint32_t port_rx_dropped_ids[MAX_PORTS];
330 static uint32_t port_inflight_ids[MAX_PORTS];
331 static uint32_t port_tx_pkts_ids[MAX_PORTS];
332 static uint32_t qid_rx_pkts_ids[MAX_QIDS];
333 static uint32_t qid_rx_dropped_ids[MAX_QIDS];
334 static uint32_t qid_tx_pkts_ids[MAX_QIDS];
337 stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
338 "dev_rx", &total_ids[0]);
339 stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
340 "dev_drop", &total_ids[1]);
341 stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
342 "dev_tx", &total_ids[2]);
/* Per-port counters, one named lookup per stat per port. */
343 for (i = 0; i < MAX_PORTS; i++) {
345 snprintf(name, sizeof(name), "port_%u_rx", i);
346 stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
347 dev_id, name, &port_rx_pkts_ids[i]);
348 snprintf(name, sizeof(name), "port_%u_drop", i);
349 stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
350 dev_id, name, &port_rx_dropped_ids[i]);
351 snprintf(name, sizeof(name), "port_%u_inflight", i);
352 stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
353 dev_id, name, &port_inflight_ids[i]);
354 snprintf(name, sizeof(name), "port_%u_tx", i);
355 stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
356 dev_id, name, &port_tx_pkts_ids[i]);
/* Per-queue counters, same pattern keyed on "qid_N_*" names. */
358 for (i = 0; i < MAX_QIDS; i++) {
360 snprintf(name, sizeof(name), "qid_%u_rx", i);
361 stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
362 dev_id, name, &qid_rx_pkts_ids[i]);
363 snprintf(name, sizeof(name), "qid_%u_drop", i);
364 stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
365 dev_id, name, &qid_rx_dropped_ids[i]);
366 snprintf(name, sizeof(name), "qid_%u_tx", i);
367 stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
368 dev_id, name, &qid_tx_pkts_ids[i]);
374 /* run_prio_packet_test
375 * This performs a basic packet priority check on the test instance passed in.
376 * It is factored out of the main priority tests as the same tests must be
377 * performed to ensure prioritization of each type of QID.
380 * - An initialized test structure, including mempool
381 * - t->port[0] is initialized for both Enq / Deq of packets to the QID
382 * - t->qid[0] is the QID to be tested
383 * - if LB QID, the CQ must be mapped to the QID.
386 run_prio_packet_test(struct test *t)
/* Two packets: NORMAL priority enqueued first, HIGHEST second. The
 * HIGHEST one (MAGIC_SEQN[1]) must be dequeued first. */
389 const uint32_t MAGIC_SEQN[] = {4711, 1234};
390 const uint32_t PRIORITY[] = {
391 RTE_EVENT_DEV_PRIORITY_NORMAL,
392 RTE_EVENT_DEV_PRIORITY_HIGHEST
395 for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
396 /* generate pkt and enqueue */
398 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
400 printf("%d: gen of pkt failed\n", __LINE__);
/* Tag each mbuf with its magic sequence number for later checks. */
403 arp->seqn = MAGIC_SEQN[i];
405 ev = (struct rte_event){
406 .priority = PRIORITY[i],
407 .op = RTE_EVENT_OP_NEW,
408 .queue_id = t->qid[0],
411 err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
413 printf("%d: error failed to enqueue\n", __LINE__);
/* Drive the SW scheduler once so both events reach the CQ. */
418 rte_event_schedule(evdev);
420 struct test_event_dev_stats stats;
421 err = test_event_dev_stats_get(evdev, &stats);
423 printf("%d: error failed to get stats\n", __LINE__);
427 if (stats.port_rx_pkts[t->port[0]] != 2) {
428 printf("%d: error stats incorrect for directed port\n",
430 rte_event_dev_dump(evdev, stdout);
434 struct rte_event ev, ev2;
436 deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
438 printf("%d: error failed to deq\n", __LINE__);
439 rte_event_dev_dump(evdev, stdout);
/* First dequeue must yield the HIGHEST-priority packet. */
442 if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
443 printf("%d: first packet out not highest priority\n",
445 rte_event_dev_dump(evdev, stdout);
448 rte_pktmbuf_free(ev.mbuf);
450 deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
452 printf("%d: error failed to deq\n", __LINE__);
453 rte_event_dev_dump(evdev, stdout);
/* Second dequeue must yield the NORMAL-priority packet. */
456 if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
457 printf("%d: second packet out not lower priority\n",
459 rte_event_dev_dump(evdev, stdout);
462 rte_pktmbuf_free(ev2.mbuf);
/*
 * test_single_directed_packet(): enqueue one tagged packet on directed
 * port 0, run the scheduler, and verify it emerges on the linked worker
 * port with stats and sequence number intact.
 */
469 test_single_directed_packet(struct test *t)
471 const int rx_enq = 0;
472 const int wrk_enq = 2;
475 /* Create instance with 3 directed QIDs going to 3 ports */
476 if (init(t, 3, 3) < 0 ||
477 create_ports(t, 3) < 0 ||
478 create_directed_qids(t, 3, t->port) < 0)
481 if (rte_event_dev_start(evdev) < 0) {
482 printf("%d: Error with start call\n", __LINE__);
486 /************** FORWARD ****************/
487 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
488 struct rte_event ev = {
489 .op = RTE_EVENT_OP_NEW,
495 printf("%d: gen of pkt failed\n", __LINE__);
499 const uint32_t MAGIC_SEQN = 4711;
500 arp->seqn = MAGIC_SEQN;
502 /* generate pkt and enqueue */
503 err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
505 printf("%d: error failed to enqueue\n", __LINE__);
509 /* Run schedule() as dir packets may need to be re-ordered */
510 rte_event_schedule(evdev);
512 struct test_event_dev_stats stats;
513 err = test_event_dev_stats_get(evdev, &stats);
515 printf("%d: error failed to get stats\n", __LINE__);
519 if (stats.port_rx_pkts[rx_enq] != 1) {
520 printf("%d: error stats incorrect for directed port\n",
526 deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
528 printf("%d: error failed to deq\n", __LINE__);
/* Post-dequeue the worker port's rx count may be 0 or 1 depending on
 * how the directed path accounts the packet — both are accepted. */
532 err = test_event_dev_stats_get(evdev, &stats);
533 if (stats.port_rx_pkts[wrk_enq] != 0 &&
534 stats.port_rx_pkts[wrk_enq] != 1) {
535 printf("%d: error directed stats post-dequeue\n", __LINE__);
539 if (ev.mbuf->seqn != MAGIC_SEQN) {
540 printf("%d: error magic sequence number not dequeued\n",
545 rte_pktmbuf_free(ev.mbuf);
/*
 * test_directed_forward_credits(): loop a single event through a 1-port
 * 1-queue directed pipeline 1000 times, re-enqueueing it as FORWARD
 * each iteration — exercises credit recycling on the directed path.
 */
551 test_directed_forward_credits(struct test *t)
556 if (init(t, 1, 1) < 0 ||
557 create_ports(t, 1) < 0 ||
558 create_directed_qids(t, 1, t->port) < 0)
561 if (rte_event_dev_start(evdev) < 0) {
562 printf("%d: Error with start call\n", __LINE__);
566 struct rte_event ev = {
567 .op = RTE_EVENT_OP_NEW,
571 for (i = 0; i < 1000; i++) {
572 err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
574 printf("%d: error failed to enqueue\n", __LINE__);
577 rte_event_schedule(evdev);
580 deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
582 printf("%d: error failed to deq\n", __LINE__);
586 /* re-write event to be a forward, and continue looping it */
587 ev.op = RTE_EVENT_OP_FORWARD;
/* Priority test on a directed (single-link) queue: set up 1 port /
 * 1 directed qid, start the device, then run the shared priority check. */
596 test_priority_directed(struct test *t)
598 if (init(t, 1, 1) < 0 ||
599 create_ports(t, 1) < 0 ||
600 create_directed_qids(t, 1, t->port) < 0) {
601 printf("%d: Error initializing device\n", __LINE__);
605 if (rte_event_dev_start(evdev) < 0) {
606 printf("%d: Error with start call\n", __LINE__);
610 return run_prio_packet_test(t);
/* Priority test on an atomic LB queue: unlike the directed variant the
 * port must be explicitly linked to the qid before starting. */
614 test_priority_atomic(struct test *t)
616 if (init(t, 1, 1) < 0 ||
617 create_ports(t, 1) < 0 ||
618 create_atomic_qids(t, 1) < 0) {
619 printf("%d: Error initializing device\n", __LINE__);
624 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
625 printf("%d: error mapping qid to port\n", __LINE__);
628 if (rte_event_dev_start(evdev) < 0) {
629 printf("%d: Error with start call\n", __LINE__);
633 return run_prio_packet_test(t);
/* Priority test on an ordered LB queue; same shape as the atomic case
 * with an ordered qid. */
637 test_priority_ordered(struct test *t)
639 if (init(t, 1, 1) < 0 ||
640 create_ports(t, 1) < 0 ||
641 create_ordered_qids(t, 1) < 0) {
642 printf("%d: Error initializing device\n", __LINE__);
647 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
648 printf("%d: error mapping qid to port\n", __LINE__);
651 if (rte_event_dev_start(evdev) < 0) {
652 printf("%d: Error with start call\n", __LINE__);
656 return run_prio_packet_test(t);
/* Priority test on a parallel (unordered) LB queue; same shape as the
 * atomic case with a parallel qid. */
660 test_priority_unordered(struct test *t)
662 if (init(t, 1, 1) < 0 ||
663 create_ports(t, 1) < 0 ||
664 create_unordered_qids(t, 1) < 0) {
665 printf("%d: Error initializing device\n", __LINE__);
670 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
671 printf("%d: error mapping qid to port\n", __LINE__);
674 if (rte_event_dev_start(evdev) < 0) {
675 printf("%d: Error with start call\n", __LINE__);
679 return run_prio_packet_test(t);
/*
 * burst_packets(): 2 ports / 2 atomic queues, each port linked to one
 * queue. Enqueue NUM_PKTS packets (presumably alternating queue ids in
 * the elided event-init lines — TODO confirm), schedule, check the
 * device rx/tx totals, then verify each port dequeues exactly half.
 */
683 burst_packets(struct test *t)
685 /************** CONFIG ****************/
690 /* Create instance with 2 ports and 2 queues */
691 if (init(t, 2, 2) < 0 ||
692 create_ports(t, 2) < 0 ||
693 create_atomic_qids(t, 2) < 0) {
694 printf("%d: Error initializing device\n", __LINE__);
698 /* CQ mapping to QID */
699 ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
701 printf("%d: error mapping lb qid0\n", __LINE__);
704 ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
706 printf("%d: error mapping lb qid1\n", __LINE__);
710 if (rte_event_dev_start(evdev) < 0) {
711 printf("%d: Error with start call\n", __LINE__);
715 /************** FORWARD ****************/
716 const uint32_t rx_port = 0;
717 const uint32_t NUM_PKTS = 2;
719 for (i = 0; i < NUM_PKTS; i++) {
720 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
722 printf("%d: error generating pkt\n", __LINE__);
726 struct rte_event ev = {
727 .op = RTE_EVENT_OP_NEW,
732 /* generate pkt and enqueue */
733 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
735 printf("%d: Failed to enqueue\n", __LINE__);
739 rte_event_schedule(evdev);
741 /* Check stats for all NUM_PKTS arrived to sched core */
742 struct test_event_dev_stats stats;
744 err = test_event_dev_stats_get(evdev, &stats);
746 printf("%d: failed to get stats\n", __LINE__);
749 if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
750 printf("%d: Sched core didn't receive all %d pkts\n",
752 rte_event_dev_dump(evdev, stdout);
760 /******** DEQ QID 1 *******/
763 p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
765 rte_pktmbuf_free(ev.mbuf);
768 if (deq_pkts != NUM_PKTS/2) {
769 printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
774 /******** DEQ QID 2 *******/
778 p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
780 rte_pktmbuf_free(ev.mbuf);
782 if (deq_pkts != NUM_PKTS/2) {
783 printf("%d: Half of NUM_PKTS didn't arrive at port 2\n",
/*
 * abuse_inflights(): enqueue a RELEASE event (release_ev) on a port
 * with no new event outstanding, schedule, and verify the scheduler
 * neither counts it as rx/tx nor leaves phantom inflight credits on
 * the (unlinked) worker port.
 */
793 abuse_inflights(struct test *t)
795 const int rx_enq = 0;
796 const int wrk_enq = 2;
799 /* Create instance with 4 ports */
800 if (init(t, 1, 4) < 0 ||
801 create_ports(t, 4) < 0 ||
802 create_atomic_qids(t, 1) < 0) {
803 printf("%d: Error initializing device\n", __LINE__);
807 /* CQ mapping to QID */
/* NULL queue list => link the port to all configured queues. */
808 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
810 printf("%d: error mapping lb qid\n", __LINE__);
815 if (rte_event_dev_start(evdev) < 0) {
816 printf("%d: Error with start call\n", __LINE__);
820 /* Enqueue op only */
821 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
823 printf("%d: Failed to enqueue\n", __LINE__);
828 rte_event_schedule(evdev);
830 struct test_event_dev_stats stats;
832 err = test_event_dev_stats_get(evdev, &stats);
834 printf("%d: failed to get stats\n", __LINE__);
838 if (stats.rx_pkts != 0 ||
839 stats.tx_pkts != 0 ||
840 stats.port_inflight[wrk_enq] != 0) {
841 printf("%d: Sched core didn't handle pkt as expected\n",
/*
 * xstats_tests(): end-to-end validation of the SW PMD's extended-stats
 * API: expected stat counts per mode (device/port/queue), negative
 * lookups, value checks after traffic, and per-mode reset behaviour.
 * The expected-value tables below are tightly coupled to the SW PMD's
 * internal stat layout (the comments at original lines 1562-1564 warn
 * that adding a stat breaks them).
 * NOTE(review): many `if (ret != N)` guard lines are elided in this
 * sampled dump; the printfs show the expected counts (6 device,
 * 21 port, 17 queue stats).
 */
851 xstats_tests(struct test *t)
853 const int wrk_enq = 2;
856 /* Create instance with 4 ports */
857 if (init(t, 1, 4) < 0 ||
858 create_ports(t, 4) < 0 ||
859 create_atomic_qids(t, 1) < 0) {
860 printf("%d: Error initializing device\n", __LINE__);
864 /* CQ mapping to QID */
865 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
867 printf("%d: error mapping lb qid\n", __LINE__);
872 if (rte_event_dev_start(evdev) < 0) {
873 printf("%d: Error with start call\n", __LINE__);
877 const uint32_t XSTATS_MAX = 1024;
880 uint32_t ids[XSTATS_MAX];
881 uint64_t values[XSTATS_MAX];
882 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
884 for (i = 0; i < XSTATS_MAX; i++)
887 /* Device names / values */
888 int ret = rte_event_dev_xstats_names_get(evdev,
889 RTE_EVENT_DEV_XSTATS_DEVICE,
890 0, xstats_names, ids, XSTATS_MAX);
892 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
895 ret = rte_event_dev_xstats_get(evdev,
896 RTE_EVENT_DEV_XSTATS_DEVICE,
897 0, ids, values, ret);
899 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
903 /* Port names / values */
904 ret = rte_event_dev_xstats_names_get(evdev,
905 RTE_EVENT_DEV_XSTATS_PORT, 0,
906 xstats_names, ids, XSTATS_MAX);
908 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
911 ret = rte_event_dev_xstats_get(evdev,
912 RTE_EVENT_DEV_XSTATS_PORT, 0,
915 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
919 /* Queue names / values */
920 ret = rte_event_dev_xstats_names_get(evdev,
921 RTE_EVENT_DEV_XSTATS_QUEUE,
922 0, xstats_names, ids, XSTATS_MAX);
924 printf("%d: expected 17 stats, got return %d\n", __LINE__, ret);
928 /* NEGATIVE TEST: with wrong queue passed, 0 stats should be returned */
929 ret = rte_event_dev_xstats_get(evdev,
930 RTE_EVENT_DEV_XSTATS_QUEUE,
931 1, ids, values, ret);
932 if (ret != -EINVAL) {
933 printf("%d: expected 0 stats, got return %d\n", __LINE__, ret);
937 ret = rte_event_dev_xstats_get(evdev,
938 RTE_EVENT_DEV_XSTATS_QUEUE,
939 0, ids, values, ret);
941 printf("%d: expected 17 stats, got return %d\n", __LINE__, ret);
945 /* enqueue packets to check values */
946 for (i = 0; i < 3; i++) {
948 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
950 printf("%d: gen of pkt failed\n", __LINE__);
953 ev.queue_id = t->qid[i];
954 ev.op = RTE_EVENT_OP_NEW;
959 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
961 printf("%d: Failed to enqueue\n", __LINE__);
966 rte_event_schedule(evdev);
968 /* Device names / values */
969 int num_stats = rte_event_dev_xstats_names_get(evdev,
970 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
971 xstats_names, ids, XSTATS_MAX);
974 ret = rte_event_dev_xstats_get(evdev,
975 RTE_EVENT_DEV_XSTATS_DEVICE,
976 0, ids, values, num_stats);
/* Expected device stats after 3 pkts + 1 schedule call:
 * rx=3, tx=3, drop=0, sched_calls=1, no_iq_enq=0, no_cq_enq=0. */
977 static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
978 for (i = 0; (signed int)i < ret; i++) {
979 if (expected[i] != values[i]) {
981 "%d Error xstat %d (id %d) %s : %"PRIu64
982 ", expect %"PRIu64"\n",
983 __LINE__, i, ids[i], xstats_names[i].name,
984 values[i], expected[i]);
989 ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
992 /* ensure reset statistics are zero-ed */
993 static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
994 ret = rte_event_dev_xstats_get(evdev,
995 RTE_EVENT_DEV_XSTATS_DEVICE,
996 0, ids, values, num_stats);
997 for (i = 0; (signed int)i < ret; i++) {
998 if (expected_zero[i] != values[i]) {
1000 "%d Error, xstat %d (id %d) %s : %"PRIu64
1001 ", expect %"PRIu64"\n",
1002 __LINE__, i, ids[i], xstats_names[i].name,
1003 values[i], expected_zero[i]);
1008 /* port reset checks */
1009 num_stats = rte_event_dev_xstats_names_get(evdev,
1010 RTE_EVENT_DEV_XSTATS_PORT, 0,
1011 xstats_names, ids, XSTATS_MAX);
1014 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1015 0, ids, values, num_stats);
/* Expected per-port stats in PMD declaration order; ring sizes
 * (4096 rx free / 32 cq free) reflect the device config above. */
1017 static const uint64_t port_expected[] = {
1022 0 /* avg pkt cycles */,
1024 0 /* rx ring used */,
1025 4096 /* rx ring free */,
1026 0 /* cq ring used */,
1027 32 /* cq ring free */,
1028 0 /* dequeue calls */,
1029 /* 10 dequeue burst buckets */
1033 if (ret != RTE_DIM(port_expected)) {
1035 "%s %d: wrong number of port stats (%d), expected %zu\n",
1036 __func__, __LINE__, ret, RTE_DIM(port_expected));
1039 for (i = 0; (signed int)i < ret; i++) {
1040 if (port_expected[i] != values[i]) {
1042 "%s : %d: Error stat %s is %"PRIu64
1043 ", expected %"PRIu64"\n",
1044 __func__, __LINE__, xstats_names[i].name,
1045 values[i], port_expected[i]);
1050 ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1053 /* ensure reset statistics are zero-ed */
1054 static const uint64_t port_expected_zero[] = {
1059 0 /* avg pkt cycles */,
1061 0 /* rx ring used */,
1062 4096 /* rx ring free */,
1063 0 /* cq ring used */,
1064 32 /* cq ring free */,
1065 0 /* dequeue calls */,
1066 /* 10 dequeue burst buckets */
1070 ret = rte_event_dev_xstats_get(evdev,
1071 RTE_EVENT_DEV_XSTATS_PORT,
1072 0, ids, values, num_stats);
1073 for (i = 0; (signed int)i < ret; i++) {
1074 if (port_expected_zero[i] != values[i]) {
1076 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1077 ", expect %"PRIu64"\n",
1078 __LINE__, i, ids[i], xstats_names[i].name,
1079 values[i], port_expected_zero[i]);
1084 /* QUEUE STATS TESTS */
1085 num_stats = rte_event_dev_xstats_names_get(evdev,
1086 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1087 xstats_names, ids, XSTATS_MAX);
1088 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1089 0, ids, values, num_stats);
1091 printf("xstats get returned %d\n", ret);
1094 if ((unsigned int)ret > XSTATS_MAX)
1095 printf("%s %d: more xstats available than space\n",
1096 __func__, __LINE__);
1098 static const uint64_t queue_expected[] = {
1104 0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
1105 /* QID-to-Port: pinned_flows, packets */
1111 for (i = 0; (signed int)i < ret; i++) {
1112 if (queue_expected[i] != values[i]) {
1114 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1115 ", expect %"PRIu64"\n",
1116 __LINE__, i, ids[i], xstats_names[i].name,
1117 values[i], queue_expected[i]);
1122 /* Reset the queue stats here */
1123 ret = rte_event_dev_xstats_reset(evdev,
1124 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1128 /* Verify that the resetable stats are reset, and others are not */
1129 static const uint64_t queue_expected_zero[] = {
1135 0, 0, 0, 0, /* 4 iq used */
1136 /* QID-to-Port: pinned_flows, packets */
1143 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1144 ids, values, num_stats);
1146 for (i = 0; (signed int)i < ret; i++) {
1147 if (queue_expected_zero[i] != values[i]) {
1149 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1150 ", expect %"PRIu64"\n",
1151 __LINE__, i, ids[i], xstats_names[i].name,
1152 values[i], queue_expected_zero[i]);
1157 printf("%d : %d of values were not as expected above\n",
1166 rte_event_dev_dump(0, stdout);
/*
 * xstats_id_abuse_tests(): negative tests — query xstats names with an
 * out-of-range port/queue id (UINT8_MAX-1) and verify the API returns
 * zero stats rather than misbehaving.
 */
1173 xstats_id_abuse_tests(struct test *t)
1176 const uint32_t XSTATS_MAX = 1024;
1177 const uint32_t link_port = 2;
1179 uint32_t ids[XSTATS_MAX];
1180 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1182 /* Create instance with 4 ports */
1183 if (init(t, 1, 4) < 0 ||
1184 create_ports(t, 4) < 0 ||
1185 create_atomic_qids(t, 1) < 0) {
1186 printf("%d: Error initializing device\n", __LINE__);
1190 err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0);
1192 printf("%d: error mapping lb qid\n", __LINE__);
1196 if (rte_event_dev_start(evdev) < 0) {
1197 printf("%d: Error with start call\n", __LINE__);
1201 /* no test for device, as it ignores the port/q number */
1202 int num_stats = rte_event_dev_xstats_names_get(evdev,
1203 RTE_EVENT_DEV_XSTATS_PORT,
1204 UINT8_MAX-1, xstats_names, ids,
1206 if (num_stats != 0) {
1207 printf("%d: expected %d stats, got return %d\n", __LINE__,
1212 num_stats = rte_event_dev_xstats_names_get(evdev,
1213 RTE_EVENT_DEV_XSTATS_QUEUE,
1214 UINT8_MAX-1, xstats_names, ids,
1216 if (num_stats != 0) {
1217 printf("%d: expected %d stats, got return %d\n", __LINE__,
/*
 * port_reconfig_credits(): stress port reconfiguration — for NUM_ITERS
 * iterations, (re)set up queue 0 and port 0, link, start the device,
 * push NPKTS packets through, and stop the device again. Verifies
 * credits are not leaked across repeated reconfigure/start/stop cycles.
 */
1230 port_reconfig_credits(struct test *t)
1232 if (init(t, 1, 1) < 0) {
1233 printf("%d: Error initializing device\n", __LINE__);
1238 const uint32_t NUM_ITERS = 32;
1239 for (i = 0; i < NUM_ITERS; i++) {
1240 const struct rte_event_queue_conf conf = {
1241 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
1242 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1243 .nb_atomic_flows = 1024,
1244 .nb_atomic_order_sequences = 1024,
1246 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1247 printf("%d: error creating qid\n", __LINE__);
1252 static const struct rte_event_port_conf port_conf = {
1253 .new_event_threshold = 128,
1254 .dequeue_depth = 32,
1255 .enqueue_depth = 64,
1257 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1258 printf("%d Error setting up port\n", __LINE__);
1262 int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
1264 printf("%d: error mapping lb qid\n", __LINE__);
1268 if (rte_event_dev_start(evdev) < 0) {
1269 printf("%d: Error with start call\n", __LINE__);
1273 const uint32_t NPKTS = 1;
1275 for (j = 0; j < NPKTS; j++) {
1276 struct rte_event ev;
1277 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1279 printf("%d: gen of pkt failed\n", __LINE__);
1282 ev.queue_id = t->qid[0];
1283 ev.op = RTE_EVENT_OP_NEW;
1285 int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
1287 printf("%d: Failed to enqueue\n", __LINE__);
1288 rte_event_dev_dump(0, stdout);
1293 rte_event_schedule(evdev);
1295 struct rte_event ev[NPKTS];
1296 int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
1299 printf("%d error; no packet dequeued\n", __LINE__);
1301 /* let cleanup below stop the device on last iter */
1302 if (i != NUM_ITERS-1)
1303 rte_event_dev_stop(evdev);
/*
 * port_single_lb_reconfig(): mixed-queue relink test — one atomic LB
 * queue (qid 0) and one single-link queue (qid 1), two ports. Link
 * port 0 to the LB queue, unlink it, relink it, then link port 1 to
 * the same queue, and finally start the device. Exercises link/unlink
 * bookkeeping before start.
 */
1314 port_single_lb_reconfig(struct test *t)
1316 if (init(t, 2, 2) < 0) {
1317 printf("%d: Error initializing device\n", __LINE__);
1321 static const struct rte_event_queue_conf conf_lb_atomic = {
1322 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1323 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
1324 .nb_atomic_flows = 1024,
1325 .nb_atomic_order_sequences = 1024,
1327 if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
1328 printf("%d: error creating qid\n", __LINE__);
1332 static const struct rte_event_queue_conf conf_single_link = {
1333 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1334 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
1336 if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
1337 printf("%d: error creating qid\n", __LINE__);
1341 struct rte_event_port_conf port_conf = {
1342 .new_event_threshold = 128,
1343 .dequeue_depth = 32,
1344 .enqueue_depth = 64,
1346 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1347 printf("%d Error setting up port\n", __LINE__);
1350 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
1351 printf("%d Error setting up port\n", __LINE__);
1355 /* link port to lb queue */
1356 uint8_t queue_id = 0;
1357 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1358 printf("%d: error creating link for qid\n", __LINE__);
1362 int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
1364 printf("%d: Error unlinking lb port\n", __LINE__);
1369 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1370 printf("%d: error creating link for qid\n", __LINE__);
1375 int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
1377 printf("%d: error mapping lb qid\n", __LINE__);
1381 if (rte_event_dev_start(evdev) < 0) {
1382 printf("%d: Error with start call\n", __LINE__);
/*
 * xstats_brute_force(): fuzz the xstats API — for each of the three
 * stats modes (DEVICE/PORT/QUEUE, derived from the mode enum being
 * consecutive) query names and values for every id 0..UINT8_MAX-1.
 * The test passes if nothing crashes; return values are deliberately
 * ignored.
 */
1394 xstats_brute_force(struct test *t)
1397 const uint32_t XSTATS_MAX = 1024;
1398 uint32_t ids[XSTATS_MAX];
1399 uint64_t values[XSTATS_MAX];
1400 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1403 /* Create instance with 4 ports */
1404 if (init(t, 1, 4) < 0 ||
1405 create_ports(t, 4) < 0 ||
1406 create_atomic_qids(t, 1) < 0) {
1407 printf("%d: Error initializing device\n", __LINE__);
1411 int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1413 printf("%d: error mapping lb qid\n", __LINE__);
1417 if (rte_event_dev_start(evdev) < 0) {
1418 printf("%d: Error with start call\n", __LINE__);
1422 for (i = 0; i < XSTATS_MAX; i++)
1425 for (i = 0; i < 3; i++) {
/* Relies on DEVICE/PORT/QUEUE being consecutive enum values. */
1426 uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i;
1428 for (j = 0; j < UINT8_MAX; j++) {
1429 rte_event_dev_xstats_names_get(evdev, mode,
1430 j, xstats_names, ids, XSTATS_MAX);
1432 rte_event_dev_xstats_get(evdev, mode, j, ids,
1433 values, XSTATS_MAX);
/* Validate the xstats id/value/reset API for all three modes (device,
 * port, queue): check expected stat counts and names, enqueue NPKTS
 * events, verify the counter values, reset them, and verify the
 * post-reset values. Hard-coded offsets (PORT_OFF, QUEUE_OFF) and counts
 * (NUM_DEV_STATS/NUM_PORT_STATS/NUM_Q_STATS) tie this test tightly to
 * the sw PMD's stat layout, as the in-code comments themselves warn.
 * NOTE(review): extraction gaps — intermediate source lines are missing
 * from this view (returns, some array entries, cleanup).
 */
1445 xstats_id_reset_tests(struct test *t)
1447 const int wrk_enq = 2;
1450 /* Create instance with 4 ports */
1451 if (init(t, 1, 4) < 0 ||
1452 create_ports(t, 4) < 0 ||
1453 create_atomic_qids(t, 1) < 0) {
1454 printf("%d: Error initializing device\n", __LINE__);
1458 /* CQ mapping to QID */
1459 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
1461 printf("%d: error mapping lb qid\n", __LINE__);
1465 if (rte_event_dev_start(evdev) < 0) {
1466 printf("%d: Error with start call\n", __LINE__);
1470 #define XSTATS_MAX 1024
1473 uint32_t ids[XSTATS_MAX];
1474 uint64_t values[XSTATS_MAX];
1475 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1477 for (i = 0; i < XSTATS_MAX; i++)
1480 #define NUM_DEV_STATS 6
1481 /* Device names / values */
1482 int num_stats = rte_event_dev_xstats_names_get(evdev,
1483 RTE_EVENT_DEV_XSTATS_DEVICE,
1484 0, xstats_names, ids, XSTATS_MAX);
1485 if (num_stats != NUM_DEV_STATS) {
1486 printf("%d: expected %d stats, got return %d\n", __LINE__,
1487 NUM_DEV_STATS, num_stats);
1490 ret = rte_event_dev_xstats_get(evdev,
1491 RTE_EVENT_DEV_XSTATS_DEVICE,
1492 0, ids, values, num_stats);
1493 if (ret != NUM_DEV_STATS) {
1494 printf("%d: expected %d stats, got return %d\n", __LINE__,
1495 NUM_DEV_STATS, ret);
/* Inject NPKTS NEW events so device/port/queue counters are non-zero. */
1500 for (i = 0; i < NPKTS; i++) {
1501 struct rte_event ev;
1502 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1504 printf("%d: gen of pkt failed\n", __LINE__);
1507 ev.queue_id = t->qid[i];
1508 ev.op = RTE_EVENT_OP_NEW;
1512 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1514 printf("%d: Failed to enqueue\n", __LINE__);
1519 rte_event_schedule(evdev);
1521 static const char * const dev_names[] = {
1522 "dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
1523 "dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
1525 uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
1526 for (i = 0; (int)i < ret; i++) {
1528 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1532 printf("%d: %s id incorrect, expected %d got %d\n",
1533 __LINE__, dev_names[i], i, id);
1536 if (val != dev_expected[i]) {
/* NOTE(review): format/arg bug — "got %d" is passed `id`, but the
 * comparison above was on the uint64_t `val`; should print `val`
 * with PRIu64. TODO: fix when the full source is available.
 */
1537 printf("%d: %s value incorrect, expected %"
1538 PRIu64" got %d\n", __LINE__, dev_names[i],
1539 dev_expected[i], id);
1543 int reset_ret = rte_event_dev_xstats_reset(evdev,
1544 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
1548 printf("%d: failed to reset successfully\n", __LINE__);
1551 dev_expected[i] = 0;
1552 /* check value again */
1553 val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], 0);
1554 if (val != dev_expected[i]) {
1555 printf("%d: %s value incorrect, expected %"PRIu64
1556 " got %"PRIu64"\n", __LINE__, dev_names[i],
1557 dev_expected[i], val);
1562 /* 48 is stat offset from start of the devices whole xstats.
1563 * This WILL break every time we add a statistic to a port
1564 * or the device, but there is no other way to test
1567 /* num stats for the tested port. CQ size adds more stats to a port */
1568 #define NUM_PORT_STATS 21
1569 /* the port to test. */
1571 num_stats = rte_event_dev_xstats_names_get(evdev,
1572 RTE_EVENT_DEV_XSTATS_PORT, PORT,
1573 xstats_names, ids, XSTATS_MAX);
1574 if (num_stats != NUM_PORT_STATS) {
1575 printf("%d: expected %d stats, got return %d\n",
1576 __LINE__, NUM_PORT_STATS, num_stats);
1579 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT,
1580 ids, values, num_stats);
1582 if (ret != NUM_PORT_STATS) {
1583 printf("%d: expected %d stats, got return %d\n",
1584 __LINE__, NUM_PORT_STATS, ret);
1587 static const char * const port_names[] = {
1592 "port_2_avg_pkt_cycles",
1594 "port_2_rx_ring_used",
1595 "port_2_rx_ring_free",
1596 "port_2_cq_ring_used",
1597 "port_2_cq_ring_free",
1598 "port_2_dequeue_calls",
1599 "port_2_dequeues_returning_0",
1600 "port_2_dequeues_returning_1-4",
1601 "port_2_dequeues_returning_5-8",
1602 "port_2_dequeues_returning_9-12",
1603 "port_2_dequeues_returning_13-16",
1604 "port_2_dequeues_returning_17-20",
1605 "port_2_dequeues_returning_21-24",
1606 "port_2_dequeues_returning_25-28",
1607 "port_2_dequeues_returning_29-32",
1608 "port_2_dequeues_returning_33-36",
1610 uint64_t port_expected[] = {
1614 NPKTS, /* inflight */
1615 0, /* avg pkt cycles */
1617 0, /* rx ring used */
1618 4096, /* rx ring free */
1619 NPKTS, /* cq ring used */
1620 25, /* cq ring free */
1621 0, /* dequeue zero calls */
1622 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1625 uint64_t port_expected_zero[] = {
1629 NPKTS, /* inflight */
1630 0, /* avg pkt cycles */
1632 0, /* rx ring used */
1633 4096, /* rx ring free */
1634 NPKTS, /* cq ring used */
1635 25, /* cq ring free */
1636 0, /* dequeue zero calls */
1637 0, 0, 0, 0, 0, /* 10 dequeue buckets */
/* Guard against the tables drifting out of sync with NUM_PORT_STATS. */
1640 if (RTE_DIM(port_expected) != NUM_PORT_STATS ||
1641 RTE_DIM(port_names) != NUM_PORT_STATS) {
1642 printf("%d: port array of wrong size\n", __LINE__);
1647 for (i = 0; (int)i < ret; i++) {
1649 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1652 if (id != i + PORT_OFF) {
1653 printf("%d: %s id incorrect, expected %d got %d\n",
1654 __LINE__, port_names[i], i+PORT_OFF,
1658 if (val != port_expected[i]) {
/* NOTE(review): same format/arg bug as the device loop above —
 * prints `id` with "%d" where `val` (PRIu64) was intended.
 */
1659 printf("%d: %s value incorrect, expected %"PRIu64
1660 " got %d\n", __LINE__, port_names[i],
1661 port_expected[i], id);
1665 int reset_ret = rte_event_dev_xstats_reset(evdev,
1666 RTE_EVENT_DEV_XSTATS_PORT, PORT,
1670 printf("%d: failed to reset successfully\n", __LINE__);
1673 /* check value again */
1674 val = rte_event_dev_xstats_by_name_get(evdev, port_names[i], 0);
1675 if (val != port_expected_zero[i]) {
1676 printf("%d: %s value incorrect, expected %"PRIu64
1677 " got %"PRIu64"\n", __LINE__, port_names[i],
1678 port_expected_zero[i], val);
1685 /* num queue stats */
1686 #define NUM_Q_STATS 17
1687 /* queue offset from start of the devices whole xstats.
1688 * This will break every time we add a statistic to a device/port/queue
1690 #define QUEUE_OFF 90
1691 const uint32_t queue = 0;
1692 num_stats = rte_event_dev_xstats_names_get(evdev,
1693 RTE_EVENT_DEV_XSTATS_QUEUE, queue,
1694 xstats_names, ids, XSTATS_MAX);
1695 if (num_stats != NUM_Q_STATS) {
1696 printf("%d: expected %d stats, got return %d\n",
1697 __LINE__, NUM_Q_STATS, num_stats);
1700 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1701 queue, ids, values, num_stats);
1702 if (ret != NUM_Q_STATS) {
/* NOTE(review): message hardcodes "21" but the check is against
 * NUM_Q_STATS (17); should use NUM_Q_STATS in the format args.
 */
1703 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
1706 static const char * const queue_names[] = {
1716 "qid_0_port_0_pinned_flows",
1717 "qid_0_port_0_packets",
1718 "qid_0_port_1_pinned_flows",
1719 "qid_0_port_1_packets",
1720 "qid_0_port_2_pinned_flows",
1721 "qid_0_port_2_packets",
1722 "qid_0_port_3_pinned_flows",
1723 "qid_0_port_3_packets",
1725 uint64_t queue_expected[] = {
1735 /* QID-to-Port: pinned_flows, packets */
1741 uint64_t queue_expected_zero[] = {
1751 /* QID-to-Port: pinned_flows, packets */
1757 if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
1758 RTE_DIM(queue_expected_zero) != NUM_Q_STATS ||
1759 RTE_DIM(queue_names) != NUM_Q_STATS) {
1760 printf("%d : queue array of wrong size\n", __LINE__);
1765 for (i = 0; (int)i < ret; i++) {
1767 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1770 if (id != i + QUEUE_OFF) {
1771 printf("%d: %s id incorrect, expected %d got %d\n",
1772 __LINE__, queue_names[i], i+QUEUE_OFF,
1776 if (val != queue_expected[i]) {
/* NOTE(review): argument order looks swapped — format starts
 * "%d: %d:" but args are (i, __LINE__); every other message in
 * this file leads with __LINE__. TODO confirm and fix.
 */
1777 printf("%d: %d: %s value , expected %"PRIu64
1778 " got %"PRIu64"\n", i, __LINE__,
1779 queue_names[i], queue_expected[i], val);
1783 int reset_ret = rte_event_dev_xstats_reset(evdev,
1784 RTE_EVENT_DEV_XSTATS_QUEUE,
1787 printf("%d: failed to reset successfully\n", __LINE__);
1790 /* check value again */
1791 val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i],
1793 if (val != queue_expected_zero[i]) {
1794 printf("%d: %s value incorrect, expected %"PRIu64
1795 " got %"PRIu64"\n", __LINE__, queue_names[i],
1796 queue_expected_zero[i], val);
/* Regression test: setting up the same ordered queue id twice (queue 0)
 * must succeed — i.e. queue reconfiguration before start is allowed.
 * NOTE(review): extraction gaps — returns/cleanup lines are missing
 * from this view.
 */
1812 ordered_reconfigure(struct test *t)
1814 if (init(t, 1, 1) < 0 ||
1815 create_ports(t, 1) < 0) {
1816 printf("%d: Error initializing device\n", __LINE__);
1820 const struct rte_event_queue_conf conf = {
1821 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY,
1822 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1823 .nb_atomic_flows = 1024,
1824 .nb_atomic_order_sequences = 1024,
/* First setup of queue 0. */
1827 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1828 printf("%d: error creating qid\n", __LINE__);
/* Second setup of the same queue id — the reconfigure under test. */
1832 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1833 printf("%d: error creating qid, for 2nd time\n", __LINE__);
1837 rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1838 if (rte_event_dev_start(evdev) < 0) {
1839 printf("%d: Error with start call\n", __LINE__);
/* Verify QID priority scheduling: one port, three atomic QIDs of
 * increasing priority; enqueue one packet per QID in order, then check
 * dequeue order follows QID priority (seqn 2, 1, 0), not ingress order.
 * NOTE(review): extraction gaps — some lines (seqn assignment, returns)
 * are missing from this view.
 */
1851 qid_priorities(struct test *t)
1853 /* Test works by having a CQ with enough empty space for all packets,
1854 * and enqueueing 3 packets to 3 QIDs. They must return based on the
1855 * priority of the QID, not the ingress order, to pass the test
1858 /* Create instance with 1 ports, and 3 qids */
1859 if (init(t, 3, 1) < 0 ||
1860 create_ports(t, 1) < 0) {
1861 printf("%d: Error initializing device\n", __LINE__);
1865 for (i = 0; i < 3; i++) {
1867 const struct rte_event_queue_conf conf = {
1868 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
1869 /* increase priority (0 == highest), as we go */
1870 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
1871 .nb_atomic_flows = 1024,
1872 .nb_atomic_order_sequences = 1024,
1875 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
1876 printf("%d: error creating qid %d\n", __LINE__, i);
1882 /* map all QIDs to port */
1883 rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1885 if (rte_event_dev_start(evdev) < 0) {
1886 printf("%d: Error with start call\n", __LINE__);
1890 /* enqueue 3 packets, setting seqn and QID to check priority */
1891 for (i = 0; i < 3; i++) {
1892 struct rte_event ev;
1893 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1895 printf("%d: gen of pkt failed\n", __LINE__);
1898 ev.queue_id = t->qid[i];
1899 ev.op = RTE_EVENT_OP_NEW;
1903 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1905 printf("%d: Failed to enqueue\n", __LINE__);
1910 rte_event_schedule(evdev);
1912 /* dequeue packets, verify priority was upheld */
1913 struct rte_event ev[32];
1915 rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
1916 if (deq_pkts != 3) {
1917 printf("%d: failed to deq packets\n", __LINE__);
1918 rte_event_dev_dump(evdev, stdout);
/* Highest-priority QID (last created) must come out first: 2, 1, 0. */
1921 for (i = 0; i < 3; i++) {
1922 if (ev[i].mbuf->seqn != 2-i) {
1924 "%d: qid priority test: seqn %d incorrectly prioritized\n",
/* Verify atomic load-balancing: one QID linked to ports 1-3; enqueue a
 * fixed flow pattern from port 0 and check the resulting per-port
 * inflight counts (4/2/3) match the expected flow-to-CQ distribution.
 * NOTE(review): extraction gaps — error returns and cleanup lines are
 * missing from this view.
 */
1934 load_balancing(struct test *t)
1936 const int rx_enq = 0;
1940 if (init(t, 1, 4) < 0 ||
1941 create_ports(t, 4) < 0 ||
1942 create_atomic_qids(t, 1) < 0) {
1943 printf("%d: Error initializing device\n", __LINE__);
1947 for (i = 0; i < 3; i++) {
1948 /* map port 1 - 3 inclusive */
1949 if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
1951 printf("%d: error mapping qid to port %d\n",
1957 if (rte_event_dev_start(evdev) < 0) {
1958 printf("%d: Error with start call\n", __LINE__);
1962 /************** FORWARD ****************/
1964 * Create a set of flows that test the load-balancing operation of the
1965 * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
1966 * with a new flow, which should be sent to the 3rd mapped CQ
1968 static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
1970 for (i = 0; i < RTE_DIM(flows); i++) {
1971 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1973 printf("%d: gen of pkt failed\n", __LINE__);
1977 struct rte_event ev = {
1978 .op = RTE_EVENT_OP_NEW,
1979 .queue_id = t->qid[0],
1980 .flow_id = flows[i],
1983 /* generate pkt and enqueue */
1984 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1986 printf("%d: Failed to enqueue\n", __LINE__);
1991 rte_event_schedule(evdev);
1993 struct test_event_dev_stats stats;
1994 err = test_event_dev_stats_get(evdev, &stats);
1996 printf("%d: failed to get stats\n", __LINE__);
/* Flow 0 (4 pkts) -> port 1, flow 1 (2 pkts) -> port 2,
 * flow 2 (3 pkts) -> port 3.
 */
2000 if (stats.port_inflight[1] != 4) {
2001 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2005 if (stats.port_inflight[2] != 2) {
2006 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2010 if (stats.port_inflight[3] != 3) {
2011 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
/* Verify that flow-to-CQ pinning is released when a flow completes:
 * fill CQs with flows 0/1/2, release the flow-0 packet from port 1,
 * then inject new flows and check flow 0 migrates to a different CQ.
 * Expected final inflights: port1=3, port2=4, port3=2.
 * NOTE(review): extraction gaps — error returns and cleanup lines are
 * missing from this view.
 */
2021 load_balancing_history(struct test *t)
2023 struct test_event_dev_stats stats = {0};
2024 const int rx_enq = 0;
2028 /* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
2029 if (init(t, 1, 4) < 0 ||
2030 create_ports(t, 4) < 0 ||
2031 create_atomic_qids(t, 1) < 0)
2034 /* CQ mapping to QID */
2035 if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
2036 printf("%d: error mapping port 1 qid\n", __LINE__);
2039 if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
2040 printf("%d: error mapping port 2 qid\n", __LINE__);
2043 if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
2044 printf("%d: error mapping port 3 qid\n", __LINE__);
2047 if (rte_event_dev_start(evdev) < 0) {
2048 printf("%d: Error with start call\n", __LINE__);
2053 * Create a set of flows that test the load-balancing operation of the
2054 * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
2055 * the packet from CQ 0, send in a new set of flows. Ensure that:
2056 * 1. The new flow 3 gets into the empty CQ0
2057 * 2. packets for existing flow gets added into CQ1
2058 * 3. Next flow 0 pkt is now onto CQ2, since CQ0 and CQ1 now contain
2059 * more outstanding pkts
2061 * This test makes sure that when a flow ends (i.e. all packets
2062 * have been completed for that flow), that the flow can be moved
2063 * to a different CQ when new packets come in for that flow.
2065 static uint32_t flows1[] = {0, 1, 1, 2};
2067 for (i = 0; i < RTE_DIM(flows1); i++) {
2068 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2069 struct rte_event ev = {
2070 .flow_id = flows1[i],
2071 .op = RTE_EVENT_OP_NEW,
2072 .queue_id = t->qid[0],
2073 .event_type = RTE_EVENT_TYPE_CPU,
2074 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2079 printf("%d: gen of pkt failed\n", __LINE__);
/* Stash the flow id in the mbuf so the consumer can verify it. */
2082 arp->hash.rss = flows1[i];
2083 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2085 printf("%d: Failed to enqueue\n", __LINE__);
2090 /* call the scheduler */
2091 rte_event_schedule(evdev);
2093 /* Dequeue the flow 0 packet from port 1, so that we can then drop */
2094 struct rte_event ev;
2095 if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
2096 printf("%d: failed to dequeue\n", __LINE__);
2099 if (ev.mbuf->hash.rss != flows1[0]) {
2100 printf("%d: unexpected flow received\n", __LINE__);
2104 /* drop the flow 0 packet from port 1 */
2105 rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
2107 /* call the scheduler */
2108 rte_event_schedule(evdev);
2111 * Set up the next set of flows, first a new flow to fill up
2112 * CQ 0, so that the next flow 0 packet should go to CQ2
2114 static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
2116 for (i = 0; i < RTE_DIM(flows2); i++) {
2117 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2118 struct rte_event ev = {
2119 .flow_id = flows2[i],
2120 .op = RTE_EVENT_OP_NEW,
2121 .queue_id = t->qid[0],
2122 .event_type = RTE_EVENT_TYPE_CPU,
2123 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2128 printf("%d: gen of pkt failed\n", __LINE__);
2131 arp->hash.rss = flows2[i];
2133 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2135 printf("%d: Failed to enqueue\n", __LINE__);
2141 rte_event_schedule(evdev);
2143 err = test_event_dev_stats_get(evdev, &stats);
2145 printf("%d:failed to get stats\n", __LINE__);
2150 * Now check the resulting inflights on each port.
2152 if (stats.port_inflight[1] != 3) {
2153 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2155 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2156 (unsigned int)stats.port_inflight[1],
2157 (unsigned int)stats.port_inflight[2],
2158 (unsigned int)stats.port_inflight[3]);
2161 if (stats.port_inflight[2] != 4) {
2162 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2164 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2165 (unsigned int)stats.port_inflight[1],
2166 (unsigned int)stats.port_inflight[2],
2167 (unsigned int)stats.port_inflight[3]);
2170 if (stats.port_inflight[3] != 2) {
2171 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2173 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2174 (unsigned int)stats.port_inflight[1],
2175 (unsigned int)stats.port_inflight[2],
2176 (unsigned int)stats.port_inflight[3]);
/* Drain: release every remaining event on ports 1-3 before cleanup. */
2180 for (i = 1; i <= 3; i++) {
2181 struct rte_event ev;
2182 while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
2183 rte_event_enqueue_burst(evdev, i, &release_ev, 1);
2185 rte_event_schedule(evdev);
/* Verify handling of an event with an out-of-range queue_id: the
 * enqueue itself succeeds, but the packet must be counted in the
 * port's rx_dropped (exactly once — not also in the device counter)
 * and must not appear in any inflight count.
 * NOTE(review): extraction gaps — returns and cleanup lines are
 * missing from this view.
 */
2192 invalid_qid(struct test *t)
2194 struct test_event_dev_stats stats;
2195 const int rx_enq = 0;
2199 if (init(t, 1, 4) < 0 ||
2200 create_ports(t, 4) < 0 ||
2201 create_atomic_qids(t, 1) < 0) {
2202 printf("%d: Error initializing device\n", __LINE__);
2206 /* CQ mapping to QID */
2207 for (i = 0; i < 4; i++) {
2208 err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
2211 printf("%d: error mapping port 1 qid\n", __LINE__);
2216 if (rte_event_dev_start(evdev) < 0) {
2217 printf("%d: Error with start call\n", __LINE__);
2222 * Send in a packet with an invalid qid to the scheduler.
2223 * We should see the packed enqueued OK, but the inflights for
2224 * that packet should not be incremented, and the rx_dropped
2225 * should be incremented.
2227 static uint32_t flows1[] = {20};
2229 for (i = 0; i < RTE_DIM(flows1); i++) {
2230 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2232 printf("%d: gen of pkt failed\n", __LINE__);
2236 struct rte_event ev = {
2237 .op = RTE_EVENT_OP_NEW,
/* qid[0] + 20 is deliberately beyond the single configured queue. */
2238 .queue_id = t->qid[0] + flows1[i],
2242 /* generate pkt and enqueue */
2243 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2245 printf("%d: Failed to enqueue\n", __LINE__);
2250 /* call the scheduler */
2251 rte_event_schedule(evdev);
2253 err = test_event_dev_stats_get(evdev, &stats);
2255 printf("%d: failed to get stats\n", __LINE__);
2260 * Now check the resulting inflights on the port, and the rx_dropped.
2262 if (stats.port_inflight[0] != 0) {
2263 printf("%d:%s: port 1 inflight count not correct\n", __LINE__,
2265 rte_event_dev_dump(evdev, stdout);
2268 if (stats.port_rx_dropped[0] != 1) {
2269 printf("%d:%s: port 1 drops\n", __LINE__, __func__);
2270 rte_event_dev_dump(evdev, stdout);
2273 /* each packet drop should only be counted in one place - port or dev */
2274 if (stats.rx_dropped != 0) {
2275 printf("%d:%s: port 1 dropped count not correct\n", __LINE__,
2277 rte_event_dev_dump(evdev, stdout);
/* End-to-end smoke test for one packet: enqueue a tagged mbuf (seqn =
 * MAGIC_SEQN) from port 0, schedule, verify rx/tx/inflight stats,
 * dequeue it on the worker port, release it, and check the inflight
 * count returns to zero.
 * NOTE(review): extraction gaps — returns and cleanup lines are
 * missing from this view.
 */
2286 single_packet(struct test *t)
2288 const uint32_t MAGIC_SEQN = 7321;
2289 struct rte_event ev;
2290 struct test_event_dev_stats stats;
2291 const int rx_enq = 0;
2292 const int wrk_enq = 2;
2295 /* Create instance with 4 ports */
2296 if (init(t, 1, 4) < 0 ||
2297 create_ports(t, 4) < 0 ||
2298 create_atomic_qids(t, 1) < 0) {
2299 printf("%d: Error initializing device\n", __LINE__);
2303 /* CQ mapping to QID */
2304 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
2306 printf("%d: error mapping lb qid\n", __LINE__);
2311 if (rte_event_dev_start(evdev) < 0) {
2312 printf("%d: Error with start call\n", __LINE__);
2316 /************** Gen pkt and enqueue ****************/
2317 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2319 printf("%d: gen of pkt failed\n", __LINE__);
2323 ev.op = RTE_EVENT_OP_NEW;
2324 ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
2328 arp->seqn = MAGIC_SEQN;
2330 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2332 printf("%d: Failed to enqueue\n", __LINE__);
2336 rte_event_schedule(evdev);
2338 err = test_event_dev_stats_get(evdev, &stats);
2340 printf("%d: failed to get stats\n", __LINE__);
2344 if (stats.rx_pkts != 1 ||
2345 stats.tx_pkts != 1 ||
2346 stats.port_inflight[wrk_enq] != 1) {
2347 printf("%d: Sched core didn't handle pkt as expected\n",
2349 rte_event_dev_dump(evdev, stdout);
2355 deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
2357 printf("%d: Failed to deq\n", __LINE__);
2361 err = test_event_dev_stats_get(evdev, &stats);
2363 printf("%d: failed to get stats\n", __LINE__);
/* NOTE(review): second stats_get immediately after the checked one,
 * with err unchecked — looks redundant; TODO confirm against full
 * source and drop if so.
 */
2367 err = test_event_dev_stats_get(evdev, &stats);
2368 if (ev.mbuf->seqn != MAGIC_SEQN) {
2369 printf("%d: magic sequence number not dequeued\n", __LINE__);
2373 rte_pktmbuf_free(ev.mbuf);
2374 err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
2376 printf("%d: Failed to enqueue\n", __LINE__);
2379 rte_event_schedule(evdev);
2381 err = test_event_dev_stats_get(evdev, &stats);
2382 if (stats.port_inflight[wrk_enq] != 0) {
2383 printf("%d: port inflight not correct\n", __LINE__);
/* Verify per-port inflight accounting across two QIDs/ports: enqueue
 * QID1_NUM and QID2_NUM packets, confirm inflights after scheduling,
 * confirm dequeue alone does NOT decrement inflight, then release all
 * events and confirm inflights drop to zero after scheduling.
 * NOTE(review): extraction gaps — p1/p2/QID?_NUM definitions, returns
 * and cleanup lines are missing from this view.
 */
2392 inflight_counts(struct test *t)
2394 struct rte_event ev;
2395 struct test_event_dev_stats stats;
2396 const int rx_enq = 0;
2402 /* Create instance with 4 ports */
2403 if (init(t, 2, 3) < 0 ||
2404 create_ports(t, 3) < 0 ||
2405 create_atomic_qids(t, 2) < 0) {
2406 printf("%d: Error initializing device\n", __LINE__);
2410 /* CQ mapping to QID */
2411 err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
2413 printf("%d: error mapping lb qid\n", __LINE__);
2417 err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
2419 printf("%d: error mapping lb qid\n", __LINE__);
2424 if (rte_event_dev_start(evdev) < 0) {
2425 printf("%d: Error with start call\n", __LINE__);
2429 /************** FORWARD ****************/
2431 for (i = 0; i < QID1_NUM; i++) {
2432 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2435 printf("%d: gen of pkt failed\n", __LINE__);
2439 ev.queue_id = t->qid[0];
2440 ev.op = RTE_EVENT_OP_NEW;
2442 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2444 printf("%d: Failed to enqueue\n", __LINE__);
2449 for (i = 0; i < QID2_NUM; i++) {
2450 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2453 printf("%d: gen of pkt failed\n", __LINE__);
2456 ev.queue_id = t->qid[1];
2457 ev.op = RTE_EVENT_OP_NEW;
2459 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2461 printf("%d: Failed to enqueue\n", __LINE__);
2467 rte_event_schedule(evdev);
2469 err = test_event_dev_stats_get(evdev, &stats);
2471 printf("%d: failed to get stats\n", __LINE__);
2475 if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
2476 stats.tx_pkts != QID1_NUM + QID2_NUM) {
2477 printf("%d: Sched core didn't handle pkt as expected\n",
2482 if (stats.port_inflight[p1] != QID1_NUM) {
2483 printf("%d: %s port 1 inflight not correct\n", __LINE__,
2487 if (stats.port_inflight[p2] != QID2_NUM) {
2488 printf("%d: %s port 2 inflight not correct\n", __LINE__,
2493 /************** DEQUEUE INFLIGHT COUNT CHECKS ****************/
2495 struct rte_event events[QID1_NUM + QID2_NUM];
2496 uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
2497 RTE_DIM(events), 0);
2499 if (deq_pkts != QID1_NUM) {
2500 printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
/* Dequeue must NOT decrement inflight; only RELEASE does. */
2503 err = test_event_dev_stats_get(evdev, &stats);
2504 if (stats.port_inflight[p1] != QID1_NUM) {
2505 printf("%d: port 1 inflight decrement after DEQ != 0\n",
2509 for (i = 0; i < QID1_NUM; i++) {
2510 err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
2513 printf("%d: %s rte enqueue of inf release failed\n",
2514 __LINE__, __func__);
2520 * As the scheduler core decrements inflights, it needs to run to
2521 * process packets to act on the drop messages
2523 rte_event_schedule(evdev);
2525 err = test_event_dev_stats_get(evdev, &stats);
2526 if (stats.port_inflight[p1] != 0) {
2527 printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
2532 deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
2533 RTE_DIM(events), 0);
2534 if (deq_pkts != QID2_NUM) {
2535 printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
2538 err = test_event_dev_stats_get(evdev, &stats);
2539 if (stats.port_inflight[p2] != QID2_NUM) {
/* NOTE(review): message says "port 1" but this check is on p2 —
 * copy-paste in the error text; TODO fix to "port 2".
 */
2540 printf("%d: port 1 inflight decrement after DEQ != 0\n",
2544 for (i = 0; i < QID2_NUM; i++) {
2545 err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
2548 printf("%d: %s rte enqueue of inf release failed\n",
2549 __LINE__, __func__);
2555 * As the scheduler core decrements inflights, it needs to run to
2556 * process packets to act on the drop messages
2558 rte_event_schedule(evdev);
2560 err = test_event_dev_stats_get(evdev, &stats);
2561 if (stats.port_inflight[p2] != 0) {
2562 printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
2569 rte_event_dev_dump(evdev, stdout);
2575 parallel_basic(struct test *t, int check_order)
2577 const uint8_t rx_port = 0;
2578 const uint8_t w1_port = 1;
2579 const uint8_t w3_port = 3;
2580 const uint8_t tx_port = 4;
2583 uint32_t deq_pkts, j;
2584 struct rte_mbuf *mbufs[3];
2585 struct rte_mbuf *mbufs_out[3] = { 0 };
2586 const uint32_t MAGIC_SEQN = 1234;
2588 /* Create instance with 4 ports */
2589 if (init(t, 2, tx_port + 1) < 0 ||
2590 create_ports(t, tx_port + 1) < 0 ||
2591 (check_order ? create_ordered_qids(t, 1) :
2592 create_unordered_qids(t, 1)) < 0 ||
2593 create_directed_qids(t, 1, &tx_port)) {
2594 printf("%d: Error initializing device\n", __LINE__);
2600 * We need three ports, all mapped to the same ordered qid0. Then we'll
2601 * take a packet out to each port, re-enqueue in reverse order,
2602 * then make sure the reordering has taken place properly when we
2603 * dequeue from the tx_port.
2605 * Simplified test setup diagram:
2609 * qid0 - w2_port - qid1
2613 /* CQ mapping to QID for LB ports (directed mapped on create) */
2614 for (i = w1_port; i <= w3_port; i++) {
2615 err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
2618 printf("%d: error mapping lb qid\n", __LINE__);
2624 if (rte_event_dev_start(evdev) < 0) {
2625 printf("%d: Error with start call\n", __LINE__);
2629 /* Enqueue 3 packets to the rx port */
2630 for (i = 0; i < 3; i++) {
2631 struct rte_event ev;
2632 mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
2634 printf("%d: gen of pkt failed\n", __LINE__);
2638 ev.queue_id = t->qid[0];
2639 ev.op = RTE_EVENT_OP_NEW;
2641 mbufs[i]->seqn = MAGIC_SEQN + i;
2643 /* generate pkt and enqueue */
2644 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
2646 printf("%d: Failed to enqueue pkt %u, retval = %u\n",
2652 rte_event_schedule(evdev);
2654 /* use extra slot to make logic in loops easier */
2655 struct rte_event deq_ev[w3_port + 1];
2657 /* Dequeue the 3 packets, one from each worker port */
2658 for (i = w1_port; i <= w3_port; i++) {
2659 deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
2661 if (deq_pkts != 1) {
2662 printf("%d: Failed to deq\n", __LINE__);
2663 rte_event_dev_dump(evdev, stdout);
2668 /* Enqueue each packet in reverse order, flushing after each one */
2669 for (i = w3_port; i >= w1_port; i--) {
2671 deq_ev[i].op = RTE_EVENT_OP_FORWARD;
2672 deq_ev[i].queue_id = t->qid[1];
2673 err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
2675 printf("%d: Failed to enqueue\n", __LINE__);
2679 rte_event_schedule(evdev);
2681 /* dequeue from the tx ports, we should get 3 packets */
2682 deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
2685 /* Check to see if we've got all 3 packets */
2686 if (deq_pkts != 3) {
2687 printf("%d: expected 3 pkts at tx port got %d from port %d\n",
2688 __LINE__, deq_pkts, tx_port);
2689 rte_event_dev_dump(evdev, stdout);
2693 /* Check to see if the sequence numbers are in expected order */
2695 for (j = 0 ; j < deq_pkts ; j++) {
2696 if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
2698 "%d: Incorrect sequence number(%d) from port %d\n",
2699 __LINE__, mbufs_out[j]->seqn, tx_port);
2705 /* Destroy the instance */
/* Run parallel_basic with ordering verification enabled. */
2711 ordered_basic(struct test *t)
2713 return parallel_basic(t, 1);
/* Run parallel_basic without ordering verification. */
2717 unordered_basic(struct test *t)
2719 return parallel_basic(t, 0);
/* Head-of-line-blocking avoidance test: fill the CQ of whichever port
 * received the first flow until its ring is full, leaving one packet of
 * that flow stuck in the IQ. Then inject a different flow and verify
 * (via xstats) that it bypasses the blocked packet and reaches the
 * other port's CQ, while the IQ still holds exactly one packet.
 * NOTE(review): extraction gaps — the rx_port detection branch, flow-id
 * assignments and returns are missing from this view.
 */
2723 holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
2725 const struct rte_event new_ev = {
2726 .op = RTE_EVENT_OP_NEW
2727 /* all other fields zero */
2729 struct rte_event ev = new_ev;
2730 unsigned int rx_port = 0; /* port we get the first flow on */
2731 char rx_port_used_stat[64];
2732 char rx_port_free_stat[64];
2733 char other_port_used_stat[64];
2735 if (init(t, 1, 2) < 0 ||
2736 create_ports(t, 2) < 0 ||
2737 create_atomic_qids(t, 1) < 0) {
2738 printf("%d: Error initializing device\n", __LINE__);
2741 int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2742 if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 ||
2744 printf("%d: Error links queue to ports\n", __LINE__);
2747 if (rte_event_dev_start(evdev) < 0) {
2748 printf("%d: Error with start call\n", __LINE__);
2752 /* send one packet and see where it goes, port 0 or 1 */
2753 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2754 printf("%d: Error doing first enqueue\n", __LINE__);
2757 rte_event_schedule(evdev);
2759 if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
/* Build per-port stat names now that we know which port got the flow;
 * rx_port ^ 1 is the other of the two ports.
 */
2763 snprintf(rx_port_used_stat, sizeof(rx_port_used_stat),
2764 "port_%u_cq_ring_used", rx_port);
2765 snprintf(rx_port_free_stat, sizeof(rx_port_free_stat),
2766 "port_%u_cq_ring_free", rx_port);
2767 snprintf(other_port_used_stat, sizeof(other_port_used_stat),
2768 "port_%u_cq_ring_used", rx_port ^ 1);
2769 if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL)
2771 printf("%d: Error, first event not scheduled\n", __LINE__);
2775 /* now fill up the rx port's queue with one flow to cause HOLB */
2778 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2779 printf("%d: Error with enqueue\n", __LINE__);
2782 rte_event_schedule(evdev);
2783 } while (rte_event_dev_xstats_by_name_get(evdev,
2784 rx_port_free_stat, NULL) != 0);
2786 /* one more packet, which needs to stay in IQ - i.e. HOLB */
2788 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2789 printf("%d: Error with enqueue\n", __LINE__);
2792 rte_event_schedule(evdev);
2794 /* check that the other port still has an empty CQ */
2795 if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2797 printf("%d: Error, second port CQ is not empty\n", __LINE__);
2800 /* check IQ now has one packet */
2801 if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2803 printf("%d: Error, QID does not have exactly 1 packet\n",
2808 /* send another flow, which should pass the other IQ entry */
2811 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2812 printf("%d: Error with enqueue\n", __LINE__);
2815 rte_event_schedule(evdev);
2817 if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2819 printf("%d: Error, second flow did not pass out first\n",
2824 if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2826 printf("%d: Error, QID does not have exactly 1 packet\n",
2833 rte_event_dev_dump(evdev, stdout);
/* Worker lcore for the loopback stress test: dequeue bursts on port 1
 * and FORWARD each event through successive QIDs; once an event reaches
 * queue 8 its per-mbuf udata64 pass counter is bumped and, after 16
 * passes, the mbuf is freed and the event RELEASEd.
 * NOTE(review): extraction gaps — the count update, queue_id rotation
 * and loop braces are missing from this view; the "0-8 ... 8*16 = 128"
 * arithmetic in the original comment looks internally inconsistent —
 * confirm against the full source.
 */
2839 worker_loopback_worker_fn(void *arg)
2841 struct test *t = arg;
2842 uint8_t port = t->port[1];
2847 * Takes packets from the input port and then loops them back through
2848 * the Eventdev. Each packet gets looped through QIDs 0-8, 16 times
2849 * so each packet goes through 8*16 = 128 times.
2851 printf("%d: \tWorker function started\n", __LINE__);
2852 while (count < NUM_PACKETS) {
2853 #define BURST_SIZE 32
2854 struct rte_event ev[BURST_SIZE];
2855 uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev,
2862 for (i = 0; i < nb_rx; i++) {
/* Not yet at the last queue: forward to the next QID. */
2864 if (ev[i].queue_id != 8) {
2865 ev[i].op = RTE_EVENT_OP_FORWARD;
2866 enqd = rte_event_enqueue_burst(evdev, port,
2869 printf("%d: Can't enqueue FWD!!\n",
/* Reached queue 8: count one full pass through the pipeline. */
2877 ev[i].mbuf->udata64++;
2878 if (ev[i].mbuf->udata64 != 16) {
2879 ev[i].op = RTE_EVENT_OP_FORWARD;
2880 enqd = rte_event_enqueue_burst(evdev, port,
2883 printf("%d: Can't enqueue FWD!!\n",
2889 /* we have hit 16 iterations through system - drop */
2890 rte_pktmbuf_free(ev[i].mbuf);
2892 ev[i].op = RTE_EVENT_OP_RELEASE;
2893 enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1);
2895 printf("%d drop enqueue failed\n", __LINE__);
/* Producer lcore for the loopback stress test: allocate NUM_PACKETS
 * mbufs (busy-waiting on pool exhaustion) and enqueue each as a NEW
 * event on qid 0 via port 0, retrying until the enqueue succeeds.
 * NOTE(review): extraction gaps — count increment and loop braces are
 * missing from this view.
 */
2905 worker_loopback_producer_fn(void *arg)
2907 struct test *t = arg;
2908 uint8_t port = t->port[0];
2911 printf("%d: \tProducer function started\n", __LINE__);
2912 while (count < NUM_PACKETS) {
2913 struct rte_mbuf *m = 0;
/* Spin until the mempool can supply an mbuf (workers free them). */
2915 m = rte_pktmbuf_alloc(t->mbuf_pool);
2916 } while (m == NULL);
2920 struct rte_event ev = {
2921 .op = RTE_EVENT_OP_NEW,
2922 .queue_id = t->qid[0],
/* Derive a pseudo-random flow id from the mbuf address. */
2923 .flow_id = (uintptr_t)m & 0xFFFF,
2927 if (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1) {
/* Retry loop: the new_event_threshold may make enqueue fail. */
2928 while (rte_event_enqueue_burst(evdev, port, &ev, 1) !=
/*
 * Worker-loopback stress test driver.
 *
 * Sets up the device with 8 atomic QIDs and 2 ports: port 0 is the
 * producer (low new_event_threshold = 512), port 1 is the worker
 * (new_event_threshold = 4096) linked to all 8 queues.  Launches
 * worker_loopback_producer_fn and worker_loopback_worker_fn on two
 * separate lcores, then drives rte_event_schedule() on this (master)
 * lcore until both remote lcores finish.
 *
 * While scheduling it prints Rx/Tx stats roughly once per rte_get_timer_hz()
 * cycle interval, and if tx_pkts has not advanced for ~3 such intervals it
 * dumps device state/xstats and reports a deadlock.
 *
 * NOTE(review): interior lines are elided in this dump (embedded numbering
 * jumps); error-path returns and some statements are not visible here.
 */
2940 worker_loopback(struct test *t)
2942 	/* use a single producer core, and a worker core to see what happens
2943 	 * if the worker loops packets back multiple times
2945 	struct test_event_dev_stats stats;
2946 	uint64_t print_cycles = 0, cycles = 0;
2947 	uint64_t tx_pkts = 0;
2949 	int w_lcore, p_lcore;
2951 	if (init(t, 8, 2) < 0 ||
2952 			create_atomic_qids(t, 8) < 0) {
2953 		printf("%d: Error initializing device\n", __LINE__);
2957 	/* RX with low max events */
2958 	static struct rte_event_port_conf conf = {
2959 			.dequeue_depth = 32,
2960 			.enqueue_depth = 64,
2962 	/* beware: this cannot be initialized in the static above as it would
2963 	 * only be initialized once - and this needs to be set for multiple runs
2965 	conf.new_event_threshold = 512;
2967 	if (rte_event_port_setup(evdev, 0, &conf) < 0) {
2968 		printf("Error setting up RX port\n");
2972 	/* TX with higher max events */
2973 	conf.new_event_threshold = 4096;
2974 	if (rte_event_port_setup(evdev, 1, &conf) < 0) {
2975 		printf("Error setting up TX port\n");
2980 	/* CQ mapping to QID */
	/* NULL queues + 0 count links the port to every configured queue */
2981 	err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2982 	if (err != 8) { /* should have mapped all queues*/
2983 		printf("%d: error mapping port 2 to all qids\n", __LINE__);
2987 	if (rte_event_dev_start(evdev) < 0) {
2988 		printf("%d: Error with start call\n", __LINE__);
	/* pick two worker lcores after the master (skip_master = 1) */
2992 	p_lcore = rte_get_next_lcore(
2993 			/* start core */ -1,
2994 			/* skip master */ 1,
2996 	w_lcore = rte_get_next_lcore(p_lcore, 1, 0);
2998 	rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);
2999 	rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);
3001 	print_cycles = cycles = rte_get_timer_cycles();
3002 	while (rte_eal_get_lcore_state(p_lcore) != FINISHED ||
3003 			rte_eal_get_lcore_state(w_lcore) != FINISHED) {
3005 		rte_event_schedule(evdev);
3007 		uint64_t new_cycles = rte_get_timer_cycles();
		/* once-per-second progress report */
3009 		if (new_cycles - print_cycles > rte_get_timer_hz()) {
3010 			test_event_dev_stats_get(evdev, &stats);
3012 			       "%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n",
3013 				__LINE__, stats.rx_pkts, stats.tx_pkts);
3015 			print_cycles = new_cycles;
		/* deadlock watchdog: no tx progress within ~3 seconds */
3017 		if (new_cycles - cycles > rte_get_timer_hz() * 3) {
3018 			test_event_dev_stats_get(evdev, &stats);
3019 			if (stats.tx_pkts == tx_pkts) {
3020 				rte_event_dev_dump(evdev, stdout);
3021 				printf("Dumping xstats:\n");
3024 					"%d: No schedules for seconds, deadlock\n",
3028 			tx_pkts = stats.tx_pkts;
3029 			cycles = new_cycles;
3032 	rte_event_schedule(evdev); /* ensure all completions are flushed */
3034 	rte_eal_mp_wait_lcore();
3040 static struct rte_mempool *eventdev_func_mempool;
/*
 * Top-level autotest entry point (registered below via
 * REGISTER_TEST_COMMAND as "eventdev_sw_autotest").
 *
 * Locates the "event_sw0" vdev — creating it with rte_vdev_init() if it
 * does not exist yet — lazily creates a shared mbuf pool on first run
 * (reused across re-runs via the static eventdev_func_mempool), then runs
 * the full battery of sub-tests in sequence.  The worker-loopback test is
 * only run when at least 3 lcores are available (master + producer +
 * worker).
 *
 * NOTE(review): interior lines are elided in this dump (embedded numbering
 * jumps); the per-test "if (ret != 0) ... return ret" error paths and the
 * NULL check after malloc() are presumably in the elided lines — confirm
 * against the full file.
 */
3043 test_sw_eventdev(void)
3045 	struct test *t = malloc(sizeof(struct test));
3048 	/* manually initialize the op, older gcc's complain on static
3049 	 * initialization of struct elements that are a bitfield.
3051 	release_ev.op = RTE_EVENT_OP_RELEASE;
3053 	const char *eventdev_name = "event_sw0";
3054 	evdev = rte_event_dev_get_dev_id(eventdev_name);
3056 		printf("%d: Eventdev %s not found - creating.\n",
3057 				__LINE__, eventdev_name);
3058 		if (rte_vdev_init(eventdev_name, NULL) < 0) {
3059 			printf("Error creating eventdev\n");
3062 		evdev = rte_event_dev_get_dev_id(eventdev_name);
3064 			printf("Error finding newly created eventdev\n");
3069 	/* Only create mbuf pool once, reuse for each test run */
3070 	if (!eventdev_func_mempool) {
3071 		eventdev_func_mempool = rte_pktmbuf_pool_create(
3072 				"EVENTDEV_SW_SA_MBUF_POOL",
3073 				(1<<12), /* 4k buffers */
3074 				32 /*MBUF_CACHE_SIZE*/,
3076 				512, /* use very small mbufs */
3078 		if (!eventdev_func_mempool) {
3079 			printf("ERROR creating mempool\n");
3083 	t->mbuf_pool = eventdev_func_mempool;
3084 	printf("*** Running Single Directed Packet test...\n");
3085 	ret = test_single_directed_packet(t);
3087 		printf("ERROR - Single Directed Packet test FAILED.\n");
3090 	printf("*** Running Directed Forward Credit test...\n");
3091 	ret = test_directed_forward_credits(t);
3093 		printf("ERROR - Directed Forward Credit test FAILED.\n");
3096 	printf("*** Running Single Load Balanced Packet test...\n");
3097 	ret = single_packet(t);
3099 		printf("ERROR - Single Packet test FAILED.\n");
3102 	printf("*** Running Unordered Basic test...\n");
3103 	ret = unordered_basic(t);
3105 		printf("ERROR -  Unordered Basic test FAILED.\n");
3108 	printf("*** Running Ordered Basic test...\n");
3109 	ret = ordered_basic(t);
3111 		printf("ERROR -  Ordered Basic test FAILED.\n");
3114 	printf("*** Running Burst Packets test...\n");
3115 	ret = burst_packets(t);
3117 		printf("ERROR - Burst Packets test FAILED.\n");
3120 	printf("*** Running Load Balancing test...\n");
3121 	ret = load_balancing(t);
3123 		printf("ERROR - Load Balancing test FAILED.\n");
3126 	printf("*** Running Prioritized Directed test...\n");
3127 	ret = test_priority_directed(t);
3129 		printf("ERROR - Prioritized Directed test FAILED.\n");
3132 	printf("*** Running Prioritized Atomic test...\n");
3133 	ret = test_priority_atomic(t);
3135 		printf("ERROR - Prioritized Atomic test FAILED.\n");
3139 	printf("*** Running Prioritized Ordered test...\n");
3140 	ret = test_priority_ordered(t);
3142 		printf("ERROR - Prioritized Ordered test FAILED.\n");
3145 	printf("*** Running Prioritized Unordered test...\n");
3146 	ret = test_priority_unordered(t);
3148 		printf("ERROR - Prioritized Unordered test FAILED.\n");
3151 	printf("*** Running Invalid QID test...\n");
3152 	ret = invalid_qid(t);
3154 		printf("ERROR - Invalid QID test FAILED.\n");
3157 	printf("*** Running Load Balancing History test...\n");
3158 	ret = load_balancing_history(t);
3160 		printf("ERROR - Load Balancing History test FAILED.\n");
3163 	printf("*** Running Inflight Count test...\n");
3164 	ret = inflight_counts(t);
3166 		printf("ERROR - Inflight Count test FAILED.\n");
3169 	printf("*** Running Abuse Inflights test...\n");
3170 	ret = abuse_inflights(t);
3172 		printf("ERROR - Abuse Inflights test FAILED.\n");
3175 	printf("*** Running XStats test...\n");
3176 	ret = xstats_tests(t);
3178 		printf("ERROR - XStats test FAILED.\n");
3181 	printf("*** Running XStats ID Reset test...\n");
3182 	ret = xstats_id_reset_tests(t);
3184 		printf("ERROR - XStats ID Reset test FAILED.\n");
3187 	printf("*** Running XStats Brute Force test...\n");
3188 	ret = xstats_brute_force(t);
3190 		printf("ERROR - XStats Brute Force test FAILED.\n");
3193 	printf("*** Running XStats ID Abuse test...\n");
3194 	ret = xstats_id_abuse_tests(t);
3196 		printf("ERROR - XStats ID Abuse test FAILED.\n");
3199 	printf("*** Running QID Priority test...\n");
3200 	ret = qid_priorities(t);
3202 		printf("ERROR - QID Priority test FAILED.\n");
3205 	printf("*** Running Ordered Reconfigure test...\n");
3206 	ret = ordered_reconfigure(t);
3208 		printf("ERROR - Ordered Reconfigure test FAILED.\n");
3211 	printf("*** Running Port LB Single Reconfig test...\n");
3212 	ret = port_single_lb_reconfig(t);
3214 		printf("ERROR - Port LB Single Reconfig test FAILED.\n");
3217 	printf("*** Running Port Reconfig Credits test...\n");
3218 	ret = port_reconfig_credits(t);
3220 		printf("ERROR - Port Reconfig Credits Reset test FAILED.\n");
3223 	printf("*** Running Head-of-line-blocking test...\n");
3226 		printf("ERROR - Head-of-line-blocking test FAILED.\n");
	/* needs master + producer + worker lcores */
3229 	if (rte_lcore_count() >= 3) {
3230 		printf("*** Running Worker loopback test...\n");
3231 		ret = worker_loopback(t);
3233 			printf("ERROR - Worker loopback test FAILED.\n");
3237 		printf("### Not enough cores for worker loopback test.\n");
3238 		printf("### Need at least 3 cores for test.\n");
3241 	 * Free test instance, leaving mempool initialized, and a pointer to it
3242 	 * in static eventdev_func_mempool, as it is re-used on re-runs
3249 REGISTER_TEST_COMMAND(eventdev_sw_autotest, test_sw_eventdev);