7da6c256e80cb8da66f0565e88c84cbabdfd59a6
[dpdk.git] / test / test / test_eventdev_sw.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdio.h>
35 #include <string.h>
36 #include <stdint.h>
37 #include <errno.h>
38 #include <unistd.h>
39 #include <sys/queue.h>
40
41 #include <rte_memory.h>
42 #include <rte_memzone.h>
43 #include <rte_launch.h>
44 #include <rte_eal.h>
45 #include <rte_per_lcore.h>
46 #include <rte_lcore.h>
47 #include <rte_debug.h>
48 #include <rte_ethdev.h>
49 #include <rte_cycles.h>
50
51 #include <rte_eventdev.h>
52 #include "test.h"
53
54 #define MAX_PORTS 16
55 #define MAX_QIDS 16
56 #define NUM_PACKETS (1<<18)
57
58 static int evdev;
59
60 struct test {
61         struct rte_mempool *mbuf_pool;
62         uint8_t port[MAX_PORTS];
63         uint8_t qid[MAX_QIDS];
64         int nb_qids;
65 };
66
67 static struct rte_event release_ev;
68
69 static inline struct rte_mbuf *
70 rte_gen_arp(int portid, struct rte_mempool *mp)
71 {
72         /*
73          * len = 14 + 46
74          * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
75          */
76         static const uint8_t arp_request[] = {
77                 /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
78                 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
79                 /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
80                 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
81                 /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
82                 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
83                 /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
84                 0x00, 0x00, 0x00, 0x00
85         };
86         struct rte_mbuf *m;
87         int pkt_len = sizeof(arp_request) - 1;
88
89         m = rte_pktmbuf_alloc(mp);
90         if (!m)
91                 return 0;
92
93         memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
94                 arp_request, pkt_len);
95         rte_pktmbuf_pkt_len(m) = pkt_len;
96         rte_pktmbuf_data_len(m) = pkt_len;
97
98         RTE_SET_USED(portid);
99
100         return m;
101 }
102
103 static void
104 xstats_print(void)
105 {
106         const uint32_t XSTATS_MAX = 1024;
107         uint32_t i;
108         uint32_t ids[XSTATS_MAX];
109         uint64_t values[XSTATS_MAX];
110         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
111
112         for (i = 0; i < XSTATS_MAX; i++)
113                 ids[i] = i;
114
115         /* Device names / values */
116         int ret = rte_event_dev_xstats_names_get(evdev,
117                                         RTE_EVENT_DEV_XSTATS_DEVICE, 0,
118                                         xstats_names, ids, XSTATS_MAX);
119         if (ret < 0) {
120                 printf("%d: xstats names get() returned error\n",
121                         __LINE__);
122                 return;
123         }
124         ret = rte_event_dev_xstats_get(evdev,
125                                         RTE_EVENT_DEV_XSTATS_DEVICE,
126                                         0, ids, values, ret);
127         if (ret > (signed int)XSTATS_MAX)
128                 printf("%s %d: more xstats available than space\n",
129                                 __func__, __LINE__);
130         for (i = 0; (signed int)i < ret; i++) {
131                 printf("%d : %s : %"PRIu64"\n",
132                                 i, xstats_names[i].name, values[i]);
133         }
134
135         /* Port names / values */
136         ret = rte_event_dev_xstats_names_get(evdev,
137                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
138                                         xstats_names, ids, XSTATS_MAX);
139         ret = rte_event_dev_xstats_get(evdev,
140                                         RTE_EVENT_DEV_XSTATS_PORT, 1,
141                                         ids, values, ret);
142         if (ret > (signed int)XSTATS_MAX)
143                 printf("%s %d: more xstats available than space\n",
144                                 __func__, __LINE__);
145         for (i = 0; (signed int)i < ret; i++) {
146                 printf("%d : %s : %"PRIu64"\n",
147                                 i, xstats_names[i].name, values[i]);
148         }
149
150         /* Queue names / values */
151         ret = rte_event_dev_xstats_names_get(evdev,
152                                         RTE_EVENT_DEV_XSTATS_QUEUE, 0,
153                                         xstats_names, ids, XSTATS_MAX);
154         ret = rte_event_dev_xstats_get(evdev,
155                                         RTE_EVENT_DEV_XSTATS_QUEUE,
156                                         1, ids, values, ret);
157         if (ret > (signed int)XSTATS_MAX)
158                 printf("%s %d: more xstats available than space\n",
159                                 __func__, __LINE__);
160         for (i = 0; (signed int)i < ret; i++) {
161                 printf("%d : %s : %"PRIu64"\n",
162                                 i, xstats_names[i].name, values[i]);
163         }
164 }
165
166 /* initialization and config */
167 static inline int
168 init(struct test *t, int nb_queues, int nb_ports)
169 {
170         struct rte_event_dev_config config = {
171                         .nb_event_queues = nb_queues,
172                         .nb_event_ports = nb_ports,
173                         .nb_event_queue_flows = 1024,
174                         .nb_events_limit = 4096,
175                         .nb_event_port_dequeue_depth = 128,
176                         .nb_event_port_enqueue_depth = 128,
177         };
178         int ret;
179
180         void *temp = t->mbuf_pool; /* save and restore mbuf pool */
181
182         memset(t, 0, sizeof(*t));
183         t->mbuf_pool = temp;
184
185         ret = rte_event_dev_configure(evdev, &config);
186         if (ret < 0)
187                 printf("%d: Error configuring device\n", __LINE__);
188         return ret;
189 };
190
191 static inline int
192 create_ports(struct test *t, int num_ports)
193 {
194         int i;
195         static const struct rte_event_port_conf conf = {
196                         .new_event_threshold = 1024,
197                         .dequeue_depth = 32,
198                         .enqueue_depth = 64,
199         };
200         if (num_ports > MAX_PORTS)
201                 return -1;
202
203         for (i = 0; i < num_ports; i++) {
204                 if (rte_event_port_setup(evdev, i, &conf) < 0) {
205                         printf("Error setting up port %d\n", i);
206                         return -1;
207                 }
208                 t->port[i] = i;
209         }
210
211         return 0;
212 }
213
214 static inline int
215 create_lb_qids(struct test *t, int num_qids, uint32_t flags)
216 {
217         int i;
218
219         /* Q creation */
220         const struct rte_event_queue_conf conf = {
221                         .event_queue_cfg = flags,
222                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
223                         .nb_atomic_flows = 1024,
224                         .nb_atomic_order_sequences = 1024,
225         };
226
227         for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
228                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
229                         printf("%d: error creating qid %d\n", __LINE__, i);
230                         return -1;
231                 }
232                 t->qid[i] = i;
233         }
234         t->nb_qids += num_qids;
235         if (t->nb_qids > MAX_QIDS)
236                 return -1;
237
238         return 0;
239 }
240
241 static inline int
242 create_atomic_qids(struct test *t, int num_qids)
243 {
244         return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY);
245 }
246
247 static inline int
248 create_ordered_qids(struct test *t, int num_qids)
249 {
250         return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ORDERED_ONLY);
251 }
252
253
254 static inline int
255 create_unordered_qids(struct test *t, int num_qids)
256 {
257         return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY);
258 }
259
260 static inline int
261 create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
262 {
263         int i;
264
265         /* Q creation */
266         static const struct rte_event_queue_conf conf = {
267                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
268                         .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
269                         .nb_atomic_flows = 1024,
270                         .nb_atomic_order_sequences = 1024,
271         };
272
273         for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
274                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
275                         printf("%d: error creating qid %d\n", __LINE__, i);
276                         return -1;
277                 }
278                 t->qid[i] = i;
279
280                 if (rte_event_port_link(evdev, ports[i - t->nb_qids],
281                                 &t->qid[i], NULL, 1) != 1) {
282                         printf("%d: error creating link for qid %d\n",
283                                         __LINE__, i);
284                         return -1;
285                 }
286         }
287         t->nb_qids += num_qids;
288         if (t->nb_qids > MAX_QIDS)
289                 return -1;
290
291         return 0;
292 }
293
294 /* destruction */
295 static inline int
296 cleanup(struct test *t __rte_unused)
297 {
298         rte_event_dev_stop(evdev);
299         rte_event_dev_close(evdev);
300         return 0;
301 };
302
303 struct test_event_dev_stats {
304         uint64_t rx_pkts;       /**< Total packets received */
305         uint64_t rx_dropped;    /**< Total packets dropped (Eg Invalid QID) */
306         uint64_t tx_pkts;       /**< Total packets transmitted */
307
308         /** Packets received on this port */
309         uint64_t port_rx_pkts[MAX_PORTS];
310         /** Packets dropped on this port */
311         uint64_t port_rx_dropped[MAX_PORTS];
312         /** Packets inflight on this port */
313         uint64_t port_inflight[MAX_PORTS];
314         /** Packets transmitted on this port */
315         uint64_t port_tx_pkts[MAX_PORTS];
316         /** Packets received on this qid */
317         uint64_t qid_rx_pkts[MAX_QIDS];
318         /** Packets dropped on this qid */
319         uint64_t qid_rx_dropped[MAX_QIDS];
320         /** Packets transmitted on this qid */
321         uint64_t qid_tx_pkts[MAX_QIDS];
322 };
323
324 static inline int
325 test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
326 {
327         static uint32_t i;
328         static uint32_t total_ids[3]; /* rx, tx and drop */
329         static uint32_t port_rx_pkts_ids[MAX_PORTS];
330         static uint32_t port_rx_dropped_ids[MAX_PORTS];
331         static uint32_t port_inflight_ids[MAX_PORTS];
332         static uint32_t port_tx_pkts_ids[MAX_PORTS];
333         static uint32_t qid_rx_pkts_ids[MAX_QIDS];
334         static uint32_t qid_rx_dropped_ids[MAX_QIDS];
335         static uint32_t qid_tx_pkts_ids[MAX_QIDS];
336
337
338         stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
339                         "dev_rx", &total_ids[0]);
340         stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
341                         "dev_drop", &total_ids[1]);
342         stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
343                         "dev_tx", &total_ids[2]);
344         for (i = 0; i < MAX_PORTS; i++) {
345                 char name[32];
346                 snprintf(name, sizeof(name), "port_%u_rx", i);
347                 stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
348                                 dev_id, name, &port_rx_pkts_ids[i]);
349                 snprintf(name, sizeof(name), "port_%u_drop", i);
350                 stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
351                                 dev_id, name, &port_rx_dropped_ids[i]);
352                 snprintf(name, sizeof(name), "port_%u_inflight", i);
353                 stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
354                                 dev_id, name, &port_inflight_ids[i]);
355                 snprintf(name, sizeof(name), "port_%u_tx", i);
356                 stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
357                                 dev_id, name, &port_tx_pkts_ids[i]);
358         }
359         for (i = 0; i < MAX_QIDS; i++) {
360                 char name[32];
361                 snprintf(name, sizeof(name), "qid_%u_rx", i);
362                 stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
363                                 dev_id, name, &qid_rx_pkts_ids[i]);
364                 snprintf(name, sizeof(name), "qid_%u_drop", i);
365                 stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
366                                 dev_id, name, &qid_rx_dropped_ids[i]);
367                 snprintf(name, sizeof(name), "qid_%u_tx", i);
368                 stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
369                                 dev_id, name, &qid_tx_pkts_ids[i]);
370         }
371
372         return 0;
373 }
374
375 /* run_prio_packet_test
376  * This performs a basic packet priority check on the test instance passed in.
377  * It is factored out of the main priority tests as the same tests must be
378  * performed to ensure prioritization of each type of QID.
379  *
380  * Requirements:
381  *  - An initialized test structure, including mempool
382  *  - t->port[0] is initialized for both Enq / Deq of packets to the QID
383  *  - t->qid[0] is the QID to be tested
384  *  - if LB QID, the CQ must be mapped to the QID.
385  */
386 static int
387 run_prio_packet_test(struct test *t)
388 {
389         int err;
390         const uint32_t MAGIC_SEQN[] = {4711, 1234};
391         const uint32_t PRIORITY[] = {
392                 RTE_EVENT_DEV_PRIORITY_NORMAL,
393                 RTE_EVENT_DEV_PRIORITY_HIGHEST
394         };
395         unsigned int i;
396         for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
397                 /* generate pkt and enqueue */
398                 struct rte_event ev;
399                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
400                 if (!arp) {
401                         printf("%d: gen of pkt failed\n", __LINE__);
402                         return -1;
403                 }
404                 arp->seqn = MAGIC_SEQN[i];
405
406                 ev = (struct rte_event){
407                         .priority = PRIORITY[i],
408                         .op = RTE_EVENT_OP_NEW,
409                         .queue_id = t->qid[0],
410                         .mbuf = arp
411                 };
412                 err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
413                 if (err < 0) {
414                         printf("%d: error failed to enqueue\n", __LINE__);
415                         return -1;
416                 }
417         }
418
419         rte_event_schedule(evdev);
420
421         struct test_event_dev_stats stats;
422         err = test_event_dev_stats_get(evdev, &stats);
423         if (err) {
424                 printf("%d: error failed to get stats\n", __LINE__);
425                 return -1;
426         }
427
428         if (stats.port_rx_pkts[t->port[0]] != 2) {
429                 printf("%d: error stats incorrect for directed port\n",
430                                 __LINE__);
431                 rte_event_dev_dump(evdev, stdout);
432                 return -1;
433         }
434
435         struct rte_event ev, ev2;
436         uint32_t deq_pkts;
437         deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
438         if (deq_pkts != 1) {
439                 printf("%d: error failed to deq\n", __LINE__);
440                 rte_event_dev_dump(evdev, stdout);
441                 return -1;
442         }
443         if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
444                 printf("%d: first packet out not highest priority\n",
445                                 __LINE__);
446                 rte_event_dev_dump(evdev, stdout);
447                 return -1;
448         }
449         rte_pktmbuf_free(ev.mbuf);
450
451         deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
452         if (deq_pkts != 1) {
453                 printf("%d: error failed to deq\n", __LINE__);
454                 rte_event_dev_dump(evdev, stdout);
455                 return -1;
456         }
457         if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
458                 printf("%d: second packet out not lower priority\n",
459                                 __LINE__);
460                 rte_event_dev_dump(evdev, stdout);
461                 return -1;
462         }
463         rte_pktmbuf_free(ev2.mbuf);
464
465         cleanup(t);
466         return 0;
467 }
468
469 static int
470 test_single_directed_packet(struct test *t)
471 {
472         const int rx_enq = 0;
473         const int wrk_enq = 2;
474         int err;
475
476         /* Create instance with 3 directed QIDs going to 3 ports */
477         if (init(t, 3, 3) < 0 ||
478                         create_ports(t, 3) < 0 ||
479                         create_directed_qids(t, 3, t->port) < 0)
480                 return -1;
481
482         if (rte_event_dev_start(evdev) < 0) {
483                 printf("%d: Error with start call\n", __LINE__);
484                 return -1;
485         }
486
487         /************** FORWARD ****************/
488         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
489         struct rte_event ev = {
490                         .op = RTE_EVENT_OP_NEW,
491                         .queue_id = wrk_enq,
492                         .mbuf = arp,
493         };
494
495         if (!arp) {
496                 printf("%d: gen of pkt failed\n", __LINE__);
497                 return -1;
498         }
499
500         const uint32_t MAGIC_SEQN = 4711;
501         arp->seqn = MAGIC_SEQN;
502
503         /* generate pkt and enqueue */
504         err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
505         if (err < 0) {
506                 printf("%d: error failed to enqueue\n", __LINE__);
507                 return -1;
508         }
509
510         /* Run schedule() as dir packets may need to be re-ordered */
511         rte_event_schedule(evdev);
512
513         struct test_event_dev_stats stats;
514         err = test_event_dev_stats_get(evdev, &stats);
515         if (err) {
516                 printf("%d: error failed to get stats\n", __LINE__);
517                 return -1;
518         }
519
520         if (stats.port_rx_pkts[rx_enq] != 1) {
521                 printf("%d: error stats incorrect for directed port\n",
522                                 __LINE__);
523                 return -1;
524         }
525
526         uint32_t deq_pkts;
527         deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
528         if (deq_pkts != 1) {
529                 printf("%d: error failed to deq\n", __LINE__);
530                 return -1;
531         }
532
533         err = test_event_dev_stats_get(evdev, &stats);
534         if (stats.port_rx_pkts[wrk_enq] != 0 &&
535                         stats.port_rx_pkts[wrk_enq] != 1) {
536                 printf("%d: error directed stats post-dequeue\n", __LINE__);
537                 return -1;
538         }
539
540         if (ev.mbuf->seqn != MAGIC_SEQN) {
541                 printf("%d: error magic sequence number not dequeued\n",
542                                 __LINE__);
543                 return -1;
544         }
545
546         rte_pktmbuf_free(ev.mbuf);
547         cleanup(t);
548         return 0;
549 }
550
551 static int
552 test_directed_forward_credits(struct test *t)
553 {
554         uint32_t i;
555         int32_t err;
556
557         if (init(t, 1, 1) < 0 ||
558                         create_ports(t, 1) < 0 ||
559                         create_directed_qids(t, 1, t->port) < 0)
560                 return -1;
561
562         if (rte_event_dev_start(evdev) < 0) {
563                 printf("%d: Error with start call\n", __LINE__);
564                 return -1;
565         }
566
567         struct rte_event ev = {
568                         .op = RTE_EVENT_OP_NEW,
569                         .queue_id = 0,
570         };
571
572         for (i = 0; i < 1000; i++) {
573                 err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
574                 if (err < 0) {
575                         printf("%d: error failed to enqueue\n", __LINE__);
576                         return -1;
577                 }
578                 rte_event_schedule(evdev);
579
580                 uint32_t deq_pkts;
581                 deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
582                 if (deq_pkts != 1) {
583                         printf("%d: error failed to deq\n", __LINE__);
584                         return -1;
585                 }
586
587                 /* re-write event to be a forward, and continue looping it */
588                 ev.op = RTE_EVENT_OP_FORWARD;
589         }
590
591         cleanup(t);
592         return 0;
593 }
594
595
596 static int
597 test_priority_directed(struct test *t)
598 {
599         if (init(t, 1, 1) < 0 ||
600                         create_ports(t, 1) < 0 ||
601                         create_directed_qids(t, 1, t->port) < 0) {
602                 printf("%d: Error initializing device\n", __LINE__);
603                 return -1;
604         }
605
606         if (rte_event_dev_start(evdev) < 0) {
607                 printf("%d: Error with start call\n", __LINE__);
608                 return -1;
609         }
610
611         return run_prio_packet_test(t);
612 }
613
614 static int
615 test_priority_atomic(struct test *t)
616 {
617         if (init(t, 1, 1) < 0 ||
618                         create_ports(t, 1) < 0 ||
619                         create_atomic_qids(t, 1) < 0) {
620                 printf("%d: Error initializing device\n", __LINE__);
621                 return -1;
622         }
623
624         /* map the QID */
625         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
626                 printf("%d: error mapping qid to port\n", __LINE__);
627                 return -1;
628         }
629         if (rte_event_dev_start(evdev) < 0) {
630                 printf("%d: Error with start call\n", __LINE__);
631                 return -1;
632         }
633
634         return run_prio_packet_test(t);
635 }
636
637 static int
638 test_priority_ordered(struct test *t)
639 {
640         if (init(t, 1, 1) < 0 ||
641                         create_ports(t, 1) < 0 ||
642                         create_ordered_qids(t, 1) < 0) {
643                 printf("%d: Error initializing device\n", __LINE__);
644                 return -1;
645         }
646
647         /* map the QID */
648         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
649                 printf("%d: error mapping qid to port\n", __LINE__);
650                 return -1;
651         }
652         if (rte_event_dev_start(evdev) < 0) {
653                 printf("%d: Error with start call\n", __LINE__);
654                 return -1;
655         }
656
657         return run_prio_packet_test(t);
658 }
659
660 static int
661 test_priority_unordered(struct test *t)
662 {
663         if (init(t, 1, 1) < 0 ||
664                         create_ports(t, 1) < 0 ||
665                         create_unordered_qids(t, 1) < 0) {
666                 printf("%d: Error initializing device\n", __LINE__);
667                 return -1;
668         }
669
670         /* map the QID */
671         if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
672                 printf("%d: error mapping qid to port\n", __LINE__);
673                 return -1;
674         }
675         if (rte_event_dev_start(evdev) < 0) {
676                 printf("%d: Error with start call\n", __LINE__);
677                 return -1;
678         }
679
680         return run_prio_packet_test(t);
681 }
682
683 static int
684 burst_packets(struct test *t)
685 {
686         /************** CONFIG ****************/
687         uint32_t i;
688         int err;
689         int ret;
690
691         /* Create instance with 2 ports and 2 queues */
692         if (init(t, 2, 2) < 0 ||
693                         create_ports(t, 2) < 0 ||
694                         create_atomic_qids(t, 2) < 0) {
695                 printf("%d: Error initializing device\n", __LINE__);
696                 return -1;
697         }
698
699         /* CQ mapping to QID */
700         ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
701         if (ret != 1) {
702                 printf("%d: error mapping lb qid0\n", __LINE__);
703                 return -1;
704         }
705         ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
706         if (ret != 1) {
707                 printf("%d: error mapping lb qid1\n", __LINE__);
708                 return -1;
709         }
710
711         if (rte_event_dev_start(evdev) < 0) {
712                 printf("%d: Error with start call\n", __LINE__);
713                 return -1;
714         }
715
716         /************** FORWARD ****************/
717         const uint32_t rx_port = 0;
718         const uint32_t NUM_PKTS = 2;
719
720         for (i = 0; i < NUM_PKTS; i++) {
721                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
722                 if (!arp) {
723                         printf("%d: error generating pkt\n", __LINE__);
724                         return -1;
725                 }
726
727                 struct rte_event ev = {
728                                 .op = RTE_EVENT_OP_NEW,
729                                 .queue_id = i % 2,
730                                 .flow_id = i % 3,
731                                 .mbuf = arp,
732                 };
733                 /* generate pkt and enqueue */
734                 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
735                 if (err < 0) {
736                         printf("%d: Failed to enqueue\n", __LINE__);
737                         return -1;
738                 }
739         }
740         rte_event_schedule(evdev);
741
742         /* Check stats for all NUM_PKTS arrived to sched core */
743         struct test_event_dev_stats stats;
744
745         err = test_event_dev_stats_get(evdev, &stats);
746         if (err) {
747                 printf("%d: failed to get stats\n", __LINE__);
748                 return -1;
749         }
750         if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
751                 printf("%d: Sched core didn't receive all %d pkts\n",
752                                 __LINE__, NUM_PKTS);
753                 rte_event_dev_dump(evdev, stdout);
754                 return -1;
755         }
756
757         uint32_t deq_pkts;
758         int p;
759
760         deq_pkts = 0;
761         /******** DEQ QID 1 *******/
762         do {
763                 struct rte_event ev;
764                 p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
765                 deq_pkts += p;
766                 rte_pktmbuf_free(ev.mbuf);
767         } while (p);
768
769         if (deq_pkts != NUM_PKTS/2) {
770                 printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
771                                 __LINE__);
772                 return -1;
773         }
774
775         /******** DEQ QID 2 *******/
776         deq_pkts = 0;
777         do {
778                 struct rte_event ev;
779                 p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
780                 deq_pkts += p;
781                 rte_pktmbuf_free(ev.mbuf);
782         } while (p);
783         if (deq_pkts != NUM_PKTS/2) {
784                 printf("%d: Half of NUM_PKTS didn't arrive at port 2\n",
785                                 __LINE__);
786                 return -1;
787         }
788
789         cleanup(t);
790         return 0;
791 }
792
793 static int
794 abuse_inflights(struct test *t)
795 {
796         const int rx_enq = 0;
797         const int wrk_enq = 2;
798         int err;
799
800         /* Create instance with 4 ports */
801         if (init(t, 1, 4) < 0 ||
802                         create_ports(t, 4) < 0 ||
803                         create_atomic_qids(t, 1) < 0) {
804                 printf("%d: Error initializing device\n", __LINE__);
805                 return -1;
806         }
807
808         /* CQ mapping to QID */
809         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
810         if (err != 1) {
811                 printf("%d: error mapping lb qid\n", __LINE__);
812                 cleanup(t);
813                 return -1;
814         }
815
816         if (rte_event_dev_start(evdev) < 0) {
817                 printf("%d: Error with start call\n", __LINE__);
818                 return -1;
819         }
820
821         /* Enqueue op only */
822         err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
823         if (err < 0) {
824                 printf("%d: Failed to enqueue\n", __LINE__);
825                 return -1;
826         }
827
828         /* schedule */
829         rte_event_schedule(evdev);
830
831         struct test_event_dev_stats stats;
832
833         err = test_event_dev_stats_get(evdev, &stats);
834         if (err) {
835                 printf("%d: failed to get stats\n", __LINE__);
836                 return -1;
837         }
838
839         if (stats.rx_pkts != 0 ||
840                         stats.tx_pkts != 0 ||
841                         stats.port_inflight[wrk_enq] != 0) {
842                 printf("%d: Sched core didn't handle pkt as expected\n",
843                                 __LINE__);
844                 return -1;
845         }
846
847         cleanup(t);
848         return 0;
849 }
850
851 static int
852 xstats_tests(struct test *t)
853 {
854         const int wrk_enq = 2;
855         int err;
856
857         /* Create instance with 4 ports */
858         if (init(t, 1, 4) < 0 ||
859                         create_ports(t, 4) < 0 ||
860                         create_atomic_qids(t, 1) < 0) {
861                 printf("%d: Error initializing device\n", __LINE__);
862                 return -1;
863         }
864
865         /* CQ mapping to QID */
866         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
867         if (err != 1) {
868                 printf("%d: error mapping lb qid\n", __LINE__);
869                 cleanup(t);
870                 return -1;
871         }
872
873         if (rte_event_dev_start(evdev) < 0) {
874                 printf("%d: Error with start call\n", __LINE__);
875                 return -1;
876         }
877
878         const uint32_t XSTATS_MAX = 1024;
879
880         uint32_t i;
881         uint32_t ids[XSTATS_MAX];
882         uint64_t values[XSTATS_MAX];
883         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
884
885         for (i = 0; i < XSTATS_MAX; i++)
886                 ids[i] = i;
887
888         /* Device names / values */
889         int ret = rte_event_dev_xstats_names_get(evdev,
890                                         RTE_EVENT_DEV_XSTATS_DEVICE,
891                                         0, xstats_names, ids, XSTATS_MAX);
892         if (ret != 6) {
893                 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
894                 return -1;
895         }
896         ret = rte_event_dev_xstats_get(evdev,
897                                         RTE_EVENT_DEV_XSTATS_DEVICE,
898                                         0, ids, values, ret);
899         if (ret != 6) {
900                 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
901                 return -1;
902         }
903
904         /* Port names / values */
905         ret = rte_event_dev_xstats_names_get(evdev,
906                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
907                                         xstats_names, ids, XSTATS_MAX);
908         if (ret != 21) {
909                 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
910                 return -1;
911         }
912         ret = rte_event_dev_xstats_get(evdev,
913                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
914                                         ids, values, ret);
915         if (ret != 21) {
916                 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
917                 return -1;
918         }
919
920         /* Queue names / values */
921         ret = rte_event_dev_xstats_names_get(evdev,
922                                         RTE_EVENT_DEV_XSTATS_QUEUE,
923                                         0, xstats_names, ids, XSTATS_MAX);
924         if (ret != 17) {
925                 printf("%d: expected 17 stats, got return %d\n", __LINE__, ret);
926                 return -1;
927         }
928
929         /* NEGATIVE TEST: with wrong queue passed, 0 stats should be returned */
930         ret = rte_event_dev_xstats_get(evdev,
931                                         RTE_EVENT_DEV_XSTATS_QUEUE,
932                                         1, ids, values, ret);
933         if (ret != -EINVAL) {
934                 printf("%d: expected 0 stats, got return %d\n", __LINE__, ret);
935                 return -1;
936         }
937
938         ret = rte_event_dev_xstats_get(evdev,
939                                         RTE_EVENT_DEV_XSTATS_QUEUE,
940                                         0, ids, values, ret);
941         if (ret != 17) {
942                 printf("%d: expected 17 stats, got return %d\n", __LINE__, ret);
943                 return -1;
944         }
945
946         /* enqueue packets to check values */
947         for (i = 0; i < 3; i++) {
948                 struct rte_event ev;
949                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
950                 if (!arp) {
951                         printf("%d: gen of pkt failed\n", __LINE__);
952                         return -1;
953                 }
954                 ev.queue_id = t->qid[i];
955                 ev.op = RTE_EVENT_OP_NEW;
956                 ev.mbuf = arp;
957                 ev.flow_id = 7;
958                 arp->seqn = i;
959
960                 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
961                 if (err != 1) {
962                         printf("%d: Failed to enqueue\n", __LINE__);
963                         return -1;
964                 }
965         }
966
967         rte_event_schedule(evdev);
968
969         /* Device names / values */
970         int num_stats = rte_event_dev_xstats_names_get(evdev,
971                                         RTE_EVENT_DEV_XSTATS_DEVICE, 0,
972                                         xstats_names, ids, XSTATS_MAX);
973         if (num_stats < 0)
974                 goto fail;
975         ret = rte_event_dev_xstats_get(evdev,
976                                         RTE_EVENT_DEV_XSTATS_DEVICE,
977                                         0, ids, values, num_stats);
978         static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
979         for (i = 0; (signed int)i < ret; i++) {
980                 if (expected[i] != values[i]) {
981                         printf(
982                                 "%d Error xstat %d (id %d) %s : %"PRIu64
983                                 ", expect %"PRIu64"\n",
984                                 __LINE__, i, ids[i], xstats_names[i].name,
985                                 values[i], expected[i]);
986                         goto fail;
987                 }
988         }
989
990         ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
991                                         0, NULL, 0);
992
993         /* ensure reset statistics are zero-ed */
994         static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
995         ret = rte_event_dev_xstats_get(evdev,
996                                         RTE_EVENT_DEV_XSTATS_DEVICE,
997                                         0, ids, values, num_stats);
998         for (i = 0; (signed int)i < ret; i++) {
999                 if (expected_zero[i] != values[i]) {
1000                         printf(
1001                                 "%d Error, xstat %d (id %d) %s : %"PRIu64
1002                                 ", expect %"PRIu64"\n",
1003                                 __LINE__, i, ids[i], xstats_names[i].name,
1004                                 values[i], expected_zero[i]);
1005                         goto fail;
1006                 }
1007         }
1008
1009         /* port reset checks */
1010         num_stats = rte_event_dev_xstats_names_get(evdev,
1011                                         RTE_EVENT_DEV_XSTATS_PORT, 0,
1012                                         xstats_names, ids, XSTATS_MAX);
1013         if (num_stats < 0)
1014                 goto fail;
1015         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1016                                         0, ids, values, num_stats);
1017
1018         static const uint64_t port_expected[] = {
1019                 3 /* rx */,
1020                 0 /* tx */,
1021                 0 /* drop */,
1022                 0 /* inflights */,
1023                 0 /* avg pkt cycles */,
1024                 29 /* credits */,
1025                 0 /* rx ring used */,
1026                 4096 /* rx ring free */,
1027                 0 /* cq ring used */,
1028                 32 /* cq ring free */,
1029                 0 /* dequeue calls */,
1030                 /* 10 dequeue burst buckets */
1031                 0, 0, 0, 0, 0,
1032                 0, 0, 0, 0, 0,
1033         };
1034         if (ret != RTE_DIM(port_expected)) {
1035                 printf(
1036                         "%s %d: wrong number of port stats (%d), expected %zu\n",
1037                         __func__, __LINE__, ret, RTE_DIM(port_expected));
1038         }
1039
1040         for (i = 0; (signed int)i < ret; i++) {
1041                 if (port_expected[i] != values[i]) {
1042                         printf(
1043                                 "%s : %d: Error stat %s is %"PRIu64
1044                                 ", expected %"PRIu64"\n",
1045                                 __func__, __LINE__, xstats_names[i].name,
1046                                 values[i], port_expected[i]);
1047                         goto fail;
1048                 }
1049         }
1050
1051         ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT,
1052                                         0, NULL, 0);
1053
1054         /* ensure reset statistics are zero-ed */
1055         static const uint64_t port_expected_zero[] = {
1056                 0 /* rx */,
1057                 0 /* tx */,
1058                 0 /* drop */,
1059                 0 /* inflights */,
1060                 0 /* avg pkt cycles */,
1061                 29 /* credits */,
1062                 0 /* rx ring used */,
1063                 4096 /* rx ring free */,
1064                 0 /* cq ring used */,
1065                 32 /* cq ring free */,
1066                 0 /* dequeue calls */,
1067                 /* 10 dequeue burst buckets */
1068                 0, 0, 0, 0, 0,
1069                 0, 0, 0, 0, 0,
1070         };
1071         ret = rte_event_dev_xstats_get(evdev,
1072                                         RTE_EVENT_DEV_XSTATS_PORT,
1073                                         0, ids, values, num_stats);
1074         for (i = 0; (signed int)i < ret; i++) {
1075                 if (port_expected_zero[i] != values[i]) {
1076                         printf(
1077                                 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1078                                 ", expect %"PRIu64"\n",
1079                                 __LINE__, i, ids[i], xstats_names[i].name,
1080                                 values[i], port_expected_zero[i]);
1081                         goto fail;
1082                 }
1083         }
1084
1085         /* QUEUE STATS TESTS */
1086         num_stats = rte_event_dev_xstats_names_get(evdev,
1087                                                 RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1088                                                 xstats_names, ids, XSTATS_MAX);
1089         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1090                                         0, ids, values, num_stats);
1091         if (ret < 0) {
1092                 printf("xstats get returned %d\n", ret);
1093                 goto fail;
1094         }
1095         if ((unsigned int)ret > XSTATS_MAX)
1096                 printf("%s %d: more xstats available than space\n",
1097                                 __func__, __LINE__);
1098
1099         static const uint64_t queue_expected[] = {
1100                 3 /* rx */,
1101                 3 /* tx */,
1102                 0 /* drop */,
1103                 3 /* inflights */,
1104                 512 /* iq size */,
1105                 0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
1106                 /* QID-to-Port: pinned_flows, packets */
1107                 0, 0,
1108                 0, 0,
1109                 1, 3,
1110                 0, 0,
1111         };
1112         for (i = 0; (signed int)i < ret; i++) {
1113                 if (queue_expected[i] != values[i]) {
1114                         printf(
1115                                 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1116                                 ", expect %"PRIu64"\n",
1117                                 __LINE__, i, ids[i], xstats_names[i].name,
1118                                 values[i], queue_expected[i]);
1119                         goto fail;
1120                 }
1121         }
1122
1123         /* Reset the queue stats here */
1124         ret = rte_event_dev_xstats_reset(evdev,
1125                                         RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1126                                         NULL,
1127                                         0);
1128
1129         /* Verify that the resetable stats are reset, and others are not */
1130         static const uint64_t queue_expected_zero[] = {
1131                 0 /* rx */,
1132                 0 /* tx */,
1133                 0 /* drop */,
1134                 3 /* inflight */,
1135                 512 /* iq size */,
1136                 0, 0, 0, 0, /* 4 iq used */
1137                 /* QID-to-Port: pinned_flows, packets */
1138                 0, 0,
1139                 0, 0,
1140                 1, 0,
1141                 0, 0,
1142         };
1143
1144         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
1145                                         ids, values, num_stats);
1146         int fails = 0;
1147         for (i = 0; (signed int)i < ret; i++) {
1148                 if (queue_expected_zero[i] != values[i]) {
1149                         printf(
1150                                 "%d, Error, xstat %d (id %d) %s : %"PRIu64
1151                                 ", expect %"PRIu64"\n",
1152                                 __LINE__, i, ids[i], xstats_names[i].name,
1153                                 values[i], queue_expected_zero[i]);
1154                         fails++;
1155                 }
1156         }
1157         if (fails) {
1158                 printf("%d : %d of values were not as expected above\n",
1159                                 __LINE__, fails);
1160                 goto fail;
1161         }
1162
1163         cleanup(t);
1164         return 0;
1165
1166 fail:
1167         rte_event_dev_dump(0, stdout);
1168         cleanup(t);
1169         return -1;
1170 }
1171
1172
1173 static int
1174 xstats_id_abuse_tests(struct test *t)
1175 {
1176         int err;
1177         const uint32_t XSTATS_MAX = 1024;
1178         const uint32_t link_port = 2;
1179
1180         uint32_t ids[XSTATS_MAX];
1181         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1182
1183         /* Create instance with 4 ports */
1184         if (init(t, 1, 4) < 0 ||
1185                         create_ports(t, 4) < 0 ||
1186                         create_atomic_qids(t, 1) < 0) {
1187                 printf("%d: Error initializing device\n", __LINE__);
1188                 goto fail;
1189         }
1190
1191         err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0);
1192         if (err != 1) {
1193                 printf("%d: error mapping lb qid\n", __LINE__);
1194                 goto fail;
1195         }
1196
1197         if (rte_event_dev_start(evdev) < 0) {
1198                 printf("%d: Error with start call\n", __LINE__);
1199                 goto fail;
1200         }
1201
1202         /* no test for device, as it ignores the port/q number */
1203         int num_stats = rte_event_dev_xstats_names_get(evdev,
1204                                         RTE_EVENT_DEV_XSTATS_PORT,
1205                                         UINT8_MAX-1, xstats_names, ids,
1206                                         XSTATS_MAX);
1207         if (num_stats != 0) {
1208                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1209                                 0, num_stats);
1210                 goto fail;
1211         }
1212
1213         num_stats = rte_event_dev_xstats_names_get(evdev,
1214                                         RTE_EVENT_DEV_XSTATS_QUEUE,
1215                                         UINT8_MAX-1, xstats_names, ids,
1216                                         XSTATS_MAX);
1217         if (num_stats != 0) {
1218                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1219                                 0, num_stats);
1220                 goto fail;
1221         }
1222
1223         cleanup(t);
1224         return 0;
1225 fail:
1226         cleanup(t);
1227         return -1;
1228 }
1229
1230 static int
1231 port_reconfig_credits(struct test *t)
1232 {
1233         if (init(t, 1, 1) < 0) {
1234                 printf("%d: Error initializing device\n", __LINE__);
1235                 return -1;
1236         }
1237
1238         uint32_t i;
1239         const uint32_t NUM_ITERS = 32;
1240         for (i = 0; i < NUM_ITERS; i++) {
1241                 const struct rte_event_queue_conf conf = {
1242                         .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
1243                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1244                         .nb_atomic_flows = 1024,
1245                         .nb_atomic_order_sequences = 1024,
1246                 };
1247                 if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1248                         printf("%d: error creating qid\n", __LINE__);
1249                         return -1;
1250                 }
1251                 t->qid[0] = 0;
1252
1253                 static const struct rte_event_port_conf port_conf = {
1254                                 .new_event_threshold = 128,
1255                                 .dequeue_depth = 32,
1256                                 .enqueue_depth = 64,
1257                 };
1258                 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1259                         printf("%d Error setting up port\n", __LINE__);
1260                         return -1;
1261                 }
1262
1263                 int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
1264                 if (links != 1) {
1265                         printf("%d: error mapping lb qid\n", __LINE__);
1266                         goto fail;
1267                 }
1268
1269                 if (rte_event_dev_start(evdev) < 0) {
1270                         printf("%d: Error with start call\n", __LINE__);
1271                         goto fail;
1272                 }
1273
1274                 const uint32_t NPKTS = 1;
1275                 uint32_t j;
1276                 for (j = 0; j < NPKTS; j++) {
1277                         struct rte_event ev;
1278                         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1279                         if (!arp) {
1280                                 printf("%d: gen of pkt failed\n", __LINE__);
1281                                 goto fail;
1282                         }
1283                         ev.queue_id = t->qid[0];
1284                         ev.op = RTE_EVENT_OP_NEW;
1285                         ev.mbuf = arp;
1286                         int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
1287                         if (err != 1) {
1288                                 printf("%d: Failed to enqueue\n", __LINE__);
1289                                 rte_event_dev_dump(0, stdout);
1290                                 goto fail;
1291                         }
1292                 }
1293
1294                 rte_event_schedule(evdev);
1295
1296                 struct rte_event ev[NPKTS];
1297                 int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
1298                                                         NPKTS, 0);
1299                 if (deq != 1)
1300                         printf("%d error; no packet dequeued\n", __LINE__);
1301
1302                 /* let cleanup below stop the device on last iter */
1303                 if (i != NUM_ITERS-1)
1304                         rte_event_dev_stop(evdev);
1305         }
1306
1307         cleanup(t);
1308         return 0;
1309 fail:
1310         cleanup(t);
1311         return -1;
1312 }
1313
1314 static int
1315 port_single_lb_reconfig(struct test *t)
1316 {
1317         if (init(t, 2, 2) < 0) {
1318                 printf("%d: Error initializing device\n", __LINE__);
1319                 goto fail;
1320         }
1321
1322         static const struct rte_event_queue_conf conf_lb_atomic = {
1323                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1324                 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
1325                 .nb_atomic_flows = 1024,
1326                 .nb_atomic_order_sequences = 1024,
1327         };
1328         if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
1329                 printf("%d: error creating qid\n", __LINE__);
1330                 goto fail;
1331         }
1332
1333         static const struct rte_event_queue_conf conf_single_link = {
1334                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1335                 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
1336                 .nb_atomic_flows = 1024,
1337                 .nb_atomic_order_sequences = 1024,
1338         };
1339         if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
1340                 printf("%d: error creating qid\n", __LINE__);
1341                 goto fail;
1342         }
1343
1344         struct rte_event_port_conf port_conf = {
1345                 .new_event_threshold = 128,
1346                 .dequeue_depth = 32,
1347                 .enqueue_depth = 64,
1348         };
1349         if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
1350                 printf("%d Error setting up port\n", __LINE__);
1351                 goto fail;
1352         }
1353         if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
1354                 printf("%d Error setting up port\n", __LINE__);
1355                 goto fail;
1356         }
1357
1358         /* link port to lb queue */
1359         uint8_t queue_id = 0;
1360         if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1361                 printf("%d: error creating link for qid\n", __LINE__);
1362                 goto fail;
1363         }
1364
1365         int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
1366         if (ret != 1) {
1367                 printf("%d: Error unlinking lb port\n", __LINE__);
1368                 goto fail;
1369         }
1370
1371         queue_id = 1;
1372         if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
1373                 printf("%d: error creating link for qid\n", __LINE__);
1374                 goto fail;
1375         }
1376
1377         queue_id = 0;
1378         int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
1379         if (err != 1) {
1380                 printf("%d: error mapping lb qid\n", __LINE__);
1381                 goto fail;
1382         }
1383
1384         if (rte_event_dev_start(evdev) < 0) {
1385                 printf("%d: Error with start call\n", __LINE__);
1386                 goto fail;
1387         }
1388
1389         cleanup(t);
1390         return 0;
1391 fail:
1392         cleanup(t);
1393         return -1;
1394 }
1395
1396 static int
1397 xstats_brute_force(struct test *t)
1398 {
1399         uint32_t i;
1400         const uint32_t XSTATS_MAX = 1024;
1401         uint32_t ids[XSTATS_MAX];
1402         uint64_t values[XSTATS_MAX];
1403         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1404
1405
1406         /* Create instance with 4 ports */
1407         if (init(t, 1, 4) < 0 ||
1408                         create_ports(t, 4) < 0 ||
1409                         create_atomic_qids(t, 1) < 0) {
1410                 printf("%d: Error initializing device\n", __LINE__);
1411                 return -1;
1412         }
1413
1414         int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1415         if (err != 1) {
1416                 printf("%d: error mapping lb qid\n", __LINE__);
1417                 goto fail;
1418         }
1419
1420         if (rte_event_dev_start(evdev) < 0) {
1421                 printf("%d: Error with start call\n", __LINE__);
1422                 goto fail;
1423         }
1424
1425         for (i = 0; i < XSTATS_MAX; i++)
1426                 ids[i] = i;
1427
1428         for (i = 0; i < 3; i++) {
1429                 uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i;
1430                 uint32_t j;
1431                 for (j = 0; j < UINT8_MAX; j++) {
1432                         rte_event_dev_xstats_names_get(evdev, mode,
1433                                 j, xstats_names, ids, XSTATS_MAX);
1434
1435                         rte_event_dev_xstats_get(evdev, mode, j, ids,
1436                                                  values, XSTATS_MAX);
1437                 }
1438         }
1439
1440         cleanup(t);
1441         return 0;
1442 fail:
1443         cleanup(t);
1444         return -1;
1445 }
1446
1447 static int
1448 xstats_id_reset_tests(struct test *t)
1449 {
1450         const int wrk_enq = 2;
1451         int err;
1452
1453         /* Create instance with 4 ports */
1454         if (init(t, 1, 4) < 0 ||
1455                         create_ports(t, 4) < 0 ||
1456                         create_atomic_qids(t, 1) < 0) {
1457                 printf("%d: Error initializing device\n", __LINE__);
1458                 return -1;
1459         }
1460
1461         /* CQ mapping to QID */
1462         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
1463         if (err != 1) {
1464                 printf("%d: error mapping lb qid\n", __LINE__);
1465                 goto fail;
1466         }
1467
1468         if (rte_event_dev_start(evdev) < 0) {
1469                 printf("%d: Error with start call\n", __LINE__);
1470                 goto fail;
1471         }
1472
1473 #define XSTATS_MAX 1024
1474         int ret;
1475         uint32_t i;
1476         uint32_t ids[XSTATS_MAX];
1477         uint64_t values[XSTATS_MAX];
1478         struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
1479
1480         for (i = 0; i < XSTATS_MAX; i++)
1481                 ids[i] = i;
1482
1483 #define NUM_DEV_STATS 6
1484         /* Device names / values */
1485         int num_stats = rte_event_dev_xstats_names_get(evdev,
1486                                         RTE_EVENT_DEV_XSTATS_DEVICE,
1487                                         0, xstats_names, ids, XSTATS_MAX);
1488         if (num_stats != NUM_DEV_STATS) {
1489                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1490                                 NUM_DEV_STATS, num_stats);
1491                 goto fail;
1492         }
1493         ret = rte_event_dev_xstats_get(evdev,
1494                                         RTE_EVENT_DEV_XSTATS_DEVICE,
1495                                         0, ids, values, num_stats);
1496         if (ret != NUM_DEV_STATS) {
1497                 printf("%d: expected %d stats, got return %d\n", __LINE__,
1498                                 NUM_DEV_STATS, ret);
1499                 goto fail;
1500         }
1501
1502 #define NPKTS 7
1503         for (i = 0; i < NPKTS; i++) {
1504                 struct rte_event ev;
1505                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1506                 if (!arp) {
1507                         printf("%d: gen of pkt failed\n", __LINE__);
1508                         goto fail;
1509                 }
1510                 ev.queue_id = t->qid[i];
1511                 ev.op = RTE_EVENT_OP_NEW;
1512                 ev.mbuf = arp;
1513                 arp->seqn = i;
1514
1515                 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1516                 if (err != 1) {
1517                         printf("%d: Failed to enqueue\n", __LINE__);
1518                         goto fail;
1519                 }
1520         }
1521
1522         rte_event_schedule(evdev);
1523
1524         static const char * const dev_names[] = {
1525                 "dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
1526                 "dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
1527         };
1528         uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
1529         for (i = 0; (int)i < ret; i++) {
1530                 unsigned int id;
1531                 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1532                                                                 dev_names[i],
1533                                                                 &id);
1534                 if (id != i) {
1535                         printf("%d: %s id incorrect, expected %d got %d\n",
1536                                         __LINE__, dev_names[i], i, id);
1537                         goto fail;
1538                 }
1539                 if (val != dev_expected[i]) {
1540                         printf("%d: %s value incorrect, expected %"
1541                                 PRIu64" got %d\n", __LINE__, dev_names[i],
1542                                 dev_expected[i], id);
1543                         goto fail;
1544                 }
1545                 /* reset to zero */
1546                 int reset_ret = rte_event_dev_xstats_reset(evdev,
1547                                                 RTE_EVENT_DEV_XSTATS_DEVICE, 0,
1548                                                 &id,
1549                                                 1);
1550                 if (reset_ret) {
1551                         printf("%d: failed to reset successfully\n", __LINE__);
1552                         goto fail;
1553                 }
1554                 dev_expected[i] = 0;
1555                 /* check value again */
1556                 val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], 0);
1557                 if (val != dev_expected[i]) {
1558                         printf("%d: %s value incorrect, expected %"PRIu64
1559                                 " got %"PRIu64"\n", __LINE__, dev_names[i],
1560                                 dev_expected[i], val);
1561                         goto fail;
1562                 }
1563         };
1564
1565 /* 48 is stat offset from start of the devices whole xstats.
1566  * This WILL break every time we add a statistic to a port
1567  * or the device, but there is no other way to test
1568  */
1569 #define PORT_OFF 48
1570 /* num stats for the tested port. CQ size adds more stats to a port */
1571 #define NUM_PORT_STATS 21
1572 /* the port to test. */
1573 #define PORT 2
1574         num_stats = rte_event_dev_xstats_names_get(evdev,
1575                                         RTE_EVENT_DEV_XSTATS_PORT, PORT,
1576                                         xstats_names, ids, XSTATS_MAX);
1577         if (num_stats != NUM_PORT_STATS) {
1578                 printf("%d: expected %d stats, got return %d\n",
1579                         __LINE__, NUM_PORT_STATS, num_stats);
1580                 goto fail;
1581         }
1582         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT,
1583                                         ids, values, num_stats);
1584
1585         if (ret != NUM_PORT_STATS) {
1586                 printf("%d: expected %d stats, got return %d\n",
1587                                 __LINE__, NUM_PORT_STATS, ret);
1588                 goto fail;
1589         }
1590         static const char * const port_names[] = {
1591                 "port_2_rx",
1592                 "port_2_tx",
1593                 "port_2_drop",
1594                 "port_2_inflight",
1595                 "port_2_avg_pkt_cycles",
1596                 "port_2_credits",
1597                 "port_2_rx_ring_used",
1598                 "port_2_rx_ring_free",
1599                 "port_2_cq_ring_used",
1600                 "port_2_cq_ring_free",
1601                 "port_2_dequeue_calls",
1602                 "port_2_dequeues_returning_0",
1603                 "port_2_dequeues_returning_1-4",
1604                 "port_2_dequeues_returning_5-8",
1605                 "port_2_dequeues_returning_9-12",
1606                 "port_2_dequeues_returning_13-16",
1607                 "port_2_dequeues_returning_17-20",
1608                 "port_2_dequeues_returning_21-24",
1609                 "port_2_dequeues_returning_25-28",
1610                 "port_2_dequeues_returning_29-32",
1611                 "port_2_dequeues_returning_33-36",
1612         };
1613         uint64_t port_expected[] = {
1614                 0, /* rx */
1615                 NPKTS, /* tx */
1616                 0, /* drop */
1617                 NPKTS, /* inflight */
1618                 0, /* avg pkt cycles */
1619                 0, /* credits */
1620                 0, /* rx ring used */
1621                 4096, /* rx ring free */
1622                 NPKTS,  /* cq ring used */
1623                 25, /* cq ring free */
1624                 0, /* dequeue zero calls */
1625                 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1626                 0, 0, 0, 0, 0,
1627         };
1628         uint64_t port_expected_zero[] = {
1629                 0, /* rx */
1630                 0, /* tx */
1631                 0, /* drop */
1632                 NPKTS, /* inflight */
1633                 0, /* avg pkt cycles */
1634                 0, /* credits */
1635                 0, /* rx ring used */
1636                 4096, /* rx ring free */
1637                 NPKTS,  /* cq ring used */
1638                 25, /* cq ring free */
1639                 0, /* dequeue zero calls */
1640                 0, 0, 0, 0, 0, /* 10 dequeue buckets */
1641                 0, 0, 0, 0, 0,
1642         };
1643         if (RTE_DIM(port_expected) != NUM_PORT_STATS ||
1644                         RTE_DIM(port_names) != NUM_PORT_STATS) {
1645                 printf("%d: port array of wrong size\n", __LINE__);
1646                 goto fail;
1647         }
1648
1649         int failed = 0;
1650         for (i = 0; (int)i < ret; i++) {
1651                 unsigned int id;
1652                 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1653                                                                 port_names[i],
1654                                                                 &id);
1655                 if (id != i + PORT_OFF) {
1656                         printf("%d: %s id incorrect, expected %d got %d\n",
1657                                         __LINE__, port_names[i], i+PORT_OFF,
1658                                         id);
1659                         failed = 1;
1660                 }
1661                 if (val != port_expected[i]) {
1662                         printf("%d: %s value incorrect, expected %"PRIu64
1663                                 " got %d\n", __LINE__, port_names[i],
1664                                 port_expected[i], id);
1665                         failed = 1;
1666                 }
1667                 /* reset to zero */
1668                 int reset_ret = rte_event_dev_xstats_reset(evdev,
1669                                                 RTE_EVENT_DEV_XSTATS_PORT, PORT,
1670                                                 &id,
1671                                                 1);
1672                 if (reset_ret) {
1673                         printf("%d: failed to reset successfully\n", __LINE__);
1674                         failed = 1;
1675                 }
1676                 /* check value again */
1677                 val = rte_event_dev_xstats_by_name_get(evdev, port_names[i], 0);
1678                 if (val != port_expected_zero[i]) {
1679                         printf("%d: %s value incorrect, expected %"PRIu64
1680                                 " got %"PRIu64"\n", __LINE__, port_names[i],
1681                                 port_expected_zero[i], val);
1682                         failed = 1;
1683                 }
1684         };
1685         if (failed)
1686                 goto fail;
1687
1688 /* num queue stats */
1689 #define NUM_Q_STATS 17
1690 /* queue offset from start of the devices whole xstats.
1691  * This will break every time we add a statistic to a device/port/queue
1692  */
1693 #define QUEUE_OFF 90
1694         const uint32_t queue = 0;
1695         num_stats = rte_event_dev_xstats_names_get(evdev,
1696                                         RTE_EVENT_DEV_XSTATS_QUEUE, queue,
1697                                         xstats_names, ids, XSTATS_MAX);
1698         if (num_stats != NUM_Q_STATS) {
1699                 printf("%d: expected %d stats, got return %d\n",
1700                         __LINE__, NUM_Q_STATS, num_stats);
1701                 goto fail;
1702         }
1703         ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
1704                                         queue, ids, values, num_stats);
1705         if (ret != NUM_Q_STATS) {
1706                 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
1707                 goto fail;
1708         }
1709         static const char * const queue_names[] = {
1710                 "qid_0_rx",
1711                 "qid_0_tx",
1712                 "qid_0_drop",
1713                 "qid_0_inflight",
1714                 "qid_0_iq_size",
1715                 "qid_0_iq_0_used",
1716                 "qid_0_iq_1_used",
1717                 "qid_0_iq_2_used",
1718                 "qid_0_iq_3_used",
1719                 "qid_0_port_0_pinned_flows",
1720                 "qid_0_port_0_packets",
1721                 "qid_0_port_1_pinned_flows",
1722                 "qid_0_port_1_packets",
1723                 "qid_0_port_2_pinned_flows",
1724                 "qid_0_port_2_packets",
1725                 "qid_0_port_3_pinned_flows",
1726                 "qid_0_port_3_packets",
1727         };
1728         uint64_t queue_expected[] = {
1729                 7, /* rx */
1730                 7, /* tx */
1731                 0, /* drop */
1732                 7, /* inflight */
1733                 512, /* iq size */
1734                 0, /* iq 0 used */
1735                 0, /* iq 1 used */
1736                 0, /* iq 2 used */
1737                 0, /* iq 3 used */
1738                 /* QID-to-Port: pinned_flows, packets */
1739                 0, 0,
1740                 0, 0,
1741                 1, 7,
1742                 0, 0,
1743         };
1744         uint64_t queue_expected_zero[] = {
1745                 0, /* rx */
1746                 0, /* tx */
1747                 0, /* drop */
1748                 7, /* inflight */
1749                 512, /* iq size */
1750                 0, /* iq 0 used */
1751                 0, /* iq 1 used */
1752                 0, /* iq 2 used */
1753                 0, /* iq 3 used */
1754                 /* QID-to-Port: pinned_flows, packets */
1755                 0, 0,
1756                 0, 0,
1757                 1, 0,
1758                 0, 0,
1759         };
1760         if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
1761                         RTE_DIM(queue_expected_zero) != NUM_Q_STATS ||
1762                         RTE_DIM(queue_names) != NUM_Q_STATS) {
1763                 printf("%d : queue array of wrong size\n", __LINE__);
1764                 goto fail;
1765         }
1766
1767         failed = 0;
1768         for (i = 0; (int)i < ret; i++) {
1769                 unsigned int id;
1770                 uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
1771                                                                 queue_names[i],
1772                                                                 &id);
1773                 if (id != i + QUEUE_OFF) {
1774                         printf("%d: %s id incorrect, expected %d got %d\n",
1775                                         __LINE__, queue_names[i], i+QUEUE_OFF,
1776                                         id);
1777                         failed = 1;
1778                 }
1779                 if (val != queue_expected[i]) {
1780                         printf("%d: %d: %s value , expected %"PRIu64
1781                                 " got %"PRIu64"\n", i, __LINE__,
1782                                 queue_names[i], queue_expected[i], val);
1783                         failed = 1;
1784                 }
1785                 /* reset to zero */
1786                 int reset_ret = rte_event_dev_xstats_reset(evdev,
1787                                                 RTE_EVENT_DEV_XSTATS_QUEUE,
1788                                                 queue, &id, 1);
1789                 if (reset_ret) {
1790                         printf("%d: failed to reset successfully\n", __LINE__);
1791                         failed = 1;
1792                 }
1793                 /* check value again */
1794                 val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i],
1795                                                         0);
1796                 if (val != queue_expected_zero[i]) {
1797                         printf("%d: %s value incorrect, expected %"PRIu64
1798                                 " got %"PRIu64"\n", __LINE__, queue_names[i],
1799                                 queue_expected_zero[i], val);
1800                         failed = 1;
1801                 }
1802         };
1803
1804         if (failed)
1805                 goto fail;
1806
1807         cleanup(t);
1808         return 0;
1809 fail:
1810         cleanup(t);
1811         return -1;
1812 }
1813
1814 static int
1815 ordered_reconfigure(struct test *t)
1816 {
1817         if (init(t, 1, 1) < 0 ||
1818                         create_ports(t, 1) < 0) {
1819                 printf("%d: Error initializing device\n", __LINE__);
1820                 return -1;
1821         }
1822
1823         const struct rte_event_queue_conf conf = {
1824                         .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY,
1825                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
1826                         .nb_atomic_flows = 1024,
1827                         .nb_atomic_order_sequences = 1024,
1828         };
1829
1830         if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1831                 printf("%d: error creating qid\n", __LINE__);
1832                 goto failed;
1833         }
1834
1835         if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
1836                 printf("%d: error creating qid, for 2nd time\n", __LINE__);
1837                 goto failed;
1838         }
1839
1840         rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1841         if (rte_event_dev_start(evdev) < 0) {
1842                 printf("%d: Error with start call\n", __LINE__);
1843                 return -1;
1844         }
1845
1846         cleanup(t);
1847         return 0;
1848 failed:
1849         cleanup(t);
1850         return -1;
1851 }
1852
1853 static int
1854 qid_priorities(struct test *t)
1855 {
1856         /* Test works by having a CQ with enough empty space for all packets,
1857          * and enqueueing 3 packets to 3 QIDs. They must return based on the
1858          * priority of the QID, not the ingress order, to pass the test
1859          */
1860         unsigned int i;
1861         /* Create instance with 1 ports, and 3 qids */
1862         if (init(t, 3, 1) < 0 ||
1863                         create_ports(t, 1) < 0) {
1864                 printf("%d: Error initializing device\n", __LINE__);
1865                 return -1;
1866         }
1867
1868         for (i = 0; i < 3; i++) {
1869                 /* Create QID */
1870                 const struct rte_event_queue_conf conf = {
1871                         .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
1872                         /* increase priority (0 == highest), as we go */
1873                         .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
1874                         .nb_atomic_flows = 1024,
1875                         .nb_atomic_order_sequences = 1024,
1876                 };
1877
1878                 if (rte_event_queue_setup(evdev, i, &conf) < 0) {
1879                         printf("%d: error creating qid %d\n", __LINE__, i);
1880                         return -1;
1881                 }
1882                 t->qid[i] = i;
1883         }
1884         t->nb_qids = i;
1885         /* map all QIDs to port */
1886         rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
1887
1888         if (rte_event_dev_start(evdev) < 0) {
1889                 printf("%d: Error with start call\n", __LINE__);
1890                 return -1;
1891         }
1892
1893         /* enqueue 3 packets, setting seqn and QID to check priority */
1894         for (i = 0; i < 3; i++) {
1895                 struct rte_event ev;
1896                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1897                 if (!arp) {
1898                         printf("%d: gen of pkt failed\n", __LINE__);
1899                         return -1;
1900                 }
1901                 ev.queue_id = t->qid[i];
1902                 ev.op = RTE_EVENT_OP_NEW;
1903                 ev.mbuf = arp;
1904                 arp->seqn = i;
1905
1906                 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
1907                 if (err != 1) {
1908                         printf("%d: Failed to enqueue\n", __LINE__);
1909                         return -1;
1910                 }
1911         }
1912
1913         rte_event_schedule(evdev);
1914
1915         /* dequeue packets, verify priority was upheld */
1916         struct rte_event ev[32];
1917         uint32_t deq_pkts =
1918                 rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
1919         if (deq_pkts != 3) {
1920                 printf("%d: failed to deq packets\n", __LINE__);
1921                 rte_event_dev_dump(evdev, stdout);
1922                 return -1;
1923         }
1924         for (i = 0; i < 3; i++) {
1925                 if (ev[i].mbuf->seqn != 2-i) {
1926                         printf(
1927                                 "%d: qid priority test: seqn %d incorrectly prioritized\n",
1928                                         __LINE__, i);
1929                 }
1930         }
1931
1932         cleanup(t);
1933         return 0;
1934 }
1935
1936 static int
1937 load_balancing(struct test *t)
1938 {
1939         const int rx_enq = 0;
1940         int err;
1941         uint32_t i;
1942
1943         if (init(t, 1, 4) < 0 ||
1944                         create_ports(t, 4) < 0 ||
1945                         create_atomic_qids(t, 1) < 0) {
1946                 printf("%d: Error initializing device\n", __LINE__);
1947                 return -1;
1948         }
1949
1950         for (i = 0; i < 3; i++) {
1951                 /* map port 1 - 3 inclusive */
1952                 if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
1953                                 NULL, 1) != 1) {
1954                         printf("%d: error mapping qid to port %d\n",
1955                                         __LINE__, i);
1956                         return -1;
1957                 }
1958         }
1959
1960         if (rte_event_dev_start(evdev) < 0) {
1961                 printf("%d: Error with start call\n", __LINE__);
1962                 return -1;
1963         }
1964
1965         /************** FORWARD ****************/
1966         /*
1967          * Create a set of flows that test the load-balancing operation of the
1968          * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
1969          * with a new flow, which should be sent to the 3rd mapped CQ
1970          */
1971         static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
1972
1973         for (i = 0; i < RTE_DIM(flows); i++) {
1974                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
1975                 if (!arp) {
1976                         printf("%d: gen of pkt failed\n", __LINE__);
1977                         return -1;
1978                 }
1979
1980                 struct rte_event ev = {
1981                                 .op = RTE_EVENT_OP_NEW,
1982                                 .queue_id = t->qid[0],
1983                                 .flow_id = flows[i],
1984                                 .mbuf = arp,
1985                 };
1986                 /* generate pkt and enqueue */
1987                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
1988                 if (err < 0) {
1989                         printf("%d: Failed to enqueue\n", __LINE__);
1990                         return -1;
1991                 }
1992         }
1993
1994         rte_event_schedule(evdev);
1995
1996         struct test_event_dev_stats stats;
1997         err = test_event_dev_stats_get(evdev, &stats);
1998         if (err) {
1999                 printf("%d: failed to get stats\n", __LINE__);
2000                 return -1;
2001         }
2002
2003         if (stats.port_inflight[1] != 4) {
2004                 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2005                                 __func__);
2006                 return -1;
2007         }
2008         if (stats.port_inflight[2] != 2) {
2009                 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2010                                 __func__);
2011                 return -1;
2012         }
2013         if (stats.port_inflight[3] != 3) {
2014                 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2015                                 __func__);
2016                 return -1;
2017         }
2018
2019         cleanup(t);
2020         return 0;
2021 }
2022
2023 static int
2024 load_balancing_history(struct test *t)
2025 {
2026         struct test_event_dev_stats stats = {0};
2027         const int rx_enq = 0;
2028         int err;
2029         uint32_t i;
2030
2031         /* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
2032         if (init(t, 1, 4) < 0 ||
2033                         create_ports(t, 4) < 0 ||
2034                         create_atomic_qids(t, 1) < 0)
2035                 return -1;
2036
2037         /* CQ mapping to QID */
2038         if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
2039                 printf("%d: error mapping port 1 qid\n", __LINE__);
2040                 return -1;
2041         }
2042         if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
2043                 printf("%d: error mapping port 2 qid\n", __LINE__);
2044                 return -1;
2045         }
2046         if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
2047                 printf("%d: error mapping port 3 qid\n", __LINE__);
2048                 return -1;
2049         }
2050         if (rte_event_dev_start(evdev) < 0) {
2051                 printf("%d: Error with start call\n", __LINE__);
2052                 return -1;
2053         }
2054
2055         /*
2056          * Create a set of flows that test the load-balancing operation of the
2057          * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
2058          * the packet from CQ 0, send in a new set of flows. Ensure that:
2059          *  1. The new flow 3 gets into the empty CQ0
2060          *  2. packets for existing flow gets added into CQ1
2061          *  3. Next flow 0 pkt is now onto CQ2, since CQ0 and CQ1 now contain
2062          *     more outstanding pkts
2063          *
2064          *  This test makes sure that when a flow ends (i.e. all packets
2065          *  have been completed for that flow), that the flow can be moved
2066          *  to a different CQ when new packets come in for that flow.
2067          */
2068         static uint32_t flows1[] = {0, 1, 1, 2};
2069
2070         for (i = 0; i < RTE_DIM(flows1); i++) {
2071                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2072                 struct rte_event ev = {
2073                                 .flow_id = flows1[i],
2074                                 .op = RTE_EVENT_OP_NEW,
2075                                 .queue_id = t->qid[0],
2076                                 .event_type = RTE_EVENT_TYPE_CPU,
2077                                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2078                                 .mbuf = arp
2079                 };
2080
2081                 if (!arp) {
2082                         printf("%d: gen of pkt failed\n", __LINE__);
2083                         return -1;
2084                 }
2085                 arp->hash.rss = flows1[i];
2086                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2087                 if (err < 0) {
2088                         printf("%d: Failed to enqueue\n", __LINE__);
2089                         return -1;
2090                 }
2091         }
2092
2093         /* call the scheduler */
2094         rte_event_schedule(evdev);
2095
2096         /* Dequeue the flow 0 packet from port 1, so that we can then drop */
2097         struct rte_event ev;
2098         if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
2099                 printf("%d: failed to dequeue\n", __LINE__);
2100                 return -1;
2101         }
2102         if (ev.mbuf->hash.rss != flows1[0]) {
2103                 printf("%d: unexpected flow received\n", __LINE__);
2104                 return -1;
2105         }
2106
2107         /* drop the flow 0 packet from port 1 */
2108         rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
2109
2110         /* call the scheduler */
2111         rte_event_schedule(evdev);
2112
2113         /*
2114          * Set up the next set of flows, first a new flow to fill up
2115          * CQ 0, so that the next flow 0 packet should go to CQ2
2116          */
2117         static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
2118
2119         for (i = 0; i < RTE_DIM(flows2); i++) {
2120                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2121                 struct rte_event ev = {
2122                                 .flow_id = flows2[i],
2123                                 .op = RTE_EVENT_OP_NEW,
2124                                 .queue_id = t->qid[0],
2125                                 .event_type = RTE_EVENT_TYPE_CPU,
2126                                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
2127                                 .mbuf = arp
2128                 };
2129
2130                 if (!arp) {
2131                         printf("%d: gen of pkt failed\n", __LINE__);
2132                         return -1;
2133                 }
2134                 arp->hash.rss = flows2[i];
2135
2136                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2137                 if (err < 0) {
2138                         printf("%d: Failed to enqueue\n", __LINE__);
2139                         return -1;
2140                 }
2141         }
2142
2143         /* schedule */
2144         rte_event_schedule(evdev);
2145
2146         err = test_event_dev_stats_get(evdev, &stats);
2147         if (err) {
2148                 printf("%d:failed to get stats\n", __LINE__);
2149                 return -1;
2150         }
2151
2152         /*
2153          * Now check the resulting inflights on each port.
2154          */
2155         if (stats.port_inflight[1] != 3) {
2156                 printf("%d:%s: port 1 inflight not correct\n", __LINE__,
2157                                 __func__);
2158                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2159                                 (unsigned int)stats.port_inflight[1],
2160                                 (unsigned int)stats.port_inflight[2],
2161                                 (unsigned int)stats.port_inflight[3]);
2162                 return -1;
2163         }
2164         if (stats.port_inflight[2] != 4) {
2165                 printf("%d:%s: port 2 inflight not correct\n", __LINE__,
2166                                 __func__);
2167                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2168                                 (unsigned int)stats.port_inflight[1],
2169                                 (unsigned int)stats.port_inflight[2],
2170                                 (unsigned int)stats.port_inflight[3]);
2171                 return -1;
2172         }
2173         if (stats.port_inflight[3] != 2) {
2174                 printf("%d:%s: port 3 inflight not correct\n", __LINE__,
2175                                 __func__);
2176                 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
2177                                 (unsigned int)stats.port_inflight[1],
2178                                 (unsigned int)stats.port_inflight[2],
2179                                 (unsigned int)stats.port_inflight[3]);
2180                 return -1;
2181         }
2182
2183         for (i = 1; i <= 3; i++) {
2184                 struct rte_event ev;
2185                 while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
2186                         rte_event_enqueue_burst(evdev, i, &release_ev, 1);
2187         }
2188         rte_event_schedule(evdev);
2189
2190         cleanup(t);
2191         return 0;
2192 }
2193
2194 static int
2195 invalid_qid(struct test *t)
2196 {
2197         struct test_event_dev_stats stats;
2198         const int rx_enq = 0;
2199         int err;
2200         uint32_t i;
2201
2202         if (init(t, 1, 4) < 0 ||
2203                         create_ports(t, 4) < 0 ||
2204                         create_atomic_qids(t, 1) < 0) {
2205                 printf("%d: Error initializing device\n", __LINE__);
2206                 return -1;
2207         }
2208
2209         /* CQ mapping to QID */
2210         for (i = 0; i < 4; i++) {
2211                 err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
2212                                 NULL, 1);
2213                 if (err != 1) {
2214                         printf("%d: error mapping port 1 qid\n", __LINE__);
2215                         return -1;
2216                 }
2217         }
2218
2219         if (rte_event_dev_start(evdev) < 0) {
2220                 printf("%d: Error with start call\n", __LINE__);
2221                 return -1;
2222         }
2223
2224         /*
2225          * Send in a packet with an invalid qid to the scheduler.
2226          * We should see the packed enqueued OK, but the inflights for
2227          * that packet should not be incremented, and the rx_dropped
2228          * should be incremented.
2229          */
2230         static uint32_t flows1[] = {20};
2231
2232         for (i = 0; i < RTE_DIM(flows1); i++) {
2233                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2234                 if (!arp) {
2235                         printf("%d: gen of pkt failed\n", __LINE__);
2236                         return -1;
2237                 }
2238
2239                 struct rte_event ev = {
2240                                 .op = RTE_EVENT_OP_NEW,
2241                                 .queue_id = t->qid[0] + flows1[i],
2242                                 .flow_id = i,
2243                                 .mbuf = arp,
2244                 };
2245                 /* generate pkt and enqueue */
2246                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2247                 if (err < 0) {
2248                         printf("%d: Failed to enqueue\n", __LINE__);
2249                         return -1;
2250                 }
2251         }
2252
2253         /* call the scheduler */
2254         rte_event_schedule(evdev);
2255
2256         err = test_event_dev_stats_get(evdev, &stats);
2257         if (err) {
2258                 printf("%d: failed to get stats\n", __LINE__);
2259                 return -1;
2260         }
2261
2262         /*
2263          * Now check the resulting inflights on the port, and the rx_dropped.
2264          */
2265         if (stats.port_inflight[0] != 0) {
2266                 printf("%d:%s: port 1 inflight count not correct\n", __LINE__,
2267                                 __func__);
2268                 rte_event_dev_dump(evdev, stdout);
2269                 return -1;
2270         }
2271         if (stats.port_rx_dropped[0] != 1) {
2272                 printf("%d:%s: port 1 drops\n", __LINE__, __func__);
2273                 rte_event_dev_dump(evdev, stdout);
2274                 return -1;
2275         }
2276         /* each packet drop should only be counted in one place - port or dev */
2277         if (stats.rx_dropped != 0) {
2278                 printf("%d:%s: port 1 dropped count not correct\n", __LINE__,
2279                                 __func__);
2280                 rte_event_dev_dump(evdev, stdout);
2281                 return -1;
2282         }
2283
2284         cleanup(t);
2285         return 0;
2286 }
2287
2288 static int
2289 single_packet(struct test *t)
2290 {
2291         const uint32_t MAGIC_SEQN = 7321;
2292         struct rte_event ev;
2293         struct test_event_dev_stats stats;
2294         const int rx_enq = 0;
2295         const int wrk_enq = 2;
2296         int err;
2297
2298         /* Create instance with 4 ports */
2299         if (init(t, 1, 4) < 0 ||
2300                         create_ports(t, 4) < 0 ||
2301                         create_atomic_qids(t, 1) < 0) {
2302                 printf("%d: Error initializing device\n", __LINE__);
2303                 return -1;
2304         }
2305
2306         /* CQ mapping to QID */
2307         err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
2308         if (err != 1) {
2309                 printf("%d: error mapping lb qid\n", __LINE__);
2310                 cleanup(t);
2311                 return -1;
2312         }
2313
2314         if (rte_event_dev_start(evdev) < 0) {
2315                 printf("%d: Error with start call\n", __LINE__);
2316                 return -1;
2317         }
2318
2319         /************** Gen pkt and enqueue ****************/
2320         struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2321         if (!arp) {
2322                 printf("%d: gen of pkt failed\n", __LINE__);
2323                 return -1;
2324         }
2325
2326         ev.op = RTE_EVENT_OP_NEW;
2327         ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
2328         ev.mbuf = arp;
2329         ev.queue_id = 0;
2330         ev.flow_id = 3;
2331         arp->seqn = MAGIC_SEQN;
2332
2333         err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2334         if (err < 0) {
2335                 printf("%d: Failed to enqueue\n", __LINE__);
2336                 return -1;
2337         }
2338
2339         rte_event_schedule(evdev);
2340
2341         err = test_event_dev_stats_get(evdev, &stats);
2342         if (err) {
2343                 printf("%d: failed to get stats\n", __LINE__);
2344                 return -1;
2345         }
2346
2347         if (stats.rx_pkts != 1 ||
2348                         stats.tx_pkts != 1 ||
2349                         stats.port_inflight[wrk_enq] != 1) {
2350                 printf("%d: Sched core didn't handle pkt as expected\n",
2351                                 __LINE__);
2352                 rte_event_dev_dump(evdev, stdout);
2353                 return -1;
2354         }
2355
2356         uint32_t deq_pkts;
2357
2358         deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
2359         if (deq_pkts < 1) {
2360                 printf("%d: Failed to deq\n", __LINE__);
2361                 return -1;
2362         }
2363
2364         err = test_event_dev_stats_get(evdev, &stats);
2365         if (err) {
2366                 printf("%d: failed to get stats\n", __LINE__);
2367                 return -1;
2368         }
2369
2370         err = test_event_dev_stats_get(evdev, &stats);
2371         if (ev.mbuf->seqn != MAGIC_SEQN) {
2372                 printf("%d: magic sequence number not dequeued\n", __LINE__);
2373                 return -1;
2374         }
2375
2376         rte_pktmbuf_free(ev.mbuf);
2377         err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
2378         if (err < 0) {
2379                 printf("%d: Failed to enqueue\n", __LINE__);
2380                 return -1;
2381         }
2382         rte_event_schedule(evdev);
2383
2384         err = test_event_dev_stats_get(evdev, &stats);
2385         if (stats.port_inflight[wrk_enq] != 0) {
2386                 printf("%d: port inflight not correct\n", __LINE__);
2387                 return -1;
2388         }
2389
2390         cleanup(t);
2391         return 0;
2392 }
2393
2394 static int
2395 inflight_counts(struct test *t)
2396 {
2397         struct rte_event ev;
2398         struct test_event_dev_stats stats;
2399         const int rx_enq = 0;
2400         const int p1 = 1;
2401         const int p2 = 2;
2402         int err;
2403         int i;
2404
2405         /* Create instance with 4 ports */
2406         if (init(t, 2, 3) < 0 ||
2407                         create_ports(t, 3) < 0 ||
2408                         create_atomic_qids(t, 2) < 0) {
2409                 printf("%d: Error initializing device\n", __LINE__);
2410                 return -1;
2411         }
2412
2413         /* CQ mapping to QID */
2414         err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
2415         if (err != 1) {
2416                 printf("%d: error mapping lb qid\n", __LINE__);
2417                 cleanup(t);
2418                 return -1;
2419         }
2420         err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
2421         if (err != 1) {
2422                 printf("%d: error mapping lb qid\n", __LINE__);
2423                 cleanup(t);
2424                 return -1;
2425         }
2426
2427         if (rte_event_dev_start(evdev) < 0) {
2428                 printf("%d: Error with start call\n", __LINE__);
2429                 return -1;
2430         }
2431
2432         /************** FORWARD ****************/
2433 #define QID1_NUM 5
2434         for (i = 0; i < QID1_NUM; i++) {
2435                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2436
2437                 if (!arp) {
2438                         printf("%d: gen of pkt failed\n", __LINE__);
2439                         goto err;
2440                 }
2441
2442                 ev.queue_id =  t->qid[0];
2443                 ev.op = RTE_EVENT_OP_NEW;
2444                 ev.mbuf = arp;
2445                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2446                 if (err != 1) {
2447                         printf("%d: Failed to enqueue\n", __LINE__);
2448                         goto err;
2449                 }
2450         }
2451 #define QID2_NUM 3
2452         for (i = 0; i < QID2_NUM; i++) {
2453                 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
2454
2455                 if (!arp) {
2456                         printf("%d: gen of pkt failed\n", __LINE__);
2457                         goto err;
2458                 }
2459                 ev.queue_id =  t->qid[1];
2460                 ev.op = RTE_EVENT_OP_NEW;
2461                 ev.mbuf = arp;
2462                 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
2463                 if (err != 1) {
2464                         printf("%d: Failed to enqueue\n", __LINE__);
2465                         goto err;
2466                 }
2467         }
2468
2469         /* schedule */
2470         rte_event_schedule(evdev);
2471
2472         err = test_event_dev_stats_get(evdev, &stats);
2473         if (err) {
2474                 printf("%d: failed to get stats\n", __LINE__);
2475                 goto err;
2476         }
2477
2478         if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
2479                         stats.tx_pkts != QID1_NUM + QID2_NUM) {
2480                 printf("%d: Sched core didn't handle pkt as expected\n",
2481                                 __LINE__);
2482                 goto err;
2483         }
2484
2485         if (stats.port_inflight[p1] != QID1_NUM) {
2486                 printf("%d: %s port 1 inflight not correct\n", __LINE__,
2487                                 __func__);
2488                 goto err;
2489         }
2490         if (stats.port_inflight[p2] != QID2_NUM) {
2491                 printf("%d: %s port 2 inflight not correct\n", __LINE__,
2492                                 __func__);
2493                 goto err;
2494         }
2495
2496         /************** DEQUEUE INFLIGHT COUNT CHECKS  ****************/
2497         /* port 1 */
2498         struct rte_event events[QID1_NUM + QID2_NUM];
2499         uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
2500                         RTE_DIM(events), 0);
2501
2502         if (deq_pkts != QID1_NUM) {
2503                 printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
2504                 goto err;
2505         }
2506         err = test_event_dev_stats_get(evdev, &stats);
2507         if (stats.port_inflight[p1] != QID1_NUM) {
2508                 printf("%d: port 1 inflight decrement after DEQ != 0\n",
2509                                 __LINE__);
2510                 goto err;
2511         }
2512         for (i = 0; i < QID1_NUM; i++) {
2513                 err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
2514                                 1);
2515                 if (err != 1) {
2516                         printf("%d: %s rte enqueue of inf release failed\n",
2517                                 __LINE__, __func__);
2518                         goto err;
2519                 }
2520         }
2521
2522         /*
2523          * As the scheduler core decrements inflights, it needs to run to
2524          * process packets to act on the drop messages
2525          */
2526         rte_event_schedule(evdev);
2527
2528         err = test_event_dev_stats_get(evdev, &stats);
2529         if (stats.port_inflight[p1] != 0) {
2530                 printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
2531                 goto err;
2532         }
2533
2534         /* port2 */
2535         deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
2536                         RTE_DIM(events), 0);
2537         if (deq_pkts != QID2_NUM) {
2538                 printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
2539                 goto err;
2540         }
2541         err = test_event_dev_stats_get(evdev, &stats);
2542         if (stats.port_inflight[p2] != QID2_NUM) {
2543                 printf("%d: port 1 inflight decrement after DEQ != 0\n",
2544                                 __LINE__);
2545                 goto err;
2546         }
2547         for (i = 0; i < QID2_NUM; i++) {
2548                 err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
2549                                 1);
2550                 if (err != 1) {
2551                         printf("%d: %s rte enqueue of inf release failed\n",
2552                                 __LINE__, __func__);
2553                         goto err;
2554                 }
2555         }
2556
2557         /*
2558          * As the scheduler core decrements inflights, it needs to run to
2559          * process packets to act on the drop messages
2560          */
2561         rte_event_schedule(evdev);
2562
2563         err = test_event_dev_stats_get(evdev, &stats);
2564         if (stats.port_inflight[p2] != 0) {
2565                 printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
2566                 goto err;
2567         }
2568         cleanup(t);
2569         return 0;
2570
2571 err:
2572         rte_event_dev_dump(evdev, stdout);
2573         cleanup(t);
2574         return -1;
2575 }
2576
2577 static int
2578 parallel_basic(struct test *t, int check_order)
2579 {
2580         const uint8_t rx_port = 0;
2581         const uint8_t w1_port = 1;
2582         const uint8_t w3_port = 3;
2583         const uint8_t tx_port = 4;
2584         int err;
2585         int i;
2586         uint32_t deq_pkts, j;
2587         struct rte_mbuf *mbufs[3];
2588         struct rte_mbuf *mbufs_out[3] = { 0 };
2589         const uint32_t MAGIC_SEQN = 1234;
2590
2591         /* Create instance with 4 ports */
2592         if (init(t, 2, tx_port + 1) < 0 ||
2593                         create_ports(t, tx_port + 1) < 0 ||
2594                         (check_order ?  create_ordered_qids(t, 1) :
2595                                 create_unordered_qids(t, 1)) < 0 ||
2596                         create_directed_qids(t, 1, &tx_port)) {
2597                 printf("%d: Error initializing device\n", __LINE__);
2598                 return -1;
2599         }
2600
2601         /*
2602          * CQ mapping to QID
2603          * We need three ports, all mapped to the same ordered qid0. Then we'll
2604          * take a packet out to each port, re-enqueue in reverse order,
2605          * then make sure the reordering has taken place properly when we
2606          * dequeue from the tx_port.
2607          *
2608          * Simplified test setup diagram:
2609          *
2610          * rx_port        w1_port
2611          *        \     /         \
2612          *         qid0 - w2_port - qid1
2613          *              \         /     \
2614          *                w3_port        tx_port
2615          */
2616         /* CQ mapping to QID for LB ports (directed mapped on create) */
2617         for (i = w1_port; i <= w3_port; i++) {
2618                 err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
2619                                 1);
2620                 if (err != 1) {
2621                         printf("%d: error mapping lb qid\n", __LINE__);
2622                         cleanup(t);
2623                         return -1;
2624                 }
2625         }
2626
2627         if (rte_event_dev_start(evdev) < 0) {
2628                 printf("%d: Error with start call\n", __LINE__);
2629                 return -1;
2630         }
2631
2632         /* Enqueue 3 packets to the rx port */
2633         for (i = 0; i < 3; i++) {
2634                 struct rte_event ev;
2635                 mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
2636                 if (!mbufs[i]) {
2637                         printf("%d: gen of pkt failed\n", __LINE__);
2638                         return -1;
2639                 }
2640
2641                 ev.queue_id = t->qid[0];
2642                 ev.op = RTE_EVENT_OP_NEW;
2643                 ev.mbuf = mbufs[i];
2644                 mbufs[i]->seqn = MAGIC_SEQN + i;
2645
2646                 /* generate pkt and enqueue */
2647                 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
2648                 if (err != 1) {
2649                         printf("%d: Failed to enqueue pkt %u, retval = %u\n",
2650                                         __LINE__, i, err);
2651                         return -1;
2652                 }
2653         }
2654
2655         rte_event_schedule(evdev);
2656
2657         /* use extra slot to make logic in loops easier */
2658         struct rte_event deq_ev[w3_port + 1];
2659
2660         /* Dequeue the 3 packets, one from each worker port */
2661         for (i = w1_port; i <= w3_port; i++) {
2662                 deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
2663                                 &deq_ev[i], 1, 0);
2664                 if (deq_pkts != 1) {
2665                         printf("%d: Failed to deq\n", __LINE__);
2666                         rte_event_dev_dump(evdev, stdout);
2667                         return -1;
2668                 }
2669         }
2670
2671         /* Enqueue each packet in reverse order, flushing after each one */
2672         for (i = w3_port; i >= w1_port; i--) {
2673
2674                 deq_ev[i].op = RTE_EVENT_OP_FORWARD;
2675                 deq_ev[i].queue_id = t->qid[1];
2676                 err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
2677                 if (err != 1) {
2678                         printf("%d: Failed to enqueue\n", __LINE__);
2679                         return -1;
2680                 }
2681         }
2682         rte_event_schedule(evdev);
2683
2684         /* dequeue from the tx ports, we should get 3 packets */
2685         deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
2686                         3, 0);
2687
2688         /* Check to see if we've got all 3 packets */
2689         if (deq_pkts != 3) {
2690                 printf("%d: expected 3 pkts at tx port got %d from port %d\n",
2691                         __LINE__, deq_pkts, tx_port);
2692                 rte_event_dev_dump(evdev, stdout);
2693                 return 1;
2694         }
2695
2696         /* Check to see if the sequence numbers are in expected order */
2697         if (check_order) {
2698                 for (j = 0 ; j < deq_pkts ; j++) {
2699                         if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
2700                                 printf(
2701                                         "%d: Incorrect sequence number(%d) from port %d\n",
2702                                         __LINE__, mbufs_out[j]->seqn, tx_port);
2703                                 return -1;
2704                         }
2705                 }
2706         }
2707
2708         /* Destroy the instance */
2709         cleanup(t);
2710         return 0;
2711 }
2712
2713 static int
2714 ordered_basic(struct test *t)
2715 {
2716         return parallel_basic(t, 1);
2717 }
2718
2719 static int
2720 unordered_basic(struct test *t)
2721 {
2722         return parallel_basic(t, 0);
2723 }
2724
2725 static int
2726 holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
2727 {
2728         const struct rte_event new_ev = {
2729                         .op = RTE_EVENT_OP_NEW
2730                         /* all other fields zero */
2731         };
2732         struct rte_event ev = new_ev;
2733         unsigned int rx_port = 0; /* port we get the first flow on */
2734         char rx_port_used_stat[64];
2735         char rx_port_free_stat[64];
2736         char other_port_used_stat[64];
2737
2738         if (init(t, 1, 2) < 0 ||
2739                         create_ports(t, 2) < 0 ||
2740                         create_atomic_qids(t, 1) < 0) {
2741                 printf("%d: Error initializing device\n", __LINE__);
2742                 return -1;
2743         }
2744         int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2745         if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 ||
2746                         nb_links != 1) {
2747                 printf("%d: Error links queue to ports\n", __LINE__);
2748                 goto err;
2749         }
2750         if (rte_event_dev_start(evdev) < 0) {
2751                 printf("%d: Error with start call\n", __LINE__);
2752                 goto err;
2753         }
2754
2755         /* send one packet and see where it goes, port 0 or 1 */
2756         if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2757                 printf("%d: Error doing first enqueue\n", __LINE__);
2758                 goto err;
2759         }
2760         rte_event_schedule(evdev);
2761
2762         if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
2763                         != 1)
2764                 rx_port = 1;
2765
2766         snprintf(rx_port_used_stat, sizeof(rx_port_used_stat),
2767                         "port_%u_cq_ring_used", rx_port);
2768         snprintf(rx_port_free_stat, sizeof(rx_port_free_stat),
2769                         "port_%u_cq_ring_free", rx_port);
2770         snprintf(other_port_used_stat, sizeof(other_port_used_stat),
2771                         "port_%u_cq_ring_used", rx_port ^ 1);
2772         if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL)
2773                         != 1) {
2774                 printf("%d: Error, first event not scheduled\n", __LINE__);
2775                 goto err;
2776         }
2777
2778         /* now fill up the rx port's queue with one flow to cause HOLB */
2779         do {
2780                 ev = new_ev;
2781                 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2782                         printf("%d: Error with enqueue\n", __LINE__);
2783                         goto err;
2784                 }
2785                 rte_event_schedule(evdev);
2786         } while (rte_event_dev_xstats_by_name_get(evdev,
2787                                 rx_port_free_stat, NULL) != 0);
2788
2789         /* one more packet, which needs to stay in IQ - i.e. HOLB */
2790         ev = new_ev;
2791         if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2792                 printf("%d: Error with enqueue\n", __LINE__);
2793                 goto err;
2794         }
2795         rte_event_schedule(evdev);
2796
2797         /* check that the other port still has an empty CQ */
2798         if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2799                         != 0) {
2800                 printf("%d: Error, second port CQ is not empty\n", __LINE__);
2801                 goto err;
2802         }
2803         /* check IQ now has one packet */
2804         if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2805                         != 1) {
2806                 printf("%d: Error, QID does not have exactly 1 packet\n",
2807                         __LINE__);
2808                 goto err;
2809         }
2810
2811         /* send another flow, which should pass the other IQ entry */
2812         ev = new_ev;
2813         ev.flow_id = 1;
2814         if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
2815                 printf("%d: Error with enqueue\n", __LINE__);
2816                 goto err;
2817         }
2818         rte_event_schedule(evdev);
2819
2820         if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
2821                         != 1) {
2822                 printf("%d: Error, second flow did not pass out first\n",
2823                         __LINE__);
2824                 goto err;
2825         }
2826
2827         if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
2828                         != 1) {
2829                 printf("%d: Error, QID does not have exactly 1 packet\n",
2830                         __LINE__);
2831                 goto err;
2832         }
2833         cleanup(t);
2834         return 0;
2835 err:
2836         rte_event_dev_dump(evdev, stdout);
2837         cleanup(t);
2838         return -1;
2839 }
2840
2841 static int
2842 worker_loopback_worker_fn(void *arg)
2843 {
2844         struct test *t = arg;
2845         uint8_t port = t->port[1];
2846         int count = 0;
2847         int enqd;
2848
2849         /*
2850          * Takes packets from the input port and then loops them back through
2851          * the Eventdev. Each packet gets looped through QIDs 0-8, 16 times
2852          * so each packet goes through 8*16 = 128 times.
2853          */
2854         printf("%d: \tWorker function started\n", __LINE__);
2855         while (count < NUM_PACKETS) {
2856 #define BURST_SIZE 32
2857                 struct rte_event ev[BURST_SIZE];
2858                 uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev,
2859                                 BURST_SIZE, 0);
2860                 if (nb_rx == 0) {
2861                         rte_pause();
2862                         continue;
2863                 }
2864
2865                 for (i = 0; i < nb_rx; i++) {
2866                         ev[i].queue_id++;
2867                         if (ev[i].queue_id != 8) {
2868                                 ev[i].op = RTE_EVENT_OP_FORWARD;
2869                                 enqd = rte_event_enqueue_burst(evdev, port,
2870                                                 &ev[i], 1);
2871                                 if (enqd != 1) {
2872                                         printf("%d: Can't enqueue FWD!!\n",
2873                                                         __LINE__);
2874                                         return -1;
2875                                 }
2876                                 continue;
2877                         }
2878
2879                         ev[i].queue_id = 0;
2880                         ev[i].mbuf->udata64++;
2881                         if (ev[i].mbuf->udata64 != 16) {
2882                                 ev[i].op = RTE_EVENT_OP_FORWARD;
2883                                 enqd = rte_event_enqueue_burst(evdev, port,
2884                                                 &ev[i], 1);
2885                                 if (enqd != 1) {
2886                                         printf("%d: Can't enqueue FWD!!\n",
2887                                                         __LINE__);
2888                                         return -1;
2889                                 }
2890                                 continue;
2891                         }
2892                         /* we have hit 16 iterations through system - drop */
2893                         rte_pktmbuf_free(ev[i].mbuf);
2894                         count++;
2895                         ev[i].op = RTE_EVENT_OP_RELEASE;
2896                         enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1);
2897                         if (enqd != 1) {
2898                                 printf("%d drop enqueue failed\n", __LINE__);
2899                                 return -1;
2900                         }
2901                 }
2902         }
2903
2904         return 0;
2905 }
2906
2907 static int
2908 worker_loopback_producer_fn(void *arg)
2909 {
2910         struct test *t = arg;
2911         uint8_t port = t->port[0];
2912         uint64_t count = 0;
2913
2914         printf("%d: \tProducer function started\n", __LINE__);
2915         while (count < NUM_PACKETS) {
2916                 struct rte_mbuf *m = 0;
2917                 do {
2918                         m = rte_pktmbuf_alloc(t->mbuf_pool);
2919                 } while (m == NULL);
2920
2921                 m->udata64 = 0;
2922
2923                 struct rte_event ev = {
2924                                 .op = RTE_EVENT_OP_NEW,
2925                                 .queue_id = t->qid[0],
2926                                 .flow_id = (uintptr_t)m & 0xFFFF,
2927                                 .mbuf = m,
2928                 };
2929
2930                 if (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1) {
2931                         while (rte_event_enqueue_burst(evdev, port, &ev, 1) !=
2932                                         1)
2933                                 rte_pause();
2934                 }
2935
2936                 count++;
2937         }
2938
2939         return 0;
2940 }
2941
2942 static int
2943 worker_loopback(struct test *t)
2944 {
2945         /* use a single producer core, and a worker core to see what happens
2946          * if the worker loops packets back multiple times
2947          */
2948         struct test_event_dev_stats stats;
2949         uint64_t print_cycles = 0, cycles = 0;
2950         uint64_t tx_pkts = 0;
2951         int err;
2952         int w_lcore, p_lcore;
2953
2954         if (init(t, 8, 2) < 0 ||
2955                         create_atomic_qids(t, 8) < 0) {
2956                 printf("%d: Error initializing device\n", __LINE__);
2957                 return -1;
2958         }
2959
2960         /* RX with low max events */
2961         static struct rte_event_port_conf conf = {
2962                         .dequeue_depth = 32,
2963                         .enqueue_depth = 64,
2964         };
2965         /* beware: this cannot be initialized in the static above as it would
2966          * only be initialized once - and this needs to be set for multiple runs
2967          */
2968         conf.new_event_threshold = 512;
2969
2970         if (rte_event_port_setup(evdev, 0, &conf) < 0) {
2971                 printf("Error setting up RX port\n");
2972                 return -1;
2973         }
2974         t->port[0] = 0;
2975         /* TX with higher max events */
2976         conf.new_event_threshold = 4096;
2977         if (rte_event_port_setup(evdev, 1, &conf) < 0) {
2978                 printf("Error setting up TX port\n");
2979                 return -1;
2980         }
2981         t->port[1] = 1;
2982
2983         /* CQ mapping to QID */
2984         err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
2985         if (err != 8) { /* should have mapped all queues*/
2986                 printf("%d: error mapping port 2 to all qids\n", __LINE__);
2987                 return -1;
2988         }
2989
2990         if (rte_event_dev_start(evdev) < 0) {
2991                 printf("%d: Error with start call\n", __LINE__);
2992                 return -1;
2993         }
2994
2995         p_lcore = rte_get_next_lcore(
2996                         /* start core */ -1,
2997                         /* skip master */ 1,
2998                         /* wrap */ 0);
2999         w_lcore = rte_get_next_lcore(p_lcore, 1, 0);
3000
3001         rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);
3002         rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);
3003
3004         print_cycles = cycles = rte_get_timer_cycles();
3005         while (rte_eal_get_lcore_state(p_lcore) != FINISHED ||
3006                         rte_eal_get_lcore_state(w_lcore) != FINISHED) {
3007
3008                 rte_event_schedule(evdev);
3009
3010                 uint64_t new_cycles = rte_get_timer_cycles();
3011
3012                 if (new_cycles - print_cycles > rte_get_timer_hz()) {
3013                         test_event_dev_stats_get(evdev, &stats);
3014                         printf(
3015                                 "%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n",
3016                                 __LINE__, stats.rx_pkts, stats.tx_pkts);
3017
3018                         print_cycles = new_cycles;
3019                 }
3020                 if (new_cycles - cycles > rte_get_timer_hz() * 3) {
3021                         test_event_dev_stats_get(evdev, &stats);
3022                         if (stats.tx_pkts == tx_pkts) {
3023                                 rte_event_dev_dump(evdev, stdout);
3024                                 printf("Dumping xstats:\n");
3025                                 xstats_print();
3026                                 printf(
3027                                         "%d: No schedules for seconds, deadlock\n",
3028                                         __LINE__);
3029                                 return -1;
3030                         }
3031                         tx_pkts = stats.tx_pkts;
3032                         cycles = new_cycles;
3033                 }
3034         }
3035         rte_event_schedule(evdev); /* ensure all completions are flushed */
3036
3037         rte_eal_mp_wait_lcore();
3038
3039         cleanup(t);
3040         return 0;
3041 }
3042
3043 static struct rte_mempool *eventdev_func_mempool;
3044
3045 static int
3046 test_sw_eventdev(void)
3047 {
3048         struct test *t = malloc(sizeof(struct test));
3049         int ret;
3050
3051         /* manually initialize the op, older gcc's complain on static
3052          * initialization of struct elements that are a bitfield.
3053          */
3054         release_ev.op = RTE_EVENT_OP_RELEASE;
3055
3056         const char *eventdev_name = "event_sw0";
3057         evdev = rte_event_dev_get_dev_id(eventdev_name);
3058         if (evdev < 0) {
3059                 printf("%d: Eventdev %s not found - creating.\n",
3060                                 __LINE__, eventdev_name);
3061                 if (rte_vdev_init(eventdev_name, NULL) < 0) {
3062                         printf("Error creating eventdev\n");
3063                         return -1;
3064                 }
3065                 evdev = rte_event_dev_get_dev_id(eventdev_name);
3066                 if (evdev < 0) {
3067                         printf("Error finding newly created eventdev\n");
3068                         return -1;
3069                 }
3070         }
3071
3072         /* Only create mbuf pool once, reuse for each test run */
3073         if (!eventdev_func_mempool) {
3074                 eventdev_func_mempool = rte_pktmbuf_pool_create(
3075                                 "EVENTDEV_SW_SA_MBUF_POOL",
3076                                 (1<<12), /* 4k buffers */
3077                                 32 /*MBUF_CACHE_SIZE*/,
3078                                 0,
3079                                 512, /* use very small mbufs */
3080                                 rte_socket_id());
3081                 if (!eventdev_func_mempool) {
3082                         printf("ERROR creating mempool\n");
3083                         return -1;
3084                 }
3085         }
3086         t->mbuf_pool = eventdev_func_mempool;
3087         printf("*** Running Single Directed Packet test...\n");
3088         ret = test_single_directed_packet(t);
3089         if (ret != 0) {
3090                 printf("ERROR - Single Directed Packet test FAILED.\n");
3091                 return ret;
3092         }
3093         printf("*** Running Directed Forward Credit test...\n");
3094         ret = test_directed_forward_credits(t);
3095         if (ret != 0) {
3096                 printf("ERROR - Directed Forward Credit test FAILED.\n");
3097                 return ret;
3098         }
3099         printf("*** Running Single Load Balanced Packet test...\n");
3100         ret = single_packet(t);
3101         if (ret != 0) {
3102                 printf("ERROR - Single Packet test FAILED.\n");
3103                 return ret;
3104         }
3105         printf("*** Running Unordered Basic test...\n");
3106         ret = unordered_basic(t);
3107         if (ret != 0) {
3108                 printf("ERROR -  Unordered Basic test FAILED.\n");
3109                 return ret;
3110         }
3111         printf("*** Running Ordered Basic test...\n");
3112         ret = ordered_basic(t);
3113         if (ret != 0) {
3114                 printf("ERROR -  Ordered Basic test FAILED.\n");
3115                 return ret;
3116         }
3117         printf("*** Running Burst Packets test...\n");
3118         ret = burst_packets(t);
3119         if (ret != 0) {
3120                 printf("ERROR - Burst Packets test FAILED.\n");
3121                 return ret;
3122         }
3123         printf("*** Running Load Balancing test...\n");
3124         ret = load_balancing(t);
3125         if (ret != 0) {
3126                 printf("ERROR - Load Balancing test FAILED.\n");
3127                 return ret;
3128         }
3129         printf("*** Running Prioritized Directed test...\n");
3130         ret = test_priority_directed(t);
3131         if (ret != 0) {
3132                 printf("ERROR - Prioritized Directed test FAILED.\n");
3133                 return ret;
3134         }
3135         printf("*** Running Prioritized Atomic test...\n");
3136         ret = test_priority_atomic(t);
3137         if (ret != 0) {
3138                 printf("ERROR - Prioritized Atomic test FAILED.\n");
3139                 return ret;
3140         }
3141
3142         printf("*** Running Prioritized Ordered test...\n");
3143         ret = test_priority_ordered(t);
3144         if (ret != 0) {
3145                 printf("ERROR - Prioritized Ordered test FAILED.\n");
3146                 return ret;
3147         }
3148         printf("*** Running Prioritized Unordered test...\n");
3149         ret = test_priority_unordered(t);
3150         if (ret != 0) {
3151                 printf("ERROR - Prioritized Unordered test FAILED.\n");
3152                 return ret;
3153         }
3154         printf("*** Running Invalid QID test...\n");
3155         ret = invalid_qid(t);
3156         if (ret != 0) {
3157                 printf("ERROR - Invalid QID test FAILED.\n");
3158                 return ret;
3159         }
3160         printf("*** Running Load Balancing History test...\n");
3161         ret = load_balancing_history(t);
3162         if (ret != 0) {
3163                 printf("ERROR - Load Balancing History test FAILED.\n");
3164                 return ret;
3165         }
3166         printf("*** Running Inflight Count test...\n");
3167         ret = inflight_counts(t);
3168         if (ret != 0) {
3169                 printf("ERROR - Inflight Count test FAILED.\n");
3170                 return ret;
3171         }
3172         printf("*** Running Abuse Inflights test...\n");
3173         ret = abuse_inflights(t);
3174         if (ret != 0) {
3175                 printf("ERROR - Abuse Inflights test FAILED.\n");
3176                 return ret;
3177         }
3178         printf("*** Running XStats test...\n");
3179         ret = xstats_tests(t);
3180         if (ret != 0) {
3181                 printf("ERROR - XStats test FAILED.\n");
3182                 return ret;
3183         }
3184         printf("*** Running XStats ID Reset test...\n");
3185         ret = xstats_id_reset_tests(t);
3186         if (ret != 0) {
3187                 printf("ERROR - XStats ID Reset test FAILED.\n");
3188                 return ret;
3189         }
3190         printf("*** Running XStats Brute Force test...\n");
3191         ret = xstats_brute_force(t);
3192         if (ret != 0) {
3193                 printf("ERROR - XStats Brute Force test FAILED.\n");
3194                 return ret;
3195         }
3196         printf("*** Running XStats ID Abuse test...\n");
3197         ret = xstats_id_abuse_tests(t);
3198         if (ret != 0) {
3199                 printf("ERROR - XStats ID Abuse test FAILED.\n");
3200                 return ret;
3201         }
3202         printf("*** Running QID Priority test...\n");
3203         ret = qid_priorities(t);
3204         if (ret != 0) {
3205                 printf("ERROR - QID Priority test FAILED.\n");
3206                 return ret;
3207         }
3208         printf("*** Running Ordered Reconfigure test...\n");
3209         ret = ordered_reconfigure(t);
3210         if (ret != 0) {
3211                 printf("ERROR - Ordered Reconfigure test FAILED.\n");
3212                 return ret;
3213         }
3214         printf("*** Running Port LB Single Reconfig test...\n");
3215         ret = port_single_lb_reconfig(t);
3216         if (ret != 0) {
3217                 printf("ERROR - Port LB Single Reconfig test FAILED.\n");
3218                 return ret;
3219         }
3220         printf("*** Running Port Reconfig Credits test...\n");
3221         ret = port_reconfig_credits(t);
3222         if (ret != 0) {
3223                 printf("ERROR - Port Reconfig Credits Reset test FAILED.\n");
3224                 return ret;
3225         }
3226         printf("*** Running Head-of-line-blocking test...\n");
3227         ret = holb(t);
3228         if (ret != 0) {
3229                 printf("ERROR - Head-of-line-blocking test FAILED.\n");
3230                 return ret;
3231         }
3232         if (rte_lcore_count() >= 3) {
3233                 printf("*** Running Worker loopback test...\n");
3234                 ret = worker_loopback(t);
3235                 if (ret != 0) {
3236                         printf("ERROR - Worker loopback test FAILED.\n");
3237                         return ret;
3238                 }
3239         } else {
3240                 printf("### Not enough cores for worker loopback test.\n");
3241                 printf("### Need at least 3 cores for test.\n");
3242         }
3243         /*
3244          * Free test instance, leaving mempool initialized, and a pointer to it
3245          * in static eventdev_func_mempool, as it is re-used on re-runs
3246          */
3247         free(t);
3248
3249         return 0;
3250 }
3251
3252 REGISTER_TEST_COMMAND(eventdev_sw_autotest, test_sw_eventdev);